diff --git a/vendor/github.com/Azure/go-ansiterm/LICENSE b/vendor/github.com/Azure/go-ansiterm/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..e3d9a64d1d853fc99c49142e2cbf1b588cf763fc
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Microsoft Corporation
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/Azure/go-ansiterm/README.md b/vendor/github.com/Azure/go-ansiterm/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..261c041e7abaa5e53c74b4a52ff5328b9631f9f7
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/README.md
@@ -0,0 +1,12 @@
+# go-ansiterm
+
+This is a cross platform Ansi Terminal Emulation library.  It reads a stream of Ansi characters and produces the appropriate function calls.  The results of the function calls are platform dependent.
+
+For example the parser might receive "ESC, [, A" as a stream of three characters.  This is the code for Cursor Up (http://www.vt100.net/docs/vt510-rm/CUU).  The parser then calls the cursor up function (CUU()) on an event handler.  The event handler determines what platform specific work must be done to cause the cursor to move up one position.
+
+The parser (parser.go) is a partial implementation of this state machine (http://vt100.net/emu/vt500_parser.png).  There are also two event handler implementations, one for tests (test_event_handler.go) to validate that the expected events are being produced and called, the other is a Windows implementation (winterm/win_event_handler.go).
+
+See parser_test.go for examples exercising the state machine and generating appropriate function calls.
+
+-----
+This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
diff --git a/vendor/github.com/Azure/go-ansiterm/constants.go b/vendor/github.com/Azure/go-ansiterm/constants.go
new file mode 100644
index 0000000000000000000000000000000000000000..96504a33bc9ece527eb46534a496ff0f9a40ec8f
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/constants.go
@@ -0,0 +1,188 @@
+package ansiterm
+
+const LogEnv = "DEBUG_TERMINAL"
+
+// ANSI constants
+// References:
+// -- http://www.ecma-international.org/publications/standards/Ecma-048.htm
+// -- http://man7.org/linux/man-pages/man4/console_codes.4.html
+// -- http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html
+// -- http://en.wikipedia.org/wiki/ANSI_escape_code
+// -- http://vt100.net/emu/dec_ansi_parser
+// -- http://vt100.net/emu/vt500_parser.svg
+// -- http://invisible-island.net/xterm/ctlseqs/ctlseqs.html
+// -- http://www.inwap.com/pdp10/ansicode.txt
+const (
+	// ECMA-48 Set Graphics Rendition
+	// Note:
+	// -- Constants leading with an underscore (e.g., _ANSI_xxx) are unsupported or reserved
+	// -- Fonts could possibly be supported via SetCurrentConsoleFontEx
+	// -- Windows does not expose the per-window cursor (i.e., caret) blink times
+	ANSI_SGR_RESET              = 0
+	ANSI_SGR_BOLD               = 1
+	ANSI_SGR_DIM                = 2
+	_ANSI_SGR_ITALIC            = 3
+	ANSI_SGR_UNDERLINE          = 4
+	_ANSI_SGR_BLINKSLOW         = 5
+	_ANSI_SGR_BLINKFAST         = 6
+	ANSI_SGR_REVERSE            = 7
+	_ANSI_SGR_INVISIBLE         = 8
+	_ANSI_SGR_LINETHROUGH       = 9
+	_ANSI_SGR_FONT_00           = 10
+	_ANSI_SGR_FONT_01           = 11
+	_ANSI_SGR_FONT_02           = 12
+	_ANSI_SGR_FONT_03           = 13
+	_ANSI_SGR_FONT_04           = 14
+	_ANSI_SGR_FONT_05           = 15
+	_ANSI_SGR_FONT_06           = 16
+	_ANSI_SGR_FONT_07           = 17
+	_ANSI_SGR_FONT_08           = 18
+	_ANSI_SGR_FONT_09           = 19
+	_ANSI_SGR_FONT_10           = 20
+	_ANSI_SGR_DOUBLEUNDERLINE   = 21
+	ANSI_SGR_BOLD_DIM_OFF       = 22
+	_ANSI_SGR_ITALIC_OFF        = 23
+	ANSI_SGR_UNDERLINE_OFF      = 24
+	_ANSI_SGR_BLINK_OFF         = 25
+	_ANSI_SGR_RESERVED_00       = 26
+	ANSI_SGR_REVERSE_OFF        = 27
+	_ANSI_SGR_INVISIBLE_OFF     = 28
+	_ANSI_SGR_LINETHROUGH_OFF   = 29
+	ANSI_SGR_FOREGROUND_BLACK   = 30
+	ANSI_SGR_FOREGROUND_RED     = 31
+	ANSI_SGR_FOREGROUND_GREEN   = 32
+	ANSI_SGR_FOREGROUND_YELLOW  = 33
+	ANSI_SGR_FOREGROUND_BLUE    = 34
+	ANSI_SGR_FOREGROUND_MAGENTA = 35
+	ANSI_SGR_FOREGROUND_CYAN    = 36
+	ANSI_SGR_FOREGROUND_WHITE   = 37
+	_ANSI_SGR_RESERVED_01       = 38
+	ANSI_SGR_FOREGROUND_DEFAULT = 39
+	ANSI_SGR_BACKGROUND_BLACK   = 40
+	ANSI_SGR_BACKGROUND_RED     = 41
+	ANSI_SGR_BACKGROUND_GREEN   = 42
+	ANSI_SGR_BACKGROUND_YELLOW  = 43
+	ANSI_SGR_BACKGROUND_BLUE    = 44
+	ANSI_SGR_BACKGROUND_MAGENTA = 45
+	ANSI_SGR_BACKGROUND_CYAN    = 46
+	ANSI_SGR_BACKGROUND_WHITE   = 47
+	_ANSI_SGR_RESERVED_02       = 48
+	ANSI_SGR_BACKGROUND_DEFAULT = 49
+	// 50 - 65: Unsupported
+
+	ANSI_MAX_CMD_LENGTH = 4096
+
+	MAX_INPUT_EVENTS = 128
+	DEFAULT_WIDTH    = 80
+	DEFAULT_HEIGHT   = 24
+
+	ANSI_BEL              = 0x07
+	ANSI_BACKSPACE        = 0x08
+	ANSI_TAB              = 0x09
+	ANSI_LINE_FEED        = 0x0A
+	ANSI_VERTICAL_TAB     = 0x0B
+	ANSI_FORM_FEED        = 0x0C
+	ANSI_CARRIAGE_RETURN  = 0x0D
+	ANSI_ESCAPE_PRIMARY   = 0x1B
+	ANSI_ESCAPE_SECONDARY = 0x5B
+	ANSI_OSC_STRING_ENTRY = 0x5D
+	ANSI_COMMAND_FIRST    = 0x40
+	ANSI_COMMAND_LAST     = 0x7E
+	DCS_ENTRY             = 0x90
+	CSI_ENTRY             = 0x9B
+	OSC_STRING            = 0x9D
+	ANSI_PARAMETER_SEP    = ";"
+	ANSI_CMD_G0           = '('
+	ANSI_CMD_G1           = ')'
+	ANSI_CMD_G2           = '*'
+	ANSI_CMD_G3           = '+'
+	ANSI_CMD_DECPNM       = '>'
+	ANSI_CMD_DECPAM       = '='
+	ANSI_CMD_OSC          = ']'
+	ANSI_CMD_STR_TERM     = '\\'
+
+	KEY_CONTROL_PARAM_2 = ";2"
+	KEY_CONTROL_PARAM_3 = ";3"
+	KEY_CONTROL_PARAM_4 = ";4"
+	KEY_CONTROL_PARAM_5 = ";5"
+	KEY_CONTROL_PARAM_6 = ";6"
+	KEY_CONTROL_PARAM_7 = ";7"
+	KEY_CONTROL_PARAM_8 = ";8"
+	KEY_ESC_CSI         = "\x1B["
+	KEY_ESC_N           = "\x1BN"
+	KEY_ESC_O           = "\x1BO"
+
+	FILL_CHARACTER = ' '
+)
+
+func getByteRange(start byte, end byte) []byte {
+	bytes := make([]byte, 0, 32)
+	for i := start; i <= end; i++ {
+		bytes = append(bytes, byte(i))
+	}
+
+	return bytes
+}
+
+var toGroundBytes = getToGroundBytes()
+var executors = getExecuteBytes()
+
+// SPACE		  20+A0 hex  Always and everywhere a blank space
+// Intermediate	  20-2F hex   !"#$%&'()*+,-./
+var intermeds = getByteRange(0x20, 0x2F)
+
+// Parameters	  30-3F hex  0123456789:;<=>?
+// CSI Parameters 30-39, 3B hex 0123456789;
+var csiParams = getByteRange(0x30, 0x3F)
+
+var csiCollectables = append(getByteRange(0x30, 0x39), getByteRange(0x3B, 0x3F)...)
+
+// Uppercase	  40-5F hex  @ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_
+var upperCase = getByteRange(0x40, 0x5F)
+
+// Lowercase	  60-7E hex  `abcdefghijklmnopqrstuvwxyz{|}~
+var lowerCase = getByteRange(0x60, 0x7E)
+
+// Alphabetics	  40-7E hex  (all of upper and lower case)
+var alphabetics = append(upperCase, lowerCase...)
+
+var printables = getByteRange(0x20, 0x7F)
+
+var escapeIntermediateToGroundBytes = getByteRange(0x30, 0x7E)
+var escapeToGroundBytes = getEscapeToGroundBytes()
+
+// See http://www.vt100.net/emu/vt500_parser.png for description of the complex
+// byte ranges below
+
+func getEscapeToGroundBytes() []byte {
+	escapeToGroundBytes := getByteRange(0x30, 0x4F)
+	escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x51, 0x57)...)
+	escapeToGroundBytes = append(escapeToGroundBytes, 0x59)
+	escapeToGroundBytes = append(escapeToGroundBytes, 0x5A)
+	escapeToGroundBytes = append(escapeToGroundBytes, 0x5C)
+	escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x60, 0x7E)...)
+	return escapeToGroundBytes
+}
+
+func getExecuteBytes() []byte {
+	executeBytes := getByteRange(0x00, 0x17)
+	executeBytes = append(executeBytes, 0x19)
+	executeBytes = append(executeBytes, getByteRange(0x1C, 0x1F)...)
+	return executeBytes
+}
+
+func getToGroundBytes() []byte {
+	groundBytes := []byte{0x18}
+	groundBytes = append(groundBytes, 0x1A)
+	groundBytes = append(groundBytes, getByteRange(0x80, 0x8F)...)
+	groundBytes = append(groundBytes, getByteRange(0x91, 0x97)...)
+	groundBytes = append(groundBytes, 0x99)
+	groundBytes = append(groundBytes, 0x9A)
+	groundBytes = append(groundBytes, 0x9C)
+	return groundBytes
+}
+
+// Delete		     7F hex  Always and everywhere ignored
+// C1 Control	  80-9F hex  32 additional control characters
+// G1 Displayable A1-FE hex  94 additional displayable characters
+// Special		  A0+FF hex  Same as SPACE and DELETE
diff --git a/vendor/github.com/Azure/go-ansiterm/context.go b/vendor/github.com/Azure/go-ansiterm/context.go
new file mode 100644
index 0000000000000000000000000000000000000000..8d66e777c038da4ab00ff91af98a5c7eb7e4d573
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/context.go
@@ -0,0 +1,7 @@
+package ansiterm
+
+type ansiContext struct {
+	currentChar byte
+	paramBuffer []byte
+	interBuffer []byte
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go b/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go
new file mode 100644
index 0000000000000000000000000000000000000000..1bd6057da8a8e0d1006cafad968e099af8be9263
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go
@@ -0,0 +1,49 @@
+package ansiterm
+
+type csiEntryState struct {
+	baseState
+}
+
+func (csiState csiEntryState) Handle(b byte) (s state, e error) {
+	logger.Infof("CsiEntry::Handle %#x", b)
+
+	nextState, err := csiState.baseState.Handle(b)
+	if nextState != nil || err != nil {
+		return nextState, err
+	}
+
+	switch {
+	case sliceContains(alphabetics, b):
+		return csiState.parser.ground, nil
+	case sliceContains(csiCollectables, b):
+		return csiState.parser.csiParam, nil
+	case sliceContains(executors, b):
+		return csiState, csiState.parser.execute()
+	}
+
+	return csiState, nil
+}
+
+func (csiState csiEntryState) Transition(s state) error {
+	logger.Infof("CsiEntry::Transition %s --> %s", csiState.Name(), s.Name())
+	csiState.baseState.Transition(s)
+
+	switch s {
+	case csiState.parser.ground:
+		return csiState.parser.csiDispatch()
+	case csiState.parser.csiParam:
+		switch {
+		case sliceContains(csiParams, csiState.parser.context.currentChar):
+			csiState.parser.collectParam()
+		case sliceContains(intermeds, csiState.parser.context.currentChar):
+			csiState.parser.collectInter()
+		}
+	}
+
+	return nil
+}
+
+func (csiState csiEntryState) Enter() error {
+	csiState.parser.clear()
+	return nil
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/csi_param_state.go b/vendor/github.com/Azure/go-ansiterm/csi_param_state.go
new file mode 100644
index 0000000000000000000000000000000000000000..4be35c5fd2af7a0722a157d9540f56e511981b15
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/csi_param_state.go
@@ -0,0 +1,38 @@
+package ansiterm
+
+type csiParamState struct {
+	baseState
+}
+
+func (csiState csiParamState) Handle(b byte) (s state, e error) {
+	logger.Infof("CsiParam::Handle %#x", b)
+
+	nextState, err := csiState.baseState.Handle(b)
+	if nextState != nil || err != nil {
+		return nextState, err
+	}
+
+	switch {
+	case sliceContains(alphabetics, b):
+		return csiState.parser.ground, nil
+	case sliceContains(csiCollectables, b):
+		csiState.parser.collectParam()
+		return csiState, nil
+	case sliceContains(executors, b):
+		return csiState, csiState.parser.execute()
+	}
+
+	return csiState, nil
+}
+
+func (csiState csiParamState) Transition(s state) error {
+	logger.Infof("CsiParam::Transition %s --> %s", csiState.Name(), s.Name())
+	csiState.baseState.Transition(s)
+
+	switch s {
+	case csiState.parser.ground:
+		return csiState.parser.csiDispatch()
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go b/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go
new file mode 100644
index 0000000000000000000000000000000000000000..2189eb6b6b06575a66de51557f25756ef0669e77
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go
@@ -0,0 +1,36 @@
+package ansiterm
+
+type escapeIntermediateState struct {
+	baseState
+}
+
+func (escState escapeIntermediateState) Handle(b byte) (s state, e error) {
+	logger.Infof("escapeIntermediateState::Handle %#x", b)
+	nextState, err := escState.baseState.Handle(b)
+	if nextState != nil || err != nil {
+		return nextState, err
+	}
+
+	switch {
+	case sliceContains(intermeds, b):
+		return escState, escState.parser.collectInter()
+	case sliceContains(executors, b):
+		return escState, escState.parser.execute()
+	case sliceContains(escapeIntermediateToGroundBytes, b):
+		return escState.parser.ground, nil
+	}
+
+	return escState, nil
+}
+
+func (escState escapeIntermediateState) Transition(s state) error {
+	logger.Infof("escapeIntermediateState::Transition %s --> %s", escState.Name(), s.Name())
+	escState.baseState.Transition(s)
+
+	switch s {
+	case escState.parser.ground:
+		return escState.parser.escDispatch()
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/escape_state.go b/vendor/github.com/Azure/go-ansiterm/escape_state.go
new file mode 100644
index 0000000000000000000000000000000000000000..7b1b9ad3f12e732c8641583ac1b577dc5a980692
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/escape_state.go
@@ -0,0 +1,47 @@
+package ansiterm
+
+type escapeState struct {
+	baseState
+}
+
+func (escState escapeState) Handle(b byte) (s state, e error) {
+	logger.Infof("escapeState::Handle %#x", b)
+	nextState, err := escState.baseState.Handle(b)
+	if nextState != nil || err != nil {
+		return nextState, err
+	}
+
+	switch {
+	case b == ANSI_ESCAPE_SECONDARY:
+		return escState.parser.csiEntry, nil
+	case b == ANSI_OSC_STRING_ENTRY:
+		return escState.parser.oscString, nil
+	case sliceContains(executors, b):
+		return escState, escState.parser.execute()
+	case sliceContains(escapeToGroundBytes, b):
+		return escState.parser.ground, nil
+	case sliceContains(intermeds, b):
+		return escState.parser.escapeIntermediate, nil
+	}
+
+	return escState, nil
+}
+
+func (escState escapeState) Transition(s state) error {
+	logger.Infof("Escape::Transition %s --> %s", escState.Name(), s.Name())
+	escState.baseState.Transition(s)
+
+	switch s {
+	case escState.parser.ground:
+		return escState.parser.escDispatch()
+	case escState.parser.escapeIntermediate:
+		return escState.parser.collectInter()
+	}
+
+	return nil
+}
+
+func (escState escapeState) Enter() error {
+	escState.parser.clear()
+	return nil
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/event_handler.go b/vendor/github.com/Azure/go-ansiterm/event_handler.go
new file mode 100644
index 0000000000000000000000000000000000000000..98087b38c2025a2e4feb2920faf9376c0cda1f3b
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/event_handler.go
@@ -0,0 +1,90 @@
+package ansiterm
+
+type AnsiEventHandler interface {
+	// Print
+	Print(b byte) error
+
+	// Execute C0 commands
+	Execute(b byte) error
+
+	// CUrsor Up
+	CUU(int) error
+
+	// CUrsor Down
+	CUD(int) error
+
+	// CUrsor Forward
+	CUF(int) error
+
+	// CUrsor Backward
+	CUB(int) error
+
+	// Cursor to Next Line
+	CNL(int) error
+
+	// Cursor to Previous Line
+	CPL(int) error
+
+	// Cursor Horizontal position Absolute
+	CHA(int) error
+
+	// Vertical line Position Absolute
+	VPA(int) error
+
+	// CUrsor Position
+	CUP(int, int) error
+
+	// Horizontal and Vertical Position (depends on PUM)
+	HVP(int, int) error
+
+	// Text Cursor Enable Mode
+	DECTCEM(bool) error
+
+	// Origin Mode
+	DECOM(bool) error
+
+	// 132 Column Mode
+	DECCOLM(bool) error
+
+	// Erase in Display
+	ED(int) error
+
+	// Erase in Line
+	EL(int) error
+
+	// Insert Line
+	IL(int) error
+
+	// Delete Line
+	DL(int) error
+
+	// Insert Character
+	ICH(int) error
+
+	// Delete Character
+	DCH(int) error
+
+	// Set Graphics Rendition
+	SGR([]int) error
+
+	// Pan Down
+	SU(int) error
+
+	// Pan Up
+	SD(int) error
+
+	// Device Attributes
+	DA([]string) error
+
+	// Set Top and Bottom Margins
+	DECSTBM(int, int) error
+
+	// Index
+	IND() error
+
+	// Reverse Index
+	RI() error
+
+	// Flush updates from previous commands
+	Flush() error
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/ground_state.go b/vendor/github.com/Azure/go-ansiterm/ground_state.go
new file mode 100644
index 0000000000000000000000000000000000000000..52451e94693a136f15109c9ca99fa6347ac1d5a8
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/ground_state.go
@@ -0,0 +1,24 @@
+package ansiterm
+
+type groundState struct {
+	baseState
+}
+
+func (gs groundState) Handle(b byte) (s state, e error) {
+	gs.parser.context.currentChar = b
+
+	nextState, err := gs.baseState.Handle(b)
+	if nextState != nil || err != nil {
+		return nextState, err
+	}
+
+	switch {
+	case sliceContains(printables, b):
+		return gs, gs.parser.print()
+
+	case sliceContains(executors, b):
+		return gs, gs.parser.execute()
+	}
+
+	return gs, nil
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/osc_string_state.go b/vendor/github.com/Azure/go-ansiterm/osc_string_state.go
new file mode 100644
index 0000000000000000000000000000000000000000..24062d420ebea5e02cb813535820ee183ff77b54
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/osc_string_state.go
@@ -0,0 +1,31 @@
+package ansiterm
+
+type oscStringState struct {
+	baseState
+}
+
+func (oscState oscStringState) Handle(b byte) (s state, e error) {
+	logger.Infof("OscString::Handle %#x", b)
+	nextState, err := oscState.baseState.Handle(b)
+	if nextState != nil || err != nil {
+		return nextState, err
+	}
+
+	switch {
+	case isOscStringTerminator(b):
+		return oscState.parser.ground, nil
+	}
+
+	return oscState, nil
+}
+
+// See below for OSC string terminators for linux
+// http://man7.org/linux/man-pages/man4/console_codes.4.html
+func isOscStringTerminator(b byte) bool {
+
+	if b == ANSI_BEL || b == 0x5C {
+		return true
+	}
+
+	return false
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/parser.go b/vendor/github.com/Azure/go-ansiterm/parser.go
new file mode 100644
index 0000000000000000000000000000000000000000..169f68dbefc4982716eafdcf31016695ee4cf33b
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/parser.go
@@ -0,0 +1,136 @@
+package ansiterm
+
+import (
+	"errors"
+	"io/ioutil"
+	"os"
+
+	"github.com/Sirupsen/logrus"
+)
+
+var logger *logrus.Logger
+
+type AnsiParser struct {
+	currState          state
+	eventHandler       AnsiEventHandler
+	context            *ansiContext
+	csiEntry           state
+	csiParam           state
+	dcsEntry           state
+	escape             state
+	escapeIntermediate state
+	error              state
+	ground             state
+	oscString          state
+	stateMap           []state
+}
+
+func CreateParser(initialState string, evtHandler AnsiEventHandler) *AnsiParser {
+	logFile := ioutil.Discard
+
+	if isDebugEnv := os.Getenv(LogEnv); isDebugEnv == "1" {
+		logFile, _ = os.Create("ansiParser.log")
+	}
+
+	logger = &logrus.Logger{
+		Out:       logFile,
+		Formatter: new(logrus.TextFormatter),
+		Level:     logrus.InfoLevel,
+	}
+
+	parser := &AnsiParser{
+		eventHandler: evtHandler,
+		context:      &ansiContext{},
+	}
+
+	parser.csiEntry = csiEntryState{baseState{name: "CsiEntry", parser: parser}}
+	parser.csiParam = csiParamState{baseState{name: "CsiParam", parser: parser}}
+	parser.dcsEntry = dcsEntryState{baseState{name: "DcsEntry", parser: parser}}
+	parser.escape = escapeState{baseState{name: "Escape", parser: parser}}
+	parser.escapeIntermediate = escapeIntermediateState{baseState{name: "EscapeIntermediate", parser: parser}}
+	parser.error = errorState{baseState{name: "Error", parser: parser}}
+	parser.ground = groundState{baseState{name: "Ground", parser: parser}}
+	parser.oscString = oscStringState{baseState{name: "OscString", parser: parser}}
+
+	parser.stateMap = []state{
+		parser.csiEntry,
+		parser.csiParam,
+		parser.dcsEntry,
+		parser.escape,
+		parser.escapeIntermediate,
+		parser.error,
+		parser.ground,
+		parser.oscString,
+	}
+
+	parser.currState = getState(initialState, parser.stateMap)
+
+	logger.Infof("CreateParser: parser %p", parser)
+	return parser
+}
+
+func getState(name string, states []state) state {
+	for _, el := range states {
+		if el.Name() == name {
+			return el
+		}
+	}
+
+	return nil
+}
+
+func (ap *AnsiParser) Parse(bytes []byte) (int, error) {
+	for i, b := range bytes {
+		if err := ap.handle(b); err != nil {
+			return i, err
+		}
+	}
+
+	return len(bytes), ap.eventHandler.Flush()
+}
+
+func (ap *AnsiParser) handle(b byte) error {
+	ap.context.currentChar = b
+	newState, err := ap.currState.Handle(b)
+	if err != nil {
+		return err
+	}
+
+	if newState == nil {
+		logger.Warning("newState is nil")
+		return errors.New("New state of 'nil' is invalid.")
+	}
+
+	if newState != ap.currState {
+		if err := ap.changeState(newState); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (ap *AnsiParser) changeState(newState state) error {
+	logger.Infof("ChangeState %s --> %s", ap.currState.Name(), newState.Name())
+
+	// Exit old state
+	if err := ap.currState.Exit(); err != nil {
+		logger.Infof("Exit state '%s' failed with : '%v'", ap.currState.Name(), err)
+		return err
+	}
+
+	// Perform transition action
+	if err := ap.currState.Transition(newState); err != nil {
+		logger.Infof("Transition from '%s' to '%s' failed with: '%v'", ap.currState.Name(), newState.Name(), err)
+		return err
+	}
+
+	// Enter new state
+	if err := newState.Enter(); err != nil {
+		logger.Infof("Enter state '%s' failed with: '%v'", newState.Name(), err)
+		return err
+	}
+
+	ap.currState = newState
+	return nil
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go b/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go
new file mode 100644
index 0000000000000000000000000000000000000000..8b69a67a5aa18b4ccebe60ee0e394d6eeec1d1fd
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go
@@ -0,0 +1,103 @@
+package ansiterm
+
+import (
+	"strconv"
+)
+
+func parseParams(bytes []byte) ([]string, error) {
+	paramBuff := make([]byte, 0, 0)
+	params := []string{}
+
+	for _, v := range bytes {
+		if v == ';' {
+			if len(paramBuff) > 0 {
+				// Completed parameter, append it to the list
+				s := string(paramBuff)
+				params = append(params, s)
+				paramBuff = make([]byte, 0, 0)
+			}
+		} else {
+			paramBuff = append(paramBuff, v)
+		}
+	}
+
+	// Last parameter may not be terminated with ';'
+	if len(paramBuff) > 0 {
+		s := string(paramBuff)
+		params = append(params, s)
+	}
+
+	logger.Infof("Parsed params: %v with length: %d", params, len(params))
+	return params, nil
+}
+
+func parseCmd(context ansiContext) (string, error) {
+	return string(context.currentChar), nil
+}
+
+func getInt(params []string, dflt int) int {
+	i := getInts(params, 1, dflt)[0]
+	logger.Infof("getInt: %v", i)
+	return i
+}
+
+func getInts(params []string, minCount int, dflt int) []int {
+	ints := []int{}
+
+	for _, v := range params {
+		i, _ := strconv.Atoi(v)
+		// Zero is mapped to the default value in VT100.
+		if i == 0 {
+			i = dflt
+		}
+		ints = append(ints, i)
+	}
+
+	if len(ints) < minCount {
+		remaining := minCount - len(ints)
+		for i := 0; i < remaining; i++ {
+			ints = append(ints, dflt)
+		}
+	}
+
+	logger.Infof("getInts: %v", ints)
+
+	return ints
+}
+
+func (ap *AnsiParser) modeDispatch(param string, set bool) error {
+	switch param {
+	case "?3":
+		return ap.eventHandler.DECCOLM(set)
+	case "?6":
+		return ap.eventHandler.DECOM(set)
+	case "?25":
+		return ap.eventHandler.DECTCEM(set)
+	}
+	return nil
+}
+
+func (ap *AnsiParser) hDispatch(params []string) error {
+	if len(params) == 1 {
+		return ap.modeDispatch(params[0], true)
+	}
+
+	return nil
+}
+
+func (ap *AnsiParser) lDispatch(params []string) error {
+	if len(params) == 1 {
+		return ap.modeDispatch(params[0], false)
+	}
+
+	return nil
+}
+
+func getEraseParam(params []string) int {
+	param := getInt(params, 0)
+	if param < 0 || 3 < param {
+		param = 0
+	}
+
+	return param
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/parser_actions.go b/vendor/github.com/Azure/go-ansiterm/parser_actions.go
new file mode 100644
index 0000000000000000000000000000000000000000..58750a2d2b1b4a47a65c0143241d842e5f0efc09
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/parser_actions.go
@@ -0,0 +1,122 @@
+package ansiterm
+
+import (
+	"fmt"
+)
+
+func (ap *AnsiParser) collectParam() error {
+	currChar := ap.context.currentChar
+	logger.Infof("collectParam %#x", currChar)
+	ap.context.paramBuffer = append(ap.context.paramBuffer, currChar)
+	return nil
+}
+
+func (ap *AnsiParser) collectInter() error {
+	currChar := ap.context.currentChar
+	logger.Infof("collectInter %#x", currChar)
+	ap.context.interBuffer = append(ap.context.interBuffer, currChar)
+	return nil
+}
+
+func (ap *AnsiParser) escDispatch() error {
+	cmd, _ := parseCmd(*ap.context)
+	intermeds := ap.context.interBuffer
+	logger.Infof("escDispatch currentChar: %#x", ap.context.currentChar)
+	logger.Infof("escDispatch: %v(%v)", cmd, intermeds)
+
+	switch cmd {
+	case "D": // IND
+		return ap.eventHandler.IND()
+	case "E": // NEL, equivalent to CRLF
+		err := ap.eventHandler.Execute(ANSI_CARRIAGE_RETURN)
+		if err == nil {
+			err = ap.eventHandler.Execute(ANSI_LINE_FEED)
+		}
+		return err
+	case "M": // RI
+		return ap.eventHandler.RI()
+	}
+
+	return nil
+}
+
+func (ap *AnsiParser) csiDispatch() error {
+	cmd, _ := parseCmd(*ap.context)
+	params, _ := parseParams(ap.context.paramBuffer)
+
+	logger.Infof("csiDispatch: %v(%v)", cmd, params)
+
+	switch cmd {
+	case "@":
+		return ap.eventHandler.ICH(getInt(params, 1))
+	case "A":
+		return ap.eventHandler.CUU(getInt(params, 1))
+	case "B":
+		return ap.eventHandler.CUD(getInt(params, 1))
+	case "C":
+		return ap.eventHandler.CUF(getInt(params, 1))
+	case "D":
+		return ap.eventHandler.CUB(getInt(params, 1))
+	case "E":
+		return ap.eventHandler.CNL(getInt(params, 1))
+	case "F":
+		return ap.eventHandler.CPL(getInt(params, 1))
+	case "G":
+		return ap.eventHandler.CHA(getInt(params, 1))
+	case "H":
+		ints := getInts(params, 2, 1)
+		x, y := ints[0], ints[1]
+		return ap.eventHandler.CUP(x, y)
+	case "J":
+		param := getEraseParam(params)
+		return ap.eventHandler.ED(param)
+	case "K":
+		param := getEraseParam(params)
+		return ap.eventHandler.EL(param)
+	case "L":
+		return ap.eventHandler.IL(getInt(params, 1))
+	case "M":
+		return ap.eventHandler.DL(getInt(params, 1))
+	case "P":
+		return ap.eventHandler.DCH(getInt(params, 1))
+	case "S":
+		return ap.eventHandler.SU(getInt(params, 1))
+	case "T":
+		return ap.eventHandler.SD(getInt(params, 1))
+	case "c":
+		return ap.eventHandler.DA(params)
+	case "d":
+		return ap.eventHandler.VPA(getInt(params, 1))
+	case "f":
+		ints := getInts(params, 2, 1)
+		x, y := ints[0], ints[1]
+		return ap.eventHandler.HVP(x, y)
+	case "h":
+		return ap.hDispatch(params)
+	case "l":
+		return ap.lDispatch(params)
+	case "m":
+		return ap.eventHandler.SGR(getInts(params, 1, 0))
+	case "r":
+		ints := getInts(params, 2, 1)
+		top, bottom := ints[0], ints[1]
+		return ap.eventHandler.DECSTBM(top, bottom)
+	default:
+		logger.Errorf(fmt.Sprintf("Unsupported CSI command: '%s', with full context:  %v", cmd, ap.context))
+		return nil
+	}
+
+}
+
+func (ap *AnsiParser) print() error {
+	return ap.eventHandler.Print(ap.context.currentChar)
+}
+
+func (ap *AnsiParser) clear() error {
+	ap.context = &ansiContext{}
+	return nil
+}
+
+func (ap *AnsiParser) execute() error {
+	return ap.eventHandler.Execute(ap.context.currentChar)
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/states.go b/vendor/github.com/Azure/go-ansiterm/states.go
new file mode 100644
index 0000000000000000000000000000000000000000..f2ea1fcd12dab12ad5ceea4b6c708c80ee4ec778
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/states.go
@@ -0,0 +1,71 @@
+package ansiterm
+
+type stateID int
+
+type state interface {
+	Enter() error
+	Exit() error
+	Handle(byte) (state, error)
+	Name() string
+	Transition(state) error
+}
+
+type baseState struct {
+	name   string
+	parser *AnsiParser
+}
+
+func (base baseState) Enter() error {
+	return nil
+}
+
+func (base baseState) Exit() error {
+	return nil
+}
+
+func (base baseState) Handle(b byte) (s state, e error) {
+
+	switch {
+	case b == CSI_ENTRY:
+		return base.parser.csiEntry, nil
+	case b == DCS_ENTRY:
+		return base.parser.dcsEntry, nil
+	case b == ANSI_ESCAPE_PRIMARY:
+		return base.parser.escape, nil
+	case b == OSC_STRING:
+		return base.parser.oscString, nil
+	case sliceContains(toGroundBytes, b):
+		return base.parser.ground, nil
+	}
+
+	return nil, nil
+}
+
+func (base baseState) Name() string {
+	return base.name
+}
+
+func (base baseState) Transition(s state) error {
+	if s == base.parser.ground {
+		execBytes := []byte{0x18}
+		execBytes = append(execBytes, 0x1A)
+		execBytes = append(execBytes, getByteRange(0x80, 0x8F)...)
+		execBytes = append(execBytes, getByteRange(0x91, 0x97)...)
+		execBytes = append(execBytes, 0x99)
+		execBytes = append(execBytes, 0x9A)
+
+		if sliceContains(execBytes, base.parser.context.currentChar) {
+			return base.parser.execute()
+		}
+	}
+
+	return nil
+}
+
+type dcsEntryState struct {
+	baseState
+}
+
+type errorState struct {
+	baseState
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/utilities.go b/vendor/github.com/Azure/go-ansiterm/utilities.go
new file mode 100644
index 0000000000000000000000000000000000000000..392114493a2221ed82dada3d9a2b8664206243f6
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/utilities.go
@@ -0,0 +1,21 @@
+package ansiterm
+
+import (
+	"strconv"
+)
+
+func sliceContains(bytes []byte, b byte) bool {
+	for _, v := range bytes {
+		if v == b {
+			return true
+		}
+	}
+
+	return false
+}
+
+func convertBytesToInteger(bytes []byte) int {
+	s := string(bytes)
+	i, _ := strconv.Atoi(s)
+	return i
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go b/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go
new file mode 100644
index 0000000000000000000000000000000000000000..daf2f06961588722360fd282e4c68438f79b19e6
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go
@@ -0,0 +1,182 @@
+// +build windows
+
+package winterm
+
+import (
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+	"syscall"
+
+	"github.com/Azure/go-ansiterm"
+)
+
+// Windows keyboard constants
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/dd375731(v=vs.85).aspx.
+const (
+	VK_PRIOR    = 0x21 // PAGE UP key
+	VK_NEXT     = 0x22 // PAGE DOWN key
+	VK_END      = 0x23 // END key
+	VK_HOME     = 0x24 // HOME key
+	VK_LEFT     = 0x25 // LEFT ARROW key
+	VK_UP       = 0x26 // UP ARROW key
+	VK_RIGHT    = 0x27 // RIGHT ARROW key
+	VK_DOWN     = 0x28 // DOWN ARROW key
+	VK_SELECT   = 0x29 // SELECT key
+	VK_PRINT    = 0x2A // PRINT key
+	VK_EXECUTE  = 0x2B // EXECUTE key
+	VK_SNAPSHOT = 0x2C // PRINT SCREEN key
+	VK_INSERT   = 0x2D // INS key
+	VK_DELETE   = 0x2E // DEL key
+	VK_HELP     = 0x2F // HELP key
+	VK_F1       = 0x70 // F1 key
+	VK_F2       = 0x71 // F2 key
+	VK_F3       = 0x72 // F3 key
+	VK_F4       = 0x73 // F4 key
+	VK_F5       = 0x74 // F5 key
+	VK_F6       = 0x75 // F6 key
+	VK_F7       = 0x76 // F7 key
+	VK_F8       = 0x77 // F8 key
+	VK_F9       = 0x78 // F9 key
+	VK_F10      = 0x79 // F10 key
+	VK_F11      = 0x7A // F11 key
+	VK_F12      = 0x7B // F12 key
+
+	RIGHT_ALT_PRESSED  = 0x0001
+	LEFT_ALT_PRESSED   = 0x0002
+	RIGHT_CTRL_PRESSED = 0x0004
+	LEFT_CTRL_PRESSED  = 0x0008
+	SHIFT_PRESSED      = 0x0010
+	NUMLOCK_ON         = 0x0020
+	SCROLLLOCK_ON      = 0x0040
+	CAPSLOCK_ON        = 0x0080
+	ENHANCED_KEY       = 0x0100
+)
+
+type ansiCommand struct {
+	CommandBytes []byte
+	Command      string
+	Parameters   []string
+	IsSpecial    bool
+}
+
+func newAnsiCommand(command []byte) *ansiCommand {
+
+	if isCharacterSelectionCmdChar(command[1]) {
+		// Is Character Set Selection commands
+		return &ansiCommand{
+			CommandBytes: command,
+			Command:      string(command),
+			IsSpecial:    true,
+		}
+	}
+
+	// last char is command character
+	lastCharIndex := len(command) - 1
+
+	ac := &ansiCommand{
+		CommandBytes: command,
+		Command:      string(command[lastCharIndex]),
+		IsSpecial:    false,
+	}
+
+	// more than a single escape
+	if lastCharIndex != 0 {
+		start := 1
+		// skip if double char escape sequence
+		if command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_ESCAPE_SECONDARY {
+			start++
+		}
+		// convert this to GetNextParam method
+		ac.Parameters = strings.Split(string(command[start:lastCharIndex]), ansiterm.ANSI_PARAMETER_SEP)
+	}
+
+	return ac
+}
+
+func (ac *ansiCommand) paramAsSHORT(index int, defaultValue int16) int16 {
+	if index < 0 || index >= len(ac.Parameters) {
+		return defaultValue
+	}
+
+	param, err := strconv.ParseInt(ac.Parameters[index], 10, 16)
+	if err != nil {
+		return defaultValue
+	}
+
+	return int16(param)
+}
+
+func (ac *ansiCommand) String() string {
+	return fmt.Sprintf("0x%v \"%v\" (\"%v\")",
+		bytesToHex(ac.CommandBytes),
+		ac.Command,
+		strings.Join(ac.Parameters, "\",\""))
+}
+
+// isAnsiCommandChar returns true if the passed byte falls within the range of ANSI commands.
+// See http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html.
+func isAnsiCommandChar(b byte) bool {
+	switch {
+	case ansiterm.ANSI_COMMAND_FIRST <= b && b <= ansiterm.ANSI_COMMAND_LAST && b != ansiterm.ANSI_ESCAPE_SECONDARY:
+		return true
+	case b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_OSC || b == ansiterm.ANSI_CMD_DECPAM || b == ansiterm.ANSI_CMD_DECPNM:
+		// non-CSI escape sequence terminator
+		return true
+	case b == ansiterm.ANSI_CMD_STR_TERM || b == ansiterm.ANSI_BEL:
+		// String escape sequence terminator
+		return true
+	}
+	return false
+}
+
+func isXtermOscSequence(command []byte, current byte) bool {
+	return (len(command) >= 2 && command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_CMD_OSC && current != ansiterm.ANSI_BEL)
+}
+
+func isCharacterSelectionCmdChar(b byte) bool {
+	return (b == ansiterm.ANSI_CMD_G0 || b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_G2 || b == ansiterm.ANSI_CMD_G3)
+}
+
+// bytesToHex converts a slice of bytes to a human-readable string.
+func bytesToHex(b []byte) string {
+	hex := make([]string, len(b))
+	for i, ch := range b {
+		hex[i] = fmt.Sprintf("%X", ch)
+	}
+	return strings.Join(hex, "")
+}
+
+// ensureInRange adjusts the passed value, if necessary, to ensure it is within
+// the passed min / max range.
+func ensureInRange(n int16, min int16, max int16) int16 {
+	if n < min {
+		return min
+	} else if n > max {
+		return max
+	} else {
+		return n
+	}
+}
+
+func GetStdFile(nFile int) (*os.File, uintptr) {
+	var file *os.File
+	switch nFile {
+	case syscall.STD_INPUT_HANDLE:
+		file = os.Stdin
+	case syscall.STD_OUTPUT_HANDLE:
+		file = os.Stdout
+	case syscall.STD_ERROR_HANDLE:
+		file = os.Stderr
+	default:
+		panic(fmt.Errorf("Invalid standard handle identifier: %v", nFile))
+	}
+
+	fd, err := syscall.GetStdHandle(nFile)
+	if err != nil {
+		panic(fmt.Errorf("Invalid standard handle indentifier: %v -- %v", nFile, err))
+	}
+
+	return file, uintptr(fd)
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/api.go b/vendor/github.com/Azure/go-ansiterm/winterm/api.go
new file mode 100644
index 0000000000000000000000000000000000000000..462d92f8ef9f8daedf7b0f66de36f9fe653af41c
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/winterm/api.go
@@ -0,0 +1,322 @@
+// +build windows
+
+package winterm
+
+import (
+	"fmt"
+	"syscall"
+	"unsafe"
+)
+
+//===========================================================================================================
+// IMPORTANT NOTE:
+//
+//	The methods below make extensive use of the "unsafe" package to obtain the required pointers.
+//	Beginning in Go 1.3, the garbage collector may release local variables (e.g., incoming arguments, stack
+//	variables) the pointers reference *before* the API completes.
+//
+//  As a result, in those cases, the code must hint that the variables remain in active by invoking the
+//	dummy method "use" (see below). Newer versions of Go are planned to change the mechanism to no longer
+//	require unsafe pointers.
+//
+//	If you add or modify methods, ENSURE protection of local variables through the "use" builtin to inform
+//	the garbage collector the variables remain in use if:
+//
+//	-- The value is not a pointer (e.g., int32, struct)
+//	-- The value is not referenced by the method after passing the pointer to Windows
+//
+//	See http://golang.org/doc/go1.3.
+//===========================================================================================================
+
+var (
+	kernel32DLL = syscall.NewLazyDLL("kernel32.dll")
+
+	getConsoleCursorInfoProc       = kernel32DLL.NewProc("GetConsoleCursorInfo")
+	setConsoleCursorInfoProc       = kernel32DLL.NewProc("SetConsoleCursorInfo")
+	setConsoleCursorPositionProc   = kernel32DLL.NewProc("SetConsoleCursorPosition")
+	setConsoleModeProc             = kernel32DLL.NewProc("SetConsoleMode")
+	getConsoleScreenBufferInfoProc = kernel32DLL.NewProc("GetConsoleScreenBufferInfo")
+	setConsoleScreenBufferSizeProc = kernel32DLL.NewProc("SetConsoleScreenBufferSize")
+	scrollConsoleScreenBufferProc  = kernel32DLL.NewProc("ScrollConsoleScreenBufferA")
+	setConsoleTextAttributeProc    = kernel32DLL.NewProc("SetConsoleTextAttribute")
+	setConsoleWindowInfoProc       = kernel32DLL.NewProc("SetConsoleWindowInfo")
+	writeConsoleOutputProc         = kernel32DLL.NewProc("WriteConsoleOutputW")
+	readConsoleInputProc           = kernel32DLL.NewProc("ReadConsoleInputW")
+	waitForSingleObjectProc        = kernel32DLL.NewProc("WaitForSingleObject")
+)
+
+// Windows Console constants
+const (
+	// Console modes
+	// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx.
+	ENABLE_PROCESSED_INPUT = 0x0001
+	ENABLE_LINE_INPUT      = 0x0002
+	ENABLE_ECHO_INPUT      = 0x0004
+	ENABLE_WINDOW_INPUT    = 0x0008
+	ENABLE_MOUSE_INPUT     = 0x0010
+	ENABLE_INSERT_MODE     = 0x0020
+	ENABLE_QUICK_EDIT_MODE = 0x0040
+	ENABLE_EXTENDED_FLAGS  = 0x0080
+
+	ENABLE_PROCESSED_OUTPUT   = 0x0001
+	ENABLE_WRAP_AT_EOL_OUTPUT = 0x0002
+
+	// Character attributes
+	// Note:
+	// -- The attributes are combined to produce various colors (e.g., Blue + Green will create Cyan).
+	//    Clearing all foreground or background colors results in black; setting all creates white.
+	// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682088(v=vs.85).aspx#_win32_character_attributes.
+	FOREGROUND_BLUE      uint16 = 0x0001
+	FOREGROUND_GREEN     uint16 = 0x0002
+	FOREGROUND_RED       uint16 = 0x0004
+	FOREGROUND_INTENSITY uint16 = 0x0008
+	FOREGROUND_MASK      uint16 = 0x000F
+
+	BACKGROUND_BLUE      uint16 = 0x0010
+	BACKGROUND_GREEN     uint16 = 0x0020
+	BACKGROUND_RED       uint16 = 0x0040
+	BACKGROUND_INTENSITY uint16 = 0x0080
+	BACKGROUND_MASK      uint16 = 0x00F0
+
+	COMMON_LVB_MASK          uint16 = 0xFF00
+	COMMON_LVB_REVERSE_VIDEO uint16 = 0x4000
+	COMMON_LVB_UNDERSCORE    uint16 = 0x8000
+
+	// Input event types
+	// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx.
+	KEY_EVENT                = 0x0001
+	MOUSE_EVENT              = 0x0002
+	WINDOW_BUFFER_SIZE_EVENT = 0x0004
+	MENU_EVENT               = 0x0008
+	FOCUS_EVENT              = 0x0010
+
+	// WaitForSingleObject return codes
+	WAIT_ABANDONED = 0x00000080
+	WAIT_FAILED    = 0xFFFFFFFF
+	WAIT_SIGNALED  = 0x0000000
+	WAIT_TIMEOUT   = 0x00000102
+
+	// WaitForSingleObject wait duration
+	WAIT_INFINITE       = 0xFFFFFFFF
+	WAIT_ONE_SECOND     = 1000
+	WAIT_HALF_SECOND    = 500
+	WAIT_QUARTER_SECOND = 250
+)
+
+// Windows API Console types
+// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682101(v=vs.85).aspx for Console specific types (e.g., COORD)
+// -- See https://msdn.microsoft.com/en-us/library/aa296569(v=vs.60).aspx for comments on alignment
+type (
+	CHAR_INFO struct {
+		UnicodeChar uint16
+		Attributes  uint16
+	}
+
+	CONSOLE_CURSOR_INFO struct {
+		Size    uint32
+		Visible int32
+	}
+
+	CONSOLE_SCREEN_BUFFER_INFO struct {
+		Size              COORD
+		CursorPosition    COORD
+		Attributes        uint16
+		Window            SMALL_RECT
+		MaximumWindowSize COORD
+	}
+
+	COORD struct {
+		X int16
+		Y int16
+	}
+
+	SMALL_RECT struct {
+		Left   int16
+		Top    int16
+		Right  int16
+		Bottom int16
+	}
+
+	// INPUT_RECORD is a C/C++ union of which KEY_EVENT_RECORD is one case, it is also the largest
+	// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx.
+	INPUT_RECORD struct {
+		EventType uint16
+		KeyEvent  KEY_EVENT_RECORD
+	}
+
+	KEY_EVENT_RECORD struct {
+		KeyDown         int32
+		RepeatCount     uint16
+		VirtualKeyCode  uint16
+		VirtualScanCode uint16
+		UnicodeChar     uint16
+		ControlKeyState uint32
+	}
+
+	WINDOW_BUFFER_SIZE struct {
+		Size COORD
+	}
+)
+
+// boolToBOOL converts a Go bool into a Windows int32.
+func boolToBOOL(f bool) int32 {
+	if f {
+		return int32(1)
+	} else {
+		return int32(0)
+	}
+}
+
+// GetConsoleCursorInfo retrieves information about the size and visiblity of the console cursor.
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683163(v=vs.85).aspx.
+func GetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error {
+	r1, r2, err := getConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0)
+	return checkError(r1, r2, err)
+}
+
+// SetConsoleCursorInfo sets the size and visiblity of the console cursor.
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686019(v=vs.85).aspx.
+func SetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error {
+	r1, r2, err := setConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0)
+	return checkError(r1, r2, err)
+}
+
+// SetConsoleCursorPosition location of the console cursor.
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686025(v=vs.85).aspx.
+func SetConsoleCursorPosition(handle uintptr, coord COORD) error {
+	r1, r2, err := setConsoleCursorPositionProc.Call(handle, coordToPointer(coord))
+	use(coord)
+	return checkError(r1, r2, err)
+}
+
+// GetConsoleMode gets the console mode for given file descriptor
+// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx.
+func GetConsoleMode(handle uintptr) (mode uint32, err error) {
+	err = syscall.GetConsoleMode(syscall.Handle(handle), &mode)
+	return mode, err
+}
+
+// SetConsoleMode sets the console mode for given file descriptor
+// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx.
+func SetConsoleMode(handle uintptr, mode uint32) error {
+	r1, r2, err := setConsoleModeProc.Call(handle, uintptr(mode), 0)
+	use(mode)
+	return checkError(r1, r2, err)
+}
+
+// GetConsoleScreenBufferInfo retrieves information about the specified console screen buffer.
+// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683171(v=vs.85).aspx.
+func GetConsoleScreenBufferInfo(handle uintptr) (*CONSOLE_SCREEN_BUFFER_INFO, error) {
+	info := CONSOLE_SCREEN_BUFFER_INFO{}
+	err := checkError(getConsoleScreenBufferInfoProc.Call(handle, uintptr(unsafe.Pointer(&info)), 0))
+	if err != nil {
+		return nil, err
+	}
+	return &info, nil
+}
+
+func ScrollConsoleScreenBuffer(handle uintptr, scrollRect SMALL_RECT, clipRect SMALL_RECT, destOrigin COORD, char CHAR_INFO) error {
+	r1, r2, err := scrollConsoleScreenBufferProc.Call(handle, uintptr(unsafe.Pointer(&scrollRect)), uintptr(unsafe.Pointer(&clipRect)), coordToPointer(destOrigin), uintptr(unsafe.Pointer(&char)))
+	use(scrollRect)
+	use(clipRect)
+	use(destOrigin)
+	use(char)
+	return checkError(r1, r2, err)
+}
+
+// SetConsoleScreenBufferSize sets the size of the console screen buffer.
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686044(v=vs.85).aspx.
+func SetConsoleScreenBufferSize(handle uintptr, coord COORD) error {
+	r1, r2, err := setConsoleScreenBufferSizeProc.Call(handle, coordToPointer(coord))
+	use(coord)
+	return checkError(r1, r2, err)
+}
+
+// SetConsoleTextAttribute sets the attributes of characters written to the
+// console screen buffer by the WriteFile or WriteConsole function.
+// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686047(v=vs.85).aspx.
+func SetConsoleTextAttribute(handle uintptr, attribute uint16) error {
+	r1, r2, err := setConsoleTextAttributeProc.Call(handle, uintptr(attribute), 0)
+	use(attribute)
+	return checkError(r1, r2, err)
+}
+
+// SetConsoleWindowInfo sets the size and position of the console screen buffer's window.
+// Note that the size and location must be within and no larger than the backing console screen buffer.
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686125(v=vs.85).aspx.
+func SetConsoleWindowInfo(handle uintptr, isAbsolute bool, rect SMALL_RECT) error {
+	r1, r2, err := setConsoleWindowInfoProc.Call(handle, uintptr(boolToBOOL(isAbsolute)), uintptr(unsafe.Pointer(&rect)))
+	use(isAbsolute)
+	use(rect)
+	return checkError(r1, r2, err)
+}
+
+// WriteConsoleOutput writes the CHAR_INFOs from the provided buffer to the active console buffer.
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687404(v=vs.85).aspx.
+func WriteConsoleOutput(handle uintptr, buffer []CHAR_INFO, bufferSize COORD, bufferCoord COORD, writeRegion *SMALL_RECT) error {
+	r1, r2, err := writeConsoleOutputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), coordToPointer(bufferSize), coordToPointer(bufferCoord), uintptr(unsafe.Pointer(writeRegion)))
+	use(buffer)
+	use(bufferSize)
+	use(bufferCoord)
+	return checkError(r1, r2, err)
+}
+
+// ReadConsoleInput reads (and removes) data from the console input buffer.
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms684961(v=vs.85).aspx.
+func ReadConsoleInput(handle uintptr, buffer []INPUT_RECORD, count *uint32) error {
+	r1, r2, err := readConsoleInputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), uintptr(len(buffer)), uintptr(unsafe.Pointer(count)))
+	use(buffer)
+	return checkError(r1, r2, err)
+}
+
+// WaitForSingleObject waits for the passed handle to be signaled.
+// It returns true if the handle was signaled; false otherwise.
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687032(v=vs.85).aspx.
+func WaitForSingleObject(handle uintptr, msWait uint32) (bool, error) {
+	r1, _, err := waitForSingleObjectProc.Call(handle, uintptr(uint32(msWait)))
+	switch r1 {
+	case WAIT_ABANDONED, WAIT_TIMEOUT:
+		return false, nil
+	case WAIT_SIGNALED:
+		return true, nil
+	}
+	use(msWait)
+	return false, err
+}
+
+// String helpers
+func (info CONSOLE_SCREEN_BUFFER_INFO) String() string {
+	return fmt.Sprintf("Size(%v) Cursor(%v) Window(%v) Max(%v)", info.Size, info.CursorPosition, info.Window, info.MaximumWindowSize)
+}
+
+func (coord COORD) String() string {
+	return fmt.Sprintf("%v,%v", coord.X, coord.Y)
+}
+
+func (rect SMALL_RECT) String() string {
+	return fmt.Sprintf("(%v,%v),(%v,%v)", rect.Left, rect.Top, rect.Right, rect.Bottom)
+}
+
+// checkError evaluates the results of a Windows API call and returns the error if it failed.
+func checkError(r1, r2 uintptr, err error) error {
+	// Windows APIs return non-zero to indicate success
+	if r1 != 0 {
+		return nil
+	}
+
+	// Return the error if provided, otherwise default to EINVAL
+	if err != nil {
+		return err
+	}
+	return syscall.EINVAL
+}
+
+// coordToPointer converts a COORD into a uintptr (by fooling the type system).
+func coordToPointer(c COORD) uintptr {
+	// Note: This code assumes the two SHORTs are correctly laid out; the "cast" to uint32 is just to get a pointer to pass.
+	return uintptr(*((*uint32)(unsafe.Pointer(&c))))
+}
+
+// use is a no-op, but the compiler cannot see that it is.
+// Calling use(p) ensures that p is kept live until that point.
+func use(p interface{}) {}
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go b/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go
new file mode 100644
index 0000000000000000000000000000000000000000..cbec8f728f49a54a0af75c718aa20577f427fad0
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go
@@ -0,0 +1,100 @@
+// +build windows
+
+package winterm
+
+import "github.com/Azure/go-ansiterm"
+
+const (
+	FOREGROUND_COLOR_MASK = FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE
+	BACKGROUND_COLOR_MASK = BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE
+)
+
+// collectAnsiIntoWindowsAttributes modifies the passed Windows text mode flags to reflect the
+// request represented by the passed ANSI mode.
+func collectAnsiIntoWindowsAttributes(windowsMode uint16, inverted bool, baseMode uint16, ansiMode int16) (uint16, bool) {
+	switch ansiMode {
+
+	// Mode styles
+	case ansiterm.ANSI_SGR_BOLD:
+		windowsMode = windowsMode | FOREGROUND_INTENSITY
+
+	case ansiterm.ANSI_SGR_DIM, ansiterm.ANSI_SGR_BOLD_DIM_OFF:
+		windowsMode &^= FOREGROUND_INTENSITY
+
+	case ansiterm.ANSI_SGR_UNDERLINE:
+		windowsMode = windowsMode | COMMON_LVB_UNDERSCORE
+
+	case ansiterm.ANSI_SGR_REVERSE:
+		inverted = true
+
+	case ansiterm.ANSI_SGR_REVERSE_OFF:
+		inverted = false
+
+	case ansiterm.ANSI_SGR_UNDERLINE_OFF:
+		windowsMode &^= COMMON_LVB_UNDERSCORE
+
+		// Foreground colors
+	case ansiterm.ANSI_SGR_FOREGROUND_DEFAULT:
+		windowsMode = (windowsMode &^ FOREGROUND_MASK) | (baseMode & FOREGROUND_MASK)
+
+	case ansiterm.ANSI_SGR_FOREGROUND_BLACK:
+		windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK)
+
+	case ansiterm.ANSI_SGR_FOREGROUND_RED:
+		windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED
+
+	case ansiterm.ANSI_SGR_FOREGROUND_GREEN:
+		windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN
+
+	case ansiterm.ANSI_SGR_FOREGROUND_YELLOW:
+		windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN
+
+	case ansiterm.ANSI_SGR_FOREGROUND_BLUE:
+		windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_BLUE
+
+	case ansiterm.ANSI_SGR_FOREGROUND_MAGENTA:
+		windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_BLUE
+
+	case ansiterm.ANSI_SGR_FOREGROUND_CYAN:
+		windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN | FOREGROUND_BLUE
+
+	case ansiterm.ANSI_SGR_FOREGROUND_WHITE:
+		windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE
+
+		// Background colors
+	case ansiterm.ANSI_SGR_BACKGROUND_DEFAULT:
+		// Black with no intensity
+		windowsMode = (windowsMode &^ BACKGROUND_MASK) | (baseMode & BACKGROUND_MASK)
+
+	case ansiterm.ANSI_SGR_BACKGROUND_BLACK:
+		windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK)
+
+	case ansiterm.ANSI_SGR_BACKGROUND_RED:
+		windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED
+
+	case ansiterm.ANSI_SGR_BACKGROUND_GREEN:
+		windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN
+
+	case ansiterm.ANSI_SGR_BACKGROUND_YELLOW:
+		windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN
+
+	case ansiterm.ANSI_SGR_BACKGROUND_BLUE:
+		windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_BLUE
+
+	case ansiterm.ANSI_SGR_BACKGROUND_MAGENTA:
+		windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_BLUE
+
+	case ansiterm.ANSI_SGR_BACKGROUND_CYAN:
+		windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN | BACKGROUND_BLUE
+
+	case ansiterm.ANSI_SGR_BACKGROUND_WHITE:
+		windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE
+	}
+
+	return windowsMode, inverted
+}
+
+// invertAttributes inverts the foreground and background colors of a Windows attributes value
+func invertAttributes(windowsMode uint16) uint16 {
+	return (COMMON_LVB_MASK & windowsMode) | ((FOREGROUND_MASK & windowsMode) << 4) | ((BACKGROUND_MASK & windowsMode) >> 4)
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go b/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go
new file mode 100644
index 0000000000000000000000000000000000000000..f015723ade79efdc3702fd291571000337e0207d
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go
@@ -0,0 +1,101 @@
+// +build windows
+
+package winterm
+
+const (
+	horizontal = iota
+	vertical
+)
+
+func (h *windowsAnsiEventHandler) getCursorWindow(info *CONSOLE_SCREEN_BUFFER_INFO) SMALL_RECT {
+	if h.originMode {
+		sr := h.effectiveSr(info.Window)
+		return SMALL_RECT{
+			Top:    sr.top,
+			Bottom: sr.bottom,
+			Left:   0,
+			Right:  info.Size.X - 1,
+		}
+	} else {
+		return SMALL_RECT{
+			Top:    info.Window.Top,
+			Bottom: info.Window.Bottom,
+			Left:   0,
+			Right:  info.Size.X - 1,
+		}
+	}
+}
+
+// setCursorPosition sets the cursor to the specified position, bounded to the screen size
+func (h *windowsAnsiEventHandler) setCursorPosition(position COORD, window SMALL_RECT) error {
+	position.X = ensureInRange(position.X, window.Left, window.Right)
+	position.Y = ensureInRange(position.Y, window.Top, window.Bottom)
+	err := SetConsoleCursorPosition(h.fd, position)
+	if err != nil {
+		return err
+	}
+	logger.Infof("Cursor position set: (%d, %d)", position.X, position.Y)
+	return err
+}
+
+func (h *windowsAnsiEventHandler) moveCursorVertical(param int) error {
+	return h.moveCursor(vertical, param)
+}
+
+func (h *windowsAnsiEventHandler) moveCursorHorizontal(param int) error {
+	return h.moveCursor(horizontal, param)
+}
+
+func (h *windowsAnsiEventHandler) moveCursor(moveMode int, param int) error {
+	info, err := GetConsoleScreenBufferInfo(h.fd)
+	if err != nil {
+		return err
+	}
+
+	position := info.CursorPosition
+	switch moveMode {
+	case horizontal:
+		position.X += int16(param)
+	case vertical:
+		position.Y += int16(param)
+	}
+
+	if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (h *windowsAnsiEventHandler) moveCursorLine(param int) error {
+	info, err := GetConsoleScreenBufferInfo(h.fd)
+	if err != nil {
+		return err
+	}
+
+	position := info.CursorPosition
+	position.X = 0
+	position.Y += int16(param)
+
+	if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (h *windowsAnsiEventHandler) moveCursorColumn(param int) error {
+	info, err := GetConsoleScreenBufferInfo(h.fd)
+	if err != nil {
+		return err
+	}
+
+	position := info.CursorPosition
+	position.X = int16(param) - 1
+
+	if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go b/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go
new file mode 100644
index 0000000000000000000000000000000000000000..244b5fa25efbcd02faef9d085b2a67f578289398
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go
@@ -0,0 +1,84 @@
+// +build windows
+
+package winterm
+
+import "github.com/Azure/go-ansiterm"
+
+func (h *windowsAnsiEventHandler) clearRange(attributes uint16, fromCoord COORD, toCoord COORD) error {
+	// Ignore an invalid (negative area) request
+	if toCoord.Y < fromCoord.Y {
+		return nil
+	}
+
+	var err error
+
+	var coordStart = COORD{}
+	var coordEnd = COORD{}
+
+	xCurrent, yCurrent := fromCoord.X, fromCoord.Y
+	xEnd, yEnd := toCoord.X, toCoord.Y
+
+	// Clear any partial initial line
+	if xCurrent > 0 {
+		coordStart.X, coordStart.Y = xCurrent, yCurrent
+		coordEnd.X, coordEnd.Y = xEnd, yCurrent
+
+		err = h.clearRect(attributes, coordStart, coordEnd)
+		if err != nil {
+			return err
+		}
+
+		xCurrent = 0
+		yCurrent += 1
+	}
+
+	// Clear intervening rectangular section
+	if yCurrent < yEnd {
+		coordStart.X, coordStart.Y = xCurrent, yCurrent
+		coordEnd.X, coordEnd.Y = xEnd, yEnd-1
+
+		err = h.clearRect(attributes, coordStart, coordEnd)
+		if err != nil {
+			return err
+		}
+
+		xCurrent = 0
+		yCurrent = yEnd
+	}
+
+	// Clear remaining partial ending line
+	coordStart.X, coordStart.Y = xCurrent, yCurrent
+	coordEnd.X, coordEnd.Y = xEnd, yEnd
+
+	err = h.clearRect(attributes, coordStart, coordEnd)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (h *windowsAnsiEventHandler) clearRect(attributes uint16, fromCoord COORD, toCoord COORD) error {
+	region := SMALL_RECT{Top: fromCoord.Y, Left: fromCoord.X, Bottom: toCoord.Y, Right: toCoord.X}
+	width := toCoord.X - fromCoord.X + 1
+	height := toCoord.Y - fromCoord.Y + 1
+	size := uint32(width) * uint32(height)
+
+	if size <= 0 {
+		return nil
+	}
+
+	buffer := make([]CHAR_INFO, size)
+
+	char := CHAR_INFO{ansiterm.FILL_CHARACTER, attributes}
+	for i := 0; i < int(size); i++ {
+		buffer[i] = char
+	}
+
+	err := WriteConsoleOutput(h.fd, buffer, COORD{X: width, Y: height}, COORD{X: 0, Y: 0}, &region)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go b/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go
new file mode 100644
index 0000000000000000000000000000000000000000..706d270577e3d72f81843189badd55db04b0d11d
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go
@@ -0,0 +1,118 @@
+// +build windows
+
+package winterm
+
+// effectiveSr gets the current effective scroll region in buffer coordinates
+func (h *windowsAnsiEventHandler) effectiveSr(window SMALL_RECT) scrollRegion {
+	top := addInRange(window.Top, h.sr.top, window.Top, window.Bottom)
+	bottom := addInRange(window.Top, h.sr.bottom, window.Top, window.Bottom)
+	if top >= bottom {
+		top = window.Top
+		bottom = window.Bottom
+	}
+	return scrollRegion{top: top, bottom: bottom}
+}
+
+func (h *windowsAnsiEventHandler) scrollUp(param int) error {
+	info, err := GetConsoleScreenBufferInfo(h.fd)
+	if err != nil {
+		return err
+	}
+
+	sr := h.effectiveSr(info.Window)
+	return h.scroll(param, sr, info)
+}
+
+func (h *windowsAnsiEventHandler) scrollDown(param int) error {
+	return h.scrollUp(-param)
+}
+
+func (h *windowsAnsiEventHandler) deleteLines(param int) error {
+	info, err := GetConsoleScreenBufferInfo(h.fd)
+	if err != nil {
+		return err
+	}
+
+	start := info.CursorPosition.Y
+	sr := h.effectiveSr(info.Window)
+	// Lines cannot be inserted or deleted outside the scrolling region.
+	if start >= sr.top && start <= sr.bottom {
+		sr.top = start
+		return h.scroll(param, sr, info)
+	} else {
+		return nil
+	}
+}
+
+func (h *windowsAnsiEventHandler) insertLines(param int) error {
+	return h.deleteLines(-param)
+}
+
+// scroll scrolls the provided scroll region by param lines. The scroll region is in buffer coordinates.
+func (h *windowsAnsiEventHandler) scroll(param int, sr scrollRegion, info *CONSOLE_SCREEN_BUFFER_INFO) error {
+	logger.Infof("scroll: scrollTop: %d, scrollBottom: %d", sr.top, sr.bottom)
+	logger.Infof("scroll: windowTop: %d, windowBottom: %d", info.Window.Top, info.Window.Bottom)
+
+	// Copy from and clip to the scroll region (full buffer width)
+	scrollRect := SMALL_RECT{
+		Top:    sr.top,
+		Bottom: sr.bottom,
+		Left:   0,
+		Right:  info.Size.X - 1,
+	}
+
+	// Origin to which area should be copied
+	destOrigin := COORD{
+		X: 0,
+		Y: sr.top - int16(param),
+	}
+
+	char := CHAR_INFO{
+		UnicodeChar: ' ',
+		Attributes:  h.attributes,
+	}
+
+	if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (h *windowsAnsiEventHandler) deleteCharacters(param int) error {
+	info, err := GetConsoleScreenBufferInfo(h.fd)
+	if err != nil {
+		return err
+	}
+	return h.scrollLine(param, info.CursorPosition, info)
+}
+
+func (h *windowsAnsiEventHandler) insertCharacters(param int) error {
+	return h.deleteCharacters(-param)
+}
+
+// scrollLine scrolls a line horizontally starting at the provided position by a number of columns.
+func (h *windowsAnsiEventHandler) scrollLine(columns int, position COORD, info *CONSOLE_SCREEN_BUFFER_INFO) error {
+	// Copy from and clip to the scroll region (full buffer width)
+	scrollRect := SMALL_RECT{
+		Top:    position.Y,
+		Bottom: position.Y,
+		Left:   position.X,
+		Right:  info.Size.X - 1,
+	}
+
+	// Origin to which area should be copied
+	destOrigin := COORD{
+		X: position.X - int16(columns),
+		Y: position.Y,
+	}
+
+	char := CHAR_INFO{
+		UnicodeChar: ' ',
+		Attributes:  h.attributes,
+	}
+
+	if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go b/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go
new file mode 100644
index 0000000000000000000000000000000000000000..afa7635d77bab32a76544bd3763901bdb39e6dcd
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go
@@ -0,0 +1,9 @@
+// +build windows
+
+package winterm
+
+// AddInRange increments a value by the passed quantity while ensuring the values
+// always remain within the supplied min / max range.
+func addInRange(n int16, increment int16, min int16, max int16) int16 {
+	return ensureInRange(n+increment, min, max)
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go b/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go
new file mode 100644
index 0000000000000000000000000000000000000000..4d858ed61118b68b6d0cefa260e5a1d514c60630
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go
@@ -0,0 +1,726 @@
+// +build windows
+
+package winterm
+
+import (
+	"bytes"
+	"io/ioutil"
+	"os"
+	"strconv"
+
+	"github.com/Azure/go-ansiterm"
+	"github.com/Sirupsen/logrus"
+)
+
+var logger *logrus.Logger
+
+type windowsAnsiEventHandler struct {
+	fd             uintptr
+	file           *os.File
+	infoReset      *CONSOLE_SCREEN_BUFFER_INFO
+	sr             scrollRegion
+	buffer         bytes.Buffer
+	attributes     uint16
+	inverted       bool
+	wrapNext       bool
+	drewMarginByte bool
+	originMode     bool
+	marginByte     byte
+	curInfo        *CONSOLE_SCREEN_BUFFER_INFO
+	curPos         COORD
+}
+
+func CreateWinEventHandler(fd uintptr, file *os.File) ansiterm.AnsiEventHandler {
+	logFile := ioutil.Discard
+
+	if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" {
+		logFile, _ = os.Create("winEventHandler.log")
+	}
+
+	logger = &logrus.Logger{
+		Out:       logFile,
+		Formatter: new(logrus.TextFormatter),
+		Level:     logrus.DebugLevel,
+	}
+
+	infoReset, err := GetConsoleScreenBufferInfo(fd)
+	if err != nil {
+		return nil
+	}
+
+	return &windowsAnsiEventHandler{
+		fd:         fd,
+		file:       file,
+		infoReset:  infoReset,
+		attributes: infoReset.Attributes,
+	}
+}
+
+type scrollRegion struct {
+	top    int16
+	bottom int16
+}
+
+// simulateLF simulates a LF or CR+LF by scrolling if necessary to handle the
+// current cursor position and scroll region settings, in which case it returns
+// true. If no special handling is necessary, then it does nothing and returns
+// false.
+//
+// In the false case, the caller should ensure that a carriage return
+// and line feed are inserted or that the text is otherwise wrapped.
+func (h *windowsAnsiEventHandler) simulateLF(includeCR bool) (bool, error) {
+	if h.wrapNext {
+		if err := h.Flush(); err != nil {
+			return false, err
+		}
+		h.clearWrap()
+	}
+	pos, info, err := h.getCurrentInfo()
+	if err != nil {
+		return false, err
+	}
+	sr := h.effectiveSr(info.Window)
+	if pos.Y == sr.bottom {
+		// Scrolling is necessary. Let Windows automatically scroll if the scrolling region
+		// is the full window.
+		if sr.top == info.Window.Top && sr.bottom == info.Window.Bottom {
+			if includeCR {
+				pos.X = 0
+				h.updatePos(pos)
+			}
+			return false, nil
+		}
+
+		// A custom scroll region is active. Scroll the window manually to simulate
+		// the LF.
+		if err := h.Flush(); err != nil {
+			return false, err
+		}
+		logger.Info("Simulating LF inside scroll region")
+		if err := h.scrollUp(1); err != nil {
+			return false, err
+		}
+		if includeCR {
+			pos.X = 0
+			if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
+				return false, err
+			}
+		}
+		return true, nil
+
+	} else if pos.Y < info.Window.Bottom {
+		// Let Windows handle the LF.
+		pos.Y++
+		if includeCR {
+			pos.X = 0
+		}
+		h.updatePos(pos)
+		return false, nil
+	} else {
+		// The cursor is at the bottom of the screen but outside the scroll
+		// region. Skip the LF.
+		logger.Info("Simulating LF outside scroll region")
+		if includeCR {
+			if err := h.Flush(); err != nil {
+				return false, err
+			}
+			pos.X = 0
+			if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
+				return false, err
+			}
+		}
+		return true, nil
+	}
+}
+
+// executeLF executes a LF without a CR.
+func (h *windowsAnsiEventHandler) executeLF() error {
+	handled, err := h.simulateLF(false)
+	if err != nil {
+		return err
+	}
+	if !handled {
+		// Windows LF will reset the cursor column position. Write the LF
+		// and restore the cursor position.
+		pos, _, err := h.getCurrentInfo()
+		if err != nil {
+			return err
+		}
+		h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED)
+		if pos.X != 0 {
+			if err := h.Flush(); err != nil {
+				return err
+			}
+			logger.Info("Resetting cursor position for LF without CR")
+			if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func (h *windowsAnsiEventHandler) Print(b byte) error {
+	if h.wrapNext {
+		h.buffer.WriteByte(h.marginByte)
+		h.clearWrap()
+		if _, err := h.simulateLF(true); err != nil {
+			return err
+		}
+	}
+	pos, info, err := h.getCurrentInfo()
+	if err != nil {
+		return err
+	}
+	if pos.X == info.Size.X-1 {
+		h.wrapNext = true
+		h.marginByte = b
+	} else {
+		pos.X++
+		h.updatePos(pos)
+		h.buffer.WriteByte(b)
+	}
+	return nil
+}
+
+func (h *windowsAnsiEventHandler) Execute(b byte) error {
+	switch b {
+	case ansiterm.ANSI_TAB:
+		logger.Info("Execute(TAB)")
+		// Move to the next tab stop, but preserve auto-wrap if already set.
+		if !h.wrapNext {
+			pos, info, err := h.getCurrentInfo()
+			if err != nil {
+				return err
+			}
+			pos.X = (pos.X + 8) - pos.X%8
+			if pos.X >= info.Size.X {
+				pos.X = info.Size.X - 1
+			}
+			if err := h.Flush(); err != nil {
+				return err
+			}
+			if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
+				return err
+			}
+		}
+		return nil
+
+	case ansiterm.ANSI_BEL:
+		h.buffer.WriteByte(ansiterm.ANSI_BEL)
+		return nil
+
+	case ansiterm.ANSI_BACKSPACE:
+		if h.wrapNext {
+			if err := h.Flush(); err != nil {
+				return err
+			}
+			h.clearWrap()
+		}
+		pos, _, err := h.getCurrentInfo()
+		if err != nil {
+			return err
+		}
+		if pos.X > 0 {
+			pos.X--
+			h.updatePos(pos)
+			h.buffer.WriteByte(ansiterm.ANSI_BACKSPACE)
+		}
+		return nil
+
+	case ansiterm.ANSI_VERTICAL_TAB, ansiterm.ANSI_FORM_FEED:
+		// Treat as true LF.
+		return h.executeLF()
+
+	case ansiterm.ANSI_LINE_FEED:
+		// Simulate a CR and LF for now since there is no way in go-ansiterm
+		// to tell if the LF should include CR (and more things break when it's
+		// missing than when it's incorrectly added).
+		handled, err := h.simulateLF(true)
+		if handled || err != nil {
+			return err
+		}
+		return h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED)
+
+	case ansiterm.ANSI_CARRIAGE_RETURN:
+		if h.wrapNext {
+			if err := h.Flush(); err != nil {
+				return err
+			}
+			h.clearWrap()
+		}
+		pos, _, err := h.getCurrentInfo()
+		if err != nil {
+			return err
+		}
+		if pos.X != 0 {
+			pos.X = 0
+			h.updatePos(pos)
+			h.buffer.WriteByte(ansiterm.ANSI_CARRIAGE_RETURN)
+		}
+		return nil
+
+	default:
+		return nil
+	}
+}
+
+func (h *windowsAnsiEventHandler) CUU(param int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	logger.Infof("CUU: [%v]", []string{strconv.Itoa(param)})
+	h.clearWrap()
+	return h.moveCursorVertical(-param)
+}
+
+func (h *windowsAnsiEventHandler) CUD(param int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	logger.Infof("CUD: [%v]", []string{strconv.Itoa(param)})
+	h.clearWrap()
+	return h.moveCursorVertical(param)
+}
+
+func (h *windowsAnsiEventHandler) CUF(param int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	logger.Infof("CUF: [%v]", []string{strconv.Itoa(param)})
+	h.clearWrap()
+	return h.moveCursorHorizontal(param)
+}
+
+func (h *windowsAnsiEventHandler) CUB(param int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	logger.Infof("CUB: [%v]", []string{strconv.Itoa(param)})
+	h.clearWrap()
+	return h.moveCursorHorizontal(-param)
+}
+
+func (h *windowsAnsiEventHandler) CNL(param int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	logger.Infof("CNL: [%v]", []string{strconv.Itoa(param)})
+	h.clearWrap()
+	return h.moveCursorLine(param)
+}
+
+func (h *windowsAnsiEventHandler) CPL(param int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	logger.Infof("CPL: [%v]", []string{strconv.Itoa(param)})
+	h.clearWrap()
+	return h.moveCursorLine(-param)
+}
+
+func (h *windowsAnsiEventHandler) CHA(param int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	logger.Infof("CHA: [%v]", []string{strconv.Itoa(param)})
+	h.clearWrap()
+	return h.moveCursorColumn(param)
+}
+
+func (h *windowsAnsiEventHandler) VPA(param int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	logger.Infof("VPA: [[%d]]", param)
+	h.clearWrap()
+	info, err := GetConsoleScreenBufferInfo(h.fd)
+	if err != nil {
+		return err
+	}
+	window := h.getCursorWindow(info)
+	position := info.CursorPosition
+	position.Y = window.Top + int16(param) - 1
+	return h.setCursorPosition(position, window)
+}
+
+func (h *windowsAnsiEventHandler) CUP(row int, col int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	logger.Infof("CUP: [[%d %d]]", row, col)
+	h.clearWrap()
+	info, err := GetConsoleScreenBufferInfo(h.fd)
+	if err != nil {
+		return err
+	}
+
+	window := h.getCursorWindow(info)
+	position := COORD{window.Left + int16(col) - 1, window.Top + int16(row) - 1}
+	return h.setCursorPosition(position, window)
+}
+
+func (h *windowsAnsiEventHandler) HVP(row int, col int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	logger.Infof("HVP: [[%d %d]]", row, col)
+	h.clearWrap()
+	return h.CUP(row, col)
+}
+
+func (h *windowsAnsiEventHandler) DECTCEM(visible bool) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	logger.Infof("DECTCEM: [%v]", []string{strconv.FormatBool(visible)})
+	h.clearWrap()
+	return nil
+}
+
+func (h *windowsAnsiEventHandler) DECOM(enable bool) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	logger.Infof("DECOM: [%v]", []string{strconv.FormatBool(enable)})
+	h.clearWrap()
+	h.originMode = enable
+	return h.CUP(1, 1)
+}
+
+func (h *windowsAnsiEventHandler) DECCOLM(use132 bool) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	logger.Infof("DECCOLM: [%v]", []string{strconv.FormatBool(use132)})
+	h.clearWrap()
+	if err := h.ED(2); err != nil {
+		return err
+	}
+	info, err := GetConsoleScreenBufferInfo(h.fd)
+	if err != nil {
+		return err
+	}
+	targetWidth := int16(80)
+	if use132 {
+		targetWidth = 132
+	}
+	if info.Size.X < targetWidth {
+		if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil {
+			logger.Info("set buffer failed:", err)
+			return err
+		}
+	}
+	window := info.Window
+	window.Left = 0
+	window.Right = targetWidth - 1
+	if err := SetConsoleWindowInfo(h.fd, true, window); err != nil {
+		logger.Info("set window failed:", err)
+		return err
+	}
+	if info.Size.X > targetWidth {
+		if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil {
+			logger.Info("set buffer failed:", err)
+			return err
+		}
+	}
+	return SetConsoleCursorPosition(h.fd, COORD{0, 0})
+}
+
+func (h *windowsAnsiEventHandler) ED(param int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	logger.Infof("ED: [%v]", []string{strconv.Itoa(param)})
+	h.clearWrap()
+
+	// [J  -- Erases from the cursor to the end of the screen, including the cursor position.
+	// [1J -- Erases from the beginning of the screen to the cursor, including the cursor position.
+	// [2J -- Erases the complete display. The cursor does not move.
+	// Notes:
+	// -- Clearing the entire buffer, versus just the Window, works best for Windows Consoles
+
+	info, err := GetConsoleScreenBufferInfo(h.fd)
+	if err != nil {
+		return err
+	}
+
+	var start COORD
+	var end COORD
+
+	switch param {
+	case 0:
+		start = info.CursorPosition
+		end = COORD{info.Size.X - 1, info.Size.Y - 1}
+
+	case 1:
+		start = COORD{0, 0}
+		end = info.CursorPosition
+
+	case 2:
+		start = COORD{0, 0}
+		end = COORD{info.Size.X - 1, info.Size.Y - 1}
+	}
+
+	err = h.clearRange(h.attributes, start, end)
+	if err != nil {
+		return err
+	}
+
+	// If the whole buffer was cleared, move the window to the top while preserving
+	// the window-relative cursor position.
+	if param == 2 {
+		pos := info.CursorPosition
+		window := info.Window
+		pos.Y -= window.Top
+		window.Bottom -= window.Top
+		window.Top = 0
+		if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
+			return err
+		}
+		if err := SetConsoleWindowInfo(h.fd, true, window); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (h *windowsAnsiEventHandler) EL(param int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	logger.Infof("EL: [%v]", strconv.Itoa(param))
+	h.clearWrap()
+
+	// [K  -- Erases from the cursor to the end of the line, including the cursor position.
+	// [1K -- Erases from the beginning of the line to the cursor, including the cursor position.
+	// [2K -- Erases the complete line.
+
+	info, err := GetConsoleScreenBufferInfo(h.fd)
+	if err != nil {
+		return err
+	}
+
+	var start COORD
+	var end COORD
+
+	switch param {
+	case 0:
+		start = info.CursorPosition
+		end = COORD{info.Size.X, info.CursorPosition.Y}
+
+	case 1:
+		start = COORD{0, info.CursorPosition.Y}
+		end = info.CursorPosition
+
+	case 2:
+		start = COORD{0, info.CursorPosition.Y}
+		end = COORD{info.Size.X, info.CursorPosition.Y}
+	}
+
+	err = h.clearRange(h.attributes, start, end)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (h *windowsAnsiEventHandler) IL(param int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	logger.Infof("IL: [%v]", strconv.Itoa(param))
+	h.clearWrap()
+	return h.insertLines(param)
+}
+
+func (h *windowsAnsiEventHandler) DL(param int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	logger.Infof("DL: [%v]", strconv.Itoa(param))
+	h.clearWrap()
+	return h.deleteLines(param)
+}
+
+func (h *windowsAnsiEventHandler) ICH(param int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	logger.Infof("ICH: [%v]", strconv.Itoa(param))
+	h.clearWrap()
+	return h.insertCharacters(param)
+}
+
+func (h *windowsAnsiEventHandler) DCH(param int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	logger.Infof("DCH: [%v]", strconv.Itoa(param))
+	h.clearWrap()
+	return h.deleteCharacters(param)
+}
+
+func (h *windowsAnsiEventHandler) SGR(params []int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	strings := []string{}
+	for _, v := range params {
+		strings = append(strings, strconv.Itoa(v))
+	}
+
+	logger.Infof("SGR: [%v]", strings)
+
+	if len(params) <= 0 {
+		h.attributes = h.infoReset.Attributes
+		h.inverted = false
+	} else {
+		for _, attr := range params {
+
+			if attr == ansiterm.ANSI_SGR_RESET {
+				h.attributes = h.infoReset.Attributes
+				h.inverted = false
+				continue
+			}
+
+			h.attributes, h.inverted = collectAnsiIntoWindowsAttributes(h.attributes, h.inverted, h.infoReset.Attributes, int16(attr))
+		}
+	}
+
+	attributes := h.attributes
+	if h.inverted {
+		attributes = invertAttributes(attributes)
+	}
+	err := SetConsoleTextAttribute(h.fd, attributes)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (h *windowsAnsiEventHandler) SU(param int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	logger.Infof("SU: [%v]", []string{strconv.Itoa(param)})
+	h.clearWrap()
+	return h.scrollUp(param)
+}
+
+func (h *windowsAnsiEventHandler) SD(param int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	logger.Infof("SD: [%v]", []string{strconv.Itoa(param)})
+	h.clearWrap()
+	return h.scrollDown(param)
+}
+
+func (h *windowsAnsiEventHandler) DA(params []string) error {
+	logger.Infof("DA: [%v]", params)
+	// DA cannot be implemented because it must send data on the VT100 input stream,
+	// which is not available to go-ansiterm.
+	return nil
+}
+
+func (h *windowsAnsiEventHandler) DECSTBM(top int, bottom int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	logger.Infof("DECSTBM: [%d, %d]", top, bottom)
+
+	// Windows is 0 indexed, Linux is 1 indexed
+	h.sr.top = int16(top - 1)
+	h.sr.bottom = int16(bottom - 1)
+
+	// This command also moves the cursor to the origin.
+	h.clearWrap()
+	return h.CUP(1, 1)
+}
+
+func (h *windowsAnsiEventHandler) RI() error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	logger.Info("RI: []")
+	h.clearWrap()
+
+	info, err := GetConsoleScreenBufferInfo(h.fd)
+	if err != nil {
+		return err
+	}
+
+	sr := h.effectiveSr(info.Window)
+	if info.CursorPosition.Y == sr.top {
+		return h.scrollDown(1)
+	}
+
+	return h.moveCursorVertical(-1)
+}
+
+func (h *windowsAnsiEventHandler) IND() error {
+	logger.Info("IND: []")
+	return h.executeLF()
+}
+
+func (h *windowsAnsiEventHandler) Flush() error {
+	h.curInfo = nil
+	if h.buffer.Len() > 0 {
+		logger.Infof("Flush: [%s]", h.buffer.Bytes())
+		if _, err := h.buffer.WriteTo(h.file); err != nil {
+			return err
+		}
+	}
+
+	if h.wrapNext && !h.drewMarginByte {
+		logger.Infof("Flush: drawing margin byte '%c'", h.marginByte)
+
+		info, err := GetConsoleScreenBufferInfo(h.fd)
+		if err != nil {
+			return err
+		}
+
+		charInfo := []CHAR_INFO{{UnicodeChar: uint16(h.marginByte), Attributes: info.Attributes}}
+		size := COORD{1, 1}
+		position := COORD{0, 0}
+		region := SMALL_RECT{Left: info.CursorPosition.X, Top: info.CursorPosition.Y, Right: info.CursorPosition.X, Bottom: info.CursorPosition.Y}
+		if err := WriteConsoleOutput(h.fd, charInfo, size, position, &region); err != nil {
+			return err
+		}
+		h.drewMarginByte = true
+	}
+	return nil
+}
+
+// cacheConsoleInfo ensures that the current console screen information has been queried
+// since the last call to Flush(). It must be called before accessing h.curInfo or h.curPos.
+func (h *windowsAnsiEventHandler) getCurrentInfo() (COORD, *CONSOLE_SCREEN_BUFFER_INFO, error) {
+	if h.curInfo == nil {
+		info, err := GetConsoleScreenBufferInfo(h.fd)
+		if err != nil {
+			return COORD{}, nil, err
+		}
+		h.curInfo = info
+		h.curPos = info.CursorPosition
+	}
+	return h.curPos, h.curInfo, nil
+}
+
+func (h *windowsAnsiEventHandler) updatePos(pos COORD) {
+	if h.curInfo == nil {
+		panic("failed to call getCurrentInfo before calling updatePos")
+	}
+	h.curPos = pos
+}
+
+// clearWrap clears the state where the cursor is in the margin
+// waiting for the next character before wrapping the line. This must
+// be done before most operations that act on the cursor.
+func (h *windowsAnsiEventHandler) clearWrap() {
+	h.wrapNext = false
+	h.drewMarginByte = false
+}
diff --git a/vendor/github.com/docker/distribution/BUILDING.md b/vendor/github.com/docker/distribution/BUILDING.md
new file mode 100644
index 0000000000000000000000000000000000000000..2d5a10119081a2d41c624a211402e61338408f56
--- /dev/null
+++ b/vendor/github.com/docker/distribution/BUILDING.md
@@ -0,0 +1,119 @@
+
+# Building the registry source
+
+## Use-case
+
+This is useful if you intend to actively work on the registry.
+
+### Alternatives
+
+Most people should use the [official Registry docker image](https://hub.docker.com/r/library/registry/).
+
+People looking for advanced operational use cases might consider rolling their own image with a custom Dockerfile inheriting `FROM registry:2`.
+
+OS X users who want to run natively can do so following [the instructions here](https://github.com/docker/docker.github.io/blob/master/registry/recipes/osx-setup-guide.md).
+
+### Gotchas
+
+You are expected to know your way around with go & git.
+
+If you are a casual user with no development experience, and no preliminary knowledge of go, building from source is probably not a good solution for you.
+
+## Build the development environment
+
+The first prerequisite of properly building distribution targets is to have a Go
+development environment setup. Please follow [How to Write Go Code](https://golang.org/doc/code.html)
+for proper setup. If done correctly, you should have a GOROOT and GOPATH set in the
+environment.
+
+If a Go development environment is setup, one can use `go get` to install the
+`registry` command from the current latest:
+
+    go get github.com/docker/distribution/cmd/registry
+
+The above will install the source repository into the `GOPATH`.
+
+Now create the directory for the registry data (this might require you to set permissions properly)
+
+    mkdir -p /var/lib/registry
+
+... or alternatively `export REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/somewhere` if you want to store data into another location.
+
+The `registry`
+binary can then be run with the following:
+
+    $ $GOPATH/bin/registry --version
+    $GOPATH/bin/registry github.com/docker/distribution v2.0.0-alpha.1+unknown
+
+> __NOTE:__ While you do not need to use `go get` to checkout the distribution
+> project, for these build instructions to work, the project must be checked
+> out in the correct location in the `GOPATH`. This should almost always be
+> `$GOPATH/src/github.com/docker/distribution`.
+
+The registry can be run with the default config using the following
+incantation:
+
+    $ $GOPATH/bin/registry serve $GOPATH/src/github.com/docker/distribution/cmd/registry/config-example.yml
+    INFO[0000] endpoint local-5003 disabled, skipping        app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown
+    INFO[0000] endpoint local-8083 disabled, skipping        app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown
+    INFO[0000] listening on :5000                            app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown
+    INFO[0000] debug server listening localhost:5001
+
+If it is working, one should see the above log messages.
+
+### Repeatable Builds
+
+For the full development experience, one should `cd` into
+`$GOPATH/src/github.com/docker/distribution`. From there, the regular `go`
+commands, such as `go test`, should work per package (please see
+[Developing](#developing) if they don't work).
+
+A `Makefile` has been provided as a convenience to support repeatable builds.
+Please install the following into `GOPATH` for it to work:
+
+    go get github.com/tools/godep github.com/golang/lint/golint
+
+**TODO(stevvooe):** Add a `make setup` command to Makefile to run this. Have to think about how to interact with Godeps properly.
+
+Once these commands are available in the `GOPATH`, run `make` to get a full
+build:
+
+    $ make
+    + clean
+    + fmt
+    + vet
+    + lint
+    + build
+    github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar
+    github.com/Sirupsen/logrus
+    github.com/docker/libtrust
+    ...
+    github.com/yvasiyarov/gorelic
+    github.com/docker/distribution/registry/handlers
+    github.com/docker/distribution/cmd/registry
+    + test
+    ...
+    ok    github.com/docker/distribution/digest 7.875s
+    ok    github.com/docker/distribution/manifest 0.028s
+    ok    github.com/docker/distribution/notifications  17.322s
+    ?     github.com/docker/distribution/registry [no test files]
+    ok    github.com/docker/distribution/registry/api/v2  0.101s
+    ?     github.com/docker/distribution/registry/auth  [no test files]
+    ok    github.com/docker/distribution/registry/auth/silly  0.011s
+    ...
+    + /Users/sday/go/src/github.com/docker/distribution/bin/registry
+    + /Users/sday/go/src/github.com/docker/distribution/bin/registry-api-descriptor-template
+    + binaries
+
+The above provides a repeatable build using the contents of the vendored
+Godeps directory. This includes formatting, vetting, linting, building,
+testing and generating tagged binaries. We can verify this worked by running
+the registry binary generated in the "./bin" directory:
+
+    $ ./bin/registry -version
+    ./bin/registry github.com/docker/distribution v2.0.0-alpha.2-80-g16d8b2c.m
+
+### Optional build tags
+
+Optional [build tags](http://golang.org/pkg/go/build/) can be provided using
+the environment variable `DOCKER_BUILDTAGS`.
diff --git a/vendor/github.com/docker/distribution/CHANGELOG.md b/vendor/github.com/docker/distribution/CHANGELOG.md
new file mode 100644
index 0000000000000000000000000000000000000000..e7b16b3c25812363f3a10dd12a0fe9f2a0edffb4
--- /dev/null
+++ b/vendor/github.com/docker/distribution/CHANGELOG.md
@@ -0,0 +1,108 @@
+# Changelog
+
+## 2.6.0 (2017-01-18)
+
+#### Storage
+- S3: fixed bug in delete due to read-after-write inconsistency
+- S3: allow EC2 IAM roles to be used when authorizing region endpoints
+- S3: add Object ACL Support
+- S3: fix delete method's notion of subpaths
+- S3: use multipart upload API in `Move` method for performance
+- S3: add v2 signature signing for legacy S3 clones
+- Swift: add simple heuristic to detect incomplete DLOs during read ops
+- Swift: support different user and tenant domains
+- Swift: bulk deletes in chunks
+- Aliyun OSS: fix delete method's notion of subpaths
+- Aliyun OSS: optimize data copy after upload finishes
+- Azure: close leaking response body
+- Fix storage drivers dropping non-EOF errors when listing repositories
+- Compare path properly when listing repositories in catalog
+- Add a foreign layer URL host whitelist
+- Improve catalog enumerate runtime
+
+#### Registry
+- Export `storage.CreateOptions` in top-level package
+- Enable notifications to endpoints that use self-signed certificates
+- Properly validate multi-URL foreign layers
+- Add control over validation of URLs in pushed manifests
+- Proxy mode: fix socket leak when pull is cancelled
+- Tag service: properly handle error responses on HEAD request
+- Support for custom authentication URL in proxying registry
+- Add configuration option to disable access logging
+- Add notification filtering by target media type
+- Manifest: `References()` returns all children
+- Honor `X-Forwarded-Port` and Forwarded headers
+- Reference: Preserve tag and digest in With* functions
+- Add policy configuration for enforcing repository classes
+
+#### Client
+- Changes the client Tags `All()` method to follow links
+- Allow registry clients to connect via HTTP2
+- Better handling of OAuth errors in client
+  
+#### Spec
+- Manifest: clarify relationship between urls and foreign layers
+- Authorization: add support for repository classes
+
+#### Manifest
+- Override media type returned from `Stat()` for existing manifests
+- Add plugin mediatype to distribution manifest
+
+#### Docs
+- Document `TOOMANYREQUESTS` error code
+- Document required Let's Encrypt port
+- Improve documentation around implementation of OAuth2
+- Improve documentation for configuration
+
+#### Auth
+- Add support for registry type in scope
+- Add support for using v2 ping challenges for v1
+- Add leeway to JWT `nbf` and `exp` checking
+- htpasswd: dynamically parse htpasswd file
+- Fix missing auth headers with PATCH HTTP request when pushing to default port
+
+#### Dockerfile
+- Update to go1.7
+- Reorder Dockerfile steps for better layer caching
+
+#### Notes
+
+Documentation has moved to the documentation repository at
+`github.com/docker/docker.github.io/tree/master/registry`
+
+The registry is go 1.7 compliant, and passes newer, more restrictive `lint` and `vet` ing.
+
+
+## 2.5.0 (2016-06-14)
+
+#### Storage
+- Ensure uploads directory is cleaned after upload is committed
+- Add ability to cap concurrent operations in filesystem driver
+- S3: Add 'us-gov-west-1' to the valid region list
+- Swift: Handle ceph not returning Last-Modified header for HEAD requests
+- Add redirect middleware
+
+#### Registry
+- Add support for blobAccessController middleware
+- Add support for layers from foreign sources
+- Remove signature store
+- Add support for Let's Encrypt
+- Correct yaml key names in configuration
+
+#### Client
+- Add option to get content digest from manifest get
+
+#### Spec
+- Update the auth spec scope grammar to reflect the fact that hostnames are optionally supported
+- Clarify API documentation around catalog fetch behavior
+
+#### API
+- Support returning HTTP 429 (Too Many Requests)
+
+#### Documentation
+- Update auth documentation examples to show "expires in" as int
+
+#### Docker Image
+- Use Alpine Linux as base image
+
+
diff --git a/vendor/github.com/docker/distribution/CONTRIBUTING.md b/vendor/github.com/docker/distribution/CONTRIBUTING.md
new file mode 100644
index 0000000000000000000000000000000000000000..7cc7aedffeb1df118caa45af19b9904478fe0838
--- /dev/null
+++ b/vendor/github.com/docker/distribution/CONTRIBUTING.md
@@ -0,0 +1,140 @@
+# Contributing to the registry
+
+## Before reporting an issue...
+
+### If your problem is with...
+
+ - automated builds
+ - your account on the [Docker Hub](https://hub.docker.com/)
+ - any other [Docker Hub](https://hub.docker.com/) issue
+
+Then please do not report your issue here - you should instead report it to [https://support.docker.com](https://support.docker.com)
+
+### If you...
+
+ - need help setting up your registry
+ - can't figure out something
+ - are not sure what's going on or what your problem is
+
+Then please do not open an issue here yet - you should first try one of the following support forums:
+
+ - irc: #docker-distribution on freenode
+ - mailing-list: <distribution@dockerproject.org> or https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution
+
+## Reporting an issue properly
+
+By following these simple rules you will get better and faster feedback on your issue.
+
+ - search the bugtracker for an already reported issue
+
+### If you found an issue that describes your problem:
+
+ - please read other user comments first, and confirm this is the same issue: a given error condition might be indicative of different problems - you may also find a workaround in the comments
+ - please refrain from adding "same thing here" or "+1" comments
+ - you don't need to comment on an issue to get notified of updates: just hit the "subscribe" button
+ - comment if you have some new, technical and relevant information to add to the case
+ - __DO NOT__ comment on closed issues or merged PRs. If you think you have a related problem, open up a new issue and reference the PR or issue.
+
+### If you have not found an existing issue that describes your problem:
+
+ 1. create a new issue, with a succinct title that describes your issue:
+   - bad title: "It doesn't work with my docker"
+   - good title: "Private registry push fail: 400 error with E_INVALID_DIGEST"
+ 2. copy the output of:
+   - `docker version`
+   - `docker info`
+   - `docker exec <registry-container> registry -version`
+ 3. copy the command line you used to launch your Registry
+ 4. restart your docker daemon in debug mode (add `-D` to the daemon launch arguments)
+ 5. reproduce your problem and get your docker daemon logs showing the error
+ 6. if relevant, copy your registry logs that show the error
+ 7. provide any relevant detail about your specific Registry configuration (e.g., storage backend used)
+ 8. indicate if you are using an enterprise proxy, Nginx, or anything else between you and your Registry
+
+## Contributing a patch for a known bug, or a small correction
+
+You should follow the basic GitHub workflow:
+
+ 1. fork
+ 2. commit a change
+ 3. make sure the tests pass
+ 4. PR
+
+Additionally, you must [sign your commits](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work). It's very simple:
+
+ - configure your name with git: `git config user.name "Real Name" && git config user.email mail@example.com`
+ - sign your commits using `-s`: `git commit -s -m "My commit"`
+
+Some simple rules to ensure quick merge:
+
+ - clearly point to the issue(s) you want to fix in your PR comment (e.g., `closes #12345`)
+ - prefer multiple (smaller) PRs addressing individual issues over a big one trying to address multiple issues at once
+ - if you need to amend your PR following comments, please squash instead of adding more commits
+
+## Contributing new features
+
+You are heavily encouraged to first discuss what you want to do. You can do so on the irc channel, or by opening an issue that clearly describes the use case you want to fulfill, or the problem you are trying to solve.
+
+If this is a major new feature, you should then submit a proposal that describes your technical solution and reasoning.
+If you did discuss it first, this will likely be greenlighted very fast. It's advisable to address all feedback on this proposal before starting actual work.
+
+Then you should submit your implementation, clearly linking to the issue (and possible proposal).
+
+Your PR will be reviewed by the community, then ultimately by the project maintainers, before being merged.
+
+It's mandatory to:
+
+ - interact respectfully with other community members and maintainers - more generally, you are expected to abide by the [Docker community rules](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#docker-community-guidelines)
+ - address maintainers' comments and modify your submission accordingly
+ - write tests for any new code
+
+Complying to these simple rules will greatly accelerate the review process, and will ensure you have a pleasant experience in contributing code to the Registry.
+
+Have a look at a great, successful contribution: the [Swift driver PR](https://github.com/docker/distribution/pull/493)
+
+## Coding Style
+
+Unless explicitly stated, we follow all coding guidelines from the Go
+community. While some of these standards may seem arbitrary, they somehow seem
+to result in a solid, consistent codebase.
+
+It is possible that the code base does not currently comply with these
+guidelines. We are not looking for a massive PR that fixes this, since that
+goes against the spirit of the guidelines. All new contributions should make a
+best effort to clean up and make the code base better than they left it.
+Obviously, apply your best judgement. Remember, the goal here is to make the
+code base easier for humans to navigate and understand. Always keep that in
+mind when nudging others to comply.
+
+The rules:
+
+1. All code should be formatted with `gofmt -s`.
+2. All code should pass the default levels of
+   [`golint`](https://github.com/golang/lint).
+3. All code should follow the guidelines covered in [Effective
+   Go](http://golang.org/doc/effective_go.html) and [Go Code Review
+   Comments](https://github.com/golang/go/wiki/CodeReviewComments).
+4. Comment the code. Tell us the why, the history and the context.
+5. Document _all_ declarations and methods, even private ones. Declare
+   expectations, caveats and anything else that may be important. If a type
+   gets exported, having the comments already there will ensure it's ready.
+6. Variable name length should be proportional to its context and no longer.
+   `noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`.
+   In practice, short methods will have short variable names and globals will
+   have longer names.
+7. No underscores in package names. If you need a compound name, step back,
+   and re-examine why you need a compound name. If you still think you need a
+   compound name, lose the underscore.
+8. No utils or helpers packages. If a function is not general enough to
+   warrant its own package, it has not been written generally enough to be a
+   part of a util package. Just leave it unexported and well-documented.
+9. All tests should run with `go test` and outside tooling should not be
+   required. No, we don't need another unit testing framework. Assertion
+   packages are acceptable if they provide _real_ incremental value.
+10. Even though we call these "rules" above, they are actually just
+    guidelines. Since you've read all the rules, you now know that.
+
+If you are having trouble getting into the mood of idiomatic Go, we recommend
+reading through [Effective Go](http://golang.org/doc/effective_go.html). The
+[Go Blog](http://blog.golang.org/) is also a great resource. Drinking the
+kool-aid is a lot easier than going thirsty.
diff --git a/vendor/github.com/docker/distribution/Dockerfile b/vendor/github.com/docker/distribution/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..426954a11231faa0026118debc3aa23bb7337c9f
--- /dev/null
+++ b/vendor/github.com/docker/distribution/Dockerfile
@@ -0,0 +1,18 @@
+FROM golang:1.7-alpine
+
+ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution
+ENV DOCKER_BUILDTAGS include_oss include_gcs
+
+RUN set -ex \
+    && apk add --no-cache make git
+
+WORKDIR $DISTRIBUTION_DIR
+COPY . $DISTRIBUTION_DIR
+COPY cmd/registry/config-dev.yml /etc/docker/registry/config.yml
+
+RUN make PREFIX=/go clean binaries
+
+VOLUME ["/var/lib/registry"]
+EXPOSE 5000
+ENTRYPOINT ["registry"]
+CMD ["serve", "/etc/docker/registry/config.yml"]
diff --git a/vendor/github.com/docker/distribution/MAINTAINERS b/vendor/github.com/docker/distribution/MAINTAINERS
new file mode 100644
index 0000000000000000000000000000000000000000..bda400150c9077bf69d1b10601e5f798f99b41a9
--- /dev/null
+++ b/vendor/github.com/docker/distribution/MAINTAINERS
@@ -0,0 +1,58 @@
+# Distribution maintainers file
+#
+# This file describes who runs the docker/distribution project and how.
+# This is a living document - if you see something out of date or missing, speak up!
+#
+# It is structured to be consumable by both humans and programs.
+# To extract its contents programmatically, use any TOML-compliant parser.
+#
+# This file is compiled into the MAINTAINERS file in docker/opensource.
+#
+[Org]
+	[Org."Core maintainers"]
+		people = [
+			"aaronlehmann",
+			"dmcgowan",
+			"dmp42",
+			"richardscothern",
+			"shykes",
+			"stevvooe",
+		]
+
+[people]
+
+# A reference list of all people associated with the project.
+# All other sections should refer to people by their canonical key
+# in the people section.
+
+	# ADD YOURSELF HERE IN ALPHABETICAL ORDER
+
+	[people.aaronlehmann]
+	Name = "Aaron Lehmann"
+	Email = "aaron.lehmann@docker.com"
+	GitHub = "aaronlehmann"
+
+	[people.dmcgowan]
+	Name = "Derek McGowan"
+	Email = "derek@mcgstyle.net"
+	GitHub = "dmcgowan"
+
+	[people.dmp42]
+	Name = "Olivier Gambier"
+	Email = "olivier@docker.com"
+	GitHub = "dmp42"
+
+	[people.richardscothern]
+	Name = "Richard Scothern"
+	Email = "richard.scothern@gmail.com"
+	GitHub = "richardscothern"
+
+	[people.shykes]
+	Name = "Solomon Hykes"
+	Email = "solomon@docker.com"
+	GitHub = "shykes"
+
+	[people.stevvooe]
+	Name = "Stephen Day"
+	Email = "stephen.day@docker.com"
+	GitHub = "stevvooe"
diff --git a/vendor/github.com/docker/distribution/Makefile b/vendor/github.com/docker/distribution/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..47b8f1d0b2539ae76e7a044759fa17f73d100c37
--- /dev/null
+++ b/vendor/github.com/docker/distribution/Makefile
@@ -0,0 +1,109 @@
+# Set an output prefix, which is the local directory if not specified
+PREFIX?=$(shell pwd)
+
+
+# Used to populate version variable in main package.
+VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always)
+
+# Allow turning off function inlining and variable registerization
+ifeq (${DISABLE_OPTIMIZATION},true)
+	GO_GCFLAGS=-gcflags "-N -l"
+	VERSION:="$(VERSION)-noopt"
+endif
+
+GO_LDFLAGS=-ldflags "-X `go list ./version`.Version=$(VERSION)"
+
+.PHONY: all build binaries clean dep-restore dep-save dep-validate fmt lint test test-full vet
+.DEFAULT: all
+all: fmt vet lint build test binaries
+
+AUTHORS: .mailmap .git/HEAD
+	 git log --format='%aN <%aE>' | sort -fu > $@
+
+# This only needs to be generated by hand when cutting full releases.
+version/version.go:
+	./version/version.sh > $@
+
+# Required for go 1.5 to build
+GO15VENDOREXPERIMENT := 1
+
+# Go files
+GOFILES=$(shell find . -type f -name '*.go')
+
+# Package list
+PKGS=$(shell go list -tags "${DOCKER_BUILDTAGS}" ./... | grep -v ^github.com/docker/distribution/vendor/)
+
+# Resolving binary dependencies for specific targets
+GOLINT=$(shell which golint || echo '')
+GODEP=$(shell which godep || echo '')
+
+${PREFIX}/bin/registry: $(GOFILES)
+	@echo "+ $@"
+	@go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS}  ${GO_GCFLAGS} ./cmd/registry
+
+${PREFIX}/bin/digest:  $(GOFILES)
+	@echo "+ $@"
+	@go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS}  ${GO_GCFLAGS} ./cmd/digest
+
+${PREFIX}/bin/registry-api-descriptor-template: $(GOFILES)
+	@echo "+ $@"
+	@go build -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry-api-descriptor-template
+
+docs/spec/api.md: docs/spec/api.md.tmpl ${PREFIX}/bin/registry-api-descriptor-template
+	./bin/registry-api-descriptor-template $< > $@
+
+vet:
+	@echo "+ $@"
+	@go vet -tags "${DOCKER_BUILDTAGS}" $(PKGS)
+
+fmt:
+	@echo "+ $@"
+	@test -z "$$(gofmt -s -l . 2>&1 | grep -v ^vendor/ | tee /dev/stderr)" || \
+		(echo >&2 "+ please format Go code with 'gofmt -s'" && false)
+
+lint:
+	@echo "+ $@"
+	$(if $(GOLINT), , \
+		$(error Please install golint: `go get -u github.com/golang/lint/golint`))
+	@test -z "$$($(GOLINT) ./... 2>&1 | grep -v ^vendor/ | tee /dev/stderr)"
+
+build:
+	@echo "+ $@"
+	@go build -tags "${DOCKER_BUILDTAGS}" -v ${GO_LDFLAGS} $(PKGS)
+
+test:
+	@echo "+ $@"
+	@go test -test.short -tags "${DOCKER_BUILDTAGS}" $(PKGS)
+
+test-full:
+	@echo "+ $@"
+	@go test -tags "${DOCKER_BUILDTAGS}" $(PKGS)
+
+binaries: ${PREFIX}/bin/registry ${PREFIX}/bin/digest ${PREFIX}/bin/registry-api-descriptor-template
+	@echo "+ $@"
+
+clean:
+	@echo "+ $@"
+	@rm -rf "${PREFIX}/bin/registry" "${PREFIX}/bin/digest" "${PREFIX}/bin/registry-api-descriptor-template"
+
+dep-save:
+	@echo "+ $@"
+	$(if $(GODEP), , \
+		$(error Please install godep: go get github.com/tools/godep))
+	@$(GODEP) save $(PKGS)
+
+dep-restore:
+	@echo "+ $@"
+	$(if $(GODEP), , \
+		$(error Please install godep: go get github.com/tools/godep))
+	@$(GODEP) restore -v
+
+dep-validate: dep-restore
+	@echo "+ $@"
+	@rm -Rf .vendor.bak
+	@mv vendor .vendor.bak
+	@rm -Rf Godeps
+	@$(GODEP) save ./...
+	@test -z "$$(diff -r vendor .vendor.bak 2>&1 | tee /dev/stderr)" || \
+		(echo >&2 "+ borked dependencies! what you have in Godeps/Godeps.json does not match with what you have in vendor" && false)
+	@rm -Rf .vendor.bak
diff --git a/vendor/github.com/docker/distribution/README.md b/vendor/github.com/docker/distribution/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..a6e8db0fb7db3650966e210fd4c363ca8c8fc8ee
--- /dev/null
+++ b/vendor/github.com/docker/distribution/README.md
@@ -0,0 +1,131 @@
+# Distribution
+
+The Docker toolset to pack, ship, store, and deliver content.
+
+This repository's main product is the Docker Registry 2.0 implementation
+for storing and distributing Docker images. It supersedes the
+[docker/docker-registry](https://github.com/docker/docker-registry)
+project with a new API design, focused around security and performance.
+
+<img src="https://www.docker.com/sites/default/files/oyster-registry-3.png" width=200px/>
+
+[![Circle CI](https://circleci.com/gh/docker/distribution/tree/master.svg?style=svg)](https://circleci.com/gh/docker/distribution/tree/master)
+[![GoDoc](https://godoc.org/github.com/docker/distribution?status.svg)](https://godoc.org/github.com/docker/distribution)
+
+This repository contains the following components:
+
+|**Component**       |Description                                                                                                                                                                                         |
+|--------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| **registry**       | An implementation of the [Docker Registry HTTP API V2](docs/spec/api.md) for use with docker 1.6+.                                                                                                  |
+| **libraries**      | A rich set of libraries for interacting with distribution components. Please see [godoc](https://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. |
+| **specifications** | _Distribution_ related specifications are available in [docs/spec](docs/spec)                                                                                                                        |
+| **documentation**  | Docker's full documentation set is available at [docs.docker.com](https://docs.docker.com). This repository [contains the subset](docs/) related just to the registry.                                                                                                                                          |
+
+### How does this integrate with Docker engine?
+
+This project should provide an implementation to a V2 API for use in the [Docker
+core project](https://github.com/docker/docker). The API should be embeddable
+and simplify the process of securely pulling and pushing content from `docker`
+daemons.
+
+### What are the long term goals of the Distribution project?
+
+The _Distribution_ project has the further long term goal of providing a
+secure tool chain for distributing content. The specifications, APIs and tools
+should be as useful with Docker as they are without.
+
+Our goal is to design a professional grade and extensible content distribution
+system that allows users to:
+
+* Enjoy an efficient, secured and reliable way to store, manage, package and
+  exchange content
+* Hack/roll their own on top of healthy open-source components
+* Implement their own home made solution through good specs, and solid
+  extensions mechanism.
+
+## More about Registry 2.0
+
+The new registry implementation provides the following benefits:
+
+- faster push and pull
+- new, more efficient implementation
+- simplified deployment
+- pluggable storage backend
+- webhook notifications
+
+For information on upcoming functionality, please see [ROADMAP.md](ROADMAP.md).
+
+### Who needs to deploy a registry?
+
+By default, Docker users pull images from Docker's public registry instance.
+[Installing Docker](https://docs.docker.com/engine/installation/) gives users this
+ability. Users can also push images to a repository on Docker's public registry,
+if they have a [Docker Hub](https://hub.docker.com/) account.
+
+For some users and even companies, this default behavior is sufficient. For
+others, it is not.
+
+For example, users with their own software products may want to maintain a
+registry for private, company images. Also, you may wish to deploy your own
+image repository for images used to test or in continuous integration. For these
+use cases and others, [deploying your own registry instance](https://github.com/docker/docker.github.io/blob/master/registry/deploying.md)
+may be the better choice.
+
+### Migration to Registry 2.0
+
+For those who have previously deployed their own registry based on the Registry
+1.0 implementation and wish to deploy a Registry 2.0 while retaining images,
+data migration is required. A tool to assist with migration efforts has been
+created. For more information see
+[docker/migrator](https://github.com/docker/migrator).
+
+## Contribute
+
+Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to contribute
+issues, fixes, and patches to this project. If you are contributing code, see
+the instructions for [building a development environment](BUILDING.md).
+
+## Support
+
+If any issues are encountered while using the _Distribution_ project, several
+avenues are available for support:
+
+<table>
+<tr>
+	<th align="left">
+	IRC
+	</th>
+	<td>
+	#docker-distribution on FreeNode
+	</td>
+</tr>
+<tr>
+	<th align="left">
+	Issue Tracker
+	</th>
+	<td>
+	github.com/docker/distribution/issues
+	</td>
+</tr>
+<tr>
+	<th align="left">
+	Google Groups
+	</th>
+	<td>
+	https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution
+	</td>
+</tr>
+<tr>
+	<th align="left">
+	Mailing List
+	</th>
+	<td>
+	docker@dockerproject.org
+	</td>
+</tr>
+</table>
+
+
+## License
+
+This project is distributed under [Apache License, Version 2.0](LICENSE).
diff --git a/vendor/github.com/docker/distribution/RELEASE-CHECKLIST.md b/vendor/github.com/docker/distribution/RELEASE-CHECKLIST.md
new file mode 100644
index 0000000000000000000000000000000000000000..49235cecda045d148541d06ade5eb591216ab3eb
--- /dev/null
+++ b/vendor/github.com/docker/distribution/RELEASE-CHECKLIST.md
@@ -0,0 +1,36 @@
+## Registry Release Checklist
+
+10. Compile release notes detailing features and bug fixes since the last release.  Update the `CHANGELOG.md` file.
+
+20. Update the version file: `https://github.com/docker/distribution/blob/master/version/version.go`
+
+30. Update the `MAINTAINERS` (if necessary), `AUTHORS` and `.mailmap` files.
+
+    ```
+make AUTHORS
+```
+
+40. Create a signed tag.
+
+   Distribution uses semantic versioning.  Tags are of the format `vx.y.z[-rcn]`.
+You will need PGP installed and a PGP key which has been added to your Github account.  The comment for the tag should include the release notes.
+
+50. Push the signed tag
+
+60. Create a new [release](https://github.com/docker/distribution/releases).  In the case of a release candidate, tick the `pre-release` checkbox. 
+
+70. Update the registry binary in [distribution library image repo](https://github.com/docker/distribution-library-image) by running the update script and  opening a pull request.
+
+80. Update the official image.  Add the new version in the [official images repo](https://github.com/docker-library/official-images) by appending a new version to the `registry/registry` file with the git hash pointed to by the signed tag.  Update the major version to point to the latest version and the minor version to point to new patch release if necessary.
+e.g. to release `2.3.1`
+
+   `2.3.1 (new)`
+
+   `2.3.0 -> 2.3.0` can be removed
+
+   `2 -> 2.3.1`
+
+   `2.3 -> 2.3.1`
+
+90. Build a new distribution/registry image on [Docker hub](https://hub.docker.com/u/distribution/dashboard) by adding a new automated build with the new tag and re-building the images.
+
diff --git a/vendor/github.com/docker/distribution/ROADMAP.md b/vendor/github.com/docker/distribution/ROADMAP.md
new file mode 100644
index 0000000000000000000000000000000000000000..701127afec6e1bab6545e3cfcb6f6bea8207bc2a
--- /dev/null
+++ b/vendor/github.com/docker/distribution/ROADMAP.md
@@ -0,0 +1,267 @@
+# Roadmap
+
+The Distribution Project consists of several components, some of which are
+still being defined. This document defines the high-level goals of the
+project, identifies the current components, and defines the release-
+relationship to the Docker Platform.
+
+* [Distribution Goals](#distribution-goals)
+* [Distribution Components](#distribution-components)
+* [Project Planning](#project-planning): release-relationship to the Docker Platform.
+
+This road map is a living document, providing an overview of the goals and
+considerations made in respect of the future of the project.
+
+## Distribution Goals
+
+- Replace the existing [docker registry](https://github.com/docker/docker-registry)
+  implementation as the primary implementation.
+- Replace the existing push and pull code in the docker engine with the
+  distribution package.
+- Define a strong data model for distributing docker images
+- Provide a flexible distribution tool kit for use in the docker platform
+- Unlock new distribution models
+
+## Distribution Components
+
+Components of the Distribution Project are managed via github [milestones](https://github.com/docker/distribution/milestones). Upcoming
+features and bugfixes for a component will be added to the relevant milestone. If a feature or
+bugfix is not part of a milestone, it is currently unscheduled for
+implementation. 
+
+* [Registry](#registry)
+* [Distribution Package](#distribution-package)
+
+***
+
+### Registry
+
+The new Docker registry is the main portion of the distribution repository.
+Registry 2.0 is the first release of the next-generation registry. This was
+primarily focused on implementing the [new registry
+API](https://github.com/docker/distribution/blob/master/docs/spec/api.md),
+with a focus on security and performance. 
+
+Following from the Distribution project goals above, we have a set of goals
+for registry v2 that we would like to follow in the design. New features
+should be compared against these goals.
+
+#### Data Storage and Distribution First
+
+The registry's first goal is to provide a reliable, consistent storage
+location for Docker images. The registry should only provide the minimal
+amount of indexing required to fetch image data and no more.
+
+This means we should be selective in new features and API additions, including
+those that may require expensive, ever growing indexes. Requests should be
+servable in "constant time".
+
+#### Content Addressability
+
+All data objects used in the registry API should be content addressable.
+Content identifiers should be secure and verifiable. This provides a secure,
+reliable base from which to build more advanced content distribution systems.
+
+#### Content Agnostic
+
+In the past, changes to the image format would require large changes in Docker
+and the Registry. By decoupling the distribution and image format, we can
+allow the formats to progress without having to coordinate between the two.
+This means that we should be focused on decoupling Docker from the registry
+just as much as decoupling the registry from Docker. Such an approach will
+allow us to unlock new distribution models that haven't been possible before.
+
+We can take this further by saying that the new registry should be content
+agnostic. The registry provides a model of names, tags, manifests and content
+addresses and that model can be used to work with content.
+
+#### Simplicity
+
+The new registry should be closer to a microservice component than its
+predecessor. This means it should have a narrower API and a low number of
+service dependencies. It should be easy to deploy.
+
+This means that other solutions should be explored before changing the API or
+adding extra dependencies. If functionality is required, can it be added as an
+extension or companion service?
+
+#### Extensibility
+
+The registry should provide extension points to add functionality, keeping
+the core scope narrow while still allowing new capabilities to be added.
+
+Features like search, indexing, synchronization and registry explorers fall
+into this category. No such feature should be added unless we've found it
+impossible to do through an extension.
+
+#### Active Feature Discussions
+
+The following are feature discussions that are currently active.
+
+If you don't see your favorite, unimplemented feature, feel free to contact us
+via IRC or the mailing list and we can talk about adding it. The goal here is
+to make sure that new features go through a rigid design process before
+landing in the registry.
+
+##### Proxying to other Registries
+
+A _pull-through caching_ mode exists for the registry, but is restricted from 
+within the docker client to only mirror the official Docker Hub.  This functionality
+can be expanded when image provenance has been specified and implemented in the 
+distribution project.
+
+##### Metadata storage
+
+Metadata for the registry is currently stored with the manifest and layer data on
+the storage backend.  While this is a big win for simplicity and reliably maintaining
+state, it comes with the cost of consistency and high latency.  The mutable registry
+metadata operations should be abstracted behind an API which will allow ACID compliant
+storage systems to handle metadata.
+
+##### Peer to Peer transfer
+
+Discussion has started here: https://docs.google.com/document/d/1rYDpSpJiQWmCQy8Cuiaa3NH-Co33oK_SC9HeXYo87QA/edit
+
+##### Indexing, Search and Discovery
+
+The original registry provided some implementation of search for use with
+private registries. Support has been elided from V2 since we'd like to both
+decouple search functionality from the registry. This makes the registry
+simpler to deploy, especially in use cases where search is not needed, and
+lets us decouple the image format from the registry.
+
+There are explorations into using the catalog API and notification system to
+build external indexes. The current line of thought is that we will define a
+common search API to index and query docker images. Such a system could be run
+as a companion to a registry or set of registries to power discovery.
+
+The main issue with search and discovery is that there are so many ways to
+accomplish it. There are two aspects to this project. The first is deciding on
+how it will be done, including an API definition that can work with changing
+data formats. The second is the process of integrating with `docker search`.
+We expect that someone attempts to address the problem with the existing tools
+and propose it as a standard search API or uses it to inform a standardization
+process. Once this has been explored, we integrate with the docker client.
+
+Please see the following for more detail:
+
+- https://github.com/docker/distribution/issues/206
+
+##### Deletes
+
+> __NOTE:__ Deletes are a much asked for feature. Before requesting this
+feature or participating in discussion, we ask that you read this section in
+full and understand the problems behind deletes.
+
+While, at first glance, implementing deletes seems simple, there are a number
+of mitigating factors that make many solutions not ideal or even pathological
+in the context of a registry. The following paragraphs discuss the background and
+approaches that could be applied to arrive at a solution.
+
+The goal of deletes in any system is to remove unused or unneeded data. Only
+data requested for deletion should be removed and no other data. Removing
+unintended data is worse than _not_ removing data that was requested for
+removal but ideally, both are supported. Generally, according to this rule, we
+err on holding data longer than needed, ensuring that it is only removed when
+we can be certain that it can be removed. With the current behavior, we opt to
+hold onto the data forever, ensuring that data cannot be incorrectly removed.
+
+To understand the problems with implementing deletes, one must understand the
+data model. All registry data is stored in a filesystem layout, implemented on
+a "storage driver", effectively a _virtual file system_ (VFS). The storage
+system must assume that this VFS layer will be eventually consistent and has
+poor read-after-write consistency, since this is the lowest common denominator
+among the storage drivers. This is mitigated by writing values in reverse-
+dependent order, but makes wider transactional operations unsafe.
+
+Layered on the VFS model is a content-addressable _directed, acyclic graph_
+(DAG) made up of blobs. Manifests reference layers. Tags reference manifests.
+Since the same data can be referenced by multiple manifests, we only store
+data once, even if it is in different repositories. Thus, we have a set of
+blobs, referenced by tags and manifests. If we want to delete a blob we need
+to be certain that it is no longer referenced by another manifest or tag. When
+we delete a manifest, we also can try to delete the referenced blobs. Deciding
+whether or not a blob has an active reference is the crux of the problem.
+
+Conceptually, deleting a manifest and its resources is quite simple. Just find
+all the manifests, enumerate the referenced blobs and delete the blobs not in
+that set. An astute observer will recognize this as a garbage collection
+problem. As with garbage collection in programming languages, this is very
+simple when one always has a consistent view. When one adds parallelism and an
+inconsistent view of data, it becomes very challenging.
+
+A simple example can demonstrate this. Let's say we are deleting a manifest
+_A_ in one process. We scan the manifest and decide that all the blobs are
+ready for deletion. Concurrently, we have another process accepting a new
+manifest _B_ referencing one or more blobs from the manifest _A_. Manifest _B_
+is accepted and all the blobs are considered present, so the operation
+proceeds. The original process then deletes the referenced blobs, assuming
+they were unreferenced. The manifest _B_, which we thought had all of its data
+present, can no longer be served by the registry, since the dependent data has
+been deleted.
+
+Deleting data from the registry safely requires some way to coordinate this
+operation. The following approaches are being considered:
+
+- _Reference Counting_ - Maintain a count of references to each blob. This is
+  challenging for a number of reasons: 1. maintaining a consistent consensus
+  of reference counts across a set of Registries and 2. Building the initial
+  list of reference counts for an existing registry. These challenges can be
+  met with a consensus protocol like Paxos or Raft in the first case and a
+  necessary but simple scan in the second.
+- _Lock the World GC_ - Halt all writes to the data store. Walk the data store
+  and find all blob references. Delete all unreferenced blobs. This approach
+  is very simple but requires disabling writes for a period of time while the
+  service reads all data. This is slow and expensive but very accurate and
+  effective.
+- _Generational GC_ - Do something similar to above but instead of blocking
+  writes, writes are sent to another storage backend while reads are broadcast
+  to the new and old backends. GC is then performed on the read-only portion.
+  Because writes land in the new backend, the data in the read-only section
+  can be safely deleted. The main drawbacks of this approach are complexity
+  and coordination.
+- _Centralized Oracle_ - Using a centralized, transactional database, we can
+  know exactly which data is referenced at any given time. This avoids
+  coordination problem by managing this data in a single location. We trade
+  off metadata scalability for simplicity and performance. This is a very good
+  option for most registry deployments. This would create a bottleneck for
+  registry metadata. However, metadata is generally not the main bottleneck
+  when serving images.
+
+Please let us know if other solutions exist that we have yet to enumerate.
+Note that for any approach, implementation is a massive consideration. For
+example, a mark-sweep based solution may seem simple but the amount of work in
+coordination offset the extra work it might take to build a _Centralized
+Oracle_. We'll accept proposals for any solution but please coordinate with us
+before dropping code.
+
+At this time, we have traded off simplicity and ease of deployment for disk
+space. Simplicity and ease of deployment tend to reduce developer involvement,
+which is currently the most expensive resource in software engineering. Taking
+on any solution for deletes will greatly affect these factors, trading off
+very cheap disk space for a complex deployment and operational story.
+
+Please see the following issues for more detail:
+
+- https://github.com/docker/distribution/issues/422
+- https://github.com/docker/distribution/issues/461
+- https://github.com/docker/distribution/issues/462
+
+### Distribution Package 
+
+At its core, the Distribution Project is a set of Go packages that make up
+Distribution Components. At this time, most of these packages make up the
+Registry implementation. 
+
+The package itself is considered unstable. If you're using it, please take care to vendor the dependent version. 
+
+For feature additions, please see the Registry section. In the future, we may break out a
+separate Roadmap for distribution-specific features that apply to more than
+just the registry.
+
+***
+
+### Project Planning
+
+An [Open-Source Planning Process](https://github.com/docker/distribution/wiki/Open-Source-Planning-Process) is used to define the Roadmap. [Project Pages](https://github.com/docker/distribution/wiki) define the goals for each Milestone and identify current progress.
+
diff --git a/vendor/github.com/docker/distribution/blobs.go b/vendor/github.com/docker/distribution/blobs.go
new file mode 100644
index 0000000000000000000000000000000000000000..1f91ae21e9b9ffa1073c43283bca0952ce91b82f
--- /dev/null
+++ b/vendor/github.com/docker/distribution/blobs.go
@@ -0,0 +1,257 @@
+package distribution
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"time"
+
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/distribution/reference"
+)
+
+var (
+	// ErrBlobExists returned when blob already exists
+	ErrBlobExists = errors.New("blob exists")
+
+	// ErrBlobDigestUnsupported when blob digest is an unsupported version.
+	ErrBlobDigestUnsupported = errors.New("unsupported blob digest")
+
+	// ErrBlobUnknown when blob is not found.
+	ErrBlobUnknown = errors.New("unknown blob")
+
+	// ErrBlobUploadUnknown returned when upload is not found.
+	ErrBlobUploadUnknown = errors.New("blob upload unknown")
+
+	// ErrBlobInvalidLength returned when the blob has an expected length on
+	// commit, meaning mismatched with the descriptor or an invalid value.
+	ErrBlobInvalidLength = errors.New("blob invalid length")
+)
+
+// ErrBlobInvalidDigest returned when digest check fails.
+type ErrBlobInvalidDigest struct {
+	Digest digest.Digest
+	Reason error
+}
+
+func (err ErrBlobInvalidDigest) Error() string {
+	return fmt.Sprintf("invalid digest for referenced layer: %v, %v",
+		err.Digest, err.Reason)
+}
+
+// ErrBlobMounted returned when a blob is mounted from another repository
+// instead of initiating an upload session.
+type ErrBlobMounted struct {
+	From       reference.Canonical
+	Descriptor Descriptor
+}
+
+func (err ErrBlobMounted) Error() string {
+	return fmt.Sprintf("blob mounted from: %v to: %v",
+		err.From, err.Descriptor)
+}
+
+// Descriptor describes targeted content. Used in conjunction with a blob
+// store, a descriptor can be used to fetch, store and target any kind of
+// blob. The struct also describes the wire protocol format. Fields should
+// only be added but never changed.
+type Descriptor struct {
+	// MediaType describe the type of the content. All text based formats are
+	// encoded as utf-8.
+	MediaType string `json:"mediaType,omitempty"`
+
+	// Size in bytes of content.
+	Size int64 `json:"size,omitempty"`
+
+	// Digest uniquely identifies the content. A byte stream can be verified
+	// against this digest.
+	Digest digest.Digest `json:"digest,omitempty"`
+
+	// URLs contains the source URLs of this content.
+	URLs []string `json:"urls,omitempty"`
+
+	// NOTE: Before adding a field here, please ensure that all
+	// other options have been exhausted. Much of the type relationships
+	// depend on the simplicity of this type.
+}
+
+// Descriptor returns the descriptor, to make it satisfy the Describable
+// interface. Note that implementations of Describable are generally objects
+// which can be described, not simply descriptors; this exception is in place
+// to make it more convenient to pass actual descriptors to functions that
+// expect Describable objects.
+func (d Descriptor) Descriptor() Descriptor {
+	return d
+}
+
+// BlobStatter makes blob descriptors available by digest. The service may
+// provide a descriptor of a different digest if the provided digest is not
+// canonical.
+type BlobStatter interface {
+	// Stat provides metadata about a blob identified by the digest. If the
+	// blob is unknown to the describer, ErrBlobUnknown will be returned.
+	Stat(ctx context.Context, dgst digest.Digest) (Descriptor, error)
+}
+
+// BlobDeleter enables deleting blobs from storage.
+type BlobDeleter interface {
+	Delete(ctx context.Context, dgst digest.Digest) error
+}
+
+// BlobEnumerator enables iterating over blobs from storage
+type BlobEnumerator interface {
+	Enumerate(ctx context.Context, ingester func(dgst digest.Digest) error) error
+}
+
+// BlobDescriptorService manages metadata about a blob by digest. Most
+// implementations will not expose such an interface explicitly. Such mappings
+// should be maintained by interacting with the BlobIngester. Hence, this is
+// left off of BlobService and BlobStore.
+type BlobDescriptorService interface {
+	BlobStatter
+
+	// SetDescriptor assigns the descriptor to the digest. The provided digest and
+	// the digest in the descriptor must map to identical content but they may
+	// differ on their algorithm. The descriptor must have the canonical
+	// digest of the content and the digest algorithm must match the
+	// annotator's canonical algorithm.
+	//
+	// Such a facility can be used to map blobs between digest domains, with
+	// the restriction that the algorithm of the descriptor must match the
+	// canonical algorithm (ie sha256) of the annotator.
+	SetDescriptor(ctx context.Context, dgst digest.Digest, desc Descriptor) error
+
+	// Clear enables descriptors to be unlinked
+	Clear(ctx context.Context, dgst digest.Digest) error
+}
+
+// BlobDescriptorServiceFactory creates middleware for BlobDescriptorService.
+type BlobDescriptorServiceFactory interface {
+	BlobAccessController(svc BlobDescriptorService) BlobDescriptorService
+}
+
+// ReadSeekCloser is the primary reader type for blob data, combining
+// io.ReadSeeker with io.Closer.
+type ReadSeekCloser interface {
+	io.ReadSeeker
+	io.Closer
+}
+
+// BlobProvider describes operations for getting blob data.
+type BlobProvider interface {
+	// Get returns the entire blob identified by digest along with the descriptor.
+	Get(ctx context.Context, dgst digest.Digest) ([]byte, error)
+
+	// Open provides a ReadSeekCloser to the blob identified by the provided
+	// descriptor. If the blob is not known to the service, an error will be
+	// returned.
+	Open(ctx context.Context, dgst digest.Digest) (ReadSeekCloser, error)
+}
+
+// BlobServer can serve blobs via http.
+type BlobServer interface {
+	// ServeBlob attempts to serve the blob, identified by dgst, via http. The
+	// service may decide to redirect the client elsewhere or serve the data
+	// directly.
+	//
+	// This handler only issues successful responses, such as 2xx or 3xx,
+	// meaning it serves data or issues a redirect. If the blob is not
+	// available, an error will be returned and the caller may still issue a
+	// response.
+	//
+	// The implementation may serve the same blob from a different digest
+	// domain. The appropriate headers will be set for the blob, unless they
+	// have already been set by the caller.
+	ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error
+}
+
+// BlobIngester ingests blob data.
+type BlobIngester interface {
+	// Put inserts the content p into the blob service, returning a descriptor
+	// or an error.
+	Put(ctx context.Context, mediaType string, p []byte) (Descriptor, error)
+
+	// Create allocates a new blob writer to add a blob to this service. The
+	// returned handle can be written to and later resumed using an opaque
+	// identifier. With this approach, one can Close and Resume a BlobWriter
+	// multiple times until the BlobWriter is committed or cancelled.
+	Create(ctx context.Context, options ...BlobCreateOption) (BlobWriter, error)
+
+	// Resume attempts to resume a write to a blob, identified by an id.
+	Resume(ctx context.Context, id string) (BlobWriter, error)
+}
+
+// BlobCreateOption is a general extensible function argument for blob creation
+// methods. A BlobIngester may choose to honor any or none of the given
+// BlobCreateOptions, which can be specific to the implementation of the
+// BlobIngester receiving them.
+// TODO (brianbland): unify this with ManifestServiceOption in the future
+type BlobCreateOption interface {
+	Apply(interface{}) error
+}
+
+// CreateOptions is a collection of blob creation modifiers relevant to general
+// blob storage intended to be configured by the BlobCreateOption.Apply method.
+type CreateOptions struct {
+	Mount struct {
+		ShouldMount bool
+		From        reference.Canonical
+		// Stat allows to pass precalculated descriptor to link and return.
+		// Blob access check will be skipped if set.
+		Stat *Descriptor
+	}
+}
+
+// BlobWriter provides a handle for inserting data into a blob store.
+// Instances should be obtained from BlobWriteService.Writer and
+// BlobWriteService.Resume. If supported by the store, a writer can be
+// recovered with the id.
+type BlobWriter interface {
+	io.WriteCloser
+	io.ReaderFrom
+
+	// Size returns the number of bytes written to this blob.
+	Size() int64
+
+	// ID returns the identifier for this writer. The ID can be used with the
+	// Blob service to later resume the write.
+	ID() string
+
+	// StartedAt returns the time this blob write was started.
+	StartedAt() time.Time
+
+	// Commit completes the blob writer process. The content is verified
+	// against the provided provisional descriptor, which may result in an
+	// error. Depending on the implementation, written data may be validated
+	// against the provisional descriptor fields. If MediaType is not present,
+	// the implementation may reject the commit or assign "application/octet-
+	// stream" to the blob. The returned descriptor may have a different
+	// digest depending on the blob store, referred to as the canonical
+	// descriptor.
+	Commit(ctx context.Context, provisional Descriptor) (canonical Descriptor, err error)
+
+	// Cancel ends the blob write without storing any data and frees any
+	// associated resources. Any data written thus far will be lost. Cancel
+	// implementations should allow multiple calls even after a commit that
+	// result in a no-op. This allows use of Cancel in a defer statement,
+	// increasing the assurance that it is correctly called.
+	Cancel(ctx context.Context) error
+}
+
+// BlobService combines the operations to access, read and write blobs. This
+// can be used to describe remote blob services.
+type BlobService interface {
+	BlobStatter
+	BlobProvider
+	BlobIngester
+}
+
+// BlobStore represent the entire suite of blob related operations. Such an
+// implementation can access, read, write, delete and serve blobs.
+type BlobStore interface {
+	BlobService
+	BlobServer
+	BlobDeleter
+}
diff --git a/vendor/github.com/docker/distribution/circle.yml b/vendor/github.com/docker/distribution/circle.yml
new file mode 100644
index 0000000000000000000000000000000000000000..61f8be0cb5d9cf6bcfe31093e099945b9e9cf9ea
--- /dev/null
+++ b/vendor/github.com/docker/distribution/circle.yml
@@ -0,0 +1,93 @@
+# Pony-up!
+machine:
+  pre:
+  # Install gvm
+    - bash < <(curl -s -S -L https://raw.githubusercontent.com/moovweb/gvm/1.0.22/binscripts/gvm-installer)
+  # Install codecov for coverage
+    - pip install --user codecov
+
+  post:
+  # go
+    - gvm install go1.7 --prefer-binary --name=stable
+
+  environment:
+  # Convenient shortcuts to "common" locations
+    CHECKOUT: /home/ubuntu/$CIRCLE_PROJECT_REPONAME
+    BASE_DIR: src/github.com/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME
+  # Trick circle brainflat "no absolute path" behavior
+    BASE_STABLE: ../../../$HOME/.gvm/pkgsets/stable/global/$BASE_DIR
+    DOCKER_BUILDTAGS: "include_oss include_gcs"
+  # Workaround Circle parsing dumb bugs and/or YAML wonkyness
+    CIRCLE_PAIN: "mode: set"
+
+  hosts:
+  # Not used yet
+    fancy: 127.0.0.1
+
+dependencies:
+  pre:
+  # Copy the code to the gopath of all go versions
+    - >
+      gvm use stable &&
+      mkdir -p "$(dirname $BASE_STABLE)" &&
+      cp -R "$CHECKOUT" "$BASE_STABLE"
+
+  override:
+  # Install dependencies for every copied clone/go version
+    - gvm use stable && go get github.com/tools/godep:
+        pwd: $BASE_STABLE
+
+  post:
+  # For the stable go version, additionally install linting tools
+    - >
+      gvm use stable &&
+      go get github.com/axw/gocov/gocov github.com/golang/lint/golint
+
+test:
+  pre:
+  # Output the go versions we are going to test
+    # - gvm use old && go version
+    - gvm use stable && go version
+
+  # todo(richard): replace with a more robust vendoring solution. Removed due to a fundamental disagreement in godep philosophies.
+  # Ensure validation of dependencies
+  #    - gvm use stable && if test -n "`git diff --stat=1000 master | grep -Ei \"vendor|godeps\"`"; then make dep-validate; fi:
+  #      pwd: $BASE_STABLE
+
+  # First thing: build everything. This will catch compile errors, and it's
+  # also necessary for go vet to work properly (see #807).
+    - gvm use stable && godep go install $(go list ./... | grep -v "/vendor/"):
+        pwd: $BASE_STABLE
+
+  # FMT
+    - gvm use stable && make fmt:
+        pwd: $BASE_STABLE
+
+   # VET
+    - gvm use stable && make vet:
+        pwd: $BASE_STABLE
+
+  # LINT
+    - gvm use stable && make lint:
+        pwd: $BASE_STABLE
+
+  override:
+  # Test stable, and report
+     - gvm use stable; export ROOT_PACKAGE=$(go list .); go list -tags "$DOCKER_BUILDTAGS" ./... | grep -v "/vendor/" | xargs -L 1 -I{} bash -c 'export PACKAGE={}; godep go test -tags "$DOCKER_BUILDTAGS" -test.short -coverprofile=$GOPATH/src/$PACKAGE/coverage.out -coverpkg=$(./coverpkg.sh $PACKAGE $ROOT_PACKAGE) $PACKAGE':
+         timeout: 1000
+         pwd: $BASE_STABLE
+
+  # Test stable with race
+     - gvm use stable; export ROOT_PACKAGE=$(go list .); go list -tags "$DOCKER_BUILDTAGS" ./... | grep -v "/vendor/" | grep -v "registry/handlers" | grep -v "registry/storage/driver" | xargs -L 1 -I{} bash -c 'export PACKAGE={}; godep go test -race -tags "$DOCKER_BUILDTAGS" -test.short $PACKAGE':
+         timeout: 1000
+         pwd: $BASE_STABLE
+  post:
+  # Report to codecov
+    - bash <(curl -s https://codecov.io/bash):
+        pwd: $BASE_STABLE
+
+  ## Notes
+  # Do we want these as well?
+  # - go get code.google.com/p/go.tools/cmd/goimports
+  # - test -z "$(goimports -l -w ./... | tee /dev/stderr)"
+  # http://labix.org/gocheck
diff --git a/vendor/github.com/docker/distribution/context/context.go b/vendor/github.com/docker/distribution/context/context.go
new file mode 100644
index 0000000000000000000000000000000000000000..23cbf5b5450e42a5e70e8e576a830c84910b48bd
--- /dev/null
+++ b/vendor/github.com/docker/distribution/context/context.go
@@ -0,0 +1,85 @@
+package context
+
+import (
+	"sync"
+
+	"github.com/docker/distribution/uuid"
+	"golang.org/x/net/context"
+)
+
+// Context is a copy of Context from the golang.org/x/net/context package.
+type Context interface {
+	context.Context
+}
+
+// instanceContext is a context that provides only an instance id. It is
+// provided as the main background context.
+type instanceContext struct {
+	Context
+	id   string    // id of context, logged as "instance.id"
+	once sync.Once // once protect generation of the id
+}
+
+func (ic *instanceContext) Value(key interface{}) interface{} {
+	if key == "instance.id" {
+		ic.once.Do(func() {
+			// We want to lazy initialize the UUID such that we don't
+			// call a random generator from the package initialization
+			// code. For various reasons random could not be available
+			// https://github.com/docker/distribution/issues/782
+			ic.id = uuid.Generate().String()
+		})
+		return ic.id
+	}
+
+	return ic.Context.Value(key)
+}
+
+var background = &instanceContext{
+	Context: context.Background(),
+}
+
+// Background returns a non-nil, empty Context. The background context
+// provides a single key, "instance.id" that is globally unique to the
+// process.
+func Background() Context {
+	return background
+}
+
+// WithValue returns a copy of parent in which the value associated with key is
+// val. Use context Values only for request-scoped data that transits processes
+// and APIs, not for passing optional parameters to functions.
+func WithValue(parent Context, key, val interface{}) Context {
+	return context.WithValue(parent, key, val)
+}
+
+// stringMapContext is a simple context implementation that checks a map for a
+// key, falling back to a parent if not present.
+type stringMapContext struct {
+	context.Context
+	m map[string]interface{}
+}
+
+// WithValues returns a context that proxies lookups through a map. Only
+// supports string keys.
+func WithValues(ctx context.Context, m map[string]interface{}) context.Context {
+	mo := make(map[string]interface{}, len(m)) // make our own copy.
+	for k, v := range m {
+		mo[k] = v
+	}
+
+	return stringMapContext{
+		Context: ctx,
+		m:       mo,
+	}
+}
+
+func (smc stringMapContext) Value(key interface{}) interface{} {
+	if ks, ok := key.(string); ok {
+		if v, ok := smc.m[ks]; ok {
+			return v
+		}
+	}
+
+	return smc.Context.Value(key)
+}
diff --git a/vendor/github.com/docker/distribution/context/doc.go b/vendor/github.com/docker/distribution/context/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..3b4ab8882f59c1890ca4b96dfbdcbf34ddafff2f
--- /dev/null
+++ b/vendor/github.com/docker/distribution/context/doc.go
@@ -0,0 +1,89 @@
+// Package context provides several utilities for working with
+// golang.org/x/net/context in http requests. Primarily, the focus is on
+// logging relevant request information but this package is not limited to
+// that purpose.
+//
+// The easiest way to get started is to get the background context:
+//
+// 	ctx := context.Background()
+//
+// The returned context should be passed around your application and be the
+// root of all other context instances. If the application has a version, this
+// line should be called before anything else:
+//
+// 	ctx := context.WithVersion(context.Background(), version)
+//
+// The above will store the version in the context and will be available to
+// the logger.
+//
+// Logging
+//
+// The most useful aspect of this package is GetLogger. This function takes
+// any context.Context interface and returns the current logger from the
+// context. Canonical usage looks like this:
+//
+// 	GetLogger(ctx).Infof("something interesting happened")
+//
+// GetLogger also takes optional key arguments. The keys will be looked up in
+// the context and reported with the logger. The following example would
+// return a logger that prints the version with each log message:
+//
+// 	ctx := context.Context(context.Background(), "version", version)
+// 	GetLogger(ctx, "version").Infof("this log message has a version field")
+//
+// The above would print out a log message like this:
+//
+// 	INFO[0000] this log message has a version field        version=v2.0.0-alpha.2.m
+//
+// When used with WithLogger, we gain the ability to decorate the context with
+// loggers that have information from disparate parts of the call stack.
+// Following from the version example, we can build a new context with the
+// configured logger such that we always print the version field:
+//
+// 	ctx = WithLogger(ctx, GetLogger(ctx, "version"))
+//
+// Since the logger has been pushed to the context, we can now get the version
+// field for free with our log messages. Future calls to GetLogger on the new
+// context will have the version field:
+//
+// 	GetLogger(ctx).Infof("this log message has a version field")
+//
+// This becomes more powerful when we start stacking loggers. Let's say we
+// have the version logger from above but also want a request id. Using the
+// context above, in our request scoped function, we place another logger in
+// the context:
+//
+// 	ctx = context.WithValue(ctx, "http.request.id", "unique id") // called when building request context
+// 	ctx = WithLogger(ctx, GetLogger(ctx, "http.request.id"))
+//
+// When GetLogger is called on the new context, "http.request.id" will be
+// included as a logger field, along with the original "version" field:
+//
+// 	INFO[0000] this log message has a version field        http.request.id=unique id version=v2.0.0-alpha.2.m
+//
+// Note that this only affects the new context, the previous context, with the
+// version field, can be used independently. Put another way, the new logger,
+// added to the request context, is unique to that context and can have
+// request scoped variables.
+//
+// HTTP Requests
+//
+// This package also contains several methods for working with http requests.
+// The concepts are very similar to those described above. We simply place the
+// request in the context using WithRequest. This makes the request variables
+// available. GetRequestLogger can then be called to get request specific
+// variables in a log line:
+//
+// 	ctx = WithRequest(ctx, req)
+// 	GetRequestLogger(ctx).Infof("request variables")
+//
+// Like above, if we want to include the request data in all log messages in
+// the context, we push the logger to a new context and use that one:
+//
+// 	ctx = WithLogger(ctx, GetRequestLogger(ctx))
+//
+// The concept is fairly powerful and ensures that calls throughout the stack
+// can be traced in log messages. Using the fields like "http.request.id", one
+// can analyze call flow for a particular request with a simple grep of the
+// logs.
+package context
diff --git a/vendor/github.com/docker/distribution/context/http.go b/vendor/github.com/docker/distribution/context/http.go
new file mode 100644
index 0000000000000000000000000000000000000000..7fe9b8ab0524a9fd01c1e1b157d6b17eb398a102
--- /dev/null
+++ b/vendor/github.com/docker/distribution/context/http.go
@@ -0,0 +1,366 @@
+package context
+
+import (
+	"errors"
+	"net"
+	"net/http"
+	"strings"
+	"sync"
+	"time"
+
+	log "github.com/Sirupsen/logrus"
+	"github.com/docker/distribution/uuid"
+	"github.com/gorilla/mux"
+)
+
+// Common errors used with this package.
+var (
+	ErrNoRequestContext        = errors.New("no http request in context")
+	ErrNoResponseWriterContext = errors.New("no http response in context")
+)
+
+func parseIP(ipStr string) net.IP {
+	ip := net.ParseIP(ipStr)
+	if ip == nil {
+		log.Warnf("invalid remote IP address: %q", ipStr)
+	}
+	return ip
+}
+
+// RemoteAddr extracts the remote address of the request, taking into
+// account proxy headers.
+func RemoteAddr(r *http.Request) string {
+	if prior := r.Header.Get("X-Forwarded-For"); prior != "" {
+		proxies := strings.Split(prior, ",")
+		if len(proxies) > 0 {
+			remoteAddr := strings.Trim(proxies[0], " ")
+			if parseIP(remoteAddr) != nil {
+				return remoteAddr
+			}
+		}
+	}
+	// X-Real-Ip is less supported, but worth checking in the
+	// absence of X-Forwarded-For
+	if realIP := r.Header.Get("X-Real-Ip"); realIP != "" {
+		if parseIP(realIP) != nil {
+			return realIP
+		}
+	}
+
+	return r.RemoteAddr
+}
+
+// RemoteIP extracts the remote IP of the request, taking into
+// account proxy headers.
+func RemoteIP(r *http.Request) string {
+	addr := RemoteAddr(r)
+
+	// Try parsing it as "IP:port"
+	if ip, _, err := net.SplitHostPort(addr); err == nil {
+		return ip
+	}
+
+	return addr
+}
+
+// WithRequest places the request on the context. The context of the request
+// is assigned a unique id, available at "http.request.id". The request itself
+// is available at "http.request". Other common attributes are available under
+// the prefix "http.request.". If a request is already present on the context,
+// this method will panic.
+func WithRequest(ctx Context, r *http.Request) Context {
+	if ctx.Value("http.request") != nil {
+		// NOTE(stevvooe): This needs to be considered a programming error. It
+		// is unlikely that we'd want to have more than one request in
+		// context.
+		panic("only one request per context")
+	}
+
+	return &httpRequestContext{
+		Context:   ctx,
+		startedAt: time.Now(),
+		id:        uuid.Generate().String(),
+		r:         r,
+	}
+}
+
+// GetRequest returns the http request in the given context. Returns
+// ErrNoRequestContext if the context does not have an http request associated
+// with it.
+func GetRequest(ctx Context) (*http.Request, error) {
+	if r, ok := ctx.Value("http.request").(*http.Request); r != nil && ok {
+		return r, nil
+	}
+	return nil, ErrNoRequestContext
+}
+
+// GetRequestID attempts to resolve the current request id, if possible. An
+// error is returned if it is not available on the context.
+func GetRequestID(ctx Context) string {
+	return GetStringValue(ctx, "http.request.id")
+}
+
+// WithResponseWriter returns a new context and response writer that makes
+// interesting response statistics available within the context.
+func WithResponseWriter(ctx Context, w http.ResponseWriter) (Context, http.ResponseWriter) {
+	if closeNotifier, ok := w.(http.CloseNotifier); ok {
+		irwCN := &instrumentedResponseWriterCN{
+			instrumentedResponseWriter: instrumentedResponseWriter{
+				ResponseWriter: w,
+				Context:        ctx,
+			},
+			CloseNotifier: closeNotifier,
+		}
+
+		return irwCN, irwCN
+	}
+
+	irw := instrumentedResponseWriter{
+		ResponseWriter: w,
+		Context:        ctx,
+	}
+	return &irw, &irw
+}
+
+// GetResponseWriter returns the http.ResponseWriter from the provided
+// context. If not present, ErrNoResponseWriterContext is returned. The
+// returned instance provides instrumentation in the context.
+func GetResponseWriter(ctx Context) (http.ResponseWriter, error) {
+	v := ctx.Value("http.response")
+
+	rw, ok := v.(http.ResponseWriter)
+	if !ok || rw == nil {
+		return nil, ErrNoResponseWriterContext
+	}
+
+	return rw, nil
+}
+
+// getVarsFromRequest lets us change request vars implementation for testing
+// and maybe future changes.
+var getVarsFromRequest = mux.Vars
+
+// WithVars extracts gorilla/mux vars and makes them available on the returned
+// context. Variables are available at keys with the prefix "vars.". For
+// example, if looking for the variable "name", it can be accessed as
+// "vars.name". Implementations that are accessing values need not know that
+// the underlying context is implemented with gorilla/mux vars.
+func WithVars(ctx Context, r *http.Request) Context {
+	return &muxVarsContext{
+		Context: ctx,
+		vars:    getVarsFromRequest(r),
+	}
+}
+
+// GetRequestLogger returns a logger that contains fields from the request in
+// the current context. If the request is not available in the context, no
+// fields will display. Request loggers can safely be pushed onto the context.
+func GetRequestLogger(ctx Context) Logger {
+	return GetLogger(ctx,
+		"http.request.id",
+		"http.request.method",
+		"http.request.host",
+		"http.request.uri",
+		"http.request.referer",
+		"http.request.useragent",
+		"http.request.remoteaddr",
+		"http.request.contenttype")
+}
+
+// GetResponseLogger reads the current response stats and builds a logger.
+// Because the values are read at call time, pushing a logger returned from
+// this function on the context will lead to missing or invalid data. Only
+// call this at the end of a request, after the response has been written.
+func GetResponseLogger(ctx Context) Logger {
+	l := getLogrusLogger(ctx,
+		"http.response.written",
+		"http.response.status",
+		"http.response.contenttype")
+
+	duration := Since(ctx, "http.request.startedat")
+
+	if duration > 0 {
+		l = l.WithField("http.response.duration", duration.String())
+	}
+
+	return l
+}
+
+// httpRequestContext makes information about a request available to context.
+type httpRequestContext struct {
+	Context
+
+	startedAt time.Time
+	id        string
+	r         *http.Request
+}
+
+// Value returns a keyed element of the request for use in the context. To get
+// the request itself, query "request". For other components, access them as
+// "request.<component>". For example, r.RequestURI
+func (ctx *httpRequestContext) Value(key interface{}) interface{} {
+	if keyStr, ok := key.(string); ok {
+		if keyStr == "http.request" {
+			return ctx.r
+		}
+
+		if !strings.HasPrefix(keyStr, "http.request.") {
+			goto fallback
+		}
+
+		parts := strings.Split(keyStr, ".")
+
+		if len(parts) != 3 {
+			goto fallback
+		}
+
+		switch parts[2] {
+		case "uri":
+			return ctx.r.RequestURI
+		case "remoteaddr":
+			return RemoteAddr(ctx.r)
+		case "method":
+			return ctx.r.Method
+		case "host":
+			return ctx.r.Host
+		case "referer":
+			referer := ctx.r.Referer()
+			if referer != "" {
+				return referer
+			}
+		case "useragent":
+			return ctx.r.UserAgent()
+		case "id":
+			return ctx.id
+		case "startedat":
+			return ctx.startedAt
+		case "contenttype":
+			ct := ctx.r.Header.Get("Content-Type")
+			if ct != "" {
+				return ct
+			}
+		}
+	}
+
+fallback:
+	return ctx.Context.Value(key)
+}
+
+type muxVarsContext struct {
+	Context
+	vars map[string]string
+}
+
+func (ctx *muxVarsContext) Value(key interface{}) interface{} {
+	if keyStr, ok := key.(string); ok {
+		if keyStr == "vars" {
+			return ctx.vars
+		}
+
+		if strings.HasPrefix(keyStr, "vars.") {
+			keyStr = strings.TrimPrefix(keyStr, "vars.")
+		}
+
+		if v, ok := ctx.vars[keyStr]; ok {
+			return v
+		}
+	}
+
+	return ctx.Context.Value(key)
+}
+
+// instrumentedResponseWriterCN provides response writer information in a
+// context. It implements http.CloseNotifier so that users can detect
+// early disconnects.
+type instrumentedResponseWriterCN struct {
+	instrumentedResponseWriter
+	http.CloseNotifier
+}
+
+// instrumentedResponseWriter provides response writer information in a
+// context. This variant is only used in the case where CloseNotifier is not
+// implemented by the parent ResponseWriter.
+type instrumentedResponseWriter struct {
+	http.ResponseWriter
+	Context
+
+	mu      sync.Mutex
+	status  int
+	written int64
+}
+
+func (irw *instrumentedResponseWriter) Write(p []byte) (n int, err error) {
+	n, err = irw.ResponseWriter.Write(p)
+
+	irw.mu.Lock()
+	irw.written += int64(n)
+
+	// Guess the likely status if not set.
+	if irw.status == 0 {
+		irw.status = http.StatusOK
+	}
+
+	irw.mu.Unlock()
+
+	return
+}
+
+func (irw *instrumentedResponseWriter) WriteHeader(status int) {
+	irw.ResponseWriter.WriteHeader(status)
+
+	irw.mu.Lock()
+	irw.status = status
+	irw.mu.Unlock()
+}
+
+func (irw *instrumentedResponseWriter) Flush() {
+	if flusher, ok := irw.ResponseWriter.(http.Flusher); ok {
+		flusher.Flush()
+	}
+}
+
+func (irw *instrumentedResponseWriter) Value(key interface{}) interface{} {
+	if keyStr, ok := key.(string); ok {
+		if keyStr == "http.response" {
+			return irw
+		}
+
+		if !strings.HasPrefix(keyStr, "http.response.") {
+			goto fallback
+		}
+
+		parts := strings.Split(keyStr, ".")
+
+		if len(parts) != 3 {
+			goto fallback
+		}
+
+		irw.mu.Lock()
+		defer irw.mu.Unlock()
+
+		switch parts[2] {
+		case "written":
+			return irw.written
+		case "status":
+			return irw.status
+		case "contenttype":
+			contentType := irw.Header().Get("Content-Type")
+			if contentType != "" {
+				return contentType
+			}
+		}
+	}
+
+fallback:
+	return irw.Context.Value(key)
+}
+
+func (irw *instrumentedResponseWriterCN) Value(key interface{}) interface{} {
+	if keyStr, ok := key.(string); ok {
+		if keyStr == "http.response" {
+			return irw
+		}
+	}
+
+	return irw.instrumentedResponseWriter.Value(key)
+}
diff --git a/vendor/github.com/docker/distribution/context/logger.go b/vendor/github.com/docker/distribution/context/logger.go
new file mode 100644
index 0000000000000000000000000000000000000000..fbb6a0511f7d4ff4feca30b270f945d129d0e50c
--- /dev/null
+++ b/vendor/github.com/docker/distribution/context/logger.go
@@ -0,0 +1,116 @@
+package context
+
+import (
+	"fmt"
+
+	"github.com/Sirupsen/logrus"
+	"runtime"
+)
+
// Logger provides a leveled-logging interface. It is a subset of the
// methods exposed by logrus so that any logrus logger or entry satisfies it.
type Logger interface {
	// standard logger methods
	Print(args ...interface{})
	Printf(format string, args ...interface{})
	Println(args ...interface{})

	Fatal(args ...interface{})
	Fatalf(format string, args ...interface{})
	Fatalln(args ...interface{})

	Panic(args ...interface{})
	Panicf(format string, args ...interface{})
	Panicln(args ...interface{})

	// Leveled methods, from logrus
	Debug(args ...interface{})
	Debugf(format string, args ...interface{})
	Debugln(args ...interface{})

	Error(args ...interface{})
	Errorf(format string, args ...interface{})
	Errorln(args ...interface{})

	Info(args ...interface{})
	Infof(format string, args ...interface{})
	Infoln(args ...interface{})

	Warn(args ...interface{})
	Warnf(format string, args ...interface{})
	Warnln(args ...interface{})
}
+
// WithLogger creates a new context with provided logger. The logger is
// stored under the string key "logger", where getLogrusLogger looks it up.
func WithLogger(ctx Context, logger Logger) Context {
	return WithValue(ctx, "logger", logger)
}
+
// GetLoggerWithField returns a logger instance with the specified field key
// and value without affecting the context. Extra specified keys will be
// resolved from the context. The key is stringified via fmt.Sprint.
func GetLoggerWithField(ctx Context, key, value interface{}, keys ...interface{}) Logger {
	return getLogrusLogger(ctx, keys...).WithField(fmt.Sprint(key), value)
}
+
+// GetLoggerWithFields returns a logger instance with the specified fields
+// without affecting the context. Extra specified keys will be resolved from
+// the context.
+func GetLoggerWithFields(ctx Context, fields map[interface{}]interface{}, keys ...interface{}) Logger {
+	// must convert from interface{} -> interface{} to string -> interface{} for logrus.
+	lfields := make(logrus.Fields, len(fields))
+	for key, value := range fields {
+		lfields[fmt.Sprint(key)] = value
+	}
+
+	return getLogrusLogger(ctx, keys...).WithFields(lfields)
+}
+
// GetLogger returns the logger from the current context, if present. If one
// or more keys are provided, they will be resolved on the context and
// included in the logger. While context.Value takes an interface, any key
// argument passed to GetLogger will be passed to fmt.Sprint when expanded as
// a logging key field. If context keys are integer constants, for example,
// it's recommended that a String method is implemented.
func GetLogger(ctx Context, keys ...interface{}) Logger {
	return getLogrusLogger(ctx, keys...)
}
+
// getLogrusLogger returns the logrus logger for the context. If one or more
// keys are provided, they will be resolved on the context and included in
// the logger. Only use this function if specific logrus functionality is
// required.
func getLogrusLogger(ctx Context, keys ...interface{}) *logrus.Entry {
	var logger *logrus.Entry

	// Get a logger, if it is present.
	loggerInterface := ctx.Value("logger")
	if loggerInterface != nil {
		if lgr, ok := loggerInterface.(*logrus.Entry); ok {
			logger = lgr
		}
	}

	// No logger stored on the context (or it was not a *logrus.Entry):
	// fall back to the process-wide standard logger, decorated with
	// identifying fields.
	if logger == nil {
		fields := logrus.Fields{}

		// Fill in the instance id, if we have it.
		instanceID := ctx.Value("instance.id")
		if instanceID != nil {
			fields["instance.id"] = instanceID
		}

		fields["go.version"] = runtime.Version()
		// If no logger is found, just return the standard logger.
		logger = logrus.StandardLogger().WithFields(fields)
	}

	// Resolve each requested key on the context; keys that resolve to nil
	// are omitted rather than logged as null.
	fields := logrus.Fields{}
	for _, key := range keys {
		v := ctx.Value(key)
		if v != nil {
			fields[fmt.Sprint(key)] = v
		}
	}

	return logger.WithFields(fields)
}
diff --git a/vendor/github.com/docker/distribution/context/trace.go b/vendor/github.com/docker/distribution/context/trace.go
new file mode 100644
index 0000000000000000000000000000000000000000..721964a8489dbcd73b24b9a04e4ceb02b6479d43
--- /dev/null
+++ b/vendor/github.com/docker/distribution/context/trace.go
@@ -0,0 +1,104 @@
+package context
+
+import (
+	"runtime"
+	"time"
+
+	"github.com/docker/distribution/uuid"
+)
+
// WithTrace allocates a traced timing span in a new context. This allows a
// caller to track the time between calling WithTrace and the returned done
// function. When the done function is called, a log message is emitted with a
// "trace.duration" field, corresponding to the elapsed time and a
// "trace.func" field, corresponding to the function that called WithTrace.
//
// The logging keys "trace.id" and "trace.parent.id" are provided to implement
// dapper-like tracing. This function should be complemented with a WithSpan
// method that could be used for tracing distributed RPC calls.
//
// The main benefit of this function is to post-process log messages or
// intercept them in a hook to provide timing data. Trace ids and parent ids
// can also be linked to provide call tracing, if so required.
//
// Here is an example of the usage:
//
// 	func timedOperation(ctx Context) {
// 		ctx, done := WithTrace(ctx)
// 		defer done("this will be the log message")
// 		// ... function body ...
// 	}
//
// If the function ran for roughly 1s, such a usage would emit a log message
// as follows:
//
// 	INFO[0001] this will be the log message  trace.duration=1.004575763s trace.func=github.com/docker/distribution/context.traceOperation trace.id=<id> ...
//
// Notice that the function name is automatically resolved, along with the
// package and a trace id is emitted that can be linked with parent ids.
func WithTrace(ctx Context) (Context, func(format string, a ...interface{})) {
	if ctx == nil {
		ctx = Background()
	}

	// Capture the caller's frame (skip=1 → the function that called
	// WithTrace). The error return of runtime.Caller is ignored here;
	// NOTE(review): on failure pc is 0 and FuncForPC may return nil,
	// which would panic on f.Name() — presumed unreachable in practice.
	pc, file, line, _ := runtime.Caller(1)
	f := runtime.FuncForPC(pc)
	ctx = &traced{
		Context: ctx,
		id:      uuid.Generate().String(),
		start:   time.Now(),
		parent:  GetStringValue(ctx, "trace.id"),
		fnname:  f.Name(),
		file:    file,
		line:    line,
	}

	// The done func logs at debug level; the trace.* keys are resolved
	// lazily by traced.Value at the time done is invoked.
	return ctx, func(format string, a ...interface{}) {
		GetLogger(ctx,
			"trace.duration",
			"trace.id",
			"trace.parent.id",
			"trace.func",
			"trace.file",
			"trace.line").
			Debugf(format, a...)
	}
}
+
// traced represents a context that is traced for function call timing. It
// also provides fast lookup for the various attributes that are available on
// the trace.
type traced struct {
	Context
	id     string    // unique id of this span
	parent string    // id of the enclosing span, "" when this is a root
	start  time.Time // when WithTrace was called
	fnname string    // fully qualified name of the calling function
	file   string    // source file of the caller
	line   int       // source line of the caller
}
+
// Value resolves the trace.* keys directly from the span's fields; all
// other keys are delegated to the embedded Context. Note that
// "trace.duration" is computed at lookup time, not when the span started.
func (ts *traced) Value(key interface{}) interface{} {
	switch key {
	case "trace.start":
		return ts.start
	case "trace.duration":
		return time.Since(ts.start)
	case "trace.id":
		return ts.id
	case "trace.parent.id":
		if ts.parent == "" {
			return nil // must return nil to signal no parent.
		}

		return ts.parent
	case "trace.func":
		return ts.fnname
	case "trace.file":
		return ts.file
	case "trace.line":
		return ts.line
	}

	return ts.Context.Value(key)
}
diff --git a/vendor/github.com/docker/distribution/context/util.go b/vendor/github.com/docker/distribution/context/util.go
new file mode 100644
index 0000000000000000000000000000000000000000..cb9ef52e38e7a7abaa048bdeb342a9faaef0b17f
--- /dev/null
+++ b/vendor/github.com/docker/distribution/context/util.go
@@ -0,0 +1,24 @@
+package context
+
+import (
+	"time"
+)
+
+// Since looks up key, which should be a time.Time, and returns the duration
+// since that time. If the key is not found, the value returned will be zero.
+// This is helpful when inferring metrics related to context execution times.
+func Since(ctx Context, key interface{}) time.Duration {
+	if startedAt, ok := ctx.Value(key).(time.Time); ok {
+		return time.Since(startedAt)
+	}
+	return 0
+}
+
+// GetStringValue returns a string value from the context. The empty string
+// will be returned if not found.
+func GetStringValue(ctx Context, key interface{}) (value string) {
+	if valuev, ok := ctx.Value(key).(string); ok {
+		value = valuev
+	}
+	return value
+}
diff --git a/vendor/github.com/docker/distribution/context/version.go b/vendor/github.com/docker/distribution/context/version.go
new file mode 100644
index 0000000000000000000000000000000000000000..746cda02ecdcf4ba57448fd0345206882959f401
--- /dev/null
+++ b/vendor/github.com/docker/distribution/context/version.go
@@ -0,0 +1,16 @@
+package context
+
// WithVersion stores the application version in the context. The new context
// gets a logger to ensure log messages are marked with the application
// version.
func WithVersion(ctx Context, version string) Context {
	ctx = WithValue(ctx, "version", version)
	// push a new logger onto the stack, pre-resolved with the "version" key
	return WithLogger(ctx, GetLogger(ctx, "version"))
}
+
// GetVersion returns the application version from the context. An empty
// string may be returned if the version was not set on the context.
func GetVersion(ctx Context) string {
	return GetStringValue(ctx, "version")
}
diff --git a/vendor/github.com/docker/distribution/coverpkg.sh b/vendor/github.com/docker/distribution/coverpkg.sh
new file mode 100755
index 0000000000000000000000000000000000000000..25d419ae826581226d96128fa75f41c4d5aaae69
--- /dev/null
+++ b/vendor/github.com/docker/distribution/coverpkg.sh
@@ -0,0 +1,7 @@
#!/usr/bin/env bash
# Given a subpackage and the containing package, figures out which packages
# need to be passed to `go test -coverpkg`:  this includes all of the
# subpackage's dependencies within the containing package, as well as the
# subpackage itself.
#
# Usage: coverpkg.sh <subpackage> <containing-package>
# Output: a single comma-separated package list on stdout (no newline).
DEPENDENCIES="$(go list -f $'{{range $f := .Deps}}{{$f}}\n{{end}}' ${1} | grep ${2} | grep -v github.com/docker/distribution/vendor)"
echo "${1} ${DEPENDENCIES}" | xargs echo -n | tr ' ' ','
diff --git a/vendor/github.com/docker/distribution/doc.go b/vendor/github.com/docker/distribution/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..bdd8cb708e5d96d5d0be2956ab74ab5a00c4cace
--- /dev/null
+++ b/vendor/github.com/docker/distribution/doc.go
@@ -0,0 +1,7 @@
+// Package distribution will define the interfaces for the components of
+// docker distribution. The goal is to allow users to reliably package, ship
+// and store content related to docker images.
+//
+// This is currently a work in progress. More details are available in the
+// README.md.
+package distribution
diff --git a/vendor/github.com/docker/distribution/errors.go b/vendor/github.com/docker/distribution/errors.go
new file mode 100644
index 0000000000000000000000000000000000000000..c20f28113ce5be7dd15e1df592fdea6b7dd424e1
--- /dev/null
+++ b/vendor/github.com/docker/distribution/errors.go
@@ -0,0 +1,115 @@
+package distribution
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+
+	"github.com/docker/distribution/digest"
+)
+
// ErrAccessDenied is returned when an access to a requested resource is
// denied.
var ErrAccessDenied = errors.New("access denied")

// ErrManifestNotModified is returned when a conditional manifest GetByTag
// returns nil due to the client indicating it has the latest version.
var ErrManifestNotModified = errors.New("manifest not modified")

// ErrUnsupported is returned when an unimplemented or unsupported action is
// performed.
var ErrUnsupported = errors.New("operation unsupported")
+
// ErrTagUnknown is returned if the given tag is not known by the tag service.
type ErrTagUnknown struct {
	Tag string
}

// Error implements the error interface.
func (err ErrTagUnknown) Error() string {
	return "unknown tag=" + err.Tag
}
+
// ErrRepositoryUnknown is returned if the named repository is not known by
// the registry.
type ErrRepositoryUnknown struct {
	Name string
}

// Error implements the error interface.
func (err ErrRepositoryUnknown) Error() string {
	return "unknown repository name=" + err.Name
}
+
// ErrRepositoryNameInvalid should be used to denote an invalid repository
// name. Reason may be set, indicating the cause of invalidity.
type ErrRepositoryNameInvalid struct {
	Name   string
	Reason error
}

// Error implements the error interface. A nil Reason renders as "<nil>".
func (err ErrRepositoryNameInvalid) Error() string {
	return fmt.Sprintf("repository name %q invalid: %v", err.Name, err.Reason)
}
+
// ErrManifestUnknown is returned if the manifest is not known by the
// registry.
type ErrManifestUnknown struct {
	Name string
	Tag  string
}

// Error implements the error interface.
func (err ErrManifestUnknown) Error() string {
	return "unknown manifest name=" + err.Name + " tag=" + err.Tag
}
+
// ErrManifestUnknownRevision is returned when a manifest cannot be found by
// revision within a repository.
type ErrManifestUnknownRevision struct {
	Name     string
	Revision digest.Digest
}

// Error implements the error interface.
func (err ErrManifestUnknownRevision) Error() string {
	return fmt.Sprintf("unknown manifest name=%s revision=%s", err.Name, err.Revision)
}
+
// ErrManifestUnverified is returned when the registry is unable to verify
// the manifest.
type ErrManifestUnverified struct{}

// Error implements the error interface. The message is constant, so a plain
// string literal is used (fmt.Sprintf with no arguments was redundant).
func (ErrManifestUnverified) Error() string {
	return "unverified manifest"
}
+
// ErrManifestVerification provides a type to collect errors encountered
// during manifest verification. Currently, it accepts errors of all types,
// but it may be narrowed to those involving manifest verification.
type ErrManifestVerification []error

// Error joins the collected error messages with commas behind a fixed
// prefix.
func (errs ErrManifestVerification) Error() string {
	parts := make([]string, 0, len(errs))
	for _, err := range errs {
		parts = append(parts, err.Error())
	}

	return "errors verifying manifest: " + strings.Join(parts, ",")
}
+
// ErrManifestBlobUnknown returned when a referenced blob cannot be found.
type ErrManifestBlobUnknown struct {
	Digest digest.Digest
}

// Error implements the error interface.
func (err ErrManifestBlobUnknown) Error() string {
	return fmt.Sprintf("unknown blob %v on manifest", err.Digest)
}
+
// ErrManifestNameInvalid should be used to denote an invalid manifest
// name. Reason may be set, indicating the cause of invalidity.
type ErrManifestNameInvalid struct {
	Name   string
	Reason error
}

// Error implements the error interface. A nil Reason renders as "<nil>".
func (err ErrManifestNameInvalid) Error() string {
	return fmt.Sprintf("manifest name %q invalid: %v", err.Name, err.Reason)
}
diff --git a/vendor/github.com/docker/distribution/manifests.go b/vendor/github.com/docker/distribution/manifests.go
new file mode 100644
index 0000000000000000000000000000000000000000..c4fb63450bf2b20131e6f8505796ddcc40df02e3
--- /dev/null
+++ b/vendor/github.com/docker/distribution/manifests.go
@@ -0,0 +1,125 @@
+package distribution
+
+import (
+	"fmt"
+	"mime"
+
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/digest"
+)
+
// Manifest represents a registry object specifying a set of
// references and an optional target.
type Manifest interface {
	// References returns a list of objects which make up this manifest.
	// A reference is anything which can be represented by a
	// distribution.Descriptor. These can consist of layers, resources or other
	// manifests.
	//
	// While no particular order is required, implementations should return
	// them from highest to lowest priority. For example, one might want to
	// return the base layer before the top layer.
	References() []Descriptor

	// Payload provides the serialized format of the manifest, in addition to
	// the mediatype.
	Payload() (mediatype string, payload []byte, err error)
}
+
// ManifestBuilder creates a manifest allowing one to include dependencies.
// Instances can be obtained from a version-specific manifest package.
// Manifest specific data is passed into the function which creates the
// builder.
type ManifestBuilder interface {
	// Build creates the manifest from this builder.
	Build(ctx context.Context) (Manifest, error)

	// References returns a list of objects which have been added to this
	// builder. The dependencies are returned in the order they were added,
	// which should be from base to head.
	References() []Descriptor

	// AppendReference includes the given object in the manifest after any
	// existing dependencies. If the add fails, such as when adding an
	// unsupported dependency, an error may be returned.
	//
	// The destination of the reference is dependent on the manifest type and
	// the dependency type.
	AppendReference(dependency Describable) error
}
+
// ManifestService describes operations on image manifests.
type ManifestService interface {
	// Exists returns true if the manifest exists.
	Exists(ctx context.Context, dgst digest.Digest) (bool, error)

	// Get retrieves the manifest specified by the given digest.
	Get(ctx context.Context, dgst digest.Digest, options ...ManifestServiceOption) (Manifest, error)

	// Put creates or updates the given manifest returning the manifest digest.
	Put(ctx context.Context, manifest Manifest, options ...ManifestServiceOption) (digest.Digest, error)

	// Delete removes the manifest specified by the given digest. Deleting
	// a manifest that doesn't exist will return ErrManifestNotFound.
	// NOTE(review): no ErrManifestNotFound type is visible in this package;
	// confirm which error implementations actually return.
	Delete(ctx context.Context, dgst digest.Digest) error
}
+
// ManifestEnumerator enables iterating over manifests.
type ManifestEnumerator interface {
	// Enumerate calls ingester for each manifest.
	Enumerate(ctx context.Context, ingester func(digest.Digest) error) error
}
+
// Describable is an interface for descriptors: anything that can summarize
// itself as a Descriptor.
type Describable interface {
	Descriptor() Descriptor
}
+
+// ManifestMediaTypes returns the supported media types for manifests.
+func ManifestMediaTypes() (mediaTypes []string) {
+	for t := range mappings {
+		if t != "" {
+			mediaTypes = append(mediaTypes, t)
+		}
+	}
+	return
+}
+
+// UnmarshalFunc implements manifest unmarshalling a given MediaType
+type UnmarshalFunc func([]byte) (Manifest, Descriptor, error)
+
+var mappings = make(map[string]UnmarshalFunc, 0)
+
+// UnmarshalManifest looks up manifest unmarshal functions based on
+// MediaType
+func UnmarshalManifest(ctHeader string, p []byte) (Manifest, Descriptor, error) {
+	// Need to look up by the actual media type, not the raw contents of
+	// the header. Strip semicolons and anything following them.
+	var mediatype string
+	if ctHeader != "" {
+		var err error
+		mediatype, _, err = mime.ParseMediaType(ctHeader)
+		if err != nil {
+			return nil, Descriptor{}, err
+		}
+	}
+
+	unmarshalFunc, ok := mappings[mediatype]
+	if !ok {
+		unmarshalFunc, ok = mappings[""]
+		if !ok {
+			return nil, Descriptor{}, fmt.Errorf("unsupported manifest mediatype and no default available: %s", mediatype)
+		}
+	}
+
+	return unmarshalFunc(p)
+}
+
+// RegisterManifestSchema registers an UnmarshalFunc for a given schema type.  This
+// should be called from specific
+func RegisterManifestSchema(mediatype string, u UnmarshalFunc) error {
+	if _, ok := mappings[mediatype]; ok {
+		return fmt.Errorf("manifest mediatype registration would overwrite existing: %s", mediatype)
+	}
+	mappings[mediatype] = u
+	return nil
+}
diff --git a/vendor/github.com/docker/distribution/registry.go b/vendor/github.com/docker/distribution/registry.go
new file mode 100644
index 0000000000000000000000000000000000000000..1ede31ebb63ce6fa45999d0599f571cfad222826
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry.go
@@ -0,0 +1,97 @@
+package distribution
+
+import (
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/reference"
+)
+
// Scope defines the set of items that match a namespace.
type Scope interface {
	// Contains returns true if the name belongs to the namespace.
	Contains(name string) bool
}
+
// fullScope is the Scope that matches every name unconditionally.
type fullScope struct{}

// Contains always reports true: every name belongs to the full scope.
func (fullScope) Contains(string) bool {
	return true
}
+
// GlobalScope represents the full namespace scope which contains
// all other scopes.
var GlobalScope = Scope(fullScope{})
+
// Namespace represents a collection of repositories, addressable by name.
// Generally, a namespace is backed by a set of one or more services,
// providing facilities such as registry access, trust, and indexing.
type Namespace interface {
	// Scope describes the names that can be used with this Namespace. The
	// global namespace will have a scope that matches all names. The scope
	// effectively provides an identity for the namespace.
	Scope() Scope

	// Repository should return a reference to the named repository. The
	// registry may or may not have the repository but should always return a
	// reference.
	Repository(ctx context.Context, name reference.Named) (Repository, error)

	// Repositories fills 'repos' with a lexicographically sorted catalog of repositories
	// up to the size of 'repos' and returns the value 'n' for the number of entries
	// which were filled.  'last' contains an offset in the catalog, and 'err' will be
	// set to io.EOF if there are no more entries to obtain.
	Repositories(ctx context.Context, repos []string, last string) (n int, err error)

	// Blobs returns a blob enumerator to access all blobs.
	Blobs() BlobEnumerator

	// BlobStatter returns a BlobStatter to control blob metadata access.
	// NOTE(review): the upstream comment was truncated ("to control");
	// confirm intended wording against the BlobStatter definition.
	BlobStatter() BlobStatter
}
+
// RepositoryEnumerator describes an operation to enumerate repositories.
// The ingester is invoked once per repository name.
type RepositoryEnumerator interface {
	Enumerate(ctx context.Context, ingester func(string) error) error
}
+
// ManifestServiceOption is a function argument for Manifest Service methods.
type ManifestServiceOption interface {
	Apply(ManifestService) error
}
+
+// WithTag allows a tag to be passed into Put
+func WithTag(tag string) ManifestServiceOption {
+	return WithTagOption{tag}
+}
+
+// WithTagOption holds a tag
+type WithTagOption struct{ Tag string }
+
+// Apply conforms to the ManifestServiceOption interface
+func (o WithTagOption) Apply(m ManifestService) error {
+	// no implementation
+	return nil
+}
+
// Repository is a named collection of manifests and layers.
type Repository interface {
	// Named returns the name of the repository.
	Named() reference.Named

	// Manifests returns a reference to this repository's manifest service
	// with the supplied options applied.
	Manifests(ctx context.Context, options ...ManifestServiceOption) (ManifestService, error)

	// Blobs returns a reference to this repository's blob service.
	Blobs(ctx context.Context) BlobStore

	// TODO(stevvooe): The above BlobStore return can probably be relaxed to
	// be a BlobService for use with clients. This will allow such
	// implementations to avoid implementing ServeBlob.

	// Tags returns a reference to this repository's tag service.
	Tags(ctx context.Context) TagService
}
+
+// TODO(stevvooe): Must add close methods to all these. May want to change the
+// way instances are created to better reflect internal dependency
+// relationships.
diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/errors.go b/vendor/github.com/docker/distribution/registry/api/errcode/errors.go
new file mode 100644
index 0000000000000000000000000000000000000000..6d9bb4b62afba2bf02c672be73cd60dbb26df16f
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/api/errcode/errors.go
@@ -0,0 +1,267 @@
+package errcode
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+)
+
// ErrorCoder is the base interface for ErrorCode and Error allowing
// users of each to just call ErrorCode to get the real ID of each.
type ErrorCoder interface {
	ErrorCode() ErrorCode
}
+
// ErrorCode represents the error type. The errors are serialized via strings
// and the integer format may change and should *never* be exported.
type ErrorCode int

// ErrorCode must satisfy the error interface.
var _ error = ErrorCode(0)
+
// ErrorCode just returns itself, satisfying the ErrorCoder interface.
func (ec ErrorCode) ErrorCode() ErrorCode {
	return ec
}
+
// Error returns the ID/Value, lowercased and with underscores replaced by
// spaces (e.g. "NAME_UNKNOWN" -> "name unknown").
func (ec ErrorCode) Error() string {
	// NOTE(stevvooe): Cannot use message here since it may have unpopulated args.
	return strings.ToLower(strings.Replace(ec.String(), "_", " ", -1))
}
+
+// Descriptor returns the descriptor for the error code.
+func (ec ErrorCode) Descriptor() ErrorDescriptor {
+	d, ok := errorCodeToDescriptors[ec]
+
+	if !ok {
+		return ErrorCodeUnknown.Descriptor()
+	}
+
+	return d
+}
+
// String returns the canonical identifier for this error code (the
// registered Value, e.g. "UNKNOWN").
func (ec ErrorCode) String() string {
	return ec.Descriptor().Value
}
+
// Message returns the human-readable error message for this error code.
func (ec ErrorCode) Message() string {
	return ec.Descriptor().Message
}
+
// MarshalText encodes the receiver into UTF-8-encoded text and returns the
// result. It never fails; the code serializes as its canonical string value.
func (ec ErrorCode) MarshalText() (text []byte, err error) {
	return []byte(ec.String()), nil
}
+
+// UnmarshalText decodes the form generated by MarshalText.
+func (ec *ErrorCode) UnmarshalText(text []byte) error {
+	desc, ok := idToDescriptors[string(text)]
+
+	if !ok {
+		desc = ErrorCodeUnknown.Descriptor()
+	}
+
+	*ec = desc.Code
+
+	return nil
+}
+
// WithMessage creates a new Error struct based on the passed-in info and
// overrides the Message property.
func (ec ErrorCode) WithMessage(message string) Error {
	return Error{
		Code:    ec,
		Message: message,
	}
}
+
// WithDetail creates a new Error struct based on the passed-in info and
// set the Detail property appropriately.
func (ec ErrorCode) WithDetail(detail interface{}) Error {
	return Error{
		Code:    ec,
		Message: ec.Message(),
	}.WithDetail(detail)
}
+
// WithArgs creates a new Error struct and substitutes the args into the
// code's message format string.
func (ec ErrorCode) WithArgs(args ...interface{}) Error {
	return Error{
		Code:    ec,
		Message: ec.Message(),
	}.WithArgs(args...)
}
+
// Error provides a wrapper around ErrorCode with extra Details provided.
type Error struct {
	Code    ErrorCode   `json:"code"`
	Message string      `json:"message"`
	Detail  interface{} `json:"detail,omitempty"`

	// TODO(duglin): See if we need an "args" property so we can do the
	// variable substitution right before showing the message to the user
}

// Error must satisfy the error interface.
var _ error = Error{}
+
// ErrorCode returns the ID/Value of this Error, satisfying ErrorCoder.
func (e Error) ErrorCode() ErrorCode {
	return e.Code
}
+
+// Error returns a human readable representation of the error.
+func (e Error) Error() string {
+	return fmt.Sprintf("%s: %s", e.Code.Error(), e.Message)
+}
+
+// WithDetail will return a new Error, based on the current one, but with
+// some Detail info added
+func (e Error) WithDetail(detail interface{}) Error {
+	return Error{
+		Code:    e.Code,
+		Message: e.Message,
+		Detail:  detail,
+	}
+}
+
+// WithArgs uses the passed-in list of interface{} as the substitution
+// variables in the Error's Message string, but returns a new Error
+func (e Error) WithArgs(args ...interface{}) Error {
+	return Error{
+		Code:    e.Code,
+		Message: fmt.Sprintf(e.Code.Message(), args...),
+		Detail:  e.Detail,
+	}
+}
+
// ErrorDescriptor provides relevant information about a given error code.
type ErrorDescriptor struct {
	// Code is the error code that this descriptor describes.
	Code ErrorCode

	// Value provides a unique, string key, often capitalized with
	// underscores, to identify the error code. This value is used as the
	// keyed value when serializing api errors.
	Value string

	// Message is a short, human readable description of the error condition
	// included in API responses.
	Message string

	// Description provides a complete account of the error's purpose, suitable
	// for use in documentation.
	Description string

	// HTTPStatusCode provides the http status code that is associated with
	// this error condition.
	HTTPStatusCode int
}
+
+// ParseErrorCode returns the value by the string error code.
+// `ErrorCodeUnknown` will be returned if the error is not known.
+func ParseErrorCode(value string) ErrorCode {
+	ed, ok := idToDescriptors[value]
+	if ok {
+		return ed.Code
+	}
+
+	return ErrorCodeUnknown
+}
+
// Errors provides the envelope for multiple errors and a few sugar methods
// for use within the application.
type Errors []error

// Errors must satisfy the error interface.
var _ error = Errors{}

// Error renders the collection: "<nil>" when empty, the single error's
// message when there is exactly one, and a newline-terminated list headed
// by "errors:" otherwise.
func (errs Errors) Error() string {
	if len(errs) == 0 {
		return "<nil>"
	}
	if len(errs) == 1 {
		return errs[0].Error()
	}

	msg := "errors:\n"
	for _, err := range errs {
		msg += err.Error() + "\n"
	}
	return msg
}

// Len returns the current number of errors.
func (errs Errors) Len() int {
	return len(errs)
}
+
+// MarshalJSON converts slice of error, ErrorCode or Error into a
+// slice of Error - then serializes
+func (errs Errors) MarshalJSON() ([]byte, error) {
+	var tmpErrs struct {
+		Errors []Error `json:"errors,omitempty"`
+	}
+
+	for _, daErr := range errs {
+		var err Error
+
+		switch daErr.(type) {
+		case ErrorCode:
+			err = daErr.(ErrorCode).WithDetail(nil)
+		case Error:
+			err = daErr.(Error)
+		default:
+			err = ErrorCodeUnknown.WithDetail(daErr)
+
+		}
+
+		// If the Error struct was setup and they forgot to set the
+		// Message field (meaning its "") then grab it from the ErrCode
+		msg := err.Message
+		if msg == "" {
+			msg = err.Code.Message()
+		}
+
+		tmpErrs.Errors = append(tmpErrs.Errors, Error{
+			Code:    err.Code,
+			Message: msg,
+			Detail:  err.Detail,
+		})
+	}
+
+	return json.Marshal(tmpErrs)
+}
+
// UnmarshalJSON deserializes []Error and then converts it into slice of
// Error or ErrorCode.
func (errs *Errors) UnmarshalJSON(data []byte) error {
	var tmpErrs struct {
		Errors []Error
	}

	if err := json.Unmarshal(data, &tmpErrs); err != nil {
		return err
	}

	var newErrs Errors
	for _, daErr := range tmpErrs.Errors {
		// If Message is empty or exactly matches the Code's message string
		// then just use the Code, no need for a full Error struct
		if daErr.Detail == nil && (daErr.Message == "" || daErr.Message == daErr.Code.Message()) {
			// Error's w/o details get converted to ErrorCode
			newErrs = append(newErrs, daErr.Code)
		} else {
			// Error's w/ details are untouched
			newErrs = append(newErrs, Error{
				Code:    daErr.Code,
				Message: daErr.Message,
				Detail:  daErr.Detail,
			})
		}
	}

	// Replace the receiver wholesale so a partially-filled slice is never
	// observed on error paths above.
	*errs = newErrs
	return nil
}
diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/handler.go b/vendor/github.com/docker/distribution/registry/api/errcode/handler.go
new file mode 100644
index 0000000000000000000000000000000000000000..49a64a86eb50c928e4efcc2269804b4ef3ed49ba
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/api/errcode/handler.go
@@ -0,0 +1,44 @@
+package errcode
+
+import (
+	"encoding/json"
+	"net/http"
+)
+
+// ServeJSON attempts to serve the errcode in a JSON envelope. It marshals err
+// and sets the content-type header to 'application/json'. It will handle
+// ErrorCoder and Errors, and if necessary will create an envelope.
+func ServeJSON(w http.ResponseWriter, err error) error {
+	w.Header().Set("Content-Type", "application/json; charset=utf-8")
+	var sc int
+
+	switch errs := err.(type) {
+	case Errors:
+		if len(errs) < 1 {
+			break
+		}
+
+		if err, ok := errs[0].(ErrorCoder); ok {
+			sc = err.ErrorCode().Descriptor().HTTPStatusCode
+		}
+	case ErrorCoder:
+		sc = errs.ErrorCode().Descriptor().HTTPStatusCode
+		err = Errors{err} // create an envelope.
+	default:
+		// We just have an unhandled error type, so just place in an envelope
+		// and move along.
+		err = Errors{err}
+	}
+
+	if sc == 0 {
+		sc = http.StatusInternalServerError
+	}
+
+	w.WriteHeader(sc)
+
+	if err := json.NewEncoder(w).Encode(err); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/register.go b/vendor/github.com/docker/distribution/registry/api/errcode/register.go
new file mode 100644
index 0000000000000000000000000000000000000000..d1e8826c6d7df78dc91116c969d2373d3e3eecba
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/api/errcode/register.go
@@ -0,0 +1,138 @@
+package errcode
+
+import (
+	"fmt"
+	"net/http"
+	"sort"
+	"sync"
+)
+
+var (
+	errorCodeToDescriptors = map[ErrorCode]ErrorDescriptor{}
+	idToDescriptors        = map[string]ErrorDescriptor{}
+	groupToDescriptors     = map[string][]ErrorDescriptor{}
+)
+
+var (
+	// ErrorCodeUnknown is a generic error that can be used as a last
+	// resort if there is no situation-specific error message that can be used
+	ErrorCodeUnknown = Register("errcode", ErrorDescriptor{
+		Value:   "UNKNOWN",
+		Message: "unknown error",
+		Description: `Generic error returned when the error does not have an
+			                                            API classification.`,
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
+	// ErrorCodeUnsupported is returned when an operation is not supported.
+	ErrorCodeUnsupported = Register("errcode", ErrorDescriptor{
+		Value:   "UNSUPPORTED",
+		Message: "The operation is unsupported.",
+		Description: `The operation was unsupported due to a missing
+		implementation or invalid set of parameters.`,
+		HTTPStatusCode: http.StatusMethodNotAllowed,
+	})
+
+	// ErrorCodeUnauthorized is returned if a request requires
+	// authentication.
+	ErrorCodeUnauthorized = Register("errcode", ErrorDescriptor{
+		Value:   "UNAUTHORIZED",
+		Message: "authentication required",
+		Description: `The access controller was unable to authenticate
+		the client. Often this will be accompanied by a
+		Www-Authenticate HTTP response header indicating how to
+		authenticate.`,
+		HTTPStatusCode: http.StatusUnauthorized,
+	})
+
+	// ErrorCodeDenied is returned if a client does not have sufficient
+	// permission to perform an action.
+	ErrorCodeDenied = Register("errcode", ErrorDescriptor{
+		Value:   "DENIED",
+		Message: "requested access to the resource is denied",
+		Description: `The access controller denied access for the
+		operation on a resource.`,
+		HTTPStatusCode: http.StatusForbidden,
+	})
+
+	// ErrorCodeUnavailable provides a common error to report unavailability
+	// of a service or endpoint.
+	ErrorCodeUnavailable = Register("errcode", ErrorDescriptor{
+		Value:          "UNAVAILABLE",
+		Message:        "service unavailable",
+		Description:    "Returned when a service is not available",
+		HTTPStatusCode: http.StatusServiceUnavailable,
+	})
+
+	// ErrorCodeTooManyRequests is returned if a client attempts too many
+	// times to contact a service endpoint.
+	ErrorCodeTooManyRequests = Register("errcode", ErrorDescriptor{
+		Value:   "TOOMANYREQUESTS",
+		Message: "too many requests",
+		Description: `Returned when a client attempts to contact a
+		service too many times`,
+		HTTPStatusCode: http.StatusTooManyRequests,
+	})
+)
+
+var nextCode = 1000
+var registerLock sync.Mutex
+
+// Register will make the passed-in error known to the environment and
+// return a new ErrorCode
+func Register(group string, descriptor ErrorDescriptor) ErrorCode {
+	registerLock.Lock()
+	defer registerLock.Unlock()
+
+	descriptor.Code = ErrorCode(nextCode)
+
+	if _, ok := idToDescriptors[descriptor.Value]; ok {
+		panic(fmt.Sprintf("ErrorValue %q is already registered", descriptor.Value))
+	}
+	if _, ok := errorCodeToDescriptors[descriptor.Code]; ok {
+		panic(fmt.Sprintf("ErrorCode %v is already registered", descriptor.Code))
+	}
+
+	groupToDescriptors[group] = append(groupToDescriptors[group], descriptor)
+	errorCodeToDescriptors[descriptor.Code] = descriptor
+	idToDescriptors[descriptor.Value] = descriptor
+
+	nextCode++
+	return descriptor.Code
+}
+
+type byValue []ErrorDescriptor
+
+func (a byValue) Len() int           { return len(a) }
+func (a byValue) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+func (a byValue) Less(i, j int) bool { return a[i].Value < a[j].Value }
+
+// GetGroupNames returns the list of Error group names that are registered
+func GetGroupNames() []string {
+	keys := []string{}
+
+	for k := range groupToDescriptors {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+	return keys
+}
+
+// GetErrorCodeGroup returns the named group of error descriptors
+func GetErrorCodeGroup(name string) []ErrorDescriptor {
+	desc := groupToDescriptors[name]
+	sort.Sort(byValue(desc))
+	return desc
+}
+
+// GetErrorAllDescriptors returns a slice of all ErrorDescriptors that are
+// registered, irrespective of what group they're in
+func GetErrorAllDescriptors() []ErrorDescriptor {
+	result := []ErrorDescriptor{}
+
+	for _, group := range GetGroupNames() {
+		result = append(result, GetErrorCodeGroup(group)...)
+	}
+	sort.Sort(byValue(result))
+	return result
+}
diff --git a/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go b/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go
new file mode 100644
index 0000000000000000000000000000000000000000..9979abae659cf3a0cc89169e70b8bd1deccc21b8
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go
@@ -0,0 +1,1596 @@
+package v2
+
+import (
+	"net/http"
+	"regexp"
+
+	"github.com/docker/distribution/digest"
+	"github.com/docker/distribution/reference"
+	"github.com/docker/distribution/registry/api/errcode"
+)
+
+var (
+	nameParameterDescriptor = ParameterDescriptor{
+		Name:        "name",
+		Type:        "string",
+		Format:      reference.NameRegexp.String(),
+		Required:    true,
+		Description: `Name of the target repository.`,
+	}
+
+	referenceParameterDescriptor = ParameterDescriptor{
+		Name:        "reference",
+		Type:        "string",
+		Format:      reference.TagRegexp.String(),
+		Required:    true,
+		Description: `Tag or digest of the target manifest.`,
+	}
+
+	uuidParameterDescriptor = ParameterDescriptor{
+		Name:        "uuid",
+		Type:        "opaque",
+		Required:    true,
+		Description: "A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.",
+	}
+
+	digestPathParameter = ParameterDescriptor{
+		Name:        "digest",
+		Type:        "path",
+		Required:    true,
+		Format:      digest.DigestRegexp.String(),
+		Description: `Digest of desired blob.`,
+	}
+
+	hostHeader = ParameterDescriptor{
+		Name:        "Host",
+		Type:        "string",
+		Description: "Standard HTTP Host Header. Should be set to the registry host.",
+		Format:      "<registry host>",
+		Examples:    []string{"registry-1.docker.io"},
+	}
+
+	authHeader = ParameterDescriptor{
+		Name:        "Authorization",
+		Type:        "string",
+		Description: "An RFC7235 compliant authorization header.",
+		Format:      "<scheme> <token>",
+		Examples:    []string{"Bearer dGhpcyBpcyBhIGZha2UgYmVhcmVyIHRva2VuIQ=="},
+	}
+
+	authChallengeHeader = ParameterDescriptor{
+		Name:        "WWW-Authenticate",
+		Type:        "string",
+		Description: "An RFC7235 compliant authentication challenge header.",
+		Format:      `<scheme> realm="<realm>", ..."`,
+		Examples: []string{
+			`Bearer realm="https://auth.docker.com/", service="registry.docker.com", scopes="repository:library/ubuntu:pull"`,
+		},
+	}
+
+	contentLengthZeroHeader = ParameterDescriptor{
+		Name:        "Content-Length",
+		Description: "The `Content-Length` header must be zero and the body must be empty.",
+		Type:        "integer",
+		Format:      "0",
+	}
+
+	dockerUploadUUIDHeader = ParameterDescriptor{
+		Name:        "Docker-Upload-UUID",
+		Description: "Identifies the docker upload uuid for the current request.",
+		Type:        "uuid",
+		Format:      "<uuid>",
+	}
+
+	digestHeader = ParameterDescriptor{
+		Name:        "Docker-Content-Digest",
+		Description: "Digest of the targeted content for the request.",
+		Type:        "digest",
+		Format:      "<digest>",
+	}
+
+	linkHeader = ParameterDescriptor{
+		Name:        "Link",
+		Type:        "link",
+		Description: "RFC5988 compliant rel='next' with URL to next result set, if available",
+		Format:      `<<url>?n=<last n value>&last=<last entry from response>>; rel="next"`,
+	}
+
+	paginationParameters = []ParameterDescriptor{
+		{
+			Name:        "n",
+			Type:        "integer",
+			Description: "Limit the number of entries in each response. It not present, all entries will be returned.",
+			Format:      "<integer>",
+			Required:    false,
+		},
+		{
+			Name:        "last",
+			Type:        "string",
+			Description: "Result set will include values lexically after last.",
+			Format:      "<integer>",
+			Required:    false,
+		},
+	}
+
+	unauthorizedResponseDescriptor = ResponseDescriptor{
+		Name:        "Authentication Required",
+		StatusCode:  http.StatusUnauthorized,
+		Description: "The client is not authenticated.",
+		Headers: []ParameterDescriptor{
+			authChallengeHeader,
+			{
+				Name:        "Content-Length",
+				Type:        "integer",
+				Description: "Length of the JSON response body.",
+				Format:      "<length>",
+			},
+		},
+		Body: BodyDescriptor{
+			ContentType: "application/json; charset=utf-8",
+			Format:      errorsBody,
+		},
+		ErrorCodes: []errcode.ErrorCode{
+			errcode.ErrorCodeUnauthorized,
+		},
+	}
+
+	repositoryNotFoundResponseDescriptor = ResponseDescriptor{
+		Name:        "No Such Repository Error",
+		StatusCode:  http.StatusNotFound,
+		Description: "The repository is not known to the registry.",
+		Headers: []ParameterDescriptor{
+			{
+				Name:        "Content-Length",
+				Type:        "integer",
+				Description: "Length of the JSON response body.",
+				Format:      "<length>",
+			},
+		},
+		Body: BodyDescriptor{
+			ContentType: "application/json; charset=utf-8",
+			Format:      errorsBody,
+		},
+		ErrorCodes: []errcode.ErrorCode{
+			ErrorCodeNameUnknown,
+		},
+	}
+
+	deniedResponseDescriptor = ResponseDescriptor{
+		Name:        "Access Denied",
+		StatusCode:  http.StatusForbidden,
+		Description: "The client does not have required access to the repository.",
+		Headers: []ParameterDescriptor{
+			{
+				Name:        "Content-Length",
+				Type:        "integer",
+				Description: "Length of the JSON response body.",
+				Format:      "<length>",
+			},
+		},
+		Body: BodyDescriptor{
+			ContentType: "application/json; charset=utf-8",
+			Format:      errorsBody,
+		},
+		ErrorCodes: []errcode.ErrorCode{
+			errcode.ErrorCodeDenied,
+		},
+	}
+
+	tooManyRequestsDescriptor = ResponseDescriptor{
+		Name:        "Too Many Requests",
+		StatusCode:  http.StatusTooManyRequests,
+		Description: "The client made too many requests within a time interval.",
+		Headers: []ParameterDescriptor{
+			{
+				Name:        "Content-Length",
+				Type:        "integer",
+				Description: "Length of the JSON response body.",
+				Format:      "<length>",
+			},
+		},
+		Body: BodyDescriptor{
+			ContentType: "application/json; charset=utf-8",
+			Format:      errorsBody,
+		},
+		ErrorCodes: []errcode.ErrorCode{
+			errcode.ErrorCodeTooManyRequests,
+		},
+	}
+)
+
+const (
+	manifestBody = `{
+   "name": <name>,
+   "tag": <tag>,
+   "fsLayers": [
+      {
+         "blobSum": "<digest>"
+      },
+      ...
+    ]
+   ],
+   "history": <v1 images>,
+   "signature": <JWS>
+}`
+
+	errorsBody = `{
+	"errors:" [
+	    {
+            "code": <error code>,
+            "message": "<error message>",
+            "detail": ...
+        },
+        ...
+    ]
+}`
+)
+
+// APIDescriptor exports descriptions of the layout of the v2 registry API.
+var APIDescriptor = struct {
+	// RouteDescriptors provides a list of the routes available in the API.
+	RouteDescriptors []RouteDescriptor
+}{
+	RouteDescriptors: routeDescriptors,
+}
+
+// RouteDescriptor describes a route specified by name.
+type RouteDescriptor struct {
+	// Name is the name of the route, as specified in RouteNameXXX exports.
+	// These names a should be considered a unique reference for a route. If
+	// the route is registered with gorilla, this is the name that will be
+	// used.
+	Name string
+
+	// Path is a gorilla/mux-compatible regexp that can be used to match the
+	// route. For any incoming method and path, only one route descriptor
+	// should match.
+	Path string
+
+	// Entity should be a short, human-readalbe description of the object
+	// targeted by the endpoint.
+	Entity string
+
+	// Description should provide an accurate overview of the functionality
+	// provided by the route.
+	Description string
+
+	// Methods should describe the various HTTP methods that may be used on
+	// this route, including request and response formats.
+	Methods []MethodDescriptor
+}
+
+// MethodDescriptor provides a description of the requests that may be
+// conducted with the target method.
+type MethodDescriptor struct {
+
+	// Method is an HTTP method, such as GET, PUT or POST.
+	Method string
+
+	// Description should provide an overview of the functionality provided by
+	// the covered method, suitable for use in documentation. Use of markdown
+	// here is encouraged.
+	Description string
+
+	// Requests is a slice of request descriptors enumerating how this
+	// endpoint may be used.
+	Requests []RequestDescriptor
+}
+
+// RequestDescriptor covers a particular set of headers and parameters that
+// can be carried out with the parent method. Its most helpful to have one
+// RequestDescriptor per API use case.
+type RequestDescriptor struct {
+	// Name provides a short identifier for the request, usable as a title or
+	// to provide quick context for the particular request.
+	Name string
+
+	// Description should cover the requests purpose, covering any details for
+	// this particular use case.
+	Description string
+
+	// Headers describes headers that must be used with the HTTP request.
+	Headers []ParameterDescriptor
+
+	// PathParameters enumerate the parameterized path components for the
+	// given request, as defined in the route's regular expression.
+	PathParameters []ParameterDescriptor
+
+	// QueryParameters provides a list of query parameters for the given
+	// request.
+	QueryParameters []ParameterDescriptor
+
+	// Body describes the format of the request body.
+	Body BodyDescriptor
+
+	// Successes enumerates the possible responses that are considered to be
+	// the result of a successful request.
+	Successes []ResponseDescriptor
+
+	// Failures covers the possible failures from this particular request.
+	Failures []ResponseDescriptor
+}
+
+// ResponseDescriptor describes the components of an API response.
+type ResponseDescriptor struct {
+	// Name provides a short identifier for the response, usable as a title or
+	// to provide quick context for the particular response.
+	Name string
+
+	// Description should provide a brief overview of the role of the
+	// response.
+	Description string
+
+	// StatusCode specifies the status received by this particular response.
+	StatusCode int
+
+	// Headers covers any headers that may be returned from the response.
+	Headers []ParameterDescriptor
+
+	// Fields describes any fields that may be present in the response.
+	Fields []ParameterDescriptor
+
+	// ErrorCodes enumerates the error codes that may be returned along with
+	// the response.
+	ErrorCodes []errcode.ErrorCode
+
+	// Body describes the body of the response, if any.
+	Body BodyDescriptor
+}
+
+// BodyDescriptor describes a request body and its expected content type. For
+// the most  part, it should be example json or some placeholder for body
+// data in documentation.
+type BodyDescriptor struct {
+	ContentType string
+	Format      string
+}
+
+// ParameterDescriptor describes the format of a request parameter, which may
+// be a header, path parameter or query parameter.
+type ParameterDescriptor struct {
+	// Name is the name of the parameter, either of the path component or
+	// query parameter.
+	Name string
+
+	// Type specifies the type of the parameter, such as string, integer, etc.
+	Type string
+
+	// Description provides a human-readable description of the parameter.
+	Description string
+
+	// Required means the field is required when set.
+	Required bool
+
+	// Format is a specifying the string format accepted by this parameter.
+	Format string
+
+	// Regexp is a compiled regular expression that can be used to validate
+	// the contents of the parameter.
+	Regexp *regexp.Regexp
+
+	// Examples provides multiple examples for the values that might be valid
+	// for this parameter.
+	Examples []string
+}
+
+var routeDescriptors = []RouteDescriptor{
+	{
+		Name:        RouteNameBase,
+		Path:        "/v2/",
+		Entity:      "Base",
+		Description: `Base V2 API route. Typically, this can be used for lightweight version checks and to validate registry authentication.`,
+		Methods: []MethodDescriptor{
+			{
+				Method:      "GET",
+				Description: "Check that the endpoint implements Docker Registry API V2.",
+				Requests: []RequestDescriptor{
+					{
+						Headers: []ParameterDescriptor{
+							hostHeader,
+							authHeader,
+						},
+						Successes: []ResponseDescriptor{
+							{
+								Description: "The API implements V2 protocol and is accessible.",
+								StatusCode:  http.StatusOK,
+							},
+						},
+						Failures: []ResponseDescriptor{
+							{
+								Description: "The registry does not implement the V2 API.",
+								StatusCode:  http.StatusNotFound,
+							},
+							unauthorizedResponseDescriptor,
+							tooManyRequestsDescriptor,
+						},
+					},
+				},
+			},
+		},
+	},
+	{
+		Name:        RouteNameTags,
+		Path:        "/v2/{name:" + reference.NameRegexp.String() + "}/tags/list",
+		Entity:      "Tags",
+		Description: "Retrieve information about tags.",
+		Methods: []MethodDescriptor{
+			{
+				Method:      "GET",
+				Description: "Fetch the tags under the repository identified by `name`.",
+				Requests: []RequestDescriptor{
+					{
+						Name:        "Tags",
+						Description: "Return all tags for the repository",
+						Headers: []ParameterDescriptor{
+							hostHeader,
+							authHeader,
+						},
+						PathParameters: []ParameterDescriptor{
+							nameParameterDescriptor,
+						},
+						Successes: []ResponseDescriptor{
+							{
+								StatusCode:  http.StatusOK,
+								Description: "A list of tags for the named repository.",
+								Headers: []ParameterDescriptor{
+									{
+										Name:        "Content-Length",
+										Type:        "integer",
+										Description: "Length of the JSON response body.",
+										Format:      "<length>",
+									},
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format: `{
+    "name": <name>,
+    "tags": [
+        <tag>,
+        ...
+    ]
+}`,
+								},
+							},
+						},
+						Failures: []ResponseDescriptor{
+							unauthorizedResponseDescriptor,
+							repositoryNotFoundResponseDescriptor,
+							deniedResponseDescriptor,
+							tooManyRequestsDescriptor,
+						},
+					},
+					{
+						Name:            "Tags Paginated",
+						Description:     "Return a portion of the tags for the specified repository.",
+						PathParameters:  []ParameterDescriptor{nameParameterDescriptor},
+						QueryParameters: paginationParameters,
+						Successes: []ResponseDescriptor{
+							{
+								StatusCode:  http.StatusOK,
+								Description: "A list of tags for the named repository.",
+								Headers: []ParameterDescriptor{
+									{
+										Name:        "Content-Length",
+										Type:        "integer",
+										Description: "Length of the JSON response body.",
+										Format:      "<length>",
+									},
+									linkHeader,
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format: `{
+    "name": <name>,
+    "tags": [
+        <tag>,
+        ...
+    ],
+}`,
+								},
+							},
+						},
+						Failures: []ResponseDescriptor{
+							unauthorizedResponseDescriptor,
+							repositoryNotFoundResponseDescriptor,
+							deniedResponseDescriptor,
+							tooManyRequestsDescriptor,
+						},
+					},
+				},
+			},
+		},
+	},
+	{
+		Name:        RouteNameManifest,
+		Path:        "/v2/{name:" + reference.NameRegexp.String() + "}/manifests/{reference:" + reference.TagRegexp.String() + "|" + digest.DigestRegexp.String() + "}",
+		Entity:      "Manifest",
+		Description: "Create, update, delete and retrieve manifests.",
+		Methods: []MethodDescriptor{
+			{
+				Method:      "GET",
+				Description: "Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.",
+				Requests: []RequestDescriptor{
+					{
+						Headers: []ParameterDescriptor{
+							hostHeader,
+							authHeader,
+						},
+						PathParameters: []ParameterDescriptor{
+							nameParameterDescriptor,
+							referenceParameterDescriptor,
+						},
+						Successes: []ResponseDescriptor{
+							{
+								Description: "The manifest identified by `name` and `reference`. The contents can be used to identify and resolve resources required to run the specified image.",
+								StatusCode:  http.StatusOK,
+								Headers: []ParameterDescriptor{
+									digestHeader,
+								},
+								Body: BodyDescriptor{
+									ContentType: "<media type of manifest>",
+									Format:      manifestBody,
+								},
+							},
+						},
+						Failures: []ResponseDescriptor{
+							{
+								Description: "The name or reference was invalid.",
+								StatusCode:  http.StatusBadRequest,
+								ErrorCodes: []errcode.ErrorCode{
+									ErrorCodeNameInvalid,
+									ErrorCodeTagInvalid,
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      errorsBody,
+								},
+							},
+							unauthorizedResponseDescriptor,
+							repositoryNotFoundResponseDescriptor,
+							deniedResponseDescriptor,
+							tooManyRequestsDescriptor,
+						},
+					},
+				},
+			},
+			{
+				Method:      "PUT",
+				Description: "Put the manifest identified by `name` and `reference` where `reference` can be a tag or digest.",
+				Requests: []RequestDescriptor{
+					{
+						Headers: []ParameterDescriptor{
+							hostHeader,
+							authHeader,
+						},
+						PathParameters: []ParameterDescriptor{
+							nameParameterDescriptor,
+							referenceParameterDescriptor,
+						},
+						Body: BodyDescriptor{
+							ContentType: "<media type of manifest>",
+							Format:      manifestBody,
+						},
+						Successes: []ResponseDescriptor{
+							{
+								Description: "The manifest has been accepted by the registry and is stored under the specified `name` and `tag`.",
+								StatusCode:  http.StatusCreated,
+								Headers: []ParameterDescriptor{
+									{
+										Name:        "Location",
+										Type:        "url",
+										Description: "The canonical location url of the uploaded manifest.",
+										Format:      "<url>",
+									},
+									contentLengthZeroHeader,
+									digestHeader,
+								},
+							},
+						},
+						Failures: []ResponseDescriptor{
+							{
+								Name:        "Invalid Manifest",
+								Description: "The received manifest was invalid in some way, as described by the error codes. The client should resolve the issue and retry the request.",
+								StatusCode:  http.StatusBadRequest,
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      errorsBody,
+								},
+								ErrorCodes: []errcode.ErrorCode{
+									ErrorCodeNameInvalid,
+									ErrorCodeTagInvalid,
+									ErrorCodeManifestInvalid,
+									ErrorCodeManifestUnverified,
+									ErrorCodeBlobUnknown,
+								},
+							},
+							unauthorizedResponseDescriptor,
+							repositoryNotFoundResponseDescriptor,
+							deniedResponseDescriptor,
+							tooManyRequestsDescriptor,
+							{
+								Name:        "Missing Layer(s)",
+								Description: "One or more layers may be missing during a manifest upload. If so, the missing layers will be enumerated in the error response.",
+								StatusCode:  http.StatusBadRequest,
+								ErrorCodes: []errcode.ErrorCode{
+									ErrorCodeBlobUnknown,
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format: `{
+    "errors:" [{
+            "code": "BLOB_UNKNOWN",
+            "message": "blob unknown to registry",
+            "detail": {
+                "digest": "<digest>"
+            }
+        },
+        ...
+    ]
+}`,
+								},
+							},
+							{
+								Name:        "Not allowed",
+								Description: "Manifest put is not allowed because the registry is configured as a pull-through cache or for some other reason",
+								StatusCode:  http.StatusMethodNotAllowed,
+								ErrorCodes: []errcode.ErrorCode{
+									errcode.ErrorCodeUnsupported,
+								},
+							},
+						},
+					},
+				},
+			},
+			{
+				Method:      "DELETE",
+				Description: "Delete the manifest identified by `name` and `reference`. Note that a manifest can _only_ be deleted by `digest`.",
+				Requests: []RequestDescriptor{
+					{
+						Headers: []ParameterDescriptor{
+							hostHeader,
+							authHeader,
+						},
+						PathParameters: []ParameterDescriptor{
+							nameParameterDescriptor,
+							referenceParameterDescriptor,
+						},
+						Successes: []ResponseDescriptor{
+							{
+								StatusCode: http.StatusAccepted,
+							},
+						},
+						Failures: []ResponseDescriptor{
+							{
+								Name:        "Invalid Name or Reference",
+								Description: "The specified `name` or `reference` were invalid and the delete was unable to proceed.",
+								StatusCode:  http.StatusBadRequest,
+								ErrorCodes: []errcode.ErrorCode{
+									ErrorCodeNameInvalid,
+									ErrorCodeTagInvalid,
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      errorsBody,
+								},
+							},
+							unauthorizedResponseDescriptor,
+							repositoryNotFoundResponseDescriptor,
+							deniedResponseDescriptor,
+							tooManyRequestsDescriptor,
+							{
+								Name:        "Unknown Manifest",
+								Description: "The specified `name` or `reference` are unknown to the registry and the delete was unable to proceed. Clients can assume the manifest was already deleted if this response is returned.",
+								StatusCode:  http.StatusNotFound,
+								ErrorCodes: []errcode.ErrorCode{
+									ErrorCodeNameUnknown,
+									ErrorCodeManifestUnknown,
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      errorsBody,
+								},
+							},
+							{
+								Name:        "Not allowed",
+								Description: "Manifest delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled.",
+								StatusCode:  http.StatusMethodNotAllowed,
+								ErrorCodes: []errcode.ErrorCode{
+									errcode.ErrorCodeUnsupported,
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+	},
+
+	{
+		Name:        RouteNameBlob,
+		Path:        "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/{digest:" + digest.DigestRegexp.String() + "}",
+		Entity:      "Blob",
+		Description: "Operations on blobs identified by `name` and `digest`. Used to fetch or delete layers by digest.",
+		Methods: []MethodDescriptor{
+			{
+				Method:      "GET",
+				Description: "Retrieve the blob from the registry identified by `digest`. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.",
+				Requests: []RequestDescriptor{
+					{
+						Name: "Fetch Blob",
+						Headers: []ParameterDescriptor{
+							hostHeader,
+							authHeader,
+						},
+						PathParameters: []ParameterDescriptor{
+							nameParameterDescriptor,
+							digestPathParameter,
+						},
+						Successes: []ResponseDescriptor{
+							{
+								Description: "The blob identified by `digest` is available. The blob content will be present in the body of the request.",
+								StatusCode:  http.StatusOK,
+								Headers: []ParameterDescriptor{
+									{
+										Name:        "Content-Length",
+										Type:        "integer",
+										Description: "The length of the requested blob content.",
+										Format:      "<length>",
+									},
+									digestHeader,
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/octet-stream",
+									Format:      "<blob binary data>",
+								},
+							},
+							{
+								Description: "The blob identified by `digest` is available at the provided location.",
+								StatusCode:  http.StatusTemporaryRedirect,
+								Headers: []ParameterDescriptor{
+									{
+										Name:        "Location",
+										Type:        "url",
+										Description: "The location where the layer should be accessible.",
+										Format:      "<blob location>",
+									},
+									digestHeader,
+								},
+							},
+						},
+						Failures: []ResponseDescriptor{
+							{
+								Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.",
+								StatusCode:  http.StatusBadRequest,
+								ErrorCodes: []errcode.ErrorCode{
+									ErrorCodeNameInvalid,
+									ErrorCodeDigestInvalid,
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      errorsBody,
+								},
+							},
+							{
+								Description: "The blob, identified by `name` and `digest`, is unknown to the registry.",
+								StatusCode:  http.StatusNotFound,
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      errorsBody,
+								},
+								ErrorCodes: []errcode.ErrorCode{
+									ErrorCodeNameUnknown,
+									ErrorCodeBlobUnknown,
+								},
+							},
+							unauthorizedResponseDescriptor,
+							repositoryNotFoundResponseDescriptor,
+							deniedResponseDescriptor,
+							tooManyRequestsDescriptor,
+						},
+					},
+					{
+						Name:        "Fetch Blob Part",
+						Description: "This endpoint may also support RFC7233 compliant range requests. Support can be detected by issuing a HEAD request. If the header `Accept-Range: bytes` is returned, range requests can be used to fetch partial content.",
+						Headers: []ParameterDescriptor{
+							hostHeader,
+							authHeader,
+							{
+								Name:        "Range",
+								Type:        "string",
+								Description: "HTTP Range header specifying blob chunk.",
+								Format:      "bytes=<start>-<end>",
+							},
+						},
+						PathParameters: []ParameterDescriptor{
+							nameParameterDescriptor,
+							digestPathParameter,
+						},
+						Successes: []ResponseDescriptor{
+							{
+								Description: "The blob identified by `digest` is available. The specified chunk of blob content will be present in the body of the request.",
+								StatusCode:  http.StatusPartialContent,
+								Headers: []ParameterDescriptor{
+									{
+										Name:        "Content-Length",
+										Type:        "integer",
+										Description: "The length of the requested blob chunk.",
+										Format:      "<length>",
+									},
+									{
+										Name:        "Content-Range",
+										Type:        "byte range",
+										Description: "Content range of blob chunk.",
+										Format:      "bytes <start>-<end>/<size>",
+									},
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/octet-stream",
+									Format:      "<blob binary data>",
+								},
+							},
+						},
+						Failures: []ResponseDescriptor{
+							{
+								Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.",
+								StatusCode:  http.StatusBadRequest,
+								ErrorCodes: []errcode.ErrorCode{
+									ErrorCodeNameInvalid,
+									ErrorCodeDigestInvalid,
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      errorsBody,
+								},
+							},
+							{
+								StatusCode: http.StatusNotFound,
+								ErrorCodes: []errcode.ErrorCode{
+									ErrorCodeNameUnknown,
+									ErrorCodeBlobUnknown,
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      errorsBody,
+								},
+							},
+							{
+								Description: "The range specification cannot be satisfied for the requested content. This can happen when the range is not formatted correctly or if the range is outside of the valid size of the content.",
+								StatusCode:  http.StatusRequestedRangeNotSatisfiable,
+							},
+							unauthorizedResponseDescriptor,
+							repositoryNotFoundResponseDescriptor,
+							deniedResponseDescriptor,
+							tooManyRequestsDescriptor,
+						},
+					},
+				},
+			},
+			{
+				Method:      "DELETE",
+				Description: "Delete the blob identified by `name` and `digest`",
+				Requests: []RequestDescriptor{
+					{
+						Headers: []ParameterDescriptor{
+							hostHeader,
+							authHeader,
+						},
+						PathParameters: []ParameterDescriptor{
+							nameParameterDescriptor,
+							digestPathParameter,
+						},
+						Successes: []ResponseDescriptor{
+							{
+								StatusCode: http.StatusAccepted,
+								Headers: []ParameterDescriptor{
+									{
+										Name:        "Content-Length",
+										Type:        "integer",
+										Description: "0",
+										Format:      "0",
+									},
+									digestHeader,
+								},
+							},
+						},
+						Failures: []ResponseDescriptor{
+							{
+								Name:       "Invalid Name or Digest",
+								StatusCode: http.StatusBadRequest,
+								ErrorCodes: []errcode.ErrorCode{
+									ErrorCodeDigestInvalid,
+									ErrorCodeNameInvalid,
+								},
+							},
+							{
+								Description: "The blob, identified by `name` and `digest`, is unknown to the registry.",
+								StatusCode:  http.StatusNotFound,
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      errorsBody,
+								},
+								ErrorCodes: []errcode.ErrorCode{
+									ErrorCodeNameUnknown,
+									ErrorCodeBlobUnknown,
+								},
+							},
+							{
+								Description: "Blob delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled",
+								StatusCode:  http.StatusMethodNotAllowed,
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      errorsBody,
+								},
+								ErrorCodes: []errcode.ErrorCode{
+									errcode.ErrorCodeUnsupported,
+								},
+							},
+							unauthorizedResponseDescriptor,
+							repositoryNotFoundResponseDescriptor,
+							deniedResponseDescriptor,
+							tooManyRequestsDescriptor,
+						},
+					},
+				},
+			},
+
+			// TODO(stevvooe): We may want to add a PUT request here to
+			// kickoff an upload of a blob, integrated with the blob upload
+			// API.
+		},
+	},
+
+	{
+		Name:        RouteNameBlobUpload,
+		Path:        "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/uploads/",
+		Entity:      "Initiate Blob Upload",
+		Description: "Initiate a blob upload. This endpoint can be used to create resumable uploads or monolithic uploads.",
+		Methods: []MethodDescriptor{
+			{
+				Method:      "POST",
+				Description: "Initiate a resumable blob upload. If successful, an upload location will be provided to complete the upload. Optionally, if the `digest` parameter is present, the request body will be used to complete the upload in a single request.",
+				Requests: []RequestDescriptor{
+					{
+						Name:        "Initiate Monolithic Blob Upload",
+						Description: "Upload a blob identified by the `digest` parameter in single request. This upload will not be resumable unless a recoverable error is returned.",
+						Headers: []ParameterDescriptor{
+							hostHeader,
+							authHeader,
+							{
+								Name:   "Content-Length",
+								Type:   "integer",
+								Format: "<length of blob>",
+							},
+						},
+						PathParameters: []ParameterDescriptor{
+							nameParameterDescriptor,
+						},
+						QueryParameters: []ParameterDescriptor{
+							{
+								Name:        "digest",
+								Type:        "query",
+								Format:      "<digest>",
+								Regexp:      digest.DigestRegexp,
+								Description: `Digest of uploaded blob. If present, the upload will be completed, in a single request, with contents of the request body as the resulting blob.`,
+							},
+						},
+						Body: BodyDescriptor{
+							ContentType: "application/octect-stream",
+							Format:      "<binary data>",
+						},
+						Successes: []ResponseDescriptor{
+							{
+								Description: "The blob has been created in the registry and is available at the provided location.",
+								StatusCode:  http.StatusCreated,
+								Headers: []ParameterDescriptor{
+									{
+										Name:   "Location",
+										Type:   "url",
+										Format: "<blob location>",
+									},
+									contentLengthZeroHeader,
+									dockerUploadUUIDHeader,
+								},
+							},
+						},
+						Failures: []ResponseDescriptor{
+							{
+								Name:       "Invalid Name or Digest",
+								StatusCode: http.StatusBadRequest,
+								ErrorCodes: []errcode.ErrorCode{
+									ErrorCodeDigestInvalid,
+									ErrorCodeNameInvalid,
+								},
+							},
+							{
+								Name:        "Not allowed",
+								Description: "Blob upload is not allowed because the registry is configured as a pull-through cache or for some other reason",
+								StatusCode:  http.StatusMethodNotAllowed,
+								ErrorCodes: []errcode.ErrorCode{
+									errcode.ErrorCodeUnsupported,
+								},
+							},
+							unauthorizedResponseDescriptor,
+							repositoryNotFoundResponseDescriptor,
+							deniedResponseDescriptor,
+							tooManyRequestsDescriptor,
+						},
+					},
+					{
+						Name:        "Initiate Resumable Blob Upload",
+						Description: "Initiate a resumable blob upload with an empty request body.",
+						Headers: []ParameterDescriptor{
+							hostHeader,
+							authHeader,
+							contentLengthZeroHeader,
+						},
+						PathParameters: []ParameterDescriptor{
+							nameParameterDescriptor,
+						},
+						Successes: []ResponseDescriptor{
+							{
+								Description: "The upload has been created. The `Location` header must be used to complete the upload. The response should be identical to a `GET` request on the contents of the returned `Location` header.",
+								StatusCode:  http.StatusAccepted,
+								Headers: []ParameterDescriptor{
+									contentLengthZeroHeader,
+									{
+										Name:        "Location",
+										Type:        "url",
+										Format:      "/v2/<name>/blobs/uploads/<uuid>",
+										Description: "The location of the created upload. Clients should use the contents verbatim to complete the upload, adding parameters where required.",
+									},
+									{
+										Name:        "Range",
+										Format:      "0-0",
+										Description: "Range header indicating the progress of the upload. When starting an upload, it will return an empty range, since no content has been received.",
+									},
+									dockerUploadUUIDHeader,
+								},
+							},
+						},
+						Failures: []ResponseDescriptor{
+							{
+								Name:       "Invalid Name or Digest",
+								StatusCode: http.StatusBadRequest,
+								ErrorCodes: []errcode.ErrorCode{
+									ErrorCodeDigestInvalid,
+									ErrorCodeNameInvalid,
+								},
+							},
+							unauthorizedResponseDescriptor,
+							repositoryNotFoundResponseDescriptor,
+							deniedResponseDescriptor,
+							tooManyRequestsDescriptor,
+						},
+					},
+					{
+						Name:        "Mount Blob",
+						Description: "Mount a blob identified by the `mount` parameter from another repository.",
+						Headers: []ParameterDescriptor{
+							hostHeader,
+							authHeader,
+							contentLengthZeroHeader,
+						},
+						PathParameters: []ParameterDescriptor{
+							nameParameterDescriptor,
+						},
+						QueryParameters: []ParameterDescriptor{
+							{
+								Name:        "mount",
+								Type:        "query",
+								Format:      "<digest>",
+								Regexp:      digest.DigestRegexp,
+								Description: `Digest of blob to mount from the source repository.`,
+							},
+							{
+								Name:        "from",
+								Type:        "query",
+								Format:      "<repository name>",
+								Regexp:      reference.NameRegexp,
+								Description: `Name of the source repository.`,
+							},
+						},
+						Successes: []ResponseDescriptor{
+							{
+								Description: "The blob has been mounted in the repository and is available at the provided location.",
+								StatusCode:  http.StatusCreated,
+								Headers: []ParameterDescriptor{
+									{
+										Name:   "Location",
+										Type:   "url",
+										Format: "<blob location>",
+									},
+									contentLengthZeroHeader,
+									dockerUploadUUIDHeader,
+								},
+							},
+						},
+						Failures: []ResponseDescriptor{
+							{
+								Name:       "Invalid Name or Digest",
+								StatusCode: http.StatusBadRequest,
+								ErrorCodes: []errcode.ErrorCode{
+									ErrorCodeDigestInvalid,
+									ErrorCodeNameInvalid,
+								},
+							},
+							{
+								Name:        "Not allowed",
+								Description: "Blob mount is not allowed because the registry is configured as a pull-through cache or for some other reason",
+								StatusCode:  http.StatusMethodNotAllowed,
+								ErrorCodes: []errcode.ErrorCode{
+									errcode.ErrorCodeUnsupported,
+								},
+							},
+							unauthorizedResponseDescriptor,
+							repositoryNotFoundResponseDescriptor,
+							deniedResponseDescriptor,
+							tooManyRequestsDescriptor,
+						},
+					},
+				},
+			},
+		},
+	},
+
+	{
+		Name:        RouteNameBlobUploadChunk,
+		Path:        "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/uploads/{uuid:[a-zA-Z0-9-_.=]+}",
+		Entity:      "Blob Upload",
+		Description: "Interact with blob uploads. Clients should never assemble URLs for this endpoint and should only take it through the `Location` header on related API requests. The `Location` header and its parameters should be preserved by clients, using the latest value returned via upload related API calls.",
+		Methods: []MethodDescriptor{
+			{
+				Method:      "GET",
+				Description: "Retrieve status of upload identified by `uuid`. The primary purpose of this endpoint is to resolve the current status of a resumable upload.",
+				Requests: []RequestDescriptor{
+					{
+						Description: "Retrieve the progress of the current upload, as reported by the `Range` header.",
+						Headers: []ParameterDescriptor{
+							hostHeader,
+							authHeader,
+						},
+						PathParameters: []ParameterDescriptor{
+							nameParameterDescriptor,
+							uuidParameterDescriptor,
+						},
+						Successes: []ResponseDescriptor{
+							{
+								Name:        "Upload Progress",
+								Description: "The upload is known and in progress. The last received offset is available in the `Range` header.",
+								StatusCode:  http.StatusNoContent,
+								Headers: []ParameterDescriptor{
+									{
+										Name:        "Range",
+										Type:        "header",
+										Format:      "0-<offset>",
+										Description: "Range indicating the current progress of the upload.",
+									},
+									contentLengthZeroHeader,
+									dockerUploadUUIDHeader,
+								},
+							},
+						},
+						Failures: []ResponseDescriptor{
+							{
+								Description: "There was an error processing the upload and it must be restarted.",
+								StatusCode:  http.StatusBadRequest,
+								ErrorCodes: []errcode.ErrorCode{
+									ErrorCodeDigestInvalid,
+									ErrorCodeNameInvalid,
+									ErrorCodeBlobUploadInvalid,
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      errorsBody,
+								},
+							},
+							{
+								Description: "The upload is unknown to the registry. The upload must be restarted.",
+								StatusCode:  http.StatusNotFound,
+								ErrorCodes: []errcode.ErrorCode{
+									ErrorCodeBlobUploadUnknown,
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      errorsBody,
+								},
+							},
+							unauthorizedResponseDescriptor,
+							repositoryNotFoundResponseDescriptor,
+							deniedResponseDescriptor,
+							tooManyRequestsDescriptor,
+						},
+					},
+				},
+			},
+			{
+				Method:      "PATCH",
+				Description: "Upload a chunk of data for the specified upload.",
+				Requests: []RequestDescriptor{
+					{
+						Name:        "Stream upload",
+						Description: "Upload a stream of data to upload without completing the upload.",
+						PathParameters: []ParameterDescriptor{
+							nameParameterDescriptor,
+							uuidParameterDescriptor,
+						},
+						Headers: []ParameterDescriptor{
+							hostHeader,
+							authHeader,
+						},
+						Body: BodyDescriptor{
+							ContentType: "application/octet-stream",
+							Format:      "<binary data>",
+						},
+						Successes: []ResponseDescriptor{
+							{
+								Name:        "Data Accepted",
+								Description: "The stream of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.",
+								StatusCode:  http.StatusNoContent,
+								Headers: []ParameterDescriptor{
+									{
+										Name:        "Location",
+										Type:        "url",
+										Format:      "/v2/<name>/blobs/uploads/<uuid>",
+										Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.",
+									},
+									{
+										Name:        "Range",
+										Type:        "header",
+										Format:      "0-<offset>",
+										Description: "Range indicating the current progress of the upload.",
+									},
+									contentLengthZeroHeader,
+									dockerUploadUUIDHeader,
+								},
+							},
+						},
+						Failures: []ResponseDescriptor{
+							{
+								Description: "There was an error processing the upload and it must be restarted.",
+								StatusCode:  http.StatusBadRequest,
+								ErrorCodes: []errcode.ErrorCode{
+									ErrorCodeDigestInvalid,
+									ErrorCodeNameInvalid,
+									ErrorCodeBlobUploadInvalid,
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      errorsBody,
+								},
+							},
+							{
+								Description: "The upload is unknown to the registry. The upload must be restarted.",
+								StatusCode:  http.StatusNotFound,
+								ErrorCodes: []errcode.ErrorCode{
+									ErrorCodeBlobUploadUnknown,
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      errorsBody,
+								},
+							},
+							unauthorizedResponseDescriptor,
+							repositoryNotFoundResponseDescriptor,
+							deniedResponseDescriptor,
+							tooManyRequestsDescriptor,
+						},
+					},
+					{
+						Name:        "Chunked upload",
+						Description: "Upload a chunk of data to specified upload without completing the upload. The data will be uploaded to the specified Content Range.",
+						PathParameters: []ParameterDescriptor{
+							nameParameterDescriptor,
+							uuidParameterDescriptor,
+						},
+						Headers: []ParameterDescriptor{
+							hostHeader,
+							authHeader,
+							{
+								Name:        "Content-Range",
+								Type:        "header",
+								Format:      "<start of range>-<end of range, inclusive>",
+								Required:    true,
+								Description: "Range of bytes identifying the desired block of content represented by the body. Start must the end offset retrieved via status check plus one. Note that this is a non-standard use of the `Content-Range` header.",
+							},
+							{
+								Name:        "Content-Length",
+								Type:        "integer",
+								Format:      "<length of chunk>",
+								Description: "Length of the chunk being uploaded, corresponding the length of the request body.",
+							},
+						},
+						Body: BodyDescriptor{
+							ContentType: "application/octet-stream",
+							Format:      "<binary chunk>",
+						},
+						Successes: []ResponseDescriptor{
+							{
+								Name:        "Chunk Accepted",
+								Description: "The chunk of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.",
+								StatusCode:  http.StatusNoContent,
+								Headers: []ParameterDescriptor{
+									{
+										Name:        "Location",
+										Type:        "url",
+										Format:      "/v2/<name>/blobs/uploads/<uuid>",
+										Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.",
+									},
+									{
+										Name:        "Range",
+										Type:        "header",
+										Format:      "0-<offset>",
+										Description: "Range indicating the current progress of the upload.",
+									},
+									contentLengthZeroHeader,
+									dockerUploadUUIDHeader,
+								},
+							},
+						},
+						Failures: []ResponseDescriptor{
+							{
+								Description: "There was an error processing the upload and it must be restarted.",
+								StatusCode:  http.StatusBadRequest,
+								ErrorCodes: []errcode.ErrorCode{
+									ErrorCodeDigestInvalid,
+									ErrorCodeNameInvalid,
+									ErrorCodeBlobUploadInvalid,
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      errorsBody,
+								},
+							},
+							{
+								Description: "The upload is unknown to the registry. The upload must be restarted.",
+								StatusCode:  http.StatusNotFound,
+								ErrorCodes: []errcode.ErrorCode{
+									ErrorCodeBlobUploadUnknown,
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      errorsBody,
+								},
+							},
+							{
+								Description: "The `Content-Range` specification cannot be accepted, either because it does not overlap with the current progress or it is invalid.",
+								StatusCode:  http.StatusRequestedRangeNotSatisfiable,
+							},
+							unauthorizedResponseDescriptor,
+							repositoryNotFoundResponseDescriptor,
+							deniedResponseDescriptor,
+							tooManyRequestsDescriptor,
+						},
+					},
+				},
+			},
+			{
+				Method:      "PUT",
+				Description: "Complete the upload specified by `uuid`, optionally appending the body as the final chunk.",
+				Requests: []RequestDescriptor{
+					{
+						Description: "Complete the upload, providing all the data in the body, if necessary. A request without a body will just complete the upload with previously uploaded content.",
+						Headers: []ParameterDescriptor{
+							hostHeader,
+							authHeader,
+							{
+								Name:        "Content-Length",
+								Type:        "integer",
+								Format:      "<length of data>",
+								Description: "Length of the data being uploaded, corresponding to the length of the request body. May be zero if no data is provided.",
+							},
+						},
+						PathParameters: []ParameterDescriptor{
+							nameParameterDescriptor,
+							uuidParameterDescriptor,
+						},
+						QueryParameters: []ParameterDescriptor{
+							{
+								Name:        "digest",
+								Type:        "string",
+								Format:      "<digest>",
+								Regexp:      digest.DigestRegexp,
+								Required:    true,
+								Description: `Digest of uploaded blob.`,
+							},
+						},
+						Body: BodyDescriptor{
+							ContentType: "application/octet-stream",
+							Format:      "<binary data>",
+						},
+						Successes: []ResponseDescriptor{
+							{
+								Name:        "Upload Complete",
+								Description: "The upload has been completed and accepted by the registry. The canonical location will be available in the `Location` header.",
+								StatusCode:  http.StatusNoContent,
+								Headers: []ParameterDescriptor{
+									{
+										Name:        "Location",
+										Type:        "url",
+										Format:      "<blob location>",
+										Description: "The canonical location of the blob for retrieval",
+									},
+									{
+										Name:        "Content-Range",
+										Type:        "header",
+										Format:      "<start of range>-<end of range, inclusive>",
+										Description: "Range of bytes identifying the desired block of content represented by the body. Start must match the end of offset retrieved via status check. Note that this is a non-standard use of the `Content-Range` header.",
+									},
+									contentLengthZeroHeader,
+									digestHeader,
+								},
+							},
+						},
+						Failures: []ResponseDescriptor{
+							{
+								Description: "There was an error processing the upload and it must be restarted.",
+								StatusCode:  http.StatusBadRequest,
+								ErrorCodes: []errcode.ErrorCode{
+									ErrorCodeDigestInvalid,
+									ErrorCodeNameInvalid,
+									ErrorCodeBlobUploadInvalid,
+									errcode.ErrorCodeUnsupported,
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      errorsBody,
+								},
+							},
+							{
+								Description: "The upload is unknown to the registry. The upload must be restarted.",
+								StatusCode:  http.StatusNotFound,
+								ErrorCodes: []errcode.ErrorCode{
+									ErrorCodeBlobUploadUnknown,
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      errorsBody,
+								},
+							},
+							unauthorizedResponseDescriptor,
+							repositoryNotFoundResponseDescriptor,
+							deniedResponseDescriptor,
+							tooManyRequestsDescriptor,
+						},
+					},
+				},
+			},
+			{
+				Method:      "DELETE",
+				Description: "Cancel outstanding upload processes, releasing associated resources. If this is not called, the unfinished uploads will eventually timeout.",
+				Requests: []RequestDescriptor{
+					{
+						Description: "Cancel the upload specified by `uuid`.",
+						PathParameters: []ParameterDescriptor{
+							nameParameterDescriptor,
+							uuidParameterDescriptor,
+						},
+						Headers: []ParameterDescriptor{
+							hostHeader,
+							authHeader,
+							contentLengthZeroHeader,
+						},
+						Successes: []ResponseDescriptor{
+							{
+								Name:        "Upload Deleted",
+								Description: "The upload has been successfully deleted.",
+								StatusCode:  http.StatusNoContent,
+								Headers: []ParameterDescriptor{
+									contentLengthZeroHeader,
+								},
+							},
+						},
+						Failures: []ResponseDescriptor{
+							{
+								Description: "An error was encountered processing the delete. The client may ignore this error.",
+								StatusCode:  http.StatusBadRequest,
+								ErrorCodes: []errcode.ErrorCode{
+									ErrorCodeNameInvalid,
+									ErrorCodeBlobUploadInvalid,
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      errorsBody,
+								},
+							},
+							{
+								Description: "The upload is unknown to the registry. The client may ignore this error and assume the upload has been deleted.",
+								StatusCode:  http.StatusNotFound,
+								ErrorCodes: []errcode.ErrorCode{
+									ErrorCodeBlobUploadUnknown,
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      errorsBody,
+								},
+							},
+							unauthorizedResponseDescriptor,
+							repositoryNotFoundResponseDescriptor,
+							deniedResponseDescriptor,
+							tooManyRequestsDescriptor,
+						},
+					},
+				},
+			},
+		},
+	},
+	{
+		Name:        RouteNameCatalog,
+		Path:        "/v2/_catalog",
+		Entity:      "Catalog",
+		Description: "List a set of available repositories in the local registry cluster. Does not provide any indication of what may be available upstream. Applications can only determine if a repository is available but not if it is not available.",
+		Methods: []MethodDescriptor{
+			{
+				Method:      "GET",
+				Description: "Retrieve a sorted, json list of repositories available in the registry.",
+				Requests: []RequestDescriptor{
+					{
+						Name:        "Catalog Fetch",
+						Description: "Request an unabridged list of repositories available.  The implementation may impose a maximum limit and return a partial set with pagination links.",
+						Successes: []ResponseDescriptor{
+							{
+								Description: "Returns the unabridged list of repositories as a json response.",
+								StatusCode:  http.StatusOK,
+								Headers: []ParameterDescriptor{
+									{
+										Name:        "Content-Length",
+										Type:        "integer",
+										Description: "Length of the JSON response body.",
+										Format:      "<length>",
+									},
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format: `{
+	"repositories": [
+		<name>,
+		...
+	]
+}`,
+								},
+							},
+						},
+					},
+					{
+						Name:            "Catalog Fetch Paginated",
+						Description:     "Return the specified portion of repositories.",
+						QueryParameters: paginationParameters,
+						Successes: []ResponseDescriptor{
+							{
+								StatusCode: http.StatusOK,
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format: `{
+	"repositories": [
+		<name>,
+		...
+	]
+	"next": "<url>?last=<name>&n=<last value of n>"
+}`,
+								},
+								Headers: []ParameterDescriptor{
+									{
+										Name:        "Content-Length",
+										Type:        "integer",
+										Description: "Length of the JSON response body.",
+										Format:      "<length>",
+									},
+									linkHeader,
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+	},
+}
+
+// routeDescriptorsMap indexes the package-level routeDescriptors slice by
+// route name, providing O(1) descriptor lookup (see RouteDescriptorsMap
+// consumers elsewhere in this package).
+var routeDescriptorsMap map[string]RouteDescriptor
+
+// init populates routeDescriptorsMap once at package load time. Each
+// descriptor is keyed by its Name field; names are assumed unique, so a
+// duplicate would silently overwrite an earlier entry.
+func init() {
+	routeDescriptorsMap = make(map[string]RouteDescriptor, len(routeDescriptors))
+
+	for _, descriptor := range routeDescriptors {
+		routeDescriptorsMap[descriptor.Name] = descriptor
+	}
+}
diff --git a/vendor/github.com/docker/distribution/registry/api/v2/doc.go b/vendor/github.com/docker/distribution/registry/api/v2/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..cde0119594dd028e3a24fb84c929d74e316eb700
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/api/v2/doc.go
@@ -0,0 +1,9 @@
+// Package v2 describes routes, urls and the error codes used in the Docker
+// Registry JSON HTTP API V2. In addition to declarations, descriptors are
+// provided for routes and error codes that can be used for implementation and
+// automatically generating documentation.
+//
+// Definitions here are considered to be locked down for the V2 registry api.
+// Any changes must be considered carefully and should not proceed without a
+// change proposal in docker core.
+package v2
diff --git a/vendor/github.com/docker/distribution/registry/api/v2/errors.go b/vendor/github.com/docker/distribution/registry/api/v2/errors.go
new file mode 100644
index 0000000000000000000000000000000000000000..97d6923aa0321b07caf7b112c090d7fb771c29d4
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/api/v2/errors.go
@@ -0,0 +1,136 @@
+package v2
+
+import (
+	"net/http"
+
+	"github.com/docker/distribution/registry/api/errcode"
+)
+
+// errGroup is the namespace under which all v2 registry API error codes are
+// registered with the errcode package.
+const errGroup = "registry.api.v2"
+
+var (
+	// ErrorCodeDigestInvalid is returned when uploading a blob if the
+	// provided digest does not match the blob contents.
+	ErrorCodeDigestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:   "DIGEST_INVALID",
+		Message: "provided digest did not match uploaded content",
+		Description: `When a blob is uploaded, the registry will check that
+		the content matches the digest provided by the client. The error may
+		include a detail structure with the key "digest", including the
+		invalid digest string. This error may also be returned when a manifest
+		includes an invalid layer digest.`,
+		HTTPStatusCode: http.StatusBadRequest,
+	})
+
+	// ErrorCodeSizeInvalid is returned when uploading a blob if the provided
+	// length does not match the uploaded content length.
+	ErrorCodeSizeInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:   "SIZE_INVALID",
+		Message: "provided length did not match content length",
+		Description: `When a layer is uploaded, the provided size will be
+		checked against the uploaded content. If they do not match, this error
+		will be returned.`,
+		HTTPStatusCode: http.StatusBadRequest,
+	})
+
+	// ErrorCodeNameInvalid is returned when the name in the manifest does not
+	// match the provided name.
+	ErrorCodeNameInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:   "NAME_INVALID",
+		Message: "invalid repository name",
+		Description: `Invalid repository name encountered either during
+		manifest validation or any API operation.`,
+		HTTPStatusCode: http.StatusBadRequest,
+	})
+
+	// ErrorCodeTagInvalid is returned when the tag in the manifest does not
+	// match the provided tag.
+	ErrorCodeTagInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:   "TAG_INVALID",
+		Message: "manifest tag did not match URI",
+		Description: `During a manifest upload, if the tag in the manifest
+		does not match the uri tag, this error will be returned.`,
+		HTTPStatusCode: http.StatusBadRequest,
+	})
+
+	// ErrorCodeNameUnknown when the repository name is not known.
+	ErrorCodeNameUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:   "NAME_UNKNOWN",
+		Message: "repository name not known to registry",
+		Description: `This is returned if the name used during an operation is
+		unknown to the registry.`,
+		HTTPStatusCode: http.StatusNotFound,
+	})
+
+	// ErrorCodeManifestUnknown returned when image manifest is unknown.
+	ErrorCodeManifestUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:   "MANIFEST_UNKNOWN",
+		Message: "manifest unknown",
+		Description: `This error is returned when the manifest, identified by
+		name and tag is unknown to the repository.`,
+		HTTPStatusCode: http.StatusNotFound,
+	})
+
+	// ErrorCodeManifestInvalid returned when an image manifest is invalid,
+	// typically during a PUT operation. This error encompasses all errors
+	// encountered during manifest validation that aren't signature errors.
+	ErrorCodeManifestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:   "MANIFEST_INVALID",
+		Message: "manifest invalid",
+		Description: `During upload, manifests undergo several checks ensuring
+		validity. If those checks fail, this error may be returned, unless a
+		more specific error is included. The detail will contain information
+		the failed validation.`,
+		HTTPStatusCode: http.StatusBadRequest,
+	})
+
+	// ErrorCodeManifestUnverified is returned when the manifest fails
+	// signature verification.
+	ErrorCodeManifestUnverified = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:   "MANIFEST_UNVERIFIED",
+		Message: "manifest failed signature verification",
+		Description: `During manifest upload, if the manifest fails signature
+		verification, this error will be returned.`,
+		HTTPStatusCode: http.StatusBadRequest,
+	})
+
+	// ErrorCodeManifestBlobUnknown is returned when a manifest blob is
+	// unknown to the registry.
+	ErrorCodeManifestBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:   "MANIFEST_BLOB_UNKNOWN",
+		Message: "blob unknown to registry",
+		Description: `This error may be returned when a manifest blob is 
+		unknown to the registry.`,
+		HTTPStatusCode: http.StatusBadRequest,
+	})
+
+	// ErrorCodeBlobUnknown is returned when a blob is unknown to the
+	// registry. This can happen when the manifest references a nonexistent
+	// layer or the result is not found by a blob fetch.
+	ErrorCodeBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:   "BLOB_UNKNOWN",
+		Message: "blob unknown to registry",
+		Description: `This error may be returned when a blob is unknown to the
+		registry in a specified repository. This can be returned with a
+		standard get or if a manifest references an unknown layer during
+		upload.`,
+		HTTPStatusCode: http.StatusNotFound,
+	})
+
+	// ErrorCodeBlobUploadUnknown is returned when an upload is unknown.
+	ErrorCodeBlobUploadUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:   "BLOB_UPLOAD_UNKNOWN",
+		Message: "blob upload unknown to registry",
+		Description: `If a blob upload has been cancelled or was never
+		started, this error code may be returned.`,
+		HTTPStatusCode: http.StatusNotFound,
+	})
+
+	// ErrorCodeBlobUploadInvalid is returned when an upload is invalid.
+	// NOTE(review): the HTTPStatusCode here is 404 Not Found rather than 400
+	// Bad Request; this matches upstream and callers may depend on it.
+	ErrorCodeBlobUploadInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:   "BLOB_UPLOAD_INVALID",
+		Message: "blob upload invalid",
+		Description: `The blob upload encountered an error and can no
+		longer proceed.`,
+		HTTPStatusCode: http.StatusNotFound,
+	})
+)
diff --git a/vendor/github.com/docker/distribution/registry/api/v2/headerparser.go b/vendor/github.com/docker/distribution/registry/api/v2/headerparser.go
new file mode 100644
index 0000000000000000000000000000000000000000..9bc41a3a64f994fb9d8fb71e84eed00bc696bc37
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/api/v2/headerparser.go
@@ -0,0 +1,161 @@
+package v2
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+	"unicode"
+)
+
+var (
+	// token syntax according to RFC 7230
+	reToken            = regexp.MustCompile(`^[^"(),/:;<=>?@[\]{}[:space:][:cntrl:]]+`)
+	reQuotedValue      = regexp.MustCompile(`^[^\\"]+`)
+	reEscapedCharacter = regexp.MustCompile(`^[[:blank:][:graph:]]`)
+)
+
+// parseForwardedHeader is a benevolent parser of the Forwarded header defined in RFC 7239. The header contains
+// a comma-separated list of forwarding key-value pairs. Each list element is set by a single proxy. The
+// function parses only the first element of the list, which is set by the very first proxy. It returns a map
+// of corresponding key-value pairs and an unparsed slice of the input string.
+//
+// Examples of Forwarded header values:
+//
+//  1. Forwarded: For=192.0.2.43; Proto=https,For="[2001:db8:cafe::17]",For=unknown
+//  2. Forwarded: for="192.0.2.43:443"; host="registry.example.org", for="10.10.05.40:80"
+//
+// The first will be parsed into {"for": "192.0.2.43", "proto": "https"} while the second into
+// {"for": "192.0.2.43:443", "host": "registry.example.org"}.
+func parseForwardedHeader(forwarded string) (map[string]string, string, error) {
+	// Following are states of forwarded header parser. Any state could transition to a failure.
+	const (
+		// terminating state; can transition to Parameter
+		stateElement = iota
+		// terminating state; can transition to KeyValueDelimiter
+		stateParameter
+		// can transition to Value
+		stateKeyValueDelimiter
+		// can transition to one of { QuotedValue, PairEnd }
+		stateValue
+		// can transition to one of { EscapedCharacter, PairEnd }
+		stateQuotedValue
+		// can transition to QuotedValue
+		stateEscapedCharacter
+		// terminating state; can transition to one of { Parameter, Element }
+		statePairEnd
+	)
+
+	var (
+		parameter string
+		value     string
+		parse     = forwarded[:]
+		res       = map[string]string{}
+		state     = stateElement
+	)
+
+Loop:
+	for {
+		// skip spaces unless in quoted value
+		if state != stateQuotedValue && state != stateEscapedCharacter {
+			parse = strings.TrimLeftFunc(parse, unicode.IsSpace)
+		}
+
+		if len(parse) == 0 {
+			if state != stateElement && state != statePairEnd && state != stateParameter {
+				return nil, parse, fmt.Errorf("unexpected end of input")
+			}
+			// terminating
+			break
+		}
+
+		switch state {
+		// terminate at list element delimiter
+		case stateElement:
+			if parse[0] == ',' {
+				parse = parse[1:]
+				break Loop
+			}
+			state = stateParameter
+
+		// parse parameter (the key of key-value pair)
+		case stateParameter:
+			match := reToken.FindString(parse)
+			if len(match) == 0 {
+				return nil, parse, fmt.Errorf("failed to parse token at position %d", len(forwarded)-len(parse))
+			}
+			parameter = strings.ToLower(match)
+			parse = parse[len(match):]
+			state = stateKeyValueDelimiter
+
+		// parse '='
+		case stateKeyValueDelimiter:
+			if parse[0] != '=' {
+				return nil, parse, fmt.Errorf("expected '=', not '%c' at position %d", parse[0], len(forwarded)-len(parse))
+			}
+			parse = parse[1:]
+			state = stateValue
+
+		// parse value or quoted value
+		case stateValue:
+			if parse[0] == '"' {
+				parse = parse[1:]
+				state = stateQuotedValue
+			} else {
+				value = reToken.FindString(parse)
+				if len(value) == 0 {
+					return nil, parse, fmt.Errorf("failed to parse value at position %d", len(forwarded)-len(parse))
+				}
+				if _, exists := res[parameter]; exists {
+					return nil, parse, fmt.Errorf("duplicate parameter %q at position %d", parameter, len(forwarded)-len(parse))
+				}
+				res[parameter] = value
+				parse = parse[len(value):]
+				value = ""
+				state = statePairEnd
+			}
+
+		// parse a part of quoted value until the first backslash
+		case stateQuotedValue:
+			match := reQuotedValue.FindString(parse)
+			value += match
+			parse = parse[len(match):]
+			switch {
+			case len(parse) == 0:
+				return nil, parse, fmt.Errorf("unterminated quoted string")
+			case parse[0] == '"':
+				res[parameter] = value
+				value = ""
+				parse = parse[1:]
+				state = statePairEnd
+			case parse[0] == '\\':
+				parse = parse[1:]
+				state = stateEscapedCharacter
+			}
+
+		// parse escaped character in a quoted string, ignore the backslash
+		// transition back to QuotedValue state
+		case stateEscapedCharacter:
+			c := reEscapedCharacter.FindString(parse)
+			if len(c) == 0 {
+				return nil, parse, fmt.Errorf("invalid escape sequence at position %d", len(forwarded)-len(parse)-1)
+			}
+			value += c
+			parse = parse[1:]
+			state = stateQuotedValue
+
+		// expect either a new key-value pair, new list or end of input
+		case statePairEnd:
+			switch parse[0] {
+			case ';':
+				parse = parse[1:]
+				state = stateParameter
+			case ',':
+				state = stateElement
+			default:
+				return nil, parse, fmt.Errorf("expected ',' or ';', not %c at position %d", parse[0], len(forwarded)-len(parse))
+			}
+		}
+	}
+
+	return res, parse, nil
+}
diff --git a/vendor/github.com/docker/distribution/registry/api/v2/routes.go b/vendor/github.com/docker/distribution/registry/api/v2/routes.go
new file mode 100644
index 0000000000000000000000000000000000000000..5b80d5be76a5f2d7d4314070335bf522304859f1
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/api/v2/routes.go
@@ -0,0 +1,49 @@
+package v2
+
+import "github.com/gorilla/mux"
+
+// The following are definitions of the names under which all V2 routes are
+// registered. These symbols can be used to look up a route based on the name.
+const (
+	RouteNameBase            = "base"
+	RouteNameManifest        = "manifest"
+	RouteNameTags            = "tags"
+	RouteNameBlob            = "blob"
+	RouteNameBlobUpload      = "blob-upload"
+	RouteNameBlobUploadChunk = "blob-upload-chunk"
+	RouteNameCatalog         = "catalog"
+)
+
+var allEndpoints = []string{
+	RouteNameManifest,
+	RouteNameCatalog,
+	RouteNameTags,
+	RouteNameBlob,
+	RouteNameBlobUpload,
+	RouteNameBlobUploadChunk,
+}
+
+// Router builds a gorilla/mux router with named routes for the various API
+// methods. This can be used directly by both server implementations and
+// clients.
+func Router() *mux.Router {
+	return RouterWithPrefix("")
+}
+
+// RouterWithPrefix builds a gorilla/mux router with a configured prefix
+// on all routes.
+func RouterWithPrefix(prefix string) *mux.Router {
+	rootRouter := mux.NewRouter()
+	router := rootRouter
+	if prefix != "" {
+		router = router.PathPrefix(prefix).Subrouter()
+	}
+
+	router.StrictSlash(true)
+
+	for _, descriptor := range routeDescriptors {
+		router.Path(descriptor.Path).Name(descriptor.Name)
+	}
+
+	return rootRouter
+}
diff --git a/vendor/github.com/docker/distribution/registry/api/v2/urls.go b/vendor/github.com/docker/distribution/registry/api/v2/urls.go
new file mode 100644
index 0000000000000000000000000000000000000000..e2e242eab01ad80e675bd46541012b02b02d54a6
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/api/v2/urls.go
@@ -0,0 +1,314 @@
+package v2
+
+import (
+	"net"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/docker/distribution/reference"
+	"github.com/gorilla/mux"
+)
+
+// URLBuilder creates registry API urls from a single base endpoint. It can be
+// used to create urls for use in a registry client or server.
+//
+// All urls will be created from the given base, including the api version.
+// For example, if a root of "/foo/" is provided, urls generated will fall
+// under "/foo/v2/...". Most applications will only provide a schema, host and
+// port, such as "https://localhost:5000/".
+type URLBuilder struct {
+	root     *url.URL // url root (ie http://localhost/)
+	router   *mux.Router
+	relative bool
+}
+
+// NewURLBuilder creates a URLBuilder with provided root url object.
+func NewURLBuilder(root *url.URL, relative bool) *URLBuilder {
+	return &URLBuilder{
+		root:     root,
+		router:   Router(),
+		relative: relative,
+	}
+}
+
+// NewURLBuilderFromString works identically to NewURLBuilder except it takes
+// a string argument for the root, returning an error if it is not a valid
+// url.
+func NewURLBuilderFromString(root string, relative bool) (*URLBuilder, error) {
+	u, err := url.Parse(root)
+	if err != nil {
+		return nil, err
+	}
+
+	return NewURLBuilder(u, relative), nil
+}
+
+// NewURLBuilderFromRequest uses information from an *http.Request to
+// construct the root url.
+func NewURLBuilderFromRequest(r *http.Request, relative bool) *URLBuilder {
+	var scheme string
+
+	forwardedProto := r.Header.Get("X-Forwarded-Proto")
+	// TODO: log the error
+	forwardedHeader, _, _ := parseForwardedHeader(r.Header.Get("Forwarded"))
+
+	switch {
+	case len(forwardedProto) > 0:
+		scheme = forwardedProto
+	case len(forwardedHeader["proto"]) > 0:
+		scheme = forwardedHeader["proto"]
+	case r.TLS != nil:
+		scheme = "https"
+	case len(r.URL.Scheme) > 0:
+		scheme = r.URL.Scheme
+	default:
+		scheme = "http"
+	}
+
+	host := r.Host
+
+	if forwardedHost := r.Header.Get("X-Forwarded-Host"); len(forwardedHost) > 0 {
+		// According to the Apache mod_proxy docs, X-Forwarded-Host can be a
+		// comma-separated list of hosts, to which each proxy appends the
+		// requested host. We want to grab the first from this comma-separated
+		// list.
+		hosts := strings.SplitN(forwardedHost, ",", 2)
+		host = strings.TrimSpace(hosts[0])
+	} else if addr, exists := forwardedHeader["for"]; exists {
+		host = addr
+	} else if h, exists := forwardedHeader["host"]; exists {
+		host = h
+	}
+
+	portLessHost, port := host, ""
+	if !isIPv6Address(portLessHost) {
+		// with go 1.6, SplitHostPort would treat the last part of an IPv6 address as a port
+		portLessHost, port, _ = net.SplitHostPort(host)
+	}
+	if forwardedPort := r.Header.Get("X-Forwarded-Port"); len(port) == 0 && len(forwardedPort) > 0 {
+		ports := strings.SplitN(forwardedPort, ",", 2)
+		forwardedPort = strings.TrimSpace(ports[0])
+		if _, err := strconv.ParseInt(forwardedPort, 10, 32); err == nil {
+			port = forwardedPort
+		}
+	}
+
+	if len(portLessHost) > 0 {
+		host = portLessHost
+	}
+	if len(port) > 0 {
+		// remove enclosing brackets of ipv6 address otherwise they will be duplicated
+		if len(host) > 1 && host[0] == '[' && host[len(host)-1] == ']' {
+			host = host[1 : len(host)-1]
+		}
+		// JoinHostPort properly encloses ipv6 addresses in square brackets
+		host = net.JoinHostPort(host, port)
+	} else if isIPv6Address(host) && host[0] != '[' {
+		// ipv6 needs to be enclosed in square brackets in urls
+		host = "[" + host + "]"
+	}
+
+	basePath := routeDescriptorsMap[RouteNameBase].Path
+
+	requestPath := r.URL.Path
+	index := strings.Index(requestPath, basePath)
+
+	u := &url.URL{
+		Scheme: scheme,
+		Host:   host,
+	}
+
+	if index > 0 {
+		// N.B. index+1 is important because we want to include the trailing /
+		u.Path = requestPath[0 : index+1]
+	}
+
+	return NewURLBuilder(u, relative)
+}
+
+// BuildBaseURL constructs a base url for the API, typically just "/v2/".
+func (ub *URLBuilder) BuildBaseURL() (string, error) {
+	route := ub.cloneRoute(RouteNameBase)
+
+	baseURL, err := route.URL()
+	if err != nil {
+		return "", err
+	}
+
+	return baseURL.String(), nil
+}
+
+// BuildCatalogURL constructs a url to get a catalog of repositories
+func (ub *URLBuilder) BuildCatalogURL(values ...url.Values) (string, error) {
+	route := ub.cloneRoute(RouteNameCatalog)
+
+	catalogURL, err := route.URL()
+	if err != nil {
+		return "", err
+	}
+
+	return appendValuesURL(catalogURL, values...).String(), nil
+}
+
+// BuildTagsURL constructs a url to list the tags in the named repository.
+func (ub *URLBuilder) BuildTagsURL(name reference.Named) (string, error) {
+	route := ub.cloneRoute(RouteNameTags)
+
+	tagsURL, err := route.URL("name", name.Name())
+	if err != nil {
+		return "", err
+	}
+
+	return tagsURL.String(), nil
+}
+
+// BuildManifestURL constructs a url for the manifest identified by name and
+// reference. The argument reference may be either a tag or digest.
+func (ub *URLBuilder) BuildManifestURL(ref reference.Named) (string, error) {
+	route := ub.cloneRoute(RouteNameManifest)
+
+	tagOrDigest := ""
+	switch v := ref.(type) {
+	case reference.Tagged:
+		tagOrDigest = v.Tag()
+	case reference.Digested:
+		tagOrDigest = v.Digest().String()
+	}
+
+	manifestURL, err := route.URL("name", ref.Name(), "reference", tagOrDigest)
+	if err != nil {
+		return "", err
+	}
+
+	return manifestURL.String(), nil
+}
+
+// BuildBlobURL constructs the url for the blob identified by name and dgst.
+func (ub *URLBuilder) BuildBlobURL(ref reference.Canonical) (string, error) {
+	route := ub.cloneRoute(RouteNameBlob)
+
+	layerURL, err := route.URL("name", ref.Name(), "digest", ref.Digest().String())
+	if err != nil {
+		return "", err
+	}
+
+	return layerURL.String(), nil
+}
+
+// BuildBlobUploadURL constructs a url to begin a blob upload in the
+// repository identified by name.
+func (ub *URLBuilder) BuildBlobUploadURL(name reference.Named, values ...url.Values) (string, error) {
+	route := ub.cloneRoute(RouteNameBlobUpload)
+
+	uploadURL, err := route.URL("name", name.Name())
+	if err != nil {
+		return "", err
+	}
+
+	return appendValuesURL(uploadURL, values...).String(), nil
+}
+
+// BuildBlobUploadChunkURL constructs a url for the upload identified by uuid,
+// including any url values. This should generally not be used by clients, as
+// this url is provided by server implementations during the blob upload
+// process.
+func (ub *URLBuilder) BuildBlobUploadChunkURL(name reference.Named, uuid string, values ...url.Values) (string, error) {
+	route := ub.cloneRoute(RouteNameBlobUploadChunk)
+
+	uploadURL, err := route.URL("name", name.Name(), "uuid", uuid)
+	if err != nil {
+		return "", err
+	}
+
+	return appendValuesURL(uploadURL, values...).String(), nil
+}
+
+// cloneRoute returns a clone of the named route from the router. Routes
+// must be cloned to avoid modifying them during url generation.
+func (ub *URLBuilder) cloneRoute(name string) clonedRoute {
+	route := new(mux.Route)
+	root := new(url.URL)
+
+	*route = *ub.router.GetRoute(name) // clone the route
+	*root = *ub.root
+
+	return clonedRoute{Route: route, root: root, relative: ub.relative}
+}
+
+type clonedRoute struct {
+	*mux.Route
+	root     *url.URL
+	relative bool
+}
+
+func (cr clonedRoute) URL(pairs ...string) (*url.URL, error) {
+	routeURL, err := cr.Route.URL(pairs...)
+	if err != nil {
+		return nil, err
+	}
+
+	if cr.relative {
+		return routeURL, nil
+	}
+
+	if routeURL.Scheme == "" && routeURL.User == nil && routeURL.Host == "" {
+		routeURL.Path = routeURL.Path[1:]
+	}
+
+	url := cr.root.ResolveReference(routeURL)
+	url.Scheme = cr.root.Scheme
+	return url, nil
+}
+
+// appendValuesURL appends the given query parameters to the url.
+func appendValuesURL(u *url.URL, values ...url.Values) *url.URL {
+	merged := u.Query()
+
+	for _, v := range values {
+		for k, vv := range v {
+			merged[k] = append(merged[k], vv...)
+		}
+	}
+
+	u.RawQuery = merged.Encode()
+	return u
+}
+
+// appendValues appends the parameters to the url. Panics if the string is not
+// a url.
+func appendValues(u string, values ...url.Values) string {
+	up, err := url.Parse(u)
+
+	if err != nil {
+		panic(err) // should never happen
+	}
+
+	return appendValuesURL(up, values...).String()
+}
+
+// isIPv6Address returns true if given string is a valid IPv6 address. No port is allowed. The address may be
+// enclosed in square brackets.
+func isIPv6Address(host string) bool {
+	if len(host) > 1 && host[0] == '[' && host[len(host)-1] == ']' {
+		host = host[1 : len(host)-1]
+	}
+	// The IPv6 scoped addressing zone identifier starts after the last percent sign.
+	if i := strings.LastIndexByte(host, '%'); i > 0 {
+		host = host[:i]
+	}
+	ip := net.ParseIP(host)
+	if ip == nil {
+		return false
+	}
+	if ip.To16() == nil {
+		return false
+	}
+	if ip.To4() == nil {
+		return true
+	}
+	// a dot can be present in an IPv4-mapped address, but it needs to come after a colon
+	i := strings.IndexAny(host, ":.")
+	return i >= 0 && host[i] == ':'
+}
diff --git a/vendor/github.com/docker/distribution/registry/client/auth/api_version.go b/vendor/github.com/docker/distribution/registry/client/auth/api_version.go
new file mode 100644
index 0000000000000000000000000000000000000000..7d8f1d9576859963e9003e49043a26d9d65b3aef
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/client/auth/api_version.go
@@ -0,0 +1,58 @@
+package auth
+
+import (
+	"net/http"
+	"strings"
+)
+
+// APIVersion represents a version of an API including its
+// type and version number.
+type APIVersion struct {
+	// Type refers to the name of a specific API specification
+	// such as "registry"
+	Type string
+
+	// Version is the version of the API specification implemented.
+	// This may omit the revision number and only include
+	// the major and minor version, such as "2.0"
+	Version string
+}
+
+// String returns the string formatted API Version
+func (v APIVersion) String() string {
+	return v.Type + "/" + v.Version
+}
+
+// APIVersions gets the API versions out of an HTTP response using the provided
+// version header as the key for the HTTP header.
+func APIVersions(resp *http.Response, versionHeader string) []APIVersion {
+	versions := []APIVersion{}
+	if versionHeader != "" {
+		for _, supportedVersions := range resp.Header[http.CanonicalHeaderKey(versionHeader)] {
+			for _, version := range strings.Fields(supportedVersions) {
+				versions = append(versions, ParseAPIVersion(version))
+			}
+		}
+	}
+	return versions
+}
+
+// ParseAPIVersion parses an API version string into an APIVersion
+// Format (Expected, not enforced):
+// API version string = <API type> '/' <API version>
+// API type = [a-z][a-z0-9]*
+// API version = [0-9]+(\.[0-9]+)?
+// TODO(dmcgowan): Enforce format, add error condition, remove unknown type
+func ParseAPIVersion(versionStr string) APIVersion {
+	idx := strings.IndexRune(versionStr, '/')
+	if idx == -1 {
+		return APIVersion{
+			Type:    "unknown",
+			Version: versionStr,
+		}
+	}
+	return APIVersion{
+		Type:    strings.ToLower(versionStr[:idx]),
+		Version: versionStr[idx+1:],
+	}
+}
diff --git a/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go b/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go
new file mode 100644
index 0000000000000000000000000000000000000000..2c3ebe1653294c77b4a264f2b8a9c43130b787c3
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go
@@ -0,0 +1,27 @@
+package challenge
+
+import (
+	"net/url"
+	"strings"
+)
+
+// FROM: https://golang.org/src/net/http/http.go
+// Given a string of the form "host", "host:port", or "[ipv6::address]:port",
+// return true if the string includes a port.
+func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") }
+
+// default ports for each scheme; FROM: http://golang.org/src/net/http/transport.go
+var portMap = map[string]string{
+	"http":  "80",
+	"https": "443",
+}
+
+// canonicalAddr returns url.Host but always with a ":port" suffix
+// FROM: http://golang.org/src/net/http/transport.go
+func canonicalAddr(url *url.URL) string {
+	addr := url.Host
+	if !hasPort(addr) {
+		return addr + ":" + portMap[url.Scheme]
+	}
+	return addr
+}
diff --git a/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go b/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go
new file mode 100644
index 0000000000000000000000000000000000000000..c9bdfc355b508aaeb26a2320463390697c1d5dc4
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go
@@ -0,0 +1,237 @@
+package challenge
+
+import (
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+	"sync"
+)
+
+// Challenge carries information from a WWW-Authenticate response header.
+// See RFC 2617.
+type Challenge struct {
+	// Scheme is the auth-scheme according to RFC 2617
+	Scheme string
+
+	// Parameters are the auth-params according to RFC 2617
+	Parameters map[string]string
+}
+
+// Manager manages the challenges for endpoints.
+// The challenges are pulled out of HTTP responses. Only
+// responses which expect challenges should be added to
+// the manager, since a non-unauthorized request will be
+// viewed as not requiring challenges.
+type Manager interface {
+	// GetChallenges returns the challenges for the given
+	// endpoint URL.
+	GetChallenges(endpoint url.URL) ([]Challenge, error)
+
+	// AddResponse adds the response to the challenge
+	// manager. The challenges will be parsed out of
+	// the WWW-Authenticate headers and added to the
+	// URL which produced the response. If the
+	// response was authorized, any challenges for the
+	// endpoint will be cleared.
+	AddResponse(resp *http.Response) error
+}
+
+// NewSimpleManager returns an instance of
+// Manager which only maps endpoints to challenges
+// based on the responses which have been added to the
+// manager. The simple manager will make no attempt to
+// perform requests on the endpoints or cache the responses
+// to a backend.
+func NewSimpleManager() Manager {
+	return &simpleManager{
+		Challanges: make(map[string][]Challenge),
+	}
+}
+
+type simpleManager struct {
+	sync.RWMutex
+	Challanges map[string][]Challenge
+}
+
+func normalizeURL(endpoint *url.URL) {
+	endpoint.Host = strings.ToLower(endpoint.Host)
+	endpoint.Host = canonicalAddr(endpoint)
+}
+
+func (m *simpleManager) GetChallenges(endpoint url.URL) ([]Challenge, error) {
+	normalizeURL(&endpoint)
+
+	m.RLock()
+	defer m.RUnlock()
+	challenges := m.Challanges[endpoint.String()]
+	return challenges, nil
+}
+
+func (m *simpleManager) AddResponse(resp *http.Response) error {
+	challenges := ResponseChallenges(resp)
+	if resp.Request == nil {
+		return fmt.Errorf("missing request reference")
+	}
+	urlCopy := url.URL{
+		Path:   resp.Request.URL.Path,
+		Host:   resp.Request.URL.Host,
+		Scheme: resp.Request.URL.Scheme,
+	}
+	normalizeURL(&urlCopy)
+
+	m.Lock()
+	defer m.Unlock()
+	m.Challanges[urlCopy.String()] = challenges
+	return nil
+}
+
+// Octet types from RFC 2616, used to classify header octets.
+type octetType byte
+
+var octetTypes [256]octetType
+
+const (
+	isToken octetType = 1 << iota
+	isSpace
+)
+
+func init() {
+	// OCTET      = <any 8-bit sequence of data>
+	// CHAR       = <any US-ASCII character (octets 0 - 127)>
+	// CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+	// CR         = <US-ASCII CR, carriage return (13)>
+	// LF         = <US-ASCII LF, linefeed (10)>
+	// SP         = <US-ASCII SP, space (32)>
+	// HT         = <US-ASCII HT, horizontal-tab (9)>
+	// <">        = <US-ASCII double-quote mark (34)>
+	// CRLF       = CR LF
+	// LWS        = [CRLF] 1*( SP | HT )
+	// TEXT       = <any OCTET except CTLs, but including LWS>
+	// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
+	//              | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
+	// token      = 1*<any CHAR except CTLs or separators>
+	// qdtext     = <any TEXT except <">>
+
+	for c := 0; c < 256; c++ {
+		var t octetType
+		isCtl := c <= 31 || c == 127
+		isChar := 0 <= c && c <= 127
+		isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0
+		if strings.IndexRune(" \t\r\n", rune(c)) >= 0 {
+			t |= isSpace
+		}
+		if isChar && !isCtl && !isSeparator {
+			t |= isToken
+		}
+		octetTypes[c] = t
+	}
+}
+
+// ResponseChallenges returns a list of authorization challenges
+// for the given http Response. Challenges are only checked if
+// the response status code was a 401.
+func ResponseChallenges(resp *http.Response) []Challenge {
+	if resp.StatusCode == http.StatusUnauthorized {
+		// Parse the WWW-Authenticate header and return any
+		// challenges found in it.
+		return parseAuthHeader(resp.Header)
+	}
+
+	return nil
+}
+
+func parseAuthHeader(header http.Header) []Challenge {
+	challenges := []Challenge{}
+	for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] {
+		v, p := parseValueAndParams(h)
+		if v != "" {
+			challenges = append(challenges, Challenge{Scheme: v, Parameters: p})
+		}
+	}
+	return challenges
+}
+
+func parseValueAndParams(header string) (value string, params map[string]string) {
+	params = make(map[string]string)
+	value, s := expectToken(header)
+	if value == "" {
+		return
+	}
+	value = strings.ToLower(value)
+	s = "," + skipSpace(s)
+	for strings.HasPrefix(s, ",") {
+		var pkey string
+		pkey, s = expectToken(skipSpace(s[1:]))
+		if pkey == "" {
+			return
+		}
+		if !strings.HasPrefix(s, "=") {
+			return
+		}
+		var pvalue string
+		pvalue, s = expectTokenOrQuoted(s[1:])
+		if pvalue == "" {
+			return
+		}
+		pkey = strings.ToLower(pkey)
+		params[pkey] = pvalue
+		s = skipSpace(s)
+	}
+	return
+}
+
+// skipSpace returns s with any leading space octets (per RFC 2616) removed.
+func skipSpace(s string) (rest string) {
+	i := 0
+	for ; i < len(s); i++ {
+		if octetTypes[s[i]]&isSpace == 0 {
+			break
+		}
+	}
+	return s[i:]
+}
+
+// expectToken splits s into a leading RFC 2616 token and the remainder.
+// The token is empty if s does not start with token octets.
+func expectToken(s string) (token, rest string) {
+	i := 0
+	for ; i < len(s); i++ {
+		if octetTypes[s[i]]&isToken == 0 {
+			break
+		}
+	}
+	return s[:i], s[i:]
+}
+
+// expectTokenOrQuoted parses either a bare token or a double-quoted string
+// (handling backslash escapes) from the start of s, returning the value and
+// the unconsumed remainder. Both results are empty for a malformed quoted
+// string.
+func expectTokenOrQuoted(s string) (value string, rest string) {
+	if !strings.HasPrefix(s, "\"") {
+		return expectToken(s)
+	}
+	s = s[1:]
+	for i := 0; i < len(s); i++ {
+		switch s[i] {
+		case '"':
+			return s[:i], s[i+1:]
+		case '\\':
+			p := make([]byte, len(s)-1)
+			j := copy(p, s[:i])
+			escape := true
+			for i = i + 1; i < len(s); i++ {
+				b := s[i]
+				switch {
+				case escape:
+					escape = false
+					p[j] = b
+					j++
+				case b == '\\':
+					escape = true
+				case b == '"':
+					return string(p[:j]), s[i+1:]
+				default:
+					p[j] = b
+					j++
+				}
+			}
+			return "", ""
+		}
+	}
+	return "", ""
+}
diff --git a/vendor/github.com/docker/distribution/registry/client/auth/session.go b/vendor/github.com/docker/distribution/registry/client/auth/session.go
new file mode 100644
index 0000000000000000000000000000000000000000..d6d884ffd1dd9601aeb9648ba86134bab20c97b8
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/client/auth/session.go
@@ -0,0 +1,503 @@
+package auth
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/distribution/registry/client"
+	"github.com/docker/distribution/registry/client/auth/challenge"
+	"github.com/docker/distribution/registry/client/transport"
+)
+
+var (
+	// ErrNoBasicAuthCredentials is returned if a request can't be authorized with
+	// basic auth due to lack of credentials.
+	ErrNoBasicAuthCredentials = errors.New("no basic auth credentials")
+
+	// ErrNoToken is returned if a request is successful but the body does not
+	// contain an authorization token.
+	ErrNoToken = errors.New("authorization server did not include a token in the response")
+)
+
+const defaultClientID = "registry-client"
+
+// AuthenticationHandler is an interface for authorizing a request from
+// params from a "WWW-Authenicate" header for a single scheme.
+type AuthenticationHandler interface {
+	// Scheme returns the scheme as expected from the "WWW-Authenicate" header.
+	Scheme() string
+
+	// AuthorizeRequest adds the authorization header to a request (if needed)
+	// using the parameters from "WWW-Authenticate" method. The parameters
+	// values depend on the scheme.
+	AuthorizeRequest(req *http.Request, params map[string]string) error
+}
+
+// CredentialStore is an interface for getting credentials for
+// a given URL
+type CredentialStore interface {
+	// Basic returns basic auth for the given URL
+	Basic(*url.URL) (string, string)
+
+	// RefreshToken returns a refresh token for the
+	// given URL and service
+	RefreshToken(*url.URL, string) string
+
+	// SetRefreshToken sets the refresh token if none
+	// is provided for the given url and service
+	SetRefreshToken(realm *url.URL, service, token string)
+}
+
+// NewAuthorizer creates an authorizer which can handle multiple authentication
+// schemes. The handlers are tried in order, the higher priority authentication
+// methods should be first. The challengeMap holds a list of challenges for
+// a given root API endpoint (for example "https://registry-1.docker.io/v2/").
+func NewAuthorizer(manager challenge.Manager, handlers ...AuthenticationHandler) transport.RequestModifier {
+	return &endpointAuthorizer{
+		challenges: manager,
+		handlers:   handlers,
+	}
+}
+
+type endpointAuthorizer struct {
+	challenges challenge.Manager
+	handlers   []AuthenticationHandler
+	transport  http.RoundTripper
+}
+
+func (ea *endpointAuthorizer) ModifyRequest(req *http.Request) error {
+	pingPath := req.URL.Path
+	if v2Root := strings.Index(req.URL.Path, "/v2/"); v2Root != -1 {
+		pingPath = pingPath[:v2Root+4]
+	} else if v1Root := strings.Index(req.URL.Path, "/v1/"); v1Root != -1 {
+		pingPath = pingPath[:v1Root] + "/v2/"
+	} else {
+		return nil
+	}
+
+	ping := url.URL{
+		Host:   req.URL.Host,
+		Scheme: req.URL.Scheme,
+		Path:   pingPath,
+	}
+
+	challenges, err := ea.challenges.GetChallenges(ping)
+	if err != nil {
+		return err
+	}
+
+	if len(challenges) > 0 {
+		for _, handler := range ea.handlers {
+			for _, c := range challenges {
+				if c.Scheme != handler.Scheme() {
+					continue
+				}
+				if err := handler.AuthorizeRequest(req, c.Parameters); err != nil {
+					return err
+				}
+			}
+		}
+	}
+
+	return nil
+}
+
+// This is the minimum duration a token can last (in seconds).
+// A token must not live less than 60 seconds because older versions
+// of the Docker client didn't read their expiration from the token
+// response and assumed 60 seconds.  So to remain compatible with
+// those implementations, a token must live at least this long.
+const minimumTokenLifetimeSeconds = 60
+
+// Private interface for time used by this package to enable tests to provide their own implementation.
+type clock interface {
+	Now() time.Time
+}
+
+type tokenHandler struct {
+	header    http.Header
+	creds     CredentialStore
+	transport http.RoundTripper
+	clock     clock
+
+	offlineAccess bool
+	forceOAuth    bool
+	clientID      string
+	scopes        []Scope
+
+	tokenLock       sync.Mutex
+	tokenCache      string
+	tokenExpiration time.Time
+}
+
+// Scope is a type which is serializable to a string
+// using the allow scope grammar.
+type Scope interface {
+	String() string
+}
+
+// RepositoryScope represents a token scope for access
+// to a repository.
+type RepositoryScope struct {
+	Repository string
+	Class      string
+	Actions    []string
+}
+
+// String returns the string representation of the repository
+// using the scope grammar
+func (rs RepositoryScope) String() string {
+	repoType := "repository"
+	if rs.Class != "" {
+		repoType = fmt.Sprintf("%s(%s)", repoType, rs.Class)
+	}
+	return fmt.Sprintf("%s:%s:%s", repoType, rs.Repository, strings.Join(rs.Actions, ","))
+}
+
+// RegistryScope represents a token scope for access
+// to resources in the registry.
+type RegistryScope struct {
+	Name    string
+	Actions []string
+}
+
+// String returns the string representation of the user
+// using the scope grammar
+func (rs RegistryScope) String() string {
+	return fmt.Sprintf("registry:%s:%s", rs.Name, strings.Join(rs.Actions, ","))
+}
+
+// TokenHandlerOptions is used to configure a new token handler
+type TokenHandlerOptions struct {
+	Transport   http.RoundTripper
+	Credentials CredentialStore
+
+	OfflineAccess bool
+	ForceOAuth    bool
+	ClientID      string
+	Scopes        []Scope
+}
+
+// An implementation of clock for providing real time data.
+type realClock struct{}
+
+// Now implements clock
+func (realClock) Now() time.Time { return time.Now() }
+
+// NewTokenHandler creates a new AuthenicationHandler which supports
+// fetching tokens from a remote token server.
+func NewTokenHandler(transport http.RoundTripper, creds CredentialStore, scope string, actions ...string) AuthenticationHandler {
+	// Create options...
+	return NewTokenHandlerWithOptions(TokenHandlerOptions{
+		Transport:   transport,
+		Credentials: creds,
+		Scopes: []Scope{
+			RepositoryScope{
+				Repository: scope,
+				Actions:    actions,
+			},
+		},
+	})
+}
+
+// NewTokenHandlerWithOptions creates a new token handler using the provided
+// options structure.
+func NewTokenHandlerWithOptions(options TokenHandlerOptions) AuthenticationHandler {
+	handler := &tokenHandler{
+		transport:     options.Transport,
+		creds:         options.Credentials,
+		offlineAccess: options.OfflineAccess,
+		forceOAuth:    options.ForceOAuth,
+		clientID:      options.ClientID,
+		scopes:        options.Scopes,
+		clock:         realClock{},
+	}
+
+	return handler
+}
+
+func (th *tokenHandler) client() *http.Client {
+	return &http.Client{
+		Transport: th.transport,
+		Timeout:   15 * time.Second,
+	}
+}
+
+func (th *tokenHandler) Scheme() string {
+	return "bearer"
+}
+
+func (th *tokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error {
+	var additionalScopes []string
+	if fromParam := req.URL.Query().Get("from"); fromParam != "" {
+		additionalScopes = append(additionalScopes, RepositoryScope{
+			Repository: fromParam,
+			Actions:    []string{"pull"},
+		}.String())
+	}
+
+	token, err := th.getToken(params, additionalScopes...)
+	if err != nil {
+		return err
+	}
+
+	req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token))
+
+	return nil
+}
+
+func (th *tokenHandler) getToken(params map[string]string, additionalScopes ...string) (string, error) {
+	th.tokenLock.Lock()
+	defer th.tokenLock.Unlock()
+	scopes := make([]string, 0, len(th.scopes)+len(additionalScopes))
+	for _, scope := range th.scopes {
+		scopes = append(scopes, scope.String())
+	}
+	var addedScopes bool
+	for _, scope := range additionalScopes {
+		scopes = append(scopes, scope)
+		addedScopes = true
+	}
+
+	now := th.clock.Now()
+	if now.After(th.tokenExpiration) || addedScopes {
+		token, expiration, err := th.fetchToken(params, scopes)
+		if err != nil {
+			return "", err
+		}
+
+		// do not update cache for added scope tokens
+		if !addedScopes {
+			th.tokenCache = token
+			th.tokenExpiration = expiration
+		}
+
+		return token, nil
+	}
+
+	return th.tokenCache, nil
+}
+
+type postTokenResponse struct {
+	AccessToken  string    `json:"access_token"`
+	RefreshToken string    `json:"refresh_token"`
+	ExpiresIn    int       `json:"expires_in"`
+	IssuedAt     time.Time `json:"issued_at"`
+	Scope        string    `json:"scope"`
+}
+
+func (th *tokenHandler) fetchTokenWithOAuth(realm *url.URL, refreshToken, service string, scopes []string) (token string, expiration time.Time, err error) {
+	form := url.Values{}
+	form.Set("scope", strings.Join(scopes, " "))
+	form.Set("service", service)
+
+	clientID := th.clientID
+	if clientID == "" {
+		// Use default client, this is a required field
+		clientID = defaultClientID
+	}
+	form.Set("client_id", clientID)
+
+	if refreshToken != "" {
+		form.Set("grant_type", "refresh_token")
+		form.Set("refresh_token", refreshToken)
+	} else if th.creds != nil {
+		form.Set("grant_type", "password")
+		username, password := th.creds.Basic(realm)
+		form.Set("username", username)
+		form.Set("password", password)
+
+		// attempt to get a refresh token
+		form.Set("access_type", "offline")
+	} else {
+		// refuse to do oauth without a grant type
+		return "", time.Time{}, fmt.Errorf("no supported grant type")
+	}
+
+	resp, err := th.client().PostForm(realm.String(), form)
+	if err != nil {
+		return "", time.Time{}, err
+	}
+	defer resp.Body.Close()
+
+	if !client.SuccessStatus(resp.StatusCode) {
+		err := client.HandleErrorResponse(resp)
+		return "", time.Time{}, err
+	}
+
+	decoder := json.NewDecoder(resp.Body)
+
+	var tr postTokenResponse
+	if err = decoder.Decode(&tr); err != nil {
+		return "", time.Time{}, fmt.Errorf("unable to decode token response: %s", err)
+	}
+
+	if tr.RefreshToken != "" && tr.RefreshToken != refreshToken {
+		th.creds.SetRefreshToken(realm, service, tr.RefreshToken)
+	}
+
+	if tr.ExpiresIn < minimumTokenLifetimeSeconds {
+		// The default/minimum lifetime.
+		tr.ExpiresIn = minimumTokenLifetimeSeconds
+		logrus.Debugf("Increasing token expiration to: %d seconds", tr.ExpiresIn)
+	}
+
+	if tr.IssuedAt.IsZero() {
+		// issued_at is optional in the token response.
+		tr.IssuedAt = th.clock.Now().UTC()
+	}
+
+	return tr.AccessToken, tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second), nil
+}
+
+type getTokenResponse struct {
+	Token        string    `json:"token"`
+	AccessToken  string    `json:"access_token"`
+	ExpiresIn    int       `json:"expires_in"`
+	IssuedAt     time.Time `json:"issued_at"`
+	RefreshToken string    `json:"refresh_token"`
+}
+
+func (th *tokenHandler) fetchTokenWithBasicAuth(realm *url.URL, service string, scopes []string) (token string, expiration time.Time, err error) {
+
+	req, err := http.NewRequest("GET", realm.String(), nil)
+	if err != nil {
+		return "", time.Time{}, err
+	}
+
+	reqParams := req.URL.Query()
+
+	if service != "" {
+		reqParams.Add("service", service)
+	}
+
+	for _, scope := range scopes {
+		reqParams.Add("scope", scope)
+	}
+
+	if th.offlineAccess {
+		reqParams.Add("offline_token", "true")
+		clientID := th.clientID
+		if clientID == "" {
+			clientID = defaultClientID
+		}
+		reqParams.Add("client_id", clientID)
+	}
+
+	if th.creds != nil {
+		username, password := th.creds.Basic(realm)
+		if username != "" && password != "" {
+			reqParams.Add("account", username)
+			req.SetBasicAuth(username, password)
+		}
+	}
+
+	req.URL.RawQuery = reqParams.Encode()
+
+	resp, err := th.client().Do(req)
+	if err != nil {
+		return "", time.Time{}, err
+	}
+	defer resp.Body.Close()
+
+	if !client.SuccessStatus(resp.StatusCode) {
+		err := client.HandleErrorResponse(resp)
+		return "", time.Time{}, err
+	}
+
+	decoder := json.NewDecoder(resp.Body)
+
+	var tr getTokenResponse
+	if err = decoder.Decode(&tr); err != nil {
+		return "", time.Time{}, fmt.Errorf("unable to decode token response: %s", err)
+	}
+
+	if tr.RefreshToken != "" && th.creds != nil {
+		th.creds.SetRefreshToken(realm, service, tr.RefreshToken)
+	}
+
+	// `access_token` is equivalent to `token` and if both are specified
+	// the choice is undefined.  Canonicalize `access_token` by sticking
+	// things in `token`.
+	if tr.AccessToken != "" {
+		tr.Token = tr.AccessToken
+	}
+
+	if tr.Token == "" {
+		return "", time.Time{}, ErrNoToken
+	}
+
+	if tr.ExpiresIn < minimumTokenLifetimeSeconds {
+		// The default/minimum lifetime.
+		tr.ExpiresIn = minimumTokenLifetimeSeconds
+		logrus.Debugf("Increasing token expiration to: %d seconds", tr.ExpiresIn)
+	}
+
+	if tr.IssuedAt.IsZero() {
+		// issued_at is optional in the token response.
+		tr.IssuedAt = th.clock.Now().UTC()
+	}
+
+	return tr.Token, tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second), nil
+}
+
+func (th *tokenHandler) fetchToken(params map[string]string, scopes []string) (token string, expiration time.Time, err error) {
+	realm, ok := params["realm"]
+	if !ok {
+		return "", time.Time{}, errors.New("no realm specified for token auth challenge")
+	}
+
+	// TODO(dmcgowan): Handle empty scheme and relative realm
+	realmURL, err := url.Parse(realm)
+	if err != nil {
+		return "", time.Time{}, fmt.Errorf("invalid token auth challenge realm: %s", err)
+	}
+
+	service := params["service"]
+
+	var refreshToken string
+
+	if th.creds != nil {
+		refreshToken = th.creds.RefreshToken(realmURL, service)
+	}
+
+	if refreshToken != "" || th.forceOAuth {
+		return th.fetchTokenWithOAuth(realmURL, refreshToken, service, scopes)
+	}
+
+	return th.fetchTokenWithBasicAuth(realmURL, service, scopes)
+}
+
+type basicHandler struct {
+	creds CredentialStore
+}
+
+// NewBasicHandler creaters a new authentiation handler which adds
+// basic authentication credentials to a request.
+func NewBasicHandler(creds CredentialStore) AuthenticationHandler {
+	return &basicHandler{
+		creds: creds,
+	}
+}
+
+func (*basicHandler) Scheme() string {
+	return "basic"
+}
+
+func (bh *basicHandler) AuthorizeRequest(req *http.Request, params map[string]string) error {
+	if bh.creds != nil {
+		username, password := bh.creds.Basic(req.URL)
+		if username != "" && password != "" {
+			req.SetBasicAuth(username, password)
+			return nil
+		}
+	}
+	return ErrNoBasicAuthCredentials
+}
diff --git a/vendor/github.com/docker/distribution/registry/client/blob_writer.go b/vendor/github.com/docker/distribution/registry/client/blob_writer.go
new file mode 100644
index 0000000000000000000000000000000000000000..e3ffcb00fd60f51a451b5e1a7eee00aefdbc7bd3
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/client/blob_writer.go
@@ -0,0 +1,162 @@
+package client
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"time"
+
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/context"
+)
+
+type httpBlobUpload struct {
+	statter distribution.BlobStatter
+	client  *http.Client
+
+	uuid      string
+	startedAt time.Time
+
+	location string // always the last value of the location header.
+	offset   int64
+	closed   bool
+}
+
+func (hbu *httpBlobUpload) Reader() (io.ReadCloser, error) {
+	panic("Not implemented")
+}
+
+func (hbu *httpBlobUpload) handleErrorResponse(resp *http.Response) error {
+	if resp.StatusCode == http.StatusNotFound {
+		return distribution.ErrBlobUploadUnknown
+	}
+	return HandleErrorResponse(resp)
+}
+
+func (hbu *httpBlobUpload) ReadFrom(r io.Reader) (n int64, err error) {
+	req, err := http.NewRequest("PATCH", hbu.location, ioutil.NopCloser(r))
+	if err != nil {
+		return 0, err
+	}
+	defer req.Body.Close()
+
+	resp, err := hbu.client.Do(req)
+	if err != nil {
+		return 0, err
+	}
+
+	if !SuccessStatus(resp.StatusCode) {
+		return 0, hbu.handleErrorResponse(resp)
+	}
+
+	hbu.uuid = resp.Header.Get("Docker-Upload-UUID")
+	hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location)
+	if err != nil {
+		return 0, err
+	}
+	rng := resp.Header.Get("Range")
+	var start, end int64
+	if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil {
+		return 0, err
+	} else if n != 2 || end < start {
+		return 0, fmt.Errorf("bad range format: %s", rng)
+	}
+
+	return (end - start + 1), nil
+
+}
+
+func (hbu *httpBlobUpload) Write(p []byte) (n int, err error) {
+	req, err := http.NewRequest("PATCH", hbu.location, bytes.NewReader(p))
+	if err != nil {
+		return 0, err
+	}
+	req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", hbu.offset, hbu.offset+int64(len(p)-1)))
+	req.Header.Set("Content-Length", fmt.Sprintf("%d", len(p)))
+	req.Header.Set("Content-Type", "application/octet-stream")
+
+	resp, err := hbu.client.Do(req)
+	if err != nil {
+		return 0, err
+	}
+
+	if !SuccessStatus(resp.StatusCode) {
+		return 0, hbu.handleErrorResponse(resp)
+	}
+
+	hbu.uuid = resp.Header.Get("Docker-Upload-UUID")
+	hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location)
+	if err != nil {
+		return 0, err
+	}
+	rng := resp.Header.Get("Range")
+	var start, end int
+	if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil {
+		return 0, err
+	} else if n != 2 || end < start {
+		return 0, fmt.Errorf("bad range format: %s", rng)
+	}
+
+	return (end - start + 1), nil
+
+}
+
+func (hbu *httpBlobUpload) Size() int64 {
+	return hbu.offset
+}
+
+func (hbu *httpBlobUpload) ID() string {
+	return hbu.uuid
+}
+
+func (hbu *httpBlobUpload) StartedAt() time.Time {
+	return hbu.startedAt
+}
+
+func (hbu *httpBlobUpload) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) {
+	// TODO(dmcgowan): Check if already finished, if so just fetch
+	req, err := http.NewRequest("PUT", hbu.location, nil)
+	if err != nil {
+		return distribution.Descriptor{}, err
+	}
+
+	values := req.URL.Query()
+	values.Set("digest", desc.Digest.String())
+	req.URL.RawQuery = values.Encode()
+
+	resp, err := hbu.client.Do(req)
+	if err != nil {
+		return distribution.Descriptor{}, err
+	}
+	defer resp.Body.Close()
+
+	if !SuccessStatus(resp.StatusCode) {
+		return distribution.Descriptor{}, hbu.handleErrorResponse(resp)
+	}
+
+	return hbu.statter.Stat(ctx, desc.Digest)
+}
+
+func (hbu *httpBlobUpload) Cancel(ctx context.Context) error {
+	req, err := http.NewRequest("DELETE", hbu.location, nil)
+	if err != nil {
+		return err
+	}
+	resp, err := hbu.client.Do(req)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode == http.StatusNotFound || SuccessStatus(resp.StatusCode) {
+		return nil
+	}
+	return hbu.handleErrorResponse(resp)
+}
+
+func (hbu *httpBlobUpload) Close() error {
+	hbu.closed = true
+	return nil
+}
diff --git a/vendor/github.com/docker/distribution/registry/client/errors.go b/vendor/github.com/docker/distribution/registry/client/errors.go
new file mode 100644
index 0000000000000000000000000000000000000000..52d49d5d295f2c83fabb198628d760f02a6cf04b
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/client/errors.go
@@ -0,0 +1,139 @@
+package client
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+
+	"github.com/docker/distribution/registry/api/errcode"
+	"github.com/docker/distribution/registry/client/auth/challenge"
+)
+
+// ErrNoErrorsInBody is returned when an HTTP response body parses to an empty
+// errcode.Errors slice.
+var ErrNoErrorsInBody = errors.New("no error details found in HTTP response body")
+
+// UnexpectedHTTPStatusError is returned when an unexpected HTTP status is
+// returned when making a registry api call.
+type UnexpectedHTTPStatusError struct {
+	Status string
+}
+
+func (e *UnexpectedHTTPStatusError) Error() string {
+	return fmt.Sprintf("received unexpected HTTP status: %s", e.Status)
+}
+
+// UnexpectedHTTPResponseError is returned when an expected HTTP status code
+// is returned, but the content was unexpected and failed to be parsed.
+type UnexpectedHTTPResponseError struct {
+	ParseErr   error
+	StatusCode int
+	Response   []byte
+}
+
+func (e *UnexpectedHTTPResponseError) Error() string {
+	return fmt.Sprintf("error parsing HTTP %d response body: %s: %q", e.StatusCode, e.ParseErr.Error(), string(e.Response))
+}
+
+func parseHTTPErrorResponse(statusCode int, r io.Reader) error {
+	var errors errcode.Errors
+	body, err := ioutil.ReadAll(r)
+	if err != nil {
+		return err
+	}
+
+	// For backward compatibility, handle irregularly formatted
+	// messages that contain a "details" field.
+	var detailsErr struct {
+		Details string `json:"details"`
+	}
+	err = json.Unmarshal(body, &detailsErr)
+	if err == nil && detailsErr.Details != "" {
+		switch statusCode {
+		case http.StatusUnauthorized:
+			return errcode.ErrorCodeUnauthorized.WithMessage(detailsErr.Details)
+		case http.StatusTooManyRequests:
+			return errcode.ErrorCodeTooManyRequests.WithMessage(detailsErr.Details)
+		default:
+			return errcode.ErrorCodeUnknown.WithMessage(detailsErr.Details)
+		}
+	}
+
+	if err := json.Unmarshal(body, &errors); err != nil {
+		return &UnexpectedHTTPResponseError{
+			ParseErr:   err,
+			StatusCode: statusCode,
+			Response:   body,
+		}
+	}
+
+	if len(errors) == 0 {
+		// If there was no error specified in the body, return
+		// UnexpectedHTTPResponseError.
+		return &UnexpectedHTTPResponseError{
+			ParseErr:   ErrNoErrorsInBody,
+			StatusCode: statusCode,
+			Response:   body,
+		}
+	}
+
+	return errors
+}
+
+func makeErrorList(err error) []error {
+	if errL, ok := err.(errcode.Errors); ok {
+		return []error(errL)
+	}
+	return []error{err}
+}
+
+func mergeErrors(err1, err2 error) error {
+	return errcode.Errors(append(makeErrorList(err1), makeErrorList(err2)...))
+}
+
+// HandleErrorResponse returns error parsed from HTTP response for an
+// unsuccessful HTTP response code (in the range 400 - 499 inclusive). An
+// UnexpectedHTTPStatusError returned for response code outside of expected
+// range.
+func HandleErrorResponse(resp *http.Response) error {
+	if resp.StatusCode >= 400 && resp.StatusCode < 500 {
+		// Check for OAuth errors within the `WWW-Authenticate` header first
+		// See https://tools.ietf.org/html/rfc6750#section-3
+		for _, c := range challenge.ResponseChallenges(resp) {
+			if c.Scheme == "bearer" {
+				var err errcode.Error
+				// codes defined at https://tools.ietf.org/html/rfc6750#section-3.1
+				switch c.Parameters["error"] {
+				case "invalid_token":
+					err.Code = errcode.ErrorCodeUnauthorized
+				case "insufficient_scope":
+					err.Code = errcode.ErrorCodeDenied
+				default:
+					continue
+				}
+				if description := c.Parameters["error_description"]; description != "" {
+					err.Message = description
+				} else {
+					err.Message = err.Code.Message()
+				}
+
+				return mergeErrors(err, parseHTTPErrorResponse(resp.StatusCode, resp.Body))
+			}
+		}
+		err := parseHTTPErrorResponse(resp.StatusCode, resp.Body)
+		if uErr, ok := err.(*UnexpectedHTTPResponseError); ok && resp.StatusCode == 401 {
+			return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response)
+		}
+		return err
+	}
+	return &UnexpectedHTTPStatusError{Status: resp.Status}
+}
+
+// SuccessStatus returns true if the argument is a successful HTTP response
+// code (in the range 200 - 399 inclusive).
+func SuccessStatus(status int) bool {
+	return status >= 200 && status <= 399
+}
diff --git a/vendor/github.com/docker/distribution/registry/client/repository.go b/vendor/github.com/docker/distribution/registry/client/repository.go
new file mode 100644
index 0000000000000000000000000000000000000000..1ebd0b18394652746b3d87a12896fbe98d98ab43
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/client/repository.go
@@ -0,0 +1,853 @@
+package client
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/distribution/reference"
+	"github.com/docker/distribution/registry/api/v2"
+	"github.com/docker/distribution/registry/client/transport"
+	"github.com/docker/distribution/registry/storage/cache"
+	"github.com/docker/distribution/registry/storage/cache/memory"
+)
+
+// Registry provides an interface for calling Repositories, which returns a catalog of repositories.
+type Registry interface {
+	Repositories(ctx context.Context, repos []string, last string) (n int, err error)
+}
+
+// checkHTTPRedirect is a callback that can manipulate redirected HTTP
+// requests. It is used to preserve Accept and Range headers.
+func checkHTTPRedirect(req *http.Request, via []*http.Request) error {
+	if len(via) >= 10 {
+		return errors.New("stopped after 10 redirects")
+	}
+
+	if len(via) > 0 {
+		for headerName, headerVals := range via[0].Header {
+			if headerName != "Accept" && headerName != "Range" {
+				continue
+			}
+			for _, val := range headerVals {
+				// Don't add to redirected request if redirected
+				// request already has a header with the same
+				// name and value.
+				hasValue := false
+				for _, existingVal := range req.Header[headerName] {
+					if existingVal == val {
+						hasValue = true
+						break
+					}
+				}
+				if !hasValue {
+					req.Header.Add(headerName, val)
+				}
+			}
+		}
+	}
+
+	return nil
+}
+
+// NewRegistry creates a registry namespace which can be used to get a listing of repositories
+func NewRegistry(ctx context.Context, baseURL string, transport http.RoundTripper) (Registry, error) {
+	ub, err := v2.NewURLBuilderFromString(baseURL, false)
+	if err != nil {
+		return nil, err
+	}
+
+	client := &http.Client{
+		Transport:     transport,
+		Timeout:       1 * time.Minute,
+		CheckRedirect: checkHTTPRedirect,
+	}
+
+	return &registry{
+		client:  client,
+		ub:      ub,
+		context: ctx,
+	}, nil
+}
+
+type registry struct {
+	client  *http.Client
+	ub      *v2.URLBuilder
+	context context.Context
+}
+
+// Repositories returns a lexigraphically sorted catalog given a base URL.  The 'entries' slice will be filled up to the size
+// of the slice, starting at the value provided in 'last'.  The number of entries will be returned along with io.EOF if there
+// are no more entries
+func (r *registry) Repositories(ctx context.Context, entries []string, last string) (int, error) {
+	var numFilled int
+	var returnErr error
+
+	values := buildCatalogValues(len(entries), last)
+	u, err := r.ub.BuildCatalogURL(values)
+	if err != nil {
+		return 0, err
+	}
+
+	resp, err := r.client.Get(u)
+	if err != nil {
+		return 0, err
+	}
+	defer resp.Body.Close()
+
+	if SuccessStatus(resp.StatusCode) {
+		var ctlg struct {
+			Repositories []string `json:"repositories"`
+		}
+		decoder := json.NewDecoder(resp.Body)
+
+		if err := decoder.Decode(&ctlg); err != nil {
+			return 0, err
+		}
+
+		for cnt := range ctlg.Repositories {
+			entries[cnt] = ctlg.Repositories[cnt]
+		}
+		numFilled = len(ctlg.Repositories)
+
+		link := resp.Header.Get("Link")
+		if link == "" {
+			returnErr = io.EOF
+		}
+	} else {
+		return 0, HandleErrorResponse(resp)
+	}
+
+	return numFilled, returnErr
+}
+
+// NewRepository creates a new Repository for the given repository name and base URL.
+func NewRepository(ctx context.Context, name reference.Named, baseURL string, transport http.RoundTripper) (distribution.Repository, error) {
+	ub, err := v2.NewURLBuilderFromString(baseURL, false)
+	if err != nil {
+		return nil, err
+	}
+
+	client := &http.Client{
+		Transport:     transport,
+		CheckRedirect: checkHTTPRedirect,
+		// TODO(dmcgowan): create cookie jar
+	}
+
+	return &repository{
+		client:  client,
+		ub:      ub,
+		name:    name,
+		context: ctx,
+	}, nil
+}
+
+type repository struct {
+	client  *http.Client
+	ub      *v2.URLBuilder
+	context context.Context
+	name    reference.Named
+}
+
+func (r *repository) Named() reference.Named {
+	return r.name
+}
+
+func (r *repository) Blobs(ctx context.Context) distribution.BlobStore {
+	statter := &blobStatter{
+		name:   r.name,
+		ub:     r.ub,
+		client: r.client,
+	}
+	return &blobs{
+		name:    r.name,
+		ub:      r.ub,
+		client:  r.client,
+		statter: cache.NewCachedBlobStatter(memory.NewInMemoryBlobDescriptorCacheProvider(), statter),
+	}
+}
+
+func (r *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) {
+	// todo(richardscothern): options should be sent over the wire
+	return &manifests{
+		name:   r.name,
+		ub:     r.ub,
+		client: r.client,
+		etags:  make(map[string]string),
+	}, nil
+}
+
+func (r *repository) Tags(ctx context.Context) distribution.TagService {
+	return &tags{
+		client:  r.client,
+		ub:      r.ub,
+		context: r.context,
+		name:    r.Named(),
+	}
+}
+
+// tags implements remote tagging operations.
+type tags struct {
+	client  *http.Client
+	ub      *v2.URLBuilder
+	context context.Context
+	name    reference.Named
+}
+
+// All returns all tags
+func (t *tags) All(ctx context.Context) ([]string, error) {
+	var tags []string
+
+	u, err := t.ub.BuildTagsURL(t.name)
+	if err != nil {
+		return tags, err
+	}
+
+	for {
+		resp, err := t.client.Get(u)
+		if err != nil {
+			return tags, err
+		}
+		defer resp.Body.Close()
+
+		if SuccessStatus(resp.StatusCode) {
+			b, err := ioutil.ReadAll(resp.Body)
+			if err != nil {
+				return tags, err
+			}
+
+			tagsResponse := struct {
+				Tags []string `json:"tags"`
+			}{}
+			if err := json.Unmarshal(b, &tagsResponse); err != nil {
+				return tags, err
+			}
+			tags = append(tags, tagsResponse.Tags...)
+			if link := resp.Header.Get("Link"); link != "" {
+				u = strings.Trim(strings.Split(link, ";")[0], "<>")
+			} else {
+				return tags, nil
+			}
+		} else {
+			return tags, HandleErrorResponse(resp)
+		}
+	}
+}
+
+func descriptorFromResponse(response *http.Response) (distribution.Descriptor, error) {
+	desc := distribution.Descriptor{}
+	headers := response.Header
+
+	ctHeader := headers.Get("Content-Type")
+	if ctHeader == "" {
+		return distribution.Descriptor{}, errors.New("missing or empty Content-Type header")
+	}
+	desc.MediaType = ctHeader
+
+	digestHeader := headers.Get("Docker-Content-Digest")
+	if digestHeader == "" {
+		bytes, err := ioutil.ReadAll(response.Body)
+		if err != nil {
+			return distribution.Descriptor{}, err
+		}
+		_, desc, err := distribution.UnmarshalManifest(ctHeader, bytes)
+		if err != nil {
+			return distribution.Descriptor{}, err
+		}
+		return desc, nil
+	}
+
+	dgst, err := digest.ParseDigest(digestHeader)
+	if err != nil {
+		return distribution.Descriptor{}, err
+	}
+	desc.Digest = dgst
+
+	lengthHeader := headers.Get("Content-Length")
+	if lengthHeader == "" {
+		return distribution.Descriptor{}, errors.New("missing or empty Content-Length header")
+	}
+	length, err := strconv.ParseInt(lengthHeader, 10, 64)
+	if err != nil {
+		return distribution.Descriptor{}, err
+	}
+	desc.Size = length
+
+	return desc, nil
+
+}
+
+// Get issues a HEAD request for a Manifest against its named endpoint in order
+// to construct a descriptor for the tag.  If the registry doesn't support HEADing
+// a manifest, fallback to GET.
+func (t *tags) Get(ctx context.Context, tag string) (distribution.Descriptor, error) {
+	ref, err := reference.WithTag(t.name, tag)
+	if err != nil {
+		return distribution.Descriptor{}, err
+	}
+	u, err := t.ub.BuildManifestURL(ref)
+	if err != nil {
+		return distribution.Descriptor{}, err
+	}
+
+	// newRequest issues a manifest request with the given method, advertising
+	// every known manifest media type via Accept headers.
+	newRequest := func(method string) (*http.Response, error) {
+		req, err := http.NewRequest(method, u, nil)
+		if err != nil {
+			return nil, err
+		}
+
+		// NOTE(review): the loop variable t shadows the *tags receiver inside
+		// the loop body only; t.client below still resolves to the receiver.
+		for _, t := range distribution.ManifestMediaTypes() {
+			req.Header.Add("Accept", t)
+		}
+		resp, err := t.client.Do(req)
+		return resp, err
+	}
+
+	resp, err := newRequest("HEAD")
+	if err != nil {
+		return distribution.Descriptor{}, err
+	}
+	defer resp.Body.Close()
+
+	switch {
+	case resp.StatusCode >= 200 && resp.StatusCode < 400:
+		return descriptorFromResponse(resp)
+	default:
+		// if the response is an error - there will be no body to decode.
+		// Issue a GET request:
+		//   - for data from a server that does not handle HEAD
+		//   - to get error details in case of a failure
+		resp, err = newRequest("GET")
+		if err != nil {
+			return distribution.Descriptor{}, err
+		}
+		defer resp.Body.Close()
+
+		if resp.StatusCode >= 200 && resp.StatusCode < 400 {
+			return descriptorFromResponse(resp)
+		}
+		return distribution.Descriptor{}, HandleErrorResponse(resp)
+	}
+}
+
+// Lookup is not implemented by the HTTP registry client.
+func (t *tags) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) {
+	panic("not implemented")
+}
+
+// Tag is not implemented by the HTTP registry client.
+func (t *tags) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error {
+	panic("not implemented")
+}
+
+// Untag is not implemented by the HTTP registry client.
+func (t *tags) Untag(ctx context.Context, tag string) error {
+	panic("not implemented")
+}
+
+// manifests implements distribution.ManifestService against a remote
+// registry's HTTP API.
+type manifests struct {
+	name   reference.Named
+	ub     *v2.URLBuilder
+	client *http.Client
+	etags  map[string]string // tag or digest -> quoted etag, sent as If-None-Match
+}
+
+// Exists reports whether the manifest with the given digest is present in the
+// repository, using a HEAD request against the manifest endpoint.  A 404 is
+// reported as (false, nil); other errors are translated by HandleErrorResponse.
+func (ms *manifests) Exists(ctx context.Context, dgst digest.Digest) (bool, error) {
+	ref, err := reference.WithDigest(ms.name, dgst)
+	if err != nil {
+		return false, err
+	}
+	u, err := ms.ub.BuildManifestURL(ref)
+	if err != nil {
+		return false, err
+	}
+
+	resp, err := ms.client.Head(u)
+	if err != nil {
+		return false, err
+	}
+
+	if SuccessStatus(resp.StatusCode) {
+		return true, nil
+	} else if resp.StatusCode == http.StatusNotFound {
+		return false, nil
+	}
+	return false, HandleErrorResponse(resp)
+}
+
+// AddEtagToTag allows a client to supply an eTag to Get which will be
+// used for a conditional HTTP request.  If the eTag matches, a nil manifest
+// and ErrManifestNotModified error will be returned. etag is automatically
+// quoted when added to this map.
+func AddEtagToTag(tag, etag string) distribution.ManifestServiceOption {
+	return etagOption{tag, etag}
+}
+
+// etagOption carries a tag/etag pair for AddEtagToTag.
+type etagOption struct{ tag, etag string }
+
+// Apply stores the quoted etag on the *manifests service so later Gets can
+// send it as If-None-Match.  It only applies to the client implementation.
+func (o etagOption) Apply(ms distribution.ManifestService) error {
+	if ms, ok := ms.(*manifests); ok {
+		ms.etags[o.tag] = fmt.Sprintf(`"%s"`, o.etag)
+		return nil
+	}
+	return fmt.Errorf("etag options is a client-only option")
+}
+
+// ReturnContentDigest allows a client to receive the content digest of
+// a successful request from the 'Docker-Content-Digest' header. The
+// returned digest represents the digest which the registry uses
+// to refer to the content and can be used to delete the content.
+func ReturnContentDigest(dgst *digest.Digest) distribution.ManifestServiceOption {
+	return contentDigestOption{dgst}
+}
+
+// contentDigestOption points at the caller-owned digest to fill in.
+type contentDigestOption struct{ digest *digest.Digest }
+
+// Apply is intentionally a no-op: manifests.Get recognizes this option by
+// type assertion instead of dispatching through Apply.
+func (o contentDigestOption) Apply(ms distribution.ManifestService) error {
+	return nil
+}
+
+// Get retrieves the manifest identified by dgst, or by a tag supplied via
+// distribution.WithTagOption.  A contentDigestOption receives the value of
+// the Docker-Content-Digest response header; a cached etag for the tag or
+// digest is sent as If-None-Match and may yield ErrManifestNotModified.
+func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) {
+	var (
+		digestOrTag string
+		ref         reference.Named
+		err         error
+		contentDgst *digest.Digest
+	)
+
+	for _, option := range options {
+		if opt, ok := option.(distribution.WithTagOption); ok {
+			digestOrTag = opt.Tag
+			ref, err = reference.WithTag(ms.name, opt.Tag)
+			if err != nil {
+				return nil, err
+			}
+		} else if opt, ok := option.(contentDigestOption); ok {
+			contentDgst = opt.digest
+		} else {
+			err := option.Apply(ms)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+
+	// No tag option supplied: address the manifest by digest.
+	if digestOrTag == "" {
+		digestOrTag = dgst.String()
+		ref, err = reference.WithDigest(ms.name, dgst)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	u, err := ms.ub.BuildManifestURL(ref)
+	if err != nil {
+		return nil, err
+	}
+
+	req, err := http.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, t := range distribution.ManifestMediaTypes() {
+		req.Header.Add("Accept", t)
+	}
+
+	if _, ok := ms.etags[digestOrTag]; ok {
+		req.Header.Set("If-None-Match", ms.etags[digestOrTag])
+	}
+
+	resp, err := ms.client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode == http.StatusNotModified {
+		return nil, distribution.ErrManifestNotModified
+	} else if SuccessStatus(resp.StatusCode) {
+		if contentDgst != nil {
+			// Best effort: an unparseable header simply leaves *contentDgst unset.
+			dgst, err := digest.ParseDigest(resp.Header.Get("Docker-Content-Digest"))
+			if err == nil {
+				*contentDgst = dgst
+			}
+		}
+		mt := resp.Header.Get("Content-Type")
+		body, err := ioutil.ReadAll(resp.Body)
+
+		if err != nil {
+			return nil, err
+		}
+		m, _, err := distribution.UnmarshalManifest(mt, body)
+		if err != nil {
+			return nil, err
+		}
+		return m, nil
+	}
+	return nil, HandleErrorResponse(resp)
+}
+
+// Put puts a manifest.  A tag can be specified using an options parameter which uses some shared state to hold the
+// tag name in order to build the correct upload URL.  Without a tag option the
+// manifest is pushed by its canonical digest.  On success it returns the
+// digest the registry reports in Docker-Content-Digest.
+func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) {
+	ref := ms.name
+	var tagged bool
+
+	for _, option := range options {
+		if opt, ok := option.(distribution.WithTagOption); ok {
+			var err error
+			ref, err = reference.WithTag(ref, opt.Tag)
+			if err != nil {
+				return "", err
+			}
+			tagged = true
+		} else {
+			err := option.Apply(ms)
+			if err != nil {
+				return "", err
+			}
+		}
+	}
+	mediaType, p, err := m.Payload()
+	if err != nil {
+		return "", err
+	}
+
+	if !tagged {
+		// generate a canonical digest and Put by digest
+		_, d, err := distribution.UnmarshalManifest(mediaType, p)
+		if err != nil {
+			return "", err
+		}
+		ref, err = reference.WithDigest(ref, d.Digest)
+		if err != nil {
+			return "", err
+		}
+	}
+
+	manifestURL, err := ms.ub.BuildManifestURL(ref)
+	if err != nil {
+		return "", err
+	}
+
+	putRequest, err := http.NewRequest("PUT", manifestURL, bytes.NewReader(p))
+	if err != nil {
+		return "", err
+	}
+
+	putRequest.Header.Set("Content-Type", mediaType)
+
+	resp, err := ms.client.Do(putRequest)
+	if err != nil {
+		return "", err
+	}
+	defer resp.Body.Close()
+
+	if SuccessStatus(resp.StatusCode) {
+		dgstHeader := resp.Header.Get("Docker-Content-Digest")
+		dgst, err := digest.ParseDigest(dgstHeader)
+		if err != nil {
+			return "", err
+		}
+
+		return dgst, nil
+	}
+
+	return "", HandleErrorResponse(resp)
+}
+
+// Delete removes the manifest with the given digest via an HTTP DELETE
+// against the manifest endpoint.
+func (ms *manifests) Delete(ctx context.Context, dgst digest.Digest) error {
+	ref, err := reference.WithDigest(ms.name, dgst)
+	if err != nil {
+		return err
+	}
+	u, err := ms.ub.BuildManifestURL(ref)
+	if err != nil {
+		return err
+	}
+	req, err := http.NewRequest("DELETE", u, nil)
+	if err != nil {
+		return err
+	}
+
+	resp, err := ms.client.Do(req)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	if SuccessStatus(resp.StatusCode) {
+		return nil
+	}
+	return HandleErrorResponse(resp)
+}
+
+// todo(richardscothern): Restore interface and implementation with merge of #1050
+/*func (ms *manifests) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) {
+	panic("not supported")
+}*/
+
+// blobs implements distribution.BlobStore over the registry's HTTP blob API.
+// Stat/Clear are delegated to statter; deletion comes from the embedded
+// BlobDeleter.
+type blobs struct {
+	name   reference.Named
+	ub     *v2.URLBuilder
+	client *http.Client
+
+	statter distribution.BlobDescriptorService
+	distribution.BlobDeleter
+}
+
+// sanitizeLocation resolves a possibly relative Location header value
+// against the base request URL, returning an absolute URL string.
+func sanitizeLocation(location, base string) (string, error) {
+	baseURL, err := url.Parse(base)
+	if err != nil {
+		return "", err
+	}
+
+	locationURL, err := url.Parse(location)
+	if err != nil {
+		return "", err
+	}
+
+	return baseURL.ResolveReference(locationURL).String(), nil
+}
+
+// Stat delegates descriptor lookup to the configured statter.
+func (bs *blobs) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
+	return bs.statter.Stat(ctx, dgst)
+
+}
+
+// Get fetches the entire blob into memory via Open; callers should prefer
+// Open for large blobs.
+func (bs *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) {
+	reader, err := bs.Open(ctx, dgst)
+	if err != nil {
+		return nil, err
+	}
+	defer reader.Close()
+
+	return ioutil.ReadAll(reader)
+}
+
+// Open returns a lazy, seekable reader over the blob content.  No request is
+// issued until the first read; a 404 from the registry is mapped to
+// distribution.ErrBlobUnknown.
+func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) {
+	ref, err := reference.WithDigest(bs.name, dgst)
+	if err != nil {
+		return nil, err
+	}
+	blobURL, err := bs.ub.BuildBlobURL(ref)
+	if err != nil {
+		return nil, err
+	}
+
+	return transport.NewHTTPReadSeeker(bs.client, blobURL,
+		func(resp *http.Response) error {
+			if resp.StatusCode == http.StatusNotFound {
+				return distribution.ErrBlobUnknown
+			}
+			return HandleErrorResponse(resp)
+		}), nil
+}
+
+// ServeBlob is not implemented by the HTTP registry client.
+func (bs *blobs) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error {
+	panic("not implemented")
+}
+
+// Put uploads p as a single blob: it creates an upload session, streams the
+// bytes while computing the canonical digest via a TeeReader, and commits
+// the resulting descriptor.
+func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) {
+	writer, err := bs.Create(ctx)
+	if err != nil {
+		return distribution.Descriptor{}, err
+	}
+	dgstr := digest.Canonical.New()
+	n, err := io.Copy(writer, io.TeeReader(bytes.NewReader(p), dgstr.Hash()))
+	if err != nil {
+		return distribution.Descriptor{}, err
+	}
+	if n < int64(len(p)) {
+		return distribution.Descriptor{}, fmt.Errorf("short copy: wrote %d of %d", n, len(p))
+	}
+
+	desc := distribution.Descriptor{
+		MediaType: mediaType,
+		Size:      int64(len(p)),
+		Digest:    dgstr.Digest(),
+	}
+
+	return writer.Commit(ctx, desc)
+}
+
+// optionFunc adapts a plain function to the BlobCreateOption interface.
+type optionFunc func(interface{}) error
+
+func (f optionFunc) Apply(v interface{}) error {
+	return f(v)
+}
+
+// WithMountFrom returns a BlobCreateOption which designates that the blob should be
+// mounted from the given canonical reference.
+func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption {
+	return optionFunc(func(v interface{}) error {
+		opts, ok := v.(*distribution.CreateOptions)
+		if !ok {
+			return fmt.Errorf("unexpected options type: %T", v)
+		}
+
+		opts.Mount.ShouldMount = true
+		opts.Mount.From = ref
+
+		return nil
+	})
+}
+
+// Create starts a blob upload session.  With WithMountFrom, it instead asks
+// the registry to cross-repo mount the blob; a 201 means the mount succeeded
+// and ErrBlobMounted is returned, while a 202 yields a resumable writer.
+func (bs *blobs) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) {
+	var opts distribution.CreateOptions
+
+	for _, option := range options {
+		err := option.Apply(&opts)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	var values []url.Values
+
+	if opts.Mount.ShouldMount {
+		values = append(values, url.Values{"from": {opts.Mount.From.Name()}, "mount": {opts.Mount.From.Digest().String()}})
+	}
+
+	u, err := bs.ub.BuildBlobUploadURL(bs.name, values...)
+	if err != nil {
+		return nil, err
+	}
+
+	resp, err := bs.client.Post(u, "", nil)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	switch resp.StatusCode {
+	case http.StatusCreated:
+		// NOTE(review): this branch assumes the 201 is a mount response;
+		// opts.Mount.From would be nil if a server answered 201 without a
+		// mount having been requested — confirm servers never do this.
+		desc, err := bs.statter.Stat(ctx, opts.Mount.From.Digest())
+		if err != nil {
+			return nil, err
+		}
+		return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc}
+	case http.StatusAccepted:
+		// TODO(dmcgowan): Check for invalid UUID
+		uuid := resp.Header.Get("Docker-Upload-UUID")
+		location, err := sanitizeLocation(resp.Header.Get("Location"), u)
+		if err != nil {
+			return nil, err
+		}
+
+		return &httpBlobUpload{
+			statter:   bs.statter,
+			client:    bs.client,
+			uuid:      uuid,
+			startedAt: time.Now(),
+			location:  location,
+		}, nil
+	default:
+		return nil, HandleErrorResponse(resp)
+	}
+}
+
+// Resume is not implemented by the HTTP registry client.
+func (bs *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) {
+	panic("not implemented")
+}
+
+// Delete removes the blob by delegating to the statter's Clear.
+func (bs *blobs) Delete(ctx context.Context, dgst digest.Digest) error {
+	return bs.statter.Clear(ctx, dgst)
+}
+
+// blobStatter implements distribution.BlobDescriptorService by issuing HEAD
+// requests against the registry's blob endpoint.
+type blobStatter struct {
+	name   reference.Named
+	ub     *v2.URLBuilder
+	client *http.Client
+}
+
+// Stat builds a descriptor for dgst from the HEAD response's Content-Type
+// and Content-Length headers.  A 404 maps to distribution.ErrBlobUnknown.
+func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
+	ref, err := reference.WithDigest(bs.name, dgst)
+	if err != nil {
+		return distribution.Descriptor{}, err
+	}
+	u, err := bs.ub.BuildBlobURL(ref)
+	if err != nil {
+		return distribution.Descriptor{}, err
+	}
+
+	resp, err := bs.client.Head(u)
+	if err != nil {
+		return distribution.Descriptor{}, err
+	}
+	defer resp.Body.Close()
+
+	if SuccessStatus(resp.StatusCode) {
+		lengthHeader := resp.Header.Get("Content-Length")
+		if lengthHeader == "" {
+			return distribution.Descriptor{}, fmt.Errorf("missing content-length header for request: %s", u)
+		}
+
+		length, err := strconv.ParseInt(lengthHeader, 10, 64)
+		if err != nil {
+			return distribution.Descriptor{}, fmt.Errorf("error parsing content-length: %v", err)
+		}
+
+		return distribution.Descriptor{
+			MediaType: resp.Header.Get("Content-Type"),
+			Size:      length,
+			Digest:    dgst,
+		}, nil
+	} else if resp.StatusCode == http.StatusNotFound {
+		return distribution.Descriptor{}, distribution.ErrBlobUnknown
+	}
+	return distribution.Descriptor{}, HandleErrorResponse(resp)
+}
+
+// buildCatalogValues encodes catalog pagination parameters: "n" for the
+// maximum number of entries (omitted when non-positive) and "last" for the
+// continuation token (omitted when empty).
+func buildCatalogValues(maxEntries int, last string) url.Values {
+	values := url.Values{}
+
+	if maxEntries > 0 {
+		values.Add("n", strconv.Itoa(maxEntries))
+	}
+
+	if last != "" {
+		values.Add("last", last)
+	}
+
+	return values
+}
+
+// Clear deletes the blob with the given digest via an HTTP DELETE.
+func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error {
+	ref, err := reference.WithDigest(bs.name, dgst)
+	if err != nil {
+		return err
+	}
+	blobURL, err := bs.ub.BuildBlobURL(ref)
+	if err != nil {
+		return err
+	}
+
+	req, err := http.NewRequest("DELETE", blobURL, nil)
+	if err != nil {
+		return err
+	}
+
+	resp, err := bs.client.Do(req)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	if SuccessStatus(resp.StatusCode) {
+		return nil
+	}
+	return HandleErrorResponse(resp)
+}
+
+// SetDescriptor is a no-op: the remote registry is the source of truth and
+// descriptors cannot be written through this client.
+func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
+	return nil
+}
diff --git a/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go b/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go
new file mode 100644
index 0000000000000000000000000000000000000000..e5ff09d75642063160c190b5ff7ac72883390a48
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go
@@ -0,0 +1,251 @@
+package transport
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"os"
+	"regexp"
+	"strconv"
+)
+
+var (
+	// contentRangeRegexp parses a Content-Range header of the form
+	// "bytes <start>-<end>/<size>", where <size> is either a decimal
+	// number or "*" when the total length is unknown (RFC 7233 §4.2).
+	//
+	// NOTE: the previous pattern used `\\*`, which inside a raw string is
+	// the regexp "zero or more backslashes" and can never match a literal
+	// "*" — making the `submatches[3] == "*"` branch in reader()
+	// unreachable.  `\*` matches the literal asterisk.
+	contentRangeRegexp = regexp.MustCompile(`bytes ([0-9]+)-([0-9]+)/([0-9]+|\*)`)
+
+	// ErrWrongCodeForByteRange is returned if the client sends a request
+	// with a Range header but the server returns a 2xx or 3xx code other
+	// than 206 Partial Content.
+	ErrWrongCodeForByteRange = errors.New("expected HTTP 206 from byte range request")
+)
+
+// ReadSeekCloser combines io.ReadSeeker with io.Closer.
+type ReadSeekCloser interface {
+	io.ReadSeeker
+	io.Closer
+}
+
+// NewHTTPReadSeeker handles reading from an HTTP endpoint using a GET
+// request. When seeking and starting a read from a non-zero offset
+// the a "Range" header will be added which sets the offset.
+// TODO(dmcgowan): Move this into a separate utility package
+func NewHTTPReadSeeker(client *http.Client, url string, errorHandler func(*http.Response) error) ReadSeekCloser {
+	return &httpReadSeeker{
+		client:       client,
+		url:          url,
+		errorHandler: errorHandler,
+	}
+}
+
+// httpReadSeeker lazily issues GET (and ranged GET) requests to satisfy
+// Read/Seek over remote content.  It is not safe for concurrent use.
+type httpReadSeeker struct {
+	client *http.Client
+	url    string
+
+	// errorHandler creates an error from an unsuccessful HTTP response.
+	// This allows the error to be created with the HTTP response body
+	// without leaking the body through a returned error.
+	errorHandler func(*http.Response) error
+
+	// size is the total content length, or -1 when unknown.
+	size int64
+
+	// rc is the remote read closer.
+	rc io.ReadCloser
+	// readerOffset tracks the offset as of the last read.
+	readerOffset int64
+	// seekOffset allows Seek to override the offset. Seek changes
+	// seekOffset instead of changing readOffset directly so that
+	// connection resets can be delayed and possibly avoided if the
+	// seek is undone (i.e. seeking to the end and then back to the
+	// beginning).
+	seekOffset int64
+	// err, once set (by Close), poisons all subsequent operations.
+	err error
+}
+
+// Read reads from the remote body, re-establishing the connection at the
+// seek offset first if an intervening Seek moved the position.
+func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) {
+	if hrs.err != nil {
+		return 0, hrs.err
+	}
+
+	// If we sought to a different position, we need to reset the
+	// connection. This logic is here instead of Seek so that if
+	// a seek is undone before the next read, the connection doesn't
+	// need to be closed and reopened. A common example of this is
+	// seeking to the end to determine the length, and then seeking
+	// back to the original position.
+	if hrs.readerOffset != hrs.seekOffset {
+		hrs.reset()
+	}
+
+	hrs.readerOffset = hrs.seekOffset
+
+	rd, err := hrs.reader()
+	if err != nil {
+		return 0, err
+	}
+
+	n, err = rd.Read(p)
+	// Advance both offsets by what was actually read.
+	hrs.seekOffset += int64(n)
+	hrs.readerOffset += int64(n)
+
+	return n, err
+}
+
+// Seek records the new logical offset without immediately reconnecting;
+// Read performs the actual reset if needed.  SEEK_END requires the size to
+// be known, which may force an initial request via hrs.reader().
+func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) {
+	if hrs.err != nil {
+		return 0, hrs.err
+	}
+
+	lastReaderOffset := hrs.readerOffset
+
+	if whence == os.SEEK_SET && hrs.rc == nil {
+		// If no request has been made yet, and we are seeking to an
+		// absolute position, set the read offset as well to avoid an
+		// unnecessary request.
+		hrs.readerOffset = offset
+	}
+
+	// Ensure a connection exists so hrs.size is populated (needed for
+	// SEEK_END); roll back the offset on failure.
+	_, err := hrs.reader()
+	if err != nil {
+		hrs.readerOffset = lastReaderOffset
+		return 0, err
+	}
+
+	newOffset := hrs.seekOffset
+
+	switch whence {
+	case os.SEEK_CUR:
+		newOffset += offset
+	case os.SEEK_END:
+		if hrs.size < 0 {
+			return 0, errors.New("content length not known")
+		}
+		newOffset = hrs.size + offset
+	case os.SEEK_SET:
+		newOffset = offset
+	}
+
+	if newOffset < 0 {
+		err = errors.New("cannot seek to negative position")
+	} else {
+		hrs.seekOffset = newOffset
+	}
+
+	return hrs.seekOffset, err
+}
+
+// Close releases the underlying body and permanently marks the seeker as
+// closed; all later calls return the stored error.
+func (hrs *httpReadSeeker) Close() error {
+	if hrs.err != nil {
+		return hrs.err
+	}
+
+	// close and release reader chain
+	if hrs.rc != nil {
+		hrs.rc.Close()
+	}
+
+	hrs.rc = nil
+
+	hrs.err = errors.New("httpLayer: closed")
+
+	return nil
+}
+
+// reset drops the current remote body (if any) so the next Read reconnects
+// at the new offset.  It is a no-op after Close.
+func (hrs *httpReadSeeker) reset() {
+	if hrs.err != nil {
+		return
+	}
+	if hrs.rc != nil {
+		hrs.rc.Close()
+		hrs.rc = nil
+	}
+}
+
+// reader returns the current remote body, establishing a new GET (ranged
+// when readerOffset > 0) if none is open.  It validates any Content-Range
+// response and records the total size in hrs.size (-1 when unknown).
+func (hrs *httpReadSeeker) reader() (io.Reader, error) {
+	if hrs.err != nil {
+		return nil, hrs.err
+	}
+
+	if hrs.rc != nil {
+		return hrs.rc, nil
+	}
+
+	req, err := http.NewRequest("GET", hrs.url, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	if hrs.readerOffset > 0 {
+		// If we are at different offset, issue a range request from there.
+		req.Header.Add("Range", fmt.Sprintf("bytes=%d-", hrs.readerOffset))
+		// TODO: get context in here
+		// context.GetLogger(hrs.context).Infof("Range: %s", req.Header.Get("Range"))
+	}
+
+	// Disallow compressed transfer so byte offsets stay meaningful.
+	req.Header.Add("Accept-Encoding", "identity")
+	resp, err := hrs.client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+
+	// Normally would use client.SuccessStatus, but that would be a cyclic
+	// import
+	if resp.StatusCode >= 200 && resp.StatusCode <= 399 {
+		if hrs.readerOffset > 0 {
+			if resp.StatusCode != http.StatusPartialContent {
+				return nil, ErrWrongCodeForByteRange
+			}
+
+			contentRange := resp.Header.Get("Content-Range")
+			if contentRange == "" {
+				return nil, errors.New("no Content-Range header found in HTTP 206 response")
+			}
+
+			submatches := contentRangeRegexp.FindStringSubmatch(contentRange)
+			if len(submatches) < 4 {
+				return nil, fmt.Errorf("could not parse Content-Range header: %s", contentRange)
+			}
+
+			startByte, err := strconv.ParseUint(submatches[1], 10, 64)
+			if err != nil {
+				return nil, fmt.Errorf("could not parse start of range in Content-Range header: %s", contentRange)
+			}
+
+			if startByte != uint64(hrs.readerOffset) {
+				return nil, fmt.Errorf("received Content-Range starting at offset %d instead of requested %d", startByte, hrs.readerOffset)
+			}
+
+			endByte, err := strconv.ParseUint(submatches[2], 10, 64)
+			if err != nil {
+				return nil, fmt.Errorf("could not parse end of range in Content-Range header: %s", contentRange)
+			}
+
+			// "*" means the server does not know the total length.
+			if submatches[3] == "*" {
+				hrs.size = -1
+			} else {
+				size, err := strconv.ParseUint(submatches[3], 10, 64)
+				if err != nil {
+					return nil, fmt.Errorf("could not parse total size in Content-Range header: %s", contentRange)
+				}
+
+				if endByte+1 != size {
+					return nil, fmt.Errorf("range in Content-Range stops before the end of the content: %s", contentRange)
+				}
+
+				hrs.size = int64(size)
+			}
+		} else if resp.StatusCode == http.StatusOK {
+			hrs.size = resp.ContentLength
+		} else {
+			hrs.size = -1
+		}
+		hrs.rc = resp.Body
+	} else {
+		defer resp.Body.Close()
+		if hrs.errorHandler != nil {
+			return nil, hrs.errorHandler(resp)
+		}
+		return nil, fmt.Errorf("unexpected status resolving reader: %v", resp.Status)
+	}
+
+	return hrs.rc, nil
+}
diff --git a/vendor/github.com/docker/distribution/registry/client/transport/transport.go b/vendor/github.com/docker/distribution/registry/client/transport/transport.go
new file mode 100644
index 0000000000000000000000000000000000000000..30e45fab0f73d6a91ce1d746aae7f6306d2f6887
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/client/transport/transport.go
@@ -0,0 +1,147 @@
+package transport
+
+import (
+	"io"
+	"net/http"
+	"sync"
+)
+
+// RequestModifier represents an object which will do an inplace
+// modification of an HTTP request.
+type RequestModifier interface {
+	ModifyRequest(*http.Request) error
+}
+
+// headerModifier adapts an http.Header into a RequestModifier.
+type headerModifier http.Header
+
+// NewHeaderRequestModifier returns a new RequestModifier which will
+// add the given headers to a request.
+func NewHeaderRequestModifier(header http.Header) RequestModifier {
+	return headerModifier(header)
+}
+
+// ModifyRequest appends (never replaces) each stored header value onto the
+// request's existing headers.
+func (h headerModifier) ModifyRequest(req *http.Request) error {
+	for k, s := range http.Header(h) {
+		req.Header[k] = append(req.Header[k], s...)
+	}
+
+	return nil
+}
+
+// NewTransport creates a new transport which will apply modifiers to
+// the request on a RoundTrip call.
+func NewTransport(base http.RoundTripper, modifiers ...RequestModifier) http.RoundTripper {
+	return &transport{
+		Modifiers: modifiers,
+		Base:      base,
+	}
+}
+
+// transport is an http.RoundTripper that makes HTTP requests after
+// copying and modifying the request
+type transport struct {
+	Modifiers []RequestModifier
+	Base      http.RoundTripper // nil means http.DefaultTransport
+
+	mu     sync.Mutex                      // guards modReq
+	modReq map[*http.Request]*http.Request // original -> modified
+}
+
+// RoundTrip authorizes and authenticates the request with an
+// access token. If no token exists or token is expired,
+// tries to refresh/fetch a new token.  The original request is never
+// mutated; modifiers run on a clone, and the original->clone mapping is
+// kept until the response body hits EOF so CancelRequest can find it.
+func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) {
+	req2 := cloneRequest(req)
+	for _, modifier := range t.Modifiers {
+		if err := modifier.ModifyRequest(req2); err != nil {
+			return nil, err
+		}
+	}
+
+	t.setModReq(req, req2)
+	res, err := t.base().RoundTrip(req2)
+	if err != nil {
+		t.setModReq(req, nil)
+		return nil, err
+	}
+	// Drop the mapping once the body is fully consumed or closed.
+	res.Body = &onEOFReader{
+		rc: res.Body,
+		fn: func() { t.setModReq(req, nil) },
+	}
+	return res, nil
+}
+
+// CancelRequest cancels an in-flight request by closing its connection.
+// It translates the caller's original request into the modified clone that
+// was actually sent, then forwards cancellation to the base transport if it
+// supports it.
+func (t *transport) CancelRequest(req *http.Request) {
+	type canceler interface {
+		CancelRequest(*http.Request)
+	}
+	if cr, ok := t.base().(canceler); ok {
+		t.mu.Lock()
+		modReq := t.modReq[req]
+		delete(t.modReq, req)
+		t.mu.Unlock()
+		cr.CancelRequest(modReq)
+	}
+}
+
+// base returns the configured RoundTripper, defaulting to
+// http.DefaultTransport.
+func (t *transport) base() http.RoundTripper {
+	if t.Base != nil {
+		return t.Base
+	}
+	return http.DefaultTransport
+}
+
+// setModReq records (or, when mod is nil, removes) the original->modified
+// request mapping under the mutex, lazily allocating the map.
+func (t *transport) setModReq(orig, mod *http.Request) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	if t.modReq == nil {
+		t.modReq = make(map[*http.Request]*http.Request)
+	}
+	if mod == nil {
+		delete(t.modReq, orig)
+	} else {
+		t.modReq[orig] = mod
+	}
+}
+
+// cloneRequest returns a clone of the provided *http.Request.
+// The clone is a shallow copy of the struct and its Header map.
+func cloneRequest(r *http.Request) *http.Request {
+	// shallow copy of the struct
+	r2 := new(http.Request)
+	*r2 = *r
+	// deep copy of the Header
+	r2.Header = make(http.Header, len(r.Header))
+	for k, s := range r.Header {
+		r2.Header[k] = append([]string(nil), s...)
+	}
+
+	return r2
+}
+
+// onEOFReader wraps a ReadCloser and invokes fn exactly once, on the first
+// EOF or on Close, whichever happens first.
+type onEOFReader struct {
+	rc io.ReadCloser
+	fn func() // cleared after first invocation
+}
+
+func (r *onEOFReader) Read(p []byte) (n int, err error) {
+	n, err = r.rc.Read(p)
+	if err == io.EOF {
+		r.runFunc()
+	}
+	return
+}
+
+func (r *onEOFReader) Close() error {
+	err := r.rc.Close()
+	r.runFunc()
+	return err
+}
+
+// runFunc fires the callback once and nils it so repeated EOF/Close calls
+// are harmless.
+func (r *onEOFReader) runFunc() {
+	if fn := r.fn; fn != nil {
+		fn()
+		r.fn = nil
+	}
+}
diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/cache.go b/vendor/github.com/docker/distribution/registry/storage/cache/cache.go
new file mode 100644
index 0000000000000000000000000000000000000000..10a3909197cba501432f3039413ce7e05fbd7856
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/storage/cache/cache.go
@@ -0,0 +1,35 @@
+// Package cache provides facilities to speed up access to the storage
+// backend.
+package cache
+
+import (
+	"fmt"
+
+	"github.com/docker/distribution"
+)
+
+// BlobDescriptorCacheProvider provides repository scoped
+// BlobDescriptorService cache instances and a global descriptor cache.
+type BlobDescriptorCacheProvider interface {
+	distribution.BlobDescriptorService
+
+	RepositoryScoped(repo string) (distribution.BlobDescriptorService, error)
+}
+
+// ValidateDescriptor provides a helper function to ensure that caches have
+// common criteria for admitting descriptors.  A descriptor must carry a
+// valid digest, a non-negative size, and a non-empty media type.
+func ValidateDescriptor(desc distribution.Descriptor) error {
+	if err := desc.Digest.Validate(); err != nil {
+		return err
+	}
+
+	if desc.Size < 0 {
+		return fmt.Errorf("cache: invalid length in descriptor: %v < 0", desc.Size)
+	}
+
+	if desc.MediaType == "" {
+		return fmt.Errorf("cache: empty mediatype on descriptor: %v", desc)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go b/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go
new file mode 100644
index 0000000000000000000000000000000000000000..94ca8a90c76b7cc62df36225bfb3ade1fcfa9b84
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go
@@ -0,0 +1,101 @@
+package cache
+
+import (
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/digest"
+
+	"github.com/docker/distribution"
+)
+
+// Metrics is used to hold metric counters
+// related to the number of times a cache was
+// hit or missed.
+type Metrics struct {
+	Requests uint64
+	Hits     uint64
+	Misses   uint64
+}
+
+// MetricsTracker represents a metric tracker
+// which simply counts the number of hits and misses.
+type MetricsTracker interface {
+	Hit()
+	Miss()
+	Metrics() Metrics
+}
+
+// cachedBlobStatter consults cache first and falls back to backend,
+// write-through populating the cache on a miss.  tracker may be nil.
+type cachedBlobStatter struct {
+	cache   distribution.BlobDescriptorService
+	backend distribution.BlobDescriptorService
+	tracker MetricsTracker
+}
+
+// NewCachedBlobStatter creates a new statter which prefers a cache and
+// falls back to a backend.
+func NewCachedBlobStatter(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService) distribution.BlobDescriptorService {
+	return &cachedBlobStatter{
+		cache:   cache,
+		backend: backend,
+	}
+}
+
+// NewCachedBlobStatterWithMetrics creates a new statter which prefers a cache and
+// falls back to a backend. Hits and misses will send to the tracker.
+func NewCachedBlobStatterWithMetrics(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService, tracker MetricsTracker) distribution.BlobStatter {
+	return &cachedBlobStatter{
+		cache:   cache,
+		backend: backend,
+		tracker: tracker,
+	}
+}
+
+// Stat returns the cached descriptor when available; otherwise it stats the
+// backend and best-effort populates the cache.  Cache errors other than
+// ErrBlobUnknown are logged, never returned.
+func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
+	desc, err := cbds.cache.Stat(ctx, dgst)
+	if err != nil {
+		if err != distribution.ErrBlobUnknown {
+			context.GetLogger(ctx).Errorf("error retrieving descriptor from cache: %v", err)
+		}
+
+		goto fallback
+	}
+
+	if cbds.tracker != nil {
+		cbds.tracker.Hit()
+	}
+	return desc, nil
+fallback:
+	if cbds.tracker != nil {
+		cbds.tracker.Miss()
+	}
+	desc, err = cbds.backend.Stat(ctx, dgst)
+	if err != nil {
+		return desc, err
+	}
+
+	// Populate the cache for the next lookup; failure here is non-fatal.
+	if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil {
+		context.GetLogger(ctx).Errorf("error adding descriptor %v to cache: %v", desc.Digest, err)
+	}
+
+	return desc, err
+
+}
+
+// Clear removes the digest from both the cache and the backend; the first
+// failure aborts.
+func (cbds *cachedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) error {
+	err := cbds.cache.Clear(ctx, dgst)
+	if err != nil {
+		return err
+	}
+
+	err = cbds.backend.Clear(ctx, dgst)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// SetDescriptor writes only to the cache (the backend is read-through);
+// cache errors are logged and swallowed.
+func (cbds *cachedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
+	if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil {
+		context.GetLogger(ctx).Errorf("error adding descriptor %v to cache: %v", desc.Digest, err)
+	}
+	return nil
+}
diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go b/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go
new file mode 100644
index 0000000000000000000000000000000000000000..cf125e187f6d95305f10a136126833000a26bb64
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go
@@ -0,0 +1,179 @@
+package memory
+
+import (
+	"sync"
+
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/distribution/reference"
+	"github.com/docker/distribution/registry/storage/cache"
+)
+
+type inMemoryBlobDescriptorCacheProvider struct {
+	global       *mapBlobDescriptorCache
+	repositories map[string]*mapBlobDescriptorCache
+	mu           sync.RWMutex
+}
+
+// NewInMemoryBlobDescriptorCacheProvider returns a new mapped-based cache for
+// storing blob descriptor data.
+func NewInMemoryBlobDescriptorCacheProvider() cache.BlobDescriptorCacheProvider {
+	return &inMemoryBlobDescriptorCacheProvider{
+		global:       newMapBlobDescriptorCache(),
+		repositories: make(map[string]*mapBlobDescriptorCache),
+	}
+}
+
+func (imbdcp *inMemoryBlobDescriptorCacheProvider) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) {
+	if _, err := reference.ParseNamed(repo); err != nil {
+		return nil, err
+	}
+
+	imbdcp.mu.RLock()
+	defer imbdcp.mu.RUnlock()
+
+	return &repositoryScopedInMemoryBlobDescriptorCache{
+		repo:       repo,
+		parent:     imbdcp,
+		repository: imbdcp.repositories[repo],
+	}, nil
+}
+
+func (imbdcp *inMemoryBlobDescriptorCacheProvider) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
+	return imbdcp.global.Stat(ctx, dgst)
+}
+
+func (imbdcp *inMemoryBlobDescriptorCacheProvider) Clear(ctx context.Context, dgst digest.Digest) error {
+	return imbdcp.global.Clear(ctx, dgst)
+}
+
+func (imbdcp *inMemoryBlobDescriptorCacheProvider) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
+	_, err := imbdcp.Stat(ctx, dgst)
+	if err == distribution.ErrBlobUnknown {
+
+		if dgst.Algorithm() != desc.Digest.Algorithm() && dgst != desc.Digest {
+			// if the digests differ, set the other canonical mapping
+			if err := imbdcp.global.SetDescriptor(ctx, desc.Digest, desc); err != nil {
+				return err
+			}
+		}
+
+		// unknown, just set it
+		return imbdcp.global.SetDescriptor(ctx, dgst, desc)
+	}
+
+	// we already know it, do nothing
+	return err
+}
+
+// repositoryScopedInMemoryBlobDescriptorCache provides the request scoped
+// repository cache. Instances are not thread-safe but the delegated
+// operations are.
+type repositoryScopedInMemoryBlobDescriptorCache struct {
+	repo       string
+	parent     *inMemoryBlobDescriptorCacheProvider // allows lazy allocation of repo's map
+	repository *mapBlobDescriptorCache
+}
+
+func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
+	rsimbdcp.parent.mu.Lock()
+	repo := rsimbdcp.repository
+	rsimbdcp.parent.mu.Unlock()
+
+	if repo == nil {
+		return distribution.Descriptor{}, distribution.ErrBlobUnknown
+	}
+
+	return repo.Stat(ctx, dgst)
+}
+
+func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error {
+	rsimbdcp.parent.mu.Lock()
+	repo := rsimbdcp.repository
+	rsimbdcp.parent.mu.Unlock()
+
+	if repo == nil {
+		return distribution.ErrBlobUnknown
+	}
+
+	return repo.Clear(ctx, dgst)
+}
+
+func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
+	rsimbdcp.parent.mu.Lock()
+	repo := rsimbdcp.repository
+	if repo == nil {
+		// allocate map since we are setting it now.
+		var ok bool
+		// have to read back value since we may have allocated elsewhere.
+		repo, ok = rsimbdcp.parent.repositories[rsimbdcp.repo]
+		if !ok {
+			repo = newMapBlobDescriptorCache()
+			rsimbdcp.parent.repositories[rsimbdcp.repo] = repo
+		}
+		rsimbdcp.repository = repo
+	}
+	rsimbdcp.parent.mu.Unlock()
+
+	if err := repo.SetDescriptor(ctx, dgst, desc); err != nil {
+		return err
+	}
+
+	return rsimbdcp.parent.SetDescriptor(ctx, dgst, desc)
+}
+
+// mapBlobDescriptorCache provides a simple map-based implementation of the
+// descriptor cache.
+type mapBlobDescriptorCache struct {
+	descriptors map[digest.Digest]distribution.Descriptor
+	mu          sync.RWMutex
+}
+
+var _ distribution.BlobDescriptorService = &mapBlobDescriptorCache{}
+
+func newMapBlobDescriptorCache() *mapBlobDescriptorCache {
+	return &mapBlobDescriptorCache{
+		descriptors: make(map[digest.Digest]distribution.Descriptor),
+	}
+}
+
+func (mbdc *mapBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
+	if err := dgst.Validate(); err != nil {
+		return distribution.Descriptor{}, err
+	}
+
+	mbdc.mu.RLock()
+	defer mbdc.mu.RUnlock()
+
+	desc, ok := mbdc.descriptors[dgst]
+	if !ok {
+		return distribution.Descriptor{}, distribution.ErrBlobUnknown
+	}
+
+	return desc, nil
+}
+
+func (mbdc *mapBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error {
+	mbdc.mu.Lock()
+	defer mbdc.mu.Unlock()
+
+	delete(mbdc.descriptors, dgst)
+	return nil
+}
+
+func (mbdc *mapBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
+	if err := dgst.Validate(); err != nil {
+		return err
+	}
+
+	if err := cache.ValidateDescriptor(desc); err != nil {
+		return err
+	}
+
+	mbdc.mu.Lock()
+	defer mbdc.mu.Unlock()
+
+	mbdc.descriptors[dgst] = desc
+	return nil
+}
diff --git a/vendor/github.com/docker/distribution/tags.go b/vendor/github.com/docker/distribution/tags.go
new file mode 100644
index 0000000000000000000000000000000000000000..503056596371154a76354cc0f787bd03ef3532ce
--- /dev/null
+++ b/vendor/github.com/docker/distribution/tags.go
@@ -0,0 +1,27 @@
+package distribution
+
+import (
+	"github.com/docker/distribution/context"
+)
+
+// TagService provides access to information about tagged objects.
+type TagService interface {
+	// Get retrieves the descriptor identified by the tag. Some
+	// implementations may differentiate between "trusted" tags and
+	// "untrusted" tags. If a tag is "untrusted", the mapping will be returned
+	// as an ErrTagUntrusted error, with the target descriptor.
+	Get(ctx context.Context, tag string) (Descriptor, error)
+
+	// Tag associates the tag with the provided descriptor, updating the
+	// current association, if needed.
+	Tag(ctx context.Context, tag string, desc Descriptor) error
+
+	// Untag removes the given tag association
+	Untag(ctx context.Context, tag string) error
+
+	// All returns the set of tags managed by this tag service
+	All(ctx context.Context) ([]string, error)
+
+	// Lookup returns the set of tags referencing the given digest.
+	Lookup(ctx context.Context, digest Descriptor) ([]string, error)
+}
diff --git a/vendor/github.com/docker/distribution/uuid/uuid.go b/vendor/github.com/docker/distribution/uuid/uuid.go
new file mode 100644
index 0000000000000000000000000000000000000000..d433ccaf512d7a4a08fb812106e565dc5708a987
--- /dev/null
+++ b/vendor/github.com/docker/distribution/uuid/uuid.go
@@ -0,0 +1,126 @@
+// Package uuid provides simple UUID generation. Only version 4 style UUIDs
+// can be generated.
+//
+// Please see http://tools.ietf.org/html/rfc4122 for details on UUIDs.
+package uuid
+
+import (
+	"crypto/rand"
+	"fmt"
+	"io"
+	"os"
+	"syscall"
+	"time"
+)
+
+const (
+	// Bits is the number of bits in a UUID
+	Bits = 128
+
+	// Size is the number of bytes in a UUID
+	Size = Bits / 8
+
+	format = "%08x-%04x-%04x-%04x-%012x"
+)
+
+var (
+	// ErrUUIDInvalid indicates a parsed string is not a valid uuid.
+	ErrUUIDInvalid = fmt.Errorf("invalid uuid")
+
+	// Loggerf can be used to override the default logging destination. Such
+	// log messages in this library should be logged at warning or higher.
+	Loggerf = func(format string, args ...interface{}) {}
+)
+
+// UUID represents a UUID value. UUIDs can be compared and set to other values
+// and accessed by byte.
+type UUID [Size]byte
+
+// Generate creates a new, version 4 uuid.
+func Generate() (u UUID) {
+	const (
+		// ensures we backoff for less than 450ms total. Use the following to
+		// select new value, in units of 10ms:
+		// 	n*(n+1)/2 = d -> n^2 + n - 2d -> n = (sqrt(8d + 1) - 1)/2
+		maxretries = 9
+		backoff    = time.Millisecond * 10
+	)
+
+	var (
+		totalBackoff time.Duration
+		count        int
+		retries      int
+	)
+
+	for {
+		// This should never block but the read may fail. Because of this,
+		// we just try to read the random number generator until we get
+		// something. This is a very rare condition but may happen.
+		b := time.Duration(retries) * backoff
+		time.Sleep(b)
+		totalBackoff += b
+
+		n, err := io.ReadFull(rand.Reader, u[count:])
+		if err != nil {
+			if retryOnError(err) && retries < maxretries {
+				count += n
+				retries++
+				Loggerf("error generating version 4 uuid, retrying: %v", err)
+				continue
+			}
+
+			// Any other errors represent a system problem. What did someone
+			// do to /dev/urandom?
+			panic(fmt.Errorf("error reading random number generator, retried for %v: %v", totalBackoff.String(), err))
+		}
+
+		break
+	}
+
+	u[6] = (u[6] & 0x0f) | 0x40 // set version byte
+	u[8] = (u[8] & 0x3f) | 0x80 // set high order byte 0b10{8,9,a,b}
+
+	return u
+}
+
+// Parse attempts to extract a uuid from the string or returns an error.
+func Parse(s string) (u UUID, err error) {
+	if len(s) != 36 {
+		return UUID{}, ErrUUIDInvalid
+	}
+
+	// create stack addresses for each section of the uuid.
+	p := make([][]byte, 5)
+
+	if _, err := fmt.Sscanf(s, format, &p[0], &p[1], &p[2], &p[3], &p[4]); err != nil {
+		return u, err
+	}
+
+	copy(u[0:4], p[0])
+	copy(u[4:6], p[1])
+	copy(u[6:8], p[2])
+	copy(u[8:10], p[3])
+	copy(u[10:16], p[4])
+
+	return
+}
+
+func (u UUID) String() string {
+	return fmt.Sprintf(format, u[:4], u[4:6], u[6:8], u[8:10], u[10:])
+}
+
+// retryOnError tries to detect whether or not retrying would be fruitful.
+func retryOnError(err error) bool {
+	switch err := err.(type) {
+	case *os.PathError:
+		return retryOnError(err.Err) // unpack the target error
+	case syscall.Errno:
+		if err == syscall.EPERM {
+			// EPERM represents an entropy pool exhaustion, a condition under
+			// which we backoff and retry.
+			return true
+		}
+	}
+
+	return false
+}
diff --git a/vendor/github.com/docker/docker-credential-helpers/LICENSE b/vendor/github.com/docker/docker-credential-helpers/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..1ea555e2af0df8a92a2f32f5b91a565ab55effd9
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/LICENSE
@@ -0,0 +1,20 @@
+Copyright (c) 2016 David Calavera
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/docker/docker-credential-helpers/client/client.go b/vendor/github.com/docker/docker-credential-helpers/client/client.go
new file mode 100644
index 0000000000000000000000000000000000000000..d1d0434cb558d21c4a2e59929748e4af4985c3ad
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/client/client.go
@@ -0,0 +1,121 @@
+package client
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"strings"
+
+	"github.com/docker/docker-credential-helpers/credentials"
+)
+
+// isValidCredsMessage checks if 'msg' contains invalid credentials error message.
+// It returns whether the logs are free of invalid credentials errors and the error if it isn't.
+// error values can be errCredentialsMissingServerURL or errCredentialsMissingUsername.
+func isValidCredsMessage(msg string) error {
+	if credentials.IsCredentialsMissingServerURLMessage(msg) {
+		return credentials.NewErrCredentialsMissingServerURL()
+	}
+
+	if credentials.IsCredentialsMissingUsernameMessage(msg) {
+		return credentials.NewErrCredentialsMissingUsername()
+	}
+
+	return nil
+}
+
+// Store uses an external program to save credentials.
+func Store(program ProgramFunc, creds *credentials.Credentials) error {
+	cmd := program("store")
+
+	buffer := new(bytes.Buffer)
+	if err := json.NewEncoder(buffer).Encode(creds); err != nil {
+		return err
+	}
+	cmd.Input(buffer)
+
+	out, err := cmd.Output()
+	if err != nil {
+		t := strings.TrimSpace(string(out))
+
+		if isValidErr := isValidCredsMessage(t); isValidErr != nil {
+			err = isValidErr
+		}
+
+		return fmt.Errorf("error storing credentials - err: %v, out: `%s`", err, t)
+	}
+
+	return nil
+}
+
+// Get executes an external program to get the credentials from a native store.
+func Get(program ProgramFunc, serverURL string) (*credentials.Credentials, error) {
+	cmd := program("get")
+	cmd.Input(strings.NewReader(serverURL))
+
+	out, err := cmd.Output()
+	if err != nil {
+		t := strings.TrimSpace(string(out))
+
+		if credentials.IsErrCredentialsNotFoundMessage(t) {
+			return nil, credentials.NewErrCredentialsNotFound()
+		}
+
+		if isValidErr := isValidCredsMessage(t); isValidErr != nil {
+			err = isValidErr
+		}
+
+		return nil, fmt.Errorf("error getting credentials - err: %v, out: `%s`", err, t)
+	}
+
+	resp := &credentials.Credentials{
+		ServerURL: serverURL,
+	}
+
+	if err := json.NewDecoder(bytes.NewReader(out)).Decode(resp); err != nil {
+		return nil, err
+	}
+
+	return resp, nil
+}
+
+// Erase executes a program to remove the server credentials from the native store.
+func Erase(program ProgramFunc, serverURL string) error {
+	cmd := program("erase")
+	cmd.Input(strings.NewReader(serverURL))
+	out, err := cmd.Output()
+	if err != nil {
+		t := strings.TrimSpace(string(out))
+
+		if isValidErr := isValidCredsMessage(t); isValidErr != nil {
+			err = isValidErr
+		}
+
+		return fmt.Errorf("error erasing credentials - err: %v, out: `%s`", err, t)
+	}
+
+	return nil
+}
+
+// List executes a program to list server credentials in the native store.
+func List(program ProgramFunc) (map[string]string, error) {
+	cmd := program("list")
+	cmd.Input(strings.NewReader("unused"))
+	out, err := cmd.Output()
+	if err != nil {
+		t := strings.TrimSpace(string(out))
+
+		if isValidErr := isValidCredsMessage(t); isValidErr != nil {
+			err = isValidErr
+		}
+
+		return nil, fmt.Errorf("error listing credentials - err: %v, out: `%s`", err, t)
+	}
+
+	var resp map[string]string
+	if err = json.NewDecoder(bytes.NewReader(out)).Decode(&resp); err != nil {
+		return nil, err
+	}
+
+	return resp, nil
+}
diff --git a/vendor/github.com/docker/docker-credential-helpers/client/command.go b/vendor/github.com/docker/docker-credential-helpers/client/command.go
new file mode 100644
index 0000000000000000000000000000000000000000..a144d5ac1800faeec0afb4c4abf874d072b06a8b
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/client/command.go
@@ -0,0 +1,44 @@
+package client
+
+import (
+	"io"
+	"os"
+	"os/exec"
+)
+
+// Program is an interface to execute external programs.
+type Program interface {
+	Output() ([]byte, error)
+	Input(in io.Reader)
+}
+
+// ProgramFunc is a type of function that initializes programs based on arguments.
+type ProgramFunc func(args ...string) Program
+
+// NewShellProgramFunc creates programs that are executed in a Shell.
+func NewShellProgramFunc(name string) ProgramFunc {
+	return func(args ...string) Program {
+		return &Shell{cmd: newCmdRedirectErr(name, args)}
+	}
+}
+
+func newCmdRedirectErr(name string, args []string) *exec.Cmd {
+	newCmd := exec.Command(name, args...)
+	newCmd.Stderr = os.Stderr
+	return newCmd
+}
+
+// Shell invokes shell commands to talk with a remote credentials helper.
+type Shell struct {
+	cmd *exec.Cmd
+}
+
+// Output returns responses from the remote credentials helper.
+func (s *Shell) Output() ([]byte, error) {
+	return s.cmd.Output()
+}
+
+// Input sets the input to send to a remote credentials helper.
+func (s *Shell) Input(in io.Reader) {
+	s.cmd.Stdin = in
+}
diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go b/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go
new file mode 100644
index 0000000000000000000000000000000000000000..da8b594e7f89e6fee49a5033bc3ff574e6bb06bf
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go
@@ -0,0 +1,186 @@
+package credentials
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"os"
+	"strings"
+)
+
+// Credentials holds the information shared between docker and the credentials store.
+type Credentials struct {
+	ServerURL string
+	Username  string
+	Secret    string
+}
+
+// isValid checks the integrity of Credentials object such that no credentials lack
+// a server URL or a username.
+// It returns whether the credentials are valid and the error if it isn't.
+// error values can be errCredentialsMissingServerURL or errCredentialsMissingUsername
+func (c *Credentials) isValid() (bool, error) {
+	if len(c.ServerURL) == 0 {
+		return false, NewErrCredentialsMissingServerURL()
+	}
+
+	if len(c.Username) == 0 {
+		return false, NewErrCredentialsMissingUsername()
+	}
+
+	return true, nil
+}
+
+// CredsLabel holds the way Docker credentials should be labeled as such in credentials stores that allow labelling.
+// That label allows to filter out non-Docker credentials too at lookup/search in macOS keychain,
+// Windows credentials manager and Linux libsecret. Default value is "Docker Credentials"
+var CredsLabel = "Docker Credentials"
+
+// SetCredsLabel is a simple setter for CredsLabel
+func SetCredsLabel(label string) {
+	CredsLabel = label
+}
+
+// Serve initializes the credentials helper and parses the action argument.
+// This function is designed to be called from a command line interface.
+// It uses os.Args[1] as the key for the action.
+// It uses os.Stdin as input and os.Stdout as output.
+// This function terminates the program with os.Exit(1) if there is an error.
+func Serve(helper Helper) {
+	var err error
+	if len(os.Args) != 2 {
+		err = fmt.Errorf("Usage: %s <store|get|erase|list|version>", os.Args[0])
+	}
+
+	if err == nil {
+		err = HandleCommand(helper, os.Args[1], os.Stdin, os.Stdout)
+	}
+
+	if err != nil {
+		fmt.Fprintf(os.Stdout, "%v\n", err)
+		os.Exit(1)
+	}
+}
+
+// HandleCommand uses a helper and a key to run a credential action.
+func HandleCommand(helper Helper, key string, in io.Reader, out io.Writer) error {
+	switch key {
+	case "store":
+		return Store(helper, in)
+	case "get":
+		return Get(helper, in, out)
+	case "erase":
+		return Erase(helper, in)
+	case "list":
+		return List(helper, out)
+	case "version":
+		return PrintVersion(out)
+	}
+	return fmt.Errorf("Unknown credential action `%s`", key)
+}
+
+// Store uses a helper and an input reader to save credentials.
+// The reader must contain the JSON serialization of a Credentials struct.
+func Store(helper Helper, reader io.Reader) error {
+	scanner := bufio.NewScanner(reader)
+
+	buffer := new(bytes.Buffer)
+	for scanner.Scan() {
+		buffer.Write(scanner.Bytes())
+	}
+
+	if err := scanner.Err(); err != nil && err != io.EOF {
+		return err
+	}
+
+	var creds Credentials
+	if err := json.NewDecoder(buffer).Decode(&creds); err != nil {
+		return err
+	}
+
+	if ok, err := creds.isValid(); !ok {
+		return err
+	}
+
+	return helper.Add(&creds)
+}
+
+// Get retrieves the credentials for a given server url.
+// The reader must contain the server URL to search.
+// The writer is used to write the JSON serialization of the credentials.
+func Get(helper Helper, reader io.Reader, writer io.Writer) error {
+	scanner := bufio.NewScanner(reader)
+
+	buffer := new(bytes.Buffer)
+	for scanner.Scan() {
+		buffer.Write(scanner.Bytes())
+	}
+
+	if err := scanner.Err(); err != nil && err != io.EOF {
+		return err
+	}
+
+	serverURL := strings.TrimSpace(buffer.String())
+	if len(serverURL) == 0 {
+		return NewErrCredentialsMissingServerURL()
+	}
+
+	username, secret, err := helper.Get(serverURL)
+	if err != nil {
+		return err
+	}
+
+	resp := Credentials{
+		ServerURL: serverURL,
+		Username:  username,
+		Secret:    secret,
+	}
+
+	buffer.Reset()
+	if err := json.NewEncoder(buffer).Encode(resp); err != nil {
+		return err
+	}
+
+	fmt.Fprint(writer, buffer.String())
+	return nil
+}
+
+// Erase removes credentials from the store.
+// The reader must contain the server URL to remove.
+func Erase(helper Helper, reader io.Reader) error {
+	scanner := bufio.NewScanner(reader)
+
+	buffer := new(bytes.Buffer)
+	for scanner.Scan() {
+		buffer.Write(scanner.Bytes())
+	}
+
+	if err := scanner.Err(); err != nil && err != io.EOF {
+		return err
+	}
+
+	serverURL := strings.TrimSpace(buffer.String())
+	if len(serverURL) == 0 {
+		return NewErrCredentialsMissingServerURL()
+	}
+
+	return helper.Delete(serverURL)
+}
+
+//List returns all the serverURLs of keys in
+//the OS store as a list of strings
+func List(helper Helper, writer io.Writer) error {
+	accts, err := helper.List()
+	if err != nil {
+		return err
+	}
+	return json.NewEncoder(writer).Encode(accts)
+}
+
+//PrintVersion outputs the current version.
+func PrintVersion(writer io.Writer) error {
+	fmt.Fprintln(writer, Version)
+	return nil
+}
diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/error.go b/vendor/github.com/docker/docker-credential-helpers/credentials/error.go
new file mode 100644
index 0000000000000000000000000000000000000000..fe6a5aef45c08b1c1c190d5255cf2c0b62bb4781
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/credentials/error.go
@@ -0,0 +1,102 @@
+package credentials
+
+const (
+	// ErrCredentialsNotFound standardizes the not found error, so every helper returns
+	// the same message and docker can handle it properly.
+	errCredentialsNotFoundMessage = "credentials not found in native keychain"
+
+	// ErrCredentialsMissingServerURL and ErrCredentialsMissingUsername standardize
+	// invalid credentials or credentials management operations
+	errCredentialsMissingServerURLMessage = "no credentials server URL"
+	errCredentialsMissingUsernameMessage  = "no credentials username"
+)
+
+// errCredentialsNotFound represents an error
+// raised when credentials are not in the store.
+type errCredentialsNotFound struct{}
+
+// Error returns the standard error message
+// for when the credentials are not in the store.
+func (errCredentialsNotFound) Error() string {
+	return errCredentialsNotFoundMessage
+}
+
+// NewErrCredentialsNotFound creates a new error
+// for when the credentials are not in the store.
+func NewErrCredentialsNotFound() error {
+	return errCredentialsNotFound{}
+}
+
+// IsErrCredentialsNotFound returns true if the error
+// was caused by not having a set of credentials in a store.
+func IsErrCredentialsNotFound(err error) bool {
+	_, ok := err.(errCredentialsNotFound)
+	return ok
+}
+
+// IsErrCredentialsNotFoundMessage returns true if the error
+// was caused by not having a set of credentials in a store.
+//
+// This function helps to check messages returned by an
+// external program via its standard output.
+func IsErrCredentialsNotFoundMessage(err string) bool {
+	return err == errCredentialsNotFoundMessage
+}
+
+// errCredentialsMissingServerURL represents an error raised
+// when the credentials object has no server URL or when no
+// server URL is provided to a credentials operation requiring
+// one.
+type errCredentialsMissingServerURL struct{}
+
+func (errCredentialsMissingServerURL) Error() string {
+	return errCredentialsMissingServerURLMessage
+}
+
+// errCredentialsMissingUsername represents an error raised
+// when the credentials object has no username or when no
+// username is provided to a credentials operation requiring
+// one.
+type errCredentialsMissingUsername struct{}
+
+func (errCredentialsMissingUsername) Error() string {
+	return errCredentialsMissingUsernameMessage
+}
+
+// NewErrCredentialsMissingServerURL creates a new error for
+// errCredentialsMissingServerURL.
+func NewErrCredentialsMissingServerURL() error {
+	return errCredentialsMissingServerURL{}
+}
+
+// NewErrCredentialsMissingUsername creates a new error for
+// errCredentialsMissingUsername.
+func NewErrCredentialsMissingUsername() error {
+	return errCredentialsMissingUsername{}
+}
+
+// IsCredentialsMissingServerURL returns true if the error
+// was an errCredentialsMissingServerURL.
+func IsCredentialsMissingServerURL(err error) bool {
+	_, ok := err.(errCredentialsMissingServerURL)
+	return ok
+}
+
+// IsCredentialsMissingServerURLMessage checks for an
+// errCredentialsMissingServerURL in the error message.
+func IsCredentialsMissingServerURLMessage(err string) bool {
+	return err == errCredentialsMissingServerURLMessage
+}
+
+// IsCredentialsMissingUsername returns true if the error
+// was an errCredentialsMissingUsername.
+func IsCredentialsMissingUsername(err error) bool {
+	_, ok := err.(errCredentialsMissingUsername)
+	return ok
+}
+
+// IsCredentialsMissingUsernameMessage checks for an
+// errCredentialsMissingUsername in the error message.
+func IsCredentialsMissingUsernameMessage(err string) bool {
+	return err == errCredentialsMissingUsernameMessage
+}
diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go b/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go
new file mode 100644
index 0000000000000000000000000000000000000000..135acd254d7d5089eeda50c49edfe31ddc29cc7f
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go
@@ -0,0 +1,14 @@
+package credentials
+
+// Helper is the interface a credentials store helper must implement.
+type Helper interface {
+	// Add appends credentials to the store.
+	Add(*Credentials) error
+	// Delete removes credentials from the store.
+	Delete(serverURL string) error
+	// Get retrieves credentials from the store.
+	// It returns username and secret as strings.
+	Get(serverURL string) (string, string, error)
+	// List returns the stored serverURLs and their associated usernames.
+	List() (map[string]string, error)
+}
diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/version.go b/vendor/github.com/docker/docker-credential-helpers/credentials/version.go
new file mode 100644
index 0000000000000000000000000000000000000000..a4834dd41aa296075fedd426ad72098ff4eaf1e6
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/credentials/version.go
@@ -0,0 +1,4 @@
+package credentials
+
+// Version holds a string describing the current version
+const Version = "0.5.2"
diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/credentials.go b/vendor/github.com/docker/docker/cliconfig/credentials/credentials.go
new file mode 100644
index 0000000000000000000000000000000000000000..ca874cac5134039cd1bd97f66be0af5c2da98be7
--- /dev/null
+++ b/vendor/github.com/docker/docker/cliconfig/credentials/credentials.go
@@ -0,0 +1,17 @@
+package credentials
+
+import (
+	"github.com/docker/docker/api/types"
+)
+
+// Store is the interface that any credentials store must implement.
+type Store interface {
+	// Erase removes credentials from the store for a given server.
+	Erase(serverAddress string) error
+	// Get retrieves credentials from the store for a given server.
+	Get(serverAddress string) (types.AuthConfig, error)
+	// GetAll retrieves all the credentials from the store.
+	GetAll() (map[string]types.AuthConfig, error)
+	// Store saves credentials in the store.
+	Store(authConfig types.AuthConfig) error
+}
diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/default_store.go b/vendor/github.com/docker/docker/cliconfig/credentials/default_store.go
new file mode 100644
index 0000000000000000000000000000000000000000..b4733709b1a32ed1d1cf1bf2d38be686ac465953
--- /dev/null
+++ b/vendor/github.com/docker/docker/cliconfig/credentials/default_store.go
@@ -0,0 +1,22 @@
+package credentials
+
+import (
+	"os/exec"
+
+	"github.com/docker/docker/cliconfig/configfile"
+)
+
+// DetectDefaultStore sets the default credentials store
+// if the host includes the default store helper program.
+func DetectDefaultStore(c *configfile.ConfigFile) {
+	if c.CredentialsStore != "" {
+		// user defined
+		return
+	}
+
+	if defaultCredentialsStore != "" {
+		if _, err := exec.LookPath(remoteCredentialsPrefix + defaultCredentialsStore); err == nil {
+			c.CredentialsStore = defaultCredentialsStore
+		}
+	}
+}
diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/default_store_darwin.go b/vendor/github.com/docker/docker/cliconfig/credentials/default_store_darwin.go
new file mode 100644
index 0000000000000000000000000000000000000000..63e8ed4010c2dac2696a7317610f9b4ddd02c0ad
--- /dev/null
+++ b/vendor/github.com/docker/docker/cliconfig/credentials/default_store_darwin.go
@@ -0,0 +1,3 @@
+package credentials
+
+const defaultCredentialsStore = "osxkeychain"
diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/default_store_linux.go b/vendor/github.com/docker/docker/cliconfig/credentials/default_store_linux.go
new file mode 100644
index 0000000000000000000000000000000000000000..864c540f6c0b9f5f9320d122688aac92f5d0fd7b
--- /dev/null
+++ b/vendor/github.com/docker/docker/cliconfig/credentials/default_store_linux.go
@@ -0,0 +1,3 @@
+package credentials
+
+const defaultCredentialsStore = "secretservice"
diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/default_store_unsupported.go b/vendor/github.com/docker/docker/cliconfig/credentials/default_store_unsupported.go
new file mode 100644
index 0000000000000000000000000000000000000000..519ef53dcdece2cdf3afc8a642006118cb8d6e7a
--- /dev/null
+++ b/vendor/github.com/docker/docker/cliconfig/credentials/default_store_unsupported.go
@@ -0,0 +1,5 @@
+// +build !windows,!darwin,!linux
+
+package credentials
+
+const defaultCredentialsStore = ""
diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/default_store_windows.go b/vendor/github.com/docker/docker/cliconfig/credentials/default_store_windows.go
new file mode 100644
index 0000000000000000000000000000000000000000..fb6a9745cf1b264af2004bb3df3cc2e1abc37f49
--- /dev/null
+++ b/vendor/github.com/docker/docker/cliconfig/credentials/default_store_windows.go
@@ -0,0 +1,3 @@
+package credentials
+
+const defaultCredentialsStore = "wincred"
diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/file_store.go b/vendor/github.com/docker/docker/cliconfig/credentials/file_store.go
new file mode 100644
index 0000000000000000000000000000000000000000..ca73a384d4365fd45b1212b5442286d9c0c8fd57
--- /dev/null
+++ b/vendor/github.com/docker/docker/cliconfig/credentials/file_store.go
@@ -0,0 +1,53 @@
+package credentials
+
+import (
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/cliconfig/configfile"
+	"github.com/docker/docker/registry"
+)
+
+// fileStore implements a credentials store using
+// the docker configuration file to keep the credentials in plain text.
+type fileStore struct {
+	file *configfile.ConfigFile
+}
+
+// NewFileStore creates a new file credentials store.
+func NewFileStore(file *configfile.ConfigFile) Store {
+	return &fileStore{
+		file: file,
+	}
+}
+
+// Erase removes the given credentials from the file store.
+func (c *fileStore) Erase(serverAddress string) error {
+	delete(c.file.AuthConfigs, serverAddress)
+	return c.file.Save()
+}
+
+// Get retrieves credentials for a specific server from the file store.
+func (c *fileStore) Get(serverAddress string) (types.AuthConfig, error) {
+	authConfig, ok := c.file.AuthConfigs[serverAddress]
+	if !ok {
+		// Maybe they have a legacy config file, we will iterate the keys converting
+		// them to the new format and testing
+		for r, ac := range c.file.AuthConfigs {
+			if serverAddress == registry.ConvertToHostname(r) {
+				return ac, nil
+			}
+		}
+
+		authConfig = types.AuthConfig{}
+	}
+	return authConfig, nil
+}
+
+func (c *fileStore) GetAll() (map[string]types.AuthConfig, error) {
+	return c.file.AuthConfigs, nil
+}
+
+// Store saves the given credentials in the file store.
+func (c *fileStore) Store(authConfig types.AuthConfig) error {
+	c.file.AuthConfigs[authConfig.ServerAddress] = authConfig
+	return c.file.Save()
+}
diff --git a/vendor/github.com/docker/docker/cliconfig/credentials/native_store.go b/vendor/github.com/docker/docker/cliconfig/credentials/native_store.go
new file mode 100644
index 0000000000000000000000000000000000000000..dec2dbcb822b382d847de6b4999f38fe4d595bde
--- /dev/null
+++ b/vendor/github.com/docker/docker/cliconfig/credentials/native_store.go
@@ -0,0 +1,144 @@
+package credentials
+
+import (
+	"github.com/docker/docker-credential-helpers/client"
+	"github.com/docker/docker-credential-helpers/credentials"
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/cliconfig/configfile"
+)
+
+const (
+	remoteCredentialsPrefix = "docker-credential-"
+	tokenUsername           = "<token>"
+)
+
+// nativeStore implements a credentials store
+// using native keychain to keep credentials secure.
+// It piggybacks into a file store to keep users' emails.
+type nativeStore struct {
+	programFunc client.ProgramFunc
+	fileStore   Store
+}
+
+// NewNativeStore creates a new native store that
+// uses a remote helper program to manage credentials.
+func NewNativeStore(file *configfile.ConfigFile, helperSuffix string) Store {
+	name := remoteCredentialsPrefix + helperSuffix
+	return &nativeStore{
+		programFunc: client.NewShellProgramFunc(name),
+		fileStore:   NewFileStore(file),
+	}
+}
+
+// Erase removes the given credentials from the native store.
+func (c *nativeStore) Erase(serverAddress string) error {
+	if err := client.Erase(c.programFunc, serverAddress); err != nil {
+		return err
+	}
+
+	// Fallback to plain text store to remove email
+	return c.fileStore.Erase(serverAddress)
+}
+
+// Get retrieves credentials for a specific server from the native store.
+func (c *nativeStore) Get(serverAddress string) (types.AuthConfig, error) {
+	// load user email if it exists or an empty auth config.
+	auth, _ := c.fileStore.Get(serverAddress)
+
+	creds, err := c.getCredentialsFromStore(serverAddress)
+	if err != nil {
+		return auth, err
+	}
+	auth.Username = creds.Username
+	auth.IdentityToken = creds.IdentityToken
+	auth.Password = creds.Password
+
+	return auth, nil
+}
+
+// GetAll retrieves all the credentials from the native store.
+func (c *nativeStore) GetAll() (map[string]types.AuthConfig, error) {
+	auths, err := c.listCredentialsInStore()
+	if err != nil {
+		return nil, err
+	}
+
+	// Emails are only stored in the file store.
+	// This call can be safely eliminated when emails are removed.
+	fileConfigs, _ := c.fileStore.GetAll()
+
+	authConfigs := make(map[string]types.AuthConfig)
+	for registry := range auths {
+		creds, err := c.getCredentialsFromStore(registry)
+		if err != nil {
+			return nil, err
+		}
+		ac, _ := fileConfigs[registry] // might contain Email
+		ac.Username = creds.Username
+		ac.Password = creds.Password
+		ac.IdentityToken = creds.IdentityToken
+		authConfigs[registry] = ac
+	}
+
+	return authConfigs, nil
+}
+
+// Store saves the given credentials in the file store.
+func (c *nativeStore) Store(authConfig types.AuthConfig) error {
+	if err := c.storeCredentialsInStore(authConfig); err != nil {
+		return err
+	}
+	authConfig.Username = ""
+	authConfig.Password = ""
+	authConfig.IdentityToken = ""
+
+	// Fallback to old credential in plain text to save only the email
+	return c.fileStore.Store(authConfig)
+}
+
+// storeCredentialsInStore executes the command to store the credentials in the native store.
+func (c *nativeStore) storeCredentialsInStore(config types.AuthConfig) error {
+	creds := &credentials.Credentials{
+		ServerURL: config.ServerAddress,
+		Username:  config.Username,
+		Secret:    config.Password,
+	}
+
+	if config.IdentityToken != "" {
+		creds.Username = tokenUsername
+		creds.Secret = config.IdentityToken
+	}
+
+	return client.Store(c.programFunc, creds)
+}
+
+// getCredentialsFromStore executes the command to get the credentials from the native store.
+func (c *nativeStore) getCredentialsFromStore(serverAddress string) (types.AuthConfig, error) {
+	var ret types.AuthConfig
+
+	creds, err := client.Get(c.programFunc, serverAddress)
+	if err != nil {
+		if credentials.IsErrCredentialsNotFound(err) {
+			// do not return an error if the credentials are not
+			// in the keychain. Let docker ask for new credentials.
+			return ret, nil
+		}
+		return ret, err
+	}
+
+	if creds.Username == tokenUsername {
+		ret.IdentityToken = creds.Secret
+	} else {
+		ret.Password = creds.Secret
+		ret.Username = creds.Username
+	}
+
+	ret.ServerAddress = serverAddress
+	return ret, nil
+}
+
+// listCredentialsInStore returns a listing of stored credentials as a map of
+// URL -> username.
+func (c *nativeStore) listCredentialsInStore() (map[string]string, error) {
+	return client.List(c.programFunc)
+}
diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/counter.go b/vendor/github.com/docker/docker/daemon/graphdriver/counter.go
new file mode 100644
index 0000000000000000000000000000000000000000..5ea604f5b6d4c9d2dd260fce91b88c392a677f19
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/graphdriver/counter.go
@@ -0,0 +1,67 @@
+package graphdriver
+
+import "sync"
+
+type minfo struct {
+	check bool
+	count int
+}
+
+// RefCounter is a generic counter for use by graphdriver Get/Put calls
+type RefCounter struct {
+	counts  map[string]*minfo
+	mu      sync.Mutex
+	checker Checker
+}
+
+// NewRefCounter returns a new RefCounter
+func NewRefCounter(c Checker) *RefCounter {
+	return &RefCounter{
+		checker: c,
+		counts:  make(map[string]*minfo),
+	}
+}
+
+// Increment increases the ref count for the given id and returns the current count
+func (c *RefCounter) Increment(path string) int {
+	c.mu.Lock()
+	m := c.counts[path]
+	if m == nil {
+		m = &minfo{}
+		c.counts[path] = m
+	}
+	// if we are checking this path for the first time check to make sure
+	// if it was already mounted on the system and make sure we have a correct ref
+	// count if it is mounted as it is in use.
+	if !m.check {
+		m.check = true
+		if c.checker.IsMounted(path) {
+			m.count++
+		}
+	}
+	m.count++
+	c.mu.Unlock()
+	return m.count
+}
+
+// Decrement decreases the ref count for the given id and returns the current count
+func (c *RefCounter) Decrement(path string) int {
+	c.mu.Lock()
+	m := c.counts[path]
+	if m == nil {
+		m = &minfo{}
+		c.counts[path] = m
+	}
+	// if we are checking this path for the first time check to make sure
+	// if it was already mounted on the system and make sure we have a correct ref
+	// count if it is mounted as it is in use.
+	if !m.check {
+		m.check = true
+		if c.checker.IsMounted(path) {
+			m.count++
+		}
+	}
+	m.count--
+	c.mu.Unlock()
+	return m.count
+}
diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver.go
new file mode 100644
index 0000000000000000000000000000000000000000..f0bce562b7a9c60c48eea0400e1bfff31534d246
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/graphdriver/driver.go
@@ -0,0 +1,270 @@
+package graphdriver
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/vbatts/tar-split/tar/storage"
+
+	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/idtools"
+	"github.com/docker/docker/pkg/plugingetter"
+)
+
+// FsMagic unsigned id of the filesystem in use.
+type FsMagic uint32
+
+const (
+	// FsMagicUnsupported is a predefined constant value other than a valid filesystem id.
+	FsMagicUnsupported = FsMagic(0x00000000)
+)
+
+var (
+	// All registered drivers
+	drivers map[string]InitFunc
+
+	// ErrNotSupported returned when driver is not supported.
+	ErrNotSupported = errors.New("driver not supported")
+	// ErrPrerequisites is returned when driver does not meet prerequisites.
+	ErrPrerequisites = errors.New("prerequisites for driver not satisfied (wrong filesystem?)")
+	// ErrIncompatibleFS returned when file system is not supported.
+	ErrIncompatibleFS = fmt.Errorf("backing file system is unsupported for this graph driver")
+)
+
+// CreateOpts contains optional arguments for Create() and CreateReadWrite()
+// methods.
+type CreateOpts struct {
+	MountLabel string
+	StorageOpt map[string]string
+}
+
+// InitFunc initializes the storage driver.
+type InitFunc func(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error)
+
+// ProtoDriver defines the basic capabilities of a driver.
+// This interface exists solely to be a minimum set of methods
+// for client code which choose not to implement the entire Driver
+// interface and use the NaiveDiffDriver wrapper constructor.
+//
+// Use of ProtoDriver directly by client code is not recommended.
+type ProtoDriver interface {
+	// String returns a string representation of this driver.
+	String() string
+	// CreateReadWrite creates a new, empty filesystem layer that is ready
+	// to be used as the storage for a container. Additional options can
+	// be passed in opts. parent may be "" and opts may be nil.
+	CreateReadWrite(id, parent string, opts *CreateOpts) error
+	// Create creates a new, empty, filesystem layer with the
+	// specified id and parent and options passed in opts. Parent
+	// may be "" and opts may be nil.
+	Create(id, parent string, opts *CreateOpts) error
+	// Remove attempts to remove the filesystem layer with this id.
+	Remove(id string) error
+	// Get returns the mountpoint for the layered filesystem referred
+	// to by this id. You can optionally specify a mountLabel or "".
+	// Returns the absolute path to the mounted layered filesystem.
+	Get(id, mountLabel string) (dir string, err error)
+	// Put releases the system resources for the specified id,
+	// e.g, unmounting layered filesystem.
+	Put(id string) error
+	// Exists returns whether a filesystem layer with the specified
+	// ID exists on this driver.
+	Exists(id string) bool
+	// Status returns a set of key-value pairs which give low
+	// level diagnostic status about this driver.
+	Status() [][2]string
+	// Returns a set of key-value pairs which give low level information
+	// about the image/container driver is managing.
+	GetMetadata(id string) (map[string]string, error)
+	// Cleanup performs necessary tasks to release resources
+	// held by the driver, e.g., unmounting all layered filesystems
+	// known to this driver.
+	Cleanup() error
+}
+
+// DiffDriver is the interface to use to implement graph diffs
+type DiffDriver interface {
+	// Diff produces an archive of the changes between the specified
+	// layer and its parent layer which may be "".
+	Diff(id, parent string) (io.ReadCloser, error)
+	// Changes produces a list of changes between the specified layer
+	// and its parent layer. If parent is "", then all changes will be ADD changes.
+	Changes(id, parent string) ([]archive.Change, error)
+	// ApplyDiff extracts the changeset from the given diff into the
+	// layer with the specified id and parent, returning the size of the
+	// new layer in bytes.
+	// The archive.Reader must be an uncompressed stream.
+	ApplyDiff(id, parent string, diff io.Reader) (size int64, err error)
+	// DiffSize calculates the changes between the specified id
+	// and its parent and returns the size in bytes of the changes
+	// relative to its base filesystem directory.
+	DiffSize(id, parent string) (size int64, err error)
+}
+
+// Driver is the interface for layered/snapshot file system drivers.
+type Driver interface {
+	ProtoDriver
+	DiffDriver
+}
+
+// DiffGetterDriver is the interface for layered file system drivers that
+// provide a specialized function for getting file contents for tar-split.
+type DiffGetterDriver interface {
+	Driver
+	// DiffGetter returns an interface to efficiently retrieve the contents
+	// of files in a layer.
+	DiffGetter(id string) (FileGetCloser, error)
+}
+
+// FileGetCloser extends the storage.FileGetter interface with a Close method
+// for cleaning up.
+type FileGetCloser interface {
+	storage.FileGetter
+	// Close cleans up any resources associated with the FileGetCloser.
+	Close() error
+}
+
+// Checker makes checks on specified filesystems.
+type Checker interface {
+	// IsMounted returns true if the provided path is mounted for the specific checker
+	IsMounted(path string) bool
+}
+
+func init() {
+	drivers = make(map[string]InitFunc)
+}
+
+// Register registers an InitFunc for the driver.
+func Register(name string, initFunc InitFunc) error {
+	if _, exists := drivers[name]; exists {
+		return fmt.Errorf("Name already registered %s", name)
+	}
+	drivers[name] = initFunc
+
+	return nil
+}
+
+// GetDriver initializes and returns the registered driver
+func GetDriver(name string, pg plugingetter.PluginGetter, config Options) (Driver, error) {
+	if initFunc, exists := drivers[name]; exists {
+		return initFunc(filepath.Join(config.Root, name), config.DriverOptions, config.UIDMaps, config.GIDMaps)
+	}
+
+	pluginDriver, err := lookupPlugin(name, pg, config)
+	if err == nil {
+		return pluginDriver, nil
+	}
+	logrus.WithError(err).WithField("driver", name).WithField("home-dir", config.Root).Error("Failed to GetDriver graph")
+	return nil, ErrNotSupported
+}
+
+// getBuiltinDriver initializes and returns the registered driver, but does not try to load from plugins
+func getBuiltinDriver(name, home string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) {
+	if initFunc, exists := drivers[name]; exists {
+		return initFunc(filepath.Join(home, name), options, uidMaps, gidMaps)
+	}
+	logrus.Errorf("Failed to built-in GetDriver graph %s %s", name, home)
+	return nil, ErrNotSupported
+}
+
+// Options is used to initialize a graphdriver
+type Options struct {
+	Root                string
+	DriverOptions       []string
+	UIDMaps             []idtools.IDMap
+	GIDMaps             []idtools.IDMap
+	ExperimentalEnabled bool
+}
+
+// New creates the driver and initializes it at the specified root.
+func New(name string, pg plugingetter.PluginGetter, config Options) (Driver, error) {
+	if name != "" {
+		logrus.Debugf("[graphdriver] trying provided driver: %s", name) // so the logs show specified driver
+		return GetDriver(name, pg, config)
+	}
+
+	// Guess for prior driver
+	driversMap := scanPriorDrivers(config.Root)
+	for _, name := range priority {
+		if name == "vfs" {
+			// don't use vfs even if there is state present.
+			continue
+		}
+		if _, prior := driversMap[name]; prior {
+			// of the state found from prior drivers, check in order of our priority
+			// which we would prefer
+			driver, err := getBuiltinDriver(name, config.Root, config.DriverOptions, config.UIDMaps, config.GIDMaps)
+			if err != nil {
+				// unlike below, we will return error here, because there is prior
+				// state, and now it is no longer supported/prereq/compatible, so
+				// something changed and needs attention. Otherwise the daemon's
+				// images would just "disappear".
+				logrus.Errorf("[graphdriver] prior storage driver %s failed: %s", name, err)
+				return nil, err
+			}
+
+			// abort starting when there are other prior configured drivers
+			// to ensure the user explicitly selects the driver to load
+			if len(driversMap)-1 > 0 {
+				var driversSlice []string
+				for name := range driversMap {
+					driversSlice = append(driversSlice, name)
+				}
+
+				return nil, fmt.Errorf("%s contains several valid graphdrivers: %s; Please cleanup or explicitly choose storage driver (-s <DRIVER>)", config.Root, strings.Join(driversSlice, ", "))
+			}
+
+			logrus.Infof("[graphdriver] using prior storage driver: %s", name)
+			return driver, nil
+		}
+	}
+
+	// Check for priority drivers first
+	for _, name := range priority {
+		driver, err := getBuiltinDriver(name, config.Root, config.DriverOptions, config.UIDMaps, config.GIDMaps)
+		if err != nil {
+			if isDriverNotSupported(err) {
+				continue
+			}
+			return nil, err
+		}
+		return driver, nil
+	}
+
+	// Check all registered drivers if no priority driver is found
+	for name, initFunc := range drivers {
+		driver, err := initFunc(filepath.Join(config.Root, name), config.DriverOptions, config.UIDMaps, config.GIDMaps)
+		if err != nil {
+			if isDriverNotSupported(err) {
+				continue
+			}
+			return nil, err
+		}
+		return driver, nil
+	}
+	return nil, fmt.Errorf("No supported storage backend found")
+}
+
+// isDriverNotSupported returns true if the error initializing
+// the graph driver is a non-supported error.
+func isDriverNotSupported(err error) bool {
+	return err == ErrNotSupported || err == ErrPrerequisites || err == ErrIncompatibleFS
+}
+
+// scanPriorDrivers returns an un-ordered scan of directories of prior storage drivers
+func scanPriorDrivers(root string) map[string]bool {
+	driversMap := make(map[string]bool)
+
+	for driver := range drivers {
+		p := filepath.Join(root, driver)
+		if _, err := os.Stat(p); err == nil && driver != "vfs" {
+			driversMap[driver] = true
+		}
+	}
+	return driversMap
+}
diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver_freebsd.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver_freebsd.go
new file mode 100644
index 0000000000000000000000000000000000000000..2891a84f3aca85807548ddcc7dfadcb30716aeb2
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/graphdriver/driver_freebsd.go
@@ -0,0 +1,19 @@
+package graphdriver
+
+import "syscall"
+
+var (
+	// Slice of drivers that should be used in an order
+	priority = []string{
+		"zfs",
+	}
+)
+
+// Mounted checks if the given path is mounted as the fs type
+func Mounted(fsType FsMagic, mountPath string) (bool, error) {
+	var buf syscall.Statfs_t
+	if err := syscall.Statfs(mountPath, &buf); err != nil {
+		return false, err
+	}
+	return FsMagic(buf.Type) == fsType, nil
+}
diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver_linux.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver_linux.go
new file mode 100644
index 0000000000000000000000000000000000000000..5c8d0e23014107525c3b5f7eab51c778a90d5af1
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/graphdriver/driver_linux.go
@@ -0,0 +1,135 @@
+// +build linux
+
+package graphdriver
+
+import (
+	"path/filepath"
+	"syscall"
+
+	"github.com/docker/docker/pkg/mount"
+)
+
+const (
+	// FsMagicAufs filesystem id for Aufs
+	FsMagicAufs = FsMagic(0x61756673)
+	// FsMagicBtrfs filesystem id for Btrfs
+	FsMagicBtrfs = FsMagic(0x9123683E)
+	// FsMagicCramfs filesystem id for Cramfs
+	FsMagicCramfs = FsMagic(0x28cd3d45)
+	// FsMagicEcryptfs filesystem id for eCryptfs
+	FsMagicEcryptfs = FsMagic(0xf15f)
+	// FsMagicExtfs filesystem id for Extfs
+	FsMagicExtfs = FsMagic(0x0000EF53)
+	// FsMagicF2fs filesystem id for F2fs
+	FsMagicF2fs = FsMagic(0xF2F52010)
+	// FsMagicGPFS filesystem id for GPFS
+	FsMagicGPFS = FsMagic(0x47504653)
+	// FsMagicJffs2Fs filesystem if for Jffs2Fs
+	FsMagicJffs2Fs = FsMagic(0x000072b6)
+	// FsMagicJfs filesystem id for Jfs
+	FsMagicJfs = FsMagic(0x3153464a)
+	// FsMagicNfsFs filesystem id for NfsFs
+	FsMagicNfsFs = FsMagic(0x00006969)
+	// FsMagicRAMFs filesystem id for RamFs
+	FsMagicRAMFs = FsMagic(0x858458f6)
+	// FsMagicReiserFs filesystem id for ReiserFs
+	FsMagicReiserFs = FsMagic(0x52654973)
+	// FsMagicSmbFs filesystem id for SmbFs
+	FsMagicSmbFs = FsMagic(0x0000517B)
+	// FsMagicSquashFs filesystem id for SquashFs
+	FsMagicSquashFs = FsMagic(0x73717368)
+	// FsMagicTmpFs filesystem id for TmpFs
+	FsMagicTmpFs = FsMagic(0x01021994)
+	// FsMagicVxFS filesystem id for VxFs
+	FsMagicVxFS = FsMagic(0xa501fcf5)
+	// FsMagicXfs filesystem id for Xfs
+	FsMagicXfs = FsMagic(0x58465342)
+	// FsMagicZfs filesystem id for Zfs
+	FsMagicZfs = FsMagic(0x2fc12fc1)
+	// FsMagicOverlay filesystem id for overlay
+	FsMagicOverlay = FsMagic(0x794C7630)
+)
+
+var (
+	// Slice of drivers that should be used in an order
+	priority = []string{
+		"aufs",
+		"btrfs",
+		"zfs",
+		"overlay2",
+		"overlay",
+		"devicemapper",
+		"vfs",
+	}
+
+	// FsNames maps filesystem id to name of the filesystem.
+	FsNames = map[FsMagic]string{
+		FsMagicAufs:        "aufs",
+		FsMagicBtrfs:       "btrfs",
+		FsMagicCramfs:      "cramfs",
+		FsMagicExtfs:       "extfs",
+		FsMagicF2fs:        "f2fs",
+		FsMagicGPFS:        "gpfs",
+		FsMagicJffs2Fs:     "jffs2",
+		FsMagicJfs:         "jfs",
+		FsMagicNfsFs:       "nfs",
+		FsMagicOverlay:     "overlayfs",
+		FsMagicRAMFs:       "ramfs",
+		FsMagicReiserFs:    "reiserfs",
+		FsMagicSmbFs:       "smb",
+		FsMagicSquashFs:    "squashfs",
+		FsMagicTmpFs:       "tmpfs",
+		FsMagicUnsupported: "unsupported",
+		FsMagicVxFS:        "vxfs",
+		FsMagicXfs:         "xfs",
+		FsMagicZfs:         "zfs",
+	}
+)
+
+// GetFSMagic returns the filesystem id given the path.
+func GetFSMagic(rootpath string) (FsMagic, error) {
+	var buf syscall.Statfs_t
+	if err := syscall.Statfs(filepath.Dir(rootpath), &buf); err != nil {
+		return 0, err
+	}
+	return FsMagic(buf.Type), nil
+}
+
+// NewFsChecker returns a checker configured for the provided FsMagic
+func NewFsChecker(t FsMagic) Checker {
+	return &fsChecker{
+		t: t,
+	}
+}
+
+type fsChecker struct {
+	t FsMagic
+}
+
+func (c *fsChecker) IsMounted(path string) bool {
+	m, _ := Mounted(c.t, path)
+	return m
+}
+
+// NewDefaultChecker returns a checker that parses /proc/mountinfo to check
+// if the specified path is mounted.
+func NewDefaultChecker() Checker {
+	return &defaultChecker{}
+}
+
+type defaultChecker struct {
+}
+
+func (c *defaultChecker) IsMounted(path string) bool {
+	m, _ := mount.Mounted(path)
+	return m
+}
+
+// Mounted checks if the given path is mounted as the fs type
+func Mounted(fsType FsMagic, mountPath string) (bool, error) {
+	var buf syscall.Statfs_t
+	if err := syscall.Statfs(mountPath, &buf); err != nil {
+		return false, err
+	}
+	return FsMagic(buf.Type) == fsType, nil
+}
diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver_solaris.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver_solaris.go
new file mode 100644
index 0000000000000000000000000000000000000000..7daf01c32d7d16468d06c87a0c3cca2465b40c9d
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/graphdriver/driver_solaris.go
@@ -0,0 +1,97 @@
+// +build solaris,cgo
+
+package graphdriver
+
+/*
+#include <sys/statvfs.h>
+#include <stdlib.h>
+
+static inline struct statvfs *getstatfs(char *s) {
+        struct statvfs *buf;
+        int err;
+        buf = (struct statvfs *)malloc(sizeof(struct statvfs));
+        err = statvfs(s, buf);
+        return buf;
+}
+*/
+import "C"
+import (
+	"path/filepath"
+	"unsafe"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/pkg/mount"
+)
+
+const (
+	// FsMagicZfs filesystem id for Zfs
+	FsMagicZfs = FsMagic(0x2fc12fc1)
+)
+
+var (
+	// Slice of drivers that should be used in an order
+	priority = []string{
+		"zfs",
+	}
+
+	// FsNames maps filesystem id to name of the filesystem.
+	FsNames = map[FsMagic]string{
+		FsMagicZfs: "zfs",
+	}
+)
+
+// GetFSMagic returns the filesystem id given the path.
+func GetFSMagic(rootpath string) (FsMagic, error) {
+	return 0, nil
+}
+
+type fsChecker struct {
+	t FsMagic
+}
+
+func (c *fsChecker) IsMounted(path string) bool {
+	m, _ := Mounted(c.t, path)
+	return m
+}
+
+// NewFsChecker returns a checker configured for the provided FsMagic
+func NewFsChecker(t FsMagic) Checker {
+	return &fsChecker{
+		t: t,
+	}
+}
+
+// NewDefaultChecker returns a checker that parses /proc/mountinfo to check
+// if the specified path is mounted.
+// No-op on Solaris.
+func NewDefaultChecker() Checker {
+	return &defaultChecker{}
+}
+
+type defaultChecker struct {
+}
+
+func (c *defaultChecker) IsMounted(path string) bool {
+	m, _ := mount.Mounted(path)
+	return m
+}
+
+// Mounted checks if the given path is mounted as the fs type
+// Solaris supports only ZFS for now
+func Mounted(fsType FsMagic, mountPath string) (bool, error) {
+
+	cs := C.CString(filepath.Dir(mountPath))
+	buf := C.getstatfs(cs)
+
+	// on Solaris buf.f_basetype contains ['z', 'f', 's', 0 ... ]
+	if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) ||
+		(buf.f_basetype[3] != 0) {
+		logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", mountPath)
+		C.free(unsafe.Pointer(buf))
+		return false, ErrPrerequisites
+	}
+
+	C.free(unsafe.Pointer(buf))
+	C.free(unsafe.Pointer(cs))
+	return true, nil
+}
diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver_unsupported.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver_unsupported.go
new file mode 100644
index 0000000000000000000000000000000000000000..4a875608b0dec6271911e453d7577d66c22b74c5
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/graphdriver/driver_unsupported.go
@@ -0,0 +1,15 @@
+// +build !linux,!windows,!freebsd,!solaris
+
+package graphdriver
+
+var (
+	// Slice of drivers that should be used in an order
+	priority = []string{
+		"unsupported",
+	}
+)
+
+// GetFSMagic returns the filesystem id given the path.
+func GetFSMagic(rootpath string) (FsMagic, error) {
+	return FsMagicUnsupported, nil
+}
diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver_windows.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver_windows.go
new file mode 100644
index 0000000000000000000000000000000000000000..ffd30c2950c776fb6b777f0d25236afefd22906e
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/graphdriver/driver_windows.go
@@ -0,0 +1,14 @@
+package graphdriver
+
+var (
+	// Slice of drivers that should be used in order
+	priority = []string{
+		"windowsfilter",
+	}
+)
+
+// GetFSMagic returns the filesystem id given the path.
+func GetFSMagic(rootpath string) (FsMagic, error) {
+	// Note it is OK to return FsMagicUnsupported on Windows.
+	return FsMagicUnsupported, nil
+}
diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/fsdiff.go b/vendor/github.com/docker/docker/daemon/graphdriver/fsdiff.go
new file mode 100644
index 0000000000000000000000000000000000000000..20826cd7d22db5d6f892efa37c04dd9b04b2e19c
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/graphdriver/fsdiff.go
@@ -0,0 +1,169 @@
+package graphdriver
+
+import (
+	"io"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/chrootarchive"
+	"github.com/docker/docker/pkg/idtools"
+	"github.com/docker/docker/pkg/ioutils"
+)
+
+var (
+	// ApplyUncompressedLayer defines the unpack method used by the graph
+	// driver.
+	ApplyUncompressedLayer = chrootarchive.ApplyUncompressedLayer
+)
+
+// NaiveDiffDriver takes a ProtoDriver and adds the
+// capability of the Diffing methods which it may or may not
+// support on its own. See the comment on the exported
+// NewNaiveDiffDriver function below.
+// Notably, the AUFS driver doesn't need to be wrapped like this.
+type NaiveDiffDriver struct {
+	ProtoDriver
+	uidMaps []idtools.IDMap
+	gidMaps []idtools.IDMap
+}
+
+// NewNaiveDiffDriver returns a fully functional driver that wraps the
+// given ProtoDriver and adds the capability of the following methods which
+// it may or may not support on its own:
+//     Diff(id, parent string) (archive.Archive, error)
+//     Changes(id, parent string) ([]archive.Change, error)
+//     ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error)
+//     DiffSize(id, parent string) (size int64, err error)
+func NewNaiveDiffDriver(driver ProtoDriver, uidMaps, gidMaps []idtools.IDMap) Driver {
+	return &NaiveDiffDriver{ProtoDriver: driver,
+		uidMaps: uidMaps,
+		gidMaps: gidMaps}
+}
+
+// Diff produces an archive of the changes between the specified
+// layer and its parent layer which may be "".
+func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch io.ReadCloser, err error) {
+	startTime := time.Now()
+	driver := gdw.ProtoDriver
+
+	layerFs, err := driver.Get(id, "")
+	if err != nil {
+		return nil, err
+	}
+
+	defer func() {
+		if err != nil {
+			driver.Put(id)
+		}
+	}()
+
+	if parent == "" {
+		archive, err := archive.Tar(layerFs, archive.Uncompressed)
+		if err != nil {
+			return nil, err
+		}
+		return ioutils.NewReadCloserWrapper(archive, func() error {
+			err := archive.Close()
+			driver.Put(id)
+			return err
+		}), nil
+	}
+
+	parentFs, err := driver.Get(parent, "")
+	if err != nil {
+		return nil, err
+	}
+	defer driver.Put(parent)
+
+	changes, err := archive.ChangesDirs(layerFs, parentFs)
+	if err != nil {
+		return nil, err
+	}
+
+	archive, err := archive.ExportChanges(layerFs, changes, gdw.uidMaps, gdw.gidMaps)
+	if err != nil {
+		return nil, err
+	}
+
+	return ioutils.NewReadCloserWrapper(archive, func() error {
+		err := archive.Close()
+		driver.Put(id)
+
+		// NaiveDiffDriver compares file metadata with parent layers. Parent layers
+		// are extracted from tar's with full second precision on modified time.
+		// We need this hack here to make sure calls within same second receive
+		// correct result.
+		time.Sleep(startTime.Truncate(time.Second).Add(time.Second).Sub(time.Now()))
+		return err
+	}), nil
+}
+
+// Changes produces a list of changes between the specified layer
+// and its parent layer. If parent is "", then all changes will be ADD changes.
+func (gdw *NaiveDiffDriver) Changes(id, parent string) ([]archive.Change, error) {
+	driver := gdw.ProtoDriver
+
+	layerFs, err := driver.Get(id, "")
+	if err != nil {
+		return nil, err
+	}
+	defer driver.Put(id)
+
+	parentFs := ""
+
+	if parent != "" {
+		parentFs, err = driver.Get(parent, "")
+		if err != nil {
+			return nil, err
+		}
+		defer driver.Put(parent)
+	}
+
+	return archive.ChangesDirs(layerFs, parentFs)
+}
+
+// ApplyDiff extracts the changeset from the given diff into the
+// layer with the specified id and parent, returning the size of the
+// new layer in bytes.
+func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) {
+	driver := gdw.ProtoDriver
+
+	// Mount the root filesystem so we can apply the diff/layer.
+	layerFs, err := driver.Get(id, "")
+	if err != nil {
+		return
+	}
+	defer driver.Put(id)
+
+	options := &archive.TarOptions{UIDMaps: gdw.uidMaps,
+		GIDMaps: gdw.gidMaps}
+	start := time.Now().UTC()
+	logrus.Debug("Start untar layer")
+	if size, err = ApplyUncompressedLayer(layerFs, diff, options); err != nil {
+		return
+	}
+	logrus.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds())
+
+	return
+}
+
+// DiffSize calculates the changes between the specified layer
+// and its parent and returns the size in bytes of the changes
+// relative to its base filesystem directory.
+func (gdw *NaiveDiffDriver) DiffSize(id, parent string) (size int64, err error) {
+	driver := gdw.ProtoDriver
+
+	changes, err := gdw.Changes(id, parent)
+	if err != nil {
+		return
+	}
+
+	layerFs, err := driver.Get(id, "")
+	if err != nil {
+		return
+	}
+	defer driver.Put(id)
+
+	return archive.ChangesSize(layerFs, changes), nil
+}
diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/plugin.go b/vendor/github.com/docker/docker/daemon/graphdriver/plugin.go
new file mode 100644
index 0000000000000000000000000000000000000000..7294bcc5f6cfaa1d6366652b3ea659bb37d414a0
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/graphdriver/plugin.go
@@ -0,0 +1,43 @@
+package graphdriver
+
+import (
+	"fmt"
+	"io"
+	"path/filepath"
+
+	"github.com/docker/docker/pkg/plugingetter"
+	"github.com/docker/docker/plugin/v2"
+)
+
+// pluginClient abstracts the subset of the plugin RPC client used by the
+// graphdriver proxy.
+type pluginClient interface {
+	// Call calls the specified method with the specified arguments for the plugin.
+	Call(string, interface{}, interface{}) error
+	// Stream calls the specified method with the specified arguments for the plugin and returns the response IO stream
+	Stream(string, interface{}) (io.ReadCloser, error)
+	// SendFile calls the specified method, and passes through the IO stream
+	SendFile(string, io.Reader, interface{}) error
+}
+
+// lookupPlugin resolves a graphdriver plugin by name through the plugin
+// getter and wraps it in a proxy Driver.  Graphdriver plugins are gated
+// behind experimental mode.
+func lookupPlugin(name string, pg plugingetter.PluginGetter, config Options) (Driver, error) {
+	if !config.ExperimentalEnabled {
+		return nil, fmt.Errorf("graphdriver plugins are only supported with experimental mode")
+	}
+	pl, err := pg.Get(name, "GraphDriver", plugingetter.ACQUIRE)
+	if err != nil {
+		return nil, fmt.Errorf("Error looking up graphdriver plugin %s: %v", name, err)
+	}
+	return newPluginDriver(name, pl, config)
+}
+
+// newPluginDriver builds a graphDriverProxy for the resolved plugin and
+// initializes it.  For v2 plugins that declare a propagated mount, the
+// driver home is rooted at that mount instead of config.Root.
+func newPluginDriver(name string, pl plugingetter.CompatPlugin, config Options) (Driver, error) {
+	home := config.Root
+	if !pl.IsV1() {
+		if p, ok := pl.(*v2.Plugin); ok {
+			if p.PropagatedMount != "" {
+				home = p.PluginObj.Config.PropagatedMount
+			}
+		}
+	}
+	proxy := &graphDriverProxy{name, pl}
+	return proxy, proxy.Init(filepath.Join(home, name), config.DriverOptions, config.UIDMaps, config.GIDMaps)
+}
diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/proxy.go b/vendor/github.com/docker/docker/daemon/graphdriver/proxy.go
new file mode 100644
index 0000000000000000000000000000000000000000..bfe74cc6f9408b288f5650176b5d01f82a22e191
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/graphdriver/proxy.go
@@ -0,0 +1,252 @@
+package graphdriver
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"path/filepath"
+
+	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/idtools"
+	"github.com/docker/docker/pkg/plugingetter"
+)
+
+// graphDriverProxy forwards the graphdriver Driver interface over the
+// RPC client of the wrapped plugin.
+type graphDriverProxy struct {
+	name string
+	p    plugingetter.CompatPlugin
+}
+
+// graphDriverRequest is the JSON request body shared by most proxy RPCs;
+// only the fields relevant to a given call are populated.
+type graphDriverRequest struct {
+	ID         string            `json:",omitempty"`
+	Parent     string            `json:",omitempty"`
+	MountLabel string            `json:",omitempty"`
+	StorageOpt map[string]string `json:",omitempty"`
+}
+
+// graphDriverResponse is the JSON response body shared by all proxy RPCs;
+// Err carries a plugin-reported error string, the other fields depend on
+// the method called.
+type graphDriverResponse struct {
+	Err      string            `json:",omitempty"`
+	Dir      string            `json:",omitempty"`
+	Exists   bool              `json:",omitempty"`
+	Status   [][2]string       `json:",omitempty"`
+	Changes  []archive.Change  `json:",omitempty"`
+	Size     int64             `json:",omitempty"`
+	Metadata map[string]string `json:",omitempty"`
+}
+
+// graphDriverInitRequest is the request body for GraphDriver.Init.
+type graphDriverInitRequest struct {
+	Home    string
+	Opts    []string        `json:"Opts"`
+	UIDMaps []idtools.IDMap `json:"UIDMaps"`
+	GIDMaps []idtools.IDMap `json:"GIDMaps"`
+}
+
+// Init initializes the remote graphdriver with the given home directory,
+// driver options and ID mappings.  For v2 plugins it also takes a plugin
+// reference that is held until Cleanup/daemon shutdown.  A plugin-reported
+// error string is converted into a Go error.
+func (d *graphDriverProxy) Init(home string, opts []string, uidMaps, gidMaps []idtools.IDMap) error {
+	if !d.p.IsV1() {
+		if cp, ok := d.p.(plugingetter.CountedPlugin); ok {
+			// always acquire here, it will be cleaned up on daemon shutdown
+			cp.Acquire()
+		}
+	}
+	args := &graphDriverInitRequest{
+		Home:    home,
+		Opts:    opts,
+		UIDMaps: uidMaps,
+		GIDMaps: gidMaps,
+	}
+	var ret graphDriverResponse
+	if err := d.p.Client().Call("GraphDriver.Init", args, &ret); err != nil {
+		return err
+	}
+	if ret.Err != "" {
+		return errors.New(ret.Err)
+	}
+	return nil
+}
+
+// String returns the plugin/driver name.
+func (d *graphDriverProxy) String() string {
+	return d.name
+}
+
+// CreateReadWrite asks the remote driver to create a new writable layer
+// with the given id on top of parent, forwarding any mount label and
+// storage options.
+func (d *graphDriverProxy) CreateReadWrite(id, parent string, opts *CreateOpts) error {
+	args := &graphDriverRequest{
+		ID:     id,
+		Parent: parent,
+	}
+	if opts != nil {
+		args.MountLabel = opts.MountLabel
+		args.StorageOpt = opts.StorageOpt
+	}
+
+	var ret graphDriverResponse
+	if err := d.p.Client().Call("GraphDriver.CreateReadWrite", args, &ret); err != nil {
+		return err
+	}
+	if ret.Err != "" {
+		return errors.New(ret.Err)
+	}
+	return nil
+}
+
+// Create asks the remote driver to create a new (read-only) layer with the
+// given id on top of parent, forwarding any mount label and storage options.
+func (d *graphDriverProxy) Create(id, parent string, opts *CreateOpts) error {
+	args := &graphDriverRequest{
+		ID:     id,
+		Parent: parent,
+	}
+	if opts != nil {
+		args.MountLabel = opts.MountLabel
+		args.StorageOpt = opts.StorageOpt
+	}
+	var ret graphDriverResponse
+	if err := d.p.Client().Call("GraphDriver.Create", args, &ret); err != nil {
+		return err
+	}
+	if ret.Err != "" {
+		return errors.New(ret.Err)
+	}
+	return nil
+}
+
+// Remove asks the remote driver to delete the layer with the given id.
+func (d *graphDriverProxy) Remove(id string) error {
+	args := &graphDriverRequest{ID: id}
+	var ret graphDriverResponse
+	if err := d.p.Client().Call("GraphDriver.Remove", args, &ret); err != nil {
+		return err
+	}
+	if ret.Err != "" {
+		return errors.New(ret.Err)
+	}
+	return nil
+}
+
+// Get mounts the layer with the given id and returns its path.  The path
+// reported by the plugin is always joined with the plugin's base path,
+// even when the plugin reported an error alongside it.
+func (d *graphDriverProxy) Get(id, mountLabel string) (string, error) {
+	args := &graphDriverRequest{
+		ID:         id,
+		MountLabel: mountLabel,
+	}
+	var ret graphDriverResponse
+	if err := d.p.Client().Call("GraphDriver.Get", args, &ret); err != nil {
+		return "", err
+	}
+	var err error
+	if ret.Err != "" {
+		err = errors.New(ret.Err)
+	}
+	return filepath.Join(d.p.BasePath(), ret.Dir), err
+}
+
+// Put releases the mount obtained by a previous Get for the given id.
+func (d *graphDriverProxy) Put(id string) error {
+	args := &graphDriverRequest{ID: id}
+	var ret graphDriverResponse
+	if err := d.p.Client().Call("GraphDriver.Put", args, &ret); err != nil {
+		return err
+	}
+	if ret.Err != "" {
+		return errors.New(ret.Err)
+	}
+	return nil
+}
+
+// Exists reports whether the layer with the given id exists on the remote
+// driver.  RPC transport errors are treated as "does not exist".
+func (d *graphDriverProxy) Exists(id string) bool {
+	args := &graphDriverRequest{ID: id}
+	var ret graphDriverResponse
+	if err := d.p.Client().Call("GraphDriver.Exists", args, &ret); err != nil {
+		return false
+	}
+	return ret.Exists
+}
+
+// Status returns the remote driver's status key/value pairs; RPC transport
+// failures yield nil.
+func (d *graphDriverProxy) Status() [][2]string {
+	args := &graphDriverRequest{}
+	var ret graphDriverResponse
+	if err := d.p.Client().Call("GraphDriver.Status", args, &ret); err != nil {
+		return nil
+	}
+	return ret.Status
+}
+
+// GetMetadata returns driver-specific metadata for the layer with the
+// given id.
+func (d *graphDriverProxy) GetMetadata(id string) (map[string]string, error) {
+	args := &graphDriverRequest{
+		ID: id,
+	}
+	var ret graphDriverResponse
+	if err := d.p.Client().Call("GraphDriver.GetMetadata", args, &ret); err != nil {
+		return nil, err
+	}
+	if ret.Err != "" {
+		return nil, errors.New(ret.Err)
+	}
+	return ret.Metadata, nil
+}
+
+// Cleanup asks the remote driver to release its resources, and releases
+// the plugin reference acquired in Init (v2 plugins only).
+func (d *graphDriverProxy) Cleanup() error {
+	if !d.p.IsV1() {
+		if cp, ok := d.p.(plugingetter.CountedPlugin); ok {
+			// always release
+			defer cp.Release()
+		}
+	}
+
+	args := &graphDriverRequest{}
+	var ret graphDriverResponse
+	if err := d.p.Client().Call("GraphDriver.Cleanup", args, &ret); err != nil {
+		// NOTE(review): the transport error is discarded and nil returned;
+		// only a plugin-reported error (ret.Err) below is surfaced.  Looks
+		// like deliberate best-effort cleanup — confirm against upstream
+		// before changing.
+		return nil
+	}
+	if ret.Err != "" {
+		return errors.New(ret.Err)
+	}
+	return nil
+}
+
+// Diff streams the tar of changes between the layer with the given id and
+// its parent from the remote driver.  The caller must close the returned
+// stream.
+func (d *graphDriverProxy) Diff(id, parent string) (io.ReadCloser, error) {
+	args := &graphDriverRequest{
+		ID:     id,
+		Parent: parent,
+	}
+	body, err := d.p.Client().Stream("GraphDriver.Diff", args)
+	if err != nil {
+		return nil, err
+	}
+	return body, nil
+}
+
+// Changes returns the list of changes between the layer with the given id
+// and its parent, as computed by the remote driver.
+func (d *graphDriverProxy) Changes(id, parent string) ([]archive.Change, error) {
+	args := &graphDriverRequest{
+		ID:     id,
+		Parent: parent,
+	}
+	var ret graphDriverResponse
+	if err := d.p.Client().Call("GraphDriver.Changes", args, &ret); err != nil {
+		return nil, err
+	}
+	if ret.Err != "" {
+		return nil, errors.New(ret.Err)
+	}
+
+	return ret.Changes, nil
+}
+
+// ApplyDiff streams the given diff to the remote driver and returns the
+// resulting layer size.  id and parent travel in the URL query string
+// because the request body carries the tar stream itself.
+func (d *graphDriverProxy) ApplyDiff(id, parent string, diff io.Reader) (int64, error) {
+	var ret graphDriverResponse
+	if err := d.p.Client().SendFile(fmt.Sprintf("GraphDriver.ApplyDiff?id=%s&parent=%s", id, parent), diff, &ret); err != nil {
+		return -1, err
+	}
+	if ret.Err != "" {
+		return -1, errors.New(ret.Err)
+	}
+	return ret.Size, nil
+}
+
+// DiffSize returns the size in bytes of the changes between the layer with
+// the given id and its parent, as computed by the remote driver.
+func (d *graphDriverProxy) DiffSize(id, parent string) (int64, error) {
+	args := &graphDriverRequest{
+		ID:     id,
+		Parent: parent,
+	}
+	var ret graphDriverResponse
+	if err := d.p.Client().Call("GraphDriver.DiffSize", args, &ret); err != nil {
+		return -1, err
+	}
+	if ret.Err != "" {
+		return -1, errors.New(ret.Err)
+	}
+	return ret.Size, nil
+}
diff --git a/vendor/github.com/docker/docker/image/fs.go b/vendor/github.com/docker/docker/image/fs.go
new file mode 100644
index 0000000000000000000000000000000000000000..39cfbf5d74d75e57f35bbf314c95a8c754601a3d
--- /dev/null
+++ b/vendor/github.com/docker/docker/image/fs.go
@@ -0,0 +1,173 @@
+package image
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sync"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/docker/pkg/ioutils"
+)
+
+// DigestWalkFunc is function called by StoreBackend.Walk
+type DigestWalkFunc func(id digest.Digest) error
+
+// StoreBackend provides interface for image.Store persistence
+type StoreBackend interface {
+	Walk(f DigestWalkFunc) error
+	Get(id digest.Digest) ([]byte, error)
+	Set(data []byte) (digest.Digest, error)
+	Delete(id digest.Digest) error
+	SetMetadata(id digest.Digest, key string, data []byte) error
+	GetMetadata(id digest.Digest, key string) ([]byte, error)
+	DeleteMetadata(id digest.Digest, key string) error
+}
+
+// fs implements StoreBackend using the filesystem.
+// The embedded RWMutex serializes all reads/writes to the store directory.
+type fs struct {
+	sync.RWMutex
+	root string
+}
+
+const (
+	// contentDirName is the subdirectory of root holding config blobs,
+	// keyed by digest algorithm and hex.
+	contentDirName  = "content"
+	// metadataDirName is the subdirectory of root holding per-image
+	// metadata files.
+	metadataDirName = "metadata"
+)
+
+// NewFSStoreBackend returns new filesystem based backend for image.Store
+func NewFSStoreBackend(root string) (StoreBackend, error) {
+	return newFSStore(root)
+}
+
+// newFSStore creates the content and metadata directories for the
+// canonical digest algorithm (sha256) under root, with owner-only (0700)
+// permissions.
+func newFSStore(root string) (*fs, error) {
+	s := &fs{
+		root: root,
+	}
+	if err := os.MkdirAll(filepath.Join(root, contentDirName, string(digest.Canonical)), 0700); err != nil {
+		return nil, err
+	}
+	if err := os.MkdirAll(filepath.Join(root, metadataDirName, string(digest.Canonical)), 0700); err != nil {
+		return nil, err
+	}
+	return s, nil
+}
+
+// contentFile returns the path of the content blob for dgst.
+func (s *fs) contentFile(dgst digest.Digest) string {
+	return filepath.Join(s.root, contentDirName, string(dgst.Algorithm()), dgst.Hex())
+}
+
+// metadataDir returns the path of the metadata directory for dgst.
+func (s *fs) metadataDir(dgst digest.Digest) string {
+	return filepath.Join(s.root, metadataDirName, string(dgst.Algorithm()), dgst.Hex())
+}
+
+// Walk calls the supplied callback for each image ID in the storage backend.
+// The read lock is held only while listing the directory, not while the
+// callbacks run.  Entries whose names are not valid digests are skipped.
+func (s *fs) Walk(f DigestWalkFunc) error {
+	// Only Canonical digest (sha256) is currently supported
+	s.RLock()
+	dir, err := ioutil.ReadDir(filepath.Join(s.root, contentDirName, string(digest.Canonical)))
+	s.RUnlock()
+	if err != nil {
+		return err
+	}
+	for _, v := range dir {
+		dgst := digest.NewDigestFromHex(string(digest.Canonical), v.Name())
+		if err := dgst.Validate(); err != nil {
+			logrus.Debugf("Skipping invalid digest %s: %s", dgst, err)
+			continue
+		}
+		if err := f(dgst); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Get returns the content stored under a given digest.
+func (s *fs) Get(dgst digest.Digest) ([]byte, error) {
+	s.RLock()
+	defer s.RUnlock()
+
+	return s.get(dgst)
+}
+
+// get reads the content blob and verifies it still hashes to dgst,
+// guarding against on-disk corruption.  Callers must hold the lock.
+func (s *fs) get(dgst digest.Digest) ([]byte, error) {
+	content, err := ioutil.ReadFile(s.contentFile(dgst))
+	if err != nil {
+		return nil, err
+	}
+
+	// todo: maybe optional
+	if digest.FromBytes(content) != dgst {
+		return nil, fmt.Errorf("failed to verify: %v", dgst)
+	}
+
+	return content, nil
+}
+
+// Set stores content by checksum.
+// The digest of the data becomes its key; the write is atomic (write to a
+// temp file, then rename) via ioutils.AtomicWriteFile.
+func (s *fs) Set(data []byte) (digest.Digest, error) {
+	s.Lock()
+	defer s.Unlock()
+
+	if len(data) == 0 {
+		return "", fmt.Errorf("Invalid empty data")
+	}
+
+	dgst := digest.FromBytes(data)
+	if err := ioutils.AtomicWriteFile(s.contentFile(dgst), data, 0600); err != nil {
+		return "", err
+	}
+
+	return dgst, nil
+}
+
+// Delete removes content and metadata files associated with the digest.
+// Metadata is removed first so a partial failure cannot leave metadata for
+// content that was already deleted.
+func (s *fs) Delete(dgst digest.Digest) error {
+	s.Lock()
+	defer s.Unlock()
+
+	if err := os.RemoveAll(s.metadataDir(dgst)); err != nil {
+		return err
+	}
+	if err := os.Remove(s.contentFile(dgst)); err != nil {
+		return err
+	}
+	return nil
+}
+
+// SetMetadata sets metadata for a given ID. It fails if there's no base file.
+func (s *fs) SetMetadata(dgst digest.Digest, key string, data []byte) error {
+	s.Lock()
+	defer s.Unlock()
+	// Verify the content blob exists (and is intact) before attaching
+	// metadata to it.
+	if _, err := s.get(dgst); err != nil {
+		return err
+	}
+
+	baseDir := filepath.Join(s.metadataDir(dgst))
+	if err := os.MkdirAll(baseDir, 0700); err != nil {
+		return err
+	}
+	return ioutils.AtomicWriteFile(filepath.Join(s.metadataDir(dgst), key), data, 0600)
+}
+
+// GetMetadata returns metadata for a given digest.
+// Like SetMetadata, it first verifies the base content blob exists.
+func (s *fs) GetMetadata(dgst digest.Digest, key string) ([]byte, error) {
+	s.RLock()
+	defer s.RUnlock()
+
+	if _, err := s.get(dgst); err != nil {
+		return nil, err
+	}
+	return ioutil.ReadFile(filepath.Join(s.metadataDir(dgst), key))
+}
+
+// DeleteMetadata removes the metadata associated with a digest.
+// Removing a key that does not exist is not an error (RemoveAll semantics).
+func (s *fs) DeleteMetadata(dgst digest.Digest, key string) error {
+	s.Lock()
+	defer s.Unlock()
+
+	return os.RemoveAll(filepath.Join(s.metadataDir(dgst), key))
+}
diff --git a/vendor/github.com/docker/docker/image/image.go b/vendor/github.com/docker/docker/image/image.go
new file mode 100644
index 0000000000000000000000000000000000000000..29a990a55660394282a751dcb58e8824a2861325
--- /dev/null
+++ b/vendor/github.com/docker/docker/image/image.go
@@ -0,0 +1,150 @@
+package image
+
+import (
+	"encoding/json"
+	"errors"
+	"io"
+	"time"
+
+	"github.com/docker/distribution/digest"
+	"github.com/docker/docker/api/types/container"
+)
+
+// ID is the content-addressable ID of an image.
+type ID digest.Digest
+
+// String returns the ID in its digest string form.
+func (id ID) String() string {
+	return id.Digest().String()
+}
+
+// Digest converts ID into a digest
+func (id ID) Digest() digest.Digest {
+	return digest.Digest(id)
+}
+
+// IDFromDigest creates an ID from a digest
+func IDFromDigest(digest digest.Digest) ID {
+	return ID(digest)
+}
+
+// V1Image stores the V1 image configuration.
+type V1Image struct {
+	// ID a unique 64 character identifier of the image
+	ID string `json:"id,omitempty"`
+	// Parent id of the image
+	Parent string `json:"parent,omitempty"`
+	// Comment user added comment
+	Comment string `json:"comment,omitempty"`
+	// Created timestamp when image was created
+	Created time.Time `json:"created"`
+	// Container is the id of the container used to commit
+	Container string `json:"container,omitempty"`
+	// ContainerConfig is the configuration of the container that is committed into the image
+	ContainerConfig container.Config `json:"container_config,omitempty"`
+	// DockerVersion specifies version on which image is built
+	DockerVersion string `json:"docker_version,omitempty"`
+	// Author of the image
+	Author string `json:"author,omitempty"`
+	// Config is the configuration of the container received from the client
+	Config *container.Config `json:"config,omitempty"`
+	// Architecture is the hardware that the image is build and runs on
+	Architecture string `json:"architecture,omitempty"`
+	// OS is the operating system used to build and run the image
+	OS string `json:"os,omitempty"`
+	// Size is the total size of the image including all layers it is composed of
+	Size int64 `json:",omitempty"`
+}
+
+// Image stores the image configuration
+type Image struct {
+	V1Image
+	Parent     ID        `json:"parent,omitempty"`
+	RootFS     *RootFS   `json:"rootfs,omitempty"`
+	History    []History `json:"history,omitempty"`
+	OSVersion  string    `json:"os.version,omitempty"`
+	OSFeatures []string  `json:"os.features,omitempty"`
+
+	// rawJSON caches the immutable JSON associated with this image.
+	rawJSON []byte
+
+	// computedID is the ID computed from the hash of the image config.
+	// Not to be confused with the legacy V1 ID in V1Image.
+	computedID ID
+}
+
+// RawJSON returns the immutable JSON associated with the image.
+func (img *Image) RawJSON() []byte {
+	return img.rawJSON
+}
+
+// ID returns the image's content-addressable ID.
+func (img *Image) ID() ID {
+	return img.computedID
+}
+
+// ImageID stringifies ID.
+func (img *Image) ImageID() string {
+	return img.ID().String()
+}
+
+// RunConfig returns the image's container config.
+func (img *Image) RunConfig() *container.Config {
+	return img.Config
+}
+
+// MarshalJSON serializes the image to JSON. It sorts the top-level keys so
+// that JSON that's been manipulated by a push/pull cycle with a legacy
+// registry won't end up with a different key order.
+func (img *Image) MarshalJSON() ([]byte, error) {
+	// The type alias drops Image's MarshalJSON method, avoiding infinite
+	// recursion on the first Marshal pass.
+	type MarshalImage Image
+
+	pass1, err := json.Marshal(MarshalImage(*img))
+	if err != nil {
+		return nil, err
+	}
+
+	// Round-trip through a map: encoding/json emits map keys sorted.
+	var c map[string]*json.RawMessage
+	if err := json.Unmarshal(pass1, &c); err != nil {
+		return nil, err
+	}
+	return json.Marshal(c)
+}
+
+// History stores build commands that were used to create an image
+type History struct {
+	// Created timestamp for build point
+	Created time.Time `json:"created"`
+	// Author of the build point
+	Author string `json:"author,omitempty"`
+	// CreatedBy keeps the Dockerfile command used while building image.
+	CreatedBy string `json:"created_by,omitempty"`
+	// Comment is custom message set by the user when creating the image.
+	Comment string `json:"comment,omitempty"`
+	// EmptyLayer is set to true if this history item did not generate a
+	// layer. Otherwise, the history item is associated with the next
+	// layer in the RootFS section.
+	EmptyLayer bool `json:"empty_layer,omitempty"`
+}
+
+// Exporter provides interface for exporting and importing images
+type Exporter interface {
+	Load(io.ReadCloser, io.Writer, bool) error
+	// TODO: Load(net.Context, io.ReadCloser, <- chan StatusMessage) error
+	Save([]string, io.Writer) error
+}
+
+// NewFromJSON creates an Image configuration from json.
+// The input must contain a RootFS key; the raw bytes are cached on the
+// returned Image (see RawJSON).
+func NewFromJSON(src []byte) (*Image, error) {
+	img := &Image{}
+
+	if err := json.Unmarshal(src, img); err != nil {
+		return nil, err
+	}
+	if img.RootFS == nil {
+		return nil, errors.New("Invalid image JSON, no RootFS key.")
+	}
+
+	img.rawJSON = src
+
+	return img, nil
+}
diff --git a/vendor/github.com/docker/docker/image/rootfs.go b/vendor/github.com/docker/docker/image/rootfs.go
new file mode 100644
index 0000000000000000000000000000000000000000..7b24e3ed1e2fd9565cbb7035ba4246883fe82757
--- /dev/null
+++ b/vendor/github.com/docker/docker/image/rootfs.go
@@ -0,0 +1,44 @@
+package image
+
+import (
+	"runtime"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/layer"
+)
+
+// TypeLayers is used for RootFS.Type for filesystems organized into layers.
+const TypeLayers = "layers"
+
+// typeLayersWithBase is an older format used by Windows up to v1.12. We
+// explicitly handle this as an error case to ensure that a daemon which still
+// has an older image like this on disk can still start, even though the
+// image itself is not usable. See https://github.com/docker/docker/pull/25806.
+const typeLayersWithBase = "layers+base"
+
+// RootFS describes images root filesystem
+// This is currently a placeholder that only supports layers. In the future
+// this can be made into an interface that supports different implementations.
+type RootFS struct {
+	Type    string         `json:"type"`
+	DiffIDs []layer.DiffID `json:"diff_ids,omitempty"`
+}
+
+// NewRootFS returns empty RootFS struct
+func NewRootFS() *RootFS {
+	return &RootFS{Type: TypeLayers}
+}
+
+// Append appends a new diffID to rootfs
+func (r *RootFS) Append(id layer.DiffID) {
+	r.DiffIDs = append(r.DiffIDs, id)
+}
+
+// ChainID returns the ChainID for the top layer in RootFS.
+// The unsupported legacy Windows "layers+base" type yields an empty
+// ChainID (with a warning) rather than an error, so the daemon can start.
+func (r *RootFS) ChainID() layer.ChainID {
+	if runtime.GOOS == "windows" && r.Type == typeLayersWithBase {
+		logrus.Warnf("Layer type is unsupported on this platform. DiffIDs: '%v'", r.DiffIDs)
+		return ""
+	}
+	return layer.CreateChainID(r.DiffIDs)
+}
diff --git a/vendor/github.com/docker/docker/image/store.go b/vendor/github.com/docker/docker/image/store.go
new file mode 100644
index 0000000000000000000000000000000000000000..b61c456097ca7f9546e18bc6ec18d83fb0df73de
--- /dev/null
+++ b/vendor/github.com/docker/docker/image/store.go
@@ -0,0 +1,295 @@
+package image
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"sync"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/docker/layer"
+)
+
+// Store is an interface for creating and accessing images
+type Store interface {
+	Create(config []byte) (ID, error)
+	Get(id ID) (*Image, error)
+	Delete(id ID) ([]layer.Metadata, error)
+	Search(partialID string) (ID, error)
+	SetParent(id ID, parent ID) error
+	GetParent(id ID) (ID, error)
+	Children(id ID) []ID
+	Map() map[ID]*Image
+	Heads() map[ID]*Image
+}
+
+// LayerGetReleaser is a minimal interface for getting and releasing images.
+type LayerGetReleaser interface {
+	Get(layer.ChainID) (layer.Layer, error)
+	Release(layer.Layer) ([]layer.Metadata, error)
+}
+
+// imageMeta is the in-memory record kept per image: a reference to its top
+// layer (nil for images with an empty rootfs) and the set of child images.
+type imageMeta struct {
+	layer    layer.Layer
+	children map[ID]struct{}
+}
+
+// store implements Store; the embedded Mutex guards images and digestSet.
+type store struct {
+	sync.Mutex
+	ls        LayerGetReleaser
+	images    map[ID]*imageMeta
+	fs        StoreBackend
+	digestSet *digest.Set
+}
+
+// NewImageStore returns new store object for given layer store
+func NewImageStore(fs StoreBackend, ls LayerGetReleaser) (Store, error) {
+	is := &store{
+		ls:        ls,
+		images:    make(map[ID]*imageMeta),
+		fs:        fs,
+		digestSet: digest.NewSet(),
+	}
+
+	// load all current images and retain layers
+	if err := is.restore(); err != nil {
+		return nil, err
+	}
+
+	return is, nil
+}
+
+// restore rebuilds the in-memory state from the backend: it walks every
+// stored image config, takes a reference on each image's top layer, and
+// then makes a second pass to populate the parent/children maps.  Images
+// that fail to load are logged and skipped rather than aborting startup.
+func (is *store) restore() error {
+	err := is.fs.Walk(func(dgst digest.Digest) error {
+		img, err := is.Get(IDFromDigest(dgst))
+		if err != nil {
+			logrus.Errorf("invalid image %v, %v", dgst, err)
+			return nil
+		}
+		var l layer.Layer
+		// An empty ChainID means the image has no layers; keep l nil.
+		if chainID := img.RootFS.ChainID(); chainID != "" {
+			l, err = is.ls.Get(chainID)
+			if err != nil {
+				return err
+			}
+		}
+		if err := is.digestSet.Add(dgst); err != nil {
+			return err
+		}
+
+		imageMeta := &imageMeta{
+			layer:    l,
+			children: make(map[ID]struct{}),
+		}
+
+		is.images[IDFromDigest(dgst)] = imageMeta
+
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+
+	// Second pass to fill in children maps
+	for id := range is.images {
+		if parent, err := is.GetParent(id); err == nil {
+			if parentMeta := is.images[parent]; parentMeta != nil {
+				parentMeta.children[id] = struct{}{}
+			}
+		}
+	}
+
+	return nil
+}
+
+// Create validates and stores a new image config, returning its
+// content-addressable ID.  Storing an identical config again is a no-op
+// that returns the existing ID.  The image's top layer is retained for the
+// lifetime of the store entry.
+func (is *store) Create(config []byte) (ID, error) {
+	var img Image
+	err := json.Unmarshal(config, &img)
+	if err != nil {
+		return "", err
+	}
+
+	// Must reject any config that references diffIDs from the history
+	// which aren't among the rootfs layers.
+	rootFSLayers := make(map[layer.DiffID]struct{})
+	for _, diffID := range img.RootFS.DiffIDs {
+		rootFSLayers[diffID] = struct{}{}
+	}
+
+	layerCounter := 0
+	for _, h := range img.History {
+		if !h.EmptyLayer {
+			layerCounter++
+		}
+	}
+	if layerCounter > len(img.RootFS.DiffIDs) {
+		return "", errors.New("too many non-empty layers in History section")
+	}
+
+	dgst, err := is.fs.Set(config)
+	if err != nil {
+		return "", err
+	}
+	imageID := IDFromDigest(dgst)
+
+	is.Lock()
+	defer is.Unlock()
+
+	// Content-addressed: same config means same image; nothing more to do.
+	if _, exists := is.images[imageID]; exists {
+		return imageID, nil
+	}
+
+	layerID := img.RootFS.ChainID()
+
+	var l layer.Layer
+	if layerID != "" {
+		l, err = is.ls.Get(layerID)
+		if err != nil {
+			return "", err
+		}
+	}
+
+	imageMeta := &imageMeta{
+		layer:    l,
+		children: make(map[ID]struct{}),
+	}
+
+	is.images[imageID] = imageMeta
+	// Roll back the map entry if the digest set rejects the ID.
+	if err := is.digestSet.Add(imageID.Digest()); err != nil {
+		delete(is.images, imageID)
+		return "", err
+	}
+
+	return imageID, nil
+}
+
+// Search resolves a full or partial image ID (digest prefix) to a full ID,
+// translating a not-found digest error into a "No such image" error.
+func (is *store) Search(term string) (ID, error) {
+	is.Lock()
+	defer is.Unlock()
+
+	dgst, err := is.digestSet.Lookup(term)
+	if err != nil {
+		if err == digest.ErrDigestNotFound {
+			err = fmt.Errorf("No such image: %s", term)
+		}
+		return "", err
+	}
+	return IDFromDigest(dgst), nil
+}
+
+// Get loads the image config for id from the backend and parses it.
+// A missing parent record is not an error; Parent is simply left empty.
+func (is *store) Get(id ID) (*Image, error) {
+	// todo: Check if image is in images
+	// todo: Detect manual insertions and start using them
+	config, err := is.fs.Get(id.Digest())
+	if err != nil {
+		return nil, err
+	}
+
+	img, err := NewFromJSON(config)
+	if err != nil {
+		return nil, err
+	}
+	img.computedID = id
+
+	img.Parent, err = is.GetParent(id)
+	if err != nil {
+		img.Parent = ""
+	}
+
+	return img, nil
+}
+
+// Delete removes an image from the store: it detaches children, unlinks it
+// from its parent, drops it from the digest set and backend, and releases
+// its layer reference (returning the freed layer metadata, if any).
+func (is *store) Delete(id ID) ([]layer.Metadata, error) {
+	is.Lock()
+	defer is.Unlock()
+
+	imageMeta := is.images[id]
+	if imageMeta == nil {
+		return nil, fmt.Errorf("unrecognized image ID %s", id.String())
+	}
+	// Orphan the children: drop their persisted "parent" metadata.
+	for id := range imageMeta.children {
+		is.fs.DeleteMetadata(id.Digest(), "parent")
+	}
+	if parent, err := is.GetParent(id); err == nil && is.images[parent] != nil {
+		delete(is.images[parent].children, id)
+	}
+
+	if err := is.digestSet.Remove(id.Digest()); err != nil {
+		logrus.Errorf("error removing %s from digest set: %q", id, err)
+	}
+	delete(is.images, id)
+	is.fs.Delete(id.Digest())
+
+	if imageMeta.layer != nil {
+		return is.ls.Release(imageMeta.layer)
+	}
+	return nil, nil
+}
+
+// SetParent records parent as the parent of id, both in memory (children
+// maps) and persistently as "parent" metadata.  Any previous parent link
+// for id is removed first.
+func (is *store) SetParent(id, parent ID) error {
+	is.Lock()
+	defer is.Unlock()
+	parentMeta := is.images[parent]
+	if parentMeta == nil {
+		return fmt.Errorf("unknown parent image ID %s", parent.String())
+	}
+	if parent, err := is.GetParent(id); err == nil && is.images[parent] != nil {
+		delete(is.images[parent].children, id)
+	}
+	parentMeta.children[id] = struct{}{}
+	return is.fs.SetMetadata(id.Digest(), "parent", []byte(parent))
+}
+
+// GetParent reads the persisted "parent" metadata for id.
+func (is *store) GetParent(id ID) (ID, error) {
+	d, err := is.fs.GetMetadata(id.Digest(), "parent")
+	if err != nil {
+		return "", err
+	}
+	return ID(d), nil // todo: validate?
+}
+
+// Children returns the IDs of the direct children of id.
+func (is *store) Children(id ID) []ID {
+	is.Lock()
+	defer is.Unlock()
+
+	return is.children(id)
+}
+
+// children is the lock-free variant of Children; callers must hold the
+// store lock.  Unknown IDs yield nil.
+func (is *store) children(id ID) []ID {
+	var ids []ID
+	if is.images[id] != nil {
+		for id := range is.images[id].children {
+			ids = append(ids, id)
+		}
+	}
+	return ids
+}
+
+// Heads returns only the images that have no children (leaf images).
+func (is *store) Heads() map[ID]*Image {
+	return is.imagesMap(false)
+}
+
+// Map returns all images in the store.
+func (is *store) Map() map[ID]*Image {
+	return is.imagesMap(true)
+}
+
+// imagesMap loads every known image; when all is false, images that have
+// children are skipped.  Images that fail to load are logged and omitted.
+func (is *store) imagesMap(all bool) map[ID]*Image {
+	is.Lock()
+	defer is.Unlock()
+
+	images := make(map[ID]*Image)
+
+	for id := range is.images {
+		if !all && len(is.children(id)) > 0 {
+			continue
+		}
+		img, err := is.Get(id)
+		if err != nil {
+			logrus.Errorf("invalid image access: %q, error: %q", id, err)
+			continue
+		}
+		images[id] = img
+	}
+	return images
+}
diff --git a/vendor/github.com/docker/docker/image/v1/imagev1.go b/vendor/github.com/docker/docker/image/v1/imagev1.go
new file mode 100644
index 0000000000000000000000000000000000000000..d498ddbc00b6f0988907f8861b0cb82a80d6a342
--- /dev/null
+++ b/vendor/github.com/docker/docker/image/v1/imagev1.go
@@ -0,0 +1,156 @@
+package v1
+
+import (
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"regexp"
+	"strings"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/docker/api/types/versions"
+	"github.com/docker/docker/image"
+	"github.com/docker/docker/layer"
+)
+
+// validHex matches a legacy 64-character lowercase hex image ID.
+var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`)
+
+// noFallbackMinVersion is the minimum version for which v1compatibility
+// information will not be marshaled through the Image struct to remove
+// blank fields.
+var noFallbackMinVersion = "1.8.3"
+
+// HistoryFromConfig creates a History struct from v1 configuration JSON
+func HistoryFromConfig(imageJSON []byte, emptyLayer bool) (image.History, error) {
+	// h is only returned on the error path (zero-valued History).
+	h := image.History{}
+	var v1Image image.V1Image
+	if err := json.Unmarshal(imageJSON, &v1Image); err != nil {
+		return h, err
+	}
+
+	return image.History{
+		Author:     v1Image.Author,
+		Created:    v1Image.Created,
+		CreatedBy:  strings.Join(v1Image.ContainerConfig.Cmd, " "),
+		Comment:    v1Image.Comment,
+		EmptyLayer: emptyLayer,
+	}, nil
+}
+
+// CreateID creates an ID from v1 image, layerID and parent ID.
+// Used for backwards compatibility with old clients.
+// The digest is computed over the v1 JSON with its legacy ID cleared and
+// layer_id (plus parent, when set) injected as extra keys.
+func CreateID(v1Image image.V1Image, layerID layer.ChainID, parent digest.Digest) (digest.Digest, error) {
+	v1Image.ID = ""
+	v1JSON, err := json.Marshal(v1Image)
+	if err != nil {
+		return "", err
+	}
+
+	var config map[string]*json.RawMessage
+	if err := json.Unmarshal(v1JSON, &config); err != nil {
+		return "", err
+	}
+
+	// FIXME: note that this is slightly incompatible with RootFS logic
+	config["layer_id"] = rawJSON(layerID)
+	if parent != "" {
+		config["parent"] = rawJSON(parent)
+	}
+
+	configJSON, err := json.Marshal(config)
+	if err != nil {
+		return "", err
+	}
+	logrus.Debugf("CreateV1ID %s", configJSON)
+
+	return digest.FromBytes(configJSON), nil
+}
+
+// MakeConfigFromV1Config creates an image config from the legacy V1 config format.
+// It strips the V1-only keys (id, parent, Size, parent_id, layer_id,
+// throwaway) and injects the supplied rootfs and history.  For configs
+// produced by Docker versions older than noFallbackMinVersion, the JSON is
+// first round-tripped through the V1Image struct to drop blank fields.
+func MakeConfigFromV1Config(imageJSON []byte, rootfs *image.RootFS, history []image.History) ([]byte, error) {
+	var dver struct {
+		DockerVersion string `json:"docker_version"`
+	}
+
+	if err := json.Unmarshal(imageJSON, &dver); err != nil {
+		return nil, err
+	}
+
+	useFallback := versions.LessThan(dver.DockerVersion, noFallbackMinVersion)
+
+	if useFallback {
+		var v1Image image.V1Image
+		err := json.Unmarshal(imageJSON, &v1Image)
+		if err != nil {
+			return nil, err
+		}
+		imageJSON, err = json.Marshal(v1Image)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	var c map[string]*json.RawMessage
+	if err := json.Unmarshal(imageJSON, &c); err != nil {
+		return nil, err
+	}
+
+	delete(c, "id")
+	delete(c, "parent")
+	delete(c, "Size") // Size is calculated from data on disk and is inconsistent
+	delete(c, "parent_id")
+	delete(c, "layer_id")
+	delete(c, "throwaway")
+
+	c["rootfs"] = rawJSON(rootfs)
+	c["history"] = rawJSON(history)
+
+	return json.Marshal(c)
+}
+
+// MakeV1ConfigFromConfig creates an legacy V1 image config from an Image struct
+// It removes every field declared on Image itself (found via reflection on
+// the json tags), then sets the legacy id/parent/throwaway keys.
+func MakeV1ConfigFromConfig(img *image.Image, v1ID, parentV1ID string, throwaway bool) ([]byte, error) {
+	// Top-level v1compatibility string should be a modified version of the
+	// image config.
+	var configAsMap map[string]*json.RawMessage
+	if err := json.Unmarshal(img.RawJSON(), &configAsMap); err != nil {
+		return nil, err
+	}
+
+	// Delete fields that didn't exist in old manifest
+	imageType := reflect.TypeOf(img).Elem()
+	for i := 0; i < imageType.NumField(); i++ {
+		f := imageType.Field(i)
+		jsonName := strings.Split(f.Tag.Get("json"), ",")[0]
+		// Parent is handled specially below.
+		if jsonName != "" && jsonName != "parent" {
+			delete(configAsMap, jsonName)
+		}
+	}
+	configAsMap["id"] = rawJSON(v1ID)
+	if parentV1ID != "" {
+		configAsMap["parent"] = rawJSON(parentV1ID)
+	}
+	if throwaway {
+		configAsMap["throwaway"] = rawJSON(true)
+	}
+
+	return json.Marshal(configAsMap)
+}
+
+// rawJSON marshals value into a *json.RawMessage, returning nil when
+// marshaling fails.
+func rawJSON(value interface{}) *json.RawMessage {
+	jsonval, err := json.Marshal(value)
+	if err != nil {
+		return nil
+	}
+	return (*json.RawMessage)(&jsonval)
+}
+
+// ValidateID checks whether an ID string is a valid image ID.
+// Valid IDs are exactly 64 lowercase hex characters (see validHex).
+func ValidateID(id string) error {
+	if ok := validHex.MatchString(id); !ok {
+		return fmt.Errorf("image ID %q is invalid", id)
+	}
+	return nil
+}
diff --git a/vendor/github.com/docker/docker/layer/empty.go b/vendor/github.com/docker/docker/layer/empty.go
new file mode 100644
index 0000000000000000000000000000000000000000..3b6ffc82f71c0091327c9b42fb955cdd360aa7a2
--- /dev/null
+++ b/vendor/github.com/docker/docker/layer/empty.go
@@ -0,0 +1,56 @@
+package layer
+
+import (
+	"archive/tar"
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+)
+
+// DigestSHA256EmptyTar is the canonical sha256 digest of empty tar file -
+// (1024 NULL bytes)
+const DigestSHA256EmptyTar = DiffID("sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef")
+
+type emptyLayer struct{}
+
+// EmptyLayer is a layer that corresponds to empty tar.
+var EmptyLayer = &emptyLayer{}
+
+func (el *emptyLayer) TarStream() (io.ReadCloser, error) {
+	buf := new(bytes.Buffer)
+	tarWriter := tar.NewWriter(buf)
+	tarWriter.Close()
+	return ioutil.NopCloser(buf), nil
+}
+
+func (el *emptyLayer) TarStreamFrom(p ChainID) (io.ReadCloser, error) {
+	if p == "" {
+		return el.TarStream()
+	}
+	return nil, fmt.Errorf("can't get parent tar stream of an empty layer")
+}
+
+func (el *emptyLayer) ChainID() ChainID {
+	return ChainID(DigestSHA256EmptyTar)
+}
+
+func (el *emptyLayer) DiffID() DiffID {
+	return DigestSHA256EmptyTar
+}
+
+func (el *emptyLayer) Parent() Layer {
+	return nil
+}
+
+func (el *emptyLayer) Size() (size int64, err error) {
+	return 0, nil
+}
+
+func (el *emptyLayer) DiffSize() (size int64, err error) {
+	return 0, nil
+}
+
+func (el *emptyLayer) Metadata() (map[string]string, error) {
+	return make(map[string]string), nil
+}
diff --git a/vendor/github.com/docker/docker/layer/filestore.go b/vendor/github.com/docker/docker/layer/filestore.go
new file mode 100644
index 0000000000000000000000000000000000000000..42b45556e34e37ba934ab6ecef21ebacb6c4182d
--- /dev/null
+++ b/vendor/github.com/docker/docker/layer/filestore.go
@@ -0,0 +1,354 @@
+package layer
+
+import (
+	"compress/gzip"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/docker/pkg/ioutils"
+)
+
+var (
+	stringIDRegexp      = regexp.MustCompile(`^[a-f0-9]{64}(-init)?$`)
+	supportedAlgorithms = []digest.Algorithm{
+		digest.SHA256,
+		// digest.SHA384, // Currently not used
+		// digest.SHA512, // Currently not used
+	}
+)
+
+type fileMetadataStore struct {
+	root string
+}
+
+type fileMetadataTransaction struct {
+	store *fileMetadataStore
+	ws    *ioutils.AtomicWriteSet
+}
+
+// NewFSMetadataStore returns an instance of a metadata store
+// which is backed by files on disk using the provided root
+// as the root of metadata files.
+func NewFSMetadataStore(root string) (MetadataStore, error) {
+	if err := os.MkdirAll(root, 0700); err != nil {
+		return nil, err
+	}
+	return &fileMetadataStore{
+		root: root,
+	}, nil
+}
+
+func (fms *fileMetadataStore) getLayerDirectory(layer ChainID) string {
+	dgst := digest.Digest(layer)
+	return filepath.Join(fms.root, string(dgst.Algorithm()), dgst.Hex())
+}
+
+func (fms *fileMetadataStore) getLayerFilename(layer ChainID, filename string) string {
+	return filepath.Join(fms.getLayerDirectory(layer), filename)
+}
+
+func (fms *fileMetadataStore) getMountDirectory(mount string) string {
+	return filepath.Join(fms.root, "mounts", mount)
+}
+
+func (fms *fileMetadataStore) getMountFilename(mount, filename string) string {
+	return filepath.Join(fms.getMountDirectory(mount), filename)
+}
+
+func (fms *fileMetadataStore) StartTransaction() (MetadataTransaction, error) {
+	tmpDir := filepath.Join(fms.root, "tmp")
+	if err := os.MkdirAll(tmpDir, 0755); err != nil {
+		return nil, err
+	}
+	ws, err := ioutils.NewAtomicWriteSet(tmpDir)
+	if err != nil {
+		return nil, err
+	}
+
+	return &fileMetadataTransaction{
+		store: fms,
+		ws:    ws,
+	}, nil
+}
+
+func (fm *fileMetadataTransaction) SetSize(size int64) error {
+	content := fmt.Sprintf("%d", size)
+	return fm.ws.WriteFile("size", []byte(content), 0644)
+}
+
+func (fm *fileMetadataTransaction) SetParent(parent ChainID) error {
+	return fm.ws.WriteFile("parent", []byte(digest.Digest(parent).String()), 0644)
+}
+
+func (fm *fileMetadataTransaction) SetDiffID(diff DiffID) error {
+	return fm.ws.WriteFile("diff", []byte(digest.Digest(diff).String()), 0644)
+}
+
+func (fm *fileMetadataTransaction) SetCacheID(cacheID string) error {
+	return fm.ws.WriteFile("cache-id", []byte(cacheID), 0644)
+}
+
+func (fm *fileMetadataTransaction) SetDescriptor(ref distribution.Descriptor) error {
+	jsonRef, err := json.Marshal(ref)
+	if err != nil {
+		return err
+	}
+	return fm.ws.WriteFile("descriptor.json", jsonRef, 0644)
+}
+
+func (fm *fileMetadataTransaction) TarSplitWriter(compressInput bool) (io.WriteCloser, error) {
+	f, err := fm.ws.FileWriter("tar-split.json.gz", os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)
+	if err != nil {
+		return nil, err
+	}
+	var wc io.WriteCloser
+	if compressInput {
+		wc = gzip.NewWriter(f)
+	} else {
+		wc = f
+	}
+
+	return ioutils.NewWriteCloserWrapper(wc, func() error {
+		wc.Close()
+		return f.Close()
+	}), nil
+}
+
+func (fm *fileMetadataTransaction) Commit(layer ChainID) error {
+	finalDir := fm.store.getLayerDirectory(layer)
+	if err := os.MkdirAll(filepath.Dir(finalDir), 0755); err != nil {
+		return err
+	}
+
+	return fm.ws.Commit(finalDir)
+}
+
+func (fm *fileMetadataTransaction) Cancel() error {
+	return fm.ws.Cancel()
+}
+
+func (fm *fileMetadataTransaction) String() string {
+	return fm.ws.String()
+}
+
+func (fms *fileMetadataStore) GetSize(layer ChainID) (int64, error) {
+	content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "size"))
+	if err != nil {
+		return 0, err
+	}
+
+	size, err := strconv.ParseInt(string(content), 10, 64)
+	if err != nil {
+		return 0, err
+	}
+
+	return size, nil
+}
+
+func (fms *fileMetadataStore) GetParent(layer ChainID) (ChainID, error) {
+	content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "parent"))
+	if err != nil {
+		if os.IsNotExist(err) {
+			return "", nil
+		}
+		return "", err
+	}
+
+	dgst, err := digest.ParseDigest(strings.TrimSpace(string(content)))
+	if err != nil {
+		return "", err
+	}
+
+	return ChainID(dgst), nil
+}
+
+func (fms *fileMetadataStore) GetDiffID(layer ChainID) (DiffID, error) {
+	content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "diff"))
+	if err != nil {
+		return "", err
+	}
+
+	dgst, err := digest.ParseDigest(strings.TrimSpace(string(content)))
+	if err != nil {
+		return "", err
+	}
+
+	return DiffID(dgst), nil
+}
+
+func (fms *fileMetadataStore) GetCacheID(layer ChainID) (string, error) {
+	contentBytes, err := ioutil.ReadFile(fms.getLayerFilename(layer, "cache-id"))
+	if err != nil {
+		return "", err
+	}
+	content := strings.TrimSpace(string(contentBytes))
+
+	if !stringIDRegexp.MatchString(content) {
+		return "", errors.New("invalid cache id value")
+	}
+
+	return content, nil
+}
+
+func (fms *fileMetadataStore) GetDescriptor(layer ChainID) (distribution.Descriptor, error) {
+	content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "descriptor.json"))
+	if err != nil {
+		if os.IsNotExist(err) {
+			// only return empty descriptor to represent what is stored
+			return distribution.Descriptor{}, nil
+		}
+		return distribution.Descriptor{}, err
+	}
+
+	var ref distribution.Descriptor
+	err = json.Unmarshal(content, &ref)
+	if err != nil {
+		return distribution.Descriptor{}, err
+	}
+	return ref, err
+}
+
+func (fms *fileMetadataStore) TarSplitReader(layer ChainID) (io.ReadCloser, error) {
+	fz, err := os.Open(fms.getLayerFilename(layer, "tar-split.json.gz"))
+	if err != nil {
+		return nil, err
+	}
+	f, err := gzip.NewReader(fz)
+	if err != nil {
+		return nil, err
+	}
+
+	return ioutils.NewReadCloserWrapper(f, func() error {
+		f.Close()
+		return fz.Close()
+	}), nil
+}
+
+func (fms *fileMetadataStore) SetMountID(mount string, mountID string) error {
+	if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil {
+		return err
+	}
+	return ioutil.WriteFile(fms.getMountFilename(mount, "mount-id"), []byte(mountID), 0644)
+}
+
+func (fms *fileMetadataStore) SetInitID(mount string, init string) error {
+	if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil {
+		return err
+	}
+	return ioutil.WriteFile(fms.getMountFilename(mount, "init-id"), []byte(init), 0644)
+}
+
+func (fms *fileMetadataStore) SetMountParent(mount string, parent ChainID) error {
+	if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil {
+		return err
+	}
+	return ioutil.WriteFile(fms.getMountFilename(mount, "parent"), []byte(digest.Digest(parent).String()), 0644)
+}
+
+func (fms *fileMetadataStore) GetMountID(mount string) (string, error) {
+	contentBytes, err := ioutil.ReadFile(fms.getMountFilename(mount, "mount-id"))
+	if err != nil {
+		return "", err
+	}
+	content := strings.TrimSpace(string(contentBytes))
+
+	if !stringIDRegexp.MatchString(content) {
+		return "", errors.New("invalid mount id value")
+	}
+
+	return content, nil
+}
+
+func (fms *fileMetadataStore) GetInitID(mount string) (string, error) {
+	contentBytes, err := ioutil.ReadFile(fms.getMountFilename(mount, "init-id"))
+	if err != nil {
+		if os.IsNotExist(err) {
+			return "", nil
+		}
+		return "", err
+	}
+	content := strings.TrimSpace(string(contentBytes))
+
+	if !stringIDRegexp.MatchString(content) {
+		return "", errors.New("invalid init id value")
+	}
+
+	return content, nil
+}
+
+func (fms *fileMetadataStore) GetMountParent(mount string) (ChainID, error) {
+	content, err := ioutil.ReadFile(fms.getMountFilename(mount, "parent"))
+	if err != nil {
+		if os.IsNotExist(err) {
+			return "", nil
+		}
+		return "", err
+	}
+
+	dgst, err := digest.ParseDigest(strings.TrimSpace(string(content)))
+	if err != nil {
+		return "", err
+	}
+
+	return ChainID(dgst), nil
+}
+
+func (fms *fileMetadataStore) List() ([]ChainID, []string, error) {
+	var ids []ChainID
+	for _, algorithm := range supportedAlgorithms {
+		fileInfos, err := ioutil.ReadDir(filepath.Join(fms.root, string(algorithm)))
+		if err != nil {
+			if os.IsNotExist(err) {
+				continue
+			}
+			return nil, nil, err
+		}
+
+		for _, fi := range fileInfos {
+			if fi.IsDir() && fi.Name() != "mounts" {
+				dgst := digest.NewDigestFromHex(string(algorithm), fi.Name())
+				if err := dgst.Validate(); err != nil {
+					logrus.Debugf("Ignoring invalid digest %s:%s", algorithm, fi.Name())
+				} else {
+					ids = append(ids, ChainID(dgst))
+				}
+			}
+		}
+	}
+
+	fileInfos, err := ioutil.ReadDir(filepath.Join(fms.root, "mounts"))
+	if err != nil {
+		if os.IsNotExist(err) {
+			return ids, []string{}, nil
+		}
+		return nil, nil, err
+	}
+
+	var mounts []string
+	for _, fi := range fileInfos {
+		if fi.IsDir() {
+			mounts = append(mounts, fi.Name())
+		}
+	}
+
+	return ids, mounts, nil
+}
+
+func (fms *fileMetadataStore) Remove(layer ChainID) error {
+	return os.RemoveAll(fms.getLayerDirectory(layer))
+}
+
+func (fms *fileMetadataStore) RemoveMount(mount string) error {
+	return os.RemoveAll(fms.getMountDirectory(mount))
+}
diff --git a/vendor/github.com/docker/docker/layer/layer.go b/vendor/github.com/docker/docker/layer/layer.go
new file mode 100644
index 0000000000000000000000000000000000000000..ec1d4346d7d978538e4fe9769c167aea343b61ed
--- /dev/null
+++ b/vendor/github.com/docker/docker/layer/layer.go
@@ -0,0 +1,275 @@
+// Package layer is package for managing read-only
+// and read-write mounts on the union file system
+// driver. Read-only mounts are referenced using a
+// content hash and are protected from mutation in
+// the exposed interface. The tar format is used
+// to create read-only layers and export both
+// read-only and writable layers. The exported
+// tar data for a read-only layer should match
+// the tar used to create the layer.
+package layer
+
+import (
+	"errors"
+	"io"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/docker/pkg/archive"
+)
+
+var (
+	// ErrLayerDoesNotExist is used when an operation is
+	// attempted on a layer which does not exist.
+	ErrLayerDoesNotExist = errors.New("layer does not exist")
+
+	// ErrLayerNotRetained is used when a release is
+	// attempted on a layer which is not retained.
+	ErrLayerNotRetained = errors.New("layer not retained")
+
+	// ErrMountDoesNotExist is used when an operation is
+	// attempted on a mount layer which does not exist.
+	ErrMountDoesNotExist = errors.New("mount does not exist")
+
+	// ErrMountNameConflict is used when a mount is attempted
+	// to be created but there is already a mount with the name
+	// used for creation.
+	ErrMountNameConflict = errors.New("mount already exists with name")
+
+	// ErrActiveMount is used when an operation on a
+	// mount is attempted but the layer is still
+	// mounted and the operation cannot be performed.
+	ErrActiveMount = errors.New("mount still active")
+
+	// ErrNotMounted is used when requesting an active
+	// mount but the layer is not mounted.
+	ErrNotMounted = errors.New("not mounted")
+
+	// ErrMaxDepthExceeded is used when a layer is attempted
+	// to be created which would result in a layer depth
+	// greater than the 125 max.
+	ErrMaxDepthExceeded = errors.New("max depth exceeded")
+
+	// ErrNotSupported is used when the action is not supported
+	// on the current platform
+	ErrNotSupported = errors.New("not support on this platform")
+)
+
+// ChainID is the content-addressable ID of a layer.
+type ChainID digest.Digest
+
+// String returns a string rendition of a layer ID
+func (id ChainID) String() string {
+	return string(id)
+}
+
+// DiffID is the hash of an individual layer tar.
+type DiffID digest.Digest
+
+// String returns a string rendition of a layer DiffID
+func (diffID DiffID) String() string {
+	return string(diffID)
+}
+
+// TarStreamer represents an object which may
+// have its contents exported as a tar stream.
+type TarStreamer interface {
+	// TarStream returns a tar archive stream
+	// for the contents of a layer.
+	TarStream() (io.ReadCloser, error)
+}
+
+// Layer represents a read-only layer
+type Layer interface {
+	TarStreamer
+
+	// TarStreamFrom returns a tar archive stream for all the layer chain with
+	// arbitrary depth.
+	TarStreamFrom(ChainID) (io.ReadCloser, error)
+
+	// ChainID returns the content hash of the entire layer chain. The hash
+	// chain is made up of DiffID of top layer and all of its parents.
+	ChainID() ChainID
+
+	// DiffID returns the content hash of the layer
+	// tar stream used to create this layer.
+	DiffID() DiffID
+
+	// Parent returns the next layer in the layer chain.
+	Parent() Layer
+
+	// Size returns the size of the entire layer chain. The size
+	// is calculated from the total size of all files in the layers.
+	Size() (int64, error)
+
+	// DiffSize returns the size difference of the top layer
+	// from parent layer.
+	DiffSize() (int64, error)
+
+	// Metadata returns the low level storage metadata associated
+	// with layer.
+	Metadata() (map[string]string, error)
+}
+
+// RWLayer represents a layer which is
+// read and writable
+type RWLayer interface {
+	TarStreamer
+
+	// Name of mounted layer
+	Name() string
+
+	// Parent returns the layer which the writable
+	// layer was created from.
+	Parent() Layer
+
+	// Mount mounts the RWLayer and returns the filesystem path
+	// the to the writable layer.
+	Mount(mountLabel string) (string, error)
+
+	// Unmount unmounts the RWLayer. This should be called
+	// for every mount. If there are multiple mount calls
+	// this operation will only decrement the internal mount counter.
+	Unmount() error
+
+	// Size represents the size of the writable layer
+	// as calculated by the total size of the files
+	// changed in the mutable layer.
+	Size() (int64, error)
+
+	// Changes returns the set of changes for the mutable layer
+	// from the base layer.
+	Changes() ([]archive.Change, error)
+
+	// Metadata returns the low level metadata for the mutable layer
+	Metadata() (map[string]string, error)
+}
+
+// Metadata holds information about a
+// read-only layer
+type Metadata struct {
+	// ChainID is the content hash of the layer
+	ChainID ChainID
+
+	// DiffID is the hash of the tar data used to
+	// create the layer
+	DiffID DiffID
+
+	// Size is the size of the layer and all parents
+	Size int64
+
+	// DiffSize is the size of the top layer
+	DiffSize int64
+}
+
+// MountInit is a function to initialize a
+// writable mount. Changes made here will
+// not be included in the Tar stream of the
+// RWLayer.
+type MountInit func(root string) error
+
+// Store represents a backend for managing both
+// read-only and read-write layers.
+type Store interface {
+	Register(io.Reader, ChainID) (Layer, error)
+	Get(ChainID) (Layer, error)
+	Map() map[ChainID]Layer
+	Release(Layer) ([]Metadata, error)
+
+	CreateRWLayer(id string, parent ChainID, mountLabel string, initFunc MountInit, storageOpt map[string]string) (RWLayer, error)
+	GetRWLayer(id string) (RWLayer, error)
+	GetMountID(id string) (string, error)
+	ReleaseRWLayer(RWLayer) ([]Metadata, error)
+
+	Cleanup() error
+	DriverStatus() [][2]string
+	DriverName() string
+}
+
+// DescribableStore represents a layer store capable of storing
+// descriptors for layers.
+type DescribableStore interface {
+	RegisterWithDescriptor(io.Reader, ChainID, distribution.Descriptor) (Layer, error)
+}
+
+// MetadataTransaction represents functions for setting layer metadata
+// with a single transaction.
+type MetadataTransaction interface {
+	SetSize(int64) error
+	SetParent(parent ChainID) error
+	SetDiffID(DiffID) error
+	SetCacheID(string) error
+	SetDescriptor(distribution.Descriptor) error
+	TarSplitWriter(compressInput bool) (io.WriteCloser, error)
+
+	Commit(ChainID) error
+	Cancel() error
+	String() string
+}
+
+// MetadataStore represents a backend for persisting
+// metadata about layers and providing the metadata
+// for restoring a Store.
+type MetadataStore interface {
+	// StartTransaction starts an update for new metadata
+	// which will be used to represent an ID on commit.
+	StartTransaction() (MetadataTransaction, error)
+
+	GetSize(ChainID) (int64, error)
+	GetParent(ChainID) (ChainID, error)
+	GetDiffID(ChainID) (DiffID, error)
+	GetCacheID(ChainID) (string, error)
+	GetDescriptor(ChainID) (distribution.Descriptor, error)
+	TarSplitReader(ChainID) (io.ReadCloser, error)
+
+	SetMountID(string, string) error
+	SetInitID(string, string) error
+	SetMountParent(string, ChainID) error
+
+	GetMountID(string) (string, error)
+	GetInitID(string) (string, error)
+	GetMountParent(string) (ChainID, error)
+
+	// List returns the full list of referenced
+	// read-only and read-write layers
+	List() ([]ChainID, []string, error)
+
+	Remove(ChainID) error
+	RemoveMount(string) error
+}
+
+// CreateChainID returns ID for a layerDigest slice
+func CreateChainID(dgsts []DiffID) ChainID {
+	return createChainIDFromParent("", dgsts...)
+}
+
+func createChainIDFromParent(parent ChainID, dgsts ...DiffID) ChainID {
+	if len(dgsts) == 0 {
+		return parent
+	}
+	if parent == "" {
+		return createChainIDFromParent(ChainID(dgsts[0]), dgsts[1:]...)
+	}
+	// H = "H(n-1) SHA256(n)"
+	dgst := digest.FromBytes([]byte(string(parent) + " " + string(dgsts[0])))
+	return createChainIDFromParent(ChainID(dgst), dgsts[1:]...)
+}
+
+// ReleaseAndLog releases the provided layer from the given layer
+// store, logging any error and release metadata
+func ReleaseAndLog(ls Store, l Layer) {
+	metadata, err := ls.Release(l)
+	if err != nil {
+		logrus.Errorf("Error releasing layer %s: %v", l.ChainID(), err)
+	}
+	LogReleaseMetadata(metadata)
+}
+
+// LogReleaseMetadata logs a metadata array, uses this to
+// ensure consistent logging for release metadata
+func LogReleaseMetadata(metadatas []Metadata) {
+	for _, metadata := range metadatas {
+		logrus.Infof("Layer %s cleaned up", metadata.ChainID)
+	}
+}
diff --git a/vendor/github.com/docker/docker/layer/layer_store.go b/vendor/github.com/docker/docker/layer/layer_store.go
new file mode 100644
index 0000000000000000000000000000000000000000..1a1ff9fe59b42840543db91a3a9f38df80c86b1f
--- /dev/null
+++ b/vendor/github.com/docker/docker/layer/layer_store.go
@@ -0,0 +1,684 @@
+package layer
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"sync"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/docker/daemon/graphdriver"
+	"github.com/docker/docker/pkg/idtools"
+	"github.com/docker/docker/pkg/plugingetter"
+	"github.com/docker/docker/pkg/stringid"
+	"github.com/vbatts/tar-split/tar/asm"
+	"github.com/vbatts/tar-split/tar/storage"
+)
+
+// maxLayerDepth represents the maximum number of
+// layers which can be chained together. 125 was
+// chosen to account for the 127 max in some
+// graphdrivers plus the 2 additional layers
+// used to create a rwlayer.
+const maxLayerDepth = 125
+
+type layerStore struct {
+	store  MetadataStore
+	driver graphdriver.Driver
+
+	layerMap map[ChainID]*roLayer
+	layerL   sync.Mutex
+
+	mounts map[string]*mountedLayer
+	mountL sync.Mutex
+}
+
+// StoreOptions are the options used to create a new Store instance
+type StoreOptions struct {
+	StorePath                 string
+	MetadataStorePathTemplate string
+	GraphDriver               string
+	GraphDriverOptions        []string
+	UIDMaps                   []idtools.IDMap
+	GIDMaps                   []idtools.IDMap
+	PluginGetter              plugingetter.PluginGetter
+	ExperimentalEnabled       bool
+}
+
+// NewStoreFromOptions creates a new Store instance
+func NewStoreFromOptions(options StoreOptions) (Store, error) {
+	driver, err := graphdriver.New(options.GraphDriver, options.PluginGetter, graphdriver.Options{
+		Root:                options.StorePath,
+		DriverOptions:       options.GraphDriverOptions,
+		UIDMaps:             options.UIDMaps,
+		GIDMaps:             options.GIDMaps,
+		ExperimentalEnabled: options.ExperimentalEnabled,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("error initializing graphdriver: %v", err)
+	}
+	logrus.Debugf("Using graph driver %s", driver)
+
+	fms, err := NewFSMetadataStore(fmt.Sprintf(options.MetadataStorePathTemplate, driver))
+	if err != nil {
+		return nil, err
+	}
+
+	return NewStoreFromGraphDriver(fms, driver)
+}
+
+// NewStoreFromGraphDriver creates a new Store instance using the provided
+// metadata store and graph driver. The metadata store will be used to restore
+// the Store.
+func NewStoreFromGraphDriver(store MetadataStore, driver graphdriver.Driver) (Store, error) {
+	ls := &layerStore{
+		store:    store,
+		driver:   driver,
+		layerMap: map[ChainID]*roLayer{},
+		mounts:   map[string]*mountedLayer{},
+	}
+
+	ids, mounts, err := store.List()
+	if err != nil {
+		return nil, err
+	}
+
+	for _, id := range ids {
+		l, err := ls.loadLayer(id)
+		if err != nil {
+			logrus.Debugf("Failed to load layer %s: %s", id, err)
+			continue
+		}
+		if l.parent != nil {
+			l.parent.referenceCount++
+		}
+	}
+
+	for _, mount := range mounts {
+		if err := ls.loadMount(mount); err != nil {
+			logrus.Debugf("Failed to load mount %s: %s", mount, err)
+		}
+	}
+
+	return ls, nil
+}
+
+func (ls *layerStore) loadLayer(layer ChainID) (*roLayer, error) {
+	cl, ok := ls.layerMap[layer]
+	if ok {
+		return cl, nil
+	}
+
+	diff, err := ls.store.GetDiffID(layer)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get diff id for %s: %s", layer, err)
+	}
+
+	size, err := ls.store.GetSize(layer)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get size for %s: %s", layer, err)
+	}
+
+	cacheID, err := ls.store.GetCacheID(layer)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get cache id for %s: %s", layer, err)
+	}
+
+	parent, err := ls.store.GetParent(layer)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get parent for %s: %s", layer, err)
+	}
+
+	descriptor, err := ls.store.GetDescriptor(layer)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get descriptor for %s: %s", layer, err)
+	}
+
+	cl = &roLayer{
+		chainID:    layer,
+		diffID:     diff,
+		size:       size,
+		cacheID:    cacheID,
+		layerStore: ls,
+		references: map[Layer]struct{}{},
+		descriptor: descriptor,
+	}
+
+	if parent != "" {
+		p, err := ls.loadLayer(parent)
+		if err != nil {
+			return nil, err
+		}
+		cl.parent = p
+	}
+
+	ls.layerMap[cl.chainID] = cl
+
+	return cl, nil
+}
+
+func (ls *layerStore) loadMount(mount string) error {
+	if _, ok := ls.mounts[mount]; ok {
+		return nil
+	}
+
+	mountID, err := ls.store.GetMountID(mount)
+	if err != nil {
+		return err
+	}
+
+	initID, err := ls.store.GetInitID(mount)
+	if err != nil {
+		return err
+	}
+
+	parent, err := ls.store.GetMountParent(mount)
+	if err != nil {
+		return err
+	}
+
+	ml := &mountedLayer{
+		name:       mount,
+		mountID:    mountID,
+		initID:     initID,
+		layerStore: ls,
+		references: map[RWLayer]*referencedRWLayer{},
+	}
+
+	if parent != "" {
+		p, err := ls.loadLayer(parent)
+		if err != nil {
+			return err
+		}
+		ml.parent = p
+
+		p.referenceCount++
+	}
+
+	ls.mounts[ml.name] = ml
+
+	return nil
+}
+
+func (ls *layerStore) applyTar(tx MetadataTransaction, ts io.Reader, parent string, layer *roLayer) error {
+	digester := digest.Canonical.New()
+	tr := io.TeeReader(ts, digester.Hash())
+
+	tsw, err := tx.TarSplitWriter(true)
+	if err != nil {
+		return err
+	}
+	metaPacker := storage.NewJSONPacker(tsw)
+	defer tsw.Close()
+
+	// we're passing nil here for the file putter, because the ApplyDiff will
+	// handle the extraction of the archive
+	rdr, err := asm.NewInputTarStream(tr, metaPacker, nil)
+	if err != nil {
+		return err
+	}
+
+	applySize, err := ls.driver.ApplyDiff(layer.cacheID, parent, rdr)
+	if err != nil {
+		return err
+	}
+
+	// Discard trailing data but ensure metadata is picked up to reconstruct stream
+	io.Copy(ioutil.Discard, rdr) // ignore error as reader may be closed
+
+	layer.size = applySize
+	layer.diffID = DiffID(digester.Digest())
+
+	logrus.Debugf("Applied tar %s to %s, size: %d", layer.diffID, layer.cacheID, applySize)
+
+	return nil
+}
+
+func (ls *layerStore) Register(ts io.Reader, parent ChainID) (Layer, error) {
+	return ls.registerWithDescriptor(ts, parent, distribution.Descriptor{})
+}
+
+func (ls *layerStore) registerWithDescriptor(ts io.Reader, parent ChainID, descriptor distribution.Descriptor) (Layer, error) {
+	// err is used to hold the error which will always trigger
+	// cleanup of creates sources but may not be an error returned
+	// to the caller (already exists).
+	var err error
+	var pid string
+	var p *roLayer
+	if string(parent) != "" {
+		p = ls.get(parent)
+		if p == nil {
+			return nil, ErrLayerDoesNotExist
+		}
+		pid = p.cacheID
+		// Release parent chain if error
+		defer func() {
+			if err != nil {
+				ls.layerL.Lock()
+				ls.releaseLayer(p)
+				ls.layerL.Unlock()
+			}
+		}()
+		if p.depth() >= maxLayerDepth {
+			err = ErrMaxDepthExceeded
+			return nil, err
+		}
+	}
+
+	// Create new roLayer
+	layer := &roLayer{
+		parent:         p,
+		cacheID:        stringid.GenerateRandomID(),
+		referenceCount: 1,
+		layerStore:     ls,
+		references:     map[Layer]struct{}{},
+		descriptor:     descriptor,
+	}
+
+	if err = ls.driver.Create(layer.cacheID, pid, nil); err != nil {
+		return nil, err
+	}
+
+	tx, err := ls.store.StartTransaction()
+	if err != nil {
+		return nil, err
+	}
+
+	defer func() {
+		if err != nil {
+			logrus.Debugf("Cleaning up layer %s: %v", layer.cacheID, err)
+			if err := ls.driver.Remove(layer.cacheID); err != nil {
+				logrus.Errorf("Error cleaning up cache layer %s: %v", layer.cacheID, err)
+			}
+			if err := tx.Cancel(); err != nil {
+				logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err)
+			}
+		}
+	}()
+
+	if err = ls.applyTar(tx, ts, pid, layer); err != nil {
+		return nil, err
+	}
+
+	if layer.parent == nil {
+		layer.chainID = ChainID(layer.diffID)
+	} else {
+		layer.chainID = createChainIDFromParent(layer.parent.chainID, layer.diffID)
+	}
+
+	if err = storeLayer(tx, layer); err != nil {
+		return nil, err
+	}
+
+	ls.layerL.Lock()
+	defer ls.layerL.Unlock()
+
+	if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil {
+		// Set error for cleanup, but do not return the error
+		err = errors.New("layer already exists")
+		return existingLayer.getReference(), nil
+	}
+
+	if err = tx.Commit(layer.chainID); err != nil {
+		return nil, err
+	}
+
+	ls.layerMap[layer.chainID] = layer
+
+	return layer.getReference(), nil
+}
+
+func (ls *layerStore) getWithoutLock(layer ChainID) *roLayer {
+	l, ok := ls.layerMap[layer]
+	if !ok {
+		return nil
+	}
+
+	l.referenceCount++
+
+	return l
+}
+
+func (ls *layerStore) get(l ChainID) *roLayer {
+	ls.layerL.Lock()
+	defer ls.layerL.Unlock()
+	return ls.getWithoutLock(l)
+}
+
+func (ls *layerStore) Get(l ChainID) (Layer, error) {
+	ls.layerL.Lock()
+	defer ls.layerL.Unlock()
+
+	layer := ls.getWithoutLock(l)
+	if layer == nil {
+		return nil, ErrLayerDoesNotExist
+	}
+
+	return layer.getReference(), nil
+}
+
+func (ls *layerStore) Map() map[ChainID]Layer {
+	ls.layerL.Lock()
+	defer ls.layerL.Unlock()
+
+	layers := map[ChainID]Layer{}
+
+	for k, v := range ls.layerMap {
+		layers[k] = v
+	}
+
+	return layers
+}
+
+func (ls *layerStore) deleteLayer(layer *roLayer, metadata *Metadata) error {
+	err := ls.driver.Remove(layer.cacheID)
+	if err != nil {
+		return err
+	}
+
+	err = ls.store.Remove(layer.chainID)
+	if err != nil {
+		return err
+	}
+	metadata.DiffID = layer.diffID
+	metadata.ChainID = layer.chainID
+	metadata.Size, err = layer.Size()
+	if err != nil {
+		return err
+	}
+	metadata.DiffSize = layer.size
+
+	return nil
+}
+
+func (ls *layerStore) releaseLayer(l *roLayer) ([]Metadata, error) {
+	depth := 0
+	removed := []Metadata{}
+	for {
+		if l.referenceCount == 0 {
+			panic("layer not retained")
+		}
+		l.referenceCount--
+		if l.referenceCount != 0 {
+			return removed, nil
+		}
+
+		if len(removed) == 0 && depth > 0 {
+			panic("cannot remove layer with child")
+		}
+		if l.hasReferences() {
+			panic("cannot delete referenced layer")
+		}
+		var metadata Metadata
+		if err := ls.deleteLayer(l, &metadata); err != nil {
+			return nil, err
+		}
+
+		delete(ls.layerMap, l.chainID)
+		removed = append(removed, metadata)
+
+		if l.parent == nil {
+			return removed, nil
+		}
+
+		depth++
+		l = l.parent
+	}
+}
+
+func (ls *layerStore) Release(l Layer) ([]Metadata, error) {
+	ls.layerL.Lock()
+	defer ls.layerL.Unlock()
+	layer, ok := ls.layerMap[l.ChainID()]
+	if !ok {
+		return []Metadata{}, nil
+	}
+	if !layer.hasReference(l) {
+		return nil, ErrLayerNotRetained
+	}
+
+	layer.deleteReference(l)
+
+	return ls.releaseLayer(layer)
+}
+
+func (ls *layerStore) CreateRWLayer(name string, parent ChainID, mountLabel string, initFunc MountInit, storageOpt map[string]string) (RWLayer, error) {
+	ls.mountL.Lock()
+	defer ls.mountL.Unlock()
+	m, ok := ls.mounts[name]
+	if ok {
+		return nil, ErrMountNameConflict
+	}
+
+	var err error
+	var pid string
+	var p *roLayer
+	if string(parent) != "" {
+		p = ls.get(parent)
+		if p == nil {
+			return nil, ErrLayerDoesNotExist
+		}
+		pid = p.cacheID
+
+		// Release parent chain if error
+		defer func() {
+			if err != nil {
+				ls.layerL.Lock()
+				ls.releaseLayer(p)
+				ls.layerL.Unlock()
+			}
+		}()
+	}
+
+	m = &mountedLayer{
+		name:       name,
+		parent:     p,
+		mountID:    ls.mountID(name),
+		layerStore: ls,
+		references: map[RWLayer]*referencedRWLayer{},
+	}
+
+	if initFunc != nil {
+		pid, err = ls.initMount(m.mountID, pid, mountLabel, initFunc, storageOpt)
+		if err != nil {
+			return nil, err
+		}
+		m.initID = pid
+	}
+
+	createOpts := &graphdriver.CreateOpts{
+		StorageOpt: storageOpt,
+	}
+
+	if err = ls.driver.CreateReadWrite(m.mountID, pid, createOpts); err != nil {
+		return nil, err
+	}
+
+	if err = ls.saveMount(m); err != nil {
+		return nil, err
+	}
+
+	return m.getReference(), nil
+}
+
+func (ls *layerStore) GetRWLayer(id string) (RWLayer, error) {
+	ls.mountL.Lock()
+	defer ls.mountL.Unlock()
+	mount, ok := ls.mounts[id]
+	if !ok {
+		return nil, ErrMountDoesNotExist
+	}
+
+	return mount.getReference(), nil
+}
+
+func (ls *layerStore) GetMountID(id string) (string, error) {
+	ls.mountL.Lock()
+	defer ls.mountL.Unlock()
+	mount, ok := ls.mounts[id]
+	if !ok {
+		return "", ErrMountDoesNotExist
+	}
+	logrus.Debugf("GetMountID id: %s -> mountID: %s", id, mount.mountID)
+
+	return mount.mountID, nil
+}
+
+func (ls *layerStore) ReleaseRWLayer(l RWLayer) ([]Metadata, error) {
+	ls.mountL.Lock()
+	defer ls.mountL.Unlock()
+	m, ok := ls.mounts[l.Name()]
+	if !ok {
+		return []Metadata{}, nil
+	}
+
+	if err := m.deleteReference(l); err != nil {
+		return nil, err
+	}
+
+	if m.hasReferences() {
+		return []Metadata{}, nil
+	}
+
+	if err := ls.driver.Remove(m.mountID); err != nil {
+		logrus.Errorf("Error removing mounted layer %s: %s", m.name, err)
+		m.retakeReference(l)
+		return nil, err
+	}
+
+	if m.initID != "" {
+		if err := ls.driver.Remove(m.initID); err != nil {
+			logrus.Errorf("Error removing init layer %s: %s", m.name, err)
+			m.retakeReference(l)
+			return nil, err
+		}
+	}
+
+	if err := ls.store.RemoveMount(m.name); err != nil {
+		logrus.Errorf("Error removing mount metadata: %s: %s", m.name, err)
+		m.retakeReference(l)
+		return nil, err
+	}
+
+	delete(ls.mounts, m.Name())
+
+	ls.layerL.Lock()
+	defer ls.layerL.Unlock()
+	if m.parent != nil {
+		return ls.releaseLayer(m.parent)
+	}
+
+	return []Metadata{}, nil
+}
+
+func (ls *layerStore) saveMount(mount *mountedLayer) error {
+	if err := ls.store.SetMountID(mount.name, mount.mountID); err != nil {
+		return err
+	}
+
+	if mount.initID != "" {
+		if err := ls.store.SetInitID(mount.name, mount.initID); err != nil {
+			return err
+		}
+	}
+
+	if mount.parent != nil {
+		if err := ls.store.SetMountParent(mount.name, mount.parent.chainID); err != nil {
+			return err
+		}
+	}
+
+	ls.mounts[mount.name] = mount
+
+	return nil
+}
+
+func (ls *layerStore) initMount(graphID, parent, mountLabel string, initFunc MountInit, storageOpt map[string]string) (string, error) {
+	// Use "<graph-id>-init" to maintain compatibility with graph drivers
+	// which are expecting this layer with this special name. If all
+	// graph drivers can be updated to not rely on knowing about this layer
+	// then the initID should be randomly generated.
+	initID := fmt.Sprintf("%s-init", graphID)
+
+	createOpts := &graphdriver.CreateOpts{
+		MountLabel: mountLabel,
+		StorageOpt: storageOpt,
+	}
+
+	if err := ls.driver.CreateReadWrite(initID, parent, createOpts); err != nil {
+		return "", err
+	}
+	p, err := ls.driver.Get(initID, "")
+	if err != nil {
+		return "", err
+	}
+
+	if err := initFunc(p); err != nil {
+		ls.driver.Put(initID)
+		return "", err
+	}
+
+	if err := ls.driver.Put(initID); err != nil {
+		return "", err
+	}
+
+	return initID, nil
+}
+
+func (ls *layerStore) assembleTarTo(graphID string, metadata io.ReadCloser, size *int64, w io.Writer) error {
+	diffDriver, ok := ls.driver.(graphdriver.DiffGetterDriver)
+	if !ok {
+		diffDriver = &naiveDiffPathDriver{ls.driver}
+	}
+
+	defer metadata.Close()
+
+	// get our relative path to the container
+	fileGetCloser, err := diffDriver.DiffGetter(graphID)
+	if err != nil {
+		return err
+	}
+	defer fileGetCloser.Close()
+
+	metaUnpacker := storage.NewJSONUnpacker(metadata)
+	upackerCounter := &unpackSizeCounter{metaUnpacker, size}
+	logrus.Debugf("Assembling tar data for %s", graphID)
+	return asm.WriteOutputTarStream(fileGetCloser, upackerCounter, w)
+}
+
+func (ls *layerStore) Cleanup() error {
+	return ls.driver.Cleanup()
+}
+
+func (ls *layerStore) DriverStatus() [][2]string {
+	return ls.driver.Status()
+}
+
+func (ls *layerStore) DriverName() string {
+	return ls.driver.String()
+}
+
+type naiveDiffPathDriver struct {
+	graphdriver.Driver
+}
+
+type fileGetPutter struct {
+	storage.FileGetter
+	driver graphdriver.Driver
+	id     string
+}
+
+func (w *fileGetPutter) Close() error {
+	return w.driver.Put(w.id)
+}
+
+func (n *naiveDiffPathDriver) DiffGetter(id string) (graphdriver.FileGetCloser, error) {
+	p, err := n.Driver.Get(id, "")
+	if err != nil {
+		return nil, err
+	}
+	return &fileGetPutter{storage.NewPathFileGetter(p), n.Driver, id}, nil
+}
diff --git a/vendor/github.com/docker/docker/layer/layer_store_windows.go b/vendor/github.com/docker/docker/layer/layer_store_windows.go
new file mode 100644
index 0000000000000000000000000000000000000000..1276a912cc8cec402e7a42c2a86e3386c6c493eb
--- /dev/null
+++ b/vendor/github.com/docker/docker/layer/layer_store_windows.go
@@ -0,0 +1,11 @@
+package layer
+
+import (
+	"io"
+
+	"github.com/docker/distribution"
+)
+
+func (ls *layerStore) RegisterWithDescriptor(ts io.Reader, parent ChainID, descriptor distribution.Descriptor) (Layer, error) {
+	return ls.registerWithDescriptor(ts, parent, descriptor)
+}
diff --git a/vendor/github.com/docker/docker/layer/layer_unix.go b/vendor/github.com/docker/docker/layer/layer_unix.go
new file mode 100644
index 0000000000000000000000000000000000000000..776b78ac02b2886b60ad39a67170135e5b6ec758
--- /dev/null
+++ b/vendor/github.com/docker/docker/layer/layer_unix.go
@@ -0,0 +1,9 @@
+// +build linux freebsd darwin openbsd solaris
+
+package layer
+
+import "github.com/docker/docker/pkg/stringid"
+
+func (ls *layerStore) mountID(name string) string {
+	return stringid.GenerateRandomID()
+}
diff --git a/vendor/github.com/docker/docker/layer/layer_windows.go b/vendor/github.com/docker/docker/layer/layer_windows.go
new file mode 100644
index 0000000000000000000000000000000000000000..e20311a09149b53f243515fda7e060a51e624044
--- /dev/null
+++ b/vendor/github.com/docker/docker/layer/layer_windows.go
@@ -0,0 +1,98 @@
+package layer
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/docker/daemon/graphdriver"
+)
+
+// GetLayerPath returns the path to a layer
+func GetLayerPath(s Store, layer ChainID) (string, error) {
+	ls, ok := s.(*layerStore)
+	if !ok {
+		return "", errors.New("unsupported layer store")
+	}
+	ls.layerL.Lock()
+	defer ls.layerL.Unlock()
+
+	rl, ok := ls.layerMap[layer]
+	if !ok {
+		return "", ErrLayerDoesNotExist
+	}
+
+	path, err := ls.driver.Get(rl.cacheID, "")
+	if err != nil {
+		return "", err
+	}
+
+	if err := ls.driver.Put(rl.cacheID); err != nil {
+		return "", err
+	}
+
+	return path, nil
+}
+
+func (ls *layerStore) RegisterDiffID(graphID string, size int64) (Layer, error) {
+	var err error // this is used for cleanup in existingLayer case
+	diffID := digest.FromBytes([]byte(graphID))
+
+	// Create new roLayer
+	layer := &roLayer{
+		cacheID:        graphID,
+		diffID:         DiffID(diffID),
+		referenceCount: 1,
+		layerStore:     ls,
+		references:     map[Layer]struct{}{},
+		size:           size,
+	}
+
+	tx, err := ls.store.StartTransaction()
+	if err != nil {
+		return nil, err
+	}
+	defer func() {
+		if err != nil {
+			if err := tx.Cancel(); err != nil {
+				logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err)
+			}
+		}
+	}()
+
+	layer.chainID = createChainIDFromParent("", layer.diffID)
+
+	if !ls.driver.Exists(layer.cacheID) {
+		return nil, fmt.Errorf("layer %q is unknown to driver", layer.cacheID)
+	}
+	if err = storeLayer(tx, layer); err != nil {
+		return nil, err
+	}
+
+	ls.layerL.Lock()
+	defer ls.layerL.Unlock()
+
+	if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil {
+		// Set error for cleanup, but do not return
+		err = errors.New("layer already exists")
+		return existingLayer.getReference(), nil
+	}
+
+	if err = tx.Commit(layer.chainID); err != nil {
+		return nil, err
+	}
+
+	ls.layerMap[layer.chainID] = layer
+
+	return layer.getReference(), nil
+}
+
+func (ls *layerStore) mountID(name string) string {
+	// Windows has issues if the container ID doesn't match the mount ID
+	return name
+}
+
+func (ls *layerStore) GraphDriver() graphdriver.Driver {
+	return ls.driver
+}
diff --git a/vendor/github.com/docker/docker/layer/migration.go b/vendor/github.com/docker/docker/layer/migration.go
new file mode 100644
index 0000000000000000000000000000000000000000..b45c31099d612ed55cd3160c949770df15382cd2
--- /dev/null
+++ b/vendor/github.com/docker/docker/layer/migration.go
@@ -0,0 +1,256 @@
+package layer
+
+import (
+	"compress/gzip"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/distribution/digest"
+	"github.com/vbatts/tar-split/tar/asm"
+	"github.com/vbatts/tar-split/tar/storage"
+)
+
+// CreateRWLayerByGraphID creates a RWLayer in the layer store using
+// the provided name with the given graphID. To get the RWLayer
+// after migration the layer may be retrieved by the given name.
+func (ls *layerStore) CreateRWLayerByGraphID(name string, graphID string, parent ChainID) (err error) {
+	ls.mountL.Lock()
+	defer ls.mountL.Unlock()
+	m, ok := ls.mounts[name]
+	if ok {
+		if m.parent.chainID != parent {
+			return errors.New("name conflict, mismatched parent")
+		}
+		if m.mountID != graphID {
+			return errors.New("mount already exists")
+		}
+
+		return nil
+	}
+
+	if !ls.driver.Exists(graphID) {
+		return fmt.Errorf("graph ID does not exist: %q", graphID)
+	}
+
+	var p *roLayer
+	if string(parent) != "" {
+		p = ls.get(parent)
+		if p == nil {
+			return ErrLayerDoesNotExist
+		}
+
+		// Release parent chain if error
+		defer func() {
+			if err != nil {
+				ls.layerL.Lock()
+				ls.releaseLayer(p)
+				ls.layerL.Unlock()
+			}
+		}()
+	}
+
+	// TODO: Ensure graphID has correct parent
+
+	m = &mountedLayer{
+		name:       name,
+		parent:     p,
+		mountID:    graphID,
+		layerStore: ls,
+		references: map[RWLayer]*referencedRWLayer{},
+	}
+
+	// Check for existing init layer
+	initID := fmt.Sprintf("%s-init", graphID)
+	if ls.driver.Exists(initID) {
+		m.initID = initID
+	}
+
+	if err = ls.saveMount(m); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (ls *layerStore) ChecksumForGraphID(id, parent, oldTarDataPath, newTarDataPath string) (diffID DiffID, size int64, err error) {
+	defer func() {
+		if err != nil {
+			logrus.Debugf("could not get checksum for %q with tar-split: %q", id, err)
+			diffID, size, err = ls.checksumForGraphIDNoTarsplit(id, parent, newTarDataPath)
+		}
+	}()
+
+	if oldTarDataPath == "" {
+		err = errors.New("no tar-split file")
+		return
+	}
+
+	tarDataFile, err := os.Open(oldTarDataPath)
+	if err != nil {
+		return
+	}
+	defer tarDataFile.Close()
+	uncompressed, err := gzip.NewReader(tarDataFile)
+	if err != nil {
+		return
+	}
+
+	dgst := digest.Canonical.New()
+	err = ls.assembleTarTo(id, uncompressed, &size, dgst.Hash())
+	if err != nil {
+		return
+	}
+
+	diffID = DiffID(dgst.Digest())
+	err = os.RemoveAll(newTarDataPath)
+	if err != nil {
+		return
+	}
+	err = os.Link(oldTarDataPath, newTarDataPath)
+
+	return
+}
+
+func (ls *layerStore) checksumForGraphIDNoTarsplit(id, parent, newTarDataPath string) (diffID DiffID, size int64, err error) {
+	rawarchive, err := ls.driver.Diff(id, parent)
+	if err != nil {
+		return
+	}
+	defer rawarchive.Close()
+
+	f, err := os.Create(newTarDataPath)
+	if err != nil {
+		return
+	}
+	defer f.Close()
+	mfz := gzip.NewWriter(f)
+	defer mfz.Close()
+	metaPacker := storage.NewJSONPacker(mfz)
+
+	packerCounter := &packSizeCounter{metaPacker, &size}
+
+	archive, err := asm.NewInputTarStream(rawarchive, packerCounter, nil)
+	if err != nil {
+		return
+	}
+	dgst, err := digest.FromReader(archive)
+	if err != nil {
+		return
+	}
+	diffID = DiffID(dgst)
+	return
+}
+
+func (ls *layerStore) RegisterByGraphID(graphID string, parent ChainID, diffID DiffID, tarDataFile string, size int64) (Layer, error) {
+	// err is used to hold the error which will always trigger
+	// cleanup of created resources but may not be an error returned
+	// to the caller (already exists).
+	var err error
+	var p *roLayer
+	if string(parent) != "" {
+		p = ls.get(parent)
+		if p == nil {
+			return nil, ErrLayerDoesNotExist
+		}
+
+		// Release parent chain if error
+		defer func() {
+			if err != nil {
+				ls.layerL.Lock()
+				ls.releaseLayer(p)
+				ls.layerL.Unlock()
+			}
+		}()
+	}
+
+	// Create new roLayer
+	layer := &roLayer{
+		parent:         p,
+		cacheID:        graphID,
+		referenceCount: 1,
+		layerStore:     ls,
+		references:     map[Layer]struct{}{},
+		diffID:         diffID,
+		size:           size,
+		chainID:        createChainIDFromParent(parent, diffID),
+	}
+
+	ls.layerL.Lock()
+	defer ls.layerL.Unlock()
+
+	if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil {
+		// Set error for cleanup, but do not return
+		err = errors.New("layer already exists")
+		return existingLayer.getReference(), nil
+	}
+
+	tx, err := ls.store.StartTransaction()
+	if err != nil {
+		return nil, err
+	}
+
+	defer func() {
+		if err != nil {
+			logrus.Debugf("Cleaning up transaction after failed migration for %s: %v", graphID, err)
+			if err := tx.Cancel(); err != nil {
+				logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err)
+			}
+		}
+	}()
+
+	tsw, err := tx.TarSplitWriter(false)
+	if err != nil {
+		return nil, err
+	}
+	defer tsw.Close()
+	tdf, err := os.Open(tarDataFile)
+	if err != nil {
+		return nil, err
+	}
+	defer tdf.Close()
+	_, err = io.Copy(tsw, tdf)
+	if err != nil {
+		return nil, err
+	}
+
+	if err = storeLayer(tx, layer); err != nil {
+		return nil, err
+	}
+
+	if err = tx.Commit(layer.chainID); err != nil {
+		return nil, err
+	}
+
+	ls.layerMap[layer.chainID] = layer
+
+	return layer.getReference(), nil
+}
+
+type unpackSizeCounter struct {
+	unpacker storage.Unpacker
+	size     *int64
+}
+
+func (u *unpackSizeCounter) Next() (*storage.Entry, error) {
+	e, err := u.unpacker.Next()
+	if err == nil && u.size != nil {
+		*u.size += e.Size
+	}
+	return e, err
+}
+
+type packSizeCounter struct {
+	packer storage.Packer
+	size   *int64
+}
+
+func (p *packSizeCounter) AddEntry(e storage.Entry) (int, error) {
+	n, err := p.packer.AddEntry(e)
+	if err == nil && p.size != nil {
+		*p.size += e.Size
+	}
+	return n, err
+}
diff --git a/vendor/github.com/docker/docker/layer/mounted_layer.go b/vendor/github.com/docker/docker/layer/mounted_layer.go
new file mode 100644
index 0000000000000000000000000000000000000000..a5cfcfa9bd75141b853e10e57f9b8470ac03a530
--- /dev/null
+++ b/vendor/github.com/docker/docker/layer/mounted_layer.go
@@ -0,0 +1,99 @@
+package layer
+
+import (
+	"io"
+
+	"github.com/docker/docker/pkg/archive"
+)
+
+type mountedLayer struct {
+	name       string
+	mountID    string
+	initID     string
+	parent     *roLayer
+	path       string
+	layerStore *layerStore
+
+	references map[RWLayer]*referencedRWLayer
+}
+
+func (ml *mountedLayer) cacheParent() string {
+	if ml.initID != "" {
+		return ml.initID
+	}
+	if ml.parent != nil {
+		return ml.parent.cacheID
+	}
+	return ""
+}
+
+func (ml *mountedLayer) TarStream() (io.ReadCloser, error) {
+	return ml.layerStore.driver.Diff(ml.mountID, ml.cacheParent())
+}
+
+func (ml *mountedLayer) Name() string {
+	return ml.name
+}
+
+func (ml *mountedLayer) Parent() Layer {
+	if ml.parent != nil {
+		return ml.parent
+	}
+
+	// Return a nil interface instead of an interface wrapping a nil
+	// pointer.
+	return nil
+}
+
+func (ml *mountedLayer) Size() (int64, error) {
+	return ml.layerStore.driver.DiffSize(ml.mountID, ml.cacheParent())
+}
+
+func (ml *mountedLayer) Changes() ([]archive.Change, error) {
+	return ml.layerStore.driver.Changes(ml.mountID, ml.cacheParent())
+}
+
+func (ml *mountedLayer) Metadata() (map[string]string, error) {
+	return ml.layerStore.driver.GetMetadata(ml.mountID)
+}
+
+func (ml *mountedLayer) getReference() RWLayer {
+	ref := &referencedRWLayer{
+		mountedLayer: ml,
+	}
+	ml.references[ref] = ref
+
+	return ref
+}
+
+func (ml *mountedLayer) hasReferences() bool {
+	return len(ml.references) > 0
+}
+
+func (ml *mountedLayer) deleteReference(ref RWLayer) error {
+	if _, ok := ml.references[ref]; !ok {
+		return ErrLayerNotRetained
+	}
+	delete(ml.references, ref)
+	return nil
+}
+
+func (ml *mountedLayer) retakeReference(r RWLayer) {
+	if ref, ok := r.(*referencedRWLayer); ok {
+		ml.references[ref] = ref
+	}
+}
+
+type referencedRWLayer struct {
+	*mountedLayer
+}
+
+func (rl *referencedRWLayer) Mount(mountLabel string) (string, error) {
+	return rl.layerStore.driver.Get(rl.mountedLayer.mountID, mountLabel)
+}
+
+// Unmount decrements the activity count and unmounts the underlying layer
+// Callers should only call `Unmount` once per call to `Mount`, even on error.
+func (rl *referencedRWLayer) Unmount() error {
+	return rl.layerStore.driver.Put(rl.mountedLayer.mountID)
+}
diff --git a/vendor/github.com/docker/docker/layer/ro_layer.go b/vendor/github.com/docker/docker/layer/ro_layer.go
new file mode 100644
index 0000000000000000000000000000000000000000..7c8d233a350f976e4023306fdaf31fe4a35052b4
--- /dev/null
+++ b/vendor/github.com/docker/docker/layer/ro_layer.go
@@ -0,0 +1,192 @@
+package layer
+
+import (
+	"fmt"
+	"io"
+
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/digest"
+)
+
+type roLayer struct {
+	chainID    ChainID
+	diffID     DiffID
+	parent     *roLayer
+	cacheID    string
+	size       int64
+	layerStore *layerStore
+	descriptor distribution.Descriptor
+
+	referenceCount int
+	references     map[Layer]struct{}
+}
+
+// TarStream for roLayer guarantees that the data that is produced is the exact
+// data that the layer was registered with.
+func (rl *roLayer) TarStream() (io.ReadCloser, error) {
+	r, err := rl.layerStore.store.TarSplitReader(rl.chainID)
+	if err != nil {
+		return nil, err
+	}
+
+	pr, pw := io.Pipe()
+	go func() {
+		err := rl.layerStore.assembleTarTo(rl.cacheID, r, nil, pw)
+		if err != nil {
+			pw.CloseWithError(err)
+		} else {
+			pw.Close()
+		}
+	}()
+	rc, err := newVerifiedReadCloser(pr, digest.Digest(rl.diffID))
+	if err != nil {
+		return nil, err
+	}
+	return rc, nil
+}
+
+// TarStreamFrom does not make any guarantees as to the correctness of the produced
+// data. As such it should not be used when the layer content must be verified
+// to be an exact match to the registered layer.
+func (rl *roLayer) TarStreamFrom(parent ChainID) (io.ReadCloser, error) {
+	var parentCacheID string
+	for pl := rl.parent; pl != nil; pl = pl.parent {
+		if pl.chainID == parent {
+			parentCacheID = pl.cacheID
+			break
+		}
+	}
+
+	if parent != ChainID("") && parentCacheID == "" {
+		return nil, fmt.Errorf("layer ID '%s' is not a parent of the specified layer: cannot provide diff to non-parent", parent)
+	}
+	return rl.layerStore.driver.Diff(rl.cacheID, parentCacheID)
+}
+
+func (rl *roLayer) ChainID() ChainID {
+	return rl.chainID
+}
+
+func (rl *roLayer) DiffID() DiffID {
+	return rl.diffID
+}
+
+func (rl *roLayer) Parent() Layer {
+	if rl.parent == nil {
+		return nil
+	}
+	return rl.parent
+}
+
+func (rl *roLayer) Size() (size int64, err error) {
+	if rl.parent != nil {
+		size, err = rl.parent.Size()
+		if err != nil {
+			return
+		}
+	}
+
+	return size + rl.size, nil
+}
+
+func (rl *roLayer) DiffSize() (size int64, err error) {
+	return rl.size, nil
+}
+
+func (rl *roLayer) Metadata() (map[string]string, error) {
+	return rl.layerStore.driver.GetMetadata(rl.cacheID)
+}
+
+type referencedCacheLayer struct {
+	*roLayer
+}
+
+func (rl *roLayer) getReference() Layer {
+	ref := &referencedCacheLayer{
+		roLayer: rl,
+	}
+	rl.references[ref] = struct{}{}
+
+	return ref
+}
+
+func (rl *roLayer) hasReference(ref Layer) bool {
+	_, ok := rl.references[ref]
+	return ok
+}
+
+func (rl *roLayer) hasReferences() bool {
+	return len(rl.references) > 0
+}
+
+func (rl *roLayer) deleteReference(ref Layer) {
+	delete(rl.references, ref)
+}
+
+func (rl *roLayer) depth() int {
+	if rl.parent == nil {
+		return 1
+	}
+	return rl.parent.depth() + 1
+}
+
+func storeLayer(tx MetadataTransaction, layer *roLayer) error {
+	if err := tx.SetDiffID(layer.diffID); err != nil {
+		return err
+	}
+	if err := tx.SetSize(layer.size); err != nil {
+		return err
+	}
+	if err := tx.SetCacheID(layer.cacheID); err != nil {
+		return err
+	}
+	// Do not store empty descriptors
+	if layer.descriptor.Digest != "" {
+		if err := tx.SetDescriptor(layer.descriptor); err != nil {
+			return err
+		}
+	}
+	if layer.parent != nil {
+		if err := tx.SetParent(layer.parent.chainID); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func newVerifiedReadCloser(rc io.ReadCloser, dgst digest.Digest) (io.ReadCloser, error) {
+	verifier, err := digest.NewDigestVerifier(dgst)
+	if err != nil {
+		return nil, err
+	}
+	return &verifiedReadCloser{
+		rc:       rc,
+		dgst:     dgst,
+		verifier: verifier,
+	}, nil
+}
+
+type verifiedReadCloser struct {
+	rc       io.ReadCloser
+	dgst     digest.Digest
+	verifier digest.Verifier
+}
+
+func (vrc *verifiedReadCloser) Read(p []byte) (n int, err error) {
+	n, err = vrc.rc.Read(p)
+	if n > 0 {
+		if n, err := vrc.verifier.Write(p[:n]); err != nil {
+			return n, err
+		}
+	}
+	if err == io.EOF {
+		if !vrc.verifier.Verified() {
+			err = fmt.Errorf("could not verify layer data for: %s. This may be because internal files in the layer store were modified. Re-pulling or rebuilding this image may resolve the issue", vrc.dgst)
+		}
+	}
+	return
+}
+func (vrc *verifiedReadCloser) Close() error {
+	return vrc.rc.Close()
+}
diff --git a/vendor/github.com/docker/docker/layer/ro_layer_windows.go b/vendor/github.com/docker/docker/layer/ro_layer_windows.go
new file mode 100644
index 0000000000000000000000000000000000000000..32bd7182a3a37d830fb2652339b68c736ef49084
--- /dev/null
+++ b/vendor/github.com/docker/docker/layer/ro_layer_windows.go
@@ -0,0 +1,9 @@
+package layer
+
+import "github.com/docker/distribution"
+
+var _ distribution.Describable = &roLayer{}
+
+func (rl *roLayer) Descriptor() distribution.Descriptor {
+	return rl.descriptor
+}
diff --git a/vendor/github.com/docker/docker/oci/defaults_linux.go b/vendor/github.com/docker/docker/oci/defaults_linux.go
new file mode 100644
index 0000000000000000000000000000000000000000..8b3ce7281b7de9d50b09dc6f1f5b21c3b90a2978
--- /dev/null
+++ b/vendor/github.com/docker/docker/oci/defaults_linux.go
@@ -0,0 +1,168 @@
+package oci
+
+import (
+	"os"
+	"runtime"
+
+	"github.com/opencontainers/runtime-spec/specs-go"
+)
+
+func sPtr(s string) *string      { return &s }
+func iPtr(i int64) *int64        { return &i }
+func u32Ptr(i int64) *uint32     { u := uint32(i); return &u }
+func fmPtr(i int64) *os.FileMode { fm := os.FileMode(i); return &fm }
+
+// DefaultSpec returns default oci spec used by docker.
+func DefaultSpec() specs.Spec {
+	s := specs.Spec{
+		Version: specs.Version,
+		Platform: specs.Platform{
+			OS:   runtime.GOOS,
+			Arch: runtime.GOARCH,
+		},
+	}
+	s.Mounts = []specs.Mount{
+		{
+			Destination: "/proc",
+			Type:        "proc",
+			Source:      "proc",
+			Options:     []string{"nosuid", "noexec", "nodev"},
+		},
+		{
+			Destination: "/dev",
+			Type:        "tmpfs",
+			Source:      "tmpfs",
+			Options:     []string{"nosuid", "strictatime", "mode=755"},
+		},
+		{
+			Destination: "/dev/pts",
+			Type:        "devpts",
+			Source:      "devpts",
+			Options:     []string{"nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620", "gid=5"},
+		},
+		{
+			Destination: "/sys",
+			Type:        "sysfs",
+			Source:      "sysfs",
+			Options:     []string{"nosuid", "noexec", "nodev", "ro"},
+		},
+		{
+			Destination: "/sys/fs/cgroup",
+			Type:        "cgroup",
+			Source:      "cgroup",
+			Options:     []string{"ro", "nosuid", "noexec", "nodev"},
+		},
+		{
+			Destination: "/dev/mqueue",
+			Type:        "mqueue",
+			Source:      "mqueue",
+			Options:     []string{"nosuid", "noexec", "nodev"},
+		},
+	}
+	s.Process.Capabilities = []string{
+		"CAP_CHOWN",
+		"CAP_DAC_OVERRIDE",
+		"CAP_FSETID",
+		"CAP_FOWNER",
+		"CAP_MKNOD",
+		"CAP_NET_RAW",
+		"CAP_SETGID",
+		"CAP_SETUID",
+		"CAP_SETFCAP",
+		"CAP_SETPCAP",
+		"CAP_NET_BIND_SERVICE",
+		"CAP_SYS_CHROOT",
+		"CAP_KILL",
+		"CAP_AUDIT_WRITE",
+	}
+
+	s.Linux = &specs.Linux{
+		MaskedPaths: []string{
+			"/proc/kcore",
+			"/proc/latency_stats",
+			"/proc/timer_list",
+			"/proc/timer_stats",
+			"/proc/sched_debug",
+			"/sys/firmware",
+		},
+		ReadonlyPaths: []string{
+			"/proc/asound",
+			"/proc/bus",
+			"/proc/fs",
+			"/proc/irq",
+			"/proc/sys",
+			"/proc/sysrq-trigger",
+		},
+		Namespaces: []specs.Namespace{
+			{Type: "mount"},
+			{Type: "network"},
+			{Type: "uts"},
+			{Type: "pid"},
+			{Type: "ipc"},
+		},
+		// Devices implicitly contains the following devices:
+		// null, zero, full, random, urandom, tty, console, and ptmx.
+		// ptmx is a bind-mount or symlink of the container's ptmx.
+		// See also: https://github.com/opencontainers/runtime-spec/blob/master/config-linux.md#default-devices
+		Devices: []specs.Device{},
+		Resources: &specs.Resources{
+			Devices: []specs.DeviceCgroup{
+				{
+					Allow:  false,
+					Access: sPtr("rwm"),
+				},
+				{
+					Allow:  true,
+					Type:   sPtr("c"),
+					Major:  iPtr(1),
+					Minor:  iPtr(5),
+					Access: sPtr("rwm"),
+				},
+				{
+					Allow:  true,
+					Type:   sPtr("c"),
+					Major:  iPtr(1),
+					Minor:  iPtr(3),
+					Access: sPtr("rwm"),
+				},
+				{
+					Allow:  true,
+					Type:   sPtr("c"),
+					Major:  iPtr(1),
+					Minor:  iPtr(9),
+					Access: sPtr("rwm"),
+				},
+				{
+					Allow:  true,
+					Type:   sPtr("c"),
+					Major:  iPtr(1),
+					Minor:  iPtr(8),
+					Access: sPtr("rwm"),
+				},
+				{
+					Allow:  true,
+					Type:   sPtr("c"),
+					Major:  iPtr(5),
+					Minor:  iPtr(0),
+					Access: sPtr("rwm"),
+				},
+				{
+					Allow:  true,
+					Type:   sPtr("c"),
+					Major:  iPtr(5),
+					Minor:  iPtr(1),
+					Access: sPtr("rwm"),
+				},
+				{
+					Allow:  false,
+					Type:   sPtr("c"),
+					Major:  iPtr(10),
+					Minor:  iPtr(229),
+					Access: sPtr("rwm"),
+				},
+			},
+		},
+	}
+
+	return s
+}
diff --git a/vendor/github.com/docker/docker/oci/defaults_solaris.go b/vendor/github.com/docker/docker/oci/defaults_solaris.go
new file mode 100644
index 0000000000000000000000000000000000000000..85c8b68e16e9c78055263f182ffb46a923797b1b
--- /dev/null
+++ b/vendor/github.com/docker/docker/oci/defaults_solaris.go
@@ -0,0 +1,20 @@
+package oci
+
+import (
+	"runtime"
+
+	"github.com/opencontainers/runtime-spec/specs-go"
+)
+
+// DefaultSpec returns default oci spec used by docker.
+func DefaultSpec() specs.Spec {
+	s := specs.Spec{
+		Version: "0.6.0",
+		Platform: specs.Platform{
+			OS:   "SunOS",
+			Arch: runtime.GOARCH,
+		},
+	}
+	s.Solaris = &specs.Solaris{}
+	return s
+}
diff --git a/vendor/github.com/docker/docker/oci/defaults_windows.go b/vendor/github.com/docker/docker/oci/defaults_windows.go
new file mode 100644
index 0000000000000000000000000000000000000000..ab51904ec4ed1e724806ef833bdb7998b9ef5a2c
--- /dev/null
+++ b/vendor/github.com/docker/docker/oci/defaults_windows.go
@@ -0,0 +1,19 @@
+package oci
+
+import (
+	"runtime"
+
+	"github.com/opencontainers/runtime-spec/specs-go"
+)
+
+// DefaultSpec returns default spec used by docker.
+func DefaultSpec() specs.Spec {
+	return specs.Spec{
+		Version: specs.Version,
+		Platform: specs.Platform{
+			OS:   runtime.GOOS,
+			Arch: runtime.GOARCH,
+		},
+		Windows: &specs.Windows{},
+	}
+}
diff --git a/vendor/github.com/docker/docker/oci/devices_linux.go b/vendor/github.com/docker/docker/oci/devices_linux.go
new file mode 100644
index 0000000000000000000000000000000000000000..2840d2586a02b989eaede2ecad1a3a9edc93da6d
--- /dev/null
+++ b/vendor/github.com/docker/docker/oci/devices_linux.go
@@ -0,0 +1,86 @@
+package oci
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/opencontainers/runc/libcontainer/configs"
+	"github.com/opencontainers/runc/libcontainer/devices"
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+// Device transforms a libcontainer configs.Device to a specs.Device object.
+func Device(d *configs.Device) specs.Device {
+	return specs.Device{
+		Type:     string(d.Type),
+		Path:     d.Path,
+		Major:    d.Major,
+		Minor:    d.Minor,
+		FileMode: fmPtr(int64(d.FileMode)),
+		UID:      u32Ptr(int64(d.Uid)),
+		GID:      u32Ptr(int64(d.Gid)),
+	}
+}
+
+func deviceCgroup(d *configs.Device) specs.DeviceCgroup {
+	t := string(d.Type)
+	return specs.DeviceCgroup{
+		Allow:  true,
+		Type:   &t,
+		Major:  &d.Major,
+		Minor:  &d.Minor,
+		Access: &d.Permissions,
+	}
+}
+
+// DevicesFromPath computes a list of devices and device permissions from paths (pathOnHost and pathInContainer) and cgroup permissions.
+func DevicesFromPath(pathOnHost, pathInContainer, cgroupPermissions string) (devs []specs.Device, devPermissions []specs.DeviceCgroup, err error) {
+	resolvedPathOnHost := pathOnHost
+
+	// check if it is a symbolic link
+	if src, e := os.Lstat(pathOnHost); e == nil && src.Mode()&os.ModeSymlink == os.ModeSymlink {
+		if linkedPathOnHost, e := filepath.EvalSymlinks(pathOnHost); e == nil {
+			resolvedPathOnHost = linkedPathOnHost
+		}
+	}
+
+	device, err := devices.DeviceFromPath(resolvedPathOnHost, cgroupPermissions)
+	// if there was no error, return the device
+	if err == nil {
+		device.Path = pathInContainer
+		return append(devs, Device(device)), append(devPermissions, deviceCgroup(device)), nil
+	}
+
+	// if the device is not a device node
+	// try to see if it's a directory holding many devices
+	if err == devices.ErrNotADevice {
+
+		// check if it is a directory
+		if src, e := os.Stat(resolvedPathOnHost); e == nil && src.IsDir() {
+
+			// mount the internal devices recursively
+			filepath.Walk(resolvedPathOnHost, func(dpath string, f os.FileInfo, e error) error {
+				childDevice, e := devices.DeviceFromPath(dpath, cgroupPermissions)
+				if e != nil {
+					// ignore the device
+					return nil
+				}
+
+				// add the device to userSpecified devices
+				childDevice.Path = strings.Replace(dpath, resolvedPathOnHost, pathInContainer, 1)
+				devs = append(devs, Device(childDevice))
+				devPermissions = append(devPermissions, deviceCgroup(childDevice))
+
+				return nil
+			})
+		}
+	}
+
+	if len(devs) > 0 {
+		return devs, devPermissions, nil
+	}
+
+	return devs, devPermissions, fmt.Errorf("error gathering device information while adding custom device %q: %s", pathOnHost, err)
+}
diff --git a/vendor/github.com/docker/docker/oci/devices_unsupported.go b/vendor/github.com/docker/docker/oci/devices_unsupported.go
new file mode 100644
index 0000000000000000000000000000000000000000..6252cab536d05d768893a56c56b29dcc6377d156
--- /dev/null
+++ b/vendor/github.com/docker/docker/oci/devices_unsupported.go
@@ -0,0 +1,20 @@
+// +build !linux
+
+package oci
+
+import (
+	"errors"
+
+	"github.com/opencontainers/runc/libcontainer/configs"
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+// Device transforms a libcontainer configs.Device to a specs.Device object.
+// Not implemented
+func Device(d *configs.Device) specs.Device { return specs.Device{} }
+
+// DevicesFromPath computes a list of devices and device permissions from paths (pathOnHost and pathInContainer) and cgroup permissions.
+// Not implemented
+func DevicesFromPath(pathOnHost, pathInContainer, cgroupPermissions string) (devs []specs.Device, devPermissions []specs.DeviceCgroup, err error) {
+	return nil, nil, errors.New("oci/devices: unsupported platform")
+}
diff --git a/vendor/github.com/docker/docker/oci/namespaces.go b/vendor/github.com/docker/docker/oci/namespaces.go
new file mode 100644
index 0000000000000000000000000000000000000000..49024824982a012aa093136d3c75e1f79a828e6c
--- /dev/null
+++ b/vendor/github.com/docker/docker/oci/namespaces.go
@@ -0,0 +1,16 @@
+package oci
+
+import specs "github.com/opencontainers/runtime-spec/specs-go"
+
+// RemoveNamespace removes the `nsType` namespace from OCI spec `s`
+func RemoveNamespace(s *specs.Spec, nsType specs.NamespaceType) {
+	idx := -1
+	for i, n := range s.Linux.Namespaces {
+		if n.Type == nsType {
+			idx = i
+		}
+	}
+	if idx >= 0 {
+		s.Linux.Namespaces = append(s.Linux.Namespaces[:idx], s.Linux.Namespaces[idx+1:]...)
+	}
+}
diff --git a/vendor/github.com/docker/docker/opts/hosts.go b/vendor/github.com/docker/docker/opts/hosts.go
new file mode 100644
index 0000000000000000000000000000000000000000..266df1e5374e025efa16698efc9f9f627acf9fa3
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/hosts.go
@@ -0,0 +1,151 @@
+package opts
+
+import (
+	"fmt"
+	"net"
+	"net/url"
+	"strconv"
+	"strings"
+)
+
+var (
+	// DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. docker daemon -H tcp://
+	// These are the IANA registered port numbers for use with Docker
+	// see http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker
+	DefaultHTTPPort = 2375 // Default HTTP Port
+	// DefaultTLSHTTPPort Default HTTP Port used when TLS enabled
+	DefaultTLSHTTPPort = 2376 // Default TLS encrypted HTTP Port
+	// DefaultUnixSocket Path for the unix socket.
+	// Docker daemon by default always listens on the default unix socket
+	DefaultUnixSocket = "/var/run/docker.sock"
+	// DefaultTCPHost constant defines the default host string used by docker on Windows
+	DefaultTCPHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort)
+	// DefaultTLSHost constant defines the default host string used by docker for TLS sockets
+	DefaultTLSHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultTLSHTTPPort)
+	// DefaultNamedPipe defines the default named pipe used by docker on Windows
+	DefaultNamedPipe = `//./pipe/docker_engine`
+)
+
+// ValidateHost validates that the specified string is a valid host and returns it.
+func ValidateHost(val string) (string, error) {
+	host := strings.TrimSpace(val)
+	// The empty string means default and is not handled by parseDockerDaemonHost
+	if host != "" {
+		_, err := parseDockerDaemonHost(host)
+		if err != nil {
+			return val, err
+		}
+	}
+	// Note: unlike most flag validators, we don't return the mutated value here
+	//       we need to know what the user entered later (using ParseHost) to adjust for tls
+	return val, nil
+}
+
+// ParseHost and set defaults for a Daemon host string
+func ParseHost(defaultToTLS bool, val string) (string, error) {
+	host := strings.TrimSpace(val)
+	if host == "" {
+		if defaultToTLS {
+			host = DefaultTLSHost
+		} else {
+			host = DefaultHost
+		}
+	} else {
+		var err error
+		host, err = parseDockerDaemonHost(host)
+		if err != nil {
+			return val, err
+		}
+	}
+	return host, nil
+}
+
+// parseDockerDaemonHost parses the specified address and returns an address that will be used as the host.
+// Depending on the address specified, this may return one of the global Default* strings defined in hosts.go.
+func parseDockerDaemonHost(addr string) (string, error) {
+	addrParts := strings.SplitN(addr, "://", 2)
+	if len(addrParts) == 1 && addrParts[0] != "" {
+		addrParts = []string{"tcp", addrParts[0]}
+	}
+
+	switch addrParts[0] {
+	case "tcp":
+		return ParseTCPAddr(addrParts[1], DefaultTCPHost)
+	case "unix":
+		return parseSimpleProtoAddr("unix", addrParts[1], DefaultUnixSocket)
+	case "npipe":
+		return parseSimpleProtoAddr("npipe", addrParts[1], DefaultNamedPipe)
+	case "fd":
+		return addr, nil
+	default:
+		return "", fmt.Errorf("Invalid bind address format: %s", addr)
+	}
+}
+
+// parseSimpleProtoAddr parses and validates that the specified address is a valid
+// socket address for simple protocols like unix and npipe. It returns a formatted
+// socket address, either using the address parsed from addr, or the contents of
+// defaultAddr if addr is a blank string.
+func parseSimpleProtoAddr(proto, addr, defaultAddr string) (string, error) {
+	addr = strings.TrimPrefix(addr, proto+"://")
+	if strings.Contains(addr, "://") {
+		return "", fmt.Errorf("Invalid proto, expected %s: %s", proto, addr)
+	}
+	if addr == "" {
+		addr = defaultAddr
+	}
+	return fmt.Sprintf("%s://%s", proto, addr), nil
+}
+
+// ParseTCPAddr parses and validates that the specified address is a valid TCP
+// address. It returns a formatted TCP address, either using the address parsed
+// from tryAddr, or the contents of defaultAddr if tryAddr is a blank string.
+// tryAddr is expected to have already been Trim()'d
+// defaultAddr must be in the full `tcp://host:port` form
+func ParseTCPAddr(tryAddr string, defaultAddr string) (string, error) {
+	if tryAddr == "" || tryAddr == "tcp://" {
+		return defaultAddr, nil
+	}
+	addr := strings.TrimPrefix(tryAddr, "tcp://")
+	if strings.Contains(addr, "://") || addr == "" {
+		return "", fmt.Errorf("Invalid proto, expected tcp: %s", tryAddr)
+	}
+
+	defaultAddr = strings.TrimPrefix(defaultAddr, "tcp://")
+	defaultHost, defaultPort, err := net.SplitHostPort(defaultAddr)
+	if err != nil {
+		return "", err
+	}
+	// url.Parse fails for trailing colon on IPv6 brackets on Go 1.5, but
+	// not 1.4. See https://github.com/golang/go/issues/12200 and
+	// https://github.com/golang/go/issues/6530.
+	if strings.HasSuffix(addr, "]:") {
+		addr += defaultPort
+	}
+
+	u, err := url.Parse("tcp://" + addr)
+	if err != nil {
+		return "", err
+	}
+	host, port, err := net.SplitHostPort(u.Host)
+	if err != nil {
+		// try port addition once
+		host, port, err = net.SplitHostPort(net.JoinHostPort(u.Host, defaultPort))
+	}
+	if err != nil {
+		return "", fmt.Errorf("Invalid bind address format: %s", tryAddr)
+	}
+
+	if host == "" {
+		host = defaultHost
+	}
+	if port == "" {
+		port = defaultPort
+	}
+	p, err := strconv.Atoi(port)
+	if err != nil && p == 0 {
+		return "", fmt.Errorf("Invalid bind address format: %s", tryAddr)
+	}
+
+	return fmt.Sprintf("tcp://%s%s", net.JoinHostPort(host, port), u.Path), nil
+}
diff --git a/vendor/github.com/docker/docker/opts/hosts_unix.go b/vendor/github.com/docker/docker/opts/hosts_unix.go
new file mode 100644
index 0000000000000000000000000000000000000000..611407a9d94bad7687de7f5bab1290643eff0048
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/hosts_unix.go
@@ -0,0 +1,8 @@
+// +build !windows
+
+package opts
+
+import "fmt"
+
+// DefaultHost constant defines the default host string used by docker on other hosts than Windows
+var DefaultHost = fmt.Sprintf("unix://%s", DefaultUnixSocket)
diff --git a/vendor/github.com/docker/docker/opts/hosts_windows.go b/vendor/github.com/docker/docker/opts/hosts_windows.go
new file mode 100644
index 0000000000000000000000000000000000000000..7c239e00f1e4ba20802364ef4247fece7506c5ce
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/hosts_windows.go
@@ -0,0 +1,6 @@
+// +build windows
+
+package opts
+
+// DefaultHost constant defines the default host string used by docker on Windows
+var DefaultHost = "npipe://" + DefaultNamedPipe
diff --git a/vendor/github.com/docker/docker/opts/ip.go b/vendor/github.com/docker/docker/opts/ip.go
new file mode 100644
index 0000000000000000000000000000000000000000..fb03b50111fd1b582e77264a9d1d1d18cd3d8ed5
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/ip.go
@@ -0,0 +1,47 @@
+package opts
+
+import (
+	"fmt"
+	"net"
+)
+
+// IPOpt holds an IP. It is used to store values from CLI flags.
+type IPOpt struct {
+	*net.IP
+}
+
+// NewIPOpt creates a new IPOpt from a reference net.IP and a
+// string representation of an IP. If the string is not a valid
+// IP it will fallback to the specified reference.
+func NewIPOpt(ref *net.IP, defaultVal string) *IPOpt {
+	o := &IPOpt{
+		IP: ref,
+	}
+	o.Set(defaultVal)
+	return o
+}
+
+// Set sets an IPv4 or IPv6 address from a given string. If the given
+// string is not parseable as an IP address it returns an error.
+func (o *IPOpt) Set(val string) error {
+	ip := net.ParseIP(val)
+	if ip == nil {
+		return fmt.Errorf("%s is not an ip address", val)
+	}
+	*o.IP = ip
+	return nil
+}
+
+// String returns the IP address stored in the IPOpt. If stored IP is a
+// nil pointer, it returns an empty string.
+func (o *IPOpt) String() string {
+	if *o.IP == nil {
+		return ""
+	}
+	return o.IP.String()
+}
+
+// Type returns the type of the option
+func (o *IPOpt) Type() string {
+	return "ip"
+}
diff --git a/vendor/github.com/docker/docker/opts/mount.go b/vendor/github.com/docker/docker/opts/mount.go
new file mode 100644
index 0000000000000000000000000000000000000000..ce6383ddca37699e08487a6594ecb74f156148fe
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/mount.go
@@ -0,0 +1,171 @@
+package opts
+
+import (
+	"encoding/csv"
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+
+	mounttypes "github.com/docker/docker/api/types/mount"
+	"github.com/docker/go-units"
+)
+
+// MountOpt is a Value type for parsing mounts
+type MountOpt struct {
+	values []mounttypes.Mount
+}
+
+// Set a new mount value
+func (m *MountOpt) Set(value string) error {
+	csvReader := csv.NewReader(strings.NewReader(value))
+	fields, err := csvReader.Read()
+	if err != nil {
+		return err
+	}
+
+	mount := mounttypes.Mount{}
+
+	volumeOptions := func() *mounttypes.VolumeOptions {
+		if mount.VolumeOptions == nil {
+			mount.VolumeOptions = &mounttypes.VolumeOptions{
+				Labels: make(map[string]string),
+			}
+		}
+		if mount.VolumeOptions.DriverConfig == nil {
+			mount.VolumeOptions.DriverConfig = &mounttypes.Driver{}
+		}
+		return mount.VolumeOptions
+	}
+
+	bindOptions := func() *mounttypes.BindOptions {
+		if mount.BindOptions == nil {
+			mount.BindOptions = new(mounttypes.BindOptions)
+		}
+		return mount.BindOptions
+	}
+
+	tmpfsOptions := func() *mounttypes.TmpfsOptions {
+		if mount.TmpfsOptions == nil {
+			mount.TmpfsOptions = new(mounttypes.TmpfsOptions)
+		}
+		return mount.TmpfsOptions
+	}
+
+	setValueOnMap := func(target map[string]string, value string) {
+		parts := strings.SplitN(value, "=", 2)
+		if len(parts) == 1 {
+			target[value] = ""
+		} else {
+			target[parts[0]] = parts[1]
+		}
+	}
+
+	mount.Type = mounttypes.TypeVolume // default to volume mounts
+	// Set writable as the default
+	for _, field := range fields {
+		parts := strings.SplitN(field, "=", 2)
+		key := strings.ToLower(parts[0])
+
+		if len(parts) == 1 {
+			switch key {
+			case "readonly", "ro":
+				mount.ReadOnly = true
+				continue
+			case "volume-nocopy":
+				volumeOptions().NoCopy = true
+				continue
+			}
+		}
+
+		if len(parts) != 2 {
+			return fmt.Errorf("invalid field '%s' must be a key=value pair", field)
+		}
+
+		value := parts[1]
+		switch key {
+		case "type":
+			mount.Type = mounttypes.Type(strings.ToLower(value))
+		case "source", "src":
+			mount.Source = value
+		case "target", "dst", "destination":
+			mount.Target = value
+		case "readonly", "ro":
+			mount.ReadOnly, err = strconv.ParseBool(value)
+			if err != nil {
+				return fmt.Errorf("invalid value for %s: %s", key, value)
+			}
+		case "bind-propagation":
+			bindOptions().Propagation = mounttypes.Propagation(strings.ToLower(value))
+		case "volume-nocopy":
+			volumeOptions().NoCopy, err = strconv.ParseBool(value)
+			if err != nil {
+				return fmt.Errorf("invalid value for populate: %s", value)
+			}
+		case "volume-label":
+			setValueOnMap(volumeOptions().Labels, value)
+		case "volume-driver":
+			volumeOptions().DriverConfig.Name = value
+		case "volume-opt":
+			if volumeOptions().DriverConfig.Options == nil {
+				volumeOptions().DriverConfig.Options = make(map[string]string)
+			}
+			setValueOnMap(volumeOptions().DriverConfig.Options, value)
+		case "tmpfs-size":
+			sizeBytes, err := units.RAMInBytes(value)
+			if err != nil {
+				return fmt.Errorf("invalid value for %s: %s", key, value)
+			}
+			tmpfsOptions().SizeBytes = sizeBytes
+		case "tmpfs-mode":
+			ui64, err := strconv.ParseUint(value, 8, 32)
+			if err != nil {
+				return fmt.Errorf("invalid value for %s: %s", key, value)
+			}
+			tmpfsOptions().Mode = os.FileMode(ui64)
+		default:
+			return fmt.Errorf("unexpected key '%s' in '%s'", key, field)
+		}
+	}
+
+	if mount.Type == "" {
+		return fmt.Errorf("type is required")
+	}
+
+	if mount.Target == "" {
+		return fmt.Errorf("target is required")
+	}
+
+	if mount.VolumeOptions != nil && mount.Type != mounttypes.TypeVolume {
+		return fmt.Errorf("cannot mix 'volume-*' options with mount type '%s'", mount.Type)
+	}
+	if mount.BindOptions != nil && mount.Type != mounttypes.TypeBind {
+		return fmt.Errorf("cannot mix 'bind-*' options with mount type '%s'", mount.Type)
+	}
+	if mount.TmpfsOptions != nil && mount.Type != mounttypes.TypeTmpfs {
+		return fmt.Errorf("cannot mix 'tmpfs-*' options with mount type '%s'", mount.Type)
+	}
+
+	m.values = append(m.values, mount)
+	return nil
+}
+
+// Type returns the type of this option
+func (m *MountOpt) Type() string {
+	return "mount"
+}
+
+// String returns a string repr of this option
+func (m *MountOpt) String() string {
+	mounts := []string{}
+	for _, mount := range m.values {
+		repr := fmt.Sprintf("%s %s %s", mount.Type, mount.Source, mount.Target)
+		mounts = append(mounts, repr)
+	}
+	return strings.Join(mounts, ", ")
+}
+
+// Value returns the mounts
+func (m *MountOpt) Value() []mounttypes.Mount {
+	return m.values
+}
diff --git a/vendor/github.com/docker/docker/opts/opts.go b/vendor/github.com/docker/docker/opts/opts.go
new file mode 100644
index 0000000000000000000000000000000000000000..ae851537ec8aee78ad5d09126c2aa6fb4ed22bc0
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/opts.go
@@ -0,0 +1,360 @@
+package opts
+
+import (
+	"fmt"
+	"math/big"
+	"net"
+	"regexp"
+	"strings"
+
+	"github.com/docker/docker/api/types/filters"
+)
+
+var (
+	alphaRegexp  = regexp.MustCompile(`[a-zA-Z]`)
+	domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`)
+)
+
+// ListOpts holds a list of values and a validation function.
+type ListOpts struct {
+	values    *[]string
+	validator ValidatorFctType
+}
+
+// NewListOpts creates a new ListOpts with the specified validator.
+func NewListOpts(validator ValidatorFctType) ListOpts {
+	var values []string
+	return *NewListOptsRef(&values, validator)
+}
+
+// NewListOptsRef creates a new ListOpts with the specified values and validator.
+func NewListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts {
+	return &ListOpts{
+		values:    values,
+		validator: validator,
+	}
+}
+
+func (opts *ListOpts) String() string {
+	return fmt.Sprintf("%v", []string((*opts.values)))
+}
+
+// Set validates if needed the input value and adds it to the
+// internal slice.
+func (opts *ListOpts) Set(value string) error {
+	if opts.validator != nil {
+		v, err := opts.validator(value)
+		if err != nil {
+			return err
+		}
+		value = v
+	}
+	(*opts.values) = append((*opts.values), value)
+	return nil
+}
+
+// Delete removes the specified element from the slice.
+func (opts *ListOpts) Delete(key string) {
+	for i, k := range *opts.values {
+		if k == key {
+			(*opts.values) = append((*opts.values)[:i], (*opts.values)[i+1:]...)
+			return
+		}
+	}
+}
+
+// GetMap returns the content of values in a map in order to avoid
+// duplicates.
+func (opts *ListOpts) GetMap() map[string]struct{} {
+	ret := make(map[string]struct{})
+	for _, k := range *opts.values {
+		ret[k] = struct{}{}
+	}
+	return ret
+}
+
+// GetAll returns the values of slice.
+func (opts *ListOpts) GetAll() []string {
+	return (*opts.values)
+}
+
+// GetAllOrEmpty returns the values of the slice
+// or an empty slice when there are no values.
+func (opts *ListOpts) GetAllOrEmpty() []string {
+	v := *opts.values
+	if v == nil {
+		return make([]string, 0)
+	}
+	return v
+}
+
+// Get checks the existence of the specified key.
+func (opts *ListOpts) Get(key string) bool {
+	for _, k := range *opts.values {
+		if k == key {
+			return true
+		}
+	}
+	return false
+}
+
+// Len returns the amount of element in the slice.
+func (opts *ListOpts) Len() int {
+	return len((*opts.values))
+}
+
+// Type returns a string name for this Option type
+func (opts *ListOpts) Type() string {
+	return "list"
+}
+
+// NamedOption is an interface that list and map options
+// with names implement.
+type NamedOption interface {
+	Name() string
+}
+
+// NamedListOpts is a ListOpts with a configuration name.
+// This struct is useful to keep reference to the assigned
+// field name in the internal configuration struct.
+type NamedListOpts struct {
+	name string
+	ListOpts
+}
+
+var _ NamedOption = &NamedListOpts{}
+
+// NewNamedListOptsRef creates a reference to a new NamedListOpts struct.
+func NewNamedListOptsRef(name string, values *[]string, validator ValidatorFctType) *NamedListOpts {
+	return &NamedListOpts{
+		name:     name,
+		ListOpts: *NewListOptsRef(values, validator),
+	}
+}
+
+// Name returns the name of the NamedListOpts in the configuration.
+func (o *NamedListOpts) Name() string {
+	return o.name
+}
+
+// MapOpts holds a map of values and a validation function.
+type MapOpts struct {
+	values    map[string]string
+	validator ValidatorFctType
+}
+
+// Set validates if needed the input value and add it to the
+// internal map, by splitting on '='.
+func (opts *MapOpts) Set(value string) error {
+	if opts.validator != nil {
+		v, err := opts.validator(value)
+		if err != nil {
+			return err
+		}
+		value = v
+	}
+	vals := strings.SplitN(value, "=", 2)
+	if len(vals) == 1 {
+		(opts.values)[vals[0]] = ""
+	} else {
+		(opts.values)[vals[0]] = vals[1]
+	}
+	return nil
+}
+
+// GetAll returns the values of MapOpts as a map.
+func (opts *MapOpts) GetAll() map[string]string {
+	return opts.values
+}
+
+func (opts *MapOpts) String() string {
+	return fmt.Sprintf("%v", map[string]string((opts.values)))
+}
+
+// Type returns a string name for this Option type
+func (opts *MapOpts) Type() string {
+	return "map"
+}
+
+// NewMapOpts creates a new MapOpts with the specified map of values and a validator.
+func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts {
+	if values == nil {
+		values = make(map[string]string)
+	}
+	return &MapOpts{
+		values:    values,
+		validator: validator,
+	}
+}
+
+// NamedMapOpts is a MapOpts struct with a configuration name.
+// This struct is useful to keep reference to the assigned
+// field name in the internal configuration struct.
+type NamedMapOpts struct {
+	name string
+	MapOpts
+}
+
+var _ NamedOption = &NamedMapOpts{}
+
+// NewNamedMapOpts creates a reference to a new NamedMapOpts struct.
+func NewNamedMapOpts(name string, values map[string]string, validator ValidatorFctType) *NamedMapOpts {
+	return &NamedMapOpts{
+		name:    name,
+		MapOpts: *NewMapOpts(values, validator),
+	}
+}
+
+// Name returns the name of the NamedMapOpts in the configuration.
+func (o *NamedMapOpts) Name() string {
+	return o.name
+}
+
+// ValidatorFctType defines a validator function that returns a validated string and/or an error.
+type ValidatorFctType func(val string) (string, error)
+
+// ValidatorFctListType defines a validator function that returns a validated list of string and/or an error
+type ValidatorFctListType func(val string) ([]string, error)
+
+// ValidateIPAddress validates an IP address.
+func ValidateIPAddress(val string) (string, error) {
+	var ip = net.ParseIP(strings.TrimSpace(val))
+	if ip != nil {
+		return ip.String(), nil
+	}
+	return "", fmt.Errorf("%s is not an ip address", val)
+}
+
+// ValidateDNSSearch validates domain for resolvconf search configuration.
+// A zero length domain is represented by a dot (.).
+func ValidateDNSSearch(val string) (string, error) {
+	if val = strings.Trim(val, " "); val == "." {
+		return val, nil
+	}
+	return validateDomain(val)
+}
+
+func validateDomain(val string) (string, error) {
+	if alphaRegexp.FindString(val) == "" {
+		return "", fmt.Errorf("%s is not a valid domain", val)
+	}
+	ns := domainRegexp.FindSubmatch([]byte(val))
+	if len(ns) > 0 && len(ns[1]) < 255 {
+		return string(ns[1]), nil
+	}
+	return "", fmt.Errorf("%s is not a valid domain", val)
+}
+
+// ValidateLabel validates that the specified string is a valid label, and returns it.
+// Labels are in the form of key=value.
+func ValidateLabel(val string) (string, error) {
+	if strings.Count(val, "=") < 1 {
+		return "", fmt.Errorf("bad attribute format: %s", val)
+	}
+	return val, nil
+}
+
+// ValidateSysctl validates a sysctl and returns it.
+func ValidateSysctl(val string) (string, error) {
+	validSysctlMap := map[string]bool{
+		"kernel.msgmax":          true,
+		"kernel.msgmnb":          true,
+		"kernel.msgmni":          true,
+		"kernel.sem":             true,
+		"kernel.shmall":          true,
+		"kernel.shmmax":          true,
+		"kernel.shmmni":          true,
+		"kernel.shm_rmid_forced": true,
+	}
+	validSysctlPrefixes := []string{
+		"net.",
+		"fs.mqueue.",
+	}
+	arr := strings.Split(val, "=")
+	if len(arr) < 2 {
+		return "", fmt.Errorf("sysctl '%s' is not whitelisted", val)
+	}
+	if validSysctlMap[arr[0]] {
+		return val, nil
+	}
+
+	for _, vp := range validSysctlPrefixes {
+		if strings.HasPrefix(arr[0], vp) {
+			return val, nil
+		}
+	}
+	return "", fmt.Errorf("sysctl '%s' is not whitelisted", val)
+}
+
+// FilterOpt is a flag type for validating filters
+type FilterOpt struct {
+	filter filters.Args
+}
+
+// NewFilterOpt returns a new FilterOpt
+func NewFilterOpt() FilterOpt {
+	return FilterOpt{filter: filters.NewArgs()}
+}
+
+func (o *FilterOpt) String() string {
+	repr, err := filters.ToParam(o.filter)
+	if err != nil {
+		return "invalid filters"
+	}
+	return repr
+}
+
+// Set sets the value of the opt by parsing the command line value
+func (o *FilterOpt) Set(value string) error {
+	var err error
+	o.filter, err = filters.ParseFlag(value, o.filter)
+	return err
+}
+
+// Type returns the option type
+func (o *FilterOpt) Type() string {
+	return "filter"
+}
+
+// Value returns the value of this option
+func (o *FilterOpt) Value() filters.Args {
+	return o.filter
+}
+
+// NanoCPUs is a type for fixed point fractional number.
+type NanoCPUs int64
+
+// String returns the string format of the number
+func (c *NanoCPUs) String() string {
+	return big.NewRat(c.Value(), 1e9).FloatString(3)
+}
+
+// Set sets the value of the NanoCPU by passing a string
+func (c *NanoCPUs) Set(value string) error {
+	cpus, err := ParseCPUs(value)
+	*c = NanoCPUs(cpus)
+	return err
+}
+
+// Type returns the type
+func (c *NanoCPUs) Type() string {
+	return "decimal"
+}
+
+// Value returns the value in int64
+func (c *NanoCPUs) Value() int64 {
+	return int64(*c)
+}
+
+// ParseCPUs takes a string ratio and returns an integer value of nano cpus
+func ParseCPUs(value string) (int64, error) {
+	cpu, ok := new(big.Rat).SetString(value)
+	if !ok {
+		return 0, fmt.Errorf("failed to parse %v as a rational number", value)
+	}
+	nano := cpu.Mul(cpu, big.NewRat(1e9, 1))
+	if !nano.IsInt() {
+		return 0, fmt.Errorf("value is too precise")
+	}
+	return nano.Num().Int64(), nil
+}
diff --git a/vendor/github.com/docker/docker/opts/opts_unix.go b/vendor/github.com/docker/docker/opts/opts_unix.go
new file mode 100644
index 0000000000000000000000000000000000000000..f1ce844a8f601ada1a759032574590143380f0ac
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/opts_unix.go
@@ -0,0 +1,6 @@
+// +build !windows
+
+package opts
+
+// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. docker daemon -H tcp://:8080
+const DefaultHTTPHost = "localhost"
diff --git a/vendor/github.com/docker/docker/opts/opts_windows.go b/vendor/github.com/docker/docker/opts/opts_windows.go
new file mode 100644
index 0000000000000000000000000000000000000000..ebe40c969c923fb9a214e9793c5288cabadf8d7d
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/opts_windows.go
@@ -0,0 +1,56 @@
+package opts
+
+// TODO Windows. Identify bug in GOLang 1.5.1+ and/or Windows Server 2016 TP5.
+// @jhowardmsft, @swernli.
+//
+// On Windows, this mitigates a problem with the default options of running
+// a docker client against a local docker daemon on TP5.
+//
+// What was found that if the default host is "localhost", even if the client
+// (and daemon as this is local) is not physically on a network, and the DNS
+// cache is flushed (ipconfig /flushdns), then the client will pause for
+// exactly one second when connecting to the daemon for calls. For example
+// using docker run windowsservercore cmd, the CLI will send a create followed
+// by an attach. You see the delay between the attach finishing and the attach
+// being seen by the daemon.
+//
+// Here's some daemon debug logs with additional debug spew put in. The
+// AfterWriteJSON log is the very last thing the daemon does as part of the
+// create call. The POST /attach is the second CLI call. Notice the second
+// time gap.
+//
+// time="2015-11-06T13:38:37.259627400-08:00" level=debug msg="After createRootfs"
+// time="2015-11-06T13:38:37.263626300-08:00" level=debug msg="After setHostConfig"
+// time="2015-11-06T13:38:37.267631200-08:00" level=debug msg="before createContainerPl...."
+// time="2015-11-06T13:38:37.271629500-08:00" level=debug msg=ToDiskLocking....
+// time="2015-11-06T13:38:37.275643200-08:00" level=debug msg="loggin event...."
+// time="2015-11-06T13:38:37.277627600-08:00" level=debug msg="logged event...."
+// time="2015-11-06T13:38:37.279631800-08:00" level=debug msg="In defer func"
+// time="2015-11-06T13:38:37.282628100-08:00" level=debug msg="After daemon.create"
+// time="2015-11-06T13:38:37.286651700-08:00" level=debug msg="return 2"
+// time="2015-11-06T13:38:37.289629500-08:00" level=debug msg="Returned from daemon.ContainerCreate"
+// time="2015-11-06T13:38:37.311629100-08:00" level=debug msg="After WriteJSON"
+// ... 1 second gap here....
+// time="2015-11-06T13:38:38.317866200-08:00" level=debug msg="Calling POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach"
+// time="2015-11-06T13:38:38.326882500-08:00" level=info msg="POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach?stderr=1&stdin=1&stdout=1&stream=1"
+//
+// We suspect this is either a bug introduced in GOLang 1.5.1, or that a change
+// in GOLang 1.5.1 (from 1.4.3) is exposing a bug in Windows. In theory,
+// the Windows networking stack is supposed to resolve "localhost" internally,
+// without hitting DNS, or even reading the hosts file (which is why localhost
+// is commented out in the hosts file on Windows).
+//
+// We have validated that working around this using the actual IPv4 localhost
+// address does not cause the delay.
+//
+// This does not occur with the docker client built with 1.4.3 on the same
+// Windows build, regardless of whether the daemon is built using 1.5.1
+// or 1.4.3. It does not occur on Linux. We also verified we see the same thing
+// on a cross-compiled Windows binary (from Linux).
+//
+// Final note: This is a mitigation, not a 'real' fix. It is still susceptible
+// to the delay if a user were to do 'docker run -H=tcp://localhost:2375...'
+// explicitly.
+
+// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. docker daemon -H tcp://:8080
+const DefaultHTTPHost = "127.0.0.1"
diff --git a/vendor/github.com/docker/docker/opts/port.go b/vendor/github.com/docker/docker/opts/port.go
new file mode 100644
index 0000000000000000000000000000000000000000..020a5d1e1c4567b07558c9a61bafca5c1521a0cd
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/port.go
@@ -0,0 +1,146 @@
+package opts
+
+import (
+	"encoding/csv"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"github.com/docker/docker/api/types/swarm"
+	"github.com/docker/go-connections/nat"
+)
+
+const (
+	portOptTargetPort    = "target"
+	portOptPublishedPort = "published"
+	portOptProtocol      = "protocol"
+	portOptMode          = "mode"
+)
+
+// PortOpt represents a port config in swarm mode.
+type PortOpt struct {
+	ports []swarm.PortConfig
+}
+
+// Set a new port value
+func (p *PortOpt) Set(value string) error {
+	longSyntax, err := regexp.MatchString(`\w+=\w+(,\w+=\w+)*`, value)
+	if err != nil {
+		return err
+	}
+	if longSyntax {
+		csvReader := csv.NewReader(strings.NewReader(value))
+		fields, err := csvReader.Read()
+		if err != nil {
+			return err
+		}
+
+		pConfig := swarm.PortConfig{}
+		for _, field := range fields {
+			parts := strings.SplitN(field, "=", 2)
+			if len(parts) != 2 {
+				return fmt.Errorf("invalid field %s", field)
+			}
+
+			key := strings.ToLower(parts[0])
+			value := strings.ToLower(parts[1])
+
+			switch key {
+			case portOptProtocol:
+				if value != string(swarm.PortConfigProtocolTCP) && value != string(swarm.PortConfigProtocolUDP) {
+					return fmt.Errorf("invalid protocol value %s", value)
+				}
+
+				pConfig.Protocol = swarm.PortConfigProtocol(value)
+			case portOptMode:
+				if value != string(swarm.PortConfigPublishModeIngress) && value != string(swarm.PortConfigPublishModeHost) {
+					return fmt.Errorf("invalid publish mode value %s", value)
+				}
+
+				pConfig.PublishMode = swarm.PortConfigPublishMode(value)
+			case portOptTargetPort:
+				tPort, err := strconv.ParseUint(value, 10, 16)
+				if err != nil {
+					return err
+				}
+
+				pConfig.TargetPort = uint32(tPort)
+			case portOptPublishedPort:
+				pPort, err := strconv.ParseUint(value, 10, 16)
+				if err != nil {
+					return err
+				}
+
+				pConfig.PublishedPort = uint32(pPort)
+			default:
+				return fmt.Errorf("invalid field key %s", key)
+			}
+		}
+
+		if pConfig.TargetPort == 0 {
+			return fmt.Errorf("missing mandatory field %q", portOptTargetPort)
+		}
+
+		if pConfig.PublishMode == "" {
+			pConfig.PublishMode = swarm.PortConfigPublishModeIngress
+		}
+
+		if pConfig.Protocol == "" {
+			pConfig.Protocol = swarm.PortConfigProtocolTCP
+		}
+
+		p.ports = append(p.ports, pConfig)
+	} else {
+		// short syntax
+		portConfigs := []swarm.PortConfig{}
+		// We can ignore errors because the format was already validated by ValidatePort
+		ports, portBindings, _ := nat.ParsePortSpecs([]string{value})
+
+		for port := range ports {
+			portConfigs = append(portConfigs, ConvertPortToPortConfig(port, portBindings)...)
+		}
+		p.ports = append(p.ports, portConfigs...)
+	}
+	return nil
+}
+
+// Type returns the type of this option
+func (p *PortOpt) Type() string {
+	return "port"
+}
+
+// String returns a string repr of this option
+func (p *PortOpt) String() string {
+	ports := []string{}
+	for _, port := range p.ports {
+		repr := fmt.Sprintf("%v:%v/%s/%s", port.PublishedPort, port.TargetPort, port.Protocol, port.PublishMode)
+		ports = append(ports, repr)
+	}
+	return strings.Join(ports, ", ")
+}
+
+// Value returns the ports
+func (p *PortOpt) Value() []swarm.PortConfig {
+	return p.ports
+}
+
+// ConvertPortToPortConfig converts ports to the swarm type
+func ConvertPortToPortConfig(
+	port nat.Port,
+	portBindings map[nat.Port][]nat.PortBinding,
+) []swarm.PortConfig {
+	ports := []swarm.PortConfig{}
+
+	for _, binding := range portBindings[port] {
+		hostPort, _ := strconv.ParseUint(binding.HostPort, 10, 16)
+		ports = append(ports, swarm.PortConfig{
+			//TODO Name: ?
+			Protocol:      swarm.PortConfigProtocol(strings.ToLower(port.Proto())),
+			TargetPort:    uint32(port.Int()),
+			PublishedPort: uint32(hostPort),
+			PublishMode:   swarm.PortConfigPublishModeIngress,
+		})
+	}
+	return ports
+}
diff --git a/vendor/github.com/docker/docker/opts/quotedstring.go b/vendor/github.com/docker/docker/opts/quotedstring.go
new file mode 100644
index 0000000000000000000000000000000000000000..fb1e5374bc3a46df7f7c7655593f499a7a8b0597
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/quotedstring.go
@@ -0,0 +1,37 @@
+package opts
+
+// QuotedString is a string that may have extra quotes around the value. The
+// quotes are stripped from the value.
+type QuotedString struct {
+	value *string
+}
+
+// Set sets a new value
+func (s *QuotedString) Set(val string) error {
+	*s.value = trimQuotes(val)
+	return nil
+}
+
+// Type returns the type of the value
+func (s *QuotedString) Type() string {
+	return "string"
+}
+
+func (s *QuotedString) String() string {
+	return string(*s.value)
+}
+
+func trimQuotes(value string) string {
+	lastIndex := len(value) - 1
+	for _, char := range []byte{'\'', '"'} {
+		if value[0] == char && value[lastIndex] == char {
+			return value[1:lastIndex]
+		}
+	}
+	return value
+}
+
+// NewQuotedString returns a new quoted string option
+func NewQuotedString(value *string) *QuotedString {
+	return &QuotedString{value: value}
+}
diff --git a/vendor/github.com/docker/docker/opts/secret.go b/vendor/github.com/docker/docker/opts/secret.go
new file mode 100644
index 0000000000000000000000000000000000000000..b77a33f685a4f056de186496e4a8225601f1afde
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/secret.go
@@ -0,0 +1,107 @@
+package opts
+
+import (
+	"encoding/csv"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+
+	"github.com/docker/docker/api/types"
+)
+
// SecretOpt is a Value type for parsing secrets
type SecretOpt struct {
	// values accumulates one request per successful Set call.
	values []*types.SecretRequestOption
}
+
+// Set a new secret value
+func (o *SecretOpt) Set(value string) error {
+	csvReader := csv.NewReader(strings.NewReader(value))
+	fields, err := csvReader.Read()
+	if err != nil {
+		return err
+	}
+
+	options := &types.SecretRequestOption{
+		Source: "",
+		Target: "",
+		UID:    "0",
+		GID:    "0",
+		Mode:   0444,
+	}
+
+	// support a simple syntax of --secret foo
+	if len(fields) == 1 {
+		options.Source = fields[0]
+		options.Target = fields[0]
+		o.values = append(o.values, options)
+		return nil
+	}
+
+	for _, field := range fields {
+		parts := strings.SplitN(field, "=", 2)
+		key := strings.ToLower(parts[0])
+
+		if len(parts) != 2 {
+			return fmt.Errorf("invalid field '%s' must be a key=value pair", field)
+		}
+
+		value := parts[1]
+		switch key {
+		case "source":
+			options.Source = value
+		case "target":
+			tDir, _ := filepath.Split(value)
+			if tDir != "" {
+				return fmt.Errorf("target must not be a path")
+			}
+			options.Target = value
+		case "uid":
+			options.UID = value
+		case "gid":
+			options.GID = value
+		case "mode":
+			m, err := strconv.ParseUint(value, 0, 32)
+			if err != nil {
+				return fmt.Errorf("invalid mode specified: %v", err)
+			}
+
+			options.Mode = os.FileMode(m)
+		default:
+			if len(fields) == 1 && value == "" {
+
+			} else {
+				return fmt.Errorf("invalid field in secret request: %s", key)
+			}
+		}
+	}
+
+	if options.Source == "" {
+		return fmt.Errorf("source is required")
+	}
+
+	o.values = append(o.values, options)
+	return nil
+}
+
+// Type returns the type of this option
+func (o *SecretOpt) Type() string {
+	return "secret"
+}
+
+// String returns a string repr of this option
+func (o *SecretOpt) String() string {
+	secrets := []string{}
+	for _, secret := range o.values {
+		repr := fmt.Sprintf("%s -> %s", secret.Source, secret.Target)
+		secrets = append(secrets, repr)
+	}
+	return strings.Join(secrets, ", ")
+}
+
+// Value returns the secret requests
+func (o *SecretOpt) Value() []*types.SecretRequestOption {
+	return o.values
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/README.md b/vendor/github.com/docker/docker/pkg/archive/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..7307d9694f6675f88802928f082113eefcaa57fe
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/README.md
@@ -0,0 +1 @@
+This code provides helper functions for dealing with archive files.
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive.go b/vendor/github.com/docker/docker/pkg/archive/archive.go
new file mode 100644
index 0000000000000000000000000000000000000000..3261c4f4988d2042d479023ac81ed18ea731d129
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/archive.go
@@ -0,0 +1,1175 @@
+package archive
+
+import (
+	"archive/tar"
+	"bufio"
+	"bytes"
+	"compress/bzip2"
+	"compress/gzip"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"runtime"
+	"strings"
+	"syscall"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/pkg/fileutils"
+	"github.com/docker/docker/pkg/idtools"
+	"github.com/docker/docker/pkg/ioutils"
+	"github.com/docker/docker/pkg/pools"
+	"github.com/docker/docker/pkg/promise"
+	"github.com/docker/docker/pkg/system"
+)
+
type (
	// Compression is the state represents if compressed or not.
	Compression int
	// WhiteoutFormat is the format of whiteouts unpacked
	WhiteoutFormat int
	// TarChownOptions wraps the chown options UID and GID.
	TarChownOptions struct {
		UID, GID int
	}

	// TarOptions wraps the tar options.
	TarOptions struct {
		IncludeFiles     []string
		ExcludePatterns  []string
		Compression      Compression
		NoLchown         bool
		UIDMaps          []idtools.IDMap
		GIDMaps          []idtools.IDMap
		ChownOpts        *TarChownOptions
		// IncludeSourceDir, when packing, also emits an entry for the
		// source directory itself (not only its contents).
		IncludeSourceDir bool
		// WhiteoutFormat is the expected on disk format for whiteout files.
		// This format will be converted to the standard format on pack
		// and from the standard format on unpack.
		WhiteoutFormat WhiteoutFormat
		// When unpacking, specifies whether overwriting a directory with a
		// non-directory is allowed and vice versa.
		NoOverwriteDirNonDir bool
		// For each include when creating an archive, the included name will be
		// replaced with the matching name from this map.
		RebaseNames map[string]string
		InUserNS    bool
	}

	// Archiver allows the reuse of most utility functions of this package
	// with a pluggable Untar function. Also, to facilitate the passing of
	// specific id mappings for untar, an archiver can be created with maps
	// which will then be passed to Untar operations
	Archiver struct {
		Untar   func(io.Reader, string, *TarOptions) error
		UIDMaps []idtools.IDMap
		GIDMaps []idtools.IDMap
	}

	// breakoutError is used to differentiate errors related to breaking out
	// When testing archive breakout in the unit tests, this error is expected
	// in order for the test to pass.
	breakoutError error
)
+
var (
	// ErrNotImplemented is the error message of function not implemented.
	ErrNotImplemented = errors.New("Function not implemented")
	// defaultArchiver backs the package-level convenience functions
	// (TarUntar, UntarPath, CopyWithTar, ...) with no ID remapping.
	defaultArchiver   = &Archiver{Untar: Untar, UIDMaps: nil, GIDMaps: nil}
)

const (
	// HeaderSize is the size in bytes of a tar header
	HeaderSize = 512
)

const (
	// Uncompressed represents the uncompressed.
	Uncompressed Compression = iota
	// Bzip2 is bzip2 compression algorithm.
	Bzip2
	// Gzip is gzip compression algorithm.
	Gzip
	// Xz is xz compression algorithm.
	Xz
)

const (
	// AUFSWhiteoutFormat is the default format for whiteouts
	AUFSWhiteoutFormat WhiteoutFormat = iota
	// OverlayWhiteoutFormat formats whiteout according to the overlay
	// standard.
	OverlayWhiteoutFormat
)
+
+// IsArchive checks for the magic bytes of a tar or any supported compression
+// algorithm.
+func IsArchive(header []byte) bool {
+	compression := DetectCompression(header)
+	if compression != Uncompressed {
+		return true
+	}
+	r := tar.NewReader(bytes.NewBuffer(header))
+	_, err := r.Next()
+	return err == nil
+}
+
+// IsArchivePath checks if the (possibly compressed) file at the given path
+// starts with a tar file header.
+func IsArchivePath(path string) bool {
+	file, err := os.Open(path)
+	if err != nil {
+		return false
+	}
+	defer file.Close()
+	rdr, err := DecompressStream(file)
+	if err != nil {
+		return false
+	}
+	r := tar.NewReader(rdr)
+	_, err = r.Next()
+	return err == nil
+}
+
+// DetectCompression detects the compression algorithm of the source.
+func DetectCompression(source []byte) Compression {
+	for compression, m := range map[Compression][]byte{
+		Bzip2: {0x42, 0x5A, 0x68},
+		Gzip:  {0x1F, 0x8B, 0x08},
+		Xz:    {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
+	} {
+		if len(source) < len(m) {
+			logrus.Debug("Len too short")
+			continue
+		}
+		if bytes.Compare(m, source[:len(m)]) == 0 {
+			return compression
+		}
+	}
+	return Uncompressed
+}
+
+func xzDecompress(archive io.Reader) (io.ReadCloser, <-chan struct{}, error) {
+	args := []string{"xz", "-d", "-c", "-q"}
+
+	return cmdStream(exec.Command(args[0], args[1:]...), archive)
+}
+
// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive.
func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
	p := pools.BufioReader32KPool
	buf := p.Get(archive)
	// Peek at enough bytes to cover the longest magic number (xz, 6 bytes).
	bs, err := buf.Peek(10)
	if err != nil && err != io.EOF {
		// Note: we'll ignore any io.EOF error because there are some odd
		// cases where the layer.tar file will be empty (zero bytes) and
		// that results in an io.EOF from the Peek() call. So, in those
		// cases we'll just treat it as a non-compressed stream and
		// that means just create an empty layer.
		// See Issue 18170
		return nil, err
	}

	compression := DetectCompression(bs)
	switch compression {
	case Uncompressed:
		readBufWrapper := p.NewReadCloserWrapper(buf, buf)
		return readBufWrapper, nil
	case Gzip:
		gzReader, err := gzip.NewReader(buf)
		if err != nil {
			return nil, err
		}
		readBufWrapper := p.NewReadCloserWrapper(buf, gzReader)
		return readBufWrapper, nil
	case Bzip2:
		bz2Reader := bzip2.NewReader(buf)
		readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader)
		return readBufWrapper, nil
	case Xz:
		// xz is handled by an external process; chdone is closed when that
		// process exits.
		xzReader, chdone, err := xzDecompress(buf)
		if err != nil {
			return nil, err
		}
		readBufWrapper := p.NewReadCloserWrapper(buf, xzReader)
		return ioutils.NewReadCloserWrapper(readBufWrapper, func() error {
			// Wait for the xz process to finish before releasing the
			// buffered reader back to the pool via Close.
			<-chdone
			return readBufWrapper.Close()
		}), nil
	default:
		return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
	}
}
+
+// CompressStream compresseses the dest with specified compression algorithm.
+func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) {
+	p := pools.BufioWriter32KPool
+	buf := p.Get(dest)
+	switch compression {
+	case Uncompressed:
+		writeBufWrapper := p.NewWriteCloserWrapper(buf, buf)
+		return writeBufWrapper, nil
+	case Gzip:
+		gzWriter := gzip.NewWriter(dest)
+		writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter)
+		return writeBufWrapper, nil
+	case Bzip2, Xz:
+		// archive/bzip2 does not support writing, and there is no xz support at all
+		// However, this is not a problem as docker only currently generates gzipped tars
+		return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+	default:
+		return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+	}
+}
+
+// Extension returns the extension of a file that uses the specified compression algorithm.
+func (compression *Compression) Extension() string {
+	switch *compression {
+	case Uncompressed:
+		return "tar"
+	case Bzip2:
+		return "tar.bz2"
+	case Gzip:
+		return "tar.gz"
+	case Xz:
+		return "tar.xz"
+	}
+	return ""
+}
+
// tarWhiteoutConverter translates whiteout files between a storage driver's
// on-disk format and the AUFS-style format used inside tar streams.
type tarWhiteoutConverter interface {
	// ConvertWrite may rewrite the header while packing and can return an
	// additional whiteout header to be written after the original entry.
	ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error)
	// ConvertRead may rewrite the header while unpacking; it returns false
	// when the entry itself should not be written to disk.
	ConvertRead(*tar.Header, string) (bool, error)
}
+
// tarAppender streams files into a tar archive, handling hardlink
// deduplication, container/host ID remapping and whiteout conversion.
type tarAppender struct {
	TarWriter *tar.Writer
	// Buffer is reused scratch space for copying file contents.
	Buffer    *bufio.Writer

	// for hardlink mapping
	SeenFiles map[uint64]string
	UIDMaps   []idtools.IDMap
	GIDMaps   []idtools.IDMap

	// For packing and unpacking whiteout files in the
	// non standard format. The whiteout files defined
	// by the AUFS standard are used as the tar whiteout
	// standard.
	WhiteoutConverter tarWhiteoutConverter
}
+
+// canonicalTarName provides a platform-independent and consistent posix-style
+//path for files and directories to be archived regardless of the platform.
+func canonicalTarName(name string, isDir bool) (string, error) {
+	name, err := CanonicalTarNameForPath(name)
+	if err != nil {
+		return "", err
+	}
+
+	// suffix with '/' for directories
+	if isDir && !strings.HasSuffix(name, "/") {
+		name += "/"
+	}
+	return name, nil
+}
+
// addTarFile adds to the tar archive a file from `path` as `name`
func (ta *tarAppender) addTarFile(path, name string) error {
	fi, err := os.Lstat(path)
	if err != nil {
		return err
	}

	// Resolve the symlink target, if any, so the header records it.
	link := ""
	if fi.Mode()&os.ModeSymlink != 0 {
		if link, err = os.Readlink(path); err != nil {
			return err
		}
	}

	hdr, err := tar.FileInfoHeader(fi, link)
	if err != nil {
		return err
	}
	hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))

	name, err = canonicalTarName(name, fi.IsDir())
	if err != nil {
		return fmt.Errorf("tar: cannot canonicalize path: %v", err)
	}
	hdr.Name = name

	inode, err := setHeaderForSpecialDevice(hdr, ta, name, fi.Sys())
	if err != nil {
		return err
	}

	// if it's not a directory and has more than 1 link,
	// it's hard linked, so set the type flag accordingly
	if !fi.IsDir() && hasHardlinks(fi) {
		// a link should have a name that it links too
		// and that linked name should be first in the tar archive
		if oldpath, ok := ta.SeenFiles[inode]; ok {
			hdr.Typeflag = tar.TypeLink
			hdr.Linkname = oldpath
			hdr.Size = 0 // This Must be here for the writer math to add up!
		} else {
			ta.SeenFiles[inode] = name
		}
	}

	// Preserve file capabilities, if present, as an xattr in the archive.
	capability, _ := system.Lgetxattr(path, "security.capability")
	if capability != nil {
		hdr.Xattrs = make(map[string]string)
		hdr.Xattrs["security.capability"] = string(capability)
	}

	//handle re-mapping container ID mappings back to host ID mappings before
	//writing tar headers/files. We skip whiteout files because they were written
	//by the kernel and already have proper ownership relative to the host
	if !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && (ta.UIDMaps != nil || ta.GIDMaps != nil) {
		uid, gid, err := getFileUIDGID(fi.Sys())
		if err != nil {
			return err
		}
		xUID, err := idtools.ToContainer(uid, ta.UIDMaps)
		if err != nil {
			return err
		}
		xGID, err := idtools.ToContainer(gid, ta.GIDMaps)
		if err != nil {
			return err
		}
		hdr.Uid = xUID
		hdr.Gid = xGID
	}

	if ta.WhiteoutConverter != nil {
		wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi)
		if err != nil {
			return err
		}

		// If a new whiteout file exists, write original hdr, then
		// replace hdr with wo to be written after. Whiteouts should
		// always be written after the original. Note the original
		// hdr may have been updated to be a whiteout with returning
		// a whiteout header
		if wo != nil {
			if err := ta.TarWriter.WriteHeader(hdr); err != nil {
				return err
			}
			if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
				return fmt.Errorf("tar: cannot use whiteout for non-empty file")
			}
			hdr = wo
		}
	}

	if err := ta.TarWriter.WriteHeader(hdr); err != nil {
		return err
	}

	// Only regular files with content carry a data payload after the header.
	if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
		// We use system.OpenSequential to ensure we use sequential file
		// access on Windows to avoid depleting the standby list.
		// On Linux, this equates to a regular os.Open.
		file, err := system.OpenSequential(path)
		if err != nil {
			return err
		}

		ta.Buffer.Reset(ta.TarWriter)
		defer ta.Buffer.Reset(nil)
		_, err = io.Copy(ta.Buffer, file)
		file.Close()
		if err != nil {
			return err
		}
		err = ta.Buffer.Flush()
		if err != nil {
			return err
		}
	}

	return nil
}
+
// createTarFile materializes a single tar entry (hdr plus the content in
// reader) at path inside extractDir, then restores ownership, xattrs, mode
// and timestamps. Link targets that escape extractDir yield a breakoutError.
func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *TarChownOptions, inUserns bool) error {
	// hdr.Mode is in linux format, which we can use for sycalls,
	// but for os.Foo() calls we need the mode converted to os.FileMode,
	// so use hdrInfo.Mode() (they differ for e.g. setuid bits)
	hdrInfo := hdr.FileInfo()

	switch hdr.Typeflag {
	case tar.TypeDir:
		// Create directory unless it exists as a directory already.
		// In that case we just want to merge the two
		if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
			if err := os.Mkdir(path, hdrInfo.Mode()); err != nil {
				return err
			}
		}

	case tar.TypeReg, tar.TypeRegA:
		// Source is regular file. We use system.OpenFileSequential to use sequential
		// file access to avoid depleting the standby list on Windows.
		// On Linux, this equates to a regular os.OpenFile
		file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode())
		if err != nil {
			return err
		}
		if _, err := io.Copy(file, reader); err != nil {
			file.Close()
			return err
		}
		file.Close()

	case tar.TypeBlock, tar.TypeChar:
		if inUserns { // cannot create devices in a userns
			return nil
		}
		// Handle this is an OS-specific way
		if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
			return err
		}

	case tar.TypeFifo:
		// Handle this is an OS-specific way
		if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
			return err
		}

	case tar.TypeLink:
		targetPath := filepath.Join(extractDir, hdr.Linkname)
		// check for hardlink breakout
		if !strings.HasPrefix(targetPath, extractDir) {
			return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname))
		}
		if err := os.Link(targetPath, path); err != nil {
			return err
		}

	case tar.TypeSymlink:
		// 	path 				-> hdr.Linkname = targetPath
		// e.g. /extractDir/path/to/symlink 	-> ../2/file	= /extractDir/path/2/file
		targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname)

		// the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because
		// that symlink would first have to be created, which would be caught earlier, at this very check:
		if !strings.HasPrefix(targetPath, extractDir) {
			return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname))
		}
		if err := os.Symlink(hdr.Linkname, path); err != nil {
			return err
		}

	case tar.TypeXGlobalHeader:
		logrus.Debug("PAX Global Extended Headers found and ignored")
		return nil

	default:
		return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag)
	}

	// Lchown is not supported on Windows.
	if Lchown && runtime.GOOS != "windows" {
		if chownOpts == nil {
			chownOpts = &TarChownOptions{UID: hdr.Uid, GID: hdr.Gid}
		}
		if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil {
			return err
		}
	}

	// Restore extended attributes; filesystems without xattr support are
	// tolerated (errors collected and logged below).
	var errors []string
	for key, value := range hdr.Xattrs {
		if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil {
			if err == syscall.ENOTSUP {
				// We ignore errors here because not all graphdrivers support
				// xattrs *cough* old versions of AUFS *cough*. However only
				// ENOTSUP should be emitted in that case, otherwise we still
				// bail.
				errors = append(errors, err.Error())
				continue
			}
			return err
		}

	}

	if len(errors) > 0 {
		logrus.WithFields(logrus.Fields{
			"errors": errors,
		}).Warn("ignored xattrs in archive: underlying filesystem doesn't support them")
	}

	// There is no LChmod, so ignore mode for symlink. Also, this
	// must happen after chown, as that can modify the file mode
	if err := handleLChmod(hdr, path, hdrInfo); err != nil {
		return err
	}

	aTime := hdr.AccessTime
	if aTime.Before(hdr.ModTime) {
		// Last access time should never be before last modified time.
		aTime = hdr.ModTime
	}

	// system.Chtimes doesn't support a NOFOLLOW flag atm
	if hdr.Typeflag == tar.TypeLink {
		if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
			if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil {
				return err
			}
		}
	} else if hdr.Typeflag != tar.TypeSymlink {
		if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil {
			return err
		}
	} else {
		ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)}
		if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
			return err
		}
	}
	return nil
}
+
+// Tar creates an archive from the directory at `path`, and returns it as a
+// stream of bytes.
+func Tar(path string, compression Compression) (io.ReadCloser, error) {
+	return TarWithOptions(path, &TarOptions{Compression: compression})
+}
+
// TarWithOptions creates an archive from the directory at `path`, only including files whose relative
// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`.
func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {

	// Fix the source path to work with long path names. This is a no-op
	// on platforms other than Windows.
	srcPath = fixVolumePathPrefix(srcPath)

	patterns, patDirs, exceptions, err := fileutils.CleanPatterns(options.ExcludePatterns)

	if err != nil {
		return nil, err
	}

	// The archive is produced by a background goroutine and streamed to the
	// caller through this pipe.
	pipeReader, pipeWriter := io.Pipe()

	compressWriter, err := CompressStream(pipeWriter, options.Compression)
	if err != nil {
		return nil, err
	}

	go func() {
		ta := &tarAppender{
			TarWriter:         tar.NewWriter(compressWriter),
			Buffer:            pools.BufioWriter32KPool.Get(nil),
			SeenFiles:         make(map[uint64]string),
			UIDMaps:           options.UIDMaps,
			GIDMaps:           options.GIDMaps,
			WhiteoutConverter: getWhiteoutConverter(options.WhiteoutFormat),
		}

		defer func() {
			// Make sure to check the error on Close.
			if err := ta.TarWriter.Close(); err != nil {
				logrus.Errorf("Can't close tar writer: %s", err)
			}
			if err := compressWriter.Close(); err != nil {
				logrus.Errorf("Can't close compress writer: %s", err)
			}
			if err := pipeWriter.Close(); err != nil {
				logrus.Errorf("Can't close pipe writer: %s", err)
			}
		}()

		// this buffer is needed for the duration of this piped stream
		defer pools.BufioWriter32KPool.Put(ta.Buffer)

		// In general we log errors here but ignore them because
		// during e.g. a diff operation the container can continue
		// mutating the filesystem and we can see transient errors
		// from this

		stat, err := os.Lstat(srcPath)
		if err != nil {
			return
		}

		if !stat.IsDir() {
			// We can't later join a non-dir with any includes because the
			// 'walk' will error if "file/." is stat-ed and "file" is not a
			// directory. So, we must split the source path and use the
			// basename as the include.
			if len(options.IncludeFiles) > 0 {
				logrus.Warn("Tar: Can't archive a file with includes")
			}

			dir, base := SplitPathDirEntry(srcPath)
			srcPath = dir
			options.IncludeFiles = []string{base}
		}

		if len(options.IncludeFiles) == 0 {
			options.IncludeFiles = []string{"."}
		}

		// seen guards against emitting the same relative path twice when
		// includes overlap.
		seen := make(map[string]bool)

		for _, include := range options.IncludeFiles {
			rebaseName := options.RebaseNames[include]

			walkRoot := getWalkRoot(srcPath, include)
			filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error {
				if err != nil {
					logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err)
					return nil
				}

				relFilePath, err := filepath.Rel(srcPath, filePath)
				if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) {
					// Error getting relative path OR we are looking
					// at the source directory path. Skip in both situations.
					return nil
				}

				if options.IncludeSourceDir && include == "." && relFilePath != "." {
					relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator))
				}

				skip := false

				// If "include" is an exact match for the current file
				// then even if there's an "excludePatterns" pattern that
				// matches it, don't skip it. IOW, assume an explicit 'include'
				// is asking for that file no matter what - which is true
				// for some files, like .dockerignore and Dockerfile (sometimes)
				if include != relFilePath {
					skip, err = fileutils.OptimizedMatches(relFilePath, patterns, patDirs)
					if err != nil {
						logrus.Errorf("Error matching %s: %v", relFilePath, err)
						return err
					}
				}

				if skip {
					// If we want to skip this file and its a directory
					// then we should first check to see if there's an
					// excludes pattern (eg !dir/file) that starts with this
					// dir. If so then we can't skip this dir.

					// Its not a dir then so we can just return/skip.
					if !f.IsDir() {
						return nil
					}

					// No exceptions (!...) in patterns so just skip dir
					if !exceptions {
						return filepath.SkipDir
					}

					dirSlash := relFilePath + string(filepath.Separator)

					for _, pat := range patterns {
						if pat[0] != '!' {
							continue
						}
						pat = pat[1:] + string(filepath.Separator)
						if strings.HasPrefix(pat, dirSlash) {
							// found a match - so can't skip this dir
							return nil
						}
					}

					// No matching exclusion dir so just skip dir
					return filepath.SkipDir
				}

				if seen[relFilePath] {
					return nil
				}
				seen[relFilePath] = true

				// Rename the base resource.
				if rebaseName != "" {
					var replacement string
					if rebaseName != string(filepath.Separator) {
						// Special case the root directory to replace with an
						// empty string instead so that we don't end up with
						// double slashes in the paths.
						replacement = rebaseName
					}

					relFilePath = strings.Replace(relFilePath, include, replacement, 1)
				}

				if err := ta.addTarFile(filePath, relFilePath); err != nil {
					logrus.Errorf("Can't add file %s to tar: %s", filePath, err)
					// if pipe is broken, stop writing tar stream to it
					if err == io.ErrClosedPipe {
						return err
					}
				}
				return nil
			})
		}
	}()

	return pipeReader, nil
}
+
// Unpack unpacks the decompressedArchive to dest with options.
func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error {
	tr := tar.NewReader(decompressedArchive)
	trBuf := pools.BufioReader32KPool.Get(nil)
	defer pools.BufioReader32KPool.Put(trBuf)

	var dirs []*tar.Header
	remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
	if err != nil {
		return err
	}
	whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat)

	// Iterate through the files in the archive.
loop:
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			// end of tar archive
			break
		}
		if err != nil {
			return err
		}

		// Normalize name, for safety and for a simple is-root check
		// This keeps "../" as-is, but normalizes "/../" to "/". Or Windows:
		// This keeps "..\" as-is, but normalizes "\..\" to "\".
		hdr.Name = filepath.Clean(hdr.Name)

		for _, exclude := range options.ExcludePatterns {
			if strings.HasPrefix(hdr.Name, exclude) {
				continue loop
			}
		}

		// After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in
		// the filepath format for the OS on which the daemon is running. Hence
		// the check for a slash-suffix MUST be done in an OS-agnostic way.
		if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
			// Not the root directory, ensure that the parent directory exists
			parent := filepath.Dir(hdr.Name)
			parentPath := filepath.Join(dest, parent)
			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
				err = idtools.MkdirAllNewAs(parentPath, 0777, remappedRootUID, remappedRootGID)
				if err != nil {
					return err
				}
			}
		}

		path := filepath.Join(dest, hdr.Name)
		rel, err := filepath.Rel(dest, path)
		if err != nil {
			return err
		}
		// Reject entries whose cleaned path would escape dest (path
		// traversal attempts).
		if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
			return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
		}

		// If path exits we almost always just want to remove and replace it
		// The only exception is when it is a directory *and* the file from
		// the layer is also a directory. Then we want to merge them (i.e.
		// just apply the metadata from the layer).
		if fi, err := os.Lstat(path); err == nil {
			if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir {
				// If NoOverwriteDirNonDir is true then we cannot replace
				// an existing directory with a non-directory from the archive.
				return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest)
			}

			if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir {
				// If NoOverwriteDirNonDir is true then we cannot replace
				// an existing non-directory with a directory from the archive.
				return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest)
			}

			if fi.IsDir() && hdr.Name == "." {
				continue
			}

			if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
				if err := os.RemoveAll(path); err != nil {
					return err
				}
			}
		}
		trBuf.Reset(tr)

		// if the options contain a uid & gid maps, convert header uid/gid
		// entries using the maps such that lchown sets the proper mapped
		// uid/gid after writing the file. We only perform this mapping if
		// the file isn't already owned by the remapped root UID or GID, as
		// that specific uid/gid has no mapping from container -> host, and
		// those files already have the proper ownership for inside the
		// container.
		if hdr.Uid != remappedRootUID {
			xUID, err := idtools.ToHost(hdr.Uid, options.UIDMaps)
			if err != nil {
				return err
			}
			hdr.Uid = xUID
		}
		if hdr.Gid != remappedRootGID {
			xGID, err := idtools.ToHost(hdr.Gid, options.GIDMaps)
			if err != nil {
				return err
			}
			hdr.Gid = xGID
		}

		if whiteoutConverter != nil {
			writeFile, err := whiteoutConverter.ConvertRead(hdr, path)
			if err != nil {
				return err
			}
			if !writeFile {
				continue
			}
		}

		if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil {
			return err
		}

		// Directory mtimes must be handled at the end to avoid further
		// file creation in them to modify the directory mtime
		if hdr.Typeflag == tar.TypeDir {
			dirs = append(dirs, hdr)
		}
	}

	for _, hdr := range dirs {
		path := filepath.Join(dest, hdr.Name)

		if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
			return err
		}
	}
	return nil
}
+
+// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive may be compressed with one of the following algorithms:
+//  identity (uncompressed), gzip, bzip2, xz.
+// FIXME: specify behavior when target path exists vs. doesn't exist.
+func Untar(tarArchive io.Reader, dest string, options *TarOptions) error {
+	return untarHandler(tarArchive, dest, options, true)
+}
+
+// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive must be an uncompressed stream.
+func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error {
+	return untarHandler(tarArchive, dest, options, false)
+}
+
+// Handler for teasing out the automatic decompression
+func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error {
+	if tarArchive == nil {
+		return fmt.Errorf("Empty archive")
+	}
+	dest = filepath.Clean(dest)
+	if options == nil {
+		options = &TarOptions{}
+	}
+	if options.ExcludePatterns == nil {
+		options.ExcludePatterns = []string{}
+	}
+
+	r := tarArchive
+	if decompress {
+		decompressedArchive, err := DecompressStream(tarArchive)
+		if err != nil {
+			return err
+		}
+		defer decompressedArchive.Close()
+		r = decompressedArchive
+	}
+
+	return Unpack(r, dest, options)
+}
+
+// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
+// If either Tar or Untar fails, TarUntar aborts and returns the error.
+func (archiver *Archiver) TarUntar(src, dst string) error {
+	logrus.Debugf("TarUntar(%s %s)", src, dst)
+	archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed})
+	if err != nil {
+		return err
+	}
+	defer archive.Close()
+
+	var options *TarOptions
+	if archiver.UIDMaps != nil || archiver.GIDMaps != nil {
+		options = &TarOptions{
+			UIDMaps: archiver.UIDMaps,
+			GIDMaps: archiver.GIDMaps,
+		}
+	}
+	return archiver.Untar(archive, dst, options)
+}
+
+// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
+// If either Tar or Untar fails, TarUntar aborts and returns the error.
+func TarUntar(src, dst string) error {
+	return defaultArchiver.TarUntar(src, dst)
+}
+
+// UntarPath untar a file from path to a destination, src is the source tar file path.
+// The archiver's UID/GID maps, when set, are applied to extracted entries.
+func (archiver *Archiver) UntarPath(src, dst string) error {
+	archive, err := os.Open(src)
+	if err != nil {
+		return err
+	}
+	defer archive.Close()
+	// Same mapping propagation as TarUntar: only build options when maps exist.
+	var options *TarOptions
+	if archiver.UIDMaps != nil || archiver.GIDMaps != nil {
+		options = &TarOptions{
+			UIDMaps: archiver.UIDMaps,
+			GIDMaps: archiver.GIDMaps,
+		}
+	}
+	return archiver.Untar(archive, dst, options)
+}
+
+// UntarPath is a convenience function which looks for an archive
+// at filesystem path `src`, and unpacks it at `dst`.
+// It uses the package-level defaultArchiver (no ID remapping).
+func UntarPath(src, dst string) error {
+	return defaultArchiver.UntarPath(src, dst)
+}
+
+// CopyWithTar creates a tar archive of filesystem path `src`, and
+// unpacks it at filesystem path `dst`.
+// The archive is streamed directly with fixed buffering and no
+// intermediary disk IO.
+func (archiver *Archiver) CopyWithTar(src, dst string) error {
+	srcSt, err := os.Stat(src)
+	if err != nil {
+		return err
+	}
+	// A single file takes the dedicated single-file path.
+	if !srcSt.IsDir() {
+		return archiver.CopyFileWithTar(src, dst)
+	}
+
+	// if this archiver is set up with ID mapping we need to create
+	// the new destination directory with the remapped root UID/GID pair
+	// as owner
+	rootUID, rootGID, err := idtools.GetRootUIDGID(archiver.UIDMaps, archiver.GIDMaps)
+	if err != nil {
+		return err
+	}
+	// Create dst, copy src's content into it
+	logrus.Debugf("Creating dest directory: %s", dst)
+	if err := idtools.MkdirAllNewAs(dst, 0755, rootUID, rootGID); err != nil {
+		return err
+	}
+	logrus.Debugf("Calling TarUntar(%s, %s)", src, dst)
+	// Directory contents are streamed via the tar pipe in TarUntar.
+	return archiver.TarUntar(src, dst)
+}
+
+// CopyWithTar creates a tar archive of filesystem path `src`, and
+// unpacks it at filesystem path `dst`.
+// The archive is streamed directly with fixed buffering and no
+// intermediary disk IO.
+// It uses the package-level defaultArchiver (no ID remapping).
+func CopyWithTar(src, dst string) error {
+	return defaultArchiver.CopyWithTar(src, dst)
+}
+
+// CopyFileWithTar emulates the behavior of the 'cp' command-line
+// for a single file. It copies a regular file from path `src` to
+// path `dst`, and preserves all its metadata.
+//
+// Internally the file is wrapped in a one-entry tar stream written to an
+// io.Pipe by a goroutine, while Untar extracts it on the reading end; the
+// named return value `err` lets the deferred error-collection overwrite it.
+func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
+	logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst)
+	srcSt, err := os.Stat(src)
+	if err != nil {
+		return err
+	}
+
+	if srcSt.IsDir() {
+		return fmt.Errorf("Can't copy a directory")
+	}
+
+	// Clean up the trailing slash. This must be done in an operating
+	// system specific manner.
+	if dst[len(dst)-1] == os.PathSeparator {
+		dst = filepath.Join(dst, filepath.Base(src))
+	}
+	// Create the holding directory if necessary
+	if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil {
+		return err
+	}
+
+	// Writer goroutine: produce a single-entry tar stream into the pipe.
+	r, w := io.Pipe()
+	errC := promise.Go(func() error {
+		defer w.Close()
+
+		srcF, err := os.Open(src)
+		if err != nil {
+			return err
+		}
+		defer srcF.Close()
+
+		hdr, err := tar.FileInfoHeader(srcSt, "")
+		if err != nil {
+			return err
+		}
+		// The entry is named after the destination base so Untar places
+		// it correctly under filepath.Dir(dst).
+		hdr.Name = filepath.Base(dst)
+		hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
+
+		remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(archiver.UIDMaps, archiver.GIDMaps)
+		if err != nil {
+			return err
+		}
+
+		// only perform mapping if the file being copied isn't already owned by the
+		// uid or gid of the remapped root in the container
+		if remappedRootUID != hdr.Uid {
+			xUID, err := idtools.ToHost(hdr.Uid, archiver.UIDMaps)
+			if err != nil {
+				return err
+			}
+			hdr.Uid = xUID
+		}
+		if remappedRootGID != hdr.Gid {
+			xGID, err := idtools.ToHost(hdr.Gid, archiver.GIDMaps)
+			if err != nil {
+				return err
+			}
+			hdr.Gid = xGID
+		}
+
+		tw := tar.NewWriter(w)
+		defer tw.Close()
+		if err := tw.WriteHeader(hdr); err != nil {
+			return err
+		}
+		if _, err := io.Copy(tw, srcF); err != nil {
+			return err
+		}
+		return nil
+	})
+	// Collect the writer goroutine's error, but let an Untar error (set
+	// below) take precedence since `err` is only overwritten when nil.
+	defer func() {
+		if er := <-errC; err == nil && er != nil {
+			err = er
+		}
+	}()
+
+	err = archiver.Untar(r, filepath.Dir(dst), nil)
+	if err != nil {
+		// Unblock the writer goroutine if extraction failed mid-stream.
+		r.CloseWithError(err)
+	}
+	return err
+}
+
+// CopyFileWithTar emulates the behavior of the 'cp' command-line
+// for a single file. It copies a regular file from path `src` to
+// path `dst`, and preserves all its metadata.
+//
+// Destination handling is in an operating specific manner depending
+// where the daemon is running. If `dst` ends with a trailing slash
+// the final destination path will be `dst/base(src)`  (Linux) or
+// `dst\base(src)` (Windows).
+// It uses the package-level defaultArchiver (no ID remapping).
+func CopyFileWithTar(src, dst string) (err error) {
+	return defaultArchiver.CopyFileWithTar(src, dst)
+}
+
+// cmdStream executes a command, and returns its stdout as a stream.
+// If the command fails to run or doesn't complete successfully, an error
+// will be returned, including anything written on stderr.
+// The returned channel is closed once the command has fully exited,
+// letting callers wait for process completion after draining the stream.
+func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, <-chan struct{}, error) {
+	chdone := make(chan struct{})
+	cmd.Stdin = input
+	pipeR, pipeW := io.Pipe()
+	cmd.Stdout = pipeW
+	// Capture stderr so it can be folded into the error message on failure.
+	var errBuf bytes.Buffer
+	cmd.Stderr = &errBuf
+
+	// Run the command and return the pipe
+	if err := cmd.Start(); err != nil {
+		return nil, nil, err
+	}
+
+	// Copy stdout to the returned pipe
+	go func() {
+		if err := cmd.Wait(); err != nil {
+			// Readers of pipeR observe the failure (with stderr text) as a read error.
+			pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String()))
+		} else {
+			pipeW.Close()
+		}
+		close(chdone)
+	}()
+
+	return pipeR, chdone, nil
+}
+
+// NewTempArchive reads the content of src into a temporary file, and returns the contents
+// of that file as an archive. The archive can only be read once - as soon as reading completes,
+// the file will be deleted.
+//
+// NOTE(review): on the error paths below the temp file `f` is neither
+// closed nor removed, leaking a file in `dir` — confirm against upstream.
+func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) {
+	f, err := ioutil.TempFile(dir, "")
+	if err != nil {
+		return nil, err
+	}
+	if _, err := io.Copy(f, src); err != nil {
+		return nil, err
+	}
+	// Rewind so the caller reads from the beginning of the spooled data.
+	if _, err := f.Seek(0, 0); err != nil {
+		return nil, err
+	}
+	st, err := f.Stat()
+	if err != nil {
+		return nil, err
+	}
+	size := st.Size()
+	return &TempArchive{File: f, Size: size}, nil
+}
+
+// TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes,
+// the file will be deleted.
+// It embeds *os.File, so all file methods are available; Read and Close
+// are overridden below to implement the self-deleting behavior.
+type TempArchive struct {
+	*os.File
+	Size   int64 // Pre-computed from Stat().Size() as a convenience
+	read   int64 // bytes consumed so far, maintained by Read
+	closed bool  // guards against double-closing the underlying file
+}
+
+// Close closes the underlying file if it's still open, or does a no-op
+// to allow callers to try to close the TempArchive multiple times safely.
+func (archive *TempArchive) Close() error {
+	// Idempotent: subsequent calls return nil without touching the file.
+	if archive.closed {
+		return nil
+	}
+
+	archive.closed = true
+
+	return archive.File.Close()
+}
+
+// Read forwards to the underlying file while counting consumed bytes.
+// Once the full Size has been read (or any error, including io.EOF,
+// occurs), the file is closed and deleted from disk, enforcing the
+// read-once contract of TempArchive.
+func (archive *TempArchive) Read(data []byte) (int, error) {
+	n, err := archive.File.Read(data)
+	archive.read += int64(n)
+	if err != nil || archive.read == archive.Size {
+		archive.Close()
+		os.Remove(archive.File.Name())
+	}
+	return n, err
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go
new file mode 100644
index 0000000000000000000000000000000000000000..6b2a31ff1fd78f9b18d22d562463b905dcc7095d
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go
@@ -0,0 +1,95 @@
+package archive
+
+import (
+	"archive/tar"
+	"os"
+	"path/filepath"
+	"strings"
+	"syscall"
+
+	"github.com/docker/docker/pkg/system"
+)
+
+// getWhiteoutConverter returns the converter that translates between
+// overlayfs and AUFS whiteout representations, or nil when the archive
+// uses the default (AUFS) format and no conversion is needed.
+func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter {
+	if format == OverlayWhiteoutFormat {
+		return overlayWhiteoutConverter{}
+	}
+	return nil
+}
+
+// overlayWhiteoutConverter rewrites whiteout entries between the overlayfs
+// on-disk representation and the AUFS in-archive representation.
+type overlayWhiteoutConverter struct{}
+
+// ConvertWrite is called while writing a tar stream from an overlayfs
+// layer: overlay whiteouts (0/0 char devices) become AUFS ".wh."-prefixed
+// regular files, and opaque directories additionally yield a second header
+// `wo` for the AUFS opaque-dir marker file inside the directory.
+func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) {
+	// convert whiteouts to AUFS format
+	if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 {
+		// we just rename the file and make it normal
+		dir, filename := filepath.Split(hdr.Name)
+		hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename)
+		hdr.Mode = 0600
+		hdr.Typeflag = tar.TypeReg
+		hdr.Size = 0
+	}
+
+	if fi.Mode()&os.ModeDir != 0 {
+		// convert opaque dirs to AUFS format by writing an empty file with the prefix
+		opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque")
+		if err != nil {
+			return nil, err
+		}
+		if len(opaque) == 1 && opaque[0] == 'y' {
+			// Drop the overlay-specific xattr so it is not archived verbatim.
+			if hdr.Xattrs != nil {
+				delete(hdr.Xattrs, "trusted.overlay.opaque")
+			}
+
+			// create a header for the whiteout file
+			// it should inherit some properties from the parent, but be a regular file
+			wo = &tar.Header{
+				Typeflag:   tar.TypeReg,
+				Mode:       hdr.Mode & int64(os.ModePerm),
+				Name:       filepath.Join(hdr.Name, WhiteoutOpaqueDir),
+				Size:       0,
+				Uid:        hdr.Uid,
+				Uname:      hdr.Uname,
+				Gid:        hdr.Gid,
+				Gname:      hdr.Gname,
+				AccessTime: hdr.AccessTime,
+				ChangeTime: hdr.ChangeTime,
+			}
+		}
+	}
+
+	return
+}
+
+// ConvertRead is called while extracting a tar stream onto an overlayfs
+// layer: AUFS markers are translated back to overlay form. It returns
+// false when the tar entry itself should NOT be written to disk (the
+// marker has been fully handled by an xattr or device node instead).
+func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) {
+	base := filepath.Base(path)
+	dir := filepath.Dir(path)
+
+	// if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay
+	if base == WhiteoutOpaqueDir {
+		if err := syscall.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0); err != nil {
+			return false, err
+		}
+
+		// don't write the file itself
+		return false, nil
+	}
+
+	// if a file was deleted and we are using overlay, we need to create a character device
+	if strings.HasPrefix(base, WhiteoutPrefix) {
+		originalBase := base[len(WhiteoutPrefix):]
+		originalPath := filepath.Join(dir, originalBase)
+
+		// A 0/0 char device is the overlayfs whiteout representation.
+		if err := syscall.Mknod(originalPath, syscall.S_IFCHR, 0); err != nil {
+			return false, err
+		}
+		if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil {
+			return false, err
+		}
+
+		// don't write the file itself
+		return false, nil
+	}
+
+	return true, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_other.go b/vendor/github.com/docker/docker/pkg/archive/archive_other.go
new file mode 100644
index 0000000000000000000000000000000000000000..54acbf285667f05d234b39171c5a634386050e3b
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/archive_other.go
@@ -0,0 +1,7 @@
+// +build !linux
+
+package archive
+
+// getWhiteoutConverter always returns nil on non-Linux platforms:
+// overlay/AUFS whiteout translation only applies to Linux layers.
+func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter {
+	return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go
new file mode 100644
index 0000000000000000000000000000000000000000..7083f2fa535b44bf63fbba38b99814ad8fe7a7c1
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go
@@ -0,0 +1,118 @@
+// +build !windows
+
+package archive
+
+import (
+	"archive/tar"
+	"errors"
+	"os"
+	"path/filepath"
+	"syscall"
+
+	"github.com/docker/docker/pkg/system"
+	rsystem "github.com/opencontainers/runc/libcontainer/system"
+)
+
+// fixVolumePathPrefix does platform specific processing to ensure that if
+// the path being passed in is not in a volume path format, convert it to one.
+// On unix this is a no-op; the Windows variant adds a long-path prefix.
+func fixVolumePathPrefix(srcPath string) string {
+	return srcPath
+}
+
+// getWalkRoot calculates the root path when performing a TarWithOptions.
+// We use a separate function as this is platform specific. On Linux, we
+// can't use filepath.Join(srcPath,include) because this will clean away
+// a trailing "." or "/" which may be important.
+func getWalkRoot(srcPath string, include string) string {
+	// Plain concatenation preserves any trailing "." or "/" in include.
+	return srcPath + string(filepath.Separator) + include
+}
+
+// CanonicalTarNameForPath returns platform-specific filepath
+// to canonical posix-style path for tar archival. p is relative
+// path.
+func CanonicalTarNameForPath(p string) (string, error) {
+	return p, nil // already unix-style
+}
+
+// chmodTarEntry is used to adjust the file permissions used in tar header based
+// on the platform the archival is done.
+
+func chmodTarEntry(perm os.FileMode) os.FileMode {
+	return perm // noop for unix as golang APIs provide perm bits correctly
+}
+
+// setHeaderForSpecialDevice fills in the device major/minor numbers of the
+// tar header from the raw stat data, and returns the file's inode so the
+// caller can detect hardlinks. stat must be a *syscall.Stat_t.
+func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) {
+	s, ok := stat.(*syscall.Stat_t)
+
+	if !ok {
+		err = errors.New("cannot convert stat value to syscall.Stat_t")
+		return
+	}
+
+	inode = uint64(s.Ino)
+
+	// Currently go does not fill in the major/minors
+	// NOTE(review): these mask tests are not exact S_IFMT comparisons;
+	// other file types whose mode bits overlap S_IFBLK/S_IFCHR (e.g.
+	// directories, sockets) also satisfy them — confirm intent upstream.
+	if s.Mode&syscall.S_IFBLK != 0 ||
+		s.Mode&syscall.S_IFCHR != 0 {
+		hdr.Devmajor = int64(major(uint64(s.Rdev)))
+		hdr.Devminor = int64(minor(uint64(s.Rdev)))
+	}
+
+	return
+}
+
+// getFileUIDGID extracts the owner UID and GID from raw stat data.
+// stat must be a *syscall.Stat_t; otherwise (-1, -1, error) is returned.
+func getFileUIDGID(stat interface{}) (int, int, error) {
+	s, ok := stat.(*syscall.Stat_t)
+
+	if !ok {
+		return -1, -1, errors.New("cannot convert stat value to syscall.Stat_t")
+	}
+	return int(s.Uid), int(s.Gid), nil
+}
+
+// major extracts the major device number from a Linux dev_t value.
+func major(device uint64) uint64 {
+	return (device >> 8) & 0xfff
+}
+
+// minor extracts the minor device number from a Linux dev_t value
+// (low 8 bits plus the extended high bits).
+func minor(device uint64) uint64 {
+	return (device & 0xff) | ((device >> 12) & 0xfff00)
+}
+
+// handleTarTypeBlockCharFifo is an OS-specific helper function used by
+// createTarFile to handle the following types of header: Block; Char; Fifo
+func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
+	if rsystem.RunningInUserNS() {
+		// cannot create a device if running in user namespace
+		return nil
+	}
+
+	// Combine the permission bits with the file-type bits expected by mknod.
+	mode := uint32(hdr.Mode & 07777)
+	switch hdr.Typeflag {
+	case tar.TypeBlock:
+		mode |= syscall.S_IFBLK
+	case tar.TypeChar:
+		mode |= syscall.S_IFCHR
+	case tar.TypeFifo:
+		mode |= syscall.S_IFIFO
+	}
+
+	if err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))); err != nil {
+		return err
+	}
+	return nil
+}
+
+// handleLChmod applies the header's permissions to path, avoiding chmod
+// through symlinks: symlink entries are skipped entirely, and hardlink
+// entries are only chmod'ed when their target is not itself a symlink.
+func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
+	if hdr.Typeflag == tar.TypeLink {
+		if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
+			if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
+				return err
+			}
+		}
+	} else if hdr.Typeflag != tar.TypeSymlink {
+		if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_windows.go b/vendor/github.com/docker/docker/pkg/archive/archive_windows.go
new file mode 100644
index 0000000000000000000000000000000000000000..5c3a1be3401d5f7a801df696067c97d77c8cdc7b
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/archive_windows.go
@@ -0,0 +1,70 @@
+// +build windows
+
+package archive
+
+import (
+	"archive/tar"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/docker/docker/pkg/longpath"
+)
+
+// fixVolumePathPrefix does platform specific processing to ensure that if
+// the path being passed in is not in a volume path format, convert it to one.
+// On Windows this prepends the long-path (`\\?\`) prefix via longpath.
+func fixVolumePathPrefix(srcPath string) string {
+	return longpath.AddPrefix(srcPath)
+}
+
+// getWalkRoot calculates the root path when performing a TarWithOptions.
+// We use a separate function as this is platform specific.
+// On Windows filepath.Join is safe (no trailing "." / "/" significance).
+func getWalkRoot(srcPath string, include string) string {
+	return filepath.Join(srcPath, include)
+}
+
+// CanonicalTarNameForPath returns platform-specific filepath
+// to canonical posix-style path for tar archival. p is relative
+// path.
+func CanonicalTarNameForPath(p string) (string, error) {
+	// windows: convert windows style relative path with backslashes
+	// into forward slashes. Since windows does not allow '/' or '\'
+	// in file names, it is mostly safe to replace however we must
+	// check just in case
+	if strings.Contains(p, "/") {
+		return "", fmt.Errorf("Windows path contains forward slash: %s", p)
+	}
+	return strings.Replace(p, string(os.PathSeparator), "/", -1), nil
+
+}
+
+// chmodTarEntry is used to adjust the file permissions used in tar header based
+// on the platform the archival is done.
+// Windows has no POSIX permission bits, so entries are normalized to a
+// 0755-masked mode with the execute bits always set.
+func chmodTarEntry(perm os.FileMode) os.FileMode {
+	perm &= 0755
+	// Add the x bit: make everything +x from windows
+	perm |= 0111
+
+	return perm
+}
+
+// setHeaderForSpecialDevice is a no-op on Windows; it always returns a
+// zero inode and nil error.
+func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) {
+	// do nothing. no notion of Rdev, Inode, Nlink in stat on Windows
+	return
+}
+
+// handleTarTypeBlockCharFifo is an OS-specific helper function used by
+// createTarFile to handle the following types of header: Block; Char; Fifo
+// On Windows device/fifo nodes cannot be created, so this is a no-op.
+func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
+	return nil
+}
+
+// handleLChmod is a no-op on Windows: POSIX permission bits do not apply.
+func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
+	return nil
+}
+
+// getFileUIDGID always reports root (0, 0) on Windows.
+func getFileUIDGID(stat interface{}) (int, int, error) {
+	// no notion of file ownership mapping yet on Windows
+	return 0, 0, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes.go b/vendor/github.com/docker/docker/pkg/archive/changes.go
new file mode 100644
index 0000000000000000000000000000000000000000..c07d55cbd9a762c9495b8f4d7f2b6d627acb88df
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/changes.go
@@ -0,0 +1,446 @@
+package archive
+
+import (
+	"archive/tar"
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sort"
+	"strings"
+	"syscall"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/pkg/idtools"
+	"github.com/docker/docker/pkg/pools"
+	"github.com/docker/docker/pkg/system"
+)
+
+// ChangeType represents the change type.
+type ChangeType int
+
+// Note: these iota constants are untyped, so they convert implicitly to
+// ChangeType where needed.
+const (
+	// ChangeModify represents the modify operation.
+	ChangeModify = iota
+	// ChangeAdd represents the add operation.
+	ChangeAdd
+	// ChangeDelete represents the delete operation.
+	ChangeDelete
+)
+
+// String returns the single-letter code used when printing a Change:
+// "C" (modify), "A" (add), "D" (delete), or "" for unknown values.
+func (c ChangeType) String() string {
+	switch c {
+	case ChangeModify:
+		return "C"
+	case ChangeAdd:
+		return "A"
+	case ChangeDelete:
+		return "D"
+	}
+	return ""
+}
+
+// Change represents a change, it wraps the change type and path.
+// It describes changes of the files in the path respect to the
+// parent layers. The change could be modify, add, delete.
+// This is used for layer diff.
+type Change struct {
+	Path string     // OS-specific absolute path within the layer (starts with the path separator)
+	Kind ChangeType // one of ChangeModify, ChangeAdd, ChangeDelete
+}
+
+// String renders the change as "<kind letter> <path>", e.g. "A /etc/hosts".
+func (change *Change) String() string {
+	return fmt.Sprintf("%s %s", change.Kind, change.Path)
+}
+
+// changesByPath implements sort.Interface to order changes
+// lexicographically by path (for sort.Sort).
+type changesByPath []Change
+
+func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path }
+func (c changesByPath) Len() int           { return len(c) }
+func (c changesByPath) Swap(i, j int)      { c[j], c[i] = c[i], c[j] }
+
+// Gnu tar and the go tar writer don't have sub-second mtime
+// precision, which is problematic when we apply changes via tar
+// files, we handle this by comparing for exact times, *or* same
+// second count and either a or b having exactly 0 nanoseconds
+func sameFsTime(a, b time.Time) bool {
+	return a == b ||
+		(a.Unix() == b.Unix() &&
+			(a.Nanosecond() == 0 || b.Nanosecond() == 0))
+}
+
+// sameFsTimeSpec is the syscall.Timespec counterpart of sameFsTime:
+// equal seconds, and either equal nanoseconds or one side truncated to 0.
+func sameFsTimeSpec(a, b syscall.Timespec) bool {
+	return a.Sec == b.Sec &&
+		(a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0)
+}
+
+// Changes walks the path rw and determines changes for the files in the path,
+// with respect to the parent layers
+// Deletions are detected via AUFS ".wh." whiteout files and AUFS
+// metadata entries are skipped.
+func Changes(layers []string, rw string) ([]Change, error) {
+	return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip)
+}
+
+// aufsMetadataSkip reports whether path is an AUFS metadata entry
+// (matches "/<WhiteoutMetaPrefix>*") and should be skipped. On a bad
+// pattern it errs on the side of skipping.
+func aufsMetadataSkip(path string) (skip bool, err error) {
+	skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path)
+	if err != nil {
+		skip = true
+	}
+	return
+}
+
+// aufsDeletedFile maps an AUFS whiteout entry back to the path it deletes:
+// for ".wh.<name>" it returns "<dir>/<name>"; otherwise it returns "",
+// meaning the entry is not a deletion marker.
+func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) {
+	f := filepath.Base(path)
+
+	// If there is a whiteout, then the file was removed
+	if strings.HasPrefix(f, WhiteoutPrefix) {
+		originalFile := f[len(WhiteoutPrefix):]
+		return filepath.Join(filepath.Dir(path), originalFile), nil
+	}
+
+	return "", nil
+}
+
+// skipChange decides whether a path should be ignored during the walk;
+// deleteChange maps (root, path, info) to the deleted path, or "" if none.
+type skipChange func(string) (bool, error)
+type deleteChange func(string, string, os.FileInfo) (string, error)
+
+// changes walks the rw layer and classifies every entry as an add, modify,
+// or delete relative to the given parent layers. dc recognizes deletion
+// markers (whiteouts); sc filters out filesystem-specific metadata.
+// Parent directories of adds/deletes are also reported as modified so that
+// permissions can be restored when the diff is re-applied.
+func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) {
+	var (
+		changes     []Change
+		changedDirs = make(map[string]struct{})
+	)
+
+	err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+
+		// Rebase path
+		path, err = filepath.Rel(rw, path)
+		if err != nil {
+			return err
+		}
+
+		// As this runs on the daemon side, file paths are OS specific.
+		path = filepath.Join(string(os.PathSeparator), path)
+
+		// Skip root
+		if path == string(os.PathSeparator) {
+			return nil
+		}
+
+		if sc != nil {
+			if skip, err := sc(path); skip {
+				return err
+			}
+		}
+
+		change := Change{
+			Path: path,
+		}
+
+		deletedFile, err := dc(rw, path, f)
+		if err != nil {
+			return err
+		}
+
+		// Find out what kind of modification happened
+		if deletedFile != "" {
+			change.Path = deletedFile
+			change.Kind = ChangeDelete
+		} else {
+			// Otherwise, the file was added
+			change.Kind = ChangeAdd
+
+			// ...Unless it already existed in a top layer, in which case, it's a modification
+			for _, layer := range layers {
+				stat, err := os.Stat(filepath.Join(layer, path))
+				if err != nil && !os.IsNotExist(err) {
+					return err
+				}
+				if err == nil {
+					// The file existed in the top layer, so that's a modification
+
+					// However, if it's a directory, maybe it wasn't actually modified.
+					// If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar
+					if stat.IsDir() && f.IsDir() {
+						if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) {
+							// Both directories are the same, don't record the change
+							return nil
+						}
+					}
+					change.Kind = ChangeModify
+					break
+				}
+			}
+		}
+
+		// If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files.
+		// This block is here to ensure the change is recorded even if the
+		// modify time, mode and size of the parent directory in the rw and ro layers are all equal.
+		// Check https://github.com/docker/docker/pull/13590 for details.
+		if f.IsDir() {
+			changedDirs[path] = struct{}{}
+		}
+		if change.Kind == ChangeAdd || change.Kind == ChangeDelete {
+			parent := filepath.Dir(path)
+			if _, ok := changedDirs[parent]; !ok && parent != "/" {
+				changes = append(changes, Change{Path: parent, Kind: ChangeModify})
+				changedDirs[parent] = struct{}{}
+			}
+		}
+
+		// Record change
+		changes = append(changes, change)
+		return nil
+	})
+	// A vanished rw directory yields an empty change set rather than an error.
+	if err != nil && !os.IsNotExist(err) {
+		return nil, err
+	}
+	return changes, nil
+}
+
+// FileInfo describes the information of a file.
+// FileInfo nodes form a tree mirroring the directory structure; `added`
+// marks nodes already emitted as changes to avoid duplicate entries.
+type FileInfo struct {
+	parent     *FileInfo            // nil for the root node
+	name       string               // base name of this entry
+	stat       *system.StatT        // stat data used for change comparison
+	children   map[string]*FileInfo // child entries keyed by base name
+	capability []byte               // file capability xattr, compared byte-wise
+	added      bool                 // set once a change has been recorded for this node
+}
+
+// LookUp looks up the file information of a file.
+// It descends from info following each path element, returning nil when
+// any component is missing; the root path returns info itself.
+func (info *FileInfo) LookUp(path string) *FileInfo {
+	// As this runs on the daemon side, file paths are OS specific.
+	parent := info
+	if path == string(os.PathSeparator) {
+		return info
+	}
+
+	pathElements := strings.Split(path, string(os.PathSeparator))
+	for _, elem := range pathElements {
+		// Empty elements (leading/duplicate separators) are ignored.
+		if elem != "" {
+			child := parent.children[elem]
+			if child == nil {
+				return nil
+			}
+			parent = child
+		}
+	}
+	return parent
+}
+
+// path reconstructs the absolute path of this node by walking up to the
+// root; the root itself is the bare path separator.
+func (info *FileInfo) path() string {
+	if info.parent == nil {
+		// As this runs on the daemon side, file paths are OS specific.
+		return string(os.PathSeparator)
+	}
+	return filepath.Join(info.parent.path(), info.name)
+}
+
+// addChanges recursively diffs this subtree against oldInfo (the same
+// subtree in the previous layer) and appends the resulting Change records
+// to *changes. A nil oldInfo means the whole subtree is an addition.
+func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
+
+	// Remember where this subtree's changes start so the directory entry
+	// can be inserted before them at the end.
+	sizeAtEntry := len(*changes)
+
+	if oldInfo == nil {
+		// add
+		change := Change{
+			Path: info.path(),
+			Kind: ChangeAdd,
+		}
+		*changes = append(*changes, change)
+		info.added = true
+	}
+
+	// We make a copy so we can modify it to detect additions
+	// also, we only recurse on the old dir if the new info is a directory
+	// otherwise any previous delete/change is considered recursive
+	oldChildren := make(map[string]*FileInfo)
+	if oldInfo != nil && info.isDir() {
+		for k, v := range oldInfo.children {
+			oldChildren[k] = v
+		}
+	}
+
+	for name, newChild := range info.children {
+		oldChild, _ := oldChildren[name]
+		if oldChild != nil {
+			// change?
+			oldStat := oldChild.stat
+			newStat := newChild.stat
+			// Note: We can't compare inode or ctime or blocksize here, because these change
+			// when copying a file into a container. However, that is not generally a problem
+			// because any content change will change mtime, and any status change should
+			// be visible when actually comparing the stat fields. The only time this
+			// breaks down is if some code intentionally hides a change by setting
+			// back mtime
+			if statDifferent(oldStat, newStat) ||
+				bytes.Compare(oldChild.capability, newChild.capability) != 0 {
+				change := Change{
+					Path: newChild.path(),
+					Kind: ChangeModify,
+				}
+				*changes = append(*changes, change)
+				newChild.added = true
+			}
+
+			// Remove from copy so we can detect deletions
+			delete(oldChildren, name)
+		}
+
+		newChild.addChanges(oldChild, changes)
+	}
+	// Anything left in the copy existed before but not now: a deletion.
+	for _, oldChild := range oldChildren {
+		// delete
+		change := Change{
+			Path: oldChild.path(),
+			Kind: ChangeDelete,
+		}
+		*changes = append(*changes, change)
+	}
+
+	// If there were changes inside this directory, we need to add it, even if the directory
+	// itself wasn't changed. This is needed to properly save and restore filesystem permissions.
+	// As this runs on the daemon side, file paths are OS specific.
+	if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) {
+		change := Change{
+			Path: info.path(),
+			Kind: ChangeModify,
+		}
+		// Let's insert the directory entry before the recently added entries located inside this dir
+		*changes = append(*changes, change) // just to resize the slice, will be overwritten
+		copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:])
+		(*changes)[sizeAtEntry] = change
+	}
+
+}
+
+// Changes computes the list of changes between this tree and oldInfo
+// (the corresponding tree from the previous layer) via addChanges.
+func (info *FileInfo) Changes(oldInfo *FileInfo) []Change {
+	var changes []Change
+
+	info.addChanges(oldInfo, &changes)
+
+	return changes
+}
+
+// newRootFileInfo constructs an empty root FileInfo node (name is the
+// OS path separator, no parent, no children yet).
+func newRootFileInfo() *FileInfo {
+	// As this runs on the daemon side, file paths are OS specific.
+	root := &FileInfo{
+		name:     string(os.PathSeparator),
+		children: make(map[string]*FileInfo),
+	}
+	return root
+}
+
+// ChangesDirs compares two directories and generates an array of Change objects describing the changes.
+// If oldDir is "", then all files in newDir will be Add-Changes.
+func ChangesDirs(newDir, oldDir string) ([]Change, error) {
+	var (
+		oldRoot, newRoot *FileInfo
+	)
+	if oldDir == "" {
+		// Diff against a fresh empty directory so everything shows as added.
+		emptyDir, err := ioutil.TempDir("", "empty")
+		if err != nil {
+			return nil, err
+		}
+		defer os.Remove(emptyDir)
+		oldDir = emptyDir
+	}
+	oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir)
+	if err != nil {
+		return nil, err
+	}
+
+	return newRoot.Changes(oldRoot), nil
+}
+
+// ChangesSize calculates the size in bytes of the provided changes, based on newDir.
+// Hardlinked files are counted only once (tracked by inode); directories
+// and deletions contribute nothing. Stat failures are logged and skipped.
+func ChangesSize(newDir string, changes []Change) int64 {
+	var (
+		size int64
+		sf   = make(map[uint64]struct{}) // inodes already counted (hardlink dedup)
+	)
+	for _, change := range changes {
+		if change.Kind == ChangeModify || change.Kind == ChangeAdd {
+			file := filepath.Join(newDir, change.Path)
+			fileInfo, err := os.Lstat(file)
+			if err != nil {
+				logrus.Errorf("Can not stat %q: %s", file, err)
+				continue
+			}
+
+			if fileInfo != nil && !fileInfo.IsDir() {
+				if hasHardlinks(fileInfo) {
+					inode := getIno(fileInfo)
+					if _, ok := sf[inode]; !ok {
+						size += fileInfo.Size()
+						sf[inode] = struct{}{}
+					}
+				} else {
+					size += fileInfo.Size()
+				}
+			}
+		}
+	}
+	return size
+}
+
+// ExportChanges produces an Archive from the provided changes, relative to dir.
+// The tar stream is produced asynchronously through a pipe: deletions are
+// written as AUFS ".wh." whiteout entries, adds/modifies as regular tar
+// entries read from dir, with ownership remapped through uidMaps/gidMaps.
+func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) {
+	reader, writer := io.Pipe()
+	go func() {
+		ta := &tarAppender{
+			TarWriter: tar.NewWriter(writer),
+			Buffer:    pools.BufioWriter32KPool.Get(nil),
+			SeenFiles: make(map[uint64]string),
+			UIDMaps:   uidMaps,
+			GIDMaps:   gidMaps,
+		}
+		// this buffer is needed for the duration of this piped stream
+		defer pools.BufioWriter32KPool.Put(ta.Buffer)
+
+		// Deterministic entry order for reproducible archives.
+		sort.Sort(changesByPath(changes))
+
+		// In general we log errors here but ignore them because
+		// during e.g. a diff operation the container can continue
+		// mutating the filesystem and we can see transient errors
+		// from this
+		for _, change := range changes {
+			if change.Kind == ChangeDelete {
+				whiteOutDir := filepath.Dir(change.Path)
+				whiteOutBase := filepath.Base(change.Path)
+				whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase)
+				timestamp := time.Now()
+				// whiteOut[1:] strips the leading separator: tar names are relative.
+				hdr := &tar.Header{
+					Name:       whiteOut[1:],
+					Size:       0,
+					ModTime:    timestamp,
+					AccessTime: timestamp,
+					ChangeTime: timestamp,
+				}
+				if err := ta.TarWriter.WriteHeader(hdr); err != nil {
+					logrus.Debugf("Can't write whiteout header: %s", err)
+				}
+			} else {
+				path := filepath.Join(dir, change.Path)
+				if err := ta.addTarFile(path, change.Path[1:]); err != nil {
+					logrus.Debugf("Can't add file %s to tar: %s", path, err)
+				}
+			}
+		}
+
+		// Make sure to check the error on Close.
+		if err := ta.TarWriter.Close(); err != nil {
+			logrus.Debugf("Can't close layer: %s", err)
+		}
+		if err := writer.Close(); err != nil {
+			logrus.Debugf("failed close Changes writer: %s", err)
+		}
+	}()
+	return reader, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_linux.go b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go
new file mode 100644
index 0000000000000000000000000000000000000000..fc5a9dfdb9cd07d3e4731169433fd62f2bde371c
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go
@@ -0,0 +1,312 @@
+package archive
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"path/filepath"
+	"sort"
+	"syscall"
+	"unsafe"
+
+	"github.com/docker/docker/pkg/system"
+)
+
+// walker is used to implement collectFileInfoForChanges on linux. Where this
+// method in general returns the entire contents of two directory trees, we
+// optimize some FS calls out on linux. In particular, we take advantage of the
+// fact that getdents(2) returns the inode of each file in the directory being
+// walked, which, when walking two trees in parallel to generate a list of
+// changes, can be used to prune subtrees without ever having to lstat(2) them
+// directly. Eliminating stat calls in this way can save several seconds on large
+// images.
+type walker struct {
+	dir1  string
+	dir2  string
+	root1 *FileInfo
+	root2 *FileInfo
+}
+
+// collectFileInfoForChanges returns a complete representation of the trees
+// rooted at dir1 and dir2, with one important exception: any subtree or
+// leaf where the inode and device numbers are an exact match between dir1
+// and dir2 will be pruned from the results. This method is *only* to be used
+// to generate a list of changes between the two directories, as it does not
+// reflect the full contents.
+func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) {
+	w := &walker{
+		dir1:  dir1,
+		dir2:  dir2,
+		root1: newRootFileInfo(),
+		root2: newRootFileInfo(),
+	}
+
+	i1, err := os.Lstat(w.dir1)
+	if err != nil {
+		return nil, nil, err
+	}
+	i2, err := os.Lstat(w.dir2)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if err := w.walk("/", i1, i2); err != nil {
+		return nil, nil, err
+	}
+
+	return w.root1, w.root2, nil
+}
+
+// Given a FileInfo, its path info, and a reference to the root of the tree
+// being constructed, register this file with the tree.
+func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error {
+	if fi == nil {
+		return nil
+	}
+	parent := root.LookUp(filepath.Dir(path))
+	if parent == nil {
+		return fmt.Errorf("collectFileInfoForChanges: Unexpectedly no parent for %s", path)
+	}
+	info := &FileInfo{
+		name:     filepath.Base(path),
+		children: make(map[string]*FileInfo),
+		parent:   parent,
+	}
+	cpath := filepath.Join(dir, path)
+	stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t))
+	if err != nil {
+		return err
+	}
+	info.stat = stat
+	info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access
+	parent.children[info.name] = info
+	return nil
+}
+
+// Walk a subtree rooted at the same path in both trees being iterated. For
+// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d
+func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) {
+	// Register these nodes with the return trees, unless we're still at the
+	// (already-created) roots:
+	if path != "/" {
+		if err := walkchunk(path, i1, w.dir1, w.root1); err != nil {
+			return err
+		}
+		if err := walkchunk(path, i2, w.dir2, w.root2); err != nil {
+			return err
+		}
+	}
+
+	is1Dir := i1 != nil && i1.IsDir()
+	is2Dir := i2 != nil && i2.IsDir()
+
+	sameDevice := false
+	if i1 != nil && i2 != nil {
+		si1 := i1.Sys().(*syscall.Stat_t)
+		si2 := i2.Sys().(*syscall.Stat_t)
+		if si1.Dev == si2.Dev {
+			sameDevice = true
+		}
+	}
+
+	// If these files are both non-existent, or leaves (non-dirs), we are done.
+	if !is1Dir && !is2Dir {
+		return nil
+	}
+
+	// Fetch the names of all the files contained in both directories being walked:
+	var names1, names2 []nameIno
+	if is1Dir {
+		names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access
+		if err != nil {
+			return err
+		}
+	}
+	if is2Dir {
+		names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access
+		if err != nil {
+			return err
+		}
+	}
+
+	// We have lists of the files contained in both parallel directories, sorted
+	// in the same order. Walk them in parallel, generating a unique merged list
+	// of all items present in either or both directories.
+	var names []string
+	ix1 := 0
+	ix2 := 0
+
+	for {
+		if ix1 >= len(names1) {
+			break
+		}
+		if ix2 >= len(names2) {
+			break
+		}
+
+		ni1 := names1[ix1]
+		ni2 := names2[ix2]
+
+		switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) {
+		case -1: // ni1 < ni2 -- advance ni1
+			// we will not encounter ni1 in names2
+			names = append(names, ni1.name)
+			ix1++
+		case 0: // ni1 == ni2
+			if ni1.ino != ni2.ino || !sameDevice {
+				names = append(names, ni1.name)
+			}
+			ix1++
+			ix2++
+		case 1: // ni1 > ni2 -- advance ni2
+			// we will not encounter ni2 in names1
+			names = append(names, ni2.name)
+			ix2++
+		}
+	}
+	for ix1 < len(names1) {
+		names = append(names, names1[ix1].name)
+		ix1++
+	}
+	for ix2 < len(names2) {
+		names = append(names, names2[ix2].name)
+		ix2++
+	}
+
+	// For each of the names present in either or both of the directories being
+	// iterated, stat the name under each root, and recurse the pair of them:
+	for _, name := range names {
+		fname := filepath.Join(path, name)
+		var cInfo1, cInfo2 os.FileInfo
+		if is1Dir {
+			cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access
+			if err != nil && !os.IsNotExist(err) {
+				return err
+			}
+		}
+		if is2Dir {
+			cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access
+			if err != nil && !os.IsNotExist(err) {
+				return err
+			}
+		}
+		if err = w.walk(fname, cInfo1, cInfo2); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// {name,inode} pairs used to support the early-pruning logic of the walker type
+type nameIno struct {
+	name string
+	ino  uint64
+}
+
+type nameInoSlice []nameIno
+
+func (s nameInoSlice) Len() int           { return len(s) }
+func (s nameInoSlice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name }
+
+// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode
+// numbers further up the stack when reading directory contents. Unlike
+// os.Readdirnames, which returns a list of filenames, this function returns a
+// list of {filename,inode} pairs.
+func readdirnames(dirname string) (names []nameIno, err error) {
+	var (
+		size = 100
+		buf  = make([]byte, 4096)
+		nbuf int
+		bufp int
+		nb   int
+	)
+
+	f, err := os.Open(dirname)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	names = make([]nameIno, 0, size) // Empty with room to grow.
+	for {
+		// Refill the buffer if necessary
+		if bufp >= nbuf {
+			bufp = 0
+			nbuf, err = syscall.ReadDirent(int(f.Fd()), buf) // getdents on linux
+			if nbuf < 0 {
+				nbuf = 0
+			}
+			if err != nil {
+				return nil, os.NewSyscallError("readdirent", err)
+			}
+			if nbuf <= 0 {
+				break // EOF
+			}
+		}
+
+		// Drain the buffer
+		nb, names = parseDirent(buf[bufp:nbuf], names)
+		bufp += nb
+	}
+
+	sl := nameInoSlice(names)
+	sort.Sort(sl)
+	return sl, nil
+}
+
+// parseDirent is a minor modification of syscall.ParseDirent (linux version)
+// which returns {name,inode} pairs instead of just names.
+func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) {
+	origlen := len(buf)
+	for len(buf) > 0 {
+		dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0]))
+		buf = buf[dirent.Reclen:]
+		if dirent.Ino == 0 { // File absent in directory.
+			continue
+		}
+		bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0]))
+		var name = string(bytes[0:clen(bytes[:])])
+		if name == "." || name == ".." { // Useless names
+			continue
+		}
+		names = append(names, nameIno{name, dirent.Ino})
+	}
+	return origlen - len(buf), names
+}
+
+func clen(n []byte) int {
+	for i := 0; i < len(n); i++ {
+		if n[i] == 0 {
+			return i
+		}
+	}
+	return len(n)
+}
+
+// OverlayChanges walks the path rw and determines changes for the files in the path,
+// with respect to the parent layers
+func OverlayChanges(layers []string, rw string) ([]Change, error) {
+	return changes(layers, rw, overlayDeletedFile, nil)
+}
+
+func overlayDeletedFile(root, path string, fi os.FileInfo) (string, error) {
+	if fi.Mode()&os.ModeCharDevice != 0 {
+		s := fi.Sys().(*syscall.Stat_t)
+		if major(uint64(s.Rdev)) == 0 && minor(uint64(s.Rdev)) == 0 {
+			return path, nil
+		}
+	}
+	if fi.Mode()&os.ModeDir != 0 {
+		opaque, err := system.Lgetxattr(filepath.Join(root, path), "trusted.overlay.opaque")
+		if err != nil {
+			return "", err
+		}
+		if len(opaque) == 1 && opaque[0] == 'y' {
+			return path, nil
+		}
+	}
+
+	return "", nil
+
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_other.go b/vendor/github.com/docker/docker/pkg/archive/changes_other.go
new file mode 100644
index 0000000000000000000000000000000000000000..da70ed37c45ac547c4a59b2b76bb728439e7c6c2
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/changes_other.go
@@ -0,0 +1,97 @@
+// +build !linux
+
+package archive
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+
+	"github.com/docker/docker/pkg/system"
+)
+
+func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) {
+	var (
+		oldRoot, newRoot *FileInfo
+		err1, err2       error
+		errs             = make(chan error, 2)
+	)
+	go func() {
+		oldRoot, err1 = collectFileInfo(oldDir)
+		errs <- err1
+	}()
+	go func() {
+		newRoot, err2 = collectFileInfo(newDir)
+		errs <- err2
+	}()
+
+	// block until both routines have returned
+	for i := 0; i < 2; i++ {
+		if err := <-errs; err != nil {
+			return nil, nil, err
+		}
+	}
+
+	return oldRoot, newRoot, nil
+}
+
+func collectFileInfo(sourceDir string) (*FileInfo, error) {
+	root := newRootFileInfo()
+
+	err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+
+		// Rebase path
+		relPath, err := filepath.Rel(sourceDir, path)
+		if err != nil {
+			return err
+		}
+
+		// As this runs on the daemon side, file paths are OS specific.
+		relPath = filepath.Join(string(os.PathSeparator), relPath)
+
+		// See https://github.com/golang/go/issues/9168 - bug in filepath.Join.
+		// Temporary workaround. If the returned path starts with two backslashes,
+		// trim it down to a single backslash. Only relevant on Windows.
+		if runtime.GOOS == "windows" {
+			if strings.HasPrefix(relPath, `\\`) {
+				relPath = relPath[1:]
+			}
+		}
+
+		if relPath == string(os.PathSeparator) {
+			return nil
+		}
+
+		parent := root.LookUp(filepath.Dir(relPath))
+		if parent == nil {
+			return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath)
+		}
+
+		info := &FileInfo{
+			name:     filepath.Base(relPath),
+			children: make(map[string]*FileInfo),
+			parent:   parent,
+		}
+
+		s, err := system.Lstat(path)
+		if err != nil {
+			return err
+		}
+		info.stat = s
+
+		info.capability, _ = system.Lgetxattr(path, "security.capability")
+
+		parent.children[info.name] = info
+
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+	return root, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_unix.go b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go
new file mode 100644
index 0000000000000000000000000000000000000000..3778b732cf402c28f634a493f147ca5f0d0aecb4
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go
@@ -0,0 +1,36 @@
+// +build !windows
+
+package archive
+
+import (
+	"os"
+	"syscall"
+
+	"github.com/docker/docker/pkg/system"
+)
+
+func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
+	// Mode, ownership and device changes always count as different.
+	if oldStat.Mode() != newStat.Mode() ||
+		oldStat.UID() != newStat.UID() ||
+		oldStat.GID() != newStat.GID() ||
+		oldStat.Rdev() != newStat.Rdev() ||
+		// Don't look at size for dirs, it's not a good measure of change
+		(oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR &&
+			(!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) {
+		return true
+	}
+	return false
+}
+
+func (info *FileInfo) isDir() bool {
+	return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR != 0
+}
+
+func getIno(fi os.FileInfo) uint64 {
+	return uint64(fi.Sys().(*syscall.Stat_t).Ino)
+}
+
+func hasHardlinks(fi os.FileInfo) bool {
+	return fi.Sys().(*syscall.Stat_t).Nlink > 1
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_windows.go b/vendor/github.com/docker/docker/pkg/archive/changes_windows.go
new file mode 100644
index 0000000000000000000000000000000000000000..af94243fc4b43d193654f57ff5a662b70fc21229
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/changes_windows.go
@@ -0,0 +1,30 @@
+package archive
+
+import (
+	"os"
+
+	"github.com/docker/docker/pkg/system"
+)
+
+func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
+
+	// Don't look at size for dirs, it's not a good measure of change
+	if oldStat.ModTime() != newStat.ModTime() ||
+		oldStat.Mode() != newStat.Mode() ||
+		oldStat.Size() != newStat.Size() && !oldStat.IsDir() {
+		return true
+	}
+	return false
+}
+
+func (info *FileInfo) isDir() bool {
+	return info.parent == nil || info.stat.IsDir()
+}
+
+func getIno(fi os.FileInfo) (inode uint64) {
+	return
+}
+
+func hasHardlinks(fi os.FileInfo) bool {
+	return false
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/copy.go b/vendor/github.com/docker/docker/pkg/archive/copy.go
new file mode 100644
index 0000000000000000000000000000000000000000..0614c67cecaf8a09f3c44c76c1b4251b9b814cb7
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/copy.go
@@ -0,0 +1,458 @@
+package archive
+
+import (
+	"archive/tar"
+	"errors"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/pkg/system"
+)
+
+// Errors used or returned by this file.
+var (
+	ErrNotDirectory      = errors.New("not a directory")
+	ErrDirNotExists      = errors.New("no such directory")
+	ErrCannotCopyDir     = errors.New("cannot copy directory")
+	ErrInvalidCopySource = errors.New("invalid copy source content")
+)
+
+// PreserveTrailingDotOrSeparator returns the given cleaned path (after
+// processing using any utility functions from the path or filepath stdlib
+// packages) and appends a trailing `/.` or `/` if its corresponding  original
+// path (from before being processed by utility functions from the path or
+// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned
+// path already ends in a `.` path segment, then another is not added. If the
+// clean path already ends in a path separator, then another is not added.
+func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string {
+	// Ensure paths are in platform semantics
+	cleanedPath = normalizePath(cleanedPath)
+	originalPath = normalizePath(originalPath)
+
+	if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) {
+		if !hasTrailingPathSeparator(cleanedPath) {
+			// Add a separator if it doesn't already end with one (a cleaned
+			// path would only end in a separator if it is the root).
+			cleanedPath += string(filepath.Separator)
+		}
+		cleanedPath += "."
+	}
+
+	if !hasTrailingPathSeparator(cleanedPath) && hasTrailingPathSeparator(originalPath) {
+		cleanedPath += string(filepath.Separator)
+	}
+
+	return cleanedPath
+}
+
+// assertsDirectory returns whether the given path is
+// asserted to be a directory, i.e., the path ends with
+// a trailing '/' or `/.`, assuming a path separator of `/`.
+func assertsDirectory(path string) bool {
+	return hasTrailingPathSeparator(path) || specifiesCurrentDir(path)
+}
+
+// hasTrailingPathSeparator returns whether the given
+// path ends with the system's path separator character.
+func hasTrailingPathSeparator(path string) bool {
+	return len(path) > 0 && os.IsPathSeparator(path[len(path)-1])
+}
+
+// specifiesCurrentDir returns whether the given path specifies
+// a "current directory", i.e., the last path segment is `.`.
+func specifiesCurrentDir(path string) bool {
+	return filepath.Base(path) == "."
+}
+
+// SplitPathDirEntry splits the given path between its directory name and its
+// basename by first cleaning the path but preserving a trailing "." if the
+// original path specified the current directory.
+func SplitPathDirEntry(path string) (dir, base string) {
+	cleanedPath := filepath.Clean(normalizePath(path))
+
+	if specifiesCurrentDir(path) {
+		cleanedPath += string(filepath.Separator) + "."
+	}
+
+	return filepath.Dir(cleanedPath), filepath.Base(cleanedPath)
+}
+
+// TarResource archives the resource described by the given CopyInfo to a Tar
+// archive. A non-nil error is returned if sourcePath does not exist or is
+// asserted to be a directory but exists as another type of file.
+//
+// This function acts as a convenient wrapper around TarWithOptions, which
+// requires a directory as the source path. TarResource accepts either a
+// directory or a file path and correctly sets the Tar options.
+func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) {
+	return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName)
+}
+
+// TarResourceRebase is like TarResource but renames the first path element of
+// items in the resulting tar archive to match the given rebaseName if not "".
+func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) {
+	sourcePath = normalizePath(sourcePath)
+	if _, err = os.Lstat(sourcePath); err != nil {
+		// Catches the case where the source does not exist or is not a
+		// directory if asserted to be a directory, as this also causes an
+		// error.
+		return
+	}
+
+	// Separate the source path between its directory and
+	// the entry in that directory which we are archiving.
+	sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
+
+	filter := []string{sourceBase}
+
+	logrus.Debugf("copying %q from %q", sourceBase, sourceDir)
+
+	return TarWithOptions(sourceDir, &TarOptions{
+		Compression:      Uncompressed,
+		IncludeFiles:     filter,
+		IncludeSourceDir: true,
+		RebaseNames: map[string]string{
+			sourceBase: rebaseName,
+		},
+	})
+}
+
+// CopyInfo holds basic info about the source
+// or destination path of a copy operation.
+type CopyInfo struct {
+	Path       string
+	Exists     bool
+	IsDir      bool
+	RebaseName string
+}
+
+// CopyInfoSourcePath stats the given path to create a CopyInfo
+// struct representing that resource for the source of an archive copy
+// operation. The given path should be an absolute local path. A source path
+// has all symlinks evaluated that appear before the last path separator ("/"
+// on Unix). As it is to be a copy source, the path must exist.
+func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) {
+	// normalize the file path and then evaluate the symbol link
+	// we will use the target file instead of the symbol link if
+	// followLink is set
+	path = normalizePath(path)
+
+	resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink)
+	if err != nil {
+		return CopyInfo{}, err
+	}
+
+	stat, err := os.Lstat(resolvedPath)
+	if err != nil {
+		return CopyInfo{}, err
+	}
+
+	return CopyInfo{
+		Path:       resolvedPath,
+		Exists:     true,
+		IsDir:      stat.IsDir(),
+		RebaseName: rebaseName,
+	}, nil
+}
+
+// CopyInfoDestinationPath stats the given path to create a CopyInfo
+// struct representing that resource for the destination of an archive copy
+// operation. The given path should be an absolute local path.
+func CopyInfoDestinationPath(path string) (info CopyInfo, err error) {
+	maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot.
+	path = normalizePath(path)
+	originalPath := path
+
+	stat, err := os.Lstat(path)
+
+	if err == nil && stat.Mode()&os.ModeSymlink == 0 {
+		// The path exists and is not a symlink.
+		return CopyInfo{
+			Path:   path,
+			Exists: true,
+			IsDir:  stat.IsDir(),
+		}, nil
+	}
+
+	// While the path is a symlink.
+	for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ {
+		if n > maxSymlinkIter {
+			// Don't follow symlinks more than this arbitrary number of times.
+			return CopyInfo{}, errors.New("too many symlinks in " + originalPath)
+		}
+
+		// The path is a symbolic link. We need to evaluate it so that the
+		// destination of the copy operation is the link target and not the
+		// link itself. This is notably different than CopyInfoSourcePath which
+		// only evaluates symlinks before the last appearing path separator.
+		// Also note that it is okay if the last path element is a broken
+		// symlink as the copy operation should create the target.
+		var linkTarget string
+
+		linkTarget, err = os.Readlink(path)
+		if err != nil {
+			return CopyInfo{}, err
+		}
+
+		if !system.IsAbs(linkTarget) {
+			// Join with the parent directory.
+			dstParent, _ := SplitPathDirEntry(path)
+			linkTarget = filepath.Join(dstParent, linkTarget)
+		}
+
+		path = linkTarget
+		stat, err = os.Lstat(path)
+	}
+
+	if err != nil {
+		// It's okay if the destination path doesn't exist. We can still
+		// continue the copy operation if the parent directory exists.
+		if !os.IsNotExist(err) {
+			return CopyInfo{}, err
+		}
+
+		// Ensure destination parent dir exists.
+		dstParent, _ := SplitPathDirEntry(path)
+
+		parentDirStat, err := os.Lstat(dstParent)
+		if err != nil {
+			return CopyInfo{}, err
+		}
+		if !parentDirStat.IsDir() {
+			return CopyInfo{}, ErrNotDirectory
+		}
+
+		return CopyInfo{Path: path}, nil
+	}
+
+	// The path exists after resolving symlinks.
+	return CopyInfo{
+		Path:   path,
+		Exists: true,
+		IsDir:  stat.IsDir(),
+	}, nil
+}
+
+// PrepareArchiveCopy prepares the given srcContent archive, which should
+// contain the archived resource described by srcInfo, to the destination
+// described by dstInfo. Returns the possibly modified content archive along
+// with the path to the destination directory which it should be extracted to.
+func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) {
+	// Ensure in platform semantics
+	srcInfo.Path = normalizePath(srcInfo.Path)
+	dstInfo.Path = normalizePath(dstInfo.Path)
+
+	// Separate the destination path between its directory and base
+	// components in case the source archive contents need to be rebased.
+	dstDir, dstBase := SplitPathDirEntry(dstInfo.Path)
+	_, srcBase := SplitPathDirEntry(srcInfo.Path)
+
+	switch {
+	case dstInfo.Exists && dstInfo.IsDir:
+		// The destination exists as a directory. No alteration
+		// to srcContent is needed as its contents can be
+		// simply extracted to the destination directory.
+		return dstInfo.Path, ioutil.NopCloser(srcContent), nil
+	case dstInfo.Exists && srcInfo.IsDir:
+		// The destination exists as some type of file and the source
+		// content is a directory. This is an error condition since
+		// you cannot copy a directory to an existing file location.
+		return "", nil, ErrCannotCopyDir
+	case dstInfo.Exists:
+		// The destination exists as some type of file and the source content
+		// is also a file. The source content entry will have to be renamed to
+		// have a basename which matches the destination path's basename.
+		if len(srcInfo.RebaseName) != 0 {
+			srcBase = srcInfo.RebaseName
+		}
+		return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+	case srcInfo.IsDir:
+		// The destination does not exist and the source content is an archive
+		// of a directory. The archive should be extracted to the parent of
+		// the destination path instead, and when it is, the directory that is
+		// created as a result should take the name of the destination path.
+		// The source content entries will have to be renamed to have a
+		// basename which matches the destination path's basename.
+		if len(srcInfo.RebaseName) != 0 {
+			srcBase = srcInfo.RebaseName
+		}
+		return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+	case assertsDirectory(dstInfo.Path):
+		// The destination does not exist and is asserted to be created as a
+		// directory, but the source content is not a directory. This is an
+		// error condition since you cannot create a directory from a file
+		// source.
+		return "", nil, ErrDirNotExists
+	default:
+		// The last remaining case is when the destination does not exist, is
+		// not asserted to be a directory, and the source content is not an
+		// archive of a directory. In this case, the destination file will need
+		// to be created when the archive is extracted and the source content
+		// entry will have to be renamed to have a basename which matches the
+		// destination path's basename.
+		if len(srcInfo.RebaseName) != 0 {
+			srcBase = srcInfo.RebaseName
+		}
+		return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+	}
+
+}
+
+// RebaseArchiveEntries rewrites the given srcContent archive replacing
+// an occurrence of oldBase with newBase at the beginning of entry names.
+func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser {
+	if oldBase == string(os.PathSeparator) {
+		// If oldBase specifies the root directory, use an empty string as
+		// oldBase instead so that newBase doesn't replace the path separator
+		// that all paths will start with.
+		oldBase = ""
+	}
+
+	rebased, w := io.Pipe()
+
+	go func() {
+		srcTar := tar.NewReader(srcContent)
+		rebasedTar := tar.NewWriter(w)
+
+		for {
+			hdr, err := srcTar.Next()
+			if err == io.EOF {
+				// Signals end of archive.
+				rebasedTar.Close()
+				w.Close()
+				return
+			}
+			if err != nil {
+				w.CloseWithError(err)
+				return
+			}
+
+			hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1)
+
+			if err = rebasedTar.WriteHeader(hdr); err != nil {
+				w.CloseWithError(err)
+				return
+			}
+
+			if _, err = io.Copy(rebasedTar, srcTar); err != nil {
+				w.CloseWithError(err)
+				return
+			}
+		}
+	}()
+
+	return rebased
+}
+
+// CopyResource performs an archive copy from the given source path to the
+// given destination path. The source path MUST exist and the destination
+// path's parent directory must exist.
+func CopyResource(srcPath, dstPath string, followLink bool) error {
+	var (
+		srcInfo CopyInfo
+		err     error
+	)
+
+	// Ensure in platform semantics
+	srcPath = normalizePath(srcPath)
+	dstPath = normalizePath(dstPath)
+
+	// Clean the source and destination paths.
+	srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath)
+	dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath)
+
+	if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil {
+		return err
+	}
+
+	content, err := TarResource(srcInfo)
+	if err != nil {
+		return err
+	}
+	defer content.Close()
+
+	return CopyTo(content, srcInfo, dstPath)
+}
+
+// CopyTo handles extracting the given content whose
+// entries should be sourced from srcInfo to dstPath.
+func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error {
+	// The destination path need not exist, but CopyInfoDestinationPath will
+	// ensure that at least the parent directory exists.
+	dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath))
+	if err != nil {
+		return err
+	}
+
+	dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo)
+	if err != nil {
+		return err
+	}
+	defer copyArchive.Close()
+
+	options := &TarOptions{
+		NoLchown:             true,
+		NoOverwriteDirNonDir: true,
+	}
+
+	return Untar(copyArchive, dstDir, options)
+}
+
+// ResolveHostSourcePath decides which real path needs to be copied, depending on
+// whether symbolic links should be followed: if followLink is true, resolvedPath
+// is the target of any symlink file; otherwise only symlinks in the parent
+// directory are resolved, and a symlink file itself is returned unresolved.
+func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) {
+	if followLink {
+		resolvedPath, err = filepath.EvalSymlinks(path)
+		if err != nil {
+			return
+		}
+
+		resolvedPath, rebaseName = GetRebaseName(path, resolvedPath)
+	} else {
+		dirPath, basePath := filepath.Split(path)
+
+		// if not follow symbol link, then resolve symbol link of parent dir
+		var resolvedDirPath string
+		resolvedDirPath, err = filepath.EvalSymlinks(dirPath)
+		if err != nil {
+			return
+		}
+		// resolvedDirPath will have been cleaned (no trailing path separators) so
+		// we can manually join it with the base path element.
+		resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
+		if hasTrailingPathSeparator(path) && filepath.Base(path) != filepath.Base(resolvedPath) {
+			rebaseName = filepath.Base(path)
+		}
+	}
+	return resolvedPath, rebaseName, nil
+}
+
+// GetRebaseName normalizes and compares path and resolvedPath,
+// return completed resolved path and rebased file name
+func GetRebaseName(path, resolvedPath string) (string, string) {
+	// resolvedPath will have been cleaned (no trailing path separators or dot) so
+	// we can manually join it with them
+	var rebaseName string
+	if specifiesCurrentDir(path) && !specifiesCurrentDir(resolvedPath) {
+		resolvedPath += string(filepath.Separator) + "."
+	}
+
+	if hasTrailingPathSeparator(path) && !hasTrailingPathSeparator(resolvedPath) {
+		resolvedPath += string(filepath.Separator)
+	}
+
+	if filepath.Base(path) != filepath.Base(resolvedPath) {
+		// In the case where the path had a trailing separator and a symlink
+		// evaluation has changed the last path component, we will need to
+		// rebase the name in the archive that is being copied to match the
+		// originally requested name.
+		rebaseName = filepath.Base(path)
+	}
+	return resolvedPath, rebaseName
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_unix.go b/vendor/github.com/docker/docker/pkg/archive/copy_unix.go
new file mode 100644
index 0000000000000000000000000000000000000000..e305b5e4af9118bc41af56e560720ce1cc2a7888
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/copy_unix.go
@@ -0,0 +1,11 @@
+// +build !windows
+
+package archive
+
+import (
+	"path/filepath"
+)
+
+func normalizePath(path string) string {
+	return filepath.ToSlash(path)
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_windows.go b/vendor/github.com/docker/docker/pkg/archive/copy_windows.go
new file mode 100644
index 0000000000000000000000000000000000000000..2b775b45c4f115ba1f0a08247de997f1d1b7522a
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/copy_windows.go
@@ -0,0 +1,9 @@
+package archive
+
+import (
+	"path/filepath"
+)
+
+func normalizePath(path string) string {
+	return filepath.FromSlash(path)
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/diff.go b/vendor/github.com/docker/docker/pkg/archive/diff.go
new file mode 100644
index 0000000000000000000000000000000000000000..9e1a58c499c5c62d765041670e5ecab139618b57
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/diff.go
@@ -0,0 +1,279 @@
+package archive
+
+import (
+	"archive/tar"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/pkg/idtools"
+	"github.com/docker/docker/pkg/pools"
+	"github.com/docker/docker/pkg/system"
+)
+
+// UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be
+// compressed or uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) {
+	tr := tar.NewReader(layer)
+	trBuf := pools.BufioReader32KPool.Get(tr)
+	defer pools.BufioReader32KPool.Put(trBuf)
+
+	var dirs []*tar.Header
+	unpackedPaths := make(map[string]struct{})
+
+	if options == nil {
+		options = &TarOptions{}
+	}
+	if options.ExcludePatterns == nil {
+		options.ExcludePatterns = []string{}
+	}
+	remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
+	if err != nil {
+		return 0, err
+	}
+
+	aufsTempdir := ""
+	aufsHardlinks := make(map[string]*tar.Header)
+
+	if options == nil {
+		options = &TarOptions{}
+	}
+	// Iterate through the files in the archive.
+	for {
+		hdr, err := tr.Next()
+		if err == io.EOF {
+			// end of tar archive
+			break
+		}
+		if err != nil {
+			return 0, err
+		}
+
+		size += hdr.Size
+
+		// Normalize name, for safety and for a simple is-root check
+		hdr.Name = filepath.Clean(hdr.Name)
+
+		// Windows does not support filenames with colons in them. Ignore
+		// these files. This is not a problem though (although it might
+		// appear that it is). Let's suppose a client is running docker pull.
+		// The daemon it points to is Windows. Would it make sense for the
+		// client to be doing a docker pull Ubuntu for example (which has files
+		// with colons in the name under /usr/share/man/man3)? No, absolutely
+		// not as it would really only make sense that they were pulling a
+		// Windows image. However, for development, it is necessary to be able
+		// to pull Linux images which are in the repository.
+		//
+		// TODO Windows. Once the registry is aware of what images are Windows-
+		// specific or Linux-specific, this warning should be changed to an error
+		// to cater for the situation where someone does manage to upload a Linux
+		// image but have it tagged as Windows inadvertently.
+		if runtime.GOOS == "windows" {
+			if strings.Contains(hdr.Name, ":") {
+				logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name)
+				continue
+			}
+		}
+
+		// Note: these operations are platform specific, so the slash must be too.
+		if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
+			// Not the root directory, ensure that the parent directory exists.
+			// This happened in some tests where an image had a tarfile without any
+			// parent directories.
+			parent := filepath.Dir(hdr.Name)
+			parentPath := filepath.Join(dest, parent)
+
+			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
+				err = system.MkdirAll(parentPath, 0600)
+				if err != nil {
+					return 0, err
+				}
+			}
+		}
+
+		// Skip AUFS metadata dirs
+		if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) {
+			// Regular files inside /.wh..wh.plnk can be used as hardlink targets
+			// We don't want this directory, but we need the files in them so that
+			// such hardlinks can be resolved.
+			if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
+				basename := filepath.Base(hdr.Name)
+				aufsHardlinks[basename] = hdr
+				if aufsTempdir == "" {
+					if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil {
+						return 0, err
+					}
+					defer os.RemoveAll(aufsTempdir)
+				}
+				if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS); err != nil {
+					return 0, err
+				}
+			}
+
+			if hdr.Name != WhiteoutOpaqueDir {
+				continue
+			}
+		}
+		path := filepath.Join(dest, hdr.Name)
+		rel, err := filepath.Rel(dest, path)
+		if err != nil {
+			return 0, err
+		}
+
+		// Note: these operations are platform specific, so the slash must be too.
+		if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
+			return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
+		}
+		base := filepath.Base(path)
+
+		if strings.HasPrefix(base, WhiteoutPrefix) {
+			dir := filepath.Dir(path)
+			if base == WhiteoutOpaqueDir {
+				_, err := os.Lstat(dir)
+				if err != nil {
+					return 0, err
+				}
+				err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
+					if err != nil {
+						if os.IsNotExist(err) {
+							err = nil // parent was deleted
+						}
+						return err
+					}
+					if path == dir {
+						return nil
+					}
+					if _, exists := unpackedPaths[path]; !exists {
+						err := os.RemoveAll(path)
+						return err
+					}
+					return nil
+				})
+				if err != nil {
+					return 0, err
+				}
+			} else {
+				originalBase := base[len(WhiteoutPrefix):]
+				originalPath := filepath.Join(dir, originalBase)
+				if err := os.RemoveAll(originalPath); err != nil {
+					return 0, err
+				}
+			}
+		} else {
+			// If path exists we almost always just want to remove and replace it.
+			// The only exception is when it is a directory *and* the file from
+			// the layer is also a directory. Then we want to merge them (i.e.
+			// just apply the metadata from the layer).
+			if fi, err := os.Lstat(path); err == nil {
+				if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
+					if err := os.RemoveAll(path); err != nil {
+						return 0, err
+					}
+				}
+			}
+
+			trBuf.Reset(tr)
+			srcData := io.Reader(trBuf)
+			srcHdr := hdr
+
+			// Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
+			// we manually retarget these into the temporary files we extracted them into
+			if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) {
+				linkBasename := filepath.Base(hdr.Linkname)
+				srcHdr = aufsHardlinks[linkBasename]
+				if srcHdr == nil {
+					return 0, fmt.Errorf("Invalid aufs hardlink")
+				}
+				tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename))
+				if err != nil {
+					return 0, err
+				}
+				defer tmpFile.Close()
+				srcData = tmpFile
+			}
+
+			// if the options contain a uid & gid maps, convert header uid/gid
+			// entries using the maps such that lchown sets the proper mapped
+			// uid/gid after writing the file. We only perform this mapping if
+			// the file isn't already owned by the remapped root UID or GID, as
+			// that specific uid/gid has no mapping from container -> host, and
+			// those files already have the proper ownership for inside the
+			// container.
+			if srcHdr.Uid != remappedRootUID {
+				xUID, err := idtools.ToHost(srcHdr.Uid, options.UIDMaps)
+				if err != nil {
+					return 0, err
+				}
+				srcHdr.Uid = xUID
+			}
+			if srcHdr.Gid != remappedRootGID {
+				xGID, err := idtools.ToHost(srcHdr.Gid, options.GIDMaps)
+				if err != nil {
+					return 0, err
+				}
+				srcHdr.Gid = xGID
+			}
+			if err := createTarFile(path, dest, srcHdr, srcData, true, nil, options.InUserNS); err != nil {
+				return 0, err
+			}
+
+			// Directory mtimes must be handled at the end to avoid further
+			// file creation in them to modify the directory mtime
+			if hdr.Typeflag == tar.TypeDir {
+				dirs = append(dirs, hdr)
+			}
+			unpackedPaths[path] = struct{}{}
+		}
+	}
+
+	for _, hdr := range dirs {
+		path := filepath.Join(dest, hdr.Name)
+		if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
+			return 0, err
+		}
+	}
+
+	return size, nil
+}
+
+// ApplyLayer parses a diff in the standard layer format from `layer`,
+// and applies it to the directory `dest`. The stream `layer` can be
+// compressed or uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func ApplyLayer(dest string, layer io.Reader) (int64, error) {
+	return applyLayerHandler(dest, layer, &TarOptions{}, true)
+}
+
+// ApplyUncompressedLayer parses a diff in the standard layer format from
+// `layer`, and applies it to the directory `dest`. The stream `layer`
+// can only be uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) {
+	return applyLayerHandler(dest, layer, options, false)
+}
+
+// do the bulk work of ApplyLayer, but allow for not calling DecompressStream
+func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) {
+	dest = filepath.Clean(dest)
+
+	// We need to be able to set any perms
+	oldmask, err := system.Umask(0)
+	if err != nil {
+		return 0, err
+	}
+	defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform
+
+	if decompress {
+		layer, err = DecompressStream(layer)
+		if err != nil {
+			return 0, err
+		}
+	}
+	return UnpackLayer(dest, layer, options)
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/example_changes.go b/vendor/github.com/docker/docker/pkg/archive/example_changes.go
new file mode 100644
index 0000000000000000000000000000000000000000..cedd46a408e49a263aa207e240f7dc12ea139adf
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/example_changes.go
@@ -0,0 +1,97 @@
+// +build ignore
+
+// Simple tool to create an archive stream from an old and new directory
+//
+// By default it will stream the comparison of two temporary directories with junk files
+package main
+
+import (
+	"flag"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/pkg/archive"
+)
+
+var (
+	flDebug  = flag.Bool("D", false, "debugging output")
+	flNewDir = flag.String("newdir", "", "")
+	flOldDir = flag.String("olddir", "", "")
+	log      = logrus.New()
+)
+
+func main() {
+	flag.Usage = func() {
+		fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)")
+		fmt.Printf("%s [OPTIONS]\n", os.Args[0])
+		flag.PrintDefaults()
+	}
+	flag.Parse()
+	log.Out = os.Stderr
+	if (len(os.Getenv("DEBUG")) > 0) || *flDebug {
+		logrus.SetLevel(logrus.DebugLevel)
+	}
+	var newDir, oldDir string
+
+	if len(*flNewDir) == 0 {
+		var err error
+		newDir, err = ioutil.TempDir("", "docker-test-newDir")
+		if err != nil {
+			log.Fatal(err)
+		}
+		defer os.RemoveAll(newDir)
+		if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil {
+			log.Fatal(err)
+		}
+	} else {
+		newDir = *flNewDir
+	}
+
+	if len(*flOldDir) == 0 {
+		oldDir, err := ioutil.TempDir("", "docker-test-oldDir")
+		if err != nil {
+			log.Fatal(err)
+		}
+		defer os.RemoveAll(oldDir)
+	} else {
+		oldDir = *flOldDir
+	}
+
+	changes, err := archive.ChangesDirs(newDir, oldDir)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	a, err := archive.ExportChanges(newDir, changes)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer a.Close()
+
+	i, err := io.Copy(os.Stdout, a)
+	if err != nil && err != io.EOF {
+		log.Fatal(err)
+	}
+	fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i)
+}
+
+func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) {
+	fileData := []byte("fooo")
+	for n := 0; n < numberOfFiles; n++ {
+		fileName := fmt.Sprintf("file-%d", n)
+		if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
+			return 0, err
+		}
+		if makeLinks {
+			if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil {
+				return 0, err
+			}
+		}
+	}
+	totalSize := numberOfFiles * len(fileData)
+	return totalSize, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/time_linux.go b/vendor/github.com/docker/docker/pkg/archive/time_linux.go
new file mode 100644
index 0000000000000000000000000000000000000000..3448569b1ebb2a92e94f4fb27176b78e373bdc7c
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/time_linux.go
@@ -0,0 +1,16 @@
+package archive
+
+import (
+	"syscall"
+	"time"
+)
+
+func timeToTimespec(time time.Time) (ts syscall.Timespec) {
+	if time.IsZero() {
+		// Return UTIME_OMIT special value
+		ts.Sec = 0
+		ts.Nsec = ((1 << 30) - 2)
+		return
+	}
+	return syscall.NsecToTimespec(time.UnixNano())
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go b/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go
new file mode 100644
index 0000000000000000000000000000000000000000..e85aac05408086241bbf1a895b439edca4075458
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go
@@ -0,0 +1,16 @@
+// +build !linux
+
+package archive
+
+import (
+	"syscall"
+	"time"
+)
+
+func timeToTimespec(time time.Time) (ts syscall.Timespec) {
+	nsec := int64(0)
+	if !time.IsZero() {
+		nsec = time.UnixNano()
+	}
+	return syscall.NsecToTimespec(nsec)
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/whiteouts.go b/vendor/github.com/docker/docker/pkg/archive/whiteouts.go
new file mode 100644
index 0000000000000000000000000000000000000000..d20478a10dc158c88ab312086d5fb20497192374
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/whiteouts.go
@@ -0,0 +1,23 @@
+package archive
+
+// Whiteouts are files with a special meaning for the layered filesystem.
+// Docker uses AUFS whiteout files inside exported archives. In other
+// filesystems these files are generated/handled on tar creation/extraction.
+
+// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a
+// filename this means that file has been removed from the base layer.
+const WhiteoutPrefix = ".wh."
+
+// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not
+// for removing an actual file. Normally these files are excluded from exported
+// archives.
+const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix
+
+// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other
+// layers. Normally these should not go into exported archives and all changed
+// hardlinks should be copied to the top layer.
+const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk"
+
+// WhiteoutOpaqueDir file means directory has been made opaque - meaning
+// readdir calls to this directory do not follow to lower layers.
+const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq"
diff --git a/vendor/github.com/docker/docker/pkg/archive/wrap.go b/vendor/github.com/docker/docker/pkg/archive/wrap.go
new file mode 100644
index 0000000000000000000000000000000000000000..b39d12c87800baa2c52eb6b7600d34f6cc9f0ace
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/wrap.go
@@ -0,0 +1,59 @@
+package archive
+
+import (
+	"archive/tar"
+	"bytes"
+	"io"
+)
+
+// Generate generates a new archive from the content provided
+// as input.
+//
+// `files` is a sequence of path/content pairs. A new file is
+// added to the archive for each pair.
+// If the last pair is incomplete, the file is created with an
+// empty content. For example:
+//
+// Generate("foo.txt", "hello world", "emptyfile")
+//
+// The above call will return an archive with 2 files:
+//  * ./foo.txt with content "hello world"
+//  * ./emptyfile with empty content
+//
+// FIXME: stream content instead of buffering
+// FIXME: specify permissions and other archive metadata
+func Generate(input ...string) (io.Reader, error) {
+	files := parseStringPairs(input...)
+	buf := new(bytes.Buffer)
+	tw := tar.NewWriter(buf)
+	for _, file := range files {
+		name, content := file[0], file[1]
+		hdr := &tar.Header{
+			Name: name,
+			Size: int64(len(content)),
+		}
+		if err := tw.WriteHeader(hdr); err != nil {
+			return nil, err
+		}
+		if _, err := tw.Write([]byte(content)); err != nil {
+			return nil, err
+		}
+	}
+	if err := tw.Close(); err != nil {
+		return nil, err
+	}
+	return buf, nil
+}
+
+func parseStringPairs(input ...string) (output [][2]string) {
+	output = make([][2]string, 0, len(input)/2+1)
+	for i := 0; i < len(input); i += 2 {
+		var pair [2]string
+		pair[0] = input[i]
+		if i+1 < len(input) {
+			pair[1] = input[i+1]
+		}
+		output = append(output, pair)
+	}
+	return
+}
diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go b/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go
new file mode 100644
index 0000000000000000000000000000000000000000..a7814f5b906deac3f5168a23a3c026ba272f8cb2
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go
@@ -0,0 +1,97 @@
+package chrootarchive
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+
+	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/idtools"
+)
+
+var chrootArchiver = &archive.Archiver{Untar: Untar}
+
+// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive may be compressed with one of the following algorithms:
+//  identity (uncompressed), gzip, bzip2, xz.
+func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
+	return untarHandler(tarArchive, dest, options, true)
+}
+
+// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive must be an uncompressed stream.
+func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
+	return untarHandler(tarArchive, dest, options, false)
+}
+
+// Handler for teasing out the automatic decompression
+func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool) error {
+
+	if tarArchive == nil {
+		return fmt.Errorf("Empty archive")
+	}
+	if options == nil {
+		options = &archive.TarOptions{}
+	}
+	if options.ExcludePatterns == nil {
+		options.ExcludePatterns = []string{}
+	}
+
+	rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
+	if err != nil {
+		return err
+	}
+
+	dest = filepath.Clean(dest)
+	if _, err := os.Stat(dest); os.IsNotExist(err) {
+		if err := idtools.MkdirAllNewAs(dest, 0755, rootUID, rootGID); err != nil {
+			return err
+		}
+	}
+
+	r := ioutil.NopCloser(tarArchive)
+	if decompress {
+		decompressedArchive, err := archive.DecompressStream(tarArchive)
+		if err != nil {
+			return err
+		}
+		defer decompressedArchive.Close()
+		r = decompressedArchive
+	}
+
+	return invokeUnpack(r, dest, options)
+}
+
+// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
+// If either Tar or Untar fails, TarUntar aborts and returns the error.
+func TarUntar(src, dst string) error {
+	return chrootArchiver.TarUntar(src, dst)
+}
+
+// CopyWithTar creates a tar archive of filesystem path `src`, and
+// unpacks it at filesystem path `dst`.
+// The archive is streamed directly with fixed buffering and no
+// intermediary disk IO.
+func CopyWithTar(src, dst string) error {
+	return chrootArchiver.CopyWithTar(src, dst)
+}
+
+// CopyFileWithTar emulates the behavior of the 'cp' command-line
+// for a single file. It copies a regular file from path `src` to
+// path `dst`, and preserves all its metadata.
+//
+// If `dst` ends with a trailing slash '/' ('\' on Windows), the final
+// destination path will be `dst/base(src)` or `dst\base(src)`
+func CopyFileWithTar(src, dst string) (err error) {
+	return chrootArchiver.CopyFileWithTar(src, dst)
+}
+
+// UntarPath is a convenience function which looks for an archive
+// at filesystem path `src`, and unpacks it at `dst`.
+func UntarPath(src, dst string) error {
+	return chrootArchiver.UntarPath(src, dst)
+}
diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go
new file mode 100644
index 0000000000000000000000000000000000000000..f2325abd74e493d8d7abed18265c32d950e53e3e
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go
@@ -0,0 +1,86 @@
+// +build !windows
+
+package chrootarchive
+
+import (
+	"bytes"
+	"encoding/json"
+	"flag"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"runtime"
+
+	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/reexec"
+)
+
+// untar is the entry-point for docker-untar on re-exec. This is not used on
+// Windows as it does not support chroot, hence no point sandboxing through
+// chroot and reexec.
+func untar() {
+	runtime.LockOSThread()
+	flag.Parse()
+
+	var options *archive.TarOptions
+
+	//read the options from the pipe "ExtraFiles"
+	if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil {
+		fatal(err)
+	}
+
+	if err := chroot(flag.Arg(0)); err != nil {
+		fatal(err)
+	}
+
+	if err := archive.Unpack(os.Stdin, "/", options); err != nil {
+		fatal(err)
+	}
+	// fully consume stdin in case it is zero padded
+	if _, err := flush(os.Stdin); err != nil {
+		fatal(err)
+	}
+
+	os.Exit(0)
+}
+
+func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions) error {
+
+	// We can't pass a potentially large exclude list directly via cmd line
+	// because we easily overrun the kernel's max argument/environment size
+	// when the full image list is passed (e.g. when this is used by
+	// `docker load`). We will marshal the options via a pipe to the
+	// child
+	r, w, err := os.Pipe()
+	if err != nil {
+		return fmt.Errorf("Untar pipe failure: %v", err)
+	}
+
+	cmd := reexec.Command("docker-untar", dest)
+	cmd.Stdin = decompressedArchive
+
+	cmd.ExtraFiles = append(cmd.ExtraFiles, r)
+	output := bytes.NewBuffer(nil)
+	cmd.Stdout = output
+	cmd.Stderr = output
+
+	if err := cmd.Start(); err != nil {
+		return fmt.Errorf("Untar error on re-exec cmd: %v", err)
+	}
+	//write the options to the pipe for the untar exec to read
+	if err := json.NewEncoder(w).Encode(options); err != nil {
+		return fmt.Errorf("Untar json encode to pipe failed: %v", err)
+	}
+	w.Close()
+
+	if err := cmd.Wait(); err != nil {
+		// when `xz -d -c -q | docker-untar ...` failed on docker-untar side,
+		// we need to exhaust `xz`'s output, otherwise the `xz` side will be
+		// pending on write pipe forever
+		io.Copy(ioutil.Discard, decompressedArchive)
+
+		return fmt.Errorf("Error processing tar file(%v): %s", err, output)
+	}
+	return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go
new file mode 100644
index 0000000000000000000000000000000000000000..0a500ed5c2dd70c76251cd62af0b1f316d41e48e
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go
@@ -0,0 +1,22 @@
+package chrootarchive
+
+import (
+	"io"
+
+	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/longpath"
+)
+
+// chroot is not supported by Windows
+func chroot(path string) error {
+	return nil
+}
+
+func invokeUnpack(decompressedArchive io.ReadCloser,
+	dest string,
+	options *archive.TarOptions) error {
+	// Windows is different to Linux here because Windows does not support
+	// chroot. Hence there is no point sandboxing a chrooted process to
+	// do the unpack. We call inline instead within the daemon process.
+	return archive.Unpack(decompressedArchive, longpath.AddPrefix(dest), options)
+}
diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go b/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go
new file mode 100644
index 0000000000000000000000000000000000000000..f9d7fed633defa82e11cec0f221263469dfd0551
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go
@@ -0,0 +1,108 @@
+package chrootarchive
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"syscall"
+
+	"github.com/docker/docker/pkg/mount"
+	rsystem "github.com/opencontainers/runc/libcontainer/system"
+)
+
+// chroot on linux uses pivot_root instead of chroot
+// pivot_root takes a new root and an old root.
+// Old root must be a sub-dir of new root, it is where the current rootfs will reside after the call to pivot_root.
+// New root is where the new rootfs is set to.
+// Old root is removed after the call to pivot_root so it is no longer available under the new root.
+// This is similar to how libcontainer sets up a container's rootfs
+func chroot(path string) (err error) {
+	// if the engine is running in a user namespace we need to use actual chroot
+	if rsystem.RunningInUserNS() {
+		return realChroot(path)
+	}
+	if err := syscall.Unshare(syscall.CLONE_NEWNS); err != nil {
+		return fmt.Errorf("Error creating mount namespace before pivot: %v", err)
+	}
+
+	// make everything in new ns private
+	if err := mount.MakeRPrivate("/"); err != nil {
+		return err
+	}
+
+	if mounted, _ := mount.Mounted(path); !mounted {
+		if err := mount.Mount(path, path, "bind", "rbind,rw"); err != nil {
+			return realChroot(path)
+		}
+	}
+
+	// setup oldRoot for pivot_root
+	pivotDir, err := ioutil.TempDir(path, ".pivot_root")
+	if err != nil {
+		return fmt.Errorf("Error setting up pivot dir: %v", err)
+	}
+
+	var mounted bool
+	defer func() {
+		if mounted {
+			// make sure pivotDir is not mounted before we try to remove it
+			if errCleanup := syscall.Unmount(pivotDir, syscall.MNT_DETACH); errCleanup != nil {
+				if err == nil {
+					err = errCleanup
+				}
+				return
+			}
+		}
+
+		errCleanup := os.Remove(pivotDir)
+		// pivotDir doesn't exist if pivot_root failed and chroot+chdir was successful
+		// because we already cleaned it up on failed pivot_root
+		if errCleanup != nil && !os.IsNotExist(errCleanup) {
+			errCleanup = fmt.Errorf("Error cleaning up after pivot: %v", errCleanup)
+			if err == nil {
+				err = errCleanup
+			}
+		}
+	}()
+
+	if err := syscall.PivotRoot(path, pivotDir); err != nil {
+		// If pivot fails, fall back to the normal chroot after cleaning up temp dir
+		if err := os.Remove(pivotDir); err != nil {
+			return fmt.Errorf("Error cleaning up after failed pivot: %v", err)
+		}
+		return realChroot(path)
+	}
+	mounted = true
+
+	// This is the new path for where the old root (prior to the pivot) has been moved to
+	// This dir contains the rootfs of the caller, which we need to remove so it is not visible during extraction
+	pivotDir = filepath.Join("/", filepath.Base(pivotDir))
+
+	if err := syscall.Chdir("/"); err != nil {
+		return fmt.Errorf("Error changing to new root: %v", err)
+	}
+
+	// Make the pivotDir (where the old root lives) private so it can be unmounted without propagating to the host
+	if err := syscall.Mount("", pivotDir, "", syscall.MS_PRIVATE|syscall.MS_REC, ""); err != nil {
+		return fmt.Errorf("Error making old root private after pivot: %v", err)
+	}
+
+	// Now unmount the old root so it's no longer visible from the new root
+	if err := syscall.Unmount(pivotDir, syscall.MNT_DETACH); err != nil {
+		return fmt.Errorf("Error while unmounting old root after pivot: %v", err)
+	}
+	mounted = false
+
+	return nil
+}
+
+func realChroot(path string) error {
+	if err := syscall.Chroot(path); err != nil {
+		return fmt.Errorf("Error after fallback to chroot: %v", err)
+	}
+	if err := syscall.Chdir("/"); err != nil {
+		return fmt.Errorf("Error changing to new root after chroot: %v", err)
+	}
+	return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_unix.go
new file mode 100644
index 0000000000000000000000000000000000000000..16354bf64877a01e014b136200fb97d62ee1b7f5
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_unix.go
@@ -0,0 +1,12 @@
+// +build !windows,!linux
+
+package chrootarchive
+
+import "syscall"
+
+func chroot(path string) error {
+	if err := syscall.Chroot(path); err != nil {
+		return err
+	}
+	return syscall.Chdir("/")
+}
diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/diff.go b/vendor/github.com/docker/docker/pkg/chrootarchive/diff.go
new file mode 100644
index 0000000000000000000000000000000000000000..49acad79ff286c3a14c2dd53412b70a7ed435c63
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/chrootarchive/diff.go
@@ -0,0 +1,23 @@
+package chrootarchive
+
+import (
+	"io"
+
+	"github.com/docker/docker/pkg/archive"
+)
+
+// ApplyLayer parses a diff in the standard layer format from `layer`,
+// and applies it to the directory `dest`. The stream `layer` can only be
+// uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func ApplyLayer(dest string, layer io.Reader) (size int64, err error) {
+	return applyLayerHandler(dest, layer, &archive.TarOptions{}, true)
+}
+
+// ApplyUncompressedLayer parses a diff in the standard layer format from
+// `layer`, and applies it to the directory `dest`. The stream `layer`
+// can only be uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func ApplyUncompressedLayer(dest string, layer io.Reader, options *archive.TarOptions) (int64, error) {
+	return applyLayerHandler(dest, layer, options, false)
+}
diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go
new file mode 100644
index 0000000000000000000000000000000000000000..eb0aacc3ab34300e1b21af22d303cda622896388
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go
@@ -0,0 +1,130 @@
+//+build !windows
+
+package chrootarchive
+
+import (
+	"bytes"
+	"encoding/json"
+	"flag"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"runtime"
+
+	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/reexec"
+	"github.com/docker/docker/pkg/system"
+	rsystem "github.com/opencontainers/runc/libcontainer/system"
+)
+
+type applyLayerResponse struct {
+	LayerSize int64 `json:"layerSize"`
+}
+
+// applyLayer is the entry-point for docker-applylayer on re-exec. This is not
+// used on Windows as it does not support chroot, hence no point sandboxing
+// through chroot and reexec.
+func applyLayer() {
+
+	var (
+		tmpDir  = ""
+		err     error
+		options *archive.TarOptions
+	)
+	runtime.LockOSThread()
+	flag.Parse()
+
+	inUserns := rsystem.RunningInUserNS()
+	if err := chroot(flag.Arg(0)); err != nil {
+		fatal(err)
+	}
+
+	// We need to be able to set any perms
+	oldmask, err := system.Umask(0)
+	defer system.Umask(oldmask)
+	if err != nil {
+		fatal(err)
+	}
+
+	if err := json.Unmarshal([]byte(os.Getenv("OPT")), &options); err != nil {
+		fatal(err)
+	}
+
+	if inUserns {
+		options.InUserNS = true
+	}
+
+	if tmpDir, err = ioutil.TempDir("/", "temp-docker-extract"); err != nil {
+		fatal(err)
+	}
+
+	os.Setenv("TMPDIR", tmpDir)
+	size, err := archive.UnpackLayer("/", os.Stdin, options)
+	os.RemoveAll(tmpDir)
+	if err != nil {
+		fatal(err)
+	}
+
+	encoder := json.NewEncoder(os.Stdout)
+	if err := encoder.Encode(applyLayerResponse{size}); err != nil {
+		fatal(fmt.Errorf("unable to encode layerSize JSON: %s", err))
+	}
+
+	if _, err := flush(os.Stdin); err != nil {
+		fatal(err)
+	}
+
+	os.Exit(0)
+}
+
+// applyLayerHandler parses a diff in the standard layer format from `layer`, and
+// applies it to the directory `dest`. Returns the size in bytes of the
+// contents of the layer.
+func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) {
+	dest = filepath.Clean(dest)
+	if decompress {
+		decompressed, err := archive.DecompressStream(layer)
+		if err != nil {
+			return 0, err
+		}
+		defer decompressed.Close()
+
+		layer = decompressed
+	}
+	if options == nil {
+		options = &archive.TarOptions{}
+		if rsystem.RunningInUserNS() {
+			options.InUserNS = true
+		}
+	}
+	if options.ExcludePatterns == nil {
+		options.ExcludePatterns = []string{}
+	}
+
+	data, err := json.Marshal(options)
+	if err != nil {
+		return 0, fmt.Errorf("ApplyLayer json encode: %v", err)
+	}
+
+	cmd := reexec.Command("docker-applyLayer", dest)
+	cmd.Stdin = layer
+	cmd.Env = append(cmd.Env, fmt.Sprintf("OPT=%s", data))
+
+	outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer)
+	cmd.Stdout, cmd.Stderr = outBuf, errBuf
+
+	if err = cmd.Run(); err != nil {
+		return 0, fmt.Errorf("ApplyLayer %s stdout: %s stderr: %s", err, outBuf, errBuf)
+	}
+
+	// Stdout should be a valid JSON struct representing an applyLayerResponse.
+	response := applyLayerResponse{}
+	decoder := json.NewDecoder(outBuf)
+	if err = decoder.Decode(&response); err != nil {
+		return 0, fmt.Errorf("unable to decode ApplyLayer JSON response: %s", err)
+	}
+
+	return response.LayerSize, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go b/vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go
new file mode 100644
index 0000000000000000000000000000000000000000..9dd9988de092036dc9c71a0e6d3f63cf8bb48552
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go
@@ -0,0 +1,45 @@
+package chrootarchive
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+
+	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/longpath"
+)
+
+// applyLayerHandler parses a diff in the standard layer format from `layer`, and
+// applies it to the directory `dest`. Returns the size in bytes of the
+// contents of the layer.
+func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) {
+	dest = filepath.Clean(dest)
+
+	// Ensure it is a Windows-style volume path
+	dest = longpath.AddPrefix(dest)
+
+	if decompress {
+		decompressed, err := archive.DecompressStream(layer)
+		if err != nil {
+			return 0, err
+		}
+		defer decompressed.Close()
+
+		layer = decompressed
+	}
+
+	tmpDir, err := ioutil.TempDir(os.Getenv("temp"), "temp-docker-extract")
+	if err != nil {
+		return 0, fmt.Errorf("ApplyLayer failed to create temp-docker-extract under %s. %s", dest, err)
+	}
+
+	s, err := archive.UnpackLayer(dest, layer, nil)
+	os.RemoveAll(tmpDir)
+	if err != nil {
+		return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s", err, dest)
+	}
+
+	return s, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go
new file mode 100644
index 0000000000000000000000000000000000000000..4f637f17b8f50e5b3588ea95ca0e5521e8f33189
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go
@@ -0,0 +1,28 @@
+// +build !windows
+
+package chrootarchive
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+
+	"github.com/docker/docker/pkg/reexec"
+)
+
+func init() {
+	reexec.Register("docker-applyLayer", applyLayer)
+	reexec.Register("docker-untar", untar)
+}
+
+func fatal(err error) {
+	fmt.Fprint(os.Stderr, err)
+	os.Exit(1)
+}
+
+// flush consumes all the bytes from the reader discarding
+// any errors
+func flush(r io.Reader) (bytes int64, err error) {
+	return io.Copy(ioutil.Discard, r)
+}
diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go b/vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go
new file mode 100644
index 0000000000000000000000000000000000000000..fa17c9bf8316c275eefdd6d7e448397ef49f1627
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go
@@ -0,0 +1,4 @@
+package chrootarchive
+
+func init() {
+}
diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go
new file mode 100644
index 0000000000000000000000000000000000000000..c63ae75ce81b63ba1d04dda32cfde180123a4f37
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go
@@ -0,0 +1,283 @@
+package fileutils
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"regexp"
+	"strings"
+	"text/scanner"
+
+	"github.com/Sirupsen/logrus"
+)
+
+// exclusion returns true if the specified pattern is an exclusion
+func exclusion(pattern string) bool {
+	return pattern[0] == '!'
+}
+
+// empty returns true if the specified pattern is empty
+func empty(pattern string) bool {
+	return pattern == ""
+}
+
+// CleanPatterns takes a slice of patterns returns a new
+// slice of patterns cleaned with filepath.Clean, stripped
+// of any empty patterns and lets the caller know whether the
+// slice contains any exception patterns (prefixed with !).
+func CleanPatterns(patterns []string) ([]string, [][]string, bool, error) {
+	// Loop over exclusion patterns and:
+	// 1. Clean them up.
+	// 2. Indicate whether we are dealing with any exception rules.
+	// 3. Error if we see a single exclusion marker on its own (!).
+	cleanedPatterns := []string{}
+	patternDirs := [][]string{}
+	exceptions := false
+	for _, pattern := range patterns {
+		// Eliminate leading and trailing whitespace.
+		pattern = strings.TrimSpace(pattern)
+		if empty(pattern) {
+			continue
+		}
+		if exclusion(pattern) {
+			if len(pattern) == 1 {
+				return nil, nil, false, errors.New("Illegal exclusion pattern: !")
+			}
+			exceptions = true
+		}
+		pattern = filepath.Clean(pattern)
+		cleanedPatterns = append(cleanedPatterns, pattern)
+		if exclusion(pattern) {
+			pattern = pattern[1:]
+		}
+		patternDirs = append(patternDirs, strings.Split(pattern, string(os.PathSeparator)))
+	}
+
+	return cleanedPatterns, patternDirs, exceptions, nil
+}
+
+// Matches returns true if file matches any of the patterns
+// and isn't excluded by any of the subsequent patterns.
+func Matches(file string, patterns []string) (bool, error) {
+	file = filepath.Clean(file)
+
+	if file == "." {
+		// Don't let them exclude everything, kind of silly.
+		return false, nil
+	}
+
+	patterns, patDirs, _, err := CleanPatterns(patterns)
+	if err != nil {
+		return false, err
+	}
+
+	return OptimizedMatches(file, patterns, patDirs)
+}
+
+// OptimizedMatches is basically the same as fileutils.Matches() but optimized for archive.go.
+// It will assume that the inputs have been preprocessed and therefore the function
+// doesn't need to do as much error checking and clean-up. This was done to avoid
+// repeating these steps on each file being checked during the archive process.
+// The more generic fileutils.Matches() can't make these assumptions.
+func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, error) {
+	matched := false
+	file = filepath.FromSlash(file)
+	parentPath := filepath.Dir(file)
+	parentPathDirs := strings.Split(parentPath, string(os.PathSeparator))
+
+	for i, pattern := range patterns {
+		negative := false
+
+		if exclusion(pattern) {
+			negative = true
+			pattern = pattern[1:]
+		}
+
+		match, err := regexpMatch(pattern, file)
+		if err != nil {
+			return false, fmt.Errorf("Error in pattern (%s): %s", pattern, err)
+		}
+
+		if !match && parentPath != "." {
+			// Check to see if the pattern matches one of our parent dirs.
+			if len(patDirs[i]) <= len(parentPathDirs) {
+				match, _ = regexpMatch(strings.Join(patDirs[i], string(os.PathSeparator)),
+					strings.Join(parentPathDirs[:len(patDirs[i])], string(os.PathSeparator)))
+			}
+		}
+
+		if match {
+			matched = !negative
+		}
+	}
+
+	if matched {
+		logrus.Debugf("Skipping excluded path: %s", file)
+	}
+
+	return matched, nil
+}
+
+// regexpMatch tries to match the logic of filepath.Match but
+// does so using regexp logic. We do this so that we can expand the
+// wildcard set to include other things, like "**" to mean any number
+// of directories.  This means that we should be backwards compatible
+// with filepath.Match(). We'll end up supporting more stuff, due to
+// the fact that we're using regexp, but that's ok - it does no harm.
+//
+// As per the comment in golangs filepath.Match, on Windows, escaping
+// is disabled. Instead, '\\' is treated as path separator.
+func regexpMatch(pattern, path string) (bool, error) {
+	regStr := "^"
+
+	// Do some syntax checking on the pattern.
+	// filepath's Match() has some really weird rules that are inconsistent
+	// so instead of trying to dup their logic, just call Match() for its
+	// error state and if there is an error in the pattern return it.
+	// If this becomes an issue we can remove this since its really only
+	// needed in the error (syntax) case - which isn't really critical.
+	if _, err := filepath.Match(pattern, path); err != nil {
+		return false, err
+	}
+
+	// Go through the pattern and convert it to a regexp.
+	// We use a scanner so we can support utf-8 chars.
+	var scan scanner.Scanner
+	scan.Init(strings.NewReader(pattern))
+
+	sl := string(os.PathSeparator)
+	escSL := sl
+	if sl == `\` {
+		escSL += `\`
+	}
+
+	for scan.Peek() != scanner.EOF {
+		ch := scan.Next()
+
+		if ch == '*' {
+			if scan.Peek() == '*' {
+				// is some flavor of "**"
+				scan.Next()
+
+				if scan.Peek() == scanner.EOF {
+					// is "**EOF" - to align with .gitignore just accept all
+					regStr += ".*"
+				} else {
+					// is "**"
+					regStr += "((.*" + escSL + ")|([^" + escSL + "]*))"
+				}
+
+				// Treat **/ as ** so eat the "/"
+				if string(scan.Peek()) == sl {
+					scan.Next()
+				}
+			} else {
+				// is "*" so map it to anything but "/"
+				regStr += "[^" + escSL + "]*"
+			}
+		} else if ch == '?' {
+			// "?" is any char except "/"
+			regStr += "[^" + escSL + "]"
+		} else if ch == '.' || ch == '$' {
+			// Escape some regexp special chars that have no meaning
+			// in golang's filepath.Match
+			regStr += `\` + string(ch)
+		} else if ch == '\\' {
+			// escape next char. Note that a trailing \ in the pattern
+			// will be left alone (but need to escape it)
+			if sl == `\` {
+				// On windows map "\" to "\\", meaning an escaped backslash,
+				// and then just continue because filepath.Match on
+				// Windows doesn't allow escaping at all
+				regStr += escSL
+				continue
+			}
+			if scan.Peek() != scanner.EOF {
+				regStr += `\` + string(scan.Next())
+			} else {
+				regStr += `\`
+			}
+		} else {
+			regStr += string(ch)
+		}
+	}
+
+	regStr += "$"
+
+	res, err := regexp.MatchString(regStr, path)
+
+	// Map regexp's error to filepath's so no one knows we're not using filepath
+	if err != nil {
+		err = filepath.ErrBadPattern
+	}
+
+	return res, err
+}
+
+// CopyFile copies from src to dst until either EOF is reached
+// on src or an error occurs. It verifies src exists and removes
+// the dst if it exists.
+func CopyFile(src, dst string) (int64, error) {
+	cleanSrc := filepath.Clean(src)
+	cleanDst := filepath.Clean(dst)
+	if cleanSrc == cleanDst {
+		return 0, nil
+	}
+	sf, err := os.Open(cleanSrc)
+	if err != nil {
+		return 0, err
+	}
+	defer sf.Close()
+	if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) {
+		return 0, err
+	}
+	df, err := os.Create(cleanDst)
+	if err != nil {
+		return 0, err
+	}
+	defer df.Close()
+	return io.Copy(df, sf)
+}
+
+// ReadSymlinkedDirectory returns the target directory of a symlink.
+// The target of the symbolic link may not be a file.
+func ReadSymlinkedDirectory(path string) (string, error) {
+	var realPath string
+	var err error
+	if realPath, err = filepath.Abs(path); err != nil {
+		return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err)
+	}
+	if realPath, err = filepath.EvalSymlinks(realPath); err != nil {
+		return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err)
+	}
+	realPathInfo, err := os.Stat(realPath)
+	if err != nil {
+		return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err)
+	}
+	if !realPathInfo.Mode().IsDir() {
+		return "", fmt.Errorf("canonical path points to a file '%s'", realPath)
+	}
+	return realPath, nil
+}
+
+// CreateIfNotExists creates a file or a directory only if it does not already exist.
+func CreateIfNotExists(path string, isDir bool) error {
+	if _, err := os.Stat(path); err != nil {
+		if os.IsNotExist(err) {
+			if isDir {
+				return os.MkdirAll(path, 0755)
+			}
+			if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
+				return err
+			}
+			f, err := os.OpenFile(path, os.O_CREATE, 0755)
+			if err != nil {
+				return err
+			}
+			f.Close()
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go
new file mode 100644
index 0000000000000000000000000000000000000000..ccd648fac30011ac7e2b5c69a9414d72e6205a61
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go
@@ -0,0 +1,27 @@
+package fileutils
+
+import (
+	"os"
+	"os/exec"
+	"strconv"
+	"strings"
+)
+
+// GetTotalUsedFds returns the number of used File Descriptors by
+// executing `lsof -p PID`
+func GetTotalUsedFds() int {
+	pid := os.Getpid()
+
+	cmd := exec.Command("lsof", "-p", strconv.Itoa(pid))
+
+	output, err := cmd.CombinedOutput()
+	if err != nil {
+		return -1
+	}
+
+	outputStr := strings.TrimSpace(string(output))
+
+	fds := strings.Split(outputStr, "\n")
+
+	return len(fds) - 1
+}
diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_solaris.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_solaris.go
new file mode 100644
index 0000000000000000000000000000000000000000..0f2cb7ab93338ceb43e6ed713e43cfaa6abd520e
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_solaris.go
@@ -0,0 +1,7 @@
+package fileutils
+
+// GetTotalUsedFds Returns the number of used File Descriptors.
+// On Solaris these limits are per process and not systemwide
+func GetTotalUsedFds() int {
+	return -1
+}
diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go
new file mode 100644
index 0000000000000000000000000000000000000000..d5c3abf56880feaa9dd15a1cd1b25dd8b36e2c25
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go
@@ -0,0 +1,22 @@
+// +build linux freebsd
+
+package fileutils
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+
+	"github.com/Sirupsen/logrus"
+)
+
+// GetTotalUsedFds Returns the number of used File Descriptors by
+// reading it via /proc filesystem.
+func GetTotalUsedFds() int {
+	if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
+		logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err)
+	} else {
+		return len(fds)
+	}
+	return -1
+}
diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go
new file mode 100644
index 0000000000000000000000000000000000000000..5ec21cace52685950552bbec0f050362aa514249
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go
@@ -0,0 +1,7 @@
+package fileutils
+
+// GetTotalUsedFds Returns the number of used File Descriptors. Not supported
+// on Windows.
+func GetTotalUsedFds() int {
+	return -1
+}
diff --git a/vendor/github.com/docker/docker/pkg/httputils/httputils.go b/vendor/github.com/docker/docker/pkg/httputils/httputils.go
new file mode 100644
index 0000000000000000000000000000000000000000..d7dc43877df03501a54954cfcb898a88c5db53d0
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/httputils/httputils.go
@@ -0,0 +1,56 @@
+package httputils
+
+import (
+	"errors"
+	"fmt"
+	"net/http"
+	"regexp"
+	"strings"
+
+	"github.com/docker/docker/pkg/jsonmessage"
+)
+
+var (
+	headerRegexp     = regexp.MustCompile(`^(?:(.+)/(.+?))\((.+)\).*$`)
+	errInvalidHeader = errors.New("Bad header, should be in format `docker/version (platform)`")
+)
+
+// Download requests a given URL and returns an io.Reader.
+func Download(url string) (resp *http.Response, err error) {
+	if resp, err = http.Get(url); err != nil {
+		return nil, err
+	}
+	if resp.StatusCode >= 400 {
+		return nil, fmt.Errorf("Got HTTP status code >= 400: %s", resp.Status)
+	}
+	return resp, nil
+}
+
+// NewHTTPRequestError returns a JSON response error.
+func NewHTTPRequestError(msg string, res *http.Response) error {
+	return &jsonmessage.JSONError{
+		Message: msg,
+		Code:    res.StatusCode,
+	}
+}
+
+// ServerHeader contains the server information.
+type ServerHeader struct {
+	App string // docker
+	Ver string // 1.8.0-dev
+	OS  string // windows or linux
+}
+
+// ParseServerHeader extracts pieces from an HTTP server header
+// which is in the format "docker/version (os)" eg docker/1.8.0-dev (windows).
+func ParseServerHeader(hdr string) (*ServerHeader, error) {
+	matches := headerRegexp.FindStringSubmatch(hdr)
+	if len(matches) != 4 {
+		return nil, errInvalidHeader
+	}
+	return &ServerHeader{
+		App: strings.TrimSpace(matches[1]),
+		Ver: strings.TrimSpace(matches[2]),
+		OS:  strings.TrimSpace(matches[3]),
+	}, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/httputils/mimetype.go b/vendor/github.com/docker/docker/pkg/httputils/mimetype.go
new file mode 100644
index 0000000000000000000000000000000000000000..d5cf34e4f20eb28479a6ed3c2d2790da5af6a7e8
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/httputils/mimetype.go
@@ -0,0 +1,30 @@
+package httputils
+
+import (
+	"mime"
+	"net/http"
+)
+
+// MimeTypes stores the MIME content type.
+var MimeTypes = struct {
+	TextPlain   string
+	Tar         string
+	OctetStream string
+}{"text/plain", "application/tar", "application/octet-stream"}
+
+// DetectContentType returns a best guess representation of the MIME
+// content type for the bytes at c.  The value detected by
+// http.DetectContentType is guaranteed not be nil, defaulting to
+// application/octet-stream when a better guess cannot be made. The
+// result of this detection is then run through mime.ParseMediaType()
+// which separates the actual MIME string from any parameters.
+func DetectContentType(c []byte) (string, map[string]string, error) {
+
+	ct := http.DetectContentType(c)
+	contentType, args, err := mime.ParseMediaType(ct)
+	if err != nil {
+		return "", nil, err
+	}
+
+	return contentType, args, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/httputils/resumablerequestreader.go b/vendor/github.com/docker/docker/pkg/httputils/resumablerequestreader.go
new file mode 100644
index 0000000000000000000000000000000000000000..bebc8608cd9e15b3e1ba23152223776e9c10138f
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/httputils/resumablerequestreader.go
@@ -0,0 +1,95 @@
+package httputils
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+)
+
+type resumableRequestReader struct {
+	client          *http.Client
+	request         *http.Request
+	lastRange       int64
+	totalSize       int64
+	currentResponse *http.Response
+	failures        uint32
+	maxFailures     uint32
+}
+
+// ResumableRequestReader makes it possible to resume reading a request's body transparently
+// maxfail is the number of times we retry to make requests again (not resumes)
+// totalsize is the total length of the body; auto detect if not provided
+func ResumableRequestReader(c *http.Client, r *http.Request, maxfail uint32, totalsize int64) io.ReadCloser {
+	return &resumableRequestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize}
+}
+
+// ResumableRequestReaderWithInitialResponse makes it possible to resume
+// reading the body of an already initiated request.
+func ResumableRequestReaderWithInitialResponse(c *http.Client, r *http.Request, maxfail uint32, totalsize int64, initialResponse *http.Response) io.ReadCloser {
+	return &resumableRequestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize, currentResponse: initialResponse}
+}
+
+func (r *resumableRequestReader) Read(p []byte) (n int, err error) {
+	if r.client == nil || r.request == nil {
+		return 0, fmt.Errorf("client and request can't be nil\n")
+	}
+	isFreshRequest := false
+	if r.lastRange != 0 && r.currentResponse == nil {
+		readRange := fmt.Sprintf("bytes=%d-%d", r.lastRange, r.totalSize)
+		r.request.Header.Set("Range", readRange)
+		time.Sleep(5 * time.Second)
+	}
+	if r.currentResponse == nil {
+		r.currentResponse, err = r.client.Do(r.request)
+		isFreshRequest = true
+	}
+	if err != nil && r.failures+1 != r.maxFailures {
+		r.cleanUpResponse()
+		r.failures++
+		time.Sleep(5 * time.Duration(r.failures) * time.Second)
+		return 0, nil
+	} else if err != nil {
+		r.cleanUpResponse()
+		return 0, err
+	}
+	if r.currentResponse.StatusCode == 416 && r.lastRange == r.totalSize && r.currentResponse.ContentLength == 0 {
+		r.cleanUpResponse()
+		return 0, io.EOF
+	} else if r.currentResponse.StatusCode != 206 && r.lastRange != 0 && isFreshRequest {
+		r.cleanUpResponse()
+		return 0, fmt.Errorf("the server doesn't support byte ranges")
+	}
+	if r.totalSize == 0 {
+		r.totalSize = r.currentResponse.ContentLength
+	} else if r.totalSize <= 0 {
+		r.cleanUpResponse()
+		return 0, fmt.Errorf("failed to auto detect content length")
+	}
+	n, err = r.currentResponse.Body.Read(p)
+	r.lastRange += int64(n)
+	if err != nil {
+		r.cleanUpResponse()
+	}
+	if err != nil && err != io.EOF {
+		logrus.Infof("encountered error during pull and clearing it before resume: %s", err)
+		err = nil
+	}
+	return n, err
+}
+
+func (r *resumableRequestReader) Close() error {
+	r.cleanUpResponse()
+	r.client = nil
+	r.request = nil
+	return nil
+}
+
+func (r *resumableRequestReader) cleanUpResponse() {
+	if r.currentResponse != nil {
+		r.currentResponse.Body.Close()
+		r.currentResponse = nil
+	}
+}
diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools.go b/vendor/github.com/docker/docker/pkg/idtools/idtools.go
new file mode 100644
index 0000000000000000000000000000000000000000..6bca466286f7f70f944ed5c5c0fc6938c1c3cd99
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/idtools/idtools.go
@@ -0,0 +1,197 @@
+package idtools
+
+import (
+	"bufio"
+	"fmt"
+	"os"
+	"sort"
+	"strconv"
+	"strings"
+)
+
+// IDMap contains a single entry for user namespace range remapping. An array
+// of IDMap entries represents the structure that will be provided to the Linux
+// kernel for creating a user namespace.
+type IDMap struct {
+	ContainerID int `json:"container_id"`
+	HostID      int `json:"host_id"`
+	Size        int `json:"size"`
+}
+
+type subIDRange struct {
+	Start  int
+	Length int
+}
+
+type ranges []subIDRange
+
+func (e ranges) Len() int           { return len(e) }
+func (e ranges) Swap(i, j int)      { e[i], e[j] = e[j], e[i] }
+func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start }
+
+const (
+	subuidFileName string = "/etc/subuid"
+	subgidFileName string = "/etc/subgid"
+)
+
+// MkdirAllAs creates a directory (include any along the path) and then modifies
+// ownership to the requested uid/gid.  If the directory already exists, this
+// function will still change ownership to the requested uid/gid pair.
+func MkdirAllAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
+	return mkdirAs(path, mode, ownerUID, ownerGID, true, true)
+}
+
+// MkdirAllNewAs creates a directory (include any along the path) and then modifies
+// ownership ONLY of newly created directories to the requested uid/gid. If the
+// directories along the path exist, no change of ownership will be performed
+func MkdirAllNewAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
+	return mkdirAs(path, mode, ownerUID, ownerGID, true, false)
+}
+
+// MkdirAs creates a directory and then modifies ownership to the requested uid/gid.
+// If the directory already exists, this function still changes ownership
+func MkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
+	return mkdirAs(path, mode, ownerUID, ownerGID, false, true)
+}
+
+// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps.
+// If the maps are empty, then the root uid/gid will default to "real" 0/0
+func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
+	var uid, gid int
+
+	if uidMap != nil {
+		xUID, err := ToHost(0, uidMap)
+		if err != nil {
+			return -1, -1, err
+		}
+		uid = xUID
+	}
+	if gidMap != nil {
+		xGID, err := ToHost(0, gidMap)
+		if err != nil {
+			return -1, -1, err
+		}
+		gid = xGID
+	}
+	return uid, gid, nil
+}
+
+// ToContainer takes an id mapping, and uses it to translate a
+// host ID to the remapped ID. If no map is provided, then the translation
+// assumes a 1-to-1 mapping and returns the passed in id
+func ToContainer(hostID int, idMap []IDMap) (int, error) {
+	if idMap == nil {
+		return hostID, nil
+	}
+	for _, m := range idMap {
+		if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) {
+			contID := m.ContainerID + (hostID - m.HostID)
+			return contID, nil
+		}
+	}
+	return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID)
+}
+
+// ToHost takes an id mapping and a remapped ID, and translates the
+// ID to the mapped host ID. If no map is provided, then the translation
+// assumes a 1-to-1 mapping and returns the passed in id #
+func ToHost(contID int, idMap []IDMap) (int, error) {
+	if idMap == nil {
+		return contID, nil
+	}
+	for _, m := range idMap {
+		if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) {
+			hostID := m.HostID + (contID - m.ContainerID)
+			return hostID, nil
+		}
+	}
+	return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID)
+}
+
+// CreateIDMappings takes a requested user and group name and
+// using the data from /etc/sub{uid,gid} ranges, creates the
+// proper uid and gid remapping ranges for that user/group pair
+func CreateIDMappings(username, groupname string) ([]IDMap, []IDMap, error) {
+	subuidRanges, err := parseSubuid(username)
+	if err != nil {
+		return nil, nil, err
+	}
+	subgidRanges, err := parseSubgid(groupname)
+	if err != nil {
+		return nil, nil, err
+	}
+	if len(subuidRanges) == 0 {
+		return nil, nil, fmt.Errorf("No subuid ranges found for user %q", username)
+	}
+	if len(subgidRanges) == 0 {
+		return nil, nil, fmt.Errorf("No subgid ranges found for group %q", groupname)
+	}
+
+	return createIDMap(subuidRanges), createIDMap(subgidRanges), nil
+}
+
+func createIDMap(subidRanges ranges) []IDMap {
+	idMap := []IDMap{}
+
+	// sort the ranges by lowest ID first
+	sort.Sort(subidRanges)
+	containerID := 0
+	for _, idrange := range subidRanges {
+		idMap = append(idMap, IDMap{
+			ContainerID: containerID,
+			HostID:      idrange.Start,
+			Size:        idrange.Length,
+		})
+		containerID = containerID + idrange.Length
+	}
+	return idMap
+}
+
+func parseSubuid(username string) (ranges, error) {
+	return parseSubidFile(subuidFileName, username)
+}
+
+func parseSubgid(username string) (ranges, error) {
+	return parseSubidFile(subgidFileName, username)
+}
+
+// parseSubidFile will read the appropriate file (/etc/subuid or /etc/subgid)
+// and return all found ranges for a specified username. If the special value
+// "ALL" is supplied for username, then all ranges in the file will be returned
+func parseSubidFile(path, username string) (ranges, error) {
+	var rangeList ranges
+
+	subidFile, err := os.Open(path)
+	if err != nil {
+		return rangeList, err
+	}
+	defer subidFile.Close()
+
+	s := bufio.NewScanner(subidFile)
+	for s.Scan() {
+		if err := s.Err(); err != nil {
+			return rangeList, err
+		}
+
+		text := strings.TrimSpace(s.Text())
+		if text == "" || strings.HasPrefix(text, "#") {
+			continue
+		}
+		parts := strings.Split(text, ":")
+		if len(parts) != 3 {
+			return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path)
+		}
+		if parts[0] == username || username == "ALL" {
+			startid, err := strconv.Atoi(parts[1])
+			if err != nil {
+				return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)
+			}
+			length, err := strconv.Atoi(parts[2])
+			if err != nil {
+				return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)
+			}
+			rangeList = append(rangeList, subIDRange{startid, length})
+		}
+	}
+	return rangeList, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go
new file mode 100644
index 0000000000000000000000000000000000000000..f9eb31c3ec2477b419f74a9affc1805f042f610d
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go
@@ -0,0 +1,207 @@
+// +build !windows
+
+package idtools
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+	"sync"
+
+	"github.com/docker/docker/pkg/system"
+	"github.com/opencontainers/runc/libcontainer/user"
+)
+
+var (
+	entOnce   sync.Once
+	getentCmd string
+)
+
+func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error {
+	// make an array containing the original path asked for, plus (for mkAll == true)
+	// all path components leading up to the complete path that don't exist before we MkdirAll
+	// so that we can chown all of them properly at the end.  If chownExisting is false, we won't
+	// chown the full directory path if it exists
+	var paths []string
+	if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
+		paths = []string{path}
+	} else if err == nil && chownExisting {
+		if err := os.Chown(path, ownerUID, ownerGID); err != nil {
+			return err
+		}
+		// short-circuit--we were called with an existing directory and chown was requested
+		return nil
+	} else if err == nil {
+		// nothing to do; directory path fully exists already and chown was NOT requested
+		return nil
+	}
+
+	if mkAll {
+		// walk back to "/" looking for directories which do not exist
+		// and add them to the paths array for chown after creation
+		dirPath := path
+		for {
+			dirPath = filepath.Dir(dirPath)
+			if dirPath == "/" {
+				break
+			}
+			if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) {
+				paths = append(paths, dirPath)
+			}
+		}
+		if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) {
+			return err
+		}
+	} else {
+		if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) {
+			return err
+		}
+	}
+	// even if it existed, we will chown the requested path + any subpaths that
+	// didn't exist when we called MkdirAll
+	for _, pathComponent := range paths {
+		if err := os.Chown(pathComponent, ownerUID, ownerGID); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// CanAccess takes a valid (existing) directory and a uid, gid pair and determines
+// if that uid, gid pair has access (execute bit) to the directory
+func CanAccess(path string, uid, gid int) bool {
+	statInfo, err := system.Stat(path)
+	if err != nil {
+		return false
+	}
+	fileMode := os.FileMode(statInfo.Mode())
+	permBits := fileMode.Perm()
+	return accessible(statInfo.UID() == uint32(uid),
+		statInfo.GID() == uint32(gid), permBits)
+}
+
+func accessible(isOwner, isGroup bool, perms os.FileMode) bool {
+	if isOwner && (perms&0100 == 0100) {
+		return true
+	}
+	if isGroup && (perms&0010 == 0010) {
+		return true
+	}
+	if perms&0001 == 0001 {
+		return true
+	}
+	return false
+}
+
+// LookupUser uses traditional local system files lookup (from libcontainer/user) on a username,
+// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
+func LookupUser(username string) (user.User, error) {
+	// first try a local system files lookup using existing capabilities
+	usr, err := user.LookupUser(username)
+	if err == nil {
+		return usr, nil
+	}
+	// local files lookup failed; attempt to call `getent` to query configured passwd dbs
+	usr, err = getentUser(fmt.Sprintf("%s %s", "passwd", username))
+	if err != nil {
+		return user.User{}, err
+	}
+	return usr, nil
+}
+
+// LookupUID uses traditional local system files lookup (from libcontainer/user) on a uid,
+// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
+func LookupUID(uid int) (user.User, error) {
+	// first try a local system files lookup using existing capabilities
+	usr, err := user.LookupUid(uid)
+	if err == nil {
+		return usr, nil
+	}
+	// local files lookup failed; attempt to call `getent` to query configured passwd dbs
+	return getentUser(fmt.Sprintf("%s %d", "passwd", uid))
+}
+
+func getentUser(args string) (user.User, error) {
+	reader, err := callGetent(args)
+	if err != nil {
+		return user.User{}, err
+	}
+	users, err := user.ParsePasswd(reader)
+	if err != nil {
+		return user.User{}, err
+	}
+	if len(users) == 0 {
+		return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", strings.Split(args, " ")[1])
+	}
+	return users[0], nil
+}
+
+// LookupGroup uses traditional local system files lookup (from libcontainer/user) on a group name,
+// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
+func LookupGroup(groupname string) (user.Group, error) {
+	// first try a local system files lookup using existing capabilities
+	group, err := user.LookupGroup(groupname)
+	if err == nil {
+		return group, nil
+	}
+	// local files lookup failed; attempt to call `getent` to query configured group dbs
+	return getentGroup(fmt.Sprintf("%s %s", "group", groupname))
+}
+
+// LookupGID uses traditional local system files lookup (from libcontainer/user) on a group ID,
+// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
+func LookupGID(gid int) (user.Group, error) {
+	// first try a local system files lookup using existing capabilities
+	group, err := user.LookupGid(gid)
+	if err == nil {
+		return group, nil
+	}
+	// local files lookup failed; attempt to call `getent` to query configured group dbs
+	return getentGroup(fmt.Sprintf("%s %d", "group", gid))
+}
+
+func getentGroup(args string) (user.Group, error) {
+	reader, err := callGetent(args)
+	if err != nil {
+		return user.Group{}, err
+	}
+	groups, err := user.ParseGroup(reader)
+	if err != nil {
+		return user.Group{}, err
+	}
+	if len(groups) == 0 {
+		return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", strings.Split(args, " ")[1])
+	}
+	return groups[0], nil
+}
+
+func callGetent(args string) (io.Reader, error) {
+	entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") })
+	// if no `getent` command on host, can't do anything else
+	if getentCmd == "" {
+		return nil, fmt.Errorf("")
+	}
+	out, err := execCmd(getentCmd, args)
+	if err != nil {
+		exitCode, errC := system.GetExitCode(err)
+		if errC != nil {
+			return nil, err
+		}
+		switch exitCode {
+		case 1:
+			return nil, fmt.Errorf("getent reported invalid parameters/database unknown")
+		case 2:
+			terms := strings.Split(args, " ")
+			return nil, fmt.Errorf("getent unable to find entry %q in %s database", terms[1], terms[0])
+		case 3:
+			return nil, fmt.Errorf("getent database doesn't support enumeration")
+		default:
+			return nil, err
+		}
+
+	}
+	return bytes.NewReader(out), nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go
new file mode 100644
index 0000000000000000000000000000000000000000..49f67e78c1623c9c0bdaa51c94249e3e0dba93c5
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go
@@ -0,0 +1,25 @@
+// +build windows
+
+package idtools
+
+import (
+	"os"
+
+	"github.com/docker/docker/pkg/system"
+)
+
+// Platforms such as Windows do not support the UID/GID concept. So make this
+// just a wrapper around system.MkdirAll.
+func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error {
+	if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) {
+		return err
+	}
+	return nil
+}
+
+// CanAccess takes a valid (existing) directory and a uid, gid pair and determines
+// if that uid, gid pair has access (execute bit) to the directory
+// Windows does not require/support this function, so always return true
+func CanAccess(path string, uid, gid int) bool {
+	return true
+}
diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go
new file mode 100644
index 0000000000000000000000000000000000000000..9da7975e2c174a4713c5ea93e3f512edc62013c3
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go
@@ -0,0 +1,164 @@
+package idtools
+
+import (
+	"fmt"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+)
+
+// add a user and/or group to Linux /etc/passwd, /etc/group using standard
+// Linux distribution commands:
+// adduser --system --shell /bin/false --disabled-login --disabled-password --no-create-home --group <username>
+// useradd -r -s /bin/false <username>
+
+var (
+	once        sync.Once
+	userCommand string
+
+	cmdTemplates = map[string]string{
+		"adduser": "--system --shell /bin/false --no-create-home --disabled-login --disabled-password --group %s",
+		"useradd": "-r -s /bin/false %s",
+		"usermod": "-%s %d-%d %s",
+	}
+
+	idOutRegexp = regexp.MustCompile(`uid=([0-9]+).*gid=([0-9]+)`)
+	// default length for a UID/GID subordinate range
+	defaultRangeLen   = 65536
+	defaultRangeStart = 100000
+	userMod           = "usermod"
+)
+
+// AddNamespaceRangesUser takes a username and uses the standard system
+// utility to create a system user/group pair used to hold the
+// /etc/sub{uid,gid} ranges which will be used for user namespace
+// mapping ranges in containers.
+func AddNamespaceRangesUser(name string) (int, int, error) {
+	if err := addUser(name); err != nil {
+		return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err)
+	}
+
+	// Query the system for the created uid and gid pair
+	out, err := execCmd("id", name)
+	if err != nil {
+		return -1, -1, fmt.Errorf("Error trying to find uid/gid for new user %q: %v", name, err)
+	}
+	matches := idOutRegexp.FindStringSubmatch(strings.TrimSpace(string(out)))
+	if len(matches) != 3 {
+		return -1, -1, fmt.Errorf("Can't find uid, gid from `id` output: %q", string(out))
+	}
+	uid, err := strconv.Atoi(matches[1])
+	if err != nil {
+		return -1, -1, fmt.Errorf("Can't convert found uid (%s) to int: %v", matches[1], err)
+	}
+	gid, err := strconv.Atoi(matches[2])
+	if err != nil {
+		return -1, -1, fmt.Errorf("Can't convert found gid (%s) to int: %v", matches[2], err)
+	}
+
+	// Now we need to create the subuid/subgid ranges for our new user/group (system users
+	// do not get auto-created ranges in subuid/subgid)
+
+	if err := createSubordinateRanges(name); err != nil {
+		return -1, -1, fmt.Errorf("Couldn't create subordinate ID ranges: %v", err)
+	}
+	return uid, gid, nil
+}
+
+func addUser(userName string) error {
+	once.Do(func() {
+		// set up which commands are used for adding users/groups dependent on distro
+		if _, err := resolveBinary("adduser"); err == nil {
+			userCommand = "adduser"
+		} else if _, err := resolveBinary("useradd"); err == nil {
+			userCommand = "useradd"
+		}
+	})
+	if userCommand == "" {
+		return fmt.Errorf("Cannot add user; no useradd/adduser binary found")
+	}
+	args := fmt.Sprintf(cmdTemplates[userCommand], userName)
+	out, err := execCmd(userCommand, args)
+	if err != nil {
+		return fmt.Errorf("Failed to add user with error: %v; output: %q", err, string(out))
+	}
+	return nil
+}
+
+func createSubordinateRanges(name string) error {
+
+	// first, we should verify that ranges weren't automatically created
+	// by the distro tooling
+	ranges, err := parseSubuid(name)
+	if err != nil {
+		return fmt.Errorf("Error while looking for subuid ranges for user %q: %v", name, err)
+	}
+	if len(ranges) == 0 {
+		// no UID ranges; let's create one
+		startID, err := findNextUIDRange()
+		if err != nil {
+			return fmt.Errorf("Can't find available subuid range: %v", err)
+		}
+		out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "v", startID, startID+defaultRangeLen-1, name))
+		if err != nil {
+			return fmt.Errorf("Unable to add subuid range to user: %q; output: %s, err: %v", name, out, err)
+		}
+	}
+
+	ranges, err = parseSubgid(name)
+	if err != nil {
+		return fmt.Errorf("Error while looking for subgid ranges for user %q: %v", name, err)
+	}
+	if len(ranges) == 0 {
+		// no GID ranges; let's create one
+		startID, err := findNextGIDRange()
+		if err != nil {
+			return fmt.Errorf("Can't find available subgid range: %v", err)
+		}
+		out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "w", startID, startID+defaultRangeLen-1, name))
+		if err != nil {
+			return fmt.Errorf("Unable to add subgid range to user: %q; output: %s, err: %v", name, out, err)
+		}
+	}
+	return nil
+}
+
+func findNextUIDRange() (int, error) {
+	ranges, err := parseSubuid("ALL")
+	if err != nil {
+		return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subuid file: %v", err)
+	}
+	sort.Sort(ranges)
+	return findNextRangeStart(ranges)
+}
+
+func findNextGIDRange() (int, error) {
+	ranges, err := parseSubgid("ALL")
+	if err != nil {
+		return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subgid file: %v", err)
+	}
+	sort.Sort(ranges)
+	return findNextRangeStart(ranges)
+}
+
+func findNextRangeStart(rangeList ranges) (int, error) {
+	startID := defaultRangeStart
+	for _, arange := range rangeList {
+		if wouldOverlap(arange, startID) {
+			startID = arange.Start + arange.Length
+		}
+	}
+	return startID, nil
+}
+
+func wouldOverlap(arange subIDRange, ID int) bool {
+	low := ID
+	high := ID + defaultRangeLen
+	if (low >= arange.Start && low <= arange.Start+arange.Length) ||
+		(high <= arange.Start+arange.Length && high >= arange.Start) {
+		return true
+	}
+	return false
+}
diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go
new file mode 100644
index 0000000000000000000000000000000000000000..d98b354cbd824e0ac3eee0375442a4d92872bacc
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go
@@ -0,0 +1,12 @@
+// +build !linux
+
+package idtools
+
+import "fmt"
+
+// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair
+// and calls the appropriate helper function to add the group and then
+// the user to the group in /etc/group and /etc/passwd respectively.
+func AddNamespaceRangesUser(name string) (int, int, error) {
+	return -1, -1, fmt.Errorf("No support for adding users or groups on this OS")
+}
diff --git a/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go b/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go
new file mode 100644
index 0000000000000000000000000000000000000000..9703ecbd9d6a043ada479208537d78aa6814c3d3
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go
@@ -0,0 +1,32 @@
+// +build !windows
+
+package idtools
+
+import (
+	"fmt"
+	"os/exec"
+	"path/filepath"
+	"strings"
+)
+
+func resolveBinary(binname string) (string, error) {
+	binaryPath, err := exec.LookPath(binname)
+	if err != nil {
+		return "", err
+	}
+	resolvedPath, err := filepath.EvalSymlinks(binaryPath)
+	if err != nil {
+		return "", err
+	}
+	//only return no error if the final resolved binary basename
+	//matches what was searched for
+	if filepath.Base(resolvedPath) == binname {
+		return resolvedPath, nil
+	}
+	return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath)
+}
+
+func execCmd(cmd, args string) ([]byte, error) {
+	execCmd := exec.Command(cmd, strings.Split(args, " ")...)
+	return execCmd.CombinedOutput()
+}
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/buffer.go b/vendor/github.com/docker/docker/pkg/ioutils/buffer.go
new file mode 100644
index 0000000000000000000000000000000000000000..3d737b3e19dc76a237a24d7bd23b8a4cddfa3090
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/buffer.go
@@ -0,0 +1,51 @@
+package ioutils
+
+import (
+	"errors"
+	"io"
+)
+
+var errBufferFull = errors.New("buffer is full")
+
+type fixedBuffer struct {
+	buf      []byte
+	pos      int
+	lastRead int
+}
+
+func (b *fixedBuffer) Write(p []byte) (int, error) {
+	n := copy(b.buf[b.pos:cap(b.buf)], p)
+	b.pos += n
+
+	if n < len(p) {
+		if b.pos == cap(b.buf) {
+			return n, errBufferFull
+		}
+		return n, io.ErrShortWrite
+	}
+	return n, nil
+}
+
+func (b *fixedBuffer) Read(p []byte) (int, error) {
+	n := copy(p, b.buf[b.lastRead:b.pos])
+	b.lastRead += n
+	return n, nil
+}
+
+func (b *fixedBuffer) Len() int {
+	return b.pos - b.lastRead
+}
+
+func (b *fixedBuffer) Cap() int {
+	return cap(b.buf)
+}
+
+func (b *fixedBuffer) Reset() {
+	b.pos = 0
+	b.lastRead = 0
+	b.buf = b.buf[:0]
+}
+
+func (b *fixedBuffer) String() string {
+	return string(b.buf[b.lastRead:b.pos])
+}
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go
new file mode 100644
index 0000000000000000000000000000000000000000..72a04f34919be6f7722ddf7f0dd7d66d010df064
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go
@@ -0,0 +1,186 @@
+package ioutils
+
+import (
+	"errors"
+	"io"
+	"sync"
+)
+
+// maxCap is the highest capacity to use in byte slices that buffer data.
+const maxCap = 1e6
+
+// minCap is the lowest capacity to use in byte slices that buffer data
+const minCap = 64
+
+// blockThreshold is the minimum number of bytes in the buffer which will cause
+// a write to BytesPipe to block when allocating a new slice.
+const blockThreshold = 1e6
+
+var (
+	// ErrClosed is returned when Write is called on a closed BytesPipe.
+	ErrClosed = errors.New("write to closed BytesPipe")
+
+	bufPools     = make(map[int]*sync.Pool)
+	bufPoolsLock sync.Mutex
+)
+
+// BytesPipe is io.ReadWriteCloser which works similarly to pipe(queue).
+// All written data may be read at most once. Also, BytesPipe allocates
+// and releases new byte slices to adjust to current needs, so the buffer
+// won't be overgrown after peak loads.
+type BytesPipe struct {
+	mu       sync.Mutex
+	wait     *sync.Cond
+	buf      []*fixedBuffer
+	bufLen   int
+	closeErr error // error to return from next Read. set to nil if not closed.
+}
+
+// NewBytesPipe creates new BytesPipe, initialized by specified slice.
+// If buf is nil, then it will be initialized with slice which cap is 64.
+// buf will be adjusted in a way that len(buf) == 0, cap(buf) == cap(buf).
+func NewBytesPipe() *BytesPipe {
+	bp := &BytesPipe{}
+	bp.buf = append(bp.buf, getBuffer(minCap))
+	bp.wait = sync.NewCond(&bp.mu)
+	return bp
+}
+
+// Write writes p to BytesPipe.
+// It can allocate new []byte slices in a process of writing.
+func (bp *BytesPipe) Write(p []byte) (int, error) {
+	bp.mu.Lock()
+
+	written := 0
+loop0:
+	for {
+		if bp.closeErr != nil {
+			bp.mu.Unlock()
+			return written, ErrClosed
+		}
+
+		if len(bp.buf) == 0 {
+			bp.buf = append(bp.buf, getBuffer(64))
+		}
+		// get the last buffer
+		b := bp.buf[len(bp.buf)-1]
+
+		n, err := b.Write(p)
+		written += n
+		bp.bufLen += n
+
+		// errBufferFull is an error we expect to get if the buffer is full
+		if err != nil && err != errBufferFull {
+			bp.wait.Broadcast()
+			bp.mu.Unlock()
+			return written, err
+		}
+
+		// if there was enough room to write all then break
+		if len(p) == n {
+			break
+		}
+
+		// more data: write to the next slice
+		p = p[n:]
+
+		// make sure the buffer doesn't grow too big from this write
+		for bp.bufLen >= blockThreshold {
+			bp.wait.Wait()
+			if bp.closeErr != nil {
+				continue loop0
+			}
+		}
+
+		// add new byte slice to the buffers slice and continue writing
+		nextCap := b.Cap() * 2
+		if nextCap > maxCap {
+			nextCap = maxCap
+		}
+		bp.buf = append(bp.buf, getBuffer(nextCap))
+	}
+	bp.wait.Broadcast()
+	bp.mu.Unlock()
+	return written, nil
+}
+
+// CloseWithError causes further reads from a BytesPipe to return immediately.
+func (bp *BytesPipe) CloseWithError(err error) error {
+	bp.mu.Lock()
+	if err != nil {
+		bp.closeErr = err
+	} else {
+		bp.closeErr = io.EOF
+	}
+	bp.wait.Broadcast()
+	bp.mu.Unlock()
+	return nil
+}
+
+// Close causes further reads from a BytesPipe to return immediately.
+func (bp *BytesPipe) Close() error {
+	return bp.CloseWithError(nil)
+}
+
+// Read reads bytes from BytesPipe.
+// Data could be read only once.
+func (bp *BytesPipe) Read(p []byte) (n int, err error) {
+	bp.mu.Lock()
+	if bp.bufLen == 0 {
+		if bp.closeErr != nil {
+			bp.mu.Unlock()
+			return 0, bp.closeErr
+		}
+		bp.wait.Wait()
+		if bp.bufLen == 0 && bp.closeErr != nil {
+			err := bp.closeErr
+			bp.mu.Unlock()
+			return 0, err
+		}
+	}
+
+	for bp.bufLen > 0 {
+		b := bp.buf[0]
+		read, _ := b.Read(p) // ignore error since fixedBuffer doesn't really return an error
+		n += read
+		bp.bufLen -= read
+
+		if b.Len() == 0 {
+			// it's empty so return it to the pool and move to the next one
+			returnBuffer(b)
+			bp.buf[0] = nil
+			bp.buf = bp.buf[1:]
+		}
+
+		if len(p) == read {
+			break
+		}
+
+		p = p[read:]
+	}
+
+	bp.wait.Broadcast()
+	bp.mu.Unlock()
+	return
+}
+
+func returnBuffer(b *fixedBuffer) {
+	b.Reset()
+	bufPoolsLock.Lock()
+	pool := bufPools[b.Cap()]
+	bufPoolsLock.Unlock()
+	if pool != nil {
+		pool.Put(b)
+	}
+}
+
+func getBuffer(size int) *fixedBuffer {
+	bufPoolsLock.Lock()
+	pool, ok := bufPools[size]
+	if !ok {
+		pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }}
+		bufPools[size] = pool
+	}
+	bufPoolsLock.Unlock()
+	return pool.Get().(*fixedBuffer)
+}
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/fmt.go b/vendor/github.com/docker/docker/pkg/ioutils/fmt.go
new file mode 100644
index 0000000000000000000000000000000000000000..0b04b0ba3e63ae11effb6fccc6483c079cdb784c
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/fmt.go
@@ -0,0 +1,22 @@
+package ioutils
+
+import (
+	"fmt"
+	"io"
+)
+
+// FprintfIfNotEmpty prints the string value if it's not empty
+func FprintfIfNotEmpty(w io.Writer, format, value string) (int, error) {
+	if value != "" {
+		return fmt.Fprintf(w, format, value)
+	}
+	return 0, nil
+}
+
+// FprintfIfTrue prints the boolean value if it's true
+func FprintfIfTrue(w io.Writer, format string, ok bool) (int, error) {
+	if ok {
+		return fmt.Fprintf(w, format, ok)
+	}
+	return 0, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go b/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go
new file mode 100644
index 0000000000000000000000000000000000000000..a56c4626515e343e61ca75f4f71b744695b3de56
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go
@@ -0,0 +1,162 @@
+package ioutils
+
+import (
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+)
+
+// NewAtomicFileWriter returns WriteCloser so that writing to it writes to a
+// temporary file and closing it atomically changes the temporary file to
+// destination path. Writing and closing concurrently is not allowed.
+func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) {
+	f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename))
+	if err != nil {
+		return nil, err
+	}
+
+	abspath, err := filepath.Abs(filename)
+	if err != nil {
+		return nil, err
+	}
+	return &atomicFileWriter{
+		f:    f,
+		fn:   abspath,
+		perm: perm,
+	}, nil
+}
+
+// AtomicWriteFile atomically writes data to a file named by filename.
+func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error {
+	f, err := NewAtomicFileWriter(filename, perm)
+	if err != nil {
+		return err
+	}
+	n, err := f.Write(data)
+	if err == nil && n < len(data) {
+		err = io.ErrShortWrite
+		f.(*atomicFileWriter).writeErr = err
+	}
+	if err1 := f.Close(); err == nil {
+		err = err1
+	}
+	return err
+}
+
+type atomicFileWriter struct {
+	f        *os.File
+	fn       string
+	writeErr error
+	perm     os.FileMode
+}
+
+func (w *atomicFileWriter) Write(dt []byte) (int, error) {
+	n, err := w.f.Write(dt)
+	if err != nil {
+		w.writeErr = err
+	}
+	return n, err
+}
+
+func (w *atomicFileWriter) Close() (retErr error) {
+	defer func() {
+		if retErr != nil || w.writeErr != nil {
+			os.Remove(w.f.Name())
+		}
+	}()
+	if err := w.f.Sync(); err != nil {
+		w.f.Close()
+		return err
+	}
+	if err := w.f.Close(); err != nil {
+		return err
+	}
+	if err := os.Chmod(w.f.Name(), w.perm); err != nil {
+		return err
+	}
+	if w.writeErr == nil {
+		return os.Rename(w.f.Name(), w.fn)
+	}
+	return nil
+}
+
+// AtomicWriteSet is used to atomically write a set
+// of files and ensure they are visible at the same time.
+// Must be committed to a new directory.
+type AtomicWriteSet struct {
+	root string
+}
+
+// NewAtomicWriteSet creates a new atomic write set to
+// atomically create a set of files. The given directory
+// is used as the base directory for storing files before
+// commit. If no temporary directory is given the system
+// default is used.
+func NewAtomicWriteSet(tmpDir string) (*AtomicWriteSet, error) {
+	td, err := ioutil.TempDir(tmpDir, "write-set-")
+	if err != nil {
+		return nil, err
+	}
+
+	return &AtomicWriteSet{
+		root: td,
+	}, nil
+}
+
+// WriteFile writes a file to the set, guaranteeing the file
+// has been synced.
+func (ws *AtomicWriteSet) WriteFile(filename string, data []byte, perm os.FileMode) error {
+	f, err := ws.FileWriter(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
+	if err != nil {
+		return err
+	}
+	n, err := f.Write(data)
+	if err == nil && n < len(data) {
+		err = io.ErrShortWrite
+	}
+	if err1 := f.Close(); err == nil {
+		err = err1
+	}
+	return err
+}
+
+type syncFileCloser struct {
+	*os.File
+}
+
+func (w syncFileCloser) Close() error {
+	err := w.File.Sync()
+	if err1 := w.File.Close(); err == nil {
+		err = err1
+	}
+	return err
+}
+
+// FileWriter opens a file writer inside the set. The file
+// should be synced and closed before calling commit.
+func (ws *AtomicWriteSet) FileWriter(name string, flag int, perm os.FileMode) (io.WriteCloser, error) {
+	f, err := os.OpenFile(filepath.Join(ws.root, name), flag, perm)
+	if err != nil {
+		return nil, err
+	}
+	return syncFileCloser{f}, nil
+}
+
+// Cancel cancels the set and removes all temporary data
+// created in the set.
+func (ws *AtomicWriteSet) Cancel() error {
+	return os.RemoveAll(ws.root)
+}
+
+// Commit moves all created files to the target directory. The
+// target directory must not exist and the parent of the target
+// directory must exist.
+func (ws *AtomicWriteSet) Commit(target string) error {
+	return os.Rename(ws.root, target)
+}
+
+// String returns the location the set is writing to.
+func (ws *AtomicWriteSet) String() string {
+	return ws.root
+}
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/multireader.go b/vendor/github.com/docker/docker/pkg/ioutils/multireader.go
new file mode 100644
index 0000000000000000000000000000000000000000..d7b97486c6b7fa7b19e356851882e824601e651a
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/multireader.go
@@ -0,0 +1,223 @@
+package ioutils
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+)
+
+type pos struct {
+	idx    int
+	offset int64
+}
+
+type multiReadSeeker struct {
+	readers []io.ReadSeeker
+	pos     *pos
+	posIdx  map[io.ReadSeeker]int
+}
+
+func (r *multiReadSeeker) Seek(offset int64, whence int) (int64, error) {
+	var tmpOffset int64
+	switch whence {
+	case os.SEEK_SET:
+		for i, rdr := range r.readers {
+			// get size of the current reader
+			s, err := rdr.Seek(0, os.SEEK_END)
+			if err != nil {
+				return -1, err
+			}
+
+			if offset > tmpOffset+s {
+				if i == len(r.readers)-1 {
+					rdrOffset := s + (offset - tmpOffset)
+					if _, err := rdr.Seek(rdrOffset, os.SEEK_SET); err != nil {
+						return -1, err
+					}
+					r.pos = &pos{i, rdrOffset}
+					return offset, nil
+				}
+
+				tmpOffset += s
+				continue
+			}
+
+			rdrOffset := offset - tmpOffset
+			idx := i
+
+			rdr.Seek(rdrOffset, os.SEEK_SET)
+			// make sure all following readers are at 0
+			for _, rdr := range r.readers[i+1:] {
+				rdr.Seek(0, os.SEEK_SET)
+			}
+
+			if rdrOffset == s && i != len(r.readers)-1 {
+				idx++
+				rdrOffset = 0
+			}
+			r.pos = &pos{idx, rdrOffset}
+			return offset, nil
+		}
+	case os.SEEK_END:
+		for _, rdr := range r.readers {
+			s, err := rdr.Seek(0, os.SEEK_END)
+			if err != nil {
+				return -1, err
+			}
+			tmpOffset += s
+		}
+		r.Seek(tmpOffset+offset, os.SEEK_SET)
+		return tmpOffset + offset, nil
+	case os.SEEK_CUR:
+		if r.pos == nil {
+			return r.Seek(offset, os.SEEK_SET)
+		}
+		// Just return the current offset
+		if offset == 0 {
+			return r.getCurOffset()
+		}
+
+		curOffset, err := r.getCurOffset()
+		if err != nil {
+			return -1, err
+		}
+		rdr, rdrOffset, err := r.getReaderForOffset(curOffset + offset)
+		if err != nil {
+			return -1, err
+		}
+
+		r.pos = &pos{r.posIdx[rdr], rdrOffset}
+		return curOffset + offset, nil
+	default:
+		return -1, fmt.Errorf("Invalid whence: %d", whence)
+	}
+
+	return -1, fmt.Errorf("Error seeking for whence: %d, offset: %d", whence, offset)
+}
+
+func (r *multiReadSeeker) getReaderForOffset(offset int64) (io.ReadSeeker, int64, error) {
+
+	var offsetTo int64
+
+	for _, rdr := range r.readers {
+		size, err := getReadSeekerSize(rdr)
+		if err != nil {
+			return nil, -1, err
+		}
+		if offsetTo+size > offset {
+			return rdr, offset - offsetTo, nil
+		}
+		if rdr == r.readers[len(r.readers)-1] {
+			return rdr, offsetTo + offset, nil
+		}
+		offsetTo += size
+	}
+
+	return nil, 0, nil
+}
+
+func (r *multiReadSeeker) getCurOffset() (int64, error) {
+	var totalSize int64
+	for _, rdr := range r.readers[:r.pos.idx+1] {
+		if r.posIdx[rdr] == r.pos.idx {
+			totalSize += r.pos.offset
+			break
+		}
+
+		size, err := getReadSeekerSize(rdr)
+		if err != nil {
+			return -1, fmt.Errorf("error getting seeker size: %v", err)
+		}
+		totalSize += size
+	}
+	return totalSize, nil
+}
+
+func (r *multiReadSeeker) getOffsetToReader(rdr io.ReadSeeker) (int64, error) {
+	var offset int64
+	for _, r := range r.readers {
+		if r == rdr {
+			break
+		}
+
+		size, err := getReadSeekerSize(rdr)
+		if err != nil {
+			return -1, err
+		}
+		offset += size
+	}
+	return offset, nil
+}
+
+func (r *multiReadSeeker) Read(b []byte) (int, error) {
+	if r.pos == nil {
+		r.pos = &pos{0, 0}
+	}
+
+	bLen := int64(len(b))
+	buf := bytes.NewBuffer(nil)
+	var rdr io.ReadSeeker
+
+	for _, rdr = range r.readers[r.pos.idx:] {
+		readBytes, err := io.CopyN(buf, rdr, bLen)
+		if err != nil && err != io.EOF {
+			return -1, err
+		}
+		bLen -= readBytes
+
+		if bLen == 0 {
+			break
+		}
+	}
+
+	rdrPos, err := rdr.Seek(0, os.SEEK_CUR)
+	if err != nil {
+		return -1, err
+	}
+	r.pos = &pos{r.posIdx[rdr], rdrPos}
+	return buf.Read(b)
+}
+
+func getReadSeekerSize(rdr io.ReadSeeker) (int64, error) {
+	// save the current position
+	pos, err := rdr.Seek(0, os.SEEK_CUR)
+	if err != nil {
+		return -1, err
+	}
+
+	// get the size
+	size, err := rdr.Seek(0, os.SEEK_END)
+	if err != nil {
+		return -1, err
+	}
+
+	// reset the position
+	if _, err := rdr.Seek(pos, os.SEEK_SET); err != nil {
+		return -1, err
+	}
+	return size, nil
+}
+
+// MultiReadSeeker returns a ReadSeeker that's the logical concatenation of the provided
+// input readseekers. After calling this method the initial position is set to the
+// beginning of the first ReadSeeker. At the end of a ReadSeeker, Read always advances
+// to the beginning of the next ReadSeeker and returns EOF at the end of the last ReadSeeker.
+// Seek can be used over the sum of lengths of all readseekers.
+//
+// When a MultiReadSeeker is used, no Read and Seek operations should be made on
+// its ReadSeeker components. Also, users should make no assumption on the state
+// of individual readseekers while the MultiReadSeeker is used.
+func MultiReadSeeker(readers ...io.ReadSeeker) io.ReadSeeker {
+	if len(readers) == 1 {
+		return readers[0]
+	}
+	idx := make(map[io.ReadSeeker]int)
+	for i, rdr := range readers {
+		idx[rdr] = i
+	}
+	return &multiReadSeeker{
+		readers: readers,
+		posIdx:  idx,
+	}
+}
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/readers.go b/vendor/github.com/docker/docker/pkg/ioutils/readers.go
new file mode 100644
index 0000000000000000000000000000000000000000..63f3c07f463758e85106c0ce9a537174dc7a2524
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/readers.go
@@ -0,0 +1,154 @@
+package ioutils
+
+import (
+	"crypto/sha256"
+	"encoding/hex"
+	"io"
+
+	"golang.org/x/net/context"
+)
+
+type readCloserWrapper struct {
+	io.Reader
+	closer func() error
+}
+
+func (r *readCloserWrapper) Close() error {
+	return r.closer()
+}
+
+// NewReadCloserWrapper returns a new io.ReadCloser.
+func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser {
+	return &readCloserWrapper{
+		Reader: r,
+		closer: closer,
+	}
+}
+
+type readerErrWrapper struct {
+	reader io.Reader
+	closer func()
+}
+
+func (r *readerErrWrapper) Read(p []byte) (int, error) {
+	n, err := r.reader.Read(p)
+	if err != nil {
+		r.closer()
+	}
+	return n, err
+}
+
+// NewReaderErrWrapper returns a new io.Reader.
+func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader {
+	return &readerErrWrapper{
+		reader: r,
+		closer: closer,
+	}
+}
+
+// HashData returns the sha256 sum of src.
+func HashData(src io.Reader) (string, error) {
+	h := sha256.New()
+	if _, err := io.Copy(h, src); err != nil {
+		return "", err
+	}
+	return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil
+}
+
+// OnEOFReader wraps an io.ReadCloser and a function
+// the function will run at the end of file or close the file.
+type OnEOFReader struct {
+	Rc io.ReadCloser
+	Fn func()
+}
+
+func (r *OnEOFReader) Read(p []byte) (n int, err error) {
+	n, err = r.Rc.Read(p)
+	if err == io.EOF {
+		r.runFunc()
+	}
+	return
+}
+
+// Close closes the file and run the function.
+func (r *OnEOFReader) Close() error {
+	err := r.Rc.Close()
+	r.runFunc()
+	return err
+}
+
+func (r *OnEOFReader) runFunc() {
+	if fn := r.Fn; fn != nil {
+		fn()
+		r.Fn = nil
+	}
+}
+
+// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read
+// operations.
+type cancelReadCloser struct {
+	cancel func()
+	pR     *io.PipeReader // Stream to read from
+	pW     *io.PipeWriter
+}
+
+// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the
+// context is cancelled. The returned io.ReadCloser must be closed when it is
+// no longer needed.
+func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser {
+	pR, pW := io.Pipe()
+
+	// Create a context used to signal when the pipe is closed
+	doneCtx, cancel := context.WithCancel(context.Background())
+
+	p := &cancelReadCloser{
+		cancel: cancel,
+		pR:     pR,
+		pW:     pW,
+	}
+
+	go func() {
+		_, err := io.Copy(pW, in)
+		select {
+		case <-ctx.Done():
+			// If the context was closed, p.closeWithError
+			// was already called. Calling it again would
+			// change the error that Read returns.
+		default:
+			p.closeWithError(err)
+		}
+		in.Close()
+	}()
+	go func() {
+		for {
+			select {
+			case <-ctx.Done():
+				p.closeWithError(ctx.Err())
+			case <-doneCtx.Done():
+				return
+			}
+		}
+	}()
+
+	return p
+}
+
+// Read wraps the Read method of the pipe that provides data from the wrapped
+// ReadCloser.
+func (p *cancelReadCloser) Read(buf []byte) (n int, err error) {
+	return p.pR.Read(buf)
+}
+
+// closeWithError closes the wrapper and its underlying reader. It will
+// cause future calls to Read to return err.
+func (p *cancelReadCloser) closeWithError(err error) {
+	p.pW.CloseWithError(err)
+	p.cancel()
+}
+
+// Close closes the wrapper its underlying reader. It will cause
+// future calls to Read to return io.EOF.
+func (p *cancelReadCloser) Close() error {
+	p.closeWithError(io.EOF)
+	return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go b/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go
new file mode 100644
index 0000000000000000000000000000000000000000..1539ad21b57afa2bb2b26825364e4b72a8806053
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go
@@ -0,0 +1,10 @@
+// +build !windows
+
+package ioutils
+
+import "io/ioutil"
+
+// TempDir on Unix systems is equivalent to ioutil.TempDir.
+func TempDir(dir, prefix string) (string, error) {
+	return ioutil.TempDir(dir, prefix)
+}
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go b/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go
new file mode 100644
index 0000000000000000000000000000000000000000..c258e5fdd87882fa5f229b0c288a7ea84560a1aa
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go
@@ -0,0 +1,18 @@
+// +build windows
+
+package ioutils
+
+import (
+	"io/ioutil"
+
+	"github.com/docker/docker/pkg/longpath"
+)
+
+// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format.
+func TempDir(dir, prefix string) (string, error) {
+	tempDir, err := ioutil.TempDir(dir, prefix)
+	if err != nil {
+		return "", err
+	}
+	return longpath.AddPrefix(tempDir), nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go b/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go
new file mode 100644
index 0000000000000000000000000000000000000000..52a4901adebfb393135b578aed851338aabf7fc6
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go
@@ -0,0 +1,92 @@
+package ioutils
+
+import (
+	"io"
+	"sync"
+)
+
+// WriteFlusher wraps the Write and Flush operation ensuring that every write
+// is a flush. In addition, the Close method can be called to intercept
+// Read/Write calls if the targets lifecycle has already ended.
+type WriteFlusher struct {
+	w           io.Writer
+	flusher     flusher
+	flushed     chan struct{}
+	flushedOnce sync.Once
+	closed      chan struct{}
+	closeLock   sync.Mutex
+}
+
+type flusher interface {
+	Flush()
+}
+
+var errWriteFlusherClosed = io.EOF
+
+func (wf *WriteFlusher) Write(b []byte) (n int, err error) {
+	select {
+	case <-wf.closed:
+		return 0, errWriteFlusherClosed
+	default:
+	}
+
+	n, err = wf.w.Write(b)
+	wf.Flush() // every write is a flush.
+	return n, err
+}
+
+// Flush the stream immediately.
+func (wf *WriteFlusher) Flush() {
+	select {
+	case <-wf.closed:
+		return
+	default:
+	}
+
+	wf.flushedOnce.Do(func() {
+		close(wf.flushed)
+	})
+	wf.flusher.Flush()
+}
+
+// Flushed returns the state of flushed.
+// If it's flushed, return true, or else it return false.
+func (wf *WriteFlusher) Flushed() bool {
+	// BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to
+	// be used to detect whether or a response code has been issued or not.
+	// Another hook should be used instead.
+	var flushed bool
+	select {
+	case <-wf.flushed:
+		flushed = true
+	default:
+	}
+	return flushed
+}
+
+// Close closes the write flusher, disallowing any further writes to the
+// target. After the flusher is closed, all calls to write or flush will
+// result in an error.
+func (wf *WriteFlusher) Close() error {
+	wf.closeLock.Lock()
+	defer wf.closeLock.Unlock()
+
+	select {
+	case <-wf.closed:
+		return errWriteFlusherClosed
+	default:
+		close(wf.closed)
+	}
+	return nil
+}
+
+// NewWriteFlusher returns a new WriteFlusher.
+func NewWriteFlusher(w io.Writer) *WriteFlusher {
+	var fl flusher
+	if f, ok := w.(flusher); ok {
+		fl = f
+	} else {
+		fl = &NopFlusher{}
+	}
+	return &WriteFlusher{w: w, flusher: fl, closed: make(chan struct{}), flushed: make(chan struct{})}
+}
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/writers.go b/vendor/github.com/docker/docker/pkg/ioutils/writers.go
new file mode 100644
index 0000000000000000000000000000000000000000..ccc7f9c23e0f4e9422de8d5bc736f8b488f58bf0
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/writers.go
@@ -0,0 +1,66 @@
+package ioutils
+
+import "io"
+
+// NopWriter represents a type which write operation is nop.
+type NopWriter struct{}
+
+func (*NopWriter) Write(buf []byte) (int, error) {
+	return len(buf), nil
+}
+
+type nopWriteCloser struct {
+	io.Writer
+}
+
+func (w *nopWriteCloser) Close() error { return nil }
+
+// NopWriteCloser returns a nopWriteCloser.
+func NopWriteCloser(w io.Writer) io.WriteCloser {
+	return &nopWriteCloser{w}
+}
+
+// NopFlusher represents a type which flush operation is nop.
+type NopFlusher struct{}
+
+// Flush is a nop operation.
+func (f *NopFlusher) Flush() {}
+
+type writeCloserWrapper struct {
+	io.Writer
+	closer func() error
+}
+
+func (r *writeCloserWrapper) Close() error {
+	return r.closer()
+}
+
+// NewWriteCloserWrapper returns a new io.WriteCloser.
+func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser {
+	return &writeCloserWrapper{
+		Writer: r,
+		closer: closer,
+	}
+}
+
+// WriteCounter wraps a concrete io.Writer and hold a count of the number
+// of bytes written to the writer during a "session".
+// This can be convenient when write return is masked
+// (e.g., json.Encoder.Encode())
+type WriteCounter struct {
+	Count  int64
+	Writer io.Writer
+}
+
+// NewWriteCounter returns a new WriteCounter.
+func NewWriteCounter(w io.Writer) *WriteCounter {
+	return &WriteCounter{
+		Writer: w,
+	}
+}
+
+// Write writes p to the wrapped Writer and adds the number of bytes actually
+// written to Count, even when the underlying write returns an error.
+func (wc *WriteCounter) Write(p []byte) (count int, err error) {
+	count, err = wc.Writer.Write(p)
+	wc.Count += int64(count)
+	return
+}
diff --git a/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog.go b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog.go
new file mode 100644
index 0000000000000000000000000000000000000000..4734c311196b84481fb204ea1810e1d4023f90b6
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog.go
@@ -0,0 +1,42 @@
+package jsonlog
+
+import (
+	"encoding/json"
+	"fmt"
+	"time"
+)
+
+// JSONLog represents a log message, typically a single entry from a given log stream.
+// JSONLogs can be easily serialized to and from JSON and support custom formatting.
+type JSONLog struct {
+	// Log is the log message
+	Log string `json:"log,omitempty"`
+	// Stream is the log source
+	Stream string `json:"stream,omitempty"`
+	// Created is the created timestamp of log
+	Created time.Time `json:"time"`
+	// Attrs is the list of extra attributes provided by the user
+	Attrs map[string]string `json:"attrs,omitempty"`
+}
+
+// Format returns the log formatted according to format:
+//   - "": the raw log message only
+//   - "json": the whole entry marshaled as JSON
+//   - anything else: treated as a time layout, producing
+//     "<formatted Created> <Log>".
+func (jl *JSONLog) Format(format string) (string, error) {
+	if format == "" {
+		return jl.Log, nil
+	}
+	if format == "json" {
+		m, err := json.Marshal(jl)
+		return string(m), err
+	}
+	return fmt.Sprintf("%s %s", jl.Created.Format(format), jl.Log), nil
+}
+
+// Reset resets all fields to their zero values so the JSONLog can be safely
+// reused, e.g. as a decode target in a loop.
+func (jl *JSONLog) Reset() {
+	jl.Log = ""
+	jl.Stream = ""
+	jl.Created = time.Time{}
+	// Also drop user-provided attributes; leaving them set would leak stale
+	// attrs from a previous entry into the next one decoded into this struct.
+	jl.Attrs = nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling.go b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling.go
new file mode 100644
index 0000000000000000000000000000000000000000..83ce684a8ef8271809c19dc55d6a0704edf92487
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling.go
@@ -0,0 +1,178 @@
+// This code was initially generated by ffjson <https://github.com/pquerna/ffjson>
+// This code was generated via the following steps:
+// $ go get -u github.com/pquerna/ffjson
+// $ make BIND_DIR=. shell
+// $ ffjson pkg/jsonlog/jsonlog.go
+// $ mv pkg/jsonglog/jsonlog_ffjson.go pkg/jsonlog/jsonlog_marshalling.go
+//
+// It has been modified to improve the performance of time marshalling to JSON
+// and to clean it up.
+// Should this code need to be regenerated when the JSONLog struct is changed,
+// the relevant changes which have been made are:
+// import (
+//        "bytes"
+//-
+//        "unicode/utf8"
+// )
+//
+// func (mj *JSONLog) MarshalJSON() ([]byte, error) {
+//@@ -20,13 +16,13 @@ func (mj *JSONLog) MarshalJSON() ([]byte, error) {
+//        }
+//        return buf.Bytes(), nil
+// }
+//+
+// func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error {
+//-       var err error
+//-       var obj []byte
+//-       var first bool = true
+//-       _ = obj
+//-       _ = err
+//-       _ = first
+//+       var (
+//+               err       error
+//+               timestamp string
+//+               first     bool = true
+//+       )
+//        buf.WriteString(`{`)
+//        if len(mj.Log) != 0 {
+//                if first == true {
+//@@ -52,11 +48,11 @@ func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error {
+//                buf.WriteString(`,`)
+//        }
+//        buf.WriteString(`"time":`)
+//-       obj, err = mj.Created.MarshalJSON()
+//+       timestamp, err = FastTimeMarshalJSON(mj.Created)
+//        if err != nil {
+//                return err
+//        }
+//-       buf.Write(obj)
+//+       buf.WriteString(timestamp)
+//        buf.WriteString(`}`)
+//        return nil
+// }
+// @@ -81,9 +81,10 @@ func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error {
+//         if len(mj.Log) != 0 {
+// -                if first == true {
+// -                       first = false
+// -               } else {
+// -                       buf.WriteString(`,`)
+// -               }
+// +               first = false
+//                 buf.WriteString(`"log":`)
+//                 ffjsonWriteJSONString(buf, mj.Log)
+//         }
+
+package jsonlog
+
+import (
+	"bytes"
+	"unicode/utf8"
+)
+
+// MarshalJSON marshals the JSONLog.
+// The buffer is pre-grown to 1KB to avoid reallocations for typical log lines.
+func (mj *JSONLog) MarshalJSON() ([]byte, error) {
+	var buf bytes.Buffer
+	buf.Grow(1024)
+	if err := mj.MarshalJSONBuf(&buf); err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
+
+// MarshalJSONBuf marshals the JSONLog and stores the result to a bytes.Buffer.
+// Only Log, Stream and the (always-present) "time" field are emitted; Attrs is
+// not serialized by this fast path — NOTE(review): confirm callers never rely
+// on Attrs round-tripping through MarshalJSONBuf.
+func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error {
+	var (
+		err       error
+		timestamp string
+		first     = true
+	)
+	buf.WriteString(`{`)
+	if len(mj.Log) != 0 {
+		first = false
+		buf.WriteString(`"log":`)
+		ffjsonWriteJSONString(buf, mj.Log)
+	}
+	if len(mj.Stream) != 0 {
+		if first {
+			first = false
+		} else {
+			buf.WriteString(`,`)
+		}
+		buf.WriteString(`"stream":`)
+		ffjsonWriteJSONString(buf, mj.Stream)
+	}
+	// "time" is always written, so a comma is needed iff any field preceded it.
+	if !first {
+		buf.WriteString(`,`)
+	}
+	buf.WriteString(`"time":`)
+	timestamp, err = FastTimeMarshalJSON(mj.Created)
+	if err != nil {
+		return err
+	}
+	buf.WriteString(timestamp)
+	buf.WriteString(`}`)
+	return nil
+}
+
+// ffjsonWriteJSONString writes s to buf as a JSON string literal. It escapes
+// quotes, backslashes and control characters, plus '<', '>' and '&' (HTML-safe
+// output, matching encoding/json's default), replaces invalid UTF-8 with
+// \ufffd, and escapes the JavaScript-problematic U+2028/U+2029 separators.
+func ffjsonWriteJSONString(buf *bytes.Buffer, s string) {
+	const hex = "0123456789abcdef"
+
+	buf.WriteByte('"')
+	// start marks the beginning of the current run of bytes that need no
+	// escaping; runs are flushed in one WriteString before each escape.
+	start := 0
+	for i := 0; i < len(s); {
+		if b := s[i]; b < utf8.RuneSelf {
+			if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' {
+				i++
+				continue
+			}
+			if start < i {
+				buf.WriteString(s[start:i])
+			}
+			switch b {
+			case '\\', '"':
+				buf.WriteByte('\\')
+				buf.WriteByte(b)
+			case '\n':
+				buf.WriteByte('\\')
+				buf.WriteByte('n')
+			case '\r':
+				buf.WriteByte('\\')
+				buf.WriteByte('r')
+			default:
+
+				// Other control bytes (and '<', '>', '&') become \u00XX.
+				buf.WriteString(`\u00`)
+				buf.WriteByte(hex[b>>4])
+				buf.WriteByte(hex[b&0xF])
+			}
+			i++
+			start = i
+			continue
+		}
+		c, size := utf8.DecodeRuneInString(s[i:])
+		if c == utf8.RuneError && size == 1 {
+			if start < i {
+				buf.WriteString(s[start:i])
+			}
+			buf.WriteString(`\ufffd`)
+			i += size
+			start = i
+			continue
+		}
+
+		if c == '\u2028' || c == '\u2029' {
+			if start < i {
+				buf.WriteString(s[start:i])
+			}
+			buf.WriteString(`\u202`)
+			buf.WriteByte(hex[c&0xF])
+			i += size
+			start = i
+			continue
+		}
+		i += size
+	}
+	// Flush the trailing unescaped run, if any.
+	if start < len(s) {
+		buf.WriteString(s[start:])
+	}
+	buf.WriteByte('"')
+}
diff --git a/vendor/github.com/docker/docker/pkg/jsonlog/jsonlogbytes.go b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlogbytes.go
new file mode 100644
index 0000000000000000000000000000000000000000..df522c0d66f5c4ca4113faa30f8995899ff69ae2
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlogbytes.go
@@ -0,0 +1,122 @@
+package jsonlog
+
+import (
+	"bytes"
+	"encoding/json"
+	"unicode/utf8"
+)
+
+// JSONLogs is based on JSONLog.
+// It allows marshalling JSONLog from Log as []byte
+// and an already marshalled Created timestamp.
+type JSONLogs struct {
+	Log     []byte `json:"log,omitempty"`
+	Stream  string `json:"stream,omitempty"`
+	Created string `json:"time"`
+
+	// json-encoded bytes
+	RawAttrs json.RawMessage `json:"attrs,omitempty"`
+}
+
+// MarshalJSONBuf is based on the same method from JSONLog
+// It has been modified to take into account the necessary changes: Log is a
+// byte slice, Created is pre-marshalled, and RawAttrs is written verbatim.
+func (mj *JSONLogs) MarshalJSONBuf(buf *bytes.Buffer) error {
+	var first = true
+
+	buf.WriteString(`{`)
+	if len(mj.Log) != 0 {
+		first = false
+		buf.WriteString(`"log":`)
+		ffjsonWriteJSONBytesAsString(buf, mj.Log)
+	}
+	if len(mj.Stream) != 0 {
+		// Idiomatic boolean test (was `first == true`), consistent with the
+		// other branches here and with JSONLog.MarshalJSONBuf.
+		if first {
+			first = false
+		} else {
+			buf.WriteString(`,`)
+		}
+		buf.WriteString(`"stream":`)
+		ffjsonWriteJSONString(buf, mj.Stream)
+	}
+	if len(mj.RawAttrs) > 0 {
+		if first {
+			first = false
+		} else {
+			buf.WriteString(`,`)
+		}
+		buf.WriteString(`"attrs":`)
+		buf.Write(mj.RawAttrs)
+	}
+	// "time" is always emitted; prepend a comma iff any field came before it.
+	if !first {
+		buf.WriteString(`,`)
+	}
+	buf.WriteString(`"time":`)
+	buf.WriteString(mj.Created)
+	buf.WriteString(`}`)
+	return nil
+}
+
+// ffjsonWriteJSONBytesAsString is based on ffjsonWriteJSONString above. It has
+// been changed to accept a string passed as a slice of bytes. The escaping
+// rules are identical: quotes, backslashes, control characters, '<', '>' and
+// '&' (HTML-safe), invalid UTF-8 (\ufffd) and U+2028/U+2029.
+func ffjsonWriteJSONBytesAsString(buf *bytes.Buffer, s []byte) {
+	const hex = "0123456789abcdef"
+
+	buf.WriteByte('"')
+	// start marks the beginning of the current run of bytes that need no
+	// escaping; runs are flushed in one Write before each escape.
+	start := 0
+	for i := 0; i < len(s); {
+		if b := s[i]; b < utf8.RuneSelf {
+			if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' {
+				i++
+				continue
+			}
+			if start < i {
+				buf.Write(s[start:i])
+			}
+			switch b {
+			case '\\', '"':
+				buf.WriteByte('\\')
+				buf.WriteByte(b)
+			case '\n':
+				buf.WriteByte('\\')
+				buf.WriteByte('n')
+			case '\r':
+				buf.WriteByte('\\')
+				buf.WriteByte('r')
+			default:
+
+				// Other control bytes (and '<', '>', '&') become \u00XX.
+				buf.WriteString(`\u00`)
+				buf.WriteByte(hex[b>>4])
+				buf.WriteByte(hex[b&0xF])
+			}
+			i++
+			start = i
+			continue
+		}
+		c, size := utf8.DecodeRune(s[i:])
+		if c == utf8.RuneError && size == 1 {
+			if start < i {
+				buf.Write(s[start:i])
+			}
+			buf.WriteString(`\ufffd`)
+			i += size
+			start = i
+			continue
+		}
+
+		if c == '\u2028' || c == '\u2029' {
+			if start < i {
+				buf.Write(s[start:i])
+			}
+			buf.WriteString(`\u202`)
+			buf.WriteByte(hex[c&0xF])
+			i += size
+			start = i
+			continue
+		}
+		i += size
+	}
+	// Flush the trailing unescaped run, if any.
+	if start < len(s) {
+		buf.Write(s[start:])
+	}
+	buf.WriteByte('"')
+}
diff --git a/vendor/github.com/docker/docker/pkg/jsonlog/time_marshalling.go b/vendor/github.com/docker/docker/pkg/jsonlog/time_marshalling.go
new file mode 100644
index 0000000000000000000000000000000000000000..21173381495d1708ce19e5b39134552d50214726
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/jsonlog/time_marshalling.go
@@ -0,0 +1,27 @@
+// Package jsonlog provides helper functions to parse and print time (time.Time) as JSON.
+package jsonlog
+
+import (
+	"errors"
+	"time"
+)
+
+const (
+	// RFC3339NanoFixed is our own version of RFC3339Nano because we want one
+	// that pads the nano seconds part with zeros to ensure
+	// the timestamps are aligned in the logs.
+	RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
+	// JSONFormat is the format used by FastMarshalJSON
+	JSONFormat = `"` + time.RFC3339Nano + `"`
+)
+
+// FastTimeMarshalJSON avoids one of the extra allocations that
+// time.MarshalJSON is making, by formatting directly with the quoted
+// JSONFormat layout instead of building the quoted string separately.
+func (or a year-range error mirroring time.MarshalJSON's own restriction)
+func FastTimeMarshalJSON(t time.Time) (string, error) {
+	if y := t.Year(); y < 0 || y >= 10000 {
+		// RFC 3339 is clear that years are 4 digits exactly.
+		// See golang.org/issue/4556#c15 for more discussion.
+		return "", errors.New("time.MarshalJSON: year outside of range [0,9999]")
+	}
+	return t.Format(JSONFormat), nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go
new file mode 100644
index 0000000000000000000000000000000000000000..5481433c5665c81a84ff9e0b677e57e5021321f9
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go
@@ -0,0 +1,225 @@
+package jsonmessage
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"strings"
+	"time"
+
+	"github.com/docker/docker/pkg/jsonlog"
+	"github.com/docker/docker/pkg/term"
+	"github.com/docker/go-units"
+)
+
+// JSONError wraps a concrete Code and Message, `Code` is
+// is an integer error code, `Message` is the error message.
+type JSONError struct {
+	Code    int    `json:"code,omitempty"`
+	Message string `json:"message,omitempty"`
+}
+
+func (e *JSONError) Error() string {
+	return e.Message
+}
+
+// JSONProgress describes a Progress. terminalFd is the fd of the current terminal,
+// Start is the initial value for the operation. Current is the current status and
+// value of the progress made towards Total. Total is the end value describing when
+// we made 100% progress for an operation.
+type JSONProgress struct {
+	terminalFd uintptr
+	Current    int64 `json:"current,omitempty"`
+	Total      int64 `json:"total,omitempty"`
+	Start      int64 `json:"start,omitempty"`
+}
+
+// String renders the progress as a human-readable line: an ASCII progress bar
+// (only when the terminal is wider than 110 columns), a "current/total" box,
+// and an estimated time remaining (only when under 50% complete and a start
+// time is known). It returns "" when no progress values are set.
+func (p *JSONProgress) String() string {
+	var (
+		width       = 200 // fallback width when the terminal size is unknown
+		pbBox       string
+		numbersBox  string
+		timeLeftBox string
+	)
+
+	ws, err := term.GetWinsize(p.terminalFd)
+	if err == nil {
+		width = int(ws.Width)
+	}
+
+	if p.Current <= 0 && p.Total <= 0 {
+		return ""
+	}
+	current := units.HumanSize(float64(p.Current))
+	if p.Total <= 0 {
+		return fmt.Sprintf("%8v", current)
+	}
+	total := units.HumanSize(float64(p.Total))
+	// percentage is in half-percent units (0..50) so it doubles as the
+	// number of '=' characters in the 50-wide bar.
+	percentage := int(float64(p.Current)/float64(p.Total)*100) / 2
+	if percentage > 50 {
+		percentage = 50
+	}
+	if width > 110 {
+		// this number can't be negative gh#7136
+		numSpaces := 0
+		if 50-percentage > 0 {
+			numSpaces = 50 - percentage
+		}
+		pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces))
+	}
+
+	numbersBox = fmt.Sprintf("%8v/%v", current, total)
+
+	if p.Current > p.Total {
+		// remove total display if the reported current is wonky.
+		numbersBox = fmt.Sprintf("%8v", current)
+	}
+
+	if p.Current > 0 && p.Start > 0 && percentage < 50 {
+		// Extrapolate remaining time from the average time per unit so far,
+		// truncated to whole seconds.
+		fromStart := time.Now().UTC().Sub(time.Unix(p.Start, 0))
+		perEntry := fromStart / time.Duration(p.Current)
+		left := time.Duration(p.Total-p.Current) * perEntry
+		left = (left / time.Second) * time.Second
+
+		if width > 50 {
+			timeLeftBox = " " + left.String()
+		}
+	}
+	return pbBox + numbersBox + timeLeftBox
+}
+
+// JSONMessage defines a message struct. It describes
+// the created time, where it from, status, ID of the
+// message. It's used for docker events.
+type JSONMessage struct {
+	Stream          string        `json:"stream,omitempty"`
+	Status          string        `json:"status,omitempty"`
+	Progress        *JSONProgress `json:"progressDetail,omitempty"`
+	ProgressMessage string        `json:"progress,omitempty"` //deprecated
+	ID              string        `json:"id,omitempty"`
+	From            string        `json:"from,omitempty"`
+	Time            int64         `json:"time,omitempty"`
+	TimeNano        int64         `json:"timeNano,omitempty"`
+	Error           *JSONError    `json:"errorDetail,omitempty"`
+	ErrorMessage    string        `json:"error,omitempty"` //deprecated
+	// Aux contains out-of-band data, such as digests for push signing.
+	Aux *json.RawMessage `json:"aux,omitempty"`
+}
+
+// Display displays the JSONMessage to `out`. `isTerminal` describes if `out`
+// is a terminal. If this is the case, it will erase the entire current line
+// when displaying the progressbar. On a non-terminal, progress-only messages
+// are suppressed entirely.
+func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error {
+	if jm.Error != nil {
+		if jm.Error.Code == 401 {
+			return fmt.Errorf("Authentication is required.")
+		}
+		return jm.Error
+	}
+	var endl string
+	if isTerminal && jm.Stream == "" && jm.Progress != nil {
+		// <ESC>[2K = erase entire current line
+		fmt.Fprintf(out, "%c[2K\r", 27)
+		endl = "\r"
+	} else if jm.Progress != nil && jm.Progress.String() != "" { //disable progressbar in non-terminal
+		return nil
+	}
+	// Optional prefix fields: timestamp (nano precision preferred), ID, origin.
+	if jm.TimeNano != 0 {
+		fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(jsonlog.RFC3339NanoFixed))
+	} else if jm.Time != 0 {
+		fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(jsonlog.RFC3339NanoFixed))
+	}
+	if jm.ID != "" {
+		fmt.Fprintf(out, "%s: ", jm.ID)
+	}
+	if jm.From != "" {
+		fmt.Fprintf(out, "(from %s) ", jm.From)
+	}
+	if jm.Progress != nil && isTerminal {
+		fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl)
+	} else if jm.ProgressMessage != "" { //deprecated
+		fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl)
+	} else if jm.Stream != "" {
+		fmt.Fprintf(out, "%s%s", jm.Stream, endl)
+	} else {
+		fmt.Fprintf(out, "%s%s\n", jm.Status, endl)
+	}
+	return nil
+}
+
+// DisplayJSONMessagesStream displays a json message stream from `in` to `out`, `isTerminal`
+// describes if `out` is a terminal. If this is the case, it will print `\n` at the end of
+// each line and move the cursor while displaying. Messages carrying Aux data
+// are passed to auxCallback (if non-nil) instead of being displayed.
+func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool, auxCallback func(*json.RawMessage)) error {
+	var (
+		dec = json.NewDecoder(in)
+		// ids maps a message ID to the terminal line it was printed on, so
+		// later progress updates for that ID can rewrite the same line.
+		ids = make(map[string]int)
+	)
+	for {
+		diff := 0
+		var jm JSONMessage
+		if err := dec.Decode(&jm); err != nil {
+			if err == io.EOF {
+				break
+			}
+			return err
+		}
+
+		if jm.Aux != nil {
+			if auxCallback != nil {
+				auxCallback(jm.Aux)
+			}
+			continue
+		}
+
+		if jm.Progress != nil {
+			jm.Progress.terminalFd = terminalFd
+		}
+		if jm.ID != "" && (jm.Progress != nil || jm.ProgressMessage != "") {
+			line, ok := ids[jm.ID]
+			if !ok {
+				// NOTE: This approach of using len(id) to
+				// figure out the number of lines of history
+				// only works as long as we clear the history
+				// when we output something that's not
+				// accounted for in the map, such as a line
+				// with no ID.
+				line = len(ids)
+				ids[jm.ID] = line
+				if isTerminal {
+					fmt.Fprintf(out, "\n")
+				}
+			}
+			diff = len(ids) - line
+			if isTerminal && diff > 0 {
+				// <ESC>[nA = move cursor up n lines, to this ID's line.
+				fmt.Fprintf(out, "%c[%dA", 27, diff)
+			}
+		} else {
+			// When outputting something that isn't progress
+			// output, clear the history of previous lines. We
+			// don't want progress entries from some previous
+			// operation to be updated (for example, pull -a
+			// with multiple tags).
+			ids = make(map[string]int)
+		}
+		err := jm.Display(out, isTerminal)
+		if jm.ID != "" && isTerminal && diff > 0 {
+			// <ESC>[nB = move cursor back down to the bottom of the output.
+			fmt.Fprintf(out, "%c[%dB", 27, diff)
+		}
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+type stream interface {
+	io.Writer
+	FD() uintptr
+	IsTerminal() bool
+}
+
+// DisplayJSONMessagesToStream prints json messages to the output stream,
+// deriving the terminal fd and terminal-ness from the stream itself.
+func DisplayJSONMessagesToStream(in io.Reader, stream stream, auxCallback func(*json.RawMessage)) error {
+	return DisplayJSONMessagesStream(in, stream, stream.FD(), stream.IsTerminal(), auxCallback)
+}
diff --git a/vendor/github.com/docker/docker/pkg/longpath/longpath.go b/vendor/github.com/docker/docker/pkg/longpath/longpath.go
new file mode 100644
index 0000000000000000000000000000000000000000..9b15bfff4c9a6b09ae2ce7e184fce9f07923728c
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/longpath/longpath.go
@@ -0,0 +1,26 @@
+// longpath introduces some constants and helper functions for handling long paths
+// in Windows, which are expected to be prepended with `\\?\` and followed by either
+// a drive letter, a UNC server\share, or a volume identifier.
+
+package longpath
+
+import (
+	"strings"
+)
+
+// Prefix is the longpath prefix for Windows file paths.
+const Prefix = `\\?\`
+
+// AddPrefix will add the Windows long path prefix to the path provided if
+// it does not already have it. UNC paths (`\\server\share`) get the special
+// `\\?\UNC\` form instead.
+func AddPrefix(path string) string {
+	if strings.HasPrefix(path, Prefix) {
+		// Already long-path formatted; nothing to do.
+		return path
+	}
+	if strings.HasPrefix(path, `\\`) {
+		// This is a UNC path, so we need to add 'UNC' to the path as well:
+		// \\server\share -> \\?\UNC\server\share
+		return Prefix + `UNC` + path[1:]
+	}
+	return Prefix + path
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/flags.go b/vendor/github.com/docker/docker/pkg/mount/flags.go
new file mode 100644
index 0000000000000000000000000000000000000000..607dbed43a0a10a88ceddaac84ac862df9326d87
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/flags.go
@@ -0,0 +1,149 @@
+package mount
+
+import (
+	"fmt"
+	"strings"
+)
+
+var flags = map[string]struct {
+	clear bool
+	flag  int
+}{
+	"defaults":      {false, 0},
+	"ro":            {false, RDONLY},
+	"rw":            {true, RDONLY},
+	"suid":          {true, NOSUID},
+	"nosuid":        {false, NOSUID},
+	"dev":           {true, NODEV},
+	"nodev":         {false, NODEV},
+	"exec":          {true, NOEXEC},
+	"noexec":        {false, NOEXEC},
+	"sync":          {false, SYNCHRONOUS},
+	"async":         {true, SYNCHRONOUS},
+	"dirsync":       {false, DIRSYNC},
+	"remount":       {false, REMOUNT},
+	"mand":          {false, MANDLOCK},
+	"nomand":        {true, MANDLOCK},
+	"atime":         {true, NOATIME},
+	"noatime":       {false, NOATIME},
+	"diratime":      {true, NODIRATIME},
+	"nodiratime":    {false, NODIRATIME},
+	"bind":          {false, BIND},
+	"rbind":         {false, RBIND},
+	"unbindable":    {false, UNBINDABLE},
+	"runbindable":   {false, RUNBINDABLE},
+	"private":       {false, PRIVATE},
+	"rprivate":      {false, RPRIVATE},
+	"shared":        {false, SHARED},
+	"rshared":       {false, RSHARED},
+	"slave":         {false, SLAVE},
+	"rslave":        {false, RSLAVE},
+	"relatime":      {false, RELATIME},
+	"norelatime":    {true, RELATIME},
+	"strictatime":   {false, STRICTATIME},
+	"nostrictatime": {true, STRICTATIME},
+}
+
+var validFlags = map[string]bool{
+	"":          true,
+	"size":      true,
+	"mode":      true,
+	"uid":       true,
+	"gid":       true,
+	"nr_inodes": true,
+	"nr_blocks": true,
+	"mpol":      true,
+}
+
+var propagationFlags = map[string]bool{
+	"bind":        true,
+	"rbind":       true,
+	"unbindable":  true,
+	"runbindable": true,
+	"private":     true,
+	"rprivate":    true,
+	"shared":      true,
+	"rshared":     true,
+	"slave":       true,
+	"rslave":      true,
+}
+
+// MergeTmpfsOptions merge mount options to make sure there is no duplicate.
+// Options are scanned in reverse so that a later option wins over an earlier
+// duplicate; "defaults" entries are dropped. Relative order of the surviving
+// options is preserved. Returns an error for any unknown data option.
+func MergeTmpfsOptions(options []string) ([]string, error) {
+	// We use collisions maps to remove duplicates.
+	// For flag, the key is the flag value (the key for propagation flag is -1)
+	// For data=value, the key is the data
+	flagCollisions := map[int]bool{}
+	dataCollisions := map[string]bool{}
+
+	var newOptions []string
+	// We process in reverse order
+	for i := len(options) - 1; i >= 0; i-- {
+		option := options[i]
+		if option == "defaults" {
+			continue
+		}
+		if f, ok := flags[option]; ok && f.flag != 0 {
+			// There is only one propagation mode
+			key := f.flag
+			if propagationFlags[option] {
+				key = -1
+			}
+			// Check to see if there is collision for flag
+			if !flagCollisions[key] {
+				// We prepend the option and add to collision map
+				newOptions = append([]string{option}, newOptions...)
+				flagCollisions[key] = true
+			}
+			continue
+		}
+		// Not a known flag: must be a data option of the form key=value with
+		// a recognized key.
+		opt := strings.SplitN(option, "=", 2)
+		if len(opt) != 2 || !validFlags[opt[0]] {
+			return nil, fmt.Errorf("Invalid tmpfs option %q", opt)
+		}
+		if !dataCollisions[opt[0]] {
+			// We prepend the option and add to collision map
+			newOptions = append([]string{option}, newOptions...)
+			dataCollisions[opt[0]] = true
+		}
+	}
+
+	return newOptions, nil
+}
+
+// Parse fstab type mount options into mount() flags
+// and device specific data. Unrecognized options (and options whose flag is 0
+// on this platform) are collected verbatim into the comma-joined data string.
+func parseOptions(options string) (int, string) {
+	var (
+		flag int
+		data []string
+	)
+
+	for _, o := range strings.Split(options, ",") {
+		// If the option does not exist in the flags table or the flag
+		// is not supported on the platform,
+		// then it is a data value for a specific fs type
+		if f, exists := flags[o]; exists && f.flag != 0 {
+			if f.clear {
+				// "clear" options (rw, suid, exec, ...) remove their flag bit.
+				flag &= ^f.flag
+			} else {
+				flag |= f.flag
+			}
+		} else {
+			data = append(data, o)
+		}
+	}
+	return flag, strings.Join(data, ",")
+}
+
+// ParseTmpfsOptions parse fstab type mount options into flags and data,
+// validating that every non-flag option uses a key known to tmpfs.
+// NOTE(review): unlike MergeTmpfsOptions, this does not require a "key=value"
+// shape (no len(opt) == 2 check), and the %q verb prints the split slice
+// rather than the original option string — confirm both are intentional.
+func ParseTmpfsOptions(options string) (int, string, error) {
+	flags, data := parseOptions(options)
+	for _, o := range strings.Split(data, ",") {
+		opt := strings.SplitN(o, "=", 2)
+		if !validFlags[opt[0]] {
+			return 0, "", fmt.Errorf("Invalid tmpfs option %q", opt)
+		}
+	}
+	return flags, data, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go b/vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go
new file mode 100644
index 0000000000000000000000000000000000000000..f166cb2f77864fec5a9be945a2264e783585fd59
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go
@@ -0,0 +1,48 @@
+// +build freebsd,cgo
+
+package mount
+
+/*
+#include <sys/mount.h>
+*/
+import "C"
+
+const (
+	// RDONLY will mount the filesystem as read-only.
+	RDONLY = C.MNT_RDONLY
+
+	// NOSUID will not allow set-user-identifier or set-group-identifier bits to
+	// take effect.
+	NOSUID = C.MNT_NOSUID
+
+	// NOEXEC will not allow execution of any binaries on the mounted file system.
+	NOEXEC = C.MNT_NOEXEC
+
+	// SYNCHRONOUS will allow any I/O to the file system to be done synchronously.
+	SYNCHRONOUS = C.MNT_SYNCHRONOUS
+
+	// NOATIME will not update the file access time when reading from a file.
+	NOATIME = C.MNT_NOATIME
+)
+
+// These flags are unsupported.
+const (
+	BIND        = 0
+	DIRSYNC     = 0
+	MANDLOCK    = 0
+	NODEV       = 0
+	NODIRATIME  = 0
+	UNBINDABLE  = 0
+	RUNBINDABLE = 0
+	PRIVATE     = 0
+	RPRIVATE    = 0
+	SHARED      = 0
+	RSHARED     = 0
+	SLAVE       = 0
+	RSLAVE      = 0
+	RBIND       = 0
+	RELATIVE    = 0
+	RELATIME    = 0
+	REMOUNT     = 0
+	STRICTATIME = 0
+)
diff --git a/vendor/github.com/docker/docker/pkg/mount/flags_linux.go b/vendor/github.com/docker/docker/pkg/mount/flags_linux.go
new file mode 100644
index 0000000000000000000000000000000000000000..dc696dce9075216b586db78059f3a06e44c75b1b
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/flags_linux.go
@@ -0,0 +1,85 @@
+package mount
+
+import (
+	"syscall"
+)
+
+const (
+	// RDONLY will mount the file system read-only.
+	RDONLY = syscall.MS_RDONLY
+
+	// NOSUID will not allow set-user-identifier or set-group-identifier bits to
+	// take effect.
+	NOSUID = syscall.MS_NOSUID
+
+	// NODEV will not interpret character or block special devices on the file
+	// system.
+	NODEV = syscall.MS_NODEV
+
+	// NOEXEC will not allow execution of any binaries on the mounted file system.
+	NOEXEC = syscall.MS_NOEXEC
+
+	// SYNCHRONOUS will allow I/O to the file system to be done synchronously.
+	SYNCHRONOUS = syscall.MS_SYNCHRONOUS
+
+	// DIRSYNC will force all directory updates within the file system to be done
+	// synchronously. This affects the following system calls: create, link,
+	// unlink, symlink, mkdir, rmdir, mknod and rename.
+	DIRSYNC = syscall.MS_DIRSYNC
+
+	// REMOUNT will attempt to remount an already-mounted file system. This is
+	// commonly used to change the mount flags for a file system, especially to
+	// make a readonly file system writeable. It does not change device or mount
+	// point.
+	REMOUNT = syscall.MS_REMOUNT
+
+	// MANDLOCK will force mandatory locks on a filesystem.
+	MANDLOCK = syscall.MS_MANDLOCK
+
+	// NOATIME will not update the file access time when reading from a file.
+	NOATIME = syscall.MS_NOATIME
+
+	// NODIRATIME will not update the directory access time.
+	NODIRATIME = syscall.MS_NODIRATIME
+
+	// BIND remounts a subtree somewhere else.
+	BIND = syscall.MS_BIND
+
+	// RBIND remounts a subtree and all possible submounts somewhere else.
+	RBIND = syscall.MS_BIND | syscall.MS_REC
+
+	// UNBINDABLE creates a mount which cannot be cloned through a bind operation.
+	UNBINDABLE = syscall.MS_UNBINDABLE
+
+	// RUNBINDABLE marks the entire mount tree as UNBINDABLE.
+	RUNBINDABLE = syscall.MS_UNBINDABLE | syscall.MS_REC
+
+	// PRIVATE creates a mount which carries no propagation abilities.
+	PRIVATE = syscall.MS_PRIVATE
+
+	// RPRIVATE marks the entire mount tree as PRIVATE.
+	RPRIVATE = syscall.MS_PRIVATE | syscall.MS_REC
+
+	// SLAVE creates a mount which receives propagation from its master, but not
+	// vice versa.
+	SLAVE = syscall.MS_SLAVE
+
+	// RSLAVE marks the entire mount tree as SLAVE.
+	RSLAVE = syscall.MS_SLAVE | syscall.MS_REC
+
+	// SHARED creates a mount which provides the ability to create mirrors of
+	// that mount such that mounts and unmounts within any of the mirrors
+	// propagate to the other mirrors.
+	SHARED = syscall.MS_SHARED
+
+	// RSHARED marks the entire mount tree as SHARED.
+	RSHARED = syscall.MS_SHARED | syscall.MS_REC
+
+	// RELATIME updates inode access times relative to modify or change time.
+	RELATIME = syscall.MS_RELATIME
+
+	// STRICTATIME allows to explicitly request full atime updates.  This makes
+	// it possible for the kernel to default to relatime or noatime but still
+	// allow userspace to override it.
+	STRICTATIME = syscall.MS_STRICTATIME
+)
diff --git a/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go b/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go
new file mode 100644
index 0000000000000000000000000000000000000000..5564f7b3cdeacca058a40f9acf376e1d1262ed26
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go
@@ -0,0 +1,30 @@
+// +build !linux,!freebsd freebsd,!cgo solaris,!cgo
+
+package mount
+
+// These flags are unsupported.
+const (
+	BIND        = 0
+	DIRSYNC     = 0
+	MANDLOCK    = 0
+	NOATIME     = 0
+	NODEV       = 0
+	NODIRATIME  = 0
+	NOEXEC      = 0
+	NOSUID      = 0
+	UNBINDABLE  = 0
+	RUNBINDABLE = 0
+	PRIVATE     = 0
+	RPRIVATE    = 0
+	SHARED      = 0
+	RSHARED     = 0
+	SLAVE       = 0
+	RSLAVE      = 0
+	RBIND       = 0
+	RELATIME    = 0
+	RELATIVE    = 0
+	REMOUNT     = 0
+	STRICTATIME = 0
+	SYNCHRONOUS = 0
+	RDONLY      = 0
+)
diff --git a/vendor/github.com/docker/docker/pkg/mount/mount.go b/vendor/github.com/docker/docker/pkg/mount/mount.go
new file mode 100644
index 0000000000000000000000000000000000000000..66ac4bf4723ef2c3b01e9fdfa65bbc5c619cefb3
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/mount.go
@@ -0,0 +1,74 @@
+package mount
+
+import (
+	"time"
+)
+
+// GetMounts retrieves a list of mounts for the current running process.
+func GetMounts() ([]*Info, error) {
+	return parseMountTable()
+}
+
+// Mounted determines if a specified mountpoint has been mounted.
+// On Linux it looks at /proc/self/mountinfo and on Solaris at mnttab.
+// The comparison is an exact string match on the mountpoint path; no
+// normalization (symlink resolution, trailing slashes) is performed.
+func Mounted(mountpoint string) (bool, error) {
+	entries, err := parseMountTable()
+	if err != nil {
+		return false, err
+	}
+
+	// Search the table for the mountpoint
+	for _, e := range entries {
+		if e.Mountpoint == mountpoint {
+			return true, nil
+		}
+	}
+	return false, nil
+}
+
+// Mount will mount filesystem according to the specified configuration, on the
+// condition that the target path is *not* already mounted. Options must be
+// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
+// flags.go for supported option flags.
+// If the target is already mounted (and REMOUNT was not requested), Mount
+// silently returns nil.
+func Mount(device, target, mType, options string) error {
+	flag, _ := parseOptions(options)
+	if flag&REMOUNT != REMOUNT {
+		// Skip the mount when the target is already mounted; err is nil in
+		// the already-mounted case, so this returns nil.
+		if mounted, err := Mounted(target); err != nil || mounted {
+			return err
+		}
+	}
+	return ForceMount(device, target, mType, options)
+}
+
+// ForceMount will mount a filesystem according to the specified configuration,
+// *regardless* if the target path is not already mounted. Options must be
+// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
+// flags.go for supported option flags.
+func ForceMount(device, target, mType, options string) error {
+	flag, data := parseOptions(options)
+	// mount's error is returned unchanged, so the former
+	// `if err := ...; err != nil { return err }; return nil` collapses to a
+	// single return.
+	return mount(device, target, mType, uintptr(flag), data)
+}
+
+// Unmount will unmount the target filesystem, so long as it is mounted.
+// If the target is not mounted, it returns nil without doing anything.
+func Unmount(target string) error {
+	// err is nil when the target simply isn't mounted, so this returns nil.
+	if mounted, err := Mounted(target); err != nil || !mounted {
+		return err
+	}
+	return ForceUnmount(target)
+}
+
+// ForceUnmount will force an unmount of the target filesystem, regardless if
+// it is mounted or not. It retries up to 10 times at 100ms intervals and
+// returns the last unmount error if all attempts fail.
+func ForceUnmount(target string) (err error) {
+	// Simple retry logic for unmount
+	for i := 0; i < 10; i++ {
+		if err = unmount(target, 0); err == nil {
+			return nil
+		}
+		time.Sleep(100 * time.Millisecond)
+	}
+	return
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go b/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go
new file mode 100644
index 0000000000000000000000000000000000000000..bb870e6f59b9f2dcd50cf9469b6c7edb11dab281
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go
@@ -0,0 +1,59 @@
+package mount
+
+/*
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/_iovec.h>
+#include <sys/mount.h>
+#include <sys/param.h>
+*/
+import "C"
+
+import (
+	"fmt"
+	"strings"
+	"syscall"
+	"unsafe"
+)
+
+func allocateIOVecs(options []string) []C.struct_iovec {
+	out := make([]C.struct_iovec, len(options))
+	for i, option := range options {
+		out[i].iov_base = unsafe.Pointer(C.CString(option))
+		out[i].iov_len = C.size_t(len(option) + 1)
+	}
+	return out
+}
+
+func mount(device, target, mType string, flag uintptr, data string) error {
+	isNullFS := false
+
+	xs := strings.Split(data, ",")
+	for _, x := range xs {
+		if x == "bind" {
+			isNullFS = true
+		}
+	}
+
+	options := []string{"fspath", target}
+	if isNullFS {
+		options = append(options, "fstype", "nullfs", "target", device)
+	} else {
+		options = append(options, "fstype", mType, "from", device)
+	}
+	rawOptions := allocateIOVecs(options)
+	for _, rawOption := range rawOptions {
+		defer C.free(rawOption.iov_base)
+	}
+
+	if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 {
+		reason := C.GoString(C.strerror(*C.__error()))
+		return fmt.Errorf("Failed to call nmount: %s", reason)
+	}
+	return nil
+}
+
+func unmount(target string, flag int) error {
+	return syscall.Unmount(target, flag)
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go b/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go
new file mode 100644
index 0000000000000000000000000000000000000000..dd4280c7778626251ca049f66d18e4d8409ebda2
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go
@@ -0,0 +1,21 @@
+package mount
+
+import (
+	"syscall"
+)
+
+func mount(device, target, mType string, flag uintptr, data string) error {
+	if err := syscall.Mount(device, target, mType, flag, data); err != nil {
+		return err
+	}
+
+	// If we have a bind mount or remount, remount...
+	if flag&syscall.MS_BIND == syscall.MS_BIND && flag&syscall.MS_RDONLY == syscall.MS_RDONLY {
+		return syscall.Mount(device, target, mType, flag|syscall.MS_REMOUNT, data)
+	}
+	return nil
+}
+
+func unmount(target string, flag int) error {
+	return syscall.Unmount(target, flag)
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go b/vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go
new file mode 100644
index 0000000000000000000000000000000000000000..c684aa81fcc1d47efdf075881221f4b76f5409f8
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go
@@ -0,0 +1,33 @@
+// +build solaris,cgo
+
+package mount
+
+import (
+	"golang.org/x/sys/unix"
+	"unsafe"
+)
+
+// #include <stdlib.h>
+// #include <stdio.h>
+// #include <sys/mount.h>
+// int Mount(const char *spec, const char *dir, int mflag,
+// char *fstype, char *dataptr, int datalen, char *optptr, int optlen) {
+//     return mount(spec, dir, mflag, fstype, dataptr, datalen, optptr, optlen);
+// }
+import "C"
+
+func mount(device, target, mType string, flag uintptr, data string) error {
+	spec := C.CString(device)
+	dir := C.CString(target)
+	fstype := C.CString(mType)
+	_, err := C.Mount(spec, dir, C.int(flag), fstype, nil, 0, nil, 0)
+	C.free(unsafe.Pointer(spec))
+	C.free(unsafe.Pointer(dir))
+	C.free(unsafe.Pointer(fstype))
+	return err
+}
+
+func unmount(target string, flag int) error {
+	err := unix.Unmount(target, flag)
+	return err
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go b/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go
new file mode 100644
index 0000000000000000000000000000000000000000..a2a3bb457fcb9eb9fc4d3109670ff0c65ab0bb07
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go
@@ -0,0 +1,11 @@
+// +build !linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo
+
+package mount
+
+func mount(device, target, mType string, flag uintptr, data string) error {
+	panic("Not implemented")
+}
+
+func unmount(target string, flag int) error {
+	panic("Not implemented")
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo.go
new file mode 100644
index 0000000000000000000000000000000000000000..e3fc3535e934f37f00d4f22db5f954fccc65397f
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo.go
@@ -0,0 +1,40 @@
+package mount
+
+// Info reveals information about a particular mounted filesystem. This
+// struct is populated from the content in the /proc/<pid>/mountinfo file.
+type Info struct {
+	// ID is a unique identifier of the mount (may be reused after umount).
+	ID int
+
+	// Parent indicates the ID of the mount parent (or of self for the top of the
+	// mount tree).
+	Parent int
+
+	// Major indicates one half of the device ID which identifies the device class.
+	Major int
+
+	// Minor indicates one half of the device ID which identifies a specific
+	// instance of device.
+	Minor int
+
+	// Root of the mount within the filesystem.
+	Root string
+
+	// Mountpoint indicates the mount point relative to the process's root.
+	Mountpoint string
+
+	// Opts represents mount-specific options.
+	Opts string
+
+	// Optional represents optional fields.
+	Optional string
+
+	// Fstype indicates the type of filesystem, such as EXT3.
+	Fstype string
+
+	// Source indicates filesystem specific information or "none".
+	Source string
+
+	// VfsOpts represents per super block options.
+	VfsOpts string
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go
new file mode 100644
index 0000000000000000000000000000000000000000..4f32edcd906adda8167e746d4ed762ff61a487c6
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go
@@ -0,0 +1,41 @@
+package mount
+
+/*
+#include <sys/param.h>
+#include <sys/ucred.h>
+#include <sys/mount.h>
+*/
+import "C"
+
+import (
+	"fmt"
+	"reflect"
+	"unsafe"
+)
+
+// parseMountTable gathers information about mounted filesystems via
+// getmntinfo(3), since /proc/self/mountinfo is not available on FreeBSD.
+func parseMountTable() ([]*Info, error) {
+	var rawEntries *C.struct_statfs
+
+	count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT))
+	if count == 0 {
+		return nil, fmt.Errorf("Failed to call getmntinfo")
+	}
+
+	var entries []C.struct_statfs
+	header := (*reflect.SliceHeader)(unsafe.Pointer(&entries))
+	header.Cap = count
+	header.Len = count
+	header.Data = uintptr(unsafe.Pointer(rawEntries))
+
+	var out []*Info
+	for _, entry := range entries {
+		var mountinfo Info
+		mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0])
+		mountinfo.Source = C.GoString(&entry.f_mntfromname[0])
+		mountinfo.Fstype = C.GoString(&entry.f_fstypename[0])
+		out = append(out, &mountinfo)
+	}
+	return out, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go
new file mode 100644
index 0000000000000000000000000000000000000000..be69fee1d7bbf0383ae1c1dd6564652e1cfaa818
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go
@@ -0,0 +1,95 @@
+// +build linux
+
+package mount
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"os"
+	"strings"
+)
+
+const (
+	/* 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
+	   (1)(2)(3)   (4)   (5)      (6)      (7)   (8) (9)   (10)         (11)
+
+	   (1) mount ID:  unique identifier of the mount (may be reused after umount)
+	   (2) parent ID:  ID of parent (or of self for the top of the mount tree)
+	   (3) major:minor:  value of st_dev for files on filesystem
+	   (4) root:  root of the mount within the filesystem
+	   (5) mount point:  mount point relative to the process's root
+	   (6) mount options:  per mount options
+	   (7) optional fields:  zero or more fields of the form "tag[:value]"
+	   (8) separator:  marks the end of the optional fields
+	   (9) filesystem type:  name of filesystem of the form "type[.subtype]"
+	   (10) mount source:  filesystem specific information or "none"
+	   (11) super options:  per super block options*/
+	mountinfoFormat = "%d %d %d:%d %s %s %s %s"
+)
+
+// Parse /proc/self/mountinfo because comparing Dev and ino does not work from
+// bind mounts
+func parseMountTable() ([]*Info, error) {
+	f, err := os.Open("/proc/self/mountinfo")
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	return parseInfoFile(f)
+}
+
+func parseInfoFile(r io.Reader) ([]*Info, error) {
+	var (
+		s   = bufio.NewScanner(r)
+		out = []*Info{}
+	)
+
+	for s.Scan() {
+		if err := s.Err(); err != nil {
+			return nil, err
+		}
+
+		var (
+			p              = &Info{}
+			text           = s.Text()
+			optionalFields string
+		)
+
+		if _, err := fmt.Sscanf(text, mountinfoFormat,
+			&p.ID, &p.Parent, &p.Major, &p.Minor,
+			&p.Root, &p.Mountpoint, &p.Opts, &optionalFields); err != nil {
+			return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err)
+		}
+		// Safe as mountinfo encodes mountpoints with spaces as \040.
+		index := strings.Index(text, " - ")
+		postSeparatorFields := strings.Fields(text[index+3:])
+		if len(postSeparatorFields) < 3 {
+			return nil, fmt.Errorf("Error found less than 3 fields post '-' in %q", text)
+		}
+
+		if optionalFields != "-" {
+			p.Optional = optionalFields
+		}
+
+		p.Fstype = postSeparatorFields[0]
+		p.Source = postSeparatorFields[1]
+		p.VfsOpts = strings.Join(postSeparatorFields[2:], " ")
+		out = append(out, p)
+	}
+	return out, nil
+}
+
+// PidMountInfo collects the mounts for a specific process ID. If the process
+// ID is unknown, it is better to use `GetMounts` which will inspect
+// "/proc/self/mountinfo" instead.
+func PidMountInfo(pid int) ([]*Info, error) {
+	f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	return parseInfoFile(f)
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go
new file mode 100644
index 0000000000000000000000000000000000000000..ad9ab57f8b8ef7d888201ed556c2e1100b2204ca
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go
@@ -0,0 +1,37 @@
+// +build solaris,cgo
+
+package mount
+
+/*
+#include <stdio.h>
+#include <sys/mnttab.h>
+*/
+import "C"
+
+import (
+	"fmt"
+)
+
+func parseMountTable() ([]*Info, error) {
+	mnttab := C.fopen(C.CString(C.MNTTAB), C.CString("r"))
+	if mnttab == nil {
+		return nil, fmt.Errorf("Failed to open %s", C.MNTTAB)
+	}
+
+	var out []*Info
+	var mp C.struct_mnttab
+
+	ret := C.getmntent(mnttab, &mp)
+	for ret == 0 {
+		var mountinfo Info
+		mountinfo.Mountpoint = C.GoString(mp.mnt_mountp)
+		mountinfo.Source = C.GoString(mp.mnt_special)
+		mountinfo.Fstype = C.GoString(mp.mnt_fstype)
+		mountinfo.Opts = C.GoString(mp.mnt_mntopts)
+		out = append(out, &mountinfo)
+		ret = C.getmntent(mnttab, &mp)
+	}
+
+	C.fclose(mnttab)
+	return out, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go
new file mode 100644
index 0000000000000000000000000000000000000000..7fbcf19214b9086c3e3391f6a95f0758ec771c09
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go
@@ -0,0 +1,12 @@
+// +build !windows,!linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo
+
+package mount
+
+import (
+	"fmt"
+	"runtime"
+)
+
+func parseMountTable() ([]*Info, error) {
+	return nil, fmt.Errorf("mount.parseMountTable is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go
new file mode 100644
index 0000000000000000000000000000000000000000..dab8a37ed01dca267a96774e37cfa5a6350788d6
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go
@@ -0,0 +1,6 @@
+package mount
+
+func parseMountTable() ([]*Info, error) {
+	// Do NOT return an error!
+	return nil, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go
new file mode 100644
index 0000000000000000000000000000000000000000..8ceec84bc6c8a53b686a4c0903124a7540901dce
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go
@@ -0,0 +1,69 @@
+// +build linux
+
+package mount
+
+// MakeShared ensures a mounted filesystem has the SHARED mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakeShared(mountPoint string) error {
+	return ensureMountedAs(mountPoint, "shared")
+}
+
+// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakeRShared(mountPoint string) error {
+	return ensureMountedAs(mountPoint, "rshared")
+}
+
+// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakePrivate(mountPoint string) error {
+	return ensureMountedAs(mountPoint, "private")
+}
+
+// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option
+// enabled. See the supported options in flags.go for further reference.
+func MakeRPrivate(mountPoint string) error {
+	return ensureMountedAs(mountPoint, "rprivate")
+}
+
+// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakeSlave(mountPoint string) error {
+	return ensureMountedAs(mountPoint, "slave")
+}
+
+// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakeRSlave(mountPoint string) error {
+	return ensureMountedAs(mountPoint, "rslave")
+}
+
+// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option
+// enabled. See the supported options in flags.go for further reference.
+func MakeUnbindable(mountPoint string) error {
+	return ensureMountedAs(mountPoint, "unbindable")
+}
+
+// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount
+// option enabled. See the supported options in flags.go for further reference.
+func MakeRUnbindable(mountPoint string) error {
+	return ensureMountedAs(mountPoint, "runbindable")
+}
+
+func ensureMountedAs(mountPoint, options string) error {
+	mounted, err := Mounted(mountPoint)
+	if err != nil {
+		return err
+	}
+
+	if !mounted {
+		if err := Mount(mountPoint, mountPoint, "none", "bind,rw"); err != nil {
+			return err
+		}
+	}
+	if _, err = Mounted(mountPoint); err != nil {
+		return err
+	}
+
+	return ForceMount("", mountPoint, "none", options)
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_solaris.go b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_solaris.go
new file mode 100644
index 0000000000000000000000000000000000000000..09f6b03cbc0c71a01ca8bc95cfeacd47aaf2a355
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_solaris.go
@@ -0,0 +1,58 @@
+// +build solaris
+
+package mount
+
+// MakeShared ensures a mounted filesystem has the SHARED mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakeShared(mountPoint string) error {
+	return ensureMountedAs(mountPoint, "shared")
+}
+
+// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakeRShared(mountPoint string) error {
+	return ensureMountedAs(mountPoint, "rshared")
+}
+
+// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakePrivate(mountPoint string) error {
+	return ensureMountedAs(mountPoint, "private")
+}
+
+// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option
+// enabled. See the supported options in flags.go for further reference.
+func MakeRPrivate(mountPoint string) error {
+	return ensureMountedAs(mountPoint, "rprivate")
+}
+
+// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakeSlave(mountPoint string) error {
+	return ensureMountedAs(mountPoint, "slave")
+}
+
+// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakeRSlave(mountPoint string) error {
+	return ensureMountedAs(mountPoint, "rslave")
+}
+
+// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option
+// enabled. See the supported options in flags.go for further reference.
+func MakeUnbindable(mountPoint string) error {
+	return ensureMountedAs(mountPoint, "unbindable")
+}
+
+// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount
+// option enabled. See the supported options in flags.go for further reference.
+func MakeRUnbindable(mountPoint string) error {
+	return ensureMountedAs(mountPoint, "runbindable")
+}
+
+func ensureMountedAs(mountPoint, options string) error {
+	// TODO: Solaris does not support bind mounts.
+	// Evaluate lofs and also look at the relevant
+	// mount flags to be supported.
+	return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/plugingetter/getter.go b/vendor/github.com/docker/docker/pkg/plugingetter/getter.go
new file mode 100644
index 0000000000000000000000000000000000000000..dde5f660358c4be7fdc6fd6a95cadb844827372c
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/plugingetter/getter.go
@@ -0,0 +1,35 @@
+package plugingetter
+
+import "github.com/docker/docker/pkg/plugins"
+
+const (
+	// LOOKUP doesn't update RefCount
+	LOOKUP = 0
+	// ACQUIRE increments RefCount
+	ACQUIRE = 1
+	// RELEASE decrements RefCount
+	RELEASE = -1
+)
+
+// CompatPlugin is an abstraction to handle both v2(new) and v1(legacy) plugins.
+type CompatPlugin interface {
+	Client() *plugins.Client
+	Name() string
+	BasePath() string
+	IsV1() bool
+}
+
+// CountedPlugin is a plugin which is reference counted.
+type CountedPlugin interface {
+	Acquire()
+	Release()
+	CompatPlugin
+}
+
+// PluginGetter is the interface implemented by Store
+type PluginGetter interface {
+	Get(name, capability string, mode int) (CompatPlugin, error)
+	GetAllByCap(capability string) ([]CompatPlugin, error)
+	GetAllManagedPluginsByCap(capability string) []CompatPlugin
+	Handle(capability string, callback func(string, *plugins.Client))
+}
diff --git a/vendor/github.com/docker/docker/pkg/plugins/client.go b/vendor/github.com/docker/docker/pkg/plugins/client.go
new file mode 100644
index 0000000000000000000000000000000000000000..e8e730eb58e58d4924fd1393dcb090b4bd6da035
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/plugins/client.go
@@ -0,0 +1,205 @@
+package plugins
+
+import (
+	"bytes"
+	"encoding/json"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/pkg/plugins/transport"
+	"github.com/docker/go-connections/sockets"
+	"github.com/docker/go-connections/tlsconfig"
+)
+
+const (
+	defaultTimeOut = 30
+)
+
+func newTransport(addr string, tlsConfig *tlsconfig.Options) (transport.Transport, error) {
+	tr := &http.Transport{}
+
+	if tlsConfig != nil {
+		c, err := tlsconfig.Client(*tlsConfig)
+		if err != nil {
+			return nil, err
+		}
+		tr.TLSClientConfig = c
+	}
+
+	u, err := url.Parse(addr)
+	if err != nil {
+		return nil, err
+	}
+	socket := u.Host
+	if socket == "" {
+		// valid local socket addresses have the host empty.
+		socket = u.Path
+	}
+	if err := sockets.ConfigureTransport(tr, u.Scheme, socket); err != nil {
+		return nil, err
+	}
+	scheme := httpScheme(u)
+
+	return transport.NewHTTPTransport(tr, scheme, socket), nil
+}
+
+// NewClient creates a new plugin client (http).
+func NewClient(addr string, tlsConfig *tlsconfig.Options) (*Client, error) {
+	clientTransport, err := newTransport(addr, tlsConfig)
+	if err != nil {
+		return nil, err
+	}
+	return newClientWithTransport(clientTransport, 0), nil
+}
+
+// NewClientWithTimeout creates a new plugin client (http).
+func NewClientWithTimeout(addr string, tlsConfig *tlsconfig.Options, timeoutInSecs int) (*Client, error) {
+	clientTransport, err := newTransport(addr, tlsConfig)
+	if err != nil {
+		return nil, err
+	}
+	return newClientWithTransport(clientTransport, timeoutInSecs), nil
+}
+
+// newClientWithTransport creates a new plugin client with a given transport.
+func newClientWithTransport(tr transport.Transport, timeoutInSecs int) *Client {
+	return &Client{
+		http: &http.Client{
+			Transport: tr,
+			Timeout:   time.Duration(timeoutInSecs) * time.Second,
+		},
+		requestFactory: tr,
+	}
+}
+
+// Client represents a plugin client.
+type Client struct {
+	http           *http.Client // http client to use
+	requestFactory transport.RequestFactory
+}
+
+// Call calls the specified method with the specified arguments for the plugin.
+// It will retry for 30 seconds if a failure occurs when calling.
+func (c *Client) Call(serviceMethod string, args interface{}, ret interface{}) error {
+	var buf bytes.Buffer
+	if args != nil {
+		if err := json.NewEncoder(&buf).Encode(args); err != nil {
+			return err
+		}
+	}
+	body, err := c.callWithRetry(serviceMethod, &buf, true)
+	if err != nil {
+		return err
+	}
+	defer body.Close()
+	if ret != nil {
+		if err := json.NewDecoder(body).Decode(&ret); err != nil {
+			logrus.Errorf("%s: error reading plugin resp: %v", serviceMethod, err)
+			return err
+		}
+	}
+	return nil
+}
+
+// Stream calls the specified method with the specified arguments for the plugin and returns the response body
+func (c *Client) Stream(serviceMethod string, args interface{}) (io.ReadCloser, error) {
+	var buf bytes.Buffer
+	if err := json.NewEncoder(&buf).Encode(args); err != nil {
+		return nil, err
+	}
+	return c.callWithRetry(serviceMethod, &buf, true)
+}
+
+// SendFile calls the specified method, and passes through the IO stream
+func (c *Client) SendFile(serviceMethod string, data io.Reader, ret interface{}) error {
+	body, err := c.callWithRetry(serviceMethod, data, true)
+	if err != nil {
+		return err
+	}
+	defer body.Close()
+	if err := json.NewDecoder(body).Decode(&ret); err != nil {
+		logrus.Errorf("%s: error reading plugin resp: %v", serviceMethod, err)
+		return err
+	}
+	return nil
+}
+
+func (c *Client) callWithRetry(serviceMethod string, data io.Reader, retry bool) (io.ReadCloser, error) {
+	req, err := c.requestFactory.NewRequest(serviceMethod, data)
+	if err != nil {
+		return nil, err
+	}
+
+	var retries int
+	start := time.Now()
+
+	for {
+		resp, err := c.http.Do(req)
+		if err != nil {
+			if !retry {
+				return nil, err
+			}
+
+			timeOff := backoff(retries)
+			if abort(start, timeOff) {
+				return nil, err
+			}
+			retries++
+			logrus.Warnf("Unable to connect to plugin: %s%s: %v, retrying in %v", req.URL.Host, req.URL.Path, err, timeOff)
+			time.Sleep(timeOff)
+			continue
+		}
+
+		if resp.StatusCode != http.StatusOK {
+			b, err := ioutil.ReadAll(resp.Body)
+			resp.Body.Close()
+			if err != nil {
+				return nil, &statusError{resp.StatusCode, serviceMethod, err.Error()}
+			}
+
+			// Plugins' Response(s) should have an Err field indicating what went
+			// wrong. Try to unmarshal into ResponseErr. Otherwise fallback to just
+			// return the string(body)
+			type responseErr struct {
+				Err string
+			}
+			remoteErr := responseErr{}
+			if err := json.Unmarshal(b, &remoteErr); err == nil {
+				if remoteErr.Err != "" {
+					return nil, &statusError{resp.StatusCode, serviceMethod, remoteErr.Err}
+				}
+			}
+			// old way...
+			return nil, &statusError{resp.StatusCode, serviceMethod, string(b)}
+		}
+		return resp.Body, nil
+	}
+}
+
+func backoff(retries int) time.Duration {
+	b, max := 1, defaultTimeOut
+	for b < max && retries > 0 {
+		b *= 2
+		retries--
+	}
+	if b > max {
+		b = max
+	}
+	return time.Duration(b) * time.Second
+}
+
+func abort(start time.Time, timeOff time.Duration) bool {
+	return timeOff+time.Since(start) >= time.Duration(defaultTimeOut)*time.Second
+}
+
+func httpScheme(u *url.URL) string {
+	scheme := u.Scheme
+	if scheme != "https" {
+		scheme = "http"
+	}
+	return scheme
+}
diff --git a/vendor/github.com/docker/docker/pkg/plugins/discovery.go b/vendor/github.com/docker/docker/pkg/plugins/discovery.go
new file mode 100644
index 0000000000000000000000000000000000000000..e99581c5733153f4cbead6b1090ddeb9d06dda93
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/plugins/discovery.go
@@ -0,0 +1,131 @@
+package plugins
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"net/url"
+	"os"
+	"path/filepath"
+	"strings"
+	"sync"
+)
+
+var (
+	// ErrNotFound plugin not found
+	ErrNotFound = errors.New("plugin not found")
+	socketsPath = "/run/docker/plugins"
+)
+
+// localRegistry defines a registry that is local (using unix socket).
+type localRegistry struct{}
+
+func newLocalRegistry() localRegistry {
+	return localRegistry{}
+}
+
+// Scan scans all the plugin paths and returns all the names it found
+func Scan() ([]string, error) {
+	var names []string
+	if err := filepath.Walk(socketsPath, func(path string, fi os.FileInfo, err error) error {
+		if err != nil {
+			return nil
+		}
+
+		if fi.Mode()&os.ModeSocket != 0 {
+			name := strings.TrimSuffix(fi.Name(), filepath.Ext(fi.Name()))
+			names = append(names, name)
+		}
+		return nil
+	}); err != nil {
+		return nil, err
+	}
+
+	for _, path := range specsPaths {
+		if err := filepath.Walk(path, func(p string, fi os.FileInfo, err error) error {
+			if err != nil || fi.IsDir() {
+				return nil
+			}
+			name := strings.TrimSuffix(fi.Name(), filepath.Ext(fi.Name()))
+			names = append(names, name)
+			return nil
+		}); err != nil {
+			return nil, err
+		}
+	}
+	return names, nil
+}
+
+// Plugin returns the plugin registered with the given name (or returns an error).
+func (l *localRegistry) Plugin(name string) (*Plugin, error) {
+	socketpaths := pluginPaths(socketsPath, name, ".sock")
+
+	for _, p := range socketpaths {
+		if fi, err := os.Stat(p); err == nil && fi.Mode()&os.ModeSocket != 0 {
+			return NewLocalPlugin(name, "unix://"+p), nil
+		}
+	}
+
+	var txtspecpaths []string
+	for _, p := range specsPaths {
+		txtspecpaths = append(txtspecpaths, pluginPaths(p, name, ".spec")...)
+		txtspecpaths = append(txtspecpaths, pluginPaths(p, name, ".json")...)
+	}
+
+	for _, p := range txtspecpaths {
+		if _, err := os.Stat(p); err == nil {
+			if strings.HasSuffix(p, ".json") {
+				return readPluginJSONInfo(name, p)
+			}
+			return readPluginInfo(name, p)
+		}
+	}
+	return nil, ErrNotFound
+}
+
+func readPluginInfo(name, path string) (*Plugin, error) {
+	content, err := ioutil.ReadFile(path)
+	if err != nil {
+		return nil, err
+	}
+	addr := strings.TrimSpace(string(content))
+
+	u, err := url.Parse(addr)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(u.Scheme) == 0 {
+		return nil, fmt.Errorf("Unknown protocol")
+	}
+
+	return NewLocalPlugin(name, addr), nil
+}
+
+func readPluginJSONInfo(name, path string) (*Plugin, error) {
+	f, err := os.Open(path)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	var p Plugin
+	if err := json.NewDecoder(f).Decode(&p); err != nil {
+		return nil, err
+	}
+	p.name = name
+	if p.TLSConfig != nil && len(p.TLSConfig.CAFile) == 0 {
+		p.TLSConfig.InsecureSkipVerify = true
+	}
+	p.activateWait = sync.NewCond(&sync.Mutex{})
+
+	return &p, nil
+}
+
+func pluginPaths(base, name, ext string) []string {
+	return []string{
+		filepath.Join(base, name+ext),
+		filepath.Join(base, name, name+ext),
+	}
+}
diff --git a/vendor/github.com/docker/docker/pkg/plugins/discovery_unix.go b/vendor/github.com/docker/docker/pkg/plugins/discovery_unix.go
new file mode 100644
index 0000000000000000000000000000000000000000..693a47e3948525b96064165b50683059f7aed93b
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/plugins/discovery_unix.go
@@ -0,0 +1,5 @@
+// +build !windows
+
+package plugins
+
+var specsPaths = []string{"/etc/docker/plugins", "/usr/lib/docker/plugins"}
diff --git a/vendor/github.com/docker/docker/pkg/plugins/discovery_windows.go b/vendor/github.com/docker/docker/pkg/plugins/discovery_windows.go
new file mode 100644
index 0000000000000000000000000000000000000000..d7c1fe4942bc3ef91379a6bfd7d5957fbb666c2b
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/plugins/discovery_windows.go
@@ -0,0 +1,8 @@
+package plugins
+
+import (
+	"os"
+	"path/filepath"
+)
+
+var specsPaths = []string{filepath.Join(os.Getenv("programdata"), "docker", "plugins")}
diff --git a/vendor/github.com/docker/docker/pkg/plugins/errors.go b/vendor/github.com/docker/docker/pkg/plugins/errors.go
new file mode 100644
index 0000000000000000000000000000000000000000..7988471026d4d5d74eaa97cd9eb3d9a4f1af6e5e
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/plugins/errors.go
@@ -0,0 +1,33 @@
+package plugins
+
+import (
+	"fmt"
+	"net/http"
+)
+
+type statusError struct {
+	status int
+	method string
+	err    string
+}
+
+// Error returns a formatted string for this error type
+func (e *statusError) Error() string {
+	return fmt.Sprintf("%s: %v", e.method, e.err)
+}
+
+// IsNotFound indicates if the passed in error is from an http.StatusNotFound from the plugin
+func IsNotFound(err error) bool {
+	return isStatusError(err, http.StatusNotFound)
+}
+
+func isStatusError(err error, status int) bool {
+	if err == nil {
+		return false
+	}
+	e, ok := err.(*statusError)
+	if !ok {
+		return false
+	}
+	return e.status == status
+}
diff --git a/vendor/github.com/docker/docker/pkg/plugins/plugins.go b/vendor/github.com/docker/docker/pkg/plugins/plugins.go
new file mode 100644
index 0000000000000000000000000000000000000000..861daa3207cea2ddb00c1fffc2d0640e7d7c01da
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/plugins/plugins.go
@@ -0,0 +1,335 @@
+// Package plugins provides structures and helper functions to manage Docker
+// plugins.
+//
+// Docker discovers plugins by looking for them in the plugin directory whenever
+// a user or container tries to use one by name. UNIX domain socket files must
+// be located under /run/docker/plugins, whereas spec files can be located
+// either under /etc/docker/plugins or /usr/lib/docker/plugins. This is handled
+// by the Registry interface, which lets you list all plugins or get a plugin by
+// its name if it exists.
+//
+// The plugins need to implement an HTTP server and bind this to the UNIX socket
+// or the address specified in the spec files.
+// A handshake is sent at /Plugin.Activate, and plugins are expected to return
+// a Manifest with a list of Docker subsystems which this plugin implements.
+//
+// In order to use a plugin, you can use the ``Get`` function with the name of the
+// plugin and the subsystem it implements.
+//
+//	plugin, err := plugins.Get("example", "VolumeDriver")
+//	if err != nil {
+//		return fmt.Errorf("Error looking up volume plugin example: %v", err)
+//	}
+package plugins
+
+import (
+	"errors"
+	"sync"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/go-connections/tlsconfig"
+)
+
+var (
+	// ErrNotImplements is returned if the plugin does not implement the requested driver.
+	ErrNotImplements = errors.New("Plugin does not implement the requested driver")
+)
+
+type plugins struct {
+	sync.Mutex
+	plugins map[string]*Plugin
+}
+
+type extpointHandlers struct {
+	sync.RWMutex
+	extpointHandlers map[string][]func(string, *Client)
+}
+
+var (
+	storage  = plugins{plugins: make(map[string]*Plugin)}
+	handlers = extpointHandlers{extpointHandlers: make(map[string][]func(string, *Client))}
+)
+
+// Manifest lists what a plugin implements.
+type Manifest struct {
+	// List of subsystem the plugin implements.
+	Implements []string
+}
+
+// Plugin is the definition of a docker plugin.
+type Plugin struct {
+	// Name of the plugin
+	name string
+	// Address of the plugin
+	Addr string
+	// TLS configuration of the plugin
+	TLSConfig *tlsconfig.Options
+	// Client attached to the plugin
+	client *Client
+	// Manifest of the plugin (see above)
+	Manifest *Manifest `json:"-"`
+
+	// wait for activation to finish
+	activateWait *sync.Cond
+	// error produced by activation
+	activateErr error
+	// keeps track of callback handlers run against this plugin
+	handlersRun bool
+}
+
+// BasePath returns the path to which all paths returned by the plugin are relative to.
+// For v1 plugins, this always returns the host's root directory.
+func (p *Plugin) BasePath() string {
+	return "/"
+}
+
+// Name returns the name of the plugin.
+func (p *Plugin) Name() string {
+	return p.name
+}
+
+// Client returns a ready-to-use plugin client that can be used to communicate with the plugin.
+func (p *Plugin) Client() *Client {
+	return p.client
+}
+
+// IsV1 returns true for V1 plugins and false otherwise.
+func (p *Plugin) IsV1() bool {
+	return true
+}
+
+// NewLocalPlugin creates a new local plugin.
+func NewLocalPlugin(name, addr string) *Plugin {
+	return &Plugin{
+		name: name,
+		Addr: addr,
+		// TODO: change to nil
+		TLSConfig:    &tlsconfig.Options{InsecureSkipVerify: true},
+		activateWait: sync.NewCond(&sync.Mutex{}),
+	}
+}
+
+func (p *Plugin) activate() error {
+	p.activateWait.L.Lock()
+
+	if p.activated() {
+		p.runHandlers()
+		p.activateWait.L.Unlock()
+		return p.activateErr
+	}
+
+	p.activateErr = p.activateWithLock()
+
+	p.runHandlers()
+	p.activateWait.L.Unlock()
+	p.activateWait.Broadcast()
+	return p.activateErr
+}
+
+// runHandlers runs the registered handlers for the implemented plugin types
+// This should only be run after activation, and while the activation lock is held.
+func (p *Plugin) runHandlers() {
+	if !p.activated() {
+		return
+	}
+
+	handlers.RLock()
+	if !p.handlersRun {
+		for _, iface := range p.Manifest.Implements {
+			hdlrs, handled := handlers.extpointHandlers[iface]
+			if !handled {
+				continue
+			}
+			for _, handler := range hdlrs {
+				handler(p.name, p.client)
+			}
+		}
+		p.handlersRun = true
+	}
+	handlers.RUnlock()
+
+}
+
+// activated returns if the plugin has already been activated.
+// This should only be called with the activation lock held
+func (p *Plugin) activated() bool {
+	return p.Manifest != nil
+}
+
+func (p *Plugin) activateWithLock() error {
+	c, err := NewClient(p.Addr, p.TLSConfig)
+	if err != nil {
+		return err
+	}
+	p.client = c
+
+	m := new(Manifest)
+	if err = p.client.Call("Plugin.Activate", nil, m); err != nil {
+		return err
+	}
+
+	p.Manifest = m
+	return nil
+}
+
+func (p *Plugin) waitActive() error {
+	p.activateWait.L.Lock()
+	for !p.activated() {
+		p.activateWait.Wait()
+	}
+	p.activateWait.L.Unlock()
+	return p.activateErr
+}
+
+func (p *Plugin) implements(kind string) bool {
+	if p.Manifest == nil {
+		return false
+	}
+	for _, driver := range p.Manifest.Implements {
+		if driver == kind {
+			return true
+		}
+	}
+	return false
+}
+
+func load(name string) (*Plugin, error) {
+	return loadWithRetry(name, true)
+}
+
+func loadWithRetry(name string, retry bool) (*Plugin, error) {
+	registry := newLocalRegistry()
+	start := time.Now()
+
+	var retries int
+	for {
+		pl, err := registry.Plugin(name)
+		if err != nil {
+			if !retry {
+				return nil, err
+			}
+
+			timeOff := backoff(retries)
+			if abort(start, timeOff) {
+				return nil, err
+			}
+			retries++
+			logrus.Warnf("Unable to locate plugin: %s, retrying in %v", name, timeOff)
+			time.Sleep(timeOff)
+			continue
+		}
+
+		storage.Lock()
+		if pl, exists := storage.plugins[name]; exists {
+			storage.Unlock()
+			return pl, pl.activate()
+		}
+		storage.plugins[name] = pl
+		storage.Unlock()
+
+		err = pl.activate()
+
+		if err != nil {
+			storage.Lock()
+			delete(storage.plugins, name)
+			storage.Unlock()
+		}
+
+		return pl, err
+	}
+}
+
+func get(name string) (*Plugin, error) {
+	storage.Lock()
+	pl, ok := storage.plugins[name]
+	storage.Unlock()
+	if ok {
+		return pl, pl.activate()
+	}
+	return load(name)
+}
+
+// Get returns the plugin given the specified name and requested implementation.
+func Get(name, imp string) (*Plugin, error) {
+	pl, err := get(name)
+	if err != nil {
+		return nil, err
+	}
+	if err := pl.waitActive(); err == nil && pl.implements(imp) {
+		logrus.Debugf("%s implements: %s", name, imp)
+		return pl, nil
+	}
+	return nil, ErrNotImplements
+}
+
+// Handle adds the specified function to the extpointHandlers.
+func Handle(iface string, fn func(string, *Client)) {
+	handlers.Lock()
+	hdlrs, ok := handlers.extpointHandlers[iface]
+	if !ok {
+		hdlrs = []func(string, *Client){}
+	}
+
+	hdlrs = append(hdlrs, fn)
+	handlers.extpointHandlers[iface] = hdlrs
+
+	storage.Lock()
+	for _, p := range storage.plugins {
+		p.activateWait.L.Lock()
+		if p.activated() && p.implements(iface) {
+			p.handlersRun = false
+		}
+		p.activateWait.L.Unlock()
+	}
+	storage.Unlock()
+
+	handlers.Unlock()
+}
+
+// GetAll returns all the plugins for the specified implementation
+func GetAll(imp string) ([]*Plugin, error) {
+	pluginNames, err := Scan()
+	if err != nil {
+		return nil, err
+	}
+
+	type plLoad struct {
+		pl  *Plugin
+		err error
+	}
+
+	chPl := make(chan *plLoad, len(pluginNames))
+	var wg sync.WaitGroup
+	for _, name := range pluginNames {
+		storage.Lock()
+		pl, ok := storage.plugins[name]
+		storage.Unlock()
+		if ok {
+			chPl <- &plLoad{pl, nil}
+			continue
+		}
+
+		wg.Add(1)
+		go func(name string) {
+			defer wg.Done()
+			pl, err := loadWithRetry(name, false)
+			chPl <- &plLoad{pl, err}
+		}(name)
+	}
+
+	wg.Wait()
+	close(chPl)
+
+	var out []*Plugin
+	for pl := range chPl {
+		if pl.err != nil {
+			logrus.Error(pl.err)
+			continue
+		}
+		if err := pl.pl.waitActive(); err == nil && pl.pl.implements(imp) {
+			out = append(out, pl.pl)
+		}
+	}
+	return out, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/plugins/transport/http.go b/vendor/github.com/docker/docker/pkg/plugins/transport/http.go
new file mode 100644
index 0000000000000000000000000000000000000000..5be146af65740485d6a5541b7029e92cd112e4bc
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/plugins/transport/http.go
@@ -0,0 +1,36 @@
+package transport
+
+import (
+	"io"
+	"net/http"
+)
+
+// httpTransport holds an http.RoundTripper
+// and information about the scheme and address the transport
+// sends request to.
+type httpTransport struct {
+	http.RoundTripper
+	scheme string
+	addr   string
+}
+
+// NewHTTPTransport creates a new httpTransport.
+func NewHTTPTransport(r http.RoundTripper, scheme, addr string) Transport {
+	return httpTransport{
+		RoundTripper: r,
+		scheme:       scheme,
+		addr:         addr,
+	}
+}
+
+// NewRequest creates a new http.Request and sets the URL
+// scheme and address with the transport's fields.
+func (t httpTransport) NewRequest(path string, data io.Reader) (*http.Request, error) {
+	req, err := newHTTPRequest(path, data)
+	if err != nil {
+		return nil, err
+	}
+	req.URL.Scheme = t.scheme
+	req.URL.Host = t.addr
+	return req, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/plugins/transport/transport.go b/vendor/github.com/docker/docker/pkg/plugins/transport/transport.go
new file mode 100644
index 0000000000000000000000000000000000000000..d7f1e2100c435c68ef407d69735f4eb3ed495636
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/plugins/transport/transport.go
@@ -0,0 +1,36 @@
+package transport
+
+import (
+	"io"
+	"net/http"
+	"strings"
+)
+
+// VersionMimetype is the Content-Type the engine sends to plugins.
+const VersionMimetype = "application/vnd.docker.plugins.v1.2+json"
+
+// RequestFactory defines an interface that
+// transports can implement to create new requests.
+type RequestFactory interface {
+	NewRequest(path string, data io.Reader) (*http.Request, error)
+}
+
+// Transport defines an interface that plugin transports
+// must implement.
+type Transport interface {
+	http.RoundTripper
+	RequestFactory
+}
+
+// newHTTPRequest creates a new request with a path and a body.
+func newHTTPRequest(path string, data io.Reader) (*http.Request, error) {
+	if !strings.HasPrefix(path, "/") {
+		path = "/" + path
+	}
+	req, err := http.NewRequest("POST", path, data)
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Add("Accept", VersionMimetype)
+	return req, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/pools/pools.go b/vendor/github.com/docker/docker/pkg/pools/pools.go
new file mode 100644
index 0000000000000000000000000000000000000000..5c5aead698af63066dc742faae33513a50efcaa5
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/pools/pools.go
@@ -0,0 +1,116 @@
+// Package pools provides a collection of pools which provide various
+// data types with buffers. These can be used to lower the number of
+// memory allocations and reuse buffers.
+//
+// New pools should be added to this package to allow them to be
+// shared across packages.
+//
+// Utility functions which operate on pools should be added to this
+// package to allow them to be reused.
+package pools
+
+import (
+	"bufio"
+	"io"
+	"sync"
+
+	"github.com/docker/docker/pkg/ioutils"
+)
+
+var (
+	// BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer.
+	BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K)
+	// BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer.
+	BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K)
+)
+
+const buffer32K = 32 * 1024
+
+// BufioReaderPool is a bufio reader that uses sync.Pool.
+type BufioReaderPool struct {
+	pool sync.Pool
+}
+
+// newBufioReaderPoolWithSize is unexported because new pools should be
+// added here to be shared where required.
+func newBufioReaderPoolWithSize(size int) *BufioReaderPool {
+	return &BufioReaderPool{
+		pool: sync.Pool{
+			New: func() interface{} { return bufio.NewReaderSize(nil, size) },
+		},
+	}
+}
+
+// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool.
+func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader {
+	buf := bufPool.pool.Get().(*bufio.Reader)
+	buf.Reset(r)
+	return buf
+}
+
+// Put puts the bufio.Reader back into the pool.
+func (bufPool *BufioReaderPool) Put(b *bufio.Reader) {
+	b.Reset(nil)
+	bufPool.pool.Put(b)
+}
+
+// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy.
+func Copy(dst io.Writer, src io.Reader) (written int64, err error) {
+	buf := BufioReader32KPool.Get(src)
+	written, err = io.Copy(dst, buf)
+	BufioReader32KPool.Put(buf)
+	return
+}
+
+// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back
+// into the pool and closes the reader if it's an io.ReadCloser.
+func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser {
+	return ioutils.NewReadCloserWrapper(r, func() error {
+		if readCloser, ok := r.(io.ReadCloser); ok {
+			readCloser.Close()
+		}
+		bufPool.Put(buf)
+		return nil
+	})
+}
+
+// BufioWriterPool is a bufio writer that uses sync.Pool.
+type BufioWriterPool struct {
+	pool sync.Pool
+}
+
+// newBufioWriterPoolWithSize is unexported because new pools should be
+// added here to be shared where required.
+func newBufioWriterPoolWithSize(size int) *BufioWriterPool {
+	return &BufioWriterPool{
+		pool: sync.Pool{
+			New: func() interface{} { return bufio.NewWriterSize(nil, size) },
+		},
+	}
+}
+
+// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool.
+func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer {
+	buf := bufPool.pool.Get().(*bufio.Writer)
+	buf.Reset(w)
+	return buf
+}
+
+// Put puts the bufio.Writer back into the pool.
+func (bufPool *BufioWriterPool) Put(b *bufio.Writer) {
+	b.Reset(nil)
+	bufPool.pool.Put(b)
+}
+
+// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back
+// into the pool and closes the writer if it's an io.Writecloser.
+func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser {
+	return ioutils.NewWriteCloserWrapper(w, func() error {
+		buf.Flush()
+		if writeCloser, ok := w.(io.WriteCloser); ok {
+			writeCloser.Close()
+		}
+		bufPool.Put(buf)
+		return nil
+	})
+}
diff --git a/vendor/github.com/docker/docker/pkg/promise/promise.go b/vendor/github.com/docker/docker/pkg/promise/promise.go
new file mode 100644
index 0000000000000000000000000000000000000000..dd52b9082f744d0b78ada8e2c9c8069d9d853213
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/promise/promise.go
@@ -0,0 +1,11 @@
+package promise
+
+// Go is a basic promise implementation: it wraps a call to a function in a goroutine
+// and returns a channel which will later return the function's return value.
+func Go(f func() error) chan error {
+	ch := make(chan error, 1)
+	go func() {
+		ch <- f()
+	}()
+	return ch
+}
diff --git a/vendor/github.com/docker/docker/pkg/random/random.go b/vendor/github.com/docker/docker/pkg/random/random.go
new file mode 100644
index 0000000000000000000000000000000000000000..70de4d1304c54572f8e7adc9a171a83f63d44f29
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/random/random.go
@@ -0,0 +1,71 @@
+package random
+
+import (
+	cryptorand "crypto/rand"
+	"io"
+	"math"
+	"math/big"
+	"math/rand"
+	"sync"
+	"time"
+)
+
+// Rand is a global *rand.Rand instance, which is initialized with a NewSource() source.
+var Rand = rand.New(NewSource())
+
+// Reader is a global, shared instance of a pseudorandom bytes generator.
+// It doesn't consume entropy.
+var Reader io.Reader = &reader{rnd: Rand}
+
+// copypaste from standard math/rand
+type lockedSource struct {
+	lk  sync.Mutex
+	src rand.Source
+}
+
+func (r *lockedSource) Int63() (n int64) {
+	r.lk.Lock()
+	n = r.src.Int63()
+	r.lk.Unlock()
+	return
+}
+
+func (r *lockedSource) Seed(seed int64) {
+	r.lk.Lock()
+	r.src.Seed(seed)
+	r.lk.Unlock()
+}
+
+// NewSource returns math/rand.Source safe for concurrent use and initialized
+// with current unix-nano timestamp
+func NewSource() rand.Source {
+	var seed int64
+	if cryptoseed, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64)); err != nil {
+		// This should not happen, but worst-case fallback to time-based seed.
+		seed = time.Now().UnixNano()
+	} else {
+		seed = cryptoseed.Int64()
+	}
+	return &lockedSource{
+		src: rand.NewSource(seed),
+	}
+}
+
+type reader struct {
+	rnd *rand.Rand
+}
+
+func (r *reader) Read(b []byte) (int, error) {
+	i := 0
+	for {
+		val := r.rnd.Int63()
+		for val > 0 {
+			b[i] = byte(val)
+			i++
+			if i == len(b) {
+				return i, nil
+			}
+			val >>= 8
+		}
+	}
+}
diff --git a/vendor/github.com/docker/docker/pkg/reexec/README.md b/vendor/github.com/docker/docker/pkg/reexec/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..45592ce85a8e4b91b1eae9c83949f4744c3a0ac6
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/reexec/README.md
@@ -0,0 +1,5 @@
+## reexec
+
+The `reexec` package facilitates the busybox style reexec of the docker binary that we require because 
+of the forking limitations of using Go.  Handlers can be registered with a name and the argv 0 of 
+the exec of the binary will be used to find and execute custom init paths.
diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_linux.go b/vendor/github.com/docker/docker/pkg/reexec/command_linux.go
new file mode 100644
index 0000000000000000000000000000000000000000..34ae2a9dcdab426e2ed7e0f0033f6d28920f60cd
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/reexec/command_linux.go
@@ -0,0 +1,28 @@
+// +build linux
+
+package reexec
+
+import (
+	"os/exec"
+	"syscall"
+)
+
+// Self returns the path to the current process's binary.
+// Returns "/proc/self/exe".
+func Self() string {
+	return "/proc/self/exe"
+}
+
+// Command returns *exec.Cmd which has Path as the current binary. It also sets
+// SysProcAttr.Pdeathsig to SIGTERM.
+// This will use the in-memory version (/proc/self/exe) of the current binary,
+// it is thus safe to delete or replace the on-disk binary (os.Args[0]).
+func Command(args ...string) *exec.Cmd {
+	return &exec.Cmd{
+		Path: Self(),
+		Args: args,
+		SysProcAttr: &syscall.SysProcAttr{
+			Pdeathsig: syscall.SIGTERM,
+		},
+	}
+}
diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_unix.go b/vendor/github.com/docker/docker/pkg/reexec/command_unix.go
new file mode 100644
index 0000000000000000000000000000000000000000..778a720e3b9114e94bd3db4cfd4de140538bfc41
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/reexec/command_unix.go
@@ -0,0 +1,23 @@
+// +build freebsd solaris darwin
+
+package reexec
+
+import (
+	"os/exec"
+)
+
+// Self returns the path to the current process's binary.
+// Uses os.Args[0].
+func Self() string {
+	return naiveSelf()
+}
+
+// Command returns *exec.Cmd which has Path as current binary.
+// For example if current binary is "docker" at "/usr/bin/", then cmd.Path will
+// be set to "/usr/bin/docker".
+func Command(args ...string) *exec.Cmd {
+	return &exec.Cmd{
+		Path: Self(),
+		Args: args,
+	}
+}
diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go b/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go
new file mode 100644
index 0000000000000000000000000000000000000000..76edd824273ec3188d7633e81ab5e67cf4dba662
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go
@@ -0,0 +1,12 @@
+// +build !linux,!windows,!freebsd,!solaris,!darwin
+
+package reexec
+
+import (
+	"os/exec"
+)
+
+// Command is unsupported on operating systems apart from Linux, Windows, FreeBSD, Solaris and Darwin.
+func Command(args ...string) *exec.Cmd {
+	return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_windows.go b/vendor/github.com/docker/docker/pkg/reexec/command_windows.go
new file mode 100644
index 0000000000000000000000000000000000000000..ca871c4227ede82a4df5d4dfb5aa519867b50580
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/reexec/command_windows.go
@@ -0,0 +1,23 @@
+// +build windows
+
+package reexec
+
+import (
+	"os/exec"
+)
+
+// Self returns the path to the current process's binary.
+// Uses os.Args[0].
+func Self() string {
+	return naiveSelf()
+}
+
+// Command returns *exec.Cmd which has Path as current binary.
+// For example if current binary is "docker.exe" at "C:\", then cmd.Path will
+// be set to "C:\docker.exe".
+func Command(args ...string) *exec.Cmd {
+	return &exec.Cmd{
+		Path: Self(),
+		Args: args,
+	}
+}
diff --git a/vendor/github.com/docker/docker/pkg/reexec/reexec.go b/vendor/github.com/docker/docker/pkg/reexec/reexec.go
new file mode 100644
index 0000000000000000000000000000000000000000..c56671d91927bc8310ee1903140eb46db18d4f04
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/reexec/reexec.go
@@ -0,0 +1,47 @@
+package reexec
+
+import (
+	"fmt"
+	"os"
+	"os/exec"
+	"path/filepath"
+)
+
+var registeredInitializers = make(map[string]func())
+
+// Register adds an initialization func under the specified name
+func Register(name string, initializer func()) {
+	if _, exists := registeredInitializers[name]; exists {
+		panic(fmt.Sprintf("reexec func already registered under name %q", name))
+	}
+
+	registeredInitializers[name] = initializer
+}
+
+// Init is called as the first part of the exec process and returns true if an
+// initialization function was called.
+func Init() bool {
+	initializer, exists := registeredInitializers[os.Args[0]]
+	if exists {
+		initializer()
+
+		return true
+	}
+	return false
+}
+
+func naiveSelf() string {
+	name := os.Args[0]
+	if filepath.Base(name) == name {
+		if lp, err := exec.LookPath(name); err == nil {
+			return lp
+		}
+	}
+	// handle conversion of relative paths to absolute
+	if absName, err := filepath.Abs(name); err == nil {
+		return absName
+	}
+	// if we couldn't get absolute name, return original
+	// (NOTE: Go only errors on Abs() if os.Getwd fails)
+	return name
+}
diff --git a/vendor/github.com/docker/docker/pkg/stringid/README.md b/vendor/github.com/docker/docker/pkg/stringid/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..37a5098fd988ea2fefd77aeb2f9035dbb0271189
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/stringid/README.md
@@ -0,0 +1 @@
+This package provides helper functions for dealing with string identifiers
diff --git a/vendor/github.com/docker/docker/pkg/stringid/stringid.go b/vendor/github.com/docker/docker/pkg/stringid/stringid.go
new file mode 100644
index 0000000000000000000000000000000000000000..fa35d8bad560c36f0f20c16452ed345154dd6485
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/stringid/stringid.go
@@ -0,0 +1,69 @@
+// Package stringid provides helper functions for dealing with string identifiers
+package stringid
+
+import (
+	"crypto/rand"
+	"encoding/hex"
+	"io"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"github.com/docker/docker/pkg/random"
+)
+
+const shortLen = 12
+
+var validShortID = regexp.MustCompile("^[a-z0-9]{12}$")
+
+// IsShortID determines if an arbitrary string *looks like* a short ID.
+func IsShortID(id string) bool {
+	return validShortID.MatchString(id)
+}
+
+// TruncateID returns a shorthand version of a string identifier for convenience.
+// A collision with other shorthands is very unlikely, but possible.
+// In case of a collision a lookup with TruncIndex.Get() will fail, and the caller
+// will need to use a longer prefix, or the full-length ID.
+func TruncateID(id string) string {
+	if i := strings.IndexRune(id, ':'); i >= 0 {
+		id = id[i+1:]
+	}
+	if len(id) > shortLen {
+		id = id[:shortLen]
+	}
+	return id
+}
+
+func generateID(crypto bool) string {
+	b := make([]byte, 32)
+	r := random.Reader
+	if crypto {
+		r = rand.Reader
+	}
+	for {
+		if _, err := io.ReadFull(r, b); err != nil {
+			panic(err) // This shouldn't happen
+		}
+		id := hex.EncodeToString(b)
+		// if we try to parse the truncated form as an int and we don't have
+		// an error then the value is all numeric and causes issues when
+		// used as a hostname. ref #3869
+		if _, err := strconv.ParseInt(TruncateID(id), 10, 64); err == nil {
+			continue
+		}
+		return id
+	}
+}
+
+// GenerateRandomID returns a unique id.
+func GenerateRandomID() string {
+	return generateID(true)
+}
+
+// GenerateNonCryptoID generates unique id without using cryptographically
+// secure sources of random.
+// It helps you to save entropy.
+func GenerateNonCryptoID() string {
+	return generateID(false)
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes.go b/vendor/github.com/docker/docker/pkg/system/chtimes.go
new file mode 100644
index 0000000000000000000000000000000000000000..7637f12e1a7d21ef15cedf4fa67115f93180ec85
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/chtimes.go
@@ -0,0 +1,52 @@
+package system
+
+import (
+	"os"
+	"syscall"
+	"time"
+	"unsafe"
+)
+
+var (
+	maxTime time.Time
+)
+
+func init() {
+	if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 {
+		// This is a 64 bit timespec
+		// os.Chtimes limits time to the following
+		maxTime = time.Unix(0, 1<<63-1)
+	} else {
+		// This is a 32 bit timespec
+		maxTime = time.Unix(1<<31-1, 0)
+	}
+}
+
+// Chtimes changes the access time and modified time of a file at the given path
+func Chtimes(name string, atime time.Time, mtime time.Time) error {
+	unixMinTime := time.Unix(0, 0)
+	unixMaxTime := maxTime
+
+	// If the modified time is prior to the Unix Epoch, or after the
+	// end of Unix Time, os.Chtimes has undefined behavior
+	// default to Unix Epoch in this case, just in case
+
+	if atime.Before(unixMinTime) || atime.After(unixMaxTime) {
+		atime = unixMinTime
+	}
+
+	if mtime.Before(unixMinTime) || mtime.After(unixMaxTime) {
+		mtime = unixMinTime
+	}
+
+	if err := os.Chtimes(name, atime, mtime); err != nil {
+		return err
+	}
+
+	// Take platform specific action for setting create time.
+	if err := setCTime(name, mtime); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go b/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go
new file mode 100644
index 0000000000000000000000000000000000000000..09d58bcbfdd4e6a94c40f584fbf239fd028d5a70
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go
@@ -0,0 +1,14 @@
+// +build !windows
+
+package system
+
+import (
+	"time"
+)
+
+// setCTime will set the create time on a file. On Unix, the create
+// time is updated as a side effect of setting the modified time, so
+// no action is required.
+func setCTime(path string, ctime time.Time) error {
+	return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go b/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go
new file mode 100644
index 0000000000000000000000000000000000000000..29458684659b61da14f039820c70eaf8d2ec83b6
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go
@@ -0,0 +1,27 @@
+// +build windows
+
+package system
+
+import (
+	"syscall"
+	"time"
+)
+
+// setCTime will set the create time on a file. On Windows, this requires
+// calling SetFileTime and explicitly including the create time.
+func setCTime(path string, ctime time.Time) error {
+	ctimespec := syscall.NsecToTimespec(ctime.UnixNano())
+	pathp, e := syscall.UTF16PtrFromString(path)
+	if e != nil {
+		return e
+	}
+	h, e := syscall.CreateFile(pathp,
+		syscall.FILE_WRITE_ATTRIBUTES, syscall.FILE_SHARE_WRITE, nil,
+		syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS, 0)
+	if e != nil {
+		return e
+	}
+	defer syscall.Close(h)
+	c := syscall.NsecToFiletime(syscall.TimespecToNsec(ctimespec))
+	return syscall.SetFileTime(h, &c, nil, nil)
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/errors.go b/vendor/github.com/docker/docker/pkg/system/errors.go
new file mode 100644
index 0000000000000000000000000000000000000000..288318985e3e4da571e3376af209fdfaa1edd61f
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/errors.go
@@ -0,0 +1,10 @@
+package system
+
+import (
+	"errors"
+)
+
+var (
+	// ErrNotSupportedPlatform means the platform is not supported.
+	ErrNotSupportedPlatform = errors.New("platform and architecture is not supported")
+)
diff --git a/vendor/github.com/docker/docker/pkg/system/events_windows.go b/vendor/github.com/docker/docker/pkg/system/events_windows.go
new file mode 100644
index 0000000000000000000000000000000000000000..3ec6d22151cd4dafadc8a2d23e02f244e3cab286
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/events_windows.go
@@ -0,0 +1,85 @@
+package system
+
+// This file implements syscalls for Win32 events which are not implemented
+// in golang.
+
+import (
+	"syscall"
+	"unsafe"
+
+	"golang.org/x/sys/windows"
+)
+
+var (
+	procCreateEvent = modkernel32.NewProc("CreateEventW")
+	procOpenEvent   = modkernel32.NewProc("OpenEventW")
+	procSetEvent    = modkernel32.NewProc("SetEvent")
+	procResetEvent  = modkernel32.NewProc("ResetEvent")
+	procPulseEvent  = modkernel32.NewProc("PulseEvent")
+)
+
+// CreateEvent implements win32 CreateEventW func in golang. It will create an event object.
+func CreateEvent(eventAttributes *syscall.SecurityAttributes, manualReset bool, initialState bool, name string) (handle syscall.Handle, err error) {
+	namep, _ := syscall.UTF16PtrFromString(name)
+	var _p1 uint32
+	if manualReset {
+		_p1 = 1
+	}
+	var _p2 uint32
+	if initialState {
+		_p2 = 1
+	}
+	r0, _, e1 := procCreateEvent.Call(uintptr(unsafe.Pointer(eventAttributes)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(namep)))
+	use(unsafe.Pointer(namep))
+	handle = syscall.Handle(r0)
+	if handle == syscall.InvalidHandle {
+		err = e1
+	}
+	return
+}
+
+// OpenEvent implements win32 OpenEventW func in golang. It opens an event object.
+func OpenEvent(desiredAccess uint32, inheritHandle bool, name string) (handle syscall.Handle, err error) {
+	namep, _ := syscall.UTF16PtrFromString(name)
+	var _p1 uint32
+	if inheritHandle {
+		_p1 = 1
+	}
+	r0, _, e1 := procOpenEvent.Call(uintptr(desiredAccess), uintptr(_p1), uintptr(unsafe.Pointer(namep)))
+	use(unsafe.Pointer(namep))
+	handle = syscall.Handle(r0)
+	if handle == syscall.InvalidHandle {
+		err = e1
+	}
+	return
+}
+
+// SetEvent implements win32 SetEvent func in golang.
+func SetEvent(handle syscall.Handle) (err error) {
+	return setResetPulse(handle, procSetEvent)
+}
+
+// ResetEvent implements win32 ResetEvent func in golang.
+func ResetEvent(handle syscall.Handle) (err error) {
+	return setResetPulse(handle, procResetEvent)
+}
+
+// PulseEvent implements win32 PulseEvent func in golang.
+func PulseEvent(handle syscall.Handle) (err error) {
+	return setResetPulse(handle, procPulseEvent)
+}
+
+func setResetPulse(handle syscall.Handle, proc *windows.LazyProc) (err error) {
+	r0, _, _ := proc.Call(uintptr(handle))
+	if r0 != 0 {
+		err = syscall.Errno(r0)
+	}
+	return
+}
+
+var temp unsafe.Pointer
+
+// use ensures a variable is kept alive without the GC freeing while still needed
+func use(p unsafe.Pointer) {
+	temp = p
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/exitcode.go b/vendor/github.com/docker/docker/pkg/system/exitcode.go
new file mode 100644
index 0000000000000000000000000000000000000000..60f0514b1ddd93042976b0e41c2150d7f795f7ff
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/exitcode.go
@@ -0,0 +1,33 @@
+package system
+
+import (
+	"fmt"
+	"os/exec"
+	"syscall"
+)
+
+// GetExitCode returns the ExitStatus of the specified error if its type is
+// exec.ExitError, returns 0 and an error otherwise.
+func GetExitCode(err error) (int, error) {
+	exitCode := 0
+	if exiterr, ok := err.(*exec.ExitError); ok {
+		if procExit, ok := exiterr.Sys().(syscall.WaitStatus); ok {
+			return procExit.ExitStatus(), nil
+		}
+	}
+	return exitCode, fmt.Errorf("failed to get exit code")
+}
+
+// ProcessExitCode process the specified error and returns the exit status code
+// if the error was of type exec.ExitError, returns nothing otherwise.
+func ProcessExitCode(err error) (exitCode int) {
+	if err != nil {
+		var exiterr error
+		if exitCode, exiterr = GetExitCode(err); exiterr != nil {
+			// TODO: Fix this so we check the error's text.
+			// we've failed to retrieve exit code, so we set it to 127
+			exitCode = 127
+		}
+	}
+	return
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/filesys.go b/vendor/github.com/docker/docker/pkg/system/filesys.go
new file mode 100644
index 0000000000000000000000000000000000000000..810c79478681f472c69713c65d14b5a86ce1738c
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/filesys.go
@@ -0,0 +1,54 @@
+// +build !windows
+
+package system
+
+import (
+	"os"
+	"path/filepath"
+)
+
+// MkdirAllWithACL is a wrapper for MkdirAll that creates a directory
+// ACL'd for Builtin Administrators and Local System.
+func MkdirAllWithACL(path string, perm os.FileMode) error {
+	return MkdirAll(path, perm)
+}
+
+// MkdirAll creates a directory named path along with any necessary parents,
+// with permission specified by attribute perm for all dir created.
+func MkdirAll(path string, perm os.FileMode) error {
+	return os.MkdirAll(path, perm)
+}
+
+// IsAbs is a platform-specific wrapper for filepath.IsAbs.
+func IsAbs(path string) bool {
+	return filepath.IsAbs(path)
+}
+
+// The functions below here are wrappers for the equivalents in the os package.
+// They are passthrough on Unix platforms, and only relevant on Windows.
+
+// CreateSequential creates the named file with mode 0666 (before umask), truncating
+// it if it already exists. If successful, methods on the returned
+// File can be used for I/O; the associated file descriptor has mode
+// O_RDWR.
+// If there is an error, it will be of type *PathError.
+func CreateSequential(name string) (*os.File, error) {
+	return os.Create(name)
+}
+
+// OpenSequential opens the named file for reading. If successful, methods on
+// the returned file can be used for reading; the associated file
+// descriptor has mode O_RDONLY.
+// If there is an error, it will be of type *PathError.
+func OpenSequential(name string) (*os.File, error) {
+	return os.Open(name)
+}
+
+// OpenFileSequential is the generalized open call; most users will use Open
+// or Create instead. It opens the named file with specified flag
+// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful,
+// methods on the returned File can be used for I/O.
+// If there is an error, it will be of type *PathError.
+func OpenFileSequential(name string, flag int, perm os.FileMode) (*os.File, error) {
+	return os.OpenFile(name, flag, perm)
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/filesys_windows.go b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go
new file mode 100644
index 0000000000000000000000000000000000000000..6094f01fd4090b2dd3abd53bf7bbd8953639e186
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go
@@ -0,0 +1,236 @@
+// +build windows
+
+package system
+
+import (
+	"os"
+	"path/filepath"
+	"regexp"
+	"strings"
+	"syscall"
+	"unsafe"
+
+	winio "github.com/Microsoft/go-winio"
+)
+
+// MkdirAllWithACL is a wrapper for MkdirAll that creates a directory
+// ACL'd for Builtin Administrators and Local System.
+func MkdirAllWithACL(path string, perm os.FileMode) error {
+	return mkdirall(path, true)
+}
+
+// MkdirAll implementation that is volume path aware for Windows.
+func MkdirAll(path string, _ os.FileMode) error {
+	return mkdirall(path, false)
+}
+
+// mkdirall is a custom version of os.MkdirAll modified for use on Windows
+// so that it is both volume path aware, and can create a directory with
+// a DACL.
+func mkdirall(path string, adminAndLocalSystem bool) error {
+	if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) {
+		return nil
+	}
+
+	// The rest of this method is largely copied from os.MkdirAll and should be kept
+	// as-is to ensure compatibility.
+
+	// Fast path: if we can tell whether path is a directory or file, stop with success or error.
+	dir, err := os.Stat(path)
+	if err == nil {
+		if dir.IsDir() {
+			return nil
+		}
+		return &os.PathError{
+			Op:   "mkdir",
+			Path: path,
+			Err:  syscall.ENOTDIR,
+		}
+	}
+
+	// Slow path: make sure parent exists and then call Mkdir for path.
+	i := len(path)
+	for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator.
+		i--
+	}
+
+	j := i
+	for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element.
+		j--
+	}
+
+	if j > 1 {
+		// Create parent
+		err = mkdirall(path[0:j-1], false)
+		if err != nil {
+			return err
+		}
+	}
+
+	// Parent now exists; invoke os.Mkdir or mkdirWithACL and use its result.
+	if adminAndLocalSystem {
+		err = mkdirWithACL(path)
+	} else {
+		err = os.Mkdir(path, 0)
+	}
+
+	if err != nil {
+		// Handle arguments like "foo/." by
+		// double-checking that directory doesn't exist.
+		dir, err1 := os.Lstat(path)
+		if err1 == nil && dir.IsDir() {
+			return nil
+		}
+		return err
+	}
+	return nil
+}
+
+// mkdirWithACL creates a new directory. If there is an error, it will be of
+// type *PathError.
+//
+// This is a modified and combined version of os.Mkdir and syscall.Mkdir
+// in golang to cater for creating a directory with an ACL permitting full
+// access, with inheritance, to any subfolder/file for Built-in Administrators
+// and Local System.
+func mkdirWithACL(name string) error {
+	sa := syscall.SecurityAttributes{Length: 0}
+	sddl := "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)"
+	sd, err := winio.SddlToSecurityDescriptor(sddl)
+	if err != nil {
+		return &os.PathError{Op: "mkdir", Path: name, Err: err}
+	}
+	sa.Length = uint32(unsafe.Sizeof(sa))
+	sa.InheritHandle = 1
+	sa.SecurityDescriptor = uintptr(unsafe.Pointer(&sd[0]))
+
+	namep, err := syscall.UTF16PtrFromString(name)
+	if err != nil {
+		return &os.PathError{Op: "mkdir", Path: name, Err: err}
+	}
+
+	e := syscall.CreateDirectory(namep, &sa)
+	if e != nil {
+		return &os.PathError{Op: "mkdir", Path: name, Err: e}
+	}
+	return nil
+}
+
+// IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows,
+// golang filepath.IsAbs does not consider a path \windows\system32 as absolute
+// as it doesn't start with a drive-letter/colon combination. However, in
+// docker we need to verify things such as WORKDIR /windows/system32 in
+// a Dockerfile (which gets translated to \windows\system32 when being processed
+// by the daemon). This SHOULD be treated as absolute from a docker processing
+// perspective.
+func IsAbs(path string) bool {
+	if !filepath.IsAbs(path) {
+		if !strings.HasPrefix(path, string(os.PathSeparator)) {
+			return false
+		}
+	}
+	return true
+}
+
+// The origin of the functions below here are the golang OS and syscall packages,
+// slightly modified to only cope with files, not directories due to the
+// specific use case.
+//
+// The alteration is to allow a file on Windows to be opened with
+// FILE_FLAG_SEQUENTIAL_SCAN (particularly for docker load), to avoid eating
+// the standby list, particularly when accessing large files such as layer.tar.
+
+// CreateSequential creates the named file with mode 0666 (before umask), truncating
+// it if it already exists. If successful, methods on the returned
+// File can be used for I/O; the associated file descriptor has mode
+// O_RDWR.
+// If there is an error, it will be of type *PathError.
+func CreateSequential(name string) (*os.File, error) {
+	return OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0)
+}
+
+// OpenSequential opens the named file for reading. If successful, methods on
+// the returned file can be used for reading; the associated file
+// descriptor has mode O_RDONLY.
+// If there is an error, it will be of type *PathError.
+func OpenSequential(name string) (*os.File, error) {
+	return OpenFileSequential(name, os.O_RDONLY, 0)
+}
+
+// OpenFileSequential is the generalized open call; most users will use Open
+// or Create instead.
+// If there is an error, it will be of type *PathError.
+func OpenFileSequential(name string, flag int, _ os.FileMode) (*os.File, error) {
+	if name == "" {
+		return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOENT}
+	}
+	r, errf := syscallOpenFileSequential(name, flag, 0)
+	if errf == nil {
+		return r, nil
+	}
+	return nil, &os.PathError{Op: "open", Path: name, Err: errf}
+}
+
+func syscallOpenFileSequential(name string, flag int, _ os.FileMode) (file *os.File, err error) {
+	r, e := syscallOpenSequential(name, flag|syscall.O_CLOEXEC, 0)
+	if e != nil {
+		return nil, e
+	}
+	return os.NewFile(uintptr(r), name), nil
+}
+
+func makeInheritSa() *syscall.SecurityAttributes {
+	var sa syscall.SecurityAttributes
+	sa.Length = uint32(unsafe.Sizeof(sa))
+	sa.InheritHandle = 1
+	return &sa
+}
+
+func syscallOpenSequential(path string, mode int, _ uint32) (fd syscall.Handle, err error) {
+	if len(path) == 0 {
+		return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND
+	}
+	pathp, err := syscall.UTF16PtrFromString(path)
+	if err != nil {
+		return syscall.InvalidHandle, err
+	}
+	var access uint32
+	switch mode & (syscall.O_RDONLY | syscall.O_WRONLY | syscall.O_RDWR) {
+	case syscall.O_RDONLY:
+		access = syscall.GENERIC_READ
+	case syscall.O_WRONLY:
+		access = syscall.GENERIC_WRITE
+	case syscall.O_RDWR:
+		access = syscall.GENERIC_READ | syscall.GENERIC_WRITE
+	}
+	if mode&syscall.O_CREAT != 0 {
+		access |= syscall.GENERIC_WRITE
+	}
+	if mode&syscall.O_APPEND != 0 {
+		access &^= syscall.GENERIC_WRITE
+		access |= syscall.FILE_APPEND_DATA
+	}
+	sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE)
+	var sa *syscall.SecurityAttributes
+	if mode&syscall.O_CLOEXEC == 0 {
+		sa = makeInheritSa()
+	}
+	var createmode uint32
+	switch {
+	case mode&(syscall.O_CREAT|syscall.O_EXCL) == (syscall.O_CREAT | syscall.O_EXCL):
+		createmode = syscall.CREATE_NEW
+	case mode&(syscall.O_CREAT|syscall.O_TRUNC) == (syscall.O_CREAT | syscall.O_TRUNC):
+		createmode = syscall.CREATE_ALWAYS
+	case mode&syscall.O_CREAT == syscall.O_CREAT:
+		createmode = syscall.OPEN_ALWAYS
+	case mode&syscall.O_TRUNC == syscall.O_TRUNC:
+		createmode = syscall.TRUNCATE_EXISTING
+	default:
+		createmode = syscall.OPEN_EXISTING
+	}
+	// Use FILE_FLAG_SEQUENTIAL_SCAN rather than FILE_ATTRIBUTE_NORMAL as implemented in golang.
+	//https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx
+	const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN
+	h, e := syscall.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0)
+	return h, e
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/lstat.go b/vendor/github.com/docker/docker/pkg/system/lstat.go
new file mode 100644
index 0000000000000000000000000000000000000000..bd23c4d50b21d6138260027e86fbac64f8d234d3
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/lstat.go
@@ -0,0 +1,19 @@
+// +build !windows
+
+package system
+
+import (
+	"syscall"
+)
+
+// Lstat takes a path to a file and returns
+// a system.StatT type pertaining to that file.
+//
+// Throws an error if the file does not exist
+func Lstat(path string) (*StatT, error) {
+	s := &syscall.Stat_t{}
+	if err := syscall.Lstat(path, s); err != nil {
+		return nil, err
+	}
+	return fromStatT(s)
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/lstat_windows.go b/vendor/github.com/docker/docker/pkg/system/lstat_windows.go
new file mode 100644
index 0000000000000000000000000000000000000000..49e87eb40bae9d4fb6f0cc5d04dfb2801d2df46b
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/lstat_windows.go
@@ -0,0 +1,25 @@
+// +build windows
+
+package system
+
+import (
+	"os"
+)
+
+// Lstat calls os.Lstat to get a fileinfo interface back.
+// This is then copied into our own locally defined structure.
+// Note the Linux version uses fromStatT to do the copy back,
+// but that is not strictly necessary when already in an OS specific module.
+func Lstat(path string) (*StatT, error) {
+	fi, err := os.Lstat(path)
+	if err != nil {
+		return nil, err
+	}
+
+	return &StatT{
+		name:    fi.Name(),
+		size:    fi.Size(),
+		mode:    fi.Mode(),
+		modTime: fi.ModTime(),
+		isDir:   fi.IsDir()}, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo.go b/vendor/github.com/docker/docker/pkg/system/meminfo.go
new file mode 100644
index 0000000000000000000000000000000000000000..3b6e947e6753d8fce73de3817b3d959aad1fb49f
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/meminfo.go
@@ -0,0 +1,17 @@
+package system
+
+// MemInfo contains memory statistics of the host system.
+type MemInfo struct {
+	// Total usable RAM (i.e. physical RAM minus a few reserved bits and the
+	// kernel binary code).
+	MemTotal int64
+
+	// Amount of free memory.
+	MemFree int64
+
+	// Total amount of swap space available.
+	SwapTotal int64
+
+	// Amount of swap space that is currently unused.
+	SwapFree int64
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go b/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go
new file mode 100644
index 0000000000000000000000000000000000000000..385f1d5e735aa725de249e9430f1602a30779728
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go
@@ -0,0 +1,65 @@
+package system
+
+import (
+	"bufio"
+	"io"
+	"os"
+	"strconv"
+	"strings"
+
+	"github.com/docker/go-units"
+)
+
+// ReadMemInfo retrieves memory statistics of the host system and returns a
+// MemInfo type.
+func ReadMemInfo() (*MemInfo, error) {
+	file, err := os.Open("/proc/meminfo")
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+	return parseMemInfo(file)
+}
+
+// parseMemInfo parses the /proc/meminfo file into
+// a MemInfo object given an io.Reader to the file.
+// Throws error if there are problems reading from the file
+func parseMemInfo(reader io.Reader) (*MemInfo, error) {
+	meminfo := &MemInfo{}
+	scanner := bufio.NewScanner(reader)
+	for scanner.Scan() {
+		// Expected format: ["MemTotal:", "1234", "kB"]
+		parts := strings.Fields(scanner.Text())
+
+		// Sanity checks: Skip malformed entries.
+		if len(parts) < 3 || parts[2] != "kB" {
+			continue
+		}
+
+		// Convert to bytes.
+		size, err := strconv.Atoi(parts[1])
+		if err != nil {
+			continue
+		}
+		bytes := int64(size) * units.KiB
+
+		switch parts[0] {
+		case "MemTotal:":
+			meminfo.MemTotal = bytes
+		case "MemFree:":
+			meminfo.MemFree = bytes
+		case "SwapTotal:":
+			meminfo.SwapTotal = bytes
+		case "SwapFree:":
+			meminfo.SwapFree = bytes
+		}
+
+	}
+
+	// Handle errors that may have occurred during the reading of the file.
+	if err := scanner.Err(); err != nil {
+		return nil, err
+	}
+
+	return meminfo, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_solaris.go b/vendor/github.com/docker/docker/pkg/system/meminfo_solaris.go
new file mode 100644
index 0000000000000000000000000000000000000000..7f4f84f73ac903bc3b82d5bf3d7218734cdbc983
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/meminfo_solaris.go
@@ -0,0 +1,128 @@
+// +build solaris,cgo
+
+package system
+
+import (
+	"fmt"
+	"unsafe"
+)
+
+// #cgo LDFLAGS: -lkstat
+// #include <unistd.h>
+// #include <stdlib.h>
+// #include <stdio.h>
+// #include <kstat.h>
+// #include <sys/swap.h>
+// #include <sys/param.h>
+// struct swaptable *allocSwaptable(int num) {
+//	struct swaptable *st;
+//	struct swapent *swapent;
+// 	st = (struct swaptable *)malloc(num * sizeof(swapent_t) + sizeof (int));
+//	swapent = st->swt_ent;
+//	for (int i = 0; i < num; i++,swapent++) {
+//		swapent->ste_path = (char *)malloc(MAXPATHLEN * sizeof (char));
+//	}
+//	st->swt_n = num;
+//	return st;
+//}
+// void freeSwaptable (struct swaptable *st) {
+//	struct swapent *swapent = st->swt_ent;
+//	for (int i = 0; i < st->swt_n; i++,swapent++) {
+//		free(swapent->ste_path);
+//	}
+//	free(st);
+// }
+// swapent_t getSwapEnt(swapent_t *ent, int i) {
+//	return ent[i];
+// }
+// int64_t getPpKernel() {
+//	int64_t pp_kernel = 0;
+//	kstat_ctl_t *ksc;
+//	kstat_t *ks;
+//	kstat_named_t *knp;
+//	kid_t kid;
+//
+//	if ((ksc = kstat_open()) == NULL) {
+//		return -1;
+//	}
+//	if ((ks = kstat_lookup(ksc, "unix", 0, "system_pages")) == NULL) {
+//		return -1;
+//	}
+//	if (((kid = kstat_read(ksc, ks, NULL)) == -1) ||
+//	    ((knp = kstat_data_lookup(ks, "pp_kernel")) == NULL)) {
+//		return -1;
+//	}
+//	switch (knp->data_type) {
+//	case KSTAT_DATA_UINT64:
+//		pp_kernel = knp->value.ui64;
+//		break;
+//	case KSTAT_DATA_UINT32:
+//		pp_kernel = knp->value.ui32;
+//		break;
+//	}
+//	pp_kernel *= sysconf(_SC_PAGESIZE);
+//	return (pp_kernel > 0 ? pp_kernel : -1);
+// }
+import "C"
+
+// Get the system memory info using sysconf same as prtconf
+func getTotalMem() int64 {
+	pagesize := C.sysconf(C._SC_PAGESIZE)
+	npages := C.sysconf(C._SC_PHYS_PAGES)
+	return int64(pagesize * npages)
+}
+
+func getFreeMem() int64 {
+	pagesize := C.sysconf(C._SC_PAGESIZE)
+	npages := C.sysconf(C._SC_AVPHYS_PAGES)
+	return int64(pagesize * npages)
+}
+
+// ReadMemInfo retrieves memory statistics of the host system and returns a
+//  MemInfo type.
+func ReadMemInfo() (*MemInfo, error) {
+
+	ppKernel := C.getPpKernel()
+	MemTotal := getTotalMem()
+	MemFree := getFreeMem()
+	SwapTotal, SwapFree, err := getSysSwap()
+
+	if ppKernel < 0 || MemTotal < 0 || MemFree < 0 || SwapTotal < 0 ||
+		SwapFree < 0 {
+		return nil, fmt.Errorf("error getting system memory info %v\n", err)
+	}
+
+	meminfo := &MemInfo{}
+	// Total memory is total physical memory less than memory locked by kernel
+	meminfo.MemTotal = MemTotal - int64(ppKernel)
+	meminfo.MemFree = MemFree
+	meminfo.SwapTotal = SwapTotal
+	meminfo.SwapFree = SwapFree
+
+	return meminfo, nil
+}
+
+func getSysSwap() (int64, int64, error) {
+	var tSwap int64
+	var fSwap int64
+	var diskblksPerPage int64
+	num, err := C.swapctl(C.SC_GETNSWP, nil)
+	if err != nil {
+		return -1, -1, err
+	}
+	st := C.allocSwaptable(num)
+	_, err = C.swapctl(C.SC_LIST, unsafe.Pointer(st))
+	if err != nil {
+		C.freeSwaptable(st)
+		return -1, -1, err
+	}
+
+	diskblksPerPage = int64(C.sysconf(C._SC_PAGESIZE) >> C.DEV_BSHIFT)
+	for i := 0; i < int(num); i++ {
+		swapent := C.getSwapEnt(&st.swt_ent[0], C.int(i))
+		tSwap += int64(swapent.ste_pages) * diskblksPerPage
+		fSwap += int64(swapent.ste_free) * diskblksPerPage
+	}
+	C.freeSwaptable(st)
+	return tSwap, fSwap, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go b/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go
new file mode 100644
index 0000000000000000000000000000000000000000..3ce019dffdda9f6393f9d86267901ddb759744ce
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go
@@ -0,0 +1,8 @@
+// +build !linux,!windows,!solaris
+
+package system
+
+// ReadMemInfo is not supported on platforms other than linux and windows.
+func ReadMemInfo() (*MemInfo, error) {
+	return nil, ErrNotSupportedPlatform
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go b/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go
new file mode 100644
index 0000000000000000000000000000000000000000..883944a4c536a787a37f2b93ee4fb50ed9496dee
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go
@@ -0,0 +1,45 @@
+package system
+
+import (
+	"unsafe"
+
+	"golang.org/x/sys/windows"
+)
+
+var (
+	modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
+
+	procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx")
+)
+
+// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx
+// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770(v=vs.85).aspx
+type memorystatusex struct {
+	dwLength                uint32
+	dwMemoryLoad            uint32
+	ullTotalPhys            uint64
+	ullAvailPhys            uint64
+	ullTotalPageFile        uint64
+	ullAvailPageFile        uint64
+	ullTotalVirtual         uint64
+	ullAvailVirtual         uint64
+	ullAvailExtendedVirtual uint64
+}
+
+// ReadMemInfo retrieves memory statistics of the host system and returns a
+//  MemInfo type.
+func ReadMemInfo() (*MemInfo, error) {
+	msi := &memorystatusex{
+		dwLength: 64,
+	}
+	r1, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(msi)))
+	if r1 == 0 {
+		return &MemInfo{}, nil
+	}
+	return &MemInfo{
+		MemTotal:  int64(msi.ullTotalPhys),
+		MemFree:   int64(msi.ullAvailPhys),
+		SwapTotal: int64(msi.ullTotalPageFile),
+		SwapFree:  int64(msi.ullAvailPageFile),
+	}, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/mknod.go b/vendor/github.com/docker/docker/pkg/system/mknod.go
new file mode 100644
index 0000000000000000000000000000000000000000..73958182b4ebecbde32adcde8e4ef695a8268227
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/mknod.go
@@ -0,0 +1,22 @@
+// +build !windows
+
+package system
+
+import (
+	"syscall"
+)
+
+// Mknod creates a filesystem node (file, device special file or named pipe) named path
+// with attributes specified by mode and dev.
+func Mknod(path string, mode uint32, dev int) error {
+	return syscall.Mknod(path, mode, dev)
+}
+
+// Mkdev is used to build the value of linux devices (in /dev/) which specifies major
+// and minor number of the newly created device special file.
+// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes.
+// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major,
+// then the top 12 bits of the minor.
+func Mkdev(major int64, minor int64) uint32 {
+	return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff))
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/mknod_windows.go b/vendor/github.com/docker/docker/pkg/system/mknod_windows.go
new file mode 100644
index 0000000000000000000000000000000000000000..2e863c0215b3276196d5990020cbeae6de1f631b
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/mknod_windows.go
@@ -0,0 +1,13 @@
+// +build windows
+
+package system
+
+// Mknod is not implemented on Windows.
+func Mknod(path string, mode uint32, dev int) error {
+	return ErrNotSupportedPlatform
+}
+
+// Mkdev is not implemented on Windows.
+func Mkdev(major int64, minor int64) uint32 {
+	panic("Mkdev not implemented on Windows.")
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/path_unix.go b/vendor/github.com/docker/docker/pkg/system/path_unix.go
new file mode 100644
index 0000000000000000000000000000000000000000..c607c4db09f20c9ddea7a687afe5fa81322bdd42
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/path_unix.go
@@ -0,0 +1,14 @@
+// +build !windows
+
+package system
+
+// DefaultPathEnv is unix style list of directories to search for
+// executables. Each directory is separated from the next by a colon
+// ':' character.
+const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+
+// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter,
+// is the system drive. This is a no-op on Linux.
+func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
+	return path, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/path_windows.go b/vendor/github.com/docker/docker/pkg/system/path_windows.go
new file mode 100644
index 0000000000000000000000000000000000000000..cbfe2c1576ceb371fb3eed84895b49b0feb83beb
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/path_windows.go
@@ -0,0 +1,37 @@
+// +build windows
+
+package system
+
+import (
+	"fmt"
+	"path/filepath"
+	"strings"
+)
+
+// DefaultPathEnv is deliberately empty on Windows as the default path will be set by
+// the container. Docker has no context of what the default path should be.
+const DefaultPathEnv = ""
+
+// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path.
+// This is used, for example, when validating a user provided path in docker cp.
+// If a drive letter is supplied, it must be the system drive. The drive letter
+// is always removed. Also, it translates it to OS semantics (IOW / to \). We
+// need the path in this syntax so that it can ultimately be concatenated with
+// a Windows long-path which doesn't support drive-letters. Examples:
+// C:			--> Fail
+// C:\			--> \
+// a			--> a
+// /a			--> \a
+// d:\			--> Fail
+func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
+	if len(path) == 2 && string(path[1]) == ":" {
+		return "", fmt.Errorf("No relative path specified in %q", path)
+	}
+	if !filepath.IsAbs(path) || len(path) < 2 {
+		return filepath.FromSlash(path), nil
+	}
+	if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") {
+		return "", fmt.Errorf("The specified path is not on the system drive (C:)")
+	}
+	return filepath.FromSlash(path[2:]), nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/stat.go b/vendor/github.com/docker/docker/pkg/system/stat.go
new file mode 100644
index 0000000000000000000000000000000000000000..087034c5ec55e1723c737dcd5197c9949252d8f3
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/stat.go
@@ -0,0 +1,53 @@
+// +build !windows
+
+package system
+
+import (
+	"syscall"
+)
+
+// StatT type contains status of a file. It contains metadata
+// like permission, owner, group, size, etc about a file.
+type StatT struct {
+	mode uint32
+	uid  uint32
+	gid  uint32
+	rdev uint64
+	size int64
+	mtim syscall.Timespec
+}
+
+// Mode returns file's permission mode.
+func (s StatT) Mode() uint32 {
+	return s.mode
+}
+
+// UID returns file's user id of owner.
+func (s StatT) UID() uint32 {
+	return s.uid
+}
+
+// GID returns file's group id of owner.
+func (s StatT) GID() uint32 {
+	return s.gid
+}
+
+// Rdev returns file's device ID (if it's special file).
+func (s StatT) Rdev() uint64 {
+	return s.rdev
+}
+
+// Size returns file's size.
+func (s StatT) Size() int64 {
+	return s.size
+}
+
+// Mtim returns file's last modification time.
+func (s StatT) Mtim() syscall.Timespec {
+	return s.mtim
+}
+
+// GetLastModification returns file's last modification time.
+func (s StatT) GetLastModification() syscall.Timespec {
+	return s.Mtim()
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/stat_darwin.go b/vendor/github.com/docker/docker/pkg/system/stat_darwin.go
new file mode 100644
index 0000000000000000000000000000000000000000..f0742f59e54dca5e44f453333240a1f4d8fc855d
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/stat_darwin.go
@@ -0,0 +1,32 @@
+package system
+
+import (
+	"syscall"
+)
+
+// fromStatT creates a system.StatT type from a syscall.Stat_t type
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+	return &StatT{size: s.Size,
+		mode: uint32(s.Mode),
+		uid:  s.Uid,
+		gid:  s.Gid,
+		rdev: uint64(s.Rdev),
+		mtim: s.Mtimespec}, nil
+}
+
+// FromStatT loads a system.StatT from a syscall.Stat_t.
+func FromStatT(s *syscall.Stat_t) (*StatT, error) {
+	return fromStatT(s)
+}
+
+// Stat takes a path to a file and returns
+// a system.StatT type pertaining to that file.
+//
+// Throws an error if the file does not exist
+func Stat(path string) (*StatT, error) {
+	s := &syscall.Stat_t{}
+	if err := syscall.Stat(path, s); err != nil {
+		return nil, err
+	}
+	return fromStatT(s)
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go b/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go
new file mode 100644
index 0000000000000000000000000000000000000000..d0fb6f15190a36ee20cefa19372951d796222261
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go
@@ -0,0 +1,27 @@
+package system
+
+import (
+	"syscall"
+)
+
+// fromStatT converts a syscall.Stat_t type to a system.Stat_t type
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+	return &StatT{size: s.Size,
+		mode: uint32(s.Mode),
+		uid:  s.Uid,
+		gid:  s.Gid,
+		rdev: uint64(s.Rdev),
+		mtim: s.Mtimespec}, nil
+}
+
+// Stat takes a path to a file and returns
+// a system.Stat_t type pertaining to that file.
+//
+// Throws an error if the file does not exist
+func Stat(path string) (*StatT, error) {
+	s := &syscall.Stat_t{}
+	if err := syscall.Stat(path, s); err != nil {
+		return nil, err
+	}
+	return fromStatT(s)
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/stat_linux.go b/vendor/github.com/docker/docker/pkg/system/stat_linux.go
new file mode 100644
index 0000000000000000000000000000000000000000..8b1eded1387af1bfaa3669afb7937549830b17d9
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/stat_linux.go
@@ -0,0 +1,33 @@
+package system
+
+import (
+	"syscall"
+)
+
+// fromStatT converts a syscall.Stat_t type to a system.Stat_t type
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+	return &StatT{size: s.Size,
+		mode: s.Mode,
+		uid:  s.Uid,
+		gid:  s.Gid,
+		rdev: s.Rdev,
+		mtim: s.Mtim}, nil
+}
+
+// FromStatT exists only on linux, and loads a system.StatT from a
+// syscall.Stat_t.
+func FromStatT(s *syscall.Stat_t) (*StatT, error) {
+	return fromStatT(s)
+}
+
+// Stat takes a path to a file and returns
+// a system.StatT type pertaining to that file.
+//
+// Throws an error if the file does not exist
+func Stat(path string) (*StatT, error) {
+	s := &syscall.Stat_t{}
+	if err := syscall.Stat(path, s); err != nil {
+		return nil, err
+	}
+	return fromStatT(s)
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go b/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go
new file mode 100644
index 0000000000000000000000000000000000000000..3c3b71fb21969b051b37a72c4f775a8a02d56657
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go
@@ -0,0 +1,15 @@
+package system
+
+import (
+	"syscall"
+)
+
+// fromStatT creates a system.StatT type from a syscall.Stat_t type
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+	return &StatT{size: s.Size,
+		mode: uint32(s.Mode),
+		uid:  s.Uid,
+		gid:  s.Gid,
+		rdev: uint64(s.Rdev),
+		mtim: s.Mtim}, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/stat_solaris.go b/vendor/github.com/docker/docker/pkg/system/stat_solaris.go
new file mode 100644
index 0000000000000000000000000000000000000000..0216985a25257a6d7c98df4ed97316de860d1c33
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/stat_solaris.go
@@ -0,0 +1,34 @@
+// +build solaris
+
+package system
+
+import (
+	"syscall"
+)
+
+// fromStatT creates a system.StatT type from a syscall.Stat_t type
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+	return &StatT{size: s.Size,
+		mode: uint32(s.Mode),
+		uid:  s.Uid,
+		gid:  s.Gid,
+		rdev: uint64(s.Rdev),
+		mtim: s.Mtim}, nil
+}
+
+// FromStatT loads a system.StatT from a syscall.Stat_t.
+func FromStatT(s *syscall.Stat_t) (*StatT, error) {
+	return fromStatT(s)
+}
+
+// Stat takes a path to a file and returns
+// a system.StatT type pertaining to that file.
+//
+// Throws an error if the file does not exist
+func Stat(path string) (*StatT, error) {
+	s := &syscall.Stat_t{}
+	if err := syscall.Stat(path, s); err != nil {
+		return nil, err
+	}
+	return fromStatT(s)
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/stat_unsupported.go b/vendor/github.com/docker/docker/pkg/system/stat_unsupported.go
new file mode 100644
index 0000000000000000000000000000000000000000..5d85f523cf9ee42d8f518ce3d06035b6c6cea380
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/stat_unsupported.go
@@ -0,0 +1,17 @@
+// +build !linux,!windows,!freebsd,!solaris,!openbsd,!darwin
+
+package system
+
+import (
+	"syscall"
+)
+
+// fromStatT creates a system.StatT type from a syscall.Stat_t type
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+	return &StatT{size: s.Size,
+		mode: uint32(s.Mode),
+		uid:  s.Uid,
+		gid:  s.Gid,
+		rdev: uint64(s.Rdev),
+		mtim: s.Mtimespec}, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/stat_windows.go b/vendor/github.com/docker/docker/pkg/system/stat_windows.go
new file mode 100644
index 0000000000000000000000000000000000000000..39490c625c03e94d0036c535dfc5c9b0b037c328
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/stat_windows.go
@@ -0,0 +1,43 @@
+// +build windows
+
+package system
+
+import (
+	"os"
+	"time"
+)
+
+// StatT type contains status of a file. It contains metadata
+// like name, permission, size, etc about a file.
+type StatT struct {
+	name    string
+	size    int64
+	mode    os.FileMode
+	modTime time.Time
+	isDir   bool
+}
+
+// Name returns file's name.
+func (s StatT) Name() string {
+	return s.name
+}
+
+// Size returns file's size.
+func (s StatT) Size() int64 {
+	return s.size
+}
+
+// Mode returns file's permission mode.
+func (s StatT) Mode() os.FileMode {
+	return s.mode
+}
+
+// ModTime returns file's last modification time.
+func (s StatT) ModTime() time.Time {
+	return s.modTime
+}
+
+// IsDir returns whether file is actually a directory.
+func (s StatT) IsDir() bool {
+	return s.isDir
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_unix.go b/vendor/github.com/docker/docker/pkg/system/syscall_unix.go
new file mode 100644
index 0000000000000000000000000000000000000000..3ae912846844bebbe5dc49ed72489cae59aa3154
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/syscall_unix.go
@@ -0,0 +1,17 @@
+// +build linux freebsd
+
+package system
+
+import "syscall"
+
// Unmount is a platform-specific helper function to call
// the unmount syscall.
// dest is the mount point to detach; flags are left at zero
// (no MNT_FORCE / MNT_DETACH behavior).
func Unmount(dest string) error {
	return syscall.Unmount(dest, 0)
}
+
// CommandLineToArgv should not be used on Unix.
// It simply returns commandLine in the only element in the returned array.
func CommandLineToArgv(commandLine string) ([]string, error) {
	args := make([]string, 1)
	args[0] = commandLine
	return args, nil
}
diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_windows.go b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go
new file mode 100644
index 0000000000000000000000000000000000000000..1f311874f46c71d8574765a1d9a19bda1ac419f4
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go
@@ -0,0 +1,105 @@
+package system
+
+import (
+	"syscall"
+	"unsafe"
+
+	"github.com/Sirupsen/logrus"
+)
+
var (
	// ntuserApiset lazily loads the win32k/ntuser API set DLL; only its
	// loadability is probed (see HasWin32KSupport below).
	ntuserApiset      = syscall.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0")
	// procGetVersionExW resolves GetVersionExW from modkernel32, which is
	// declared elsewhere in this package — presumably a LazyDLL for
	// kernel32.dll; confirm against the sibling file.
	procGetVersionExW = modkernel32.NewProc("GetVersionExW")
)
+
// OSVersion is a wrapper for Windows version information
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx
type OSVersion struct {
	Version      uint32 // raw packed DWORD from GetVersion
	MajorVersion uint8  // low byte of Version
	MinorVersion uint8  // second byte of Version
	Build        uint16 // high word of Version
}

// osVersionInfoEx mirrors the Win32 OSVERSIONINFOEXW layout; field order
// and sizes must match the C struct exactly (284 bytes total).
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx
type osVersionInfoEx struct {
	OSVersionInfoSize uint32 // must be set to the struct size before the call
	MajorVersion      uint32
	MinorVersion      uint32
	BuildNumber       uint32
	PlatformID        uint32
	CSDVersion        [128]uint16 // UTF-16 service-pack string
	ServicePackMajor  uint16
	ServicePackMinor  uint16
	SuiteMask         uint16
	ProductType       byte
	Reserve           byte
}
+
// GetOSVersion gets the operating system version on Windows. Note that
// docker.exe must be manifested to get the correct version information.
func GetOSVersion() OSVersion {
	var err error
	osv := OSVersion{}
	osv.Version, err = syscall.GetVersion()
	if err != nil {
		// GetVersion never fails.
		panic(err)
	}
	// Unpack the packed DWORD: low byte = major, next byte = minor,
	// high word = build number.
	osv.MajorVersion = uint8(osv.Version & 0xFF)
	osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF)
	osv.Build = uint16(osv.Version >> 16)
	return osv
}
+
// IsWindowsClient returns true if the SKU is client
// @engine maintainers - this function should not be removed or modified as it
// is used to enforce licensing restrictions on Windows.
func IsWindowsClient() bool {
	// 284 is the byte size of osVersionInfoEx; the API requires it in
	// OSVersionInfoSize before the call.
	osviex := &osVersionInfoEx{OSVersionInfoSize: 284}
	r1, _, err := procGetVersionExW.Call(uintptr(unsafe.Pointer(osviex)))
	if r1 == 0 {
		// Fail closed: if the version query fails, assume a server SKU.
		logrus.Warnf("GetVersionExW failed - assuming server SKU: %v", err)
		return false
	}
	// VER_NT_WORKSTATION from the Win32 headers.
	const verNTWorkstation = 0x00000001
	return osviex.ProductType == verNTWorkstation
}
+
// Unmount is a platform-specific helper function to call
// the unmount syscall. Not supported on Windows
func Unmount(dest string) error {
	// Deliberate no-op: there is nothing to unmount on Windows; callers
	// treat this as success.
	return nil
}
+
+// CommandLineToArgv wraps the Windows syscall to turn a commandline into an argument array.
+func CommandLineToArgv(commandLine string) ([]string, error) {
+	var argc int32
+
+	argsPtr, err := syscall.UTF16PtrFromString(commandLine)
+	if err != nil {
+		return nil, err
+	}
+
+	argv, err := syscall.CommandLineToArgv(argsPtr, &argc)
+	if err != nil {
+		return nil, err
+	}
+	defer syscall.LocalFree(syscall.Handle(uintptr(unsafe.Pointer(argv))))
+
+	newArgs := make([]string, argc)
+	for i, v := range (*argv)[:argc] {
+		newArgs[i] = string(syscall.UTF16ToString((*v)[:]))
+	}
+
+	return newArgs, nil
+}
+
// HasWin32KSupport determines whether containers that depend on win32k can
// run on this machine. Win32k is the driver used to implement windowing.
func HasWin32KSupport() bool {
	// For now, check for ntuser API support on the host. In the future, a host
	// may support win32k in containers even if the host does not support ntuser
	// APIs.
	// LazyDLL.Load caches its result, so repeated calls are cheap.
	return ntuserApiset.Load() == nil
}
diff --git a/vendor/github.com/docker/docker/pkg/system/umask.go b/vendor/github.com/docker/docker/pkg/system/umask.go
new file mode 100644
index 0000000000000000000000000000000000000000..3d0146b01ad4bc9be4cf112f25e727c1a70eb550
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/umask.go
@@ -0,0 +1,13 @@
+// +build !windows
+
+package system
+
+import (
+	"syscall"
+)
+
// Umask sets current process's file mode creation mask to newmask
// and returns oldmask. The error is always nil on Unix; it exists only
// to mirror the Windows signature.
func Umask(newmask int) (oldmask int, err error) {
	oldmask = syscall.Umask(newmask)
	return oldmask, nil
}
diff --git a/vendor/github.com/docker/docker/pkg/system/umask_windows.go b/vendor/github.com/docker/docker/pkg/system/umask_windows.go
new file mode 100644
index 0000000000000000000000000000000000000000..13f1de1769c79a608984504fde7896ffb819afd5
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/umask_windows.go
@@ -0,0 +1,9 @@
+// +build windows
+
+package system
+
// Umask is not supported on the windows platform.
// It always returns (0, ErrNotSupportedPlatform); the sentinel error is
// declared elsewhere in this package.
func Umask(newmask int) (oldmask int, err error) {
	// should not be called on cli code path
	return 0, ErrNotSupportedPlatform
}
diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go b/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go
new file mode 100644
index 0000000000000000000000000000000000000000..e2eac3b553e0b6c35d64eb9aead347ddfa2ed93c
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go
@@ -0,0 +1,22 @@
+package system
+
+import (
+	"syscall"
+	"unsafe"
+)
+
// LUtimesNano is used to change access and modification time of the specified path.
// It's used for symbol link file because syscall.UtimesNano doesn't support a NOFOLLOW flag atm.
//
// ts presumably holds {atime, mtime} as lutimes(2) expects — confirm
// against callers before relying on the order.
func LUtimesNano(path string, ts []syscall.Timespec) error {
	var _path *byte
	_path, err := syscall.BytePtrFromString(path)
	if err != nil {
		return err
	}

	// ENOSYS is deliberately tolerated: on kernels lacking lutimes(2) the
	// call degrades to a best-effort no-op instead of failing.
	if _, _, err := syscall.Syscall(syscall.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != syscall.ENOSYS {
		return err
	}

	return nil
}
diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_linux.go b/vendor/github.com/docker/docker/pkg/system/utimes_linux.go
new file mode 100644
index 0000000000000000000000000000000000000000..fc8a1aba95cb12e00f159b0e6dfbcf2d728670a9
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/utimes_linux.go
@@ -0,0 +1,26 @@
+package system
+
+import (
+	"syscall"
+	"unsafe"
+)
+
// LUtimesNano is used to change access and modification time of the specified path.
// It's used for symbol link file because syscall.UtimesNano doesn't support a NOFOLLOW flag atm.
func LUtimesNano(path string, ts []syscall.Timespec) error {
	// AT_FDCWD and AT_SYMLINK_NOFOLLOW; not exported by the syscall package.
	// Kept as variables (not consts) because -100 cannot be converted to
	// uintptr at compile time.
	dirFd := -100
	noFollow := 0x100

	pathPtr, err := syscall.BytePtrFromString(path)
	if err != nil {
		return err
	}

	// ENOSYS is tolerated so the call is a best-effort no-op on kernels
	// without utimensat(2).
	_, _, errno := syscall.Syscall6(syscall.SYS_UTIMENSAT, uintptr(dirFd), uintptr(unsafe.Pointer(pathPtr)), uintptr(unsafe.Pointer(&ts[0])), uintptr(noFollow), 0, 0)
	if errno != 0 && errno != syscall.ENOSYS {
		return errno
	}

	return nil
}
diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go b/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go
new file mode 100644
index 0000000000000000000000000000000000000000..139714544d0266da9810ce8986f55a10204ac582
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go
@@ -0,0 +1,10 @@
+// +build !linux,!freebsd
+
+package system
+
+import "syscall"
+
+// LUtimesNano is only supported on linux and freebsd.
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+	return ErrNotSupportedPlatform
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go b/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go
new file mode 100644
index 0000000000000000000000000000000000000000..d2e2c05799839419ac12bdcf5496f16188f8b049
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go
@@ -0,0 +1,63 @@
+package system
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+// Lgetxattr retrieves the value of the extended attribute identified by attr
+// and associated with the given path in the file system.
+// It will returns a nil slice and nil error if the xattr is not set.
+func Lgetxattr(path string, attr string) ([]byte, error) {
+	pathBytes, err := syscall.BytePtrFromString(path)
+	if err != nil {
+		return nil, err
+	}
+	attrBytes, err := syscall.BytePtrFromString(attr)
+	if err != nil {
+		return nil, err
+	}
+
+	dest := make([]byte, 128)
+	destBytes := unsafe.Pointer(&dest[0])
+	sz, _, errno := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0)
+	if errno == syscall.ENODATA {
+		return nil, nil
+	}
+	if errno == syscall.ERANGE {
+		dest = make([]byte, sz)
+		destBytes := unsafe.Pointer(&dest[0])
+		sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0)
+	}
+	if errno != 0 {
+		return nil, errno
+	}
+
+	return dest[:sz], nil
+}
+
+var _zero uintptr
+
+// Lsetxattr sets the value of the extended attribute identified by attr
+// and associated with the given path in the file system.
+func Lsetxattr(path string, attr string, data []byte, flags int) error {
+	pathBytes, err := syscall.BytePtrFromString(path)
+	if err != nil {
+		return err
+	}
+	attrBytes, err := syscall.BytePtrFromString(attr)
+	if err != nil {
+		return err
+	}
+	var dataBytes unsafe.Pointer
+	if len(data) > 0 {
+		dataBytes = unsafe.Pointer(&data[0])
+	} else {
+		dataBytes = unsafe.Pointer(&_zero)
+	}
+	_, _, errno := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(dataBytes), uintptr(len(data)), uintptr(flags), 0)
+	if errno != 0 {
+		return errno
+	}
+	return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go b/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go
new file mode 100644
index 0000000000000000000000000000000000000000..0114f2227cf0cd3c5792c40e971d888ed976abab
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go
@@ -0,0 +1,13 @@
+// +build !linux
+
+package system
+
+// Lgetxattr is not supported on platforms other than linux.
+func Lgetxattr(path string, attr string) ([]byte, error) {
+	return nil, ErrNotSupportedPlatform
+}
+
+// Lsetxattr is not supported on platforms other than linux.
+func Lsetxattr(path string, attr string, data []byte, flags int) error {
+	return ErrNotSupportedPlatform
+}
diff --git a/vendor/github.com/docker/docker/pkg/tarsum/builder_context.go b/vendor/github.com/docker/docker/pkg/tarsum/builder_context.go
new file mode 100644
index 0000000000000000000000000000000000000000..b42983e984931283ef54255a9f4ffbd73a636e2f
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/tarsum/builder_context.go
@@ -0,0 +1,21 @@
+package tarsum
+
// BuilderContext is an interface extending TarSum by adding the Remove method.
// In general there was concern about adding this method to TarSum itself
// so instead it is being added just to "BuilderContext" which will then
// only be used during the .dockerignore file processing
// - see builder/evaluator.go
type BuilderContext interface {
	TarSum
	// Remove drops every recorded file sum whose name matches the argument.
	Remove(string)
}
+
+func (bc *tarSum) Remove(filename string) {
+	for i, fis := range bc.sums {
+		if fis.Name() == filename {
+			bc.sums = append(bc.sums[:i], bc.sums[i+1:]...)
+			// Note, we don't just return because there could be
+			// more than one with this name
+		}
+	}
+}
diff --git a/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums.go b/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums.go
new file mode 100644
index 0000000000000000000000000000000000000000..5abf5e7ba39e6a5dd60384994f4e8099319f0d69
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums.go
@@ -0,0 +1,126 @@
+package tarsum
+
+import "sort"
+
// FileInfoSumInterface provides an interface for accessing file checksum
// information within a tar file. This info is accessed through interface
// so the actual name and sum cannot be melded with.
type FileInfoSumInterface interface {
	// Name returns the file name as stored in the archive.
	Name() string
	// Sum returns the checksum of this particular file and its headers.
	Sum() string
	// Pos returns the position of the file in the tar.
	Pos() int64
}
+
// fileInfoSum is the concrete FileInfoSumInterface implementation recorded
// for each file while streaming a tar archive.
type fileInfoSum struct {
	name string // file name as stored in the archive
	sum  string // hex-encoded checksum of the file's headers and body
	pos  int64  // position of the file within the archive
}

// Name returns the file's name.
func (f fileInfoSum) Name() string { return f.name }

// Sum returns the file's checksum.
func (f fileInfoSum) Sum() string { return f.sum }

// Pos returns the file's position in the archive.
func (f fileInfoSum) Pos() int64 { return f.pos }
+
+// FileInfoSums provides a list of FileInfoSumInterfaces.
+type FileInfoSums []FileInfoSumInterface
+
+// GetFile returns the first FileInfoSumInterface with a matching name.
+func (fis FileInfoSums) GetFile(name string) FileInfoSumInterface {
+	for i := range fis {
+		if fis[i].Name() == name {
+			return fis[i]
+		}
+	}
+	return nil
+}
+
+// GetAllFile returns a FileInfoSums with all matching names.
+func (fis FileInfoSums) GetAllFile(name string) FileInfoSums {
+	f := FileInfoSums{}
+	for i := range fis {
+		if fis[i].Name() == name {
+			f = append(f, fis[i])
+		}
+	}
+	return f
+}
+
+// GetDuplicatePaths returns a FileInfoSums with all duplicated paths.
+func (fis FileInfoSums) GetDuplicatePaths() (dups FileInfoSums) {
+	seen := make(map[string]int, len(fis)) // allocate earl. no need to grow this map.
+	for i := range fis {
+		f := fis[i]
+		if _, ok := seen[f.Name()]; ok {
+			dups = append(dups, f)
+		} else {
+			seen[f.Name()] = 0
+		}
+	}
+	return dups
+}
+
+// Len returns the size of the FileInfoSums.
+func (fis FileInfoSums) Len() int { return len(fis) }
+
+// Swap swaps two FileInfoSum values if a FileInfoSums list.
+func (fis FileInfoSums) Swap(i, j int) { fis[i], fis[j] = fis[j], fis[i] }
+
+// SortByPos sorts FileInfoSums content by position.
+func (fis FileInfoSums) SortByPos() {
+	sort.Sort(byPos{fis})
+}
+
+// SortByNames sorts FileInfoSums content by name.
+func (fis FileInfoSums) SortByNames() {
+	sort.Sort(byName{fis})
+}
+
+// SortBySums sorts FileInfoSums content by sums.
+func (fis FileInfoSums) SortBySums() {
+	dups := fis.GetDuplicatePaths()
+	if len(dups) > 0 {
+		sort.Sort(bySum{fis, dups})
+	} else {
+		sort.Sort(bySum{fis, nil})
+	}
+}
+
+// byName is a sort.Sort helper for sorting by file names.
+// If names are the same, order them by their appearance in the tar archive
+type byName struct{ FileInfoSums }
+
+func (bn byName) Less(i, j int) bool {
+	if bn.FileInfoSums[i].Name() == bn.FileInfoSums[j].Name() {
+		return bn.FileInfoSums[i].Pos() < bn.FileInfoSums[j].Pos()
+	}
+	return bn.FileInfoSums[i].Name() < bn.FileInfoSums[j].Name()
+}
+
+// bySum is a sort.Sort helper for sorting by the sums of all the fileinfos in the tar archive
+type bySum struct {
+	FileInfoSums
+	dups FileInfoSums
+}
+
+func (bs bySum) Less(i, j int) bool {
+	if bs.dups != nil && bs.FileInfoSums[i].Name() == bs.FileInfoSums[j].Name() {
+		return bs.FileInfoSums[i].Pos() < bs.FileInfoSums[j].Pos()
+	}
+	return bs.FileInfoSums[i].Sum() < bs.FileInfoSums[j].Sum()
+}
+
+// byPos is a sort.Sort helper for sorting by the sums of all the fileinfos by their original order
+type byPos struct{ FileInfoSums }
+
+func (bp byPos) Less(i, j int) bool {
+	return bp.FileInfoSums[i].Pos() < bp.FileInfoSums[j].Pos()
+}
diff --git a/vendor/github.com/docker/docker/pkg/tarsum/tarsum.go b/vendor/github.com/docker/docker/pkg/tarsum/tarsum.go
new file mode 100644
index 0000000000000000000000000000000000000000..154788db82e706c3af153918bec4a39018102483
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/tarsum/tarsum.go
@@ -0,0 +1,295 @@
+// Package tarsum provides algorithms to perform checksum calculation on
+// filesystem layers.
+//
+// The transportation of filesystems, regarding Docker, is done with tar(1)
+// archives. There are a variety of tar serialization formats [2], and a key
+// concern here is ensuring a repeatable checksum given a set of inputs from a
+// generic tar archive. Types of transportation include distribution to and from a
+// registry endpoint, saving and loading through commands or Docker daemon APIs,
+// transferring the build context from client to Docker daemon, and committing the
+// filesystem of a container to become an image.
+//
+// As tar archives are used for transit, but not preserved in many situations, the
+// focus of the algorithm is to ensure the integrity of the preserved filesystem,
+// while maintaining a deterministic accountability. This includes neither
+// constraining the ordering or manipulation of the files during the creation or
+// unpacking of the archive, nor include additional metadata state about the file
+// system attributes.
+package tarsum
+
+import (
+	"archive/tar"
+	"bytes"
+	"compress/gzip"
+	"crypto"
+	"crypto/sha256"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"hash"
+	"io"
+	"path"
+	"strings"
+)
+
const (
	// Size tiers used by tarSum.Read to grow its reusable scratch buffer.
	buf8K  = 8 * 1024
	buf16K = 16 * 1024
	buf32K = 32 * 1024
)
+
// NewTarSum creates a new interface for calculating a fixed time checksum of a
// tar archive.
//
// This is used for calculating checksums of layers of an image, in some cases
// including the byte payload of the image's json metadata as well, and for
// calculating the checksums for buildcache.
//
// The checksum is computed with DefaultTHash (sha256); use NewTarSumHash to
// supply a different hash.
func NewTarSum(r io.Reader, dc bool, v Version) (TarSum, error) {
	return NewTarSumHash(r, dc, v, DefaultTHash)
}
+
+// NewTarSumHash creates a new TarSum, providing a THash to use rather than
+// the DefaultTHash.
+func NewTarSumHash(r io.Reader, dc bool, v Version, tHash THash) (TarSum, error) {
+	headerSelector, err := getTarHeaderSelector(v)
+	if err != nil {
+		return nil, err
+	}
+	ts := &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v, headerSelector: headerSelector, tHash: tHash}
+	err = ts.initTarSum()
+	return ts, err
+}
+
+// NewTarSumForLabel creates a new TarSum using the provided TarSum version+hash label.
+func NewTarSumForLabel(r io.Reader, disableCompression bool, label string) (TarSum, error) {
+	parts := strings.SplitN(label, "+", 2)
+	if len(parts) != 2 {
+		return nil, errors.New("tarsum label string should be of the form: {tarsum_version}+{hash_name}")
+	}
+
+	versionName, hashName := parts[0], parts[1]
+
+	version, ok := tarSumVersionsByName[versionName]
+	if !ok {
+		return nil, fmt.Errorf("unknown TarSum version name: %q", versionName)
+	}
+
+	hashConfig, ok := standardHashConfigs[hashName]
+	if !ok {
+		return nil, fmt.Errorf("unknown TarSum hash name: %q", hashName)
+	}
+
+	tHash := NewTHash(hashConfig.name, hashConfig.hash.New)
+
+	return NewTarSumHash(r, disableCompression, version, tHash)
+}
+
// TarSum is the generic interface for calculating fixed time
// checksums of a tar archive.
type TarSum interface {
	io.Reader
	// GetSums returns the per-file checksums recorded so far.
	GetSums() FileInfoSums
	// Sum finalizes the checksum string; the byte argument (may be nil)
	// is mixed into the hash before the per-file sums.
	Sum([]byte) string
	// Version reports the TarSum version this instance calculates.
	Version() Version
	// Hash reports the hashing algorithm in use.
	Hash() THash
}
+
// tarSum struct is the structure for a Version0 checksum calculation.
type tarSum struct {
	io.Reader
	tarR               *tar.Reader       // reads the caller-supplied archive
	tarW               *tar.Writer       // re-serializes entries into bufTar
	writer             writeCloseFlusher // gzip or nop wrapper draining bufTar into bufWriter
	bufTar             *bytes.Buffer     // staging area for re-written tar bytes
	bufWriter          *bytes.Buffer     // final output served to Read callers
	bufData            []byte            // reusable scratch buffer for Read
	h                  hash.Hash         // running hash of the current file
	tHash              THash             // hash algorithm generator
	sums               FileInfoSums      // completed per-file checksums
	fileCounter        int64             // position assigned to the next finished file
	currentFile        string            // cleaned name of the entry being hashed
	finished           bool              // set once the archive is fully consumed
	first              bool              // true until the first header has been seen
	DisableCompression bool              // false by default. When false, the output gzip compressed.
	tarSumVersion      Version           // this field is not exported so it can not be mutated during use
	headerSelector     tarHeaderSelector // handles selecting and ordering headers for files in the archive
}
+
// Hash returns the THash (hash algorithm generator) this tarSum uses.
func (ts tarSum) Hash() THash {
	return ts.tHash
}

// Version returns the TarSum version being calculated.
func (ts tarSum) Version() Version {
	return ts.tarSumVersion
}
+
// THash provides a hash.Hash type generator and its name.
type THash interface {
	// Hash returns a fresh hash.Hash instance.
	Hash() hash.Hash
	// Name returns the label of the algorithm (e.g. "sha256").
	Name() string
}

// NewTHash is a convenience method for creating a THash.
func NewTHash(name string, h func() hash.Hash) THash {
	return simpleTHash{label: name, newHash: h}
}

// tHashConfig pairs a hash label with its crypto.Hash identifier.
type tHashConfig struct {
	name string
	hash crypto.Hash
}

var (
	// NOTE: DO NOT include MD5 or SHA1, which are considered insecure.
	standardHashConfigs = map[string]tHashConfig{
		"sha256": {name: "sha256", hash: crypto.SHA256},
		"sha512": {name: "sha512", hash: crypto.SHA512},
	}
)

// DefaultTHash is default TarSum hashing algorithm - "sha256".
var DefaultTHash = NewTHash("sha256", sha256.New)

// simpleTHash is the trivial THash implementation backing NewTHash.
type simpleTHash struct {
	label   string
	newHash func() hash.Hash
}

func (sth simpleTHash) Name() string    { return sth.label }
func (sth simpleTHash) Hash() hash.Hash { return sth.newHash() }
+
// encodeHeader feeds the version-selected header fields of h into the
// running per-file hash, as concatenated "{key}{value}" pairs with no
// separator (see headerSelector for which fields and in what order).
func (ts *tarSum) encodeHeader(h *tar.Header) error {
	for _, elem := range ts.headerSelector.selectHeaders(h) {
		if _, err := ts.h.Write([]byte(elem[0] + elem[1])); err != nil {
			return err
		}
	}
	return nil
}
+
// initTarSum wires up the streaming pipeline: tarR reads the source
// archive, tarW re-writes entries into bufTar, and writer (gzip unless
// DisableCompression is set) drains bufTar into bufWriter, which Read
// serves to callers. As written it always returns nil.
func (ts *tarSum) initTarSum() error {
	ts.bufTar = bytes.NewBuffer([]byte{})
	ts.bufWriter = bytes.NewBuffer([]byte{})
	ts.tarR = tar.NewReader(ts.Reader)
	ts.tarW = tar.NewWriter(ts.bufTar)
	if !ts.DisableCompression {
		ts.writer = gzip.NewWriter(ts.bufWriter)
	} else {
		ts.writer = &nopCloseFlusher{Writer: ts.bufWriter}
	}
	// Fall back to the default hash when none was supplied.
	if ts.tHash == nil {
		ts.tHash = DefaultTHash
	}
	ts.h = ts.tHash.Hash()
	ts.h.Reset()
	ts.first = true
	ts.sums = FileInfoSums{}
	return nil
}
+
// Read implements io.Reader. Bytes pulled from the current tar entry are
// simultaneously hashed (per file) and re-written through tarW into the
// (optionally gzipped) bufWriter, from which the caller's buf is served.
// io.EOF from the current entry marks a file boundary: the finished file's
// sum is recorded and the next header is processed.
func (ts *tarSum) Read(buf []byte) (int, error) {
	if ts.finished {
		// Archive fully consumed; just drain what remains in bufWriter.
		return ts.bufWriter.Read(buf)
	}
	// Grow the scratch buffer in fixed tiers so repeated Reads with similar
	// sizes reuse one allocation.
	if len(ts.bufData) < len(buf) {
		switch {
		case len(buf) <= buf8K:
			ts.bufData = make([]byte, buf8K)
		case len(buf) <= buf16K:
			ts.bufData = make([]byte, buf16K)
		case len(buf) <= buf32K:
			ts.bufData = make([]byte, buf32K)
		default:
			ts.bufData = make([]byte, len(buf))
		}
	}
	buf2 := ts.bufData[:len(buf)]

	n, err := ts.tarR.Read(buf2)
	if err != nil {
		if err == io.EOF {
			// End of the current entry: flush its tail into the hash.
			if _, err := ts.h.Write(buf2[:n]); err != nil {
				return 0, err
			}
			if !ts.first {
				// Record the finished file's checksum and archive position.
				ts.sums = append(ts.sums, fileInfoSum{name: ts.currentFile, sum: hex.EncodeToString(ts.h.Sum(nil)), pos: ts.fileCounter})
				ts.fileCounter++
				ts.h.Reset()
			} else {
				// First EOF happens before any file data; nothing to record.
				ts.first = false
			}

			currentHeader, err := ts.tarR.Next()
			if err != nil {
				if err == io.EOF {
					// End of archive: close the tar writer and the
					// compressor so all buffered bytes reach bufWriter.
					if err := ts.tarW.Close(); err != nil {
						return 0, err
					}
					if _, err := io.Copy(ts.writer, ts.bufTar); err != nil {
						return 0, err
					}
					if err := ts.writer.Close(); err != nil {
						return 0, err
					}
					ts.finished = true
					return n, nil
				}
				return n, err
			}
			// Start hashing the next entry: header fields first, then body.
			ts.currentFile = path.Clean(currentHeader.Name)
			if err := ts.encodeHeader(currentHeader); err != nil {
				return 0, err
			}
			if err := ts.tarW.WriteHeader(currentHeader); err != nil {
				return 0, err
			}
			if _, err := ts.tarW.Write(buf2[:n]); err != nil {
				return 0, err
			}
			ts.tarW.Flush()
			if _, err := io.Copy(ts.writer, ts.bufTar); err != nil {
				return 0, err
			}
			ts.writer.Flush()

			return ts.bufWriter.Read(buf)
		}
		return n, err
	}

	// Filling the hash buffer
	if _, err = ts.h.Write(buf2[:n]); err != nil {
		return 0, err
	}

	// Filling the tar writer
	if _, err = ts.tarW.Write(buf2[:n]); err != nil {
		return 0, err
	}
	ts.tarW.Flush()

	// Filling the output writer
	if _, err = io.Copy(ts.writer, ts.bufTar); err != nil {
		return 0, err
	}
	ts.writer.Flush()

	return ts.bufWriter.Read(buf)
}
+
// Sum returns the final checksum string for the archive, formatted as
// {version}+{hashName}:{hex}. extra, when non-nil, is hashed before the
// per-file sums (callers use it to mix in additional payload such as
// image metadata). Note: sorts ts.sums in place as a side effect.
func (ts *tarSum) Sum(extra []byte) string {
	ts.sums.SortBySums()
	h := ts.tHash.Hash()
	if extra != nil {
		h.Write(extra)
	}
	for _, fis := range ts.sums {
		h.Write([]byte(fis.Sum()))
	}
	checksum := ts.Version().String() + "+" + ts.tHash.Name() + ":" + hex.EncodeToString(h.Sum(nil))
	return checksum
}
+
// GetSums returns the per-file checksums recorded so far. The internal
// slice is returned without copying, so callers share its backing array.
func (ts *tarSum) GetSums() FileInfoSums {
	return ts.sums
}
diff --git a/vendor/github.com/docker/docker/pkg/tarsum/tarsum_spec.md b/vendor/github.com/docker/docker/pkg/tarsum/tarsum_spec.md
new file mode 100644
index 0000000000000000000000000000000000000000..89b2e49f985586cfeb41df4086b9dbaba3e5b2e2
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/tarsum/tarsum_spec.md
@@ -0,0 +1,230 @@
+page_title: TarSum checksum specification
+page_description: Documentation for algorithms used in the TarSum checksum calculation
+page_keywords: docker, checksum, validation, tarsum
+
+# TarSum Checksum Specification
+
+## Abstract
+
+This document describes the algorithms used in performing the TarSum checksum
+calculation on filesystem layers, the need for this method over existing
+methods, and the versioning of this calculation.
+
+## Warning
+
+This checksum algorithm is for best-effort comparison of file trees with fuzzy logic.
+
+This is _not_ a cryptographic attestation, and should not be considered secure.
+
+## Introduction
+
+The transportation of filesystems, regarding Docker, is done with tar(1)
+archives. There are a variety of tar serialization formats [2], and a key
+concern here is ensuring a repeatable checksum given a set of inputs from a
+generic tar archive. Types of transportation include distribution to and from a
+registry endpoint, saving and loading through commands or Docker daemon APIs,
+transferring the build context from client to Docker daemon, and committing the
+filesystem of a container to become an image.
+
+As tar archives are used for transit, but not preserved in many situations, the
+focus of the algorithm is to ensure the integrity of the preserved filesystem,
+while maintaining a deterministic accountability. This includes neither
+constraining the ordering or manipulation of the files during the creation or
+unpacking of the archive, nor include additional metadata state about the file
+system attributes.
+
+## Intended Audience
+
+This document is outlining the methods used for consistent checksum calculation
+for filesystems transported via tar archives.
+
+Auditing these methodologies is an open and iterative process. This document
+should accommodate the review of source code. Ultimately, this document should
+be the starting point of further refinements to the algorithm and its future
+versions.
+
+## Concept
+
+The checksum mechanism must ensure the integrity and assurance of the
+filesystem payload.
+
+## Checksum Algorithm Profile
+
+A checksum mechanism must define the following operations and attributes:
+
+* Associated hashing cipher - used to checksum each file payload and attribute
+  information.
+* Checksum list - each file of the filesystem archive has its checksum
+  calculated from the payload and attributes of the file. The final checksum is
+  calculated from this list, with specific ordering.
+* Version - as the algorithm adapts to requirements, there are behaviors of the
+  algorithm to manage by versioning.
+* Archive being calculated - the tar archive having its checksum calculated
+
+## Elements of TarSum checksum
+
+The calculated sum output is a text string. The elements included in the output
+of the calculated sum comprise the information needed for validation of the sum
+(TarSum version and hashing cipher used) and the expected checksum in hexadecimal
+form.
+
+There are two delimiters used:
+* '+' separates TarSum version from hashing cipher
+* ':' separates calculation mechanics from expected hash
+
+Example:
+
+```
+	"tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e"
+	|         |       \                                                               |
+	|         |        \                                                              |
+	|_version_|_cipher__|__                                                           |
+	|                      \                                                          |
+	|_calculation_mechanics_|______________________expected_sum_______________________|
+```
+
+## Versioning
+
+Versioning was introduced [0] to accommodate differences in calculation needed,
+and ability to maintain reverse compatibility.
+
+The general algorithm will be described further in the 'Calculation' section.
+
+### Version0
+
+This is the initial version of TarSum.
+
+Its element in the TarSum checksum string is `tarsum`.
+
+### Version1
+
+Its element in the TarSum checksum is `tarsum.v1`.
+
+The notable changes in this version:
+* Exclusion of file `mtime` from the file information headers, in each file
+  checksum calculation
+* Inclusion of extended attributes (`xattrs`. Also seen as `SCHILY.xattr.` prefixed Pax
+  tar file info headers) keys and values in each file checksum calculation
+
+### VersionDev
+
+*Do not use unless validating refinements to the checksum algorithm*
+
+Its element in the TarSum checksum is `tarsum.dev`.
+
+This is a floating placeholder for the next version and grounds for testing
+changes. The methods used for calculation are subject to change without notice,
+and this version is for testing and not for production use.
+
+## Ciphers
+
+The official default and standard hashing cipher used in the calculation mechanic
+is `sha256`. This refers to SHA256 hash algorithm as defined in FIPS 180-4.
+
+Though the TarSum algorithm itself is not exclusively bound to the single
+hashing cipher `sha256`, support for alternate hashing ciphers was later added
+[1]. Use cases for alternate cipher could include future-proofing TarSum
+checksum format and using faster cipher hashes for tar filesystem checksums.
+
+## Calculation
+
+### Requirement
+
+As mentioned earlier, the calculation is such that it takes into consideration
+the lifecycle of the tar archive. In that the tar archive is not an immutable,
+permanent artifact. Otherwise options like relying on a known hashing cipher
+checksum of the archive itself would be reliable enough. The tar archive of the
+filesystem is used as a transportation medium for Docker images, and the
+archive is discarded once its contents are extracted. Therefore, for consistent
+validation items such as order of files in the tar archive and time stamps are
+subject to change once an image is received.
+
+### Process
+
+The method is typically iterative due to reading tar info headers from the
+archive stream, though this is not a strict requirement.
+
+#### Files
+
+Each file in the tar archive has its contents (headers and body) checksummed
+individually using the designated associated hashing cipher. The ordered
+headers of the file are written to the checksum calculation first, and then the
+payload of the file body.
+
+The resulting checksum of the file is appended to the list of file sums. The
+sum is encoded as a string of the hexadecimal digest. Additionally, the file
+name and position in the archive is kept as reference for special ordering.
+
+#### Headers
+
+The following headers are read, in this
+order ( and the corresponding representation of its value):
+* 'name' - string
+* 'mode' - string of the base10 integer
+* 'uid' - string of the integer
+* 'gid' - string of the integer
+* 'size' - string of the integer
+* 'mtime' (_Version0 only_) - string of integer of the seconds since 1970-01-01 00:00:00 UTC
+* 'typeflag' - string of the char
+* 'linkname' - string
+* 'uname' - string
+* 'gname' - string
+* 'devmajor' - string of the integer
+* 'devminor' - string of the integer
+
+For >= Version1, the extended attribute headers ("SCHILY.xattr." prefixed pax
+headers) are included after the above list. These xattr key/values are first
+sorted by the keys.
+
+#### Header Format
+
+The ordered headers are written to the hash in the format of
+
+	"{.key}{.value}"
+
+with no newline.
+
+#### Body
+
+After the order headers of the file have been added to the checksum for the
+file, the body of the file is written to the hash.
+
+#### List of file sums
+
+The list of file sums is sorted by the string of the hexadecimal digest.
+
+If there are two files in the tar with matching paths, the order of occurrence
+for that path is reflected for the sums of the corresponding file header and
+body.
+
+#### Final Checksum
+
+Begin with a fresh or initial state of the associated hash cipher. If there is
+additional payload to include in the TarSum calculation for the archive, it is
+written first. Then each checksum from the ordered list of file sums is written
+to the hash.
+
+The resulting digest is formatted per the Elements of TarSum checksum,
+including the TarSum version, the associated hash cipher and the hexadecimal
+encoded checksum digest.
+
+## Security Considerations
+
+The initial version of TarSum has undergone one update that could invalidate
+handcrafted tar archives. The tar archive format supports appending of files
+with same names as prior files in the archive. The latter file will clobber the
+prior file of the same path. Due to this the algorithm now accounts for files
+with matching paths, and orders the list of file sums accordingly [3].
+
+## Footnotes
+
+* [0] Versioning https://github.com/docker/docker/commit/747f89cd327db9d50251b17797c4d825162226d0
+* [1] Alternate ciphers https://github.com/docker/docker/commit/4e9925d780665149b8bc940d5ba242ada1973c4e
+* [2] Tar http://en.wikipedia.org/wiki/Tar_%28computing%29
+* [3] Name collision https://github.com/docker/docker/commit/c5e6362c53cbbc09ddbabd5a7323e04438b57d31
+
+## Acknowledgments
+
+Joffrey F (shin-) and Guillaume J. Charmes (creack) on the initial work of the
+TarSum calculation.
+
diff --git a/vendor/github.com/docker/docker/pkg/tarsum/versioning.go b/vendor/github.com/docker/docker/pkg/tarsum/versioning.go
new file mode 100644
index 0000000000000000000000000000000000000000..2882286854114c7466046ce90176d4e773a1855f
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/tarsum/versioning.go
@@ -0,0 +1,150 @@
+package tarsum
+
+import (
+	"archive/tar"
+	"errors"
+	"sort"
+	"strconv"
+	"strings"
+)
+
+// Version is used for versioning of the TarSum algorithm
+// based on the prefix of the hash used
+// i.e. "tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b"
+type Version int
+
+// Prefix of "tarsum"
+const (
+	Version0 Version = iota
+	Version1
+	// VersionDev this constant will be either the latest or an unsettled next-version of the TarSum calculation
+	VersionDev
+)
+
+// VersionLabelForChecksum returns the label for the given tarsum
+// checksum, i.e., everything before the first `+` character in
+// the string or an empty string if no label separator is found.
+func VersionLabelForChecksum(checksum string) string {
+	// Checksums are in the form: {versionLabel}+{hashID}:{hex}
+	sepIndex := strings.Index(checksum, "+")
+	if sepIndex < 0 {
+		return ""
+	}
+	return checksum[:sepIndex]
+}
+
+// GetVersions gets a list of all known tarsum versions.
+func GetVersions() []Version {
+	v := []Version{}
+	for k := range tarSumVersions {
+		v = append(v, k)
+	}
+	return v
+}
+
+var (
+	tarSumVersions = map[Version]string{
+		Version0:   "tarsum",
+		Version1:   "tarsum.v1",
+		VersionDev: "tarsum.dev",
+	}
+	tarSumVersionsByName = map[string]Version{
+		"tarsum":     Version0,
+		"tarsum.v1":  Version1,
+		"tarsum.dev": VersionDev,
+	}
+)
+
+func (tsv Version) String() string {
+	return tarSumVersions[tsv]
+}
+
+// GetVersionFromTarsum returns the Version from the provided string.
+func GetVersionFromTarsum(tarsum string) (Version, error) {
+	tsv := tarsum
+	if strings.Contains(tarsum, "+") {
+		tsv = strings.SplitN(tarsum, "+", 2)[0]
+	}
+	for v, s := range tarSumVersions {
+		if s == tsv {
+			return v, nil
+		}
+	}
+	return -1, ErrNotVersion
+}
+
+// Errors that may be returned by functions in this package
+var (
+	ErrNotVersion            = errors.New("string does not include a TarSum Version")
+	ErrVersionNotImplemented = errors.New("TarSum Version is not yet implemented")
+)
+
+// tarHeaderSelector is the interface which different versions
+// of tarsum should use for selecting and ordering tar headers
+// for each item in the archive.
+type tarHeaderSelector interface {
+	selectHeaders(h *tar.Header) (orderedHeaders [][2]string)
+}
+
+type tarHeaderSelectFunc func(h *tar.Header) (orderedHeaders [][2]string)
+
+func (f tarHeaderSelectFunc) selectHeaders(h *tar.Header) (orderedHeaders [][2]string) {
+	return f(h)
+}
+
+func v0TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) {
+	return [][2]string{
+		{"name", h.Name},
+		{"mode", strconv.FormatInt(h.Mode, 10)},
+		{"uid", strconv.Itoa(h.Uid)},
+		{"gid", strconv.Itoa(h.Gid)},
+		{"size", strconv.FormatInt(h.Size, 10)},
+		{"mtime", strconv.FormatInt(h.ModTime.UTC().Unix(), 10)},
+		{"typeflag", string([]byte{h.Typeflag})},
+		{"linkname", h.Linkname},
+		{"uname", h.Uname},
+		{"gname", h.Gname},
+		{"devmajor", strconv.FormatInt(h.Devmajor, 10)},
+		{"devminor", strconv.FormatInt(h.Devminor, 10)},
+	}
+}
+
+func v1TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) {
+	// Get extended attributes.
+	xAttrKeys := make([]string, len(h.Xattrs))
+	for k := range h.Xattrs {
+		xAttrKeys = append(xAttrKeys, k)
+	}
+	sort.Strings(xAttrKeys)
+
+	// Make the slice with enough capacity to hold the 11 basic headers
+	// we want from the v0 selector plus however many xattrs we have.
+	orderedHeaders = make([][2]string, 0, 11+len(xAttrKeys))
+
+	// Copy all headers from v0 excluding the 'mtime' header (the 5th element).
+	v0headers := v0TarHeaderSelect(h)
+	orderedHeaders = append(orderedHeaders, v0headers[0:5]...)
+	orderedHeaders = append(orderedHeaders, v0headers[6:]...)
+
+	// Finally, append the sorted xattrs.
+	for _, k := range xAttrKeys {
+		orderedHeaders = append(orderedHeaders, [2]string{k, h.Xattrs[k]})
+	}
+
+	return
+}
+
+var registeredHeaderSelectors = map[Version]tarHeaderSelectFunc{
+	Version0:   v0TarHeaderSelect,
+	Version1:   v1TarHeaderSelect,
+	VersionDev: v1TarHeaderSelect,
+}
+
+func getTarHeaderSelector(v Version) (tarHeaderSelector, error) {
+	headerSelector, ok := registeredHeaderSelectors[v]
+	if !ok {
+		return nil, ErrVersionNotImplemented
+	}
+
+	return headerSelector, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/tarsum/writercloser.go b/vendor/github.com/docker/docker/pkg/tarsum/writercloser.go
new file mode 100644
index 0000000000000000000000000000000000000000..9727ecde3eba955993ba6e2d9b89aa217ca38166
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/tarsum/writercloser.go
@@ -0,0 +1,22 @@
+package tarsum
+
+import (
+	"io"
+)
+
+type writeCloseFlusher interface {
+	io.WriteCloser
+	Flush() error
+}
+
+type nopCloseFlusher struct {
+	io.Writer
+}
+
+func (n *nopCloseFlusher) Close() error {
+	return nil
+}
+
+func (n *nopCloseFlusher) Flush() error {
+	return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/ascii.go b/vendor/github.com/docker/docker/pkg/term/ascii.go
new file mode 100644
index 0000000000000000000000000000000000000000..f5262bccf5f3062bc3907d9b4f9b9ac1f9b7e515
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/ascii.go
@@ -0,0 +1,66 @@
+package term
+
+import (
+	"fmt"
+	"strings"
+)
+
+// ASCII list the possible supported ASCII key sequence
+var ASCII = []string{
+	"ctrl-@",
+	"ctrl-a",
+	"ctrl-b",
+	"ctrl-c",
+	"ctrl-d",
+	"ctrl-e",
+	"ctrl-f",
+	"ctrl-g",
+	"ctrl-h",
+	"ctrl-i",
+	"ctrl-j",
+	"ctrl-k",
+	"ctrl-l",
+	"ctrl-m",
+	"ctrl-n",
+	"ctrl-o",
+	"ctrl-p",
+	"ctrl-q",
+	"ctrl-r",
+	"ctrl-s",
+	"ctrl-t",
+	"ctrl-u",
+	"ctrl-v",
+	"ctrl-w",
+	"ctrl-x",
+	"ctrl-y",
+	"ctrl-z",
+	"ctrl-[",
+	"ctrl-\\",
+	"ctrl-]",
+	"ctrl-^",
+	"ctrl-_",
+}
+
+// ToBytes converts a string representing a suite of key-sequence to the corresponding ASCII code.
+func ToBytes(keys string) ([]byte, error) {
+	codes := []byte{}
+next:
+	for _, key := range strings.Split(keys, ",") {
+		if len(key) != 1 {
+			for code, ctrl := range ASCII {
+				if ctrl == key {
+					codes = append(codes, byte(code))
+					continue next
+				}
+			}
+			if key == "DEL" {
+				codes = append(codes, 127)
+			} else {
+				return nil, fmt.Errorf("Unknown character: '%s'", key)
+			}
+		} else {
+			codes = append(codes, byte(key[0]))
+		}
+	}
+	return codes, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/tc_linux_cgo.go b/vendor/github.com/docker/docker/pkg/term/tc_linux_cgo.go
new file mode 100644
index 0000000000000000000000000000000000000000..59dac5ba8e49b42c507930e8082b344e1b120803
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/tc_linux_cgo.go
@@ -0,0 +1,50 @@
+// +build linux,cgo
+
+package term
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+// #include <termios.h>
+import "C"
+
+// Termios is the Unix API for terminal I/O.
+// It is passthrough for syscall.Termios in order to make it portable with
+// other platforms where it is not available or handled differently.
+type Termios syscall.Termios
+
+// MakeRaw put the terminal connected to the given file descriptor into raw
+// mode and returns the previous state of the terminal so that it can be
+// restored.
+func MakeRaw(fd uintptr) (*State, error) {
+	var oldState State
+	if err := tcget(fd, &oldState.termios); err != 0 {
+		return nil, err
+	}
+
+	newState := oldState.termios
+
+	C.cfmakeraw((*C.struct_termios)(unsafe.Pointer(&newState)))
+	if err := tcset(fd, &newState); err != 0 {
+		return nil, err
+	}
+	return &oldState, nil
+}
+
+func tcget(fd uintptr, p *Termios) syscall.Errno {
+	ret, err := C.tcgetattr(C.int(fd), (*C.struct_termios)(unsafe.Pointer(p)))
+	if ret != 0 {
+		return err.(syscall.Errno)
+	}
+	return 0
+}
+
+func tcset(fd uintptr, p *Termios) syscall.Errno {
+	ret, err := C.tcsetattr(C.int(fd), C.TCSANOW, (*C.struct_termios)(unsafe.Pointer(p)))
+	if ret != 0 {
+		return err.(syscall.Errno)
+	}
+	return 0
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/tc_other.go b/vendor/github.com/docker/docker/pkg/term/tc_other.go
new file mode 100644
index 0000000000000000000000000000000000000000..750d7c3f6075ff3e88f996694a6bd556e6dac8ae
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/tc_other.go
@@ -0,0 +1,20 @@
+// +build !windows
+// +build !linux !cgo
+// +build !solaris !cgo
+
+package term
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+func tcget(fd uintptr, p *Termios) syscall.Errno {
+	_, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(p)))
+	return err
+}
+
+func tcset(fd uintptr, p *Termios) syscall.Errno {
+	_, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(p)))
+	return err
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/tc_solaris_cgo.go b/vendor/github.com/docker/docker/pkg/term/tc_solaris_cgo.go
new file mode 100644
index 0000000000000000000000000000000000000000..c9139d0ca807aabc858d2c98a268385b25a53198
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/tc_solaris_cgo.go
@@ -0,0 +1,63 @@
+// +build solaris,cgo
+
+package term
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+// #include <termios.h>
+import "C"
+
+// Termios is the Unix API for terminal I/O.
+// It is passthrough for syscall.Termios in order to make it portable with
+// other platforms where it is not available or handled differently.
+type Termios syscall.Termios
+
+// MakeRaw put the terminal connected to the given file descriptor into raw
+// mode and returns the previous state of the terminal so that it can be
+// restored.
+func MakeRaw(fd uintptr) (*State, error) {
+	var oldState State
+	if err := tcget(fd, &oldState.termios); err != 0 {
+		return nil, err
+	}
+
+	newState := oldState.termios
+
+	newState.Iflag &^= (syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON | syscall.IXANY)
+	newState.Oflag &^= syscall.OPOST
+	newState.Lflag &^= (syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN)
+	newState.Cflag &^= (syscall.CSIZE | syscall.PARENB)
+	newState.Cflag |= syscall.CS8
+
+	/*
+		VMIN is the minimum number of characters that needs to be read in non-canonical mode for it to be returned
+		Since VMIN is overloaded with another element in canonical mode when we switch modes it defaults to 4. It
+		needs to be explicitly set to 1.
+	*/
+	newState.Cc[C.VMIN] = 1
+	newState.Cc[C.VTIME] = 0
+
+	if err := tcset(fd, &newState); err != 0 {
+		return nil, err
+	}
+	return &oldState, nil
+}
+
+func tcget(fd uintptr, p *Termios) syscall.Errno {
+	ret, err := C.tcgetattr(C.int(fd), (*C.struct_termios)(unsafe.Pointer(p)))
+	if ret != 0 {
+		return err.(syscall.Errno)
+	}
+	return 0
+}
+
+func tcset(fd uintptr, p *Termios) syscall.Errno {
+	ret, err := C.tcsetattr(C.int(fd), C.TCSANOW, (*C.struct_termios)(unsafe.Pointer(p)))
+	if ret != 0 {
+		return err.(syscall.Errno)
+	}
+	return 0
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/term.go b/vendor/github.com/docker/docker/pkg/term/term.go
new file mode 100644
index 0000000000000000000000000000000000000000..fe59faa949cbce160bf9d184856500df35cb64ba
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/term.go
@@ -0,0 +1,123 @@
+// +build !windows
+
+// Package term provides structures and helper functions to work with
+// terminal (state, sizes).
+package term
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"os/signal"
+	"syscall"
+)
+
+var (
+	// ErrInvalidState is returned if the state of the terminal is invalid.
+	ErrInvalidState = errors.New("Invalid terminal state")
+)
+
+// State represents the state of the terminal.
+type State struct {
+	termios Termios
+}
+
+// Winsize represents the size of the terminal window.
+type Winsize struct {
+	Height uint16
+	Width  uint16
+	x      uint16
+	y      uint16
+}
+
+// StdStreams returns the standard streams (stdin, stdout, stderr).
+func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) {
+	return os.Stdin, os.Stdout, os.Stderr
+}
+
+// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal.
+func GetFdInfo(in interface{}) (uintptr, bool) {
+	var inFd uintptr
+	var isTerminalIn bool
+	if file, ok := in.(*os.File); ok {
+		inFd = file.Fd()
+		isTerminalIn = IsTerminal(inFd)
+	}
+	return inFd, isTerminalIn
+}
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+func IsTerminal(fd uintptr) bool {
+	var termios Termios
+	return tcget(fd, &termios) == 0
+}
+
+// RestoreTerminal restores the terminal connected to the given file descriptor
+// to a previous state.
+func RestoreTerminal(fd uintptr, state *State) error {
+	if state == nil {
+		return ErrInvalidState
+	}
+	if err := tcset(fd, &state.termios); err != 0 {
+		return err
+	}
+	return nil
+}
+
+// SaveState saves the state of the terminal connected to the given file descriptor.
+func SaveState(fd uintptr) (*State, error) {
+	var oldState State
+	if err := tcget(fd, &oldState.termios); err != 0 {
+		return nil, err
+	}
+
+	return &oldState, nil
+}
+
+// DisableEcho applies the specified state to the terminal connected to the file
+// descriptor, with echo disabled.
+func DisableEcho(fd uintptr, state *State) error {
+	newState := state.termios
+	newState.Lflag &^= syscall.ECHO
+
+	if err := tcset(fd, &newState); err != 0 {
+		return err
+	}
+	handleInterrupt(fd, state)
+	return nil
+}
+
+// SetRawTerminal puts the terminal connected to the given file descriptor into
+// raw mode and returns the previous state. On UNIX, this puts both the input
+// and output into raw mode. On Windows, it only puts the input into raw mode.
+func SetRawTerminal(fd uintptr) (*State, error) {
+	oldState, err := MakeRaw(fd)
+	if err != nil {
+		return nil, err
+	}
+	handleInterrupt(fd, oldState)
+	return oldState, err
+}
+
+// SetRawTerminalOutput puts the output of terminal connected to the given file
+// descriptor into raw mode. On UNIX, this does nothing and returns nil for the
+// state. On Windows, it disables LF -> CRLF translation.
+func SetRawTerminalOutput(fd uintptr) (*State, error) {
+	return nil, nil
+}
+
+func handleInterrupt(fd uintptr, state *State) {
+	sigchan := make(chan os.Signal, 1)
+	signal.Notify(sigchan, os.Interrupt)
+	go func() {
+		for range sigchan {
+			// quit cleanly so the next terminal prompt starts on a new line
+			fmt.Println()
+			signal.Stop(sigchan)
+			close(sigchan)
+			RestoreTerminal(fd, state)
+			os.Exit(1)
+		}
+	}()
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/term_solaris.go b/vendor/github.com/docker/docker/pkg/term/term_solaris.go
new file mode 100644
index 0000000000000000000000000000000000000000..112debbec5639265695e1985f42c4df214932e9d
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/term_solaris.go
@@ -0,0 +1,41 @@
+// +build solaris
+
+package term
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+/*
+#include <unistd.h>
+#include <stropts.h>
+#include <termios.h>
+
+// Small wrapper to get rid of variadic args of ioctl()
+int my_ioctl(int fd, int cmd, struct winsize *ws) {
+	return ioctl(fd, cmd, ws);
+}
+*/
+import "C"
+
+// GetWinsize returns the window size based on the specified file descriptor.
+func GetWinsize(fd uintptr) (*Winsize, error) {
+	ws := &Winsize{}
+	ret, err := C.my_ioctl(C.int(fd), C.int(syscall.TIOCGWINSZ), (*C.struct_winsize)(unsafe.Pointer(ws)))
+	// Skip retval = 0
+	if ret == 0 {
+		return ws, nil
+	}
+	return ws, err
+}
+
+// SetWinsize tries to set the specified window size for the specified file descriptor.
+func SetWinsize(fd uintptr, ws *Winsize) error {
+	ret, err := C.my_ioctl(C.int(fd), C.int(syscall.TIOCSWINSZ), (*C.struct_winsize)(unsafe.Pointer(ws)))
+	// Skip retval = 0
+	if ret == 0 {
+		return nil
+	}
+	return err
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/term_unix.go b/vendor/github.com/docker/docker/pkg/term/term_unix.go
new file mode 100644
index 0000000000000000000000000000000000000000..ddf87a0e58a21bd0b89659996cc346072304c536
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/term_unix.go
@@ -0,0 +1,29 @@
+// +build !solaris,!windows
+
+package term
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+// GetWinsize returns the window size based on the specified file descriptor.
+func GetWinsize(fd uintptr) (*Winsize, error) {
+	ws := &Winsize{}
+	_, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(ws)))
+	// Skip errno = 0
+	if err == 0 {
+		return ws, nil
+	}
+	return ws, err
+}
+
+// SetWinsize tries to set the specified window size for the specified file descriptor.
+func SetWinsize(fd uintptr, ws *Winsize) error {
+	_, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCSWINSZ), uintptr(unsafe.Pointer(ws)))
+	// Skip errno = 0
+	if err == 0 {
+		return nil
+	}
+	return err
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/term_windows.go b/vendor/github.com/docker/docker/pkg/term/term_windows.go
new file mode 100644
index 0000000000000000000000000000000000000000..a91f07e48239133338b94705764e5b763acc635b
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/term_windows.go
@@ -0,0 +1,233 @@
+// +build windows
+
+package term
+
+import (
+	"io"
+	"os"
+	"os/signal"
+	"syscall"
+
+	"github.com/Azure/go-ansiterm/winterm"
+	"github.com/docker/docker/pkg/term/windows"
+)
+
+// State holds the console mode for the terminal.
+type State struct {
+	mode uint32
+}
+
+// Winsize is used for window size.
+type Winsize struct {
+	Height uint16
+	Width  uint16
+}
+
+const (
+	// https://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx
+	enableVirtualTerminalInput      = 0x0200
+	enableVirtualTerminalProcessing = 0x0004
+	disableNewlineAutoReturn        = 0x0008
+)
+
+// vtInputSupported is true if enableVirtualTerminalInput is supported by the console
+var vtInputSupported bool
+
+// StdStreams returns the standard streams (stdin, stdout, stderr).
+func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) {
+	// Turn on VT handling on all std handles, if possible. This might
+	// fail, in which case we will fall back to terminal emulation.
+	var emulateStdin, emulateStdout, emulateStderr bool
+	fd := os.Stdin.Fd()
+	if mode, err := winterm.GetConsoleMode(fd); err == nil {
+		// Validate that enableVirtualTerminalInput is supported, but do not set it.
+		if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalInput); err != nil {
+			emulateStdin = true
+		} else {
+			vtInputSupported = true
+		}
+		// Unconditionally set the console mode back even on failure because SetConsoleMode
+		// remembers invalid bits on input handles.
+		winterm.SetConsoleMode(fd, mode)
+	}
+
+	fd = os.Stdout.Fd()
+	if mode, err := winterm.GetConsoleMode(fd); err == nil {
+		// Validate disableNewlineAutoReturn is supported, but do not set it.
+		if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing|disableNewlineAutoReturn); err != nil {
+			emulateStdout = true
+		} else {
+			winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing)
+		}
+	}
+
+	fd = os.Stderr.Fd()
+	if mode, err := winterm.GetConsoleMode(fd); err == nil {
+		// Validate disableNewlineAutoReturn is supported, but do not set it.
+		if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing|disableNewlineAutoReturn); err != nil {
+			emulateStderr = true
+		} else {
+			winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing)
+		}
+	}
+
+	if os.Getenv("ConEmuANSI") == "ON" || os.Getenv("ConsoleZVersion") != "" {
+		// The ConEmu and ConsoleZ terminals emulate ANSI on output streams well.
+		emulateStdin = true
+		emulateStdout = false
+		emulateStderr = false
+	}
+
+	if emulateStdin {
+		stdIn = windows.NewAnsiReader(syscall.STD_INPUT_HANDLE)
+	} else {
+		stdIn = os.Stdin
+	}
+
+	if emulateStdout {
+		stdOut = windows.NewAnsiWriter(syscall.STD_OUTPUT_HANDLE)
+	} else {
+		stdOut = os.Stdout
+	}
+
+	if emulateStderr {
+		stdErr = windows.NewAnsiWriter(syscall.STD_ERROR_HANDLE)
+	} else {
+		stdErr = os.Stderr
+	}
+
+	return
+}
+
+// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal.
+func GetFdInfo(in interface{}) (uintptr, bool) {
+	return windows.GetHandleInfo(in)
+}
+
+// GetWinsize returns the window size based on the specified file descriptor.
+func GetWinsize(fd uintptr) (*Winsize, error) {
+	info, err := winterm.GetConsoleScreenBufferInfo(fd)
+	if err != nil {
+		return nil, err
+	}
+
+	winsize := &Winsize{
+		Width:  uint16(info.Window.Right - info.Window.Left + 1),
+		Height: uint16(info.Window.Bottom - info.Window.Top + 1),
+	}
+
+	return winsize, nil
+}
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+func IsTerminal(fd uintptr) bool {
+	return windows.IsConsole(fd)
+}
+
+// RestoreTerminal restores the terminal connected to the given file descriptor
+// to a previous state.
+func RestoreTerminal(fd uintptr, state *State) error {
+	return winterm.SetConsoleMode(fd, state.mode)
+}
+
+// SaveState saves the state of the terminal connected to the given file descriptor.
+func SaveState(fd uintptr) (*State, error) {
+	mode, e := winterm.GetConsoleMode(fd)
+	if e != nil {
+		return nil, e
+	}
+
+	return &State{mode: mode}, nil
+}
+
+// DisableEcho disables echo for the terminal connected to the given file descriptor.
+// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx
+func DisableEcho(fd uintptr, state *State) error {
+	mode := state.mode
+	mode &^= winterm.ENABLE_ECHO_INPUT
+	mode |= winterm.ENABLE_PROCESSED_INPUT | winterm.ENABLE_LINE_INPUT
+	err := winterm.SetConsoleMode(fd, mode)
+	if err != nil {
+		return err
+	}
+
+	// Register an interrupt handler to catch and restore prior state
+	restoreAtInterrupt(fd, state)
+	return nil
+}
+
+// SetRawTerminal puts the terminal connected to the given file descriptor into
+// raw mode and returns the previous state. On UNIX, this puts both the input
+// and output into raw mode. On Windows, it only puts the input into raw mode.
+func SetRawTerminal(fd uintptr) (*State, error) {
+	state, err := MakeRaw(fd)
+	if err != nil {
+		return nil, err
+	}
+
+	// Register an interrupt handler to catch and restore prior state
+	restoreAtInterrupt(fd, state)
+	return state, err
+}
+
+// SetRawTerminalOutput puts the output of terminal connected to the given file
+// descriptor into raw mode. On UNIX, this does nothing and returns nil for the
+// state. On Windows, it disables LF -> CRLF translation.
+func SetRawTerminalOutput(fd uintptr) (*State, error) {
+	state, err := SaveState(fd)
+	if err != nil {
+		return nil, err
+	}
+
+	// Ignore failures, since disableNewlineAutoReturn might not be supported on this
+	// version of Windows.
+	winterm.SetConsoleMode(fd, state.mode|disableNewlineAutoReturn)
+	return state, err
+}
+
+// MakeRaw puts the terminal (Windows Console) connected to the given file descriptor into raw
+// mode and returns the previous state of the terminal so that it can be restored.
+func MakeRaw(fd uintptr) (*State, error) {
+	state, err := SaveState(fd)
+	if err != nil {
+		return nil, err
+	}
+
+	mode := state.mode
+
+	// See
+	// -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx
+	// -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx
+
+	// Disable these modes
+	mode &^= winterm.ENABLE_ECHO_INPUT
+	mode &^= winterm.ENABLE_LINE_INPUT
+	mode &^= winterm.ENABLE_MOUSE_INPUT
+	mode &^= winterm.ENABLE_WINDOW_INPUT
+	mode &^= winterm.ENABLE_PROCESSED_INPUT
+
+	// Enable these modes
+	mode |= winterm.ENABLE_EXTENDED_FLAGS
+	mode |= winterm.ENABLE_INSERT_MODE
+	mode |= winterm.ENABLE_QUICK_EDIT_MODE
+	if vtInputSupported {
+		mode |= enableVirtualTerminalInput
+	}
+
+	err = winterm.SetConsoleMode(fd, mode)
+	if err != nil {
+		return nil, err
+	}
+	return state, nil
+}
+
+func restoreAtInterrupt(fd uintptr, state *State) {
+	sigchan := make(chan os.Signal, 1)
+	signal.Notify(sigchan, os.Interrupt)
+
+	go func() {
+		_ = <-sigchan
+		RestoreTerminal(fd, state)
+		os.Exit(0)
+	}()
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/termios_darwin.go b/vendor/github.com/docker/docker/pkg/term/termios_darwin.go
new file mode 100644
index 0000000000000000000000000000000000000000..480db900ac9271b1c5dd6efb4e12e4a50079b631
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/termios_darwin.go
@@ -0,0 +1,69 @@
+package term
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+const (
+	getTermios = syscall.TIOCGETA
+	setTermios = syscall.TIOCSETA
+)
+
+// Termios magic numbers, passthrough to the ones defined in syscall.
+const (
+	IGNBRK = syscall.IGNBRK
+	PARMRK = syscall.PARMRK
+	INLCR  = syscall.INLCR
+	IGNCR  = syscall.IGNCR
+	ECHONL = syscall.ECHONL
+	CSIZE  = syscall.CSIZE
+	ICRNL  = syscall.ICRNL
+	ISTRIP = syscall.ISTRIP
+	PARENB = syscall.PARENB
+	ECHO   = syscall.ECHO
+	ICANON = syscall.ICANON
+	ISIG   = syscall.ISIG
+	IXON   = syscall.IXON
+	BRKINT = syscall.BRKINT
+	INPCK  = syscall.INPCK
+	OPOST  = syscall.OPOST
+	CS8    = syscall.CS8
+	IEXTEN = syscall.IEXTEN
+)
+
+// Termios is the Unix API for terminal I/O.
+type Termios struct {
+	Iflag  uint64
+	Oflag  uint64
+	Cflag  uint64
+	Lflag  uint64
+	Cc     [20]byte
+	Ispeed uint64
+	Ospeed uint64
+}
+
+// MakeRaw put the terminal connected to the given file descriptor into raw
+// mode and returns the previous state of the terminal so that it can be
+// restored.
+func MakeRaw(fd uintptr) (*State, error) {
+	var oldState State
+	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 {
+		return nil, err
+	}
+
+	newState := oldState.termios
+	newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON)
+	newState.Oflag &^= OPOST
+	newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN)
+	newState.Cflag &^= (CSIZE | PARENB)
+	newState.Cflag |= CS8
+	newState.Cc[syscall.VMIN] = 1
+	newState.Cc[syscall.VTIME] = 0
+
+	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 {
+		return nil, err
+	}
+
+	return &oldState, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/termios_freebsd.go b/vendor/github.com/docker/docker/pkg/term/termios_freebsd.go
new file mode 100644
index 0000000000000000000000000000000000000000..ed843ad69c9442f45b985090b357da865d3eb693
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/termios_freebsd.go
@@ -0,0 +1,69 @@
+package term
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+const (
+	getTermios = syscall.TIOCGETA
+	setTermios = syscall.TIOCSETA
+)
+
+// Termios magic numbers, passthrough to the ones defined in syscall.
+const (
+	IGNBRK = syscall.IGNBRK
+	PARMRK = syscall.PARMRK
+	INLCR  = syscall.INLCR
+	IGNCR  = syscall.IGNCR
+	ECHONL = syscall.ECHONL
+	CSIZE  = syscall.CSIZE
+	ICRNL  = syscall.ICRNL
+	ISTRIP = syscall.ISTRIP
+	PARENB = syscall.PARENB
+	ECHO   = syscall.ECHO
+	ICANON = syscall.ICANON
+	ISIG   = syscall.ISIG
+	IXON   = syscall.IXON
+	BRKINT = syscall.BRKINT
+	INPCK  = syscall.INPCK
+	OPOST  = syscall.OPOST
+	CS8    = syscall.CS8
+	IEXTEN = syscall.IEXTEN
+)
+
+// Termios is the Unix API for terminal I/O.
+type Termios struct {
+	Iflag  uint32
+	Oflag  uint32
+	Cflag  uint32
+	Lflag  uint32
+	Cc     [20]byte
+	Ispeed uint32
+	Ospeed uint32
+}
+
+// MakeRaw put the terminal connected to the given file descriptor into raw
+// mode and returns the previous state of the terminal so that it can be
+// restored.
+func MakeRaw(fd uintptr) (*State, error) {
+	var oldState State
+	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 {
+		return nil, err
+	}
+
+	newState := oldState.termios
+	newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON)
+	newState.Oflag &^= OPOST
+	newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN)
+	newState.Cflag &^= (CSIZE | PARENB)
+	newState.Cflag |= CS8
+	newState.Cc[syscall.VMIN] = 1
+	newState.Cc[syscall.VTIME] = 0
+
+	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 {
+		return nil, err
+	}
+
+	return &oldState, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/termios_linux.go b/vendor/github.com/docker/docker/pkg/term/termios_linux.go
new file mode 100644
index 0000000000000000000000000000000000000000..22921b6aef38afe0151fba0403b7b86aa4f15a97
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/termios_linux.go
@@ -0,0 +1,47 @@
+// +build !cgo
+
+package term
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+const (
+	getTermios = syscall.TCGETS
+	setTermios = syscall.TCSETS
+)
+
+// Termios is the Unix API for terminal I/O.
+type Termios struct {
+	Iflag  uint32
+	Oflag  uint32
+	Cflag  uint32
+	Lflag  uint32
+	Cc     [20]byte
+	Ispeed uint32
+	Ospeed uint32
+}
+
+// MakeRaw put the terminal connected to the given file descriptor into raw
+// mode and returns the previous state of the terminal so that it can be
+// restored.
+func MakeRaw(fd uintptr) (*State, error) {
+	var oldState State
+	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, getTermios, uintptr(unsafe.Pointer(&oldState.termios))); err != 0 {
+		return nil, err
+	}
+
+	newState := oldState.termios
+
+	newState.Iflag &^= (syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON)
+	newState.Oflag &^= syscall.OPOST
+	newState.Lflag &^= (syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN)
+	newState.Cflag &^= (syscall.CSIZE | syscall.PARENB)
+	newState.Cflag |= syscall.CS8
+
+	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(&newState))); err != 0 {
+		return nil, err
+	}
+	return &oldState, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/termios_openbsd.go b/vendor/github.com/docker/docker/pkg/term/termios_openbsd.go
new file mode 100644
index 0000000000000000000000000000000000000000..ed843ad69c9442f45b985090b357da865d3eb693
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/termios_openbsd.go
@@ -0,0 +1,69 @@
+package term
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+const (
+	getTermios = syscall.TIOCGETA
+	setTermios = syscall.TIOCSETA
+)
+
+// Termios magic numbers, passthrough to the ones defined in syscall.
+const (
+	IGNBRK = syscall.IGNBRK
+	PARMRK = syscall.PARMRK
+	INLCR  = syscall.INLCR
+	IGNCR  = syscall.IGNCR
+	ECHONL = syscall.ECHONL
+	CSIZE  = syscall.CSIZE
+	ICRNL  = syscall.ICRNL
+	ISTRIP = syscall.ISTRIP
+	PARENB = syscall.PARENB
+	ECHO   = syscall.ECHO
+	ICANON = syscall.ICANON
+	ISIG   = syscall.ISIG
+	IXON   = syscall.IXON
+	BRKINT = syscall.BRKINT
+	INPCK  = syscall.INPCK
+	OPOST  = syscall.OPOST
+	CS8    = syscall.CS8
+	IEXTEN = syscall.IEXTEN
+)
+
+// Termios is the Unix API for terminal I/O.
+type Termios struct {
+	Iflag  uint32
+	Oflag  uint32
+	Cflag  uint32
+	Lflag  uint32
+	Cc     [20]byte
+	Ispeed uint32
+	Ospeed uint32
+}
+
+// MakeRaw put the terminal connected to the given file descriptor into raw
+// mode and returns the previous state of the terminal so that it can be
+// restored.
+func MakeRaw(fd uintptr) (*State, error) {
+	var oldState State
+	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 {
+		return nil, err
+	}
+
+	newState := oldState.termios
+	newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON)
+	newState.Oflag &^= OPOST
+	newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN)
+	newState.Cflag &^= (CSIZE | PARENB)
+	newState.Cflag |= CS8
+	newState.Cc[syscall.VMIN] = 1
+	newState.Cc[syscall.VTIME] = 0
+
+	if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 {
+		return nil, err
+	}
+
+	return &oldState, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go b/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go
new file mode 100644
index 0000000000000000000000000000000000000000..cb0b88356df8591187f307b41d2073746ce21e1a
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go
@@ -0,0 +1,263 @@
+// +build windows
+
+package windows
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"strings"
+	"unsafe"
+
+	ansiterm "github.com/Azure/go-ansiterm"
+	"github.com/Azure/go-ansiterm/winterm"
+)
+
+const (
+	// escapeSequence is the ESC-[ (CSI) prefix prepended to translated arrow keys.
+	escapeSequence = ansiterm.KEY_ESC_CSI
+)
+
+// ansiReader wraps a standard input file (e.g., os.Stdin) providing ANSI sequence translation.
+type ansiReader struct {
+	file     *os.File
+	fd       uintptr
+	buffer   []byte // translated bytes not yet consumed by Read
+	cbBuffer int    // NOTE(review): not referenced anywhere in this file — possibly dead; confirm before removing
+	command  []byte
+}
+
+// NewAnsiReader returns an io.ReadCloser that provides VT100 terminal emulation on top of a
+// Windows console input handle.
+// nFile selects the standard stream (resolved via winterm.GetStdFile).
+func NewAnsiReader(nFile int) io.ReadCloser {
+	initLogger()
+	file, fd := winterm.GetStdFile(nFile)
+	return &ansiReader{
+		file:    file,
+		fd:      fd,
+		command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH),
+		buffer:  make([]byte, 0),
+	}
+}
+
+// Close closes the wrapped file.
+func (ar *ansiReader) Close() (err error) {
+	return ar.file.Close()
+}
+
+// Fd returns the file descriptor of the wrapped file.
+func (ar *ansiReader) Fd() uintptr {
+	return ar.fd
+}
+
+// Read reads up to len(p) bytes of translated input events into p.
+// Cached bytes left over from a previous call are drained first; otherwise
+// the call blocks on the console until at least one input event arrives.
+func (ar *ansiReader) Read(p []byte) (int, error) {
+	if len(p) == 0 {
+		return 0, nil
+	}
+
+	// Previously read bytes exist, read as much as we can and return
+	if len(ar.buffer) > 0 {
+		logger.Debugf("Reading previously cached bytes")
+
+		originalLength := len(ar.buffer)
+		copiedLength := copy(p, ar.buffer)
+
+		if copiedLength == originalLength {
+			// Cache fully drained; reset it.
+			ar.buffer = make([]byte, 0, len(p))
+		} else {
+			// Keep the undelivered tail for the next Read.
+			ar.buffer = ar.buffer[copiedLength:]
+		}
+
+		logger.Debugf("Read from cache p[%d]: % x", copiedLength, p)
+		return copiedLength, nil
+	}
+
+	// Read and translate key events
+	events, err := readInputEvents(ar.fd, len(p))
+	if err != nil {
+		return 0, err
+	} else if len(events) == 0 {
+		logger.Debug("No input events detected")
+		return 0, nil
+	}
+
+	keyBytes := translateKeyEvents(events, []byte(escapeSequence))
+
+	// Save excess bytes and right-size keyBytes
+	if len(keyBytes) > len(p) {
+		logger.Debugf("Received %d keyBytes, only room for %d bytes", len(keyBytes), len(p))
+		ar.buffer = keyBytes[len(p):]
+		keyBytes = keyBytes[:len(p)]
+	} else if len(keyBytes) == 0 {
+		// Events were non-key events (e.g. focus/resize) or key-up only.
+		logger.Debug("No key bytes returned from the translator")
+		return 0, nil
+	}
+
+	copiedLength := copy(p, keyBytes)
+	if copiedLength != len(keyBytes) {
+		return 0, errors.New("unexpected copy length encountered")
+	}
+
+	logger.Debugf("Read        p[%d]: % x", copiedLength, p)
+	logger.Debugf("Read keyBytes[%d]: % x", copiedLength, keyBytes)
+	return copiedLength, nil
+}
+
+// readInputEvents polls until at least one event is available.
+// maxBytes is the caller's output buffer size; it bounds how many
+// INPUT_RECORDs are requested in one ReadConsoleInput call.
+func readInputEvents(fd uintptr, maxBytes int) ([]winterm.INPUT_RECORD, error) {
+	// Determine the maximum number of records to retrieve
+	// -- Cast around the type system to obtain the size of a single INPUT_RECORD.
+	//    unsafe.Sizeof requires an expression vs. a type-reference; the casting
+	//    tricks the type system into believing it has such an expression.
+	recordSize := int(unsafe.Sizeof(*((*winterm.INPUT_RECORD)(unsafe.Pointer(&maxBytes)))))
+	countRecords := maxBytes / recordSize
+	if countRecords > ansiterm.MAX_INPUT_EVENTS {
+		countRecords = ansiterm.MAX_INPUT_EVENTS
+	} else if countRecords == 0 {
+		// Always request at least one record, even for tiny buffers.
+		countRecords = 1
+	}
+	logger.Debugf("[windows] readInputEvents: Reading %v records (buffer size %v, record size %v)", countRecords, maxBytes, recordSize)
+
+	// Wait for and read input events
+	events := make([]winterm.INPUT_RECORD, countRecords)
+	nEvents := uint32(0)
+	// Block indefinitely until the console handle is signaled.
+	eventsExist, err := winterm.WaitForSingleObject(fd, winterm.WAIT_INFINITE)
+	if err != nil {
+		return nil, err
+	}
+
+	if eventsExist {
+		err = winterm.ReadConsoleInput(fd, events, &nEvents)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// Return a slice restricted to the number of returned records
+	logger.Debugf("[windows] readInputEvents: Read %v events", nEvents)
+	return events[:nEvents], nil
+}
+
+// KeyEvent Translation Helpers
+
+// arrowKeyMapPrefix maps arrow virtual-key codes to format strings taking two
+// arguments: the escape-sequence prefix and the control-key modifier.
+var arrowKeyMapPrefix = map[uint16]string{
+	winterm.VK_UP:    "%s%sA",
+	winterm.VK_DOWN:  "%s%sB",
+	winterm.VK_RIGHT: "%s%sC",
+	winterm.VK_LEFT:  "%s%sD",
+}
+
+// keyMapPrefix maps the remaining virtual-key codes to format strings taking
+// a single argument: the control-key modifier. Empty entries (F1/F2) are
+// intentionally untranslated.
+var keyMapPrefix = map[uint16]string{
+	winterm.VK_UP:     "\x1B[%sA",
+	winterm.VK_DOWN:   "\x1B[%sB",
+	winterm.VK_RIGHT:  "\x1B[%sC",
+	winterm.VK_LEFT:   "\x1B[%sD",
+	winterm.VK_HOME:   "\x1B[1%s~", // showkey shows ^[[1
+	winterm.VK_END:    "\x1B[4%s~", // showkey shows ^[[4
+	winterm.VK_INSERT: "\x1B[2%s~",
+	winterm.VK_DELETE: "\x1B[3%s~",
+	winterm.VK_PRIOR:  "\x1B[5%s~",
+	winterm.VK_NEXT:   "\x1B[6%s~",
+	winterm.VK_F1:     "",
+	winterm.VK_F2:     "",
+	winterm.VK_F3:     "\x1B[13%s~",
+	winterm.VK_F4:     "\x1B[14%s~",
+	winterm.VK_F5:     "\x1B[15%s~",
+	winterm.VK_F6:     "\x1B[17%s~",
+	winterm.VK_F7:     "\x1B[18%s~",
+	winterm.VK_F8:     "\x1B[19%s~",
+	winterm.VK_F9:     "\x1B[20%s~",
+	winterm.VK_F10:    "\x1B[21%s~",
+	winterm.VK_F11:    "\x1B[23%s~",
+	winterm.VK_F12:    "\x1B[24%s~",
+}
+
+// translateKeyEvents converts the input events into the appropriate ANSI string.
+// Only key-down key events are translated; everything else is dropped.
+func translateKeyEvents(events []winterm.INPUT_RECORD, escapeSequence []byte) []byte {
+	var buffer bytes.Buffer
+	for _, event := range events {
+		if event.EventType == winterm.KEY_EVENT && event.KeyEvent.KeyDown != 0 {
+			buffer.WriteString(keyToString(&event.KeyEvent, escapeSequence))
+		}
+	}
+
+	return buffer.Bytes()
+}
+
+// keyToString maps the given input event record to the corresponding string.
+// Non-character keys (UnicodeChar == 0) are handled via the virtual-key maps;
+// Alt+key produces ESC-N followed by the lowercased character.
+func keyToString(keyEvent *winterm.KEY_EVENT_RECORD, escapeSequence []byte) string {
+	if keyEvent.UnicodeChar == 0 {
+		return formatVirtualKey(keyEvent.VirtualKeyCode, keyEvent.ControlKeyState, escapeSequence)
+	}
+
+	_, alt, control := getControlKeys(keyEvent.ControlKeyState)
+	if control {
+		// TODO(azlinux): Implement following control sequences
+		// <Ctrl>-D  Signals the end of input from the keyboard; also exits current shell.
+		// <Ctrl>-H  Deletes the first character to the left of the cursor. Also called the ERASE key.
+		// <Ctrl>-Q  Restarts printing after it has been stopped with <Ctrl>-s.
+		// <Ctrl>-S  Suspends printing on the screen (does not stop the program).
+		// <Ctrl>-U  Deletes all characters on the current line. Also called the KILL key.
+		// <Ctrl>-E  Quits current command and creates a core
+
+	}
+
+	// <Alt>+Key generates ESC N Key
+	if !control && alt {
+		return ansiterm.KEY_ESC_N + strings.ToLower(string(keyEvent.UnicodeChar))
+	}
+
+	// Plain character (Ctrl combinations currently fall through unchanged).
+	return string(keyEvent.UnicodeChar)
+}
+
+// formatVirtualKey converts a virtual key (e.g., up arrow) into the appropriate ANSI string.
+// Unmapped keys translate to the empty string (i.e. they are swallowed).
+func formatVirtualKey(key uint16, controlState uint32, escapeSequence []byte) string {
+	shift, alt, control := getControlKeys(controlState)
+	modifier := getControlKeysModifier(shift, alt, control)
+
+	// Arrow keys use the caller-supplied escape prefix plus the modifier.
+	if format, ok := arrowKeyMapPrefix[key]; ok {
+		return fmt.Sprintf(format, escapeSequence, modifier)
+	}
+
+	// All other mapped keys embed a fixed CSI prefix in the format string.
+	if format, ok := keyMapPrefix[key]; ok {
+		return fmt.Sprintf(format, modifier)
+	}
+
+	return ""
+}
+
+// getControlKeys extracts the shift, alt, and ctrl key states.
+// Left and right Alt/Ctrl are treated identically.
+func getControlKeys(controlState uint32) (shift, alt, control bool) {
+	shift = 0 != (controlState & winterm.SHIFT_PRESSED)
+	alt = 0 != (controlState & (winterm.LEFT_ALT_PRESSED | winterm.RIGHT_ALT_PRESSED))
+	control = 0 != (controlState & (winterm.LEFT_CTRL_PRESSED | winterm.RIGHT_CTRL_PRESSED))
+	return shift, alt, control
+}
+
+// getControlKeysModifier returns the ANSI modifier for the given combination of control keys.
+// Combinations are checked from most to least specific; no modifier yields "".
+func getControlKeysModifier(shift, alt, control bool) string {
+	if shift && alt && control {
+		return ansiterm.KEY_CONTROL_PARAM_8
+	}
+	if alt && control {
+		return ansiterm.KEY_CONTROL_PARAM_7
+	}
+	if shift && control {
+		return ansiterm.KEY_CONTROL_PARAM_6
+	}
+	if control {
+		return ansiterm.KEY_CONTROL_PARAM_5
+	}
+	if shift && alt {
+		return ansiterm.KEY_CONTROL_PARAM_4
+	}
+	if alt {
+		return ansiterm.KEY_CONTROL_PARAM_3
+	}
+	if shift {
+		return ansiterm.KEY_CONTROL_PARAM_2
+	}
+	return ""
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go b/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go
new file mode 100644
index 0000000000000000000000000000000000000000..a3ce5697d956aa8b1a3e9d10c6591f553c1a10fd
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go
@@ -0,0 +1,64 @@
+// +build windows
+
+package windows
+
+import (
+	"io"
+	"os"
+
+	ansiterm "github.com/Azure/go-ansiterm"
+	"github.com/Azure/go-ansiterm/winterm"
+)
+
+// ansiWriter wraps a standard output file (e.g., os.Stdout) providing ANSI sequence translation.
+type ansiWriter struct {
+	file           *os.File
+	fd             uintptr
+	infoReset      *winterm.CONSOLE_SCREEN_BUFFER_INFO // screen-buffer snapshot taken at construction
+	command        []byte
+	escapeSequence []byte
+	inAnsiSequence bool // NOTE(review): never read or written in this file — confirm it is used elsewhere
+	parser         *ansiterm.AnsiParser // translates ANSI bytes into console API calls
+}
+
+// NewAnsiWriter returns an io.Writer that provides VT100 terminal emulation on top of a
+// Windows console output handle.
+// Returns nil when the handle is not a console (GetConsoleScreenBufferInfo fails);
+// callers must check for nil before use.
+func NewAnsiWriter(nFile int) io.Writer {
+	initLogger()
+	file, fd := winterm.GetStdFile(nFile)
+	info, err := winterm.GetConsoleScreenBufferInfo(fd)
+	if err != nil {
+		return nil
+	}
+
+	// Start the state machine in the "Ground" state with a Windows event handler.
+	parser := ansiterm.CreateParser("Ground", winterm.CreateWinEventHandler(fd, file))
+	logger.Infof("newAnsiWriter: parser %p", parser)
+
+	aw := &ansiWriter{
+		file:           file,
+		fd:             fd,
+		infoReset:      info,
+		command:        make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH),
+		escapeSequence: []byte(ansiterm.KEY_ESC_CSI),
+		parser:         parser,
+	}
+
+	logger.Infof("newAnsiWriter: aw.parser %p", aw.parser)
+	logger.Infof("newAnsiWriter: %v", aw)
+	return aw
+}
+
+// Fd returns the file descriptor of the wrapped file.
+func (aw *ansiWriter) Fd() uintptr {
+	return aw.fd
+}
+
+// Write writes len(p) bytes from p to the underlying data stream.
+// Bytes are fed through the ANSI parser, which performs the equivalent
+// console API calls rather than writing raw escape sequences.
+func (aw *ansiWriter) Write(p []byte) (total int, err error) {
+	if len(p) == 0 {
+		return 0, nil
+	}
+
+	logger.Infof("Write: % x", p)
+	logger.Infof("Write: %s", string(p))
+	return aw.parser.Parse(p)
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/windows/console.go b/vendor/github.com/docker/docker/pkg/term/windows/console.go
new file mode 100644
index 0000000000000000000000000000000000000000..ca5c3b2e535bb59c94acb065ce54a9396f405976
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/windows/console.go
@@ -0,0 +1,35 @@
+// +build windows
+
+package windows
+
+import (
+	"os"
+
+	"github.com/Azure/go-ansiterm/winterm"
+)
+
+// GetHandleInfo returns file descriptor and bool indicating whether the file is a console.
+// The package's own ANSI wrappers are always considered consoles; *os.File is
+// probed via IsConsole; any other type yields (0, false).
+func GetHandleInfo(in interface{}) (uintptr, bool) {
+	switch t := in.(type) {
+	case *ansiReader:
+		return t.Fd(), true
+	case *ansiWriter:
+		return t.Fd(), true
+	}
+
+	var inFd uintptr
+	var isTerminal bool
+
+	if file, ok := in.(*os.File); ok {
+		inFd = file.Fd()
+		isTerminal = IsConsole(inFd)
+	}
+	return inFd, isTerminal
+}
+
+// IsConsole returns true if the given file descriptor is a Windows Console.
+// The code assumes that GetConsoleMode will return an error for file descriptors that are not a console.
+func IsConsole(fd uintptr) bool {
+	_, e := winterm.GetConsoleMode(fd)
+	return e == nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/windows/windows.go b/vendor/github.com/docker/docker/pkg/term/windows/windows.go
new file mode 100644
index 0000000000000000000000000000000000000000..ce4cb5990ee1a9b2cdb9c8ede26032880cf8454d
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/windows/windows.go
@@ -0,0 +1,33 @@
+// These files implement ANSI-aware input and output streams for use by the Docker Windows client.
+// When asked for the set of standard streams (e.g., stdin, stdout, stderr), the code will create
+// and return pseudo-streams that convert ANSI sequences to / from Windows Console API calls.
+
+package windows
+
+import (
+	"io/ioutil"
+	"os"
+	"sync"
+
+	ansiterm "github.com/Azure/go-ansiterm"
+	"github.com/Sirupsen/logrus"
+)
+
+// logger is the package-wide logger, lazily configured by initLogger.
+var logger *logrus.Logger
+var initOnce sync.Once
+
+// initLogger configures the package logger exactly once. Output is discarded
+// unless the ansiterm debug environment variable is set to "1", in which case
+// it goes to a local log file.
+func initLogger() {
+	initOnce.Do(func() {
+		logFile := ioutil.Discard
+
+		if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" {
+			// NOTE(review): the os.Create error is ignored; on failure logFile
+			// holds a typed-nil *os.File and writes will return an error.
+			logFile, _ = os.Create("ansiReaderWriter.log")
+		}
+
+		logger = &logrus.Logger{
+			Out:       logFile,
+			Formatter: new(logrus.TextFormatter),
+			Level:     logrus.DebugLevel,
+		}
+	})
+}
diff --git a/vendor/github.com/docker/docker/plugin/v2/plugin.go b/vendor/github.com/docker/docker/plugin/v2/plugin.go
new file mode 100644
index 0000000000000000000000000000000000000000..93b489a14b6c01004d6505c9e5c47e7ed1502295
--- /dev/null
+++ b/vendor/github.com/docker/docker/plugin/v2/plugin.go
@@ -0,0 +1,244 @@
+package v2
+
+import (
+	"fmt"
+	"strings"
+	"sync"
+
+	"github.com/docker/distribution/digest"
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/pkg/plugingetter"
+	"github.com/docker/docker/pkg/plugins"
+)
+
+// Plugin represents an individual plugin.
+type Plugin struct {
+	mu              sync.RWMutex // guards pClient, refCount, and PluginObj reads/writes below
+	PluginObj       types.Plugin `json:"plugin"` // todo: embed struct
+	pClient         *plugins.Client
+	refCount        int
+	PropagatedMount string // TODO: make private
+	Rootfs          string // TODO: make private
+
+	Config   digest.Digest   // digest of the plugin config blob
+	Blobsums []digest.Digest // digests of the plugin layer blobs
+}
+
+// defaultPluginRuntimeDestination is where the plugin's runtime directory is
+// bind-mounted inside the plugin rootfs.
+const defaultPluginRuntimeDestination = "/run/docker/plugins"
+
+// ErrInadequateCapability indicates that the plugin did not have the requested capability.
+type ErrInadequateCapability struct {
+	cap string // the capability that was requested but not provided
+}
+
+// Error implements the error interface.
+func (e ErrInadequateCapability) Error() string {
+	return fmt.Sprintf("plugin does not provide %q capability", e.cap)
+}
+
+// BasePath returns the path that all paths returned by the plugin are relative to.
+// For Plugin objects this returns the host path of the plugin container's rootfs.
+func (p *Plugin) BasePath() string {
+	return p.Rootfs
+}
+
+// Client returns the plugin client.
+func (p *Plugin) Client() *plugins.Client {
+	p.mu.RLock()
+	defer p.mu.RUnlock()
+
+	return p.pClient
+}
+
+// SetPClient sets the plugin client.
+func (p *Plugin) SetPClient(client *plugins.Client) {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	p.pClient = client
+}
+
+// IsV1 returns true for V1 plugins and false otherwise.
+// This type implements only the V2 plugin model, so it is always false.
+func (p *Plugin) IsV1() bool {
+	return false
+}
+
+// Name returns the plugin name.
+// NOTE(review): unlike the other accessors, this reads PluginObj without
+// taking p.mu — confirm whether that is intentional.
+func (p *Plugin) Name() string {
+	return p.PluginObj.Name
+}
+
+// FilterByCap queries the plugin for a given capability (case-insensitive).
+// It returns the plugin itself on a match, or ErrInadequateCapability.
+func (p *Plugin) FilterByCap(capability string) (*Plugin, error) {
+	capability = strings.ToLower(capability)
+	for _, typ := range p.PluginObj.Config.Interface.Types {
+		// Only "docker"-prefixed interface types are considered.
+		if typ.Capability == capability && typ.Prefix == "docker" {
+			return p, nil
+		}
+	}
+	return nil, ErrInadequateCapability{capability}
+}
+
+// InitEmptySettings initializes empty settings for a plugin.
+// Mounts, devices, and args are deep-copied from the config so later Set()
+// calls mutate the settings without touching the config defaults.
+func (p *Plugin) InitEmptySettings() {
+	p.PluginObj.Settings.Mounts = make([]types.PluginMount, len(p.PluginObj.Config.Mounts))
+	copy(p.PluginObj.Settings.Mounts, p.PluginObj.Config.Mounts)
+	p.PluginObj.Settings.Devices = make([]types.PluginDevice, len(p.PluginObj.Config.Linux.Devices))
+	copy(p.PluginObj.Settings.Devices, p.PluginObj.Config.Linux.Devices)
+	p.PluginObj.Settings.Env = make([]string, 0, len(p.PluginObj.Config.Env))
+	// Only env entries with a default value are materialized as NAME=VALUE.
+	for _, env := range p.PluginObj.Config.Env {
+		if env.Value != nil {
+			p.PluginObj.Settings.Env = append(p.PluginObj.Settings.Env, fmt.Sprintf("%s=%s", env.Name, *env.Value))
+		}
+	}
+	p.PluginObj.Settings.Args = make([]string, len(p.PluginObj.Config.Args.Value))
+	copy(p.PluginObj.Settings.Args, p.PluginObj.Config.Args.Value)
+}
+
+// Set is used to pass arguments to the plugin.
+// Each arg has the form <name>[.<field>][=<value>]; the name is matched
+// against the config's envs, mounts, devices, and args (in that order), and
+// the corresponding settings entry is updated if the field is settable.
+// The plugin must be disabled; returns an error on unknown or non-settable names.
+func (p *Plugin) Set(args []string) error {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	if p.PluginObj.Enabled {
+		return fmt.Errorf("cannot set on an active plugin, disable plugin before setting")
+	}
+
+	sets, err := newSettables(args)
+	if err != nil {
+		return err
+	}
+
+	// TODO(vieux): lots of code duplication here, needs to be refactored.
+
+next:
+	for _, s := range sets {
+		// range over all the envs in the config
+		for _, env := range p.PluginObj.Config.Env {
+			// found the env in the config
+			if env.Name == s.name {
+				// is it settable ?
+				if ok, err := s.isSettable(allowedSettableFieldsEnv, env.Settable); err != nil {
+					return err
+				} else if !ok {
+					return fmt.Errorf("%q is not settable", s.prettyName())
+				}
+				// it is, so lets update the settings in memory
+				updateSettingsEnv(&p.PluginObj.Settings.Env, &s)
+				continue next
+			}
+		}
+
+		// range over all the mounts in the config
+		for _, mount := range p.PluginObj.Config.Mounts {
+			// found the mount in the config
+			if mount.Name == s.name {
+				// is it settable ?
+				if ok, err := s.isSettable(allowedSettableFieldsMounts, mount.Settable); err != nil {
+					return err
+				} else if !ok {
+					return fmt.Errorf("%q is not settable", s.prettyName())
+				}
+
+				// it is, so lets update the settings in memory
+				// (writes through the shared *string, so the settings copy sees it too)
+				*mount.Source = s.value
+				continue next
+			}
+		}
+
+		// range over all the devices in the config
+		for _, device := range p.PluginObj.Config.Linux.Devices {
+			// found the device in the config
+			if device.Name == s.name {
+				// is it settable ?
+				if ok, err := s.isSettable(allowedSettableFieldsDevices, device.Settable); err != nil {
+					return err
+				} else if !ok {
+					return fmt.Errorf("%q is not settable", s.prettyName())
+				}
+
+				// it is, so lets update the settings in memory
+				// (writes through the shared *string, so the settings copy sees it too)
+				*device.Path = s.value
+				continue next
+			}
+		}
+
+		// found the name in the config
+		if p.PluginObj.Config.Args.Name == s.name {
+			// is it settable ?
+			if ok, err := s.isSettable(allowedSettableFieldsArgs, p.PluginObj.Config.Args.Settable); err != nil {
+				return err
+			} else if !ok {
+				return fmt.Errorf("%q is not settable", s.prettyName())
+			}
+
+			// it is, so lets update the settings in memory
+			p.PluginObj.Settings.Args = strings.Split(s.value, " ")
+			continue next
+		}
+
+		return fmt.Errorf("setting %q not found in the plugin configuration", s.name)
+	}
+
+	return nil
+}
+
+// IsEnabled returns the active state of the plugin.
+func (p *Plugin) IsEnabled() bool {
+	p.mu.RLock()
+	defer p.mu.RUnlock()
+
+	return p.PluginObj.Enabled
+}
+
+// GetID returns the plugin's ID.
+func (p *Plugin) GetID() string {
+	p.mu.RLock()
+	defer p.mu.RUnlock()
+
+	return p.PluginObj.ID
+}
+
+// GetSocket returns the plugin socket.
+func (p *Plugin) GetSocket() string {
+	p.mu.RLock()
+	defer p.mu.RUnlock()
+
+	return p.PluginObj.Config.Interface.Socket
+}
+
+// GetTypes returns the interface types of a plugin.
+func (p *Plugin) GetTypes() []types.PluginInterfaceType {
+	p.mu.RLock()
+	defer p.mu.RUnlock()
+
+	return p.PluginObj.Config.Interface.Types
+}
+
+// GetRefCount returns the reference count.
+func (p *Plugin) GetRefCount() int {
+	p.mu.RLock()
+	defer p.mu.RUnlock()
+
+	return p.refCount
+}
+
+// AddRefCount adds to reference count.
+// count may be negative (see Release); no lower-bound check is performed.
+func (p *Plugin) AddRefCount(count int) {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	p.refCount += count
+}
+
+// Acquire increments the plugin's reference count
+// This should be followed up by `Release()` when the plugin is no longer in use.
+func (p *Plugin) Acquire() {
+	p.AddRefCount(plugingetter.ACQUIRE)
+}
+
+// Release decrements the plugin's reference count
+// This should only be called when the plugin is no longer in use, e.g. after
+// a prior `Acquire()` or getter.Get("name", "type", plugingetter.ACQUIRE)
+func (p *Plugin) Release() {
+	p.AddRefCount(plugingetter.RELEASE)
+}
diff --git a/vendor/github.com/docker/docker/plugin/v2/plugin_linux.go b/vendor/github.com/docker/docker/plugin/v2/plugin_linux.go
new file mode 100644
index 0000000000000000000000000000000000000000..f1c2da0bc7bd4cc9932682279abc34e72d53b264
--- /dev/null
+++ b/vendor/github.com/docker/docker/plugin/v2/plugin_linux.go
@@ -0,0 +1,121 @@
+// +build linux
+
+package v2
+
+import (
+	"errors"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/oci"
+	"github.com/docker/docker/pkg/system"
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+// InitSpec creates an OCI spec from the plugin's config.
+// execRoot is the host directory for per-plugin runtime state; a subdirectory
+// named after the plugin ID is created and bind-mounted into the container at
+// defaultPluginRuntimeDestination. Network, mount, device, propagated-mount,
+// env, and process settings are all derived from PluginObj.
+func (p *Plugin) InitSpec(execRoot string) (*specs.Spec, error) {
+	s := oci.DefaultSpec()
+	s.Root = specs.Root{
+		Path:     p.Rootfs,
+		Readonly: false, // TODO: all plugins should be readonly? settable in config?
+	}
+
+	// Destinations the user explicitly configured; used below to drop
+	// conflicting default /dev mounts.
+	userMounts := make(map[string]struct{}, len(p.PluginObj.Settings.Mounts))
+	for _, m := range p.PluginObj.Settings.Mounts {
+		userMounts[m.Destination] = struct{}{}
+	}
+
+	execRoot = filepath.Join(execRoot, p.PluginObj.ID)
+	if err := os.MkdirAll(execRoot, 0700); err != nil {
+		return nil, err
+	}
+
+	mounts := append(p.PluginObj.Config.Mounts, types.PluginMount{
+		Source:      &execRoot,
+		Destination: defaultPluginRuntimeDestination,
+		Type:        "bind",
+		Options:     []string{"rbind", "rshared"},
+	})
+
+	if p.PluginObj.Config.Network.Type != "" {
+		// TODO: if net == bridge, use libnetwork controller to create a new plugin-specific bridge, bind mount /etc/hosts and /etc/resolv.conf look at the docker code (allocateNetwork, initialize)
+		if p.PluginObj.Config.Network.Type == "host" {
+			oci.RemoveNamespace(&s, specs.NamespaceType("network"))
+		}
+		etcHosts := "/etc/hosts"
+		resolvConf := "/etc/resolv.conf"
+		mounts = append(mounts,
+			types.PluginMount{
+				Source:      &etcHosts,
+				Destination: etcHosts,
+				Type:        "bind",
+				Options:     []string{"rbind", "ro"},
+			},
+			types.PluginMount{
+				Source:      &resolvConf,
+				Destination: resolvConf,
+				Type:        "bind",
+				Options:     []string{"rbind", "ro"},
+			})
+	}
+
+	for _, mnt := range mounts {
+		m := specs.Mount{
+			Destination: mnt.Destination,
+			Type:        mnt.Type,
+			Options:     mnt.Options,
+		}
+		if mnt.Source == nil {
+			return nil, errors.New("mount source is not specified")
+		}
+		m.Source = *mnt.Source
+		s.Mounts = append(s.Mounts, m)
+	}
+
+	// Drop default /dev/* mounts that the user has overridden.
+	// NOTE(review): deleting from s.Mounts while ranging over it skips the
+	// element that slides into position i after a removal — two adjacent
+	// removable /dev mounts would leave the second one in place. Suspected
+	// upstream bug; left unchanged in this vendored copy.
+	for i, m := range s.Mounts {
+		if strings.HasPrefix(m.Destination, "/dev/") {
+			if _, ok := userMounts[m.Destination]; ok {
+				s.Mounts = append(s.Mounts[:i], s.Mounts[i+1:]...)
+			}
+		}
+	}
+
+	if p.PluginObj.Config.PropagatedMount != "" {
+		p.PropagatedMount = filepath.Join(p.Rootfs, p.PluginObj.Config.PropagatedMount)
+		s.Linux.RootfsPropagation = "rshared"
+	}
+
+	if p.PluginObj.Config.Linux.AllowAllDevices {
+		// Grant read/write/mknod on every device in the cgroup.
+		rwm := "rwm"
+		s.Linux.Resources.Devices = []specs.DeviceCgroup{{Allow: true, Access: &rwm}}
+	}
+	for _, dev := range p.PluginObj.Settings.Devices {
+		path := *dev.Path
+		d, dPermissions, err := oci.DevicesFromPath(path, path, "rwm")
+		if err != nil {
+			return nil, err
+		}
+		s.Linux.Devices = append(s.Linux.Devices, d...)
+		s.Linux.Resources.Devices = append(s.Linux.Resources.Devices, dPermissions...)
+	}
+
+	// PATH first, then the plugin's configured environment.
+	envs := make([]string, 1, len(p.PluginObj.Settings.Env)+1)
+	envs[0] = "PATH=" + system.DefaultPathEnv
+	envs = append(envs, p.PluginObj.Settings.Env...)
+
+	args := append(p.PluginObj.Config.Entrypoint, p.PluginObj.Settings.Args...)
+	cwd := p.PluginObj.Config.WorkDir
+	if len(cwd) == 0 {
+		cwd = "/"
+	}
+	s.Process.Terminal = false
+	s.Process.Args = args
+	s.Process.Cwd = cwd
+	s.Process.Env = envs
+
+	s.Process.Capabilities = append(s.Process.Capabilities, p.PluginObj.Config.Linux.Capabilities...)
+
+	return &s, nil
+}
diff --git a/vendor/github.com/docker/docker/plugin/v2/plugin_unsupported.go b/vendor/github.com/docker/docker/plugin/v2/plugin_unsupported.go
new file mode 100644
index 0000000000000000000000000000000000000000..e60fb8311edba9c7411fe3672d5ff91012b248b4
--- /dev/null
+++ b/vendor/github.com/docker/docker/plugin/v2/plugin_unsupported.go
@@ -0,0 +1,14 @@
+// +build !linux
+
+package v2
+
+import (
+	"errors"
+
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+// InitSpec creates an OCI spec from the plugin's config.
+// V2 plugins are only runnable on Linux; this stub always errors.
+func (p *Plugin) InitSpec(execRoot string) (*specs.Spec, error) {
+	return nil, errors.New("not supported")
+}
diff --git a/vendor/github.com/docker/docker/plugin/v2/settable.go b/vendor/github.com/docker/docker/plugin/v2/settable.go
new file mode 100644
index 0000000000000000000000000000000000000000..79c6befc24bf88e7c35ee076bb873524a406ddb5
--- /dev/null
+++ b/vendor/github.com/docker/docker/plugin/v2/settable.go
@@ -0,0 +1,102 @@
+package v2
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+)
+
+// settable is one parsed <name>[.<field>][=<value>] argument from Set.
+type settable struct {
+	name  string // setting name, e.g. "DEBUG" or a mount/device name
+	field string // optional sub-field, e.g. "source"; may be defaulted by isSettable
+	value string // value after "="; empty when no "=" was given
+}
+
+var (
+	// Fields this implementation knows how to apply, per setting category.
+	allowedSettableFieldsEnv     = []string{"value"}
+	allowedSettableFieldsArgs    = []string{"value"}
+	allowedSettableFieldsDevices = []string{"path"}
+	allowedSettableFieldsMounts  = []string{"source"}
+
+	errMultipleFields = errors.New("multiple fields are settable, one must be specified")
+	errInvalidFormat  = errors.New("invalid format, must be <name>[.<field>][=<value>]")
+)
+
+// newSettables parses each argument with newSettable, failing fast on the
+// first malformed entry.
+func newSettables(args []string) ([]settable, error) {
+	sets := make([]settable, 0, len(args))
+	for _, arg := range args {
+		set, err := newSettable(arg)
+		if err != nil {
+			return nil, err
+		}
+		sets = append(sets, set)
+	}
+	return sets, nil
+}
+
+// newSettable parses a single <name>[.<field>][=<value>] argument.
+// A leading "=" is invalid; a missing "=" leaves value empty.
+func newSettable(arg string) (settable, error) {
+	var set settable
+	if i := strings.Index(arg, "="); i == 0 {
+		return set, errInvalidFormat
+	} else if i < 0 {
+		set.name = arg
+	} else {
+		set.name = arg[:i]
+		set.value = arg[i+1:]
+	}
+
+	// Split an optional ".field" suffix off the name. Indexing arg here is
+	// safe only because set.name is always a prefix of arg at this point.
+	if i := strings.LastIndex(set.name, "."); i > 0 {
+		set.field = set.name[i+1:]
+		set.name = arg[:i]
+	}
+
+	return set, nil
+}
+
+// prettyName returns name.field if there is a field, otherwise name.
+func (set *settable) prettyName() string {
+	if set.field != "" {
+		return fmt.Sprintf("%s.%s", set.name, set.field)
+	}
+	return set.name
+}
+
+// isSettable reports whether set.field may be modified: it must appear both
+// in allowedSettableFields (what this code can apply) and in settable (what
+// the plugin config permits). When no field was given and the config permits
+// exactly one, that field is chosen as a side effect; more than one candidate
+// is an error.
+func (set *settable) isSettable(allowedSettableFields []string, settable []string) (bool, error) {
+	if set.field == "" {
+		if len(settable) == 1 {
+			// if field is not specified and there is only one settable, default to it.
+			set.field = settable[0]
+		} else if len(settable) > 1 {
+			return false, errMultipleFields
+		}
+	}
+
+	isAllowed := false
+	for _, allowedSettableField := range allowedSettableFields {
+		if set.field == allowedSettableField {
+			isAllowed = true
+			break
+		}
+	}
+
+	if isAllowed {
+		for _, settableField := range settable {
+			if set.field == settableField {
+				return true, nil
+			}
+		}
+	}
+
+	return false, nil
+}
+
+// updateSettingsEnv replaces the NAME=VALUE entry matching set.name in env,
+// or appends a new entry when the name is not present.
+func updateSettingsEnv(env *[]string, set *settable) {
+	for i, e := range *env {
+		if parts := strings.SplitN(e, "=", 2); parts[0] == set.name {
+			(*env)[i] = fmt.Sprintf("%s=%s", set.name, set.value)
+			return
+		}
+	}
+
+	*env = append(*env, fmt.Sprintf("%s=%s", set.name, set.value))
+}
diff --git a/vendor/github.com/docker/docker/reference/reference.go b/vendor/github.com/docker/docker/reference/reference.go
new file mode 100644
index 0000000000000000000000000000000000000000..996fc507042b0792971a3b5b1a858803dccc664d
--- /dev/null
+++ b/vendor/github.com/docker/docker/reference/reference.go
@@ -0,0 +1,216 @@
+package reference
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+
+	"github.com/docker/distribution/digest"
+	distreference "github.com/docker/distribution/reference"
+	"github.com/docker/docker/image/v1"
+)
+
+const (
+	// DefaultTag defines the default tag used when performing images related actions and no tag or digest is specified
+	DefaultTag = "latest"
+	// DefaultHostname is the default built-in hostname
+	DefaultHostname = "docker.io"
+	// LegacyDefaultHostname is automatically converted to DefaultHostname
+	LegacyDefaultHostname = "index.docker.io"
+	// DefaultRepoPrefix is the prefix used for default repositories in default host
+	DefaultRepoPrefix = "library/"
+)
+
+// Named is an object with a full name
+type Named interface {
+	// Name returns normalized repository name, like "ubuntu".
+	Name() string
+	// String returns full reference, like "ubuntu@sha256:abcdef..."
+	String() string
+	// FullName returns full repository name with hostname, like "docker.io/library/ubuntu"
+	FullName() string
+	// Hostname returns hostname for the reference, like "docker.io"
+	Hostname() string
+	// RemoteName returns the repository component of the full name, like "library/ubuntu"
+	RemoteName() string
+}
+
+// NamedTagged is an object including a name and tag.
+type NamedTagged interface {
+	Named
+	// Tag returns the tag component, like "latest".
+	Tag() string
+}
+
+// Canonical reference is an object with a fully unique
+// name including a name with hostname and digest
+type Canonical interface {
+	Named
+	// Digest returns the content-addressable digest component.
+	Digest() digest.Digest
+}
+
+// ParseNamed parses s and returns a syntactically valid reference implementing
+// the Named interface. The reference must have a name, otherwise an error is
+// returned.
+// If an error was encountered it is returned, along with a nil Reference.
+func ParseNamed(s string) (Named, error) {
+	named, err := distreference.ParseNamed(s)
+	if err != nil {
+		return nil, fmt.Errorf("Error parsing reference: %q is not a valid repository/tag: %s", s, err)
+	}
+	// Re-derive the name through WithName so docker's normalization
+	// (default hostname / library prefix stripping) is applied.
+	r, err := WithName(named.Name())
+	if err != nil {
+		return nil, err
+	}
+	// Digest takes precedence over tag when both interfaces are satisfied.
+	if canonical, isCanonical := named.(distreference.Canonical); isCanonical {
+		return WithDigest(r, canonical.Digest())
+	}
+	if tagged, isTagged := named.(distreference.NamedTagged); isTagged {
+		return WithTag(r, tagged.Tag())
+	}
+	return r, nil
+}
+
+// TrimNamed removes any tag or digest from the named reference
+func TrimNamed(ref Named) Named {
+	return &namedRef{distreference.TrimNamed(ref)}
+}
+
+// WithName returns a named object representing the given string. If the input
+// is invalid ErrReferenceInvalidFormat will be returned.
+// The name is normalized first (default hostname and library/ prefix removed)
+// and rejected if it could be confused with an image ID.
+func WithName(name string) (Named, error) {
+	name, err := normalize(name)
+	if err != nil {
+		return nil, err
+	}
+	if err := validateName(name); err != nil {
+		return nil, err
+	}
+	r, err := distreference.WithName(name)
+	if err != nil {
+		return nil, err
+	}
+	return &namedRef{r}, nil
+}
+
+// WithTag combines the name from "name" and the tag from "tag" to form a
+// reference incorporating both the name and the tag.
+func WithTag(name Named, tag string) (NamedTagged, error) {
+	r, err := distreference.WithTag(name, tag)
+	if err != nil {
+		return nil, err
+	}
+	return &taggedRef{namedRef{r}}, nil
+}
+
+// WithDigest combines the name from "name" and the digest from "digest" to form
+// a reference incorporating both the name and the digest.
+func WithDigest(name Named, digest digest.Digest) (Canonical, error) {
+	r, err := distreference.WithDigest(name, digest)
+	if err != nil {
+		return nil, err
+	}
+	return &canonicalRef{namedRef{r}}, nil
+}
+
+// namedRef adapts a distribution reference to this package's Named interface.
+type namedRef struct {
+	distreference.Named
+}
+
+// taggedRef and canonicalRef additionally expose Tag() / Digest().
+type taggedRef struct {
+	namedRef
+}
+type canonicalRef struct {
+	namedRef
+}
+
+// FullName returns "hostname/remoteName" with defaults filled in by splitHostname.
+func (r *namedRef) FullName() string {
+	hostname, remoteName := splitHostname(r.Name())
+	return hostname + "/" + remoteName
+}
+
+// Hostname returns the registry hostname component.
+func (r *namedRef) Hostname() string {
+	hostname, _ := splitHostname(r.Name())
+	return hostname
+}
+
+// RemoteName returns the repository component.
+func (r *namedRef) RemoteName() string {
+	_, remoteName := splitHostname(r.Name())
+	return remoteName
+}
+
+// Tag delegates to the wrapped distribution NamedTagged.
+func (r *taggedRef) Tag() string {
+	return r.namedRef.Named.(distreference.NamedTagged).Tag()
+}
+
+// Digest delegates to the wrapped distribution Canonical.
+func (r *canonicalRef) Digest() digest.Digest {
+	return r.namedRef.Named.(distreference.Canonical).Digest()
+}
+
+// WithDefaultTag adds a default tag to a reference if it only has a repo name.
+func WithDefaultTag(ref Named) Named {
+	if IsNameOnly(ref) {
+		// error ignored: tagging with the constant DefaultTag cannot fail here
+		ref, _ = WithTag(ref, DefaultTag)
+	}
+	return ref
+}
+
+// IsNameOnly returns true if reference only contains a repo name
+// (i.e. it carries neither a tag nor a digest).
+func IsNameOnly(ref Named) bool {
+	if _, ok := ref.(NamedTagged); ok {
+		return false
+	}
+	if _, ok := ref.(Canonical); ok {
+		return false
+	}
+	return true
+}
+
+// ParseIDOrReference parses string for an image ID or a reference. ID can be
+// without a default prefix.
+// Exactly one of the digest and the Named result is non-zero on success.
+func ParseIDOrReference(idOrRef string) (digest.Digest, Named, error) {
+	// A bare 64-char hex ID gets the sha256: prefix before digest parsing.
+	if err := v1.ValidateID(idOrRef); err == nil {
+		idOrRef = "sha256:" + idOrRef
+	}
+	if dgst, err := digest.ParseDigest(idOrRef); err == nil {
+		return dgst, nil, nil
+	}
+	ref, err := ParseNamed(idOrRef)
+	return "", ref, err
+}
+
+// splitHostname splits a repository name to hostname and remotename string.
+// If no valid hostname is found, the default hostname is used. Repository name
+// needs to be already validated before.
+func splitHostname(name string) (hostname, remoteName string) {
+	i := strings.IndexRune(name, '/')
+	// A first path component counts as a hostname only if it contains a dot
+	// or colon, or is exactly "localhost".
+	if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") {
+		hostname, remoteName = DefaultHostname, name
+	} else {
+		hostname, remoteName = name[:i], name[i+1:]
+	}
+	if hostname == LegacyDefaultHostname {
+		hostname = DefaultHostname
+	}
+	// Official images on the default registry live under "library/".
+	if hostname == DefaultHostname && !strings.ContainsRune(remoteName, '/') {
+		remoteName = DefaultRepoPrefix + remoteName
+	}
+	return
+}
+
+// normalize returns a repository name in its normalized form, meaning it
+// will not contain default hostname nor library/ prefix for official images.
+func normalize(name string) (string, error) {
+	host, remoteName := splitHostname(name)
+	if strings.ToLower(remoteName) != remoteName {
+		return "", errors.New("invalid reference format: repository name must be lowercase")
+	}
+	if host == DefaultHostname {
+		if strings.HasPrefix(remoteName, DefaultRepoPrefix) {
+			return strings.TrimPrefix(remoteName, DefaultRepoPrefix), nil
+		}
+		return remoteName, nil
+	}
+	// Non-default registry: keep the name exactly as given.
+	return name, nil
+}
+
+// validateName rejects names that parse as 64-character hex image IDs, which
+// would otherwise be ambiguous with content digests.
+func validateName(name string) error {
+	if err := v1.ValidateID(name); err == nil {
+		return fmt.Errorf("Invalid repository name (%s), cannot specify 64-byte hexadecimal strings", name)
+	}
+	return nil
+}
diff --git a/vendor/github.com/docker/docker/reference/store.go b/vendor/github.com/docker/docker/reference/store.go
new file mode 100644
index 0000000000000000000000000000000000000000..71ca236c9c820bcd511641c3d48a550776e64d1e
--- /dev/null
+++ b/vendor/github.com/docker/docker/reference/store.go
@@ -0,0 +1,286 @@
+package reference
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"sort"
+	"sync"
+
+	"github.com/docker/distribution/digest"
+	"github.com/docker/docker/pkg/ioutils"
+)
+
+var (
+	// ErrDoesNotExist is returned if a reference is not found in the
+	// store.
+	ErrDoesNotExist = errors.New("reference does not exist")
+)
+
+// An Association is a tuple associating a reference with an image ID.
+type Association struct {
+	Ref Named
+	ID  digest.Digest
+}
+
+// Store provides the set of methods which can operate on a tag store.
+type Store interface {
+	References(id digest.Digest) []Named
+	ReferencesByName(ref Named) []Association
+	AddTag(ref Named, id digest.Digest, force bool) error
+	AddDigest(ref Canonical, id digest.Digest, force bool) error
+	Delete(ref Named) (bool, error)
+	Get(ref Named) (digest.Digest, error)
+}
+
+type store struct { // file-backed implementation of Store; mu guards Repositories and the cache
+	mu sync.RWMutex
+	// jsonPath is the path to the file where the serialized tag data is
+	// stored.
+	jsonPath string
+	// Repositories is a map of repositories, indexed by name.
+	Repositories map[string]repository
+	// referencesByIDCache is a cache of references indexed by ID, to speed
+	// up References.
+	referencesByIDCache map[digest.Digest]map[string]Named
+}
+
+// repository maps tags to digests. The key is a stringified Reference,
+// including the repository name.
+type repository map[string]digest.Digest
+
+type lexicalRefs []Named // sort.Interface ordering Named refs lexically by String()
+
+func (a lexicalRefs) Len() int           { return len(a) }
+func (a lexicalRefs) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+func (a lexicalRefs) Less(i, j int) bool { return a[i].String() < a[j].String() }
+
+type lexicalAssociations []Association // sort.Interface ordering Associations lexically by Ref.String()
+
+func (a lexicalAssociations) Len() int           { return len(a) }
+func (a lexicalAssociations) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+func (a lexicalAssociations) Less(i, j int) bool { return a[i].Ref.String() < a[j].Ref.String() }
+
+// NewReferenceStore creates a new reference store, tied to a file path where
+// the set of references are serialized in JSON format.
+func NewReferenceStore(jsonPath string) (Store, error) {
+	abspath, err := filepath.Abs(jsonPath)
+	if err != nil {
+		return nil, err
+	}
+
+	store := &store{
+		jsonPath:            abspath,
+		Repositories:        make(map[string]repository),
+		referencesByIDCache: make(map[digest.Digest]map[string]Named),
+	}
+	// Load the json file if it exists, otherwise create it.
+	if err := store.reload(); os.IsNotExist(err) {
+		if err := store.save(); err != nil { // first run: persist the empty store so the file exists
+			return nil, err
+		}
+	} else if err != nil {
+		return nil, err
+	}
+	return store, nil
+}
+
+// AddTag adds a tag reference to the store. If force is set to true, existing
+// references can be overwritten. This only works for tags, not digests.
+func (store *store) AddTag(ref Named, id digest.Digest, force bool) error {
+	if _, isCanonical := ref.(Canonical); isCanonical { // a digest reference must go through AddDigest instead
+		return errors.New("refusing to create a tag with a digest reference")
+	}
+	return store.addReference(WithDefaultTag(ref), id, force)
+}
+
+// AddDigest adds a digest reference to the store.
+func (store *store) AddDigest(ref Canonical, id digest.Digest, force bool) error {
+	return store.addReference(ref, id, force)
+}
+
+func (store *store) addReference(ref Named, id digest.Digest, force bool) error { // maps ref to id, updates the by-ID cache, and persists to disk
+	if ref.Name() == string(digest.Canonical) { // e.g. a repo literally named after the digest algorithm would be ambiguous
+		return errors.New("refusing to create an ambiguous tag using digest algorithm as name")
+	}
+
+	store.mu.Lock()
+	defer store.mu.Unlock()
+
+	repository, exists := store.Repositories[ref.Name()]
+	if !exists || repository == nil { // lazily create the per-repo map on first reference
+		repository = make(map[string]digest.Digest)
+		store.Repositories[ref.Name()] = repository
+	}
+
+	refStr := ref.String()
+	oldID, exists := repository[refStr]
+
+	if exists {
+		// force only works for tags
+		if digested, isDigest := ref.(Canonical); isDigest { // digest references are immutable once set
+			return fmt.Errorf("Cannot overwrite digest %s", digested.Digest().String())
+		}
+
+		if !force {
+			return fmt.Errorf("Conflict: Tag %s is already set to image %s, if you want to replace it, please use -f option", ref.String(), oldID.String())
+		}
+
+		if store.referencesByIDCache[oldID] != nil { // drop the old id->ref cache entry being replaced
+			delete(store.referencesByIDCache[oldID], refStr)
+			if len(store.referencesByIDCache[oldID]) == 0 { // no refs left for oldID: remove the whole bucket
+				delete(store.referencesByIDCache, oldID)
+			}
+		}
+	}
+
+	repository[refStr] = id
+	if store.referencesByIDCache[id] == nil {
+		store.referencesByIDCache[id] = make(map[string]Named)
+	}
+	store.referencesByIDCache[id][refStr] = ref
+
+	return store.save() // persist while still holding the lock
+}
+
+// Delete deletes a reference from the store. It returns true if a deletion
+// happened, or false otherwise.
+func (store *store) Delete(ref Named) (bool, error) {
+	ref = WithDefaultTag(ref) // an untagged ref deletes the default tag
+
+	store.mu.Lock()
+	defer store.mu.Unlock()
+
+	repoName := ref.Name()
+
+	repository, exists := store.Repositories[repoName]
+	if !exists {
+		return false, ErrDoesNotExist
+	}
+
+	refStr := ref.String()
+	if id, exists := repository[refStr]; exists {
+		delete(repository, refStr)
+		if len(repository) == 0 { // last ref in the repository: remove the repository entry too
+			delete(store.Repositories, repoName)
+		}
+		if store.referencesByIDCache[id] != nil { // keep the by-ID cache consistent with Repositories
+			delete(store.referencesByIDCache[id], refStr)
+			if len(store.referencesByIDCache[id]) == 0 {
+				delete(store.referencesByIDCache, id)
+			}
+		}
+		return true, store.save()
+	}
+
+	return false, ErrDoesNotExist
+}
+
+// Get retrieves an item from the store by reference
+func (store *store) Get(ref Named) (digest.Digest, error) {
+	ref = WithDefaultTag(ref) // an untagged ref resolves the default tag
+
+	store.mu.RLock()
+	defer store.mu.RUnlock()
+
+	repository, exists := store.Repositories[ref.Name()]
+	if !exists || repository == nil {
+		return "", ErrDoesNotExist
+	}
+
+	id, exists := repository[ref.String()]
+	if !exists {
+		return "", ErrDoesNotExist
+	}
+
+	return id, nil
+}
+
+// References returns a slice of references to the given ID. The slice
+// will be nil if there are no references to this ID.
+func (store *store) References(id digest.Digest) []Named {
+	store.mu.RLock()
+	defer store.mu.RUnlock()
+
+	// Convert the internal map to an array for two reasons:
+	// 1) We must not return a mutable view of the internal cache
+	// 2) It would be ugly to expose the extraneous map keys to callers.
+
+	var references []Named
+	for _, ref := range store.referencesByIDCache[id] {
+		references = append(references, ref)
+	}
+
+	sort.Sort(lexicalRefs(references)) // map iteration order is random; sort for deterministic output
+
+	return references
+}
+
+// ReferencesByName returns the references for a given repository name.
+// If there are no references known for this repository name,
+// ReferencesByName returns nil.
+func (store *store) ReferencesByName(ref Named) []Association {
+	store.mu.RLock()
+	defer store.mu.RUnlock()
+
+	repository, exists := store.Repositories[ref.Name()]
+	if !exists {
+		return nil
+	}
+
+	var associations []Association
+	for refStr, refID := range repository {
+		ref, err := ParseNamed(refStr) // keys were produced by Named.String(), so they should always re-parse
+		if err != nil {
+			// Should never happen
+			return nil
+		}
+		associations = append(associations,
+			Association{
+				Ref: ref,
+				ID:  refID,
+			})
+	}
+
+	sort.Sort(lexicalAssociations(associations)) // map iteration order is random; sort for deterministic output
+
+	return associations
+}
+
+func (store *store) save() error { // serializes the store to jsonPath; callers must hold store.mu
+	// Store the json
+	jsonData, err := json.Marshal(store)
+	if err != nil {
+		return err
+	}
+	return ioutils.AtomicWriteFile(store.jsonPath, jsonData, 0600) // atomic write avoids a torn file on crash
+}
+
+func (store *store) reload() error { // loads jsonPath and rebuilds the in-memory by-ID cache
+	f, err := os.Open(store.jsonPath)
+	if err != nil {
+		return err // may be os.IsNotExist; NewReferenceStore handles that case
+	}
+	defer f.Close()
+	if err := json.NewDecoder(f).Decode(&store); err != nil {
+		return err
+	}
+
+	// The cache is not serialized; reconstruct it from Repositories.
+	for _, repository := range store.Repositories {
+		for refStr, refID := range repository {
+			ref, err := ParseNamed(refStr)
+			if err != nil {
+				// Should never happen
+				continue
+			}
+			if store.referencesByIDCache[refID] == nil {
+				store.referencesByIDCache[refID] = make(map[string]Named)
+			}
+			store.referencesByIDCache[refID][refStr] = ref
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/docker/docker/registry/auth.go b/vendor/github.com/docker/docker/registry/auth.go
new file mode 100644
index 0000000000000000000000000000000000000000..8cadd51ba04a08b8051642d27d7d531376aa4ef3
--- /dev/null
+++ b/vendor/github.com/docker/docker/registry/auth.go
@@ -0,0 +1,303 @@
+package registry
+
+import (
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strings"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/distribution/registry/client/auth"
+	"github.com/docker/distribution/registry/client/auth/challenge"
+	"github.com/docker/distribution/registry/client/transport"
+	"github.com/docker/docker/api/types"
+	registrytypes "github.com/docker/docker/api/types/registry"
+)
+
+const (
+	// AuthClientID is the ClientID used for the token server
+	AuthClientID = "docker"
+)
+
+// loginV1 tries to register/login to the v1 registry server.
+func loginV1(authConfig *types.AuthConfig, apiEndpoint APIEndpoint, userAgent string) (string, string, error) {
+	registryEndpoint, err := apiEndpoint.ToV1Endpoint(userAgent, nil)
+	if err != nil {
+		return "", "", err
+	}
+
+	serverAddress := registryEndpoint.String()
+
+	logrus.Debugf("attempting v1 login to registry endpoint %s", serverAddress)
+
+	if serverAddress == "" {
+		return "", "", fmt.Errorf("Server Error: Server Address not set.")
+	}
+
+	loginAgainstOfficialIndex := serverAddress == IndexServer // official index gets friendlier error messages below
+
+	req, err := http.NewRequest("GET", serverAddress+"users/", nil) // v1 login is a basic-auth GET to <registry>/users/
+	if err != nil {
+		return "", "", err
+	}
+	req.SetBasicAuth(authConfig.Username, authConfig.Password)
+	resp, err := registryEndpoint.client.Do(req)
+	if err != nil {
+		// fallback when request could not be completed
+		return "", "", fallbackError{
+			err: err,
+		}
+	}
+	defer resp.Body.Close()
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return "", "", err
+	}
+	if resp.StatusCode == http.StatusOK {
+		return "Login Succeeded", "", nil // v1 has no identity token, hence the empty second value
+	} else if resp.StatusCode == http.StatusUnauthorized {
+		if loginAgainstOfficialIndex {
+			return "", "", fmt.Errorf("Wrong login/password, please try again. Haven't got a Docker ID? Create one at https://hub.docker.com")
+		}
+		return "", "", fmt.Errorf("Wrong login/password, please try again")
+	} else if resp.StatusCode == http.StatusForbidden {
+		if loginAgainstOfficialIndex {
+			return "", "", fmt.Errorf("Login: Account is not active. Please check your e-mail for a confirmation link.")
+		}
+		// *TODO: Use registry configuration to determine what this says, if anything?
+		return "", "", fmt.Errorf("Login: Account is not active. Please see the documentation of the registry %s for instructions how to activate it.", serverAddress)
+	} else if resp.StatusCode == http.StatusInternalServerError { // Issue #14326
+		logrus.Errorf("%s returned status code %d. Response Body :\n%s", req.URL.String(), resp.StatusCode, body)
+		return "", "", fmt.Errorf("Internal Server Error")
+	}
+	return "", "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body,
+		resp.StatusCode, resp.Header)
+}
+
+type loginCredentialStore struct { // credential store backed by a single mutable AuthConfig, used during login
+	authConfig *types.AuthConfig
+}
+
+func (lcs loginCredentialStore) Basic(*url.URL) (string, string) { // same credentials regardless of URL
+	return lcs.authConfig.Username, lcs.authConfig.Password
+}
+
+func (lcs loginCredentialStore) RefreshToken(*url.URL, string) string {
+	return lcs.authConfig.IdentityToken
+}
+
+func (lcs loginCredentialStore) SetRefreshToken(u *url.URL, service, token string) {
+	lcs.authConfig.IdentityToken = token // mutates the shared AuthConfig so loginV2 can return the token
+}
+
+type staticCredentialStore struct { // read-only credential store; a nil auth yields empty credentials
+	auth *types.AuthConfig
+}
+
+// NewStaticCredentialStore returns a credential store
+// which always returns the same credential values.
+func NewStaticCredentialStore(auth *types.AuthConfig) auth.CredentialStore {
+	return staticCredentialStore{
+		auth: auth,
+	}
+}
+
+func (scs staticCredentialStore) Basic(*url.URL) (string, string) {
+	if scs.auth == nil {
+		return "", ""
+	}
+	return scs.auth.Username, scs.auth.Password
+}
+
+func (scs staticCredentialStore) RefreshToken(*url.URL, string) string {
+	if scs.auth == nil {
+		return ""
+	}
+	return scs.auth.IdentityToken
+}
+
+func (scs staticCredentialStore) SetRefreshToken(*url.URL, string, string) {
+} // intentionally a no-op: the store is static
+
+type fallbackError struct { // wraps an error to signal that the caller may fall back (used when v2 was not confirmed)
+	err error
+}
+
+func (err fallbackError) Error() string {
+	return err.err.Error()
+}
+
+// loginV2 tries to login to the v2 registry server. The given registry
+// endpoint will be pinged to get authorization challenges. These challenges
+// will be used to authenticate against the registry to validate credentials.
+func loginV2(authConfig *types.AuthConfig, endpoint APIEndpoint, userAgent string) (string, string, error) {
+	logrus.Debugf("attempting v2 login to registry endpoint %s", strings.TrimRight(endpoint.URL.String(), "/")+"/v2/")
+
+	modifiers := DockerHeaders(userAgent, nil)
+	authTransport := transport.NewTransport(NewTransport(endpoint.TLSConfig), modifiers...)
+
+	credentialAuthConfig := *authConfig // copy so SetRefreshToken mutates only this local config
+	creds := loginCredentialStore{
+		authConfig: &credentialAuthConfig,
+	}
+
+	loginClient, foundV2, err := v2AuthHTTPClient(endpoint.URL, authTransport, modifiers, creds, nil)
+	if err != nil {
+		return "", "", err
+	}
+
+	endpointStr := strings.TrimRight(endpoint.URL.String(), "/") + "/v2/"
+	req, err := http.NewRequest("GET", endpointStr, nil)
+	if err != nil {
+		if !foundV2 { // v2 not confirmed: allow the caller to fall back
+			err = fallbackError{err: err}
+		}
+		return "", "", err
+	}
+
+	resp, err := loginClient.Do(req)
+	if err != nil {
+		if !foundV2 {
+			err = fallbackError{err: err}
+		}
+		return "", "", err
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != http.StatusOK {
+		// TODO(dmcgowan): Attempt to further interpret result, status code and error code string
+		err := fmt.Errorf("login attempt to %s failed with status: %d %s", endpointStr, resp.StatusCode, http.StatusText(resp.StatusCode))
+		if !foundV2 {
+			err = fallbackError{err: err}
+		}
+		return "", "", err
+	}
+
+	return "Login Succeeded", credentialAuthConfig.IdentityToken, nil // token was filled in by SetRefreshToken, if the server issued one
+
+}
+
+func v2AuthHTTPClient(endpoint *url.URL, authTransport http.RoundTripper, modifiers []transport.RequestModifier, creds auth.CredentialStore, scopes []auth.Scope) (*http.Client, bool, error) { // builds an HTTP client whose transport answers the registry's auth challenges (token first, then basic)
+	challengeManager, foundV2, err := PingV2Registry(endpoint, authTransport)
+	if err != nil {
+		if !foundV2 { // v2 not confirmed: allow the caller to fall back
+			err = fallbackError{err: err}
+		}
+		return nil, foundV2, err
+	}
+
+	tokenHandlerOptions := auth.TokenHandlerOptions{
+		Transport:     authTransport,
+		Credentials:   creds,
+		OfflineAccess: true,
+		ClientID:      AuthClientID,
+		Scopes:        scopes,
+	}
+	tokenHandler := auth.NewTokenHandlerWithOptions(tokenHandlerOptions)
+	basicHandler := auth.NewBasicHandler(creds)
+	modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler))
+	tr := transport.NewTransport(authTransport, modifiers...)
+
+	return &http.Client{
+		Transport: tr,
+		Timeout:   15 * time.Second,
+	}, foundV2, nil
+
+}
+
+// ConvertToHostname converts a registry url which has http|https prepended
+// to just a hostname.
+func ConvertToHostname(url string) string {
+	stripped := url
+	if strings.HasPrefix(url, "http://") {
+		stripped = strings.TrimPrefix(url, "http://")
+	} else if strings.HasPrefix(url, "https://") {
+		stripped = strings.TrimPrefix(url, "https://")
+	}
+
+	nameParts := strings.SplitN(stripped, "/", 2) // drop any path component after the host
+
+	return nameParts[0]
+}
+
+// ResolveAuthConfig matches an auth configuration to a server address or a URL
+func ResolveAuthConfig(authConfigs map[string]types.AuthConfig, index *registrytypes.IndexInfo) types.AuthConfig {
+	configKey := GetAuthConfigKey(index)
+	// First try the happy case
+	if c, found := authConfigs[configKey]; found || index.Official { // official index: return c even if not found (zero value)
+		return c
+	}
+
+	// Maybe they have a legacy config file, we will iterate the keys converting
+	// them to the new format and testing
+	for registry, ac := range authConfigs {
+		if configKey == ConvertToHostname(registry) {
+			return ac
+		}
+	}
+
+	// When all else fails, return an empty auth config
+	return types.AuthConfig{}
+}
+
+// PingResponseError is used when the response from a ping
+// was received but invalid.
+type PingResponseError struct {
+	Err error
+}
+
+func (err PingResponseError) Error() string {
+	return err.Err.Error()
+}
+
+// PingV2Registry attempts to ping a v2 registry and on success return a
+// challenge manager for the supported authentication types and
+// whether v2 was confirmed by the response. If a response is received but
+// cannot be interpreted a PingResponseError will be returned.
+func PingV2Registry(endpoint *url.URL, transport http.RoundTripper) (challenge.Manager, bool, error) {
+	var (
+		foundV2   = false
+		v2Version = auth.APIVersion{
+			Type:    "registry",
+			Version: "2.0",
+		}
+	)
+
+	pingClient := &http.Client{
+		Transport: transport,
+		Timeout:   15 * time.Second, // bounded so an unresponsive registry cannot hang the ping
+	}
+	endpointStr := strings.TrimRight(endpoint.String(), "/") + "/v2/"
+	req, err := http.NewRequest("GET", endpointStr, nil)
+	if err != nil {
+		return nil, false, err
+	}
+	resp, err := pingClient.Do(req)
+	if err != nil {
+		return nil, false, err
+	}
+	defer resp.Body.Close()
+
+	versions := auth.APIVersions(resp, DefaultRegistryVersionHeader)
+	for _, pingVersion := range versions {
+		if pingVersion == v2Version {
+			// The version header indicates we're definitely
+			// talking to a v2 registry. So don't allow future
+			// fallbacks to the v1 protocol.
+
+			foundV2 = true
+			break
+		}
+	}
+
+	challengeManager := challenge.NewSimpleManager()
+	if err := challengeManager.AddResponse(resp); err != nil { // records the WWW-Authenticate challenges from the ping response
+		return nil, foundV2, PingResponseError{
+			Err: err,
+		}
+	}
+
+	return challengeManager, foundV2, nil
+}
diff --git a/vendor/github.com/docker/docker/registry/config.go b/vendor/github.com/docker/docker/registry/config.go
new file mode 100644
index 0000000000000000000000000000000000000000..9a4f6a92515ce3be41a345a088a33b15621c4c47
--- /dev/null
+++ b/vendor/github.com/docker/docker/registry/config.go
@@ -0,0 +1,305 @@
+package registry
+
+import (
+	"errors"
+	"fmt"
+	"net"
+	"net/url"
+	"strings"
+
+	registrytypes "github.com/docker/docker/api/types/registry"
+	"github.com/docker/docker/opts"
+	"github.com/docker/docker/reference"
+	"github.com/spf13/pflag"
+)
+
+// ServiceOptions holds command line options.
+type ServiceOptions struct {
+	Mirrors            []string `json:"registry-mirrors,omitempty"`
+	InsecureRegistries []string `json:"insecure-registries,omitempty"`
+
+	// V2Only controls access to legacy registries.  If it is set to true via the
+	// command line flag the daemon will not attempt to contact v1 legacy registries
+	V2Only bool `json:"disable-legacy-registry,omitempty"`
+}
+
+// serviceConfig holds daemon configuration for the registry service.
+type serviceConfig struct {
+	registrytypes.ServiceConfig
+	V2Only bool
+}
+
+var (
+	// DefaultNamespace is the default namespace
+	DefaultNamespace = "docker.io"
+	// DefaultRegistryVersionHeader is the name of the default HTTP header
+	// that carries Registry version info
+	DefaultRegistryVersionHeader = "Docker-Distribution-Api-Version"
+
+	// IndexHostname is the index hostname
+	IndexHostname = "index.docker.io"
+	// IndexServer is used for user auth and image search
+	IndexServer = "https://" + IndexHostname + "/v1/"
+	// IndexName is the name of the index
+	IndexName = "docker.io"
+
+	// NotaryServer is the endpoint serving the Notary trust server
+	NotaryServer = "https://notary.docker.io"
+
+	// DefaultV2Registry is the URI of the default v2 registry
+	DefaultV2Registry = &url.URL{
+		Scheme: "https",
+		Host:   "registry-1.docker.io",
+	}
+)
+
+var (
+	// ErrInvalidRepositoryName is an error returned if the repository name did
+	// not have the correct form
+	ErrInvalidRepositoryName = errors.New("Invalid repository name (ex: \"registry.domain.tld/myrepos\")")
+
+	emptyServiceConfig = newServiceConfig(ServiceOptions{}) // default config used when no daemon configuration is available
+)
+
+// for mocking in unit tests
+var lookupIP = net.LookupIP
+
+// InstallCliFlags adds command-line options to the top-level flag parser for
+// the current process.
+func (options *ServiceOptions) InstallCliFlags(flags *pflag.FlagSet) {
+	mirrors := opts.NewNamedListOptsRef("registry-mirrors", &options.Mirrors, ValidateMirror)
+	insecureRegistries := opts.NewNamedListOptsRef("insecure-registries", &options.InsecureRegistries, ValidateIndexName)
+
+	flags.Var(mirrors, "registry-mirror", "Preferred Docker registry mirror")
+	flags.Var(insecureRegistries, "insecure-registry", "Enable insecure registry communication")
+
+	options.installCliPlatformFlags(flags) // platform-specific flags (see config_unix.go / config_windows.go)
+}
+
+// newServiceConfig returns a new instance of serviceConfig built from options.
+func newServiceConfig(options ServiceOptions) *serviceConfig {
+	config := &serviceConfig{
+		ServiceConfig: registrytypes.ServiceConfig{
+			InsecureRegistryCIDRs: make([]*registrytypes.NetIPNet, 0),
+			IndexConfigs:          make(map[string]*registrytypes.IndexInfo, 0),
+			// Hack: Bypass setting the mirrors to IndexConfigs since they are going away
+			// and Mirrors are only for the official registry anyways.
+			Mirrors: options.Mirrors,
+		},
+		V2Only: options.V2Only,
+	}
+
+	config.LoadInsecureRegistries(options.InsecureRegistries) // NOTE(review): the returned error is silently dropped here
+
+	return config
+}
+
+// LoadInsecureRegistries loads insecure registries to config
+func (config *serviceConfig) LoadInsecureRegistries(registries []string) error {
+	// Localhost is by default considered as an insecure registry
+	// This is a stop-gap for people who are running a private registry on localhost (especially on Boot2docker).
+	//
+	// TODO: should we deprecate this once it is easier for people to set up a TLS registry or change
+	// daemon flags on boot2docker?
+	registries = append(registries, "127.0.0.0/8")
+
+	// Store original InsecureRegistryCIDRs and IndexConfigs
+	// Clean InsecureRegistryCIDRs and IndexConfigs in config, as passed registries has all insecure registry info.
+	originalCIDRs := config.ServiceConfig.InsecureRegistryCIDRs
+	originalIndexInfos := config.ServiceConfig.IndexConfigs
+
+	config.ServiceConfig.InsecureRegistryCIDRs = make([]*registrytypes.NetIPNet, 0)
+	config.ServiceConfig.IndexConfigs = make(map[string]*registrytypes.IndexInfo, 0)
+
+skip:
+	for _, r := range registries {
+		// validate insecure registry
+		if _, err := ValidateIndexName(r); err != nil {
+			// before returning err, roll back to original data
+			config.ServiceConfig.InsecureRegistryCIDRs = originalCIDRs
+			config.ServiceConfig.IndexConfigs = originalIndexInfos
+			return err
+		}
+		// Check if CIDR was passed to --insecure-registry
+		_, ipnet, err := net.ParseCIDR(r)
+		if err == nil {
+			// Valid CIDR. If ipnet is already in config.InsecureRegistryCIDRs, skip.
+			data := (*registrytypes.NetIPNet)(ipnet)
+			for _, value := range config.InsecureRegistryCIDRs {
+				if value.IP.String() == data.IP.String() && value.Mask.String() == data.Mask.String() {
+					continue skip // deduplicate: jump to the next registry entry
+				}
+			}
+			// ipnet is not found, add it in config.InsecureRegistryCIDRs
+			config.InsecureRegistryCIDRs = append(config.InsecureRegistryCIDRs, data)
+
+		} else {
+			// Assume `host:port` if not CIDR.
+			config.IndexConfigs[r] = &registrytypes.IndexInfo{
+				Name:     r,
+				Mirrors:  make([]string, 0),
+				Secure:   false,
+				Official: false,
+			}
+		}
+	}
+
+	// Configure public registry.
+	config.IndexConfigs[IndexName] = &registrytypes.IndexInfo{
+		Name:     IndexName,
+		Mirrors:  config.Mirrors,
+		Secure:   true,
+		Official: true,
+	}
+
+	return nil
+}
+
+// isSecureIndex returns false if the provided indexName is part of the list of insecure registries
+// Insecure registries accept HTTP and/or accept HTTPS with certificates from unknown CAs.
+//
+// The list of insecure registries can contain an element with CIDR notation to specify a whole subnet.
+// If the subnet contains one of the IPs of the registry specified by indexName, the latter is considered
+// insecure.
+//
+// indexName should be a URL.Host (`host:port` or `host`) where the `host` part can be either a domain name
+// or an IP address. If it is a domain name, then it will be resolved in order to check if the IP is contained
+// in a subnet. If the resolving is not successful, isSecureIndex will only try to match hostname to any element
+// of insecureRegistries.
+func isSecureIndex(config *serviceConfig, indexName string) bool {
+	// Check for configured index, first.  This is needed in case isSecureIndex
+	// is called from anything besides newIndexInfo, in order to honor per-index configurations.
+	if index, ok := config.IndexConfigs[indexName]; ok {
+		return index.Secure
+	}
+
+	host, _, err := net.SplitHostPort(indexName)
+	if err != nil {
+		// assume indexName is of the form `host` without the port and go on.
+		host = indexName
+	}
+
+	addrs, err := lookupIP(host) // lookupIP is a package var so tests can stub DNS
+	if err != nil {
+		ip := net.ParseIP(host)
+		if ip != nil {
+			addrs = []net.IP{ip}
+		}
+
+		// if ip == nil, then `host` is neither an IP nor it could be looked up,
+		// either because the index is unreachable, or because the index is behind an HTTP proxy.
+		// So, len(addrs) == 0 and we're not aborting.
+	}
+
+	// Try CIDR notation only if addrs has any elements, i.e. if `host`'s IP could be determined.
+	for _, addr := range addrs {
+		for _, ipnet := range config.InsecureRegistryCIDRs {
+			// check if the addr falls in the subnet
+			if (*net.IPNet)(ipnet).Contains(addr) {
+				return false
+			}
+		}
+	}
+
+	return true // secure by default when no insecure rule matched
+}
+
+// ValidateMirror validates an HTTP(S) registry mirror
+func ValidateMirror(val string) (string, error) {
+	uri, err := url.Parse(val)
+	if err != nil {
+		return "", fmt.Errorf("%s is not a valid URI", val)
+	}
+
+	if uri.Scheme != "http" && uri.Scheme != "https" {
+		return "", fmt.Errorf("Unsupported scheme %s", uri.Scheme)
+	}
+
+	if uri.Path != "" || uri.RawQuery != "" || uri.Fragment != "" { // a mirror must be a bare origin
+		return "", fmt.Errorf("Unsupported path/query/fragment at end of the URI")
+	}
+
+	return fmt.Sprintf("%s://%s/", uri.Scheme, uri.Host), nil // normalized form always ends with a trailing slash
+}
+
+// ValidateIndexName validates an index name.
+func ValidateIndexName(val string) (string, error) {
+	if val == reference.LegacyDefaultHostname { // fold the legacy default hostname into the current default
+		val = reference.DefaultHostname
+	}
+	if strings.HasPrefix(val, "-") || strings.HasSuffix(val, "-") {
+		return "", fmt.Errorf("Invalid index name (%s). Cannot begin or end with a hyphen.", val)
+	}
+	return val, nil
+}
+
+func validateNoScheme(reposName string) error { // repository names must not carry a URL scheme
+	if strings.Contains(reposName, "://") {
+		// It cannot contain a scheme!
+		return ErrInvalidRepositoryName
+	}
+	return nil
+}
+
+// newIndexInfo returns IndexInfo configuration from indexName
+func newIndexInfo(config *serviceConfig, indexName string) (*registrytypes.IndexInfo, error) {
+	var err error
+	indexName, err = ValidateIndexName(indexName)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return any configured index info, first.
+	if index, ok := config.IndexConfigs[indexName]; ok {
+		return index, nil
+	}
+
+	// Construct a non-configured index info.
+	index := &registrytypes.IndexInfo{
+		Name:     indexName,
+		Mirrors:  make([]string, 0),
+		Official: false,
+	}
+	index.Secure = isSecureIndex(config, indexName) // may resolve DNS to match insecure CIDR ranges
+	return index, nil
+}
+
+// GetAuthConfigKey special-cases using the full index address of the official
+// index as the AuthConfig key, and uses the (host)name[:port] for private indexes.
+func GetAuthConfigKey(index *registrytypes.IndexInfo) string {
+	if index.Official {
+		return IndexServer
+	}
+	return index.Name
+}
+
+// newRepositoryInfo validates and breaks down a repository name into a RepositoryInfo
+func newRepositoryInfo(config *serviceConfig, name reference.Named) (*RepositoryInfo, error) {
+	index, err := newIndexInfo(config, name.Hostname())
+	if err != nil {
+		return nil, err
+	}
+	official := !strings.ContainsRune(name.Name(), '/') // single-segment names denote official images
+	return &RepositoryInfo{
+		Named:    name,
+		Index:    index,
+		Official: official,
+	}, nil
+}
+
+// ParseRepositoryInfo performs the breakdown of a repository name into a RepositoryInfo, but
+// lacks registry configuration.
+func ParseRepositoryInfo(reposName reference.Named) (*RepositoryInfo, error) {
+	return newRepositoryInfo(emptyServiceConfig, reposName)
+}
+
+// ParseSearchIndexInfo will use repository name to get back an indexInfo.
+func ParseSearchIndexInfo(reposName string) (*registrytypes.IndexInfo, error) {
+	indexName, _ := splitReposSearchTerm(reposName)
+
+	indexInfo, err := newIndexInfo(emptyServiceConfig, indexName)
+	if err != nil {
+		return nil, err
+	}
+	return indexInfo, nil
+}
diff --git a/vendor/github.com/docker/docker/registry/config_unix.go b/vendor/github.com/docker/docker/registry/config_unix.go
new file mode 100644
index 0000000000000000000000000000000000000000..d692e8ef5012dd246c99a6261289f0eb3faf607d
--- /dev/null
+++ b/vendor/github.com/docker/docker/registry/config_unix.go
@@ -0,0 +1,25 @@
+// +build !windows
+
+package registry
+
+import (
+	"github.com/spf13/pflag"
+)
+
+var (
+	// CertsDir is the directory where certificates are stored
+	CertsDir = "/etc/docker/certs.d"
+)
+
+// cleanPath is used to ensure that a directory name is valid on the target
+// platform. It will be passed in something *similar* to a URL such as
+// https:/index.docker.io/v1. Not all platforms support directory names
+// which contain those characters (such as : on Windows)
+func cleanPath(s string) string {
+	return s // unix paths allow these characters, so no transformation is needed
+}
+
+// installCliPlatformFlags handles any platform specific flags for the service.
+func (options *ServiceOptions) installCliPlatformFlags(flags *pflag.FlagSet) {
+	flags.BoolVar(&options.V2Only, "disable-legacy-registry", false, "Disable contacting legacy registries")
+}
diff --git a/vendor/github.com/docker/docker/registry/config_windows.go b/vendor/github.com/docker/docker/registry/config_windows.go
new file mode 100644
index 0000000000000000000000000000000000000000..d1b313dc1e2ff47f20578ab657c1a2ff76f79332
--- /dev/null
+++ b/vendor/github.com/docker/docker/registry/config_windows.go
@@ -0,0 +1,25 @@
+package registry
+
+import (
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/spf13/pflag"
+)
+
+// CertsDir is the directory where certificates are stored
+var CertsDir = os.Getenv("programdata") + `\docker\certs.d`
+
+// cleanPath is used to ensure that a directory name is valid on the target
+// platform. It will be passed in something *similar* to a URL such as
+// https:\index.docker.io\v1. Not all platforms support directory names
+// which contain those characters (such as : on Windows)
+func cleanPath(s string) string {
+	return filepath.FromSlash(strings.Replace(s, ":", "", -1)) // strip ':' and use backslashes for Windows
+}
+
+// installCliPlatformFlags handles any platform specific flags for the service.
+func (options *ServiceOptions) installCliPlatformFlags(flags *pflag.FlagSet) {
+	// No Windows specific flags.
+}
diff --git a/vendor/github.com/docker/docker/registry/endpoint_v1.go b/vendor/github.com/docker/docker/registry/endpoint_v1.go
new file mode 100644
index 0000000000000000000000000000000000000000..6bcf8c935d13675520013eda7d0ae5ceabd32c41
--- /dev/null
+++ b/vendor/github.com/docker/docker/registry/endpoint_v1.go
@@ -0,0 +1,198 @@
+package registry
+
+import (
+	"crypto/tls"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/distribution/registry/client/transport"
+	registrytypes "github.com/docker/docker/api/types/registry"
+)
+
+// V1Endpoint stores basic information about a V1 registry endpoint.
+type V1Endpoint struct {
+	client   *http.Client // preconfigured with TLS settings and header modifiers
+	URL      *url.URL     // base URL; validateEndpoint may rewrite its Scheme during ping fallback
+	IsSecure bool         // false only when TLS verification is explicitly skipped (insecure registry)
+}
+
+// NewV1Endpoint parses the given address to return a registry endpoint.
+// It builds a TLS configuration for the index, constructs the endpoint from
+// the index's auth-config key, and then pings it (HTTPS first, HTTP fallback
+// only for insecure registries) via validateEndpoint before returning.
+func NewV1Endpoint(index *registrytypes.IndexInfo, userAgent string, metaHeaders http.Header) (*V1Endpoint, error) {
+	tlsConfig, err := newTLSConfig(index.Name, index.Secure)
+	if err != nil {
+		return nil, err
+	}
+
+	endpoint, err := newV1EndpointFromStr(GetAuthConfigKey(index), tlsConfig, userAgent, metaHeaders)
+	if err != nil {
+		return nil, err
+	}
+
+	// Reject endpoints that do not answer a ping with the scheme rules above.
+	if err := validateEndpoint(endpoint); err != nil {
+		return nil, err
+	}
+
+	return endpoint, nil
+}
+
+// validateEndpoint pings the endpoint over HTTPS and, only for registries
+// marked insecure, falls back to HTTP. Note that it mutates endpoint.URL.Scheme
+// in place, so the endpoint ends up with whichever scheme answered the ping.
+func validateEndpoint(endpoint *V1Endpoint) error {
+	logrus.Debugf("pinging registry endpoint %s", endpoint)
+
+	// Try HTTPS ping to registry
+	endpoint.URL.Scheme = "https"
+	if _, err := endpoint.Ping(); err != nil {
+		if endpoint.IsSecure {
+			// If registry is secure and HTTPS failed, show user the error and tell them about `--insecure-registry`
+			// in case that's what they need. DO NOT accept unknown CA certificates, and DO NOT fallback to HTTP.
+			return fmt.Errorf("invalid registry endpoint %s: %v. If this private registry supports only HTTP or HTTPS with an unknown CA certificate, please add `--insecure-registry %s` to the daemon's arguments. In the case of HTTPS, if you have access to the registry's CA certificate, no need for the flag; simply place the CA certificate at /etc/docker/certs.d/%s/ca.crt", endpoint, err, endpoint.URL.Host, endpoint.URL.Host)
+		}
+
+		// If registry is insecure and HTTPS failed, fallback to HTTP.
+		logrus.Debugf("Error from registry %q marked as insecure: %v. Insecurely falling back to HTTP", endpoint, err)
+		endpoint.URL.Scheme = "http"
+
+		var err2 error
+		if _, err2 = endpoint.Ping(); err2 == nil {
+			return nil
+		}
+
+		// Both schemes failed; report both errors so the user can diagnose.
+		return fmt.Errorf("invalid registry endpoint %q. HTTPS attempt: %v. HTTP attempt: %v", endpoint, err, err2)
+	}
+
+	return nil
+}
+
+// newV1Endpoint builds a V1Endpoint around a copy of the given URL. The
+// endpoint is considered secure unless tlsConfig explicitly skips verification.
+// The error return is currently always nil; it is kept for signature
+// compatibility with callers that propagate errors.
+func newV1Endpoint(address url.URL, tlsConfig *tls.Config, userAgent string, metaHeaders http.Header) (*V1Endpoint, error) {
+	endpoint := &V1Endpoint{
+		IsSecure: (tlsConfig == nil || !tlsConfig.InsecureSkipVerify),
+		URL:      new(url.URL),
+	}
+
+	// Copy the URL so later scheme rewrites do not affect the caller's value.
+	*endpoint.URL = address
+
+	// TODO(tiborvass): make sure a ConnectTimeout transport is used
+	tr := NewTransport(tlsConfig)
+	endpoint.client = HTTPClient(transport.NewTransport(tr, DockerHeaders(userAgent, metaHeaders)...))
+	return endpoint, nil
+}
+
+// trimV1Address trims the version off the address and returns the
+// trimmed address or an error if there is a non-V1 version.
+func trimV1Address(address string) (string, error) {
+	var (
+		chunks        []string
+		apiVersionStr string
+	)
+
+	if strings.HasSuffix(address, "/") {
+		address = address[:len(address)-1]
+	}
+
+	chunks = strings.Split(address, "/")
+	apiVersionStr = chunks[len(chunks)-1]
+	if apiVersionStr == "v1" {
+		return strings.Join(chunks[:len(chunks)-1], "/"), nil
+	}
+
+	for k, v := range apiVersions {
+		if k != APIVersion1 && apiVersionStr == v {
+			return "", fmt.Errorf("unsupported V1 version path %s", apiVersionStr)
+		}
+	}
+
+	return address, nil
+}
+
+// newV1EndpointFromStr normalizes a string address (defaulting to HTTPS,
+// stripping any "/v1" suffix) and builds a V1Endpoint from it.
+func newV1EndpointFromStr(address string, tlsConfig *tls.Config, userAgent string, metaHeaders http.Header) (*V1Endpoint, error) {
+	// A bare host[:port] defaults to HTTPS.
+	if !strings.HasPrefix(address, "http://") && !strings.HasPrefix(address, "https://") {
+		address = "https://" + address
+	}
+
+	trimmed, err := trimV1Address(address)
+	if err != nil {
+		return nil, err
+	}
+
+	uri, err := url.Parse(trimmed)
+	if err != nil {
+		return nil, err
+	}
+
+	return newV1Endpoint(*uri, tlsConfig, userAgent, metaHeaders)
+}
+
+// String returns the formatted URL for the root of this registry Endpoint,
+// always suffixed with "/v1/".
+func (e *V1Endpoint) String() string {
+	return e.URL.String() + "/v1/"
+}
+
+// Path returns a formatted string for the URL
+// of this endpoint with the given path appended.
+// The path is joined after the "/v1/" prefix without further escaping.
+func (e *V1Endpoint) Path(path string) string {
+	return e.URL.String() + "/v1/" + path
+}
+
+// Ping returns a PingResult which indicates whether the registry is standalone or not.
+// It issues GET /v1/_ping, decodes the JSON body into the result, then lets the
+// X-Docker-Registry-Version and X-Docker-Registry-Standalone response headers
+// override what (if anything) the body said.
+func (e *V1Endpoint) Ping() (PingResult, error) {
+	logrus.Debugf("attempting v1 ping for registry endpoint %s", e)
+
+	if e.String() == IndexServer {
+		// Skip the check, we know this one is valid
+		// (and we never want to fallback to http in case of error)
+		return PingResult{Standalone: false}, nil
+	}
+
+	req, err := http.NewRequest("GET", e.Path("_ping"), nil)
+	if err != nil {
+		return PingResult{Standalone: false}, err
+	}
+
+	resp, err := e.client.Do(req)
+	if err != nil {
+		return PingResult{Standalone: false}, err
+	}
+
+	defer resp.Body.Close()
+
+	jsonString, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return PingResult{Standalone: false}, fmt.Errorf("error while reading the http response: %s", err)
+	}
+
+	// If the header is absent, we assume true for compatibility with earlier
+	// versions of the registry. default to true
+	info := PingResult{
+		Standalone: true,
+	}
+	if err := json.Unmarshal(jsonString, &info); err != nil {
+		logrus.Debugf("Error unmarshalling the _ping PingResult: %s", err)
+		// don't stop here. Just assume sane defaults
+	}
+	if hdr := resp.Header.Get("X-Docker-Registry-Version"); hdr != "" {
+		logrus.Debugf("Registry version header: '%s'", hdr)
+		info.Version = hdr
+	}
+	logrus.Debugf("PingResult.Version: %q", info.Version)
+
+	// The standalone header, when present, wins over the decoded body.
+	standalone := resp.Header.Get("X-Docker-Registry-Standalone")
+	logrus.Debugf("Registry standalone header: '%s'", standalone)
+	// Accepted values are "true" (case-insensitive) and "1".
+	if strings.EqualFold(standalone, "true") || standalone == "1" {
+		info.Standalone = true
+	} else if len(standalone) > 0 {
+		// there is a header set, and it is not "true" or "1", so assume fails
+		info.Standalone = false
+	}
+	logrus.Debugf("PingResult.Standalone: %t", info.Standalone)
+	return info, nil
+}
diff --git a/vendor/github.com/docker/docker/registry/registry.go b/vendor/github.com/docker/docker/registry/registry.go
new file mode 100644
index 0000000000000000000000000000000000000000..17fa97ce3dadf1c27d9ee6e78618c6201cacd6a7
--- /dev/null
+++ b/vendor/github.com/docker/docker/registry/registry.go
@@ -0,0 +1,191 @@
+// Package registry contains client primitives to interact with a remote Docker registry.
+package registry
+
+import (
+	"crypto/tls"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"os"
+	"path/filepath"
+	"strings"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/distribution/registry/client/transport"
+	"github.com/docker/go-connections/sockets"
+	"github.com/docker/go-connections/tlsconfig"
+)
+
+var (
+	// ErrAlreadyExists is an error returned if an image being pushed
+	// already exists on the remote side.
+	// Callers compare against this sentinel value directly.
+	ErrAlreadyExists = errors.New("Image already exists")
+)
+
+// newTLSConfig builds the client TLS configuration for the given registry
+// hostname. Insecure registries skip certificate verification; secure ones
+// additionally load per-host CA roots and client cert/key pairs from
+// CertsDir/<cleaned hostname>.
+func newTLSConfig(hostname string, isSecure bool) (*tls.Config, error) {
+	// PreferredServerCipherSuites should have no effect
+	tlsConfig := tlsconfig.ServerDefault()
+
+	tlsConfig.InsecureSkipVerify = !isSecure
+
+	if isSecure && CertsDir != "" {
+		hostDir := filepath.Join(CertsDir, cleanPath(hostname))
+		logrus.Debugf("hostDir: %s", hostDir)
+		if err := ReadCertsDirectory(tlsConfig, hostDir); err != nil {
+			return nil, err
+		}
+	}
+
+	return tlsConfig, nil
+}
+
+// hasFile reports whether a directory listing contains an entry with the
+// exact given name.
+func hasFile(files []os.FileInfo, name string) bool {
+	for i := range files {
+		if files[i].Name() == name {
+			return true
+		}
+	}
+	return false
+}
+
+// ReadCertsDirectory reads the directory for TLS certificates
+// including roots and certificate pairs and updates the
+// provided TLS configuration.
+//
+// File conventions inside the directory:
+//   *.crt  — CA root, appended to tlsConfig.RootCAs (seeded lazily from the
+//            system pool on first .crt seen)
+//   *.cert — client certificate; its matching *.key must also be present
+//   *.key  — client key; its matching *.cert must also be present
+// A missing directory is not an error; any other read error is returned.
+func ReadCertsDirectory(tlsConfig *tls.Config, directory string) error {
+	fs, err := ioutil.ReadDir(directory)
+	if err != nil && !os.IsNotExist(err) {
+		return err
+	}
+
+	for _, f := range fs {
+		if strings.HasSuffix(f.Name(), ".crt") {
+			if tlsConfig.RootCAs == nil {
+				systemPool, err := tlsconfig.SystemCertPool()
+				if err != nil {
+					return fmt.Errorf("unable to get system cert pool: %v", err)
+				}
+				tlsConfig.RootCAs = systemPool
+			}
+			logrus.Debugf("crt: %s", filepath.Join(directory, f.Name()))
+			data, err := ioutil.ReadFile(filepath.Join(directory, f.Name()))
+			if err != nil {
+				return err
+			}
+			// AppendCertsFromPEM silently skips unparsable data by design.
+			tlsConfig.RootCAs.AppendCertsFromPEM(data)
+		}
+		if strings.HasSuffix(f.Name(), ".cert") {
+			certName := f.Name()
+			keyName := certName[:len(certName)-5] + ".key"
+			logrus.Debugf("cert: %s", filepath.Join(directory, f.Name()))
+			if !hasFile(fs, keyName) {
+				return fmt.Errorf("Missing key %s for client certificate %s. Note that CA certificates should use the extension .crt.", keyName, certName)
+			}
+			cert, err := tls.LoadX509KeyPair(filepath.Join(directory, certName), filepath.Join(directory, keyName))
+			if err != nil {
+				return err
+			}
+			tlsConfig.Certificates = append(tlsConfig.Certificates, cert)
+		}
+		if strings.HasSuffix(f.Name(), ".key") {
+			keyName := f.Name()
+			certName := keyName[:len(keyName)-4] + ".cert"
+			logrus.Debugf("key: %s", filepath.Join(directory, f.Name()))
+			// Keys are only validated to have a partner .cert; the pair itself
+			// was already loaded when the .cert entry was processed.
+			if !hasFile(fs, certName) {
+				return fmt.Errorf("Missing client certificate %s for key %s", certName, keyName)
+			}
+		}
+	}
+
+	return nil
+}
+
+// DockerHeaders returns request modifiers with a User-Agent and metaHeaders.
+// Either part may be absent; the result is always a non-nil slice.
+func DockerHeaders(userAgent string, metaHeaders http.Header) []transport.RequestModifier {
+	modifiers := make([]transport.RequestModifier, 0, 2)
+	if userAgent != "" {
+		uaHeader := http.Header{"User-Agent": []string{userAgent}}
+		modifiers = append(modifiers, transport.NewHeaderRequestModifier(uaHeader))
+	}
+	if metaHeaders != nil {
+		modifiers = append(modifiers, transport.NewHeaderRequestModifier(metaHeaders))
+	}
+	return modifiers
+}
+
+// HTTPClient returns an HTTP client structure which uses the given transport
+// and contains the necessary headers for redirected requests.
+// (The parameter is named rt to avoid shadowing the transport package.)
+func HTTPClient(rt http.RoundTripper) *http.Client {
+	client := &http.Client{Transport: rt}
+	client.CheckRedirect = addRequiredHeadersToRedirectedRequests
+	return client
+}
+
+func trustedLocation(req *http.Request) bool {
+	var (
+		trusteds = []string{"docker.com", "docker.io"}
+		hostname = strings.SplitN(req.Host, ":", 2)[0]
+	)
+	if req.URL.Scheme != "https" {
+		return false
+	}
+
+	for _, trusted := range trusteds {
+		if hostname == trusted || strings.HasSuffix(hostname, "."+trusted) {
+			return true
+		}
+	}
+	return false
+}
+
+// addRequiredHeadersToRedirectedRequests adds the necessary redirection headers
+// for redirected requests. Between two trusted docker.com/docker.io HTTPS hosts
+// the original headers (including Authorization) are reused verbatim; for any
+// other target, everything except Authorization is copied so credentials are
+// not leaked to third-party hosts.
+func addRequiredHeadersToRedirectedRequests(req *http.Request, via []*http.Request) error {
+	// Guard with len() rather than the previous `via != nil` check: a
+	// non-nil but empty slice would have panicked on the via[0] index below.
+	if len(via) == 0 || via[0] == nil {
+		return nil
+	}
+	if trustedLocation(req) && trustedLocation(via[0]) {
+		req.Header = via[0].Header
+		return nil
+	}
+	for k, v := range via[0].Header {
+		if k != "Authorization" {
+			for _, vv := range v {
+				req.Header.Add(k, vv)
+			}
+		}
+	}
+	return nil
+}
+
+// NewTransport returns a new HTTP transport. If tlsConfig is nil, it uses the
+// default TLS configuration. The dialer honors SOCKS/HTTP proxy environment
+// variables when sockets.DialerFromEnvironment succeeds; otherwise it falls
+// back to a direct dialer with 30s connect/keep-alive timeouts.
+func NewTransport(tlsConfig *tls.Config) *http.Transport {
+	if tlsConfig == nil {
+		tlsConfig = tlsconfig.ServerDefault()
+	}
+
+	direct := &net.Dialer{
+		Timeout:   30 * time.Second,
+		KeepAlive: 30 * time.Second,
+		DualStack: true,
+	}
+
+	base := &http.Transport{
+		Proxy:               http.ProxyFromEnvironment,
+		Dial:                direct.Dial,
+		TLSHandshakeTimeout: 10 * time.Second,
+		TLSClientConfig:     tlsConfig,
+		// TODO(dmcgowan): Call close idle connections when complete and use keep alive
+		DisableKeepAlives: true,
+	}
+
+	// Best-effort: only the successful case replaces the direct dialer.
+	proxyDialer, err := sockets.DialerFromEnvironment(direct)
+	if err == nil {
+		base.Dial = proxyDialer.Dial
+	}
+	return base
+}
diff --git a/vendor/github.com/docker/docker/registry/service.go b/vendor/github.com/docker/docker/registry/service.go
new file mode 100644
index 0000000000000000000000000000000000000000..596a9c7e5fa51f2bb7f92147bc12138365fbef21
--- /dev/null
+++ b/vendor/github.com/docker/docker/registry/service.go
@@ -0,0 +1,304 @@
+package registry
+
+import (
+	"crypto/tls"
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+	"sync"
+
+	"golang.org/x/net/context"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/distribution/registry/client/auth"
+	"github.com/docker/docker/api/types"
+	registrytypes "github.com/docker/docker/api/types/registry"
+	"github.com/docker/docker/reference"
+)
+
+const (
+	// DefaultSearchLimit is the default value for maximum number of returned search results.
+	// It is not enforced here; callers of Search pass it explicitly when no
+	// other limit was requested.
+	DefaultSearchLimit = 25
+)
+
+// Service is the interface defining what a registry service should implement.
+// DefaultService below is the canonical implementation.
+type Service interface {
+	Auth(ctx context.Context, authConfig *types.AuthConfig, userAgent string) (status, token string, err error)
+	LookupPullEndpoints(hostname string) (endpoints []APIEndpoint, err error)
+	LookupPushEndpoints(hostname string) (endpoints []APIEndpoint, err error)
+	ResolveRepository(name reference.Named) (*RepositoryInfo, error)
+	Search(ctx context.Context, term string, limit int, authConfig *types.AuthConfig, userAgent string, headers map[string][]string) (*registrytypes.SearchResults, error)
+	ServiceConfig() *registrytypes.ServiceConfig
+	TLSConfig(hostname string) (*tls.Config, error)
+	LoadInsecureRegistries([]string) error
+}
+
+// DefaultService is a registry service. It tracks configuration data such as a list
+// of mirrors.
+type DefaultService struct {
+	config *serviceConfig
+	mu     sync.Mutex // guards config for concurrent reads/reloads
+}
+
+// NewService returns a new instance of DefaultService ready to be
+// installed into an engine. The options are baked into an immutable-by-default
+// serviceConfig (later mutated only via LoadInsecureRegistries).
+func NewService(options ServiceOptions) *DefaultService {
+	return &DefaultService{
+		config: newServiceConfig(options),
+	}
+}
+
+// ServiceConfig returns the public registry service configuration.
+// It returns a copy assembled under the service mutex so callers can use the
+// result without racing against concurrent configuration reloads.
+func (s *DefaultService) ServiceConfig() *registrytypes.ServiceConfig {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	servConfig := registrytypes.ServiceConfig{
+		InsecureRegistryCIDRs: make([]*(registrytypes.NetIPNet), 0),
+		IndexConfigs:          make(map[string]*(registrytypes.IndexInfo)),
+		Mirrors:               make([]string, 0),
+	}
+
+	// construct a new ServiceConfig which will not retrieve s.Config directly,
+	// and look up items in s.config with mu locked
+	servConfig.InsecureRegistryCIDRs = append(servConfig.InsecureRegistryCIDRs, s.config.ServiceConfig.InsecureRegistryCIDRs...)
+
+	for key, value := range s.config.ServiceConfig.IndexConfigs {
+		servConfig.IndexConfigs[key] = value
+	}
+
+	servConfig.Mirrors = append(servConfig.Mirrors, s.config.ServiceConfig.Mirrors...)
+
+	return &servConfig
+}
+
+// LoadInsecureRegistries loads insecure registries for Service.
+// The service mutex is held for the duration of the config update.
+func (s *DefaultService) LoadInsecureRegistries(registries []string) error {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	return s.config.LoadInsecureRegistries(registries)
+}
+
+// Auth contacts the public registry with the provided credentials,
+// and returns OK if authentication was successful.
+// It can be used to verify the validity of a client's credentials.
+// Endpoints are tried in LookupPushEndpoints order (v2 before v1); a
+// fallbackError from one endpoint moves on to the next, any other error
+// aborts immediately.
+func (s *DefaultService) Auth(ctx context.Context, authConfig *types.AuthConfig, userAgent string) (status, token string, err error) {
+	// TODO Use ctx when searching for repositories
+	serverAddress := authConfig.ServerAddress
+	if serverAddress == "" {
+		serverAddress = IndexServer
+	}
+	if !strings.HasPrefix(serverAddress, "https://") && !strings.HasPrefix(serverAddress, "http://") {
+		serverAddress = "https://" + serverAddress
+	}
+	u, err := url.Parse(serverAddress)
+	if err != nil {
+		return "", "", fmt.Errorf("unable to parse server address: %v", err)
+	}
+
+	endpoints, err := s.LookupPushEndpoints(u.Host)
+	if err != nil {
+		return "", "", err
+	}
+
+	for _, endpoint := range endpoints {
+		// Pick the login implementation matching the endpoint's API version.
+		login := loginV2
+		if endpoint.Version == APIVersion1 {
+			login = loginV1
+		}
+
+		status, token, err = login(authConfig, endpoint, userAgent)
+		if err == nil {
+			return
+		}
+		if fErr, ok := err.(fallbackError); ok {
+			// Unwrap and keep trying the remaining endpoints.
+			err = fErr.err
+			logrus.Infof("Error logging in to %s endpoint, trying next endpoint: %v", endpoint.Version, err)
+			continue
+		}
+		return "", "", err
+	}
+
+	// All endpoints failed; err holds the last (unwrapped) failure.
+	return "", "", err
+}
+
+// splitReposSearchTerm breaks a search term into an index name and remote name
+func splitReposSearchTerm(reposName string) (string, string) {
+	nameParts := strings.SplitN(reposName, "/", 2)
+	var indexName, remoteName string
+	if len(nameParts) == 1 || (!strings.Contains(nameParts[0], ".") &&
+		!strings.Contains(nameParts[0], ":") && nameParts[0] != "localhost") {
+		// This is a Docker Index repos (ex: samalba/hipache or ubuntu)
+		// 'docker.io'
+		indexName = IndexName
+		remoteName = reposName
+	} else {
+		indexName = nameParts[0]
+		remoteName = nameParts[1]
+	}
+	return indexName, remoteName
+}
+
+// Search queries the public registry for images matching the specified
+// search terms, and returns the results.
+// When the auth config carries an identity token it first tries to build a
+// token-authenticated v2 client; otherwise (or on v2 fallback) it uses the
+// endpoint's own client with basic authorization.
+func (s *DefaultService) Search(ctx context.Context, term string, limit int, authConfig *types.AuthConfig, userAgent string, headers map[string][]string) (*registrytypes.SearchResults, error) {
+	// TODO Use ctx when searching for repositories
+	if err := validateNoScheme(term); err != nil {
+		return nil, err
+	}
+
+	indexName, remoteName := splitReposSearchTerm(term)
+
+	// Search is a long-running operation, just lock s.config to avoid block others.
+	s.mu.Lock()
+	index, err := newIndexInfo(s.config, indexName)
+	s.mu.Unlock()
+
+	if err != nil {
+		return nil, err
+	}
+
+	// *TODO: Search multiple indexes.
+	endpoint, err := NewV1Endpoint(index, userAgent, http.Header(headers))
+	if err != nil {
+		return nil, err
+	}
+
+	var client *http.Client
+	if authConfig != nil && authConfig.IdentityToken != "" && authConfig.Username != "" {
+		creds := NewStaticCredentialStore(authConfig)
+		// Scope the token to the catalog search action.
+		scopes := []auth.Scope{
+			auth.RegistryScope{
+				Name:    "catalog",
+				Actions: []string{"search"},
+			},
+		}
+
+		modifiers := DockerHeaders(userAgent, nil)
+		v2Client, foundV2, err := v2AuthHTTPClient(endpoint.URL, endpoint.client.Transport, modifiers, creds, scopes)
+		if err != nil {
+			// A fallbackError just means v2 auth is unavailable; anything else
+			// is fatal.
+			if fErr, ok := err.(fallbackError); ok {
+				logrus.Errorf("Cannot use identity token for search, v2 auth not supported: %v", fErr.err)
+			} else {
+				return nil, err
+			}
+		} else if foundV2 {
+			// Copy non transport http client features
+			v2Client.Timeout = endpoint.client.Timeout
+			v2Client.CheckRedirect = endpoint.client.CheckRedirect
+			v2Client.Jar = endpoint.client.Jar
+
+			logrus.Debugf("using v2 client for search to %s", endpoint.URL)
+			client = v2Client
+		}
+	}
+
+	if client == nil {
+		client = endpoint.client
+		if err := authorizeClient(client, authConfig, endpoint); err != nil {
+			return nil, err
+		}
+	}
+
+	r := newSession(client, authConfig, endpoint)
+
+	if index.Official {
+		localName := remoteName
+		if strings.HasPrefix(localName, "library/") {
+			// If pull "library/foo", it's stored locally under "foo"
+			localName = strings.SplitN(localName, "/", 2)[1]
+		}
+
+		return r.SearchRepositories(localName, limit)
+	}
+	return r.SearchRepositories(remoteName, limit)
+}
+
+// ResolveRepository splits a repository name into its components
+// and configuration of the associated registry.
+// The service mutex is held while the config is consulted.
+func (s *DefaultService) ResolveRepository(name reference.Named) (*RepositoryInfo, error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return newRepositoryInfo(s.config, name)
+}
+
+// APIEndpoint represents a remote API endpoint.
+type APIEndpoint struct {
+	Mirror       bool        // true when this endpoint is a configured pull mirror
+	URL          *url.URL    // base URL of the endpoint
+	Version      APIVersion  // registry API version (v1 or v2)
+	Official     bool        // true only for the default Docker registry
+	TrimHostname bool
+	TLSConfig    *tls.Config // also encodes the insecure flag via InsecureSkipVerify
+}
+
+// ToV1Endpoint returns a V1 API endpoint based on the APIEndpoint.
+// No ping/validation is performed here, unlike NewV1Endpoint.
+func (e APIEndpoint) ToV1Endpoint(userAgent string, metaHeaders http.Header) (*V1Endpoint, error) {
+	return newV1Endpoint(*e.URL, e.TLSConfig, userAgent, metaHeaders)
+}
+
+// TLSConfig constructs a client TLS configuration based on server defaults.
+// This exported variant takes the service mutex; the unexported tlsConfig
+// below is for callers that already hold it.
+func (s *DefaultService) TLSConfig(hostname string) (*tls.Config, error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	return newTLSConfig(hostname, isSecureIndex(s.config, hostname))
+}
+
+// tlsConfig constructs a client TLS configuration based on server defaults.
+// Callers must already hold s.mu (contrast with the exported TLSConfig).
+func (s *DefaultService) tlsConfig(hostname string) (*tls.Config, error) {
+	return newTLSConfig(hostname, isSecureIndex(s.config, hostname))
+}
+
+// tlsConfigForMirror builds the TLS configuration for a mirror URL, keyed by
+// its host. Callers must already hold s.mu.
+func (s *DefaultService) tlsConfigForMirror(mirrorURL *url.URL) (*tls.Config, error) {
+	return s.tlsConfig(mirrorURL.Host)
+}
+
+// LookupPullEndpoints creates a list of endpoints to try to pull from, in order of preference.
+// It gives preference to v2 endpoints over v1, mirrors over the actual
+// registry, and HTTPS over plain HTTP.
+func (s *DefaultService) LookupPullEndpoints(hostname string) (endpoints []APIEndpoint, err error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	return s.lookupEndpoints(hostname)
+}
+
+// LookupPushEndpoints creates a list of endpoints to try to push to, in order of preference.
+// It gives preference to v2 endpoints over v1, and HTTPS over plain HTTP.
+// Mirrors are not included.
+func (s *DefaultService) LookupPushEndpoints(hostname string) (endpoints []APIEndpoint, err error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	all, err := s.lookupEndpoints(hostname)
+	if err != nil {
+		return nil, err
+	}
+	// Drop mirror entries: pushes always go to the real registry.
+	for _, ep := range all {
+		if ep.Mirror {
+			continue
+		}
+		endpoints = append(endpoints, ep)
+	}
+	return endpoints, nil
+}
+
+// lookupEndpoints returns v2 endpoints first, then (unless the daemon is in
+// V2Only mode) appends the legacy v1 endpoints. Callers must already hold s.mu.
+func (s *DefaultService) lookupEndpoints(hostname string) (endpoints []APIEndpoint, err error) {
+	endpoints, err = s.lookupV2Endpoints(hostname)
+	if err != nil {
+		return nil, err
+	}
+
+	if s.config.V2Only {
+		return endpoints, nil
+	}
+
+	legacyEndpoints, err := s.lookupV1Endpoints(hostname)
+	if err != nil {
+		return nil, err
+	}
+	endpoints = append(endpoints, legacyEndpoints...)
+
+	return endpoints, nil
+}
diff --git a/vendor/github.com/docker/docker/registry/service_v1.go b/vendor/github.com/docker/docker/registry/service_v1.go
new file mode 100644
index 0000000000000000000000000000000000000000..1d251aec6eeec49ce544bcc4a05c3e4700b43b58
--- /dev/null
+++ b/vendor/github.com/docker/docker/registry/service_v1.go
@@ -0,0 +1,40 @@
+package registry
+
+import "net/url"
+
+// lookupV1Endpoints returns the candidate v1 endpoints for a hostname:
+// an HTTPS endpoint, plus an HTTP fallback when the registry is configured
+// as insecure. The official index hosts have no v1 endpoints at all.
+func (s *DefaultService) lookupV1Endpoints(hostname string) (endpoints []APIEndpoint, err error) {
+	if hostname == DefaultNamespace || hostname == DefaultV2Registry.Host || hostname == IndexHostname {
+		return []APIEndpoint{}, nil
+	}
+
+	tlsConfig, err := s.tlsConfig(hostname)
+	if err != nil {
+		return nil, err
+	}
+
+	endpoints = []APIEndpoint{
+		{
+			URL: &url.URL{
+				Scheme: "https",
+				Host:   hostname,
+			},
+			Version:      APIVersion1,
+			TrimHostname: true,
+			TLSConfig:    tlsConfig,
+		},
+	}
+
+	if tlsConfig.InsecureSkipVerify {
+		// Insecure registries also get a plain-HTTP fallback endpoint.
+		endpoints = append(endpoints, APIEndpoint{
+			URL: &url.URL{
+				Scheme: "http",
+				Host:   hostname,
+			},
+			Version:      APIVersion1,
+			TrimHostname: true,
+			// used to check if supposed to be secure via InsecureSkipVerify
+			TLSConfig: tlsConfig,
+		})
+	}
+	return endpoints, nil
+}
diff --git a/vendor/github.com/docker/docker/registry/service_v2.go b/vendor/github.com/docker/docker/registry/service_v2.go
new file mode 100644
index 0000000000000000000000000000000000000000..228d745f8c2878e8e56472c888c5639457fe2a58
--- /dev/null
+++ b/vendor/github.com/docker/docker/registry/service_v2.go
@@ -0,0 +1,78 @@
+package registry
+
+import (
+	"net/url"
+	"strings"
+
+	"github.com/docker/go-connections/tlsconfig"
+)
+
+// lookupV2Endpoints returns the candidate v2 endpoints for a hostname.
+// For the official index it returns the configured mirrors (HTTPS-defaulted)
+// followed by the default v2 registry; for any other host it returns an HTTPS
+// endpoint plus an HTTP fallback when the registry is configured as insecure.
+func (s *DefaultService) lookupV2Endpoints(hostname string) (endpoints []APIEndpoint, err error) {
+	tlsConfig := tlsconfig.ServerDefault()
+	if hostname == DefaultNamespace || hostname == IndexHostname {
+		// v2 mirrors
+		for _, mirror := range s.config.Mirrors {
+			if !strings.HasPrefix(mirror, "http://") && !strings.HasPrefix(mirror, "https://") {
+				mirror = "https://" + mirror
+			}
+			mirrorURL, err := url.Parse(mirror)
+			if err != nil {
+				return nil, err
+			}
+			// Each mirror gets its own TLS config keyed by the mirror's host.
+			mirrorTLSConfig, err := s.tlsConfigForMirror(mirrorURL)
+			if err != nil {
+				return nil, err
+			}
+			endpoints = append(endpoints, APIEndpoint{
+				URL: mirrorURL,
+				// guess mirrors are v2
+				Version:      APIVersion2,
+				Mirror:       true,
+				TrimHostname: true,
+				TLSConfig:    mirrorTLSConfig,
+			})
+		}
+		// v2 registry
+		endpoints = append(endpoints, APIEndpoint{
+			URL:          DefaultV2Registry,
+			Version:      APIVersion2,
+			Official:     true,
+			TrimHostname: true,
+			TLSConfig:    tlsConfig,
+		})
+
+		return endpoints, nil
+	}
+
+	tlsConfig, err = s.tlsConfig(hostname)
+	if err != nil {
+		return nil, err
+	}
+
+	endpoints = []APIEndpoint{
+		{
+			URL: &url.URL{
+				Scheme: "https",
+				Host:   hostname,
+			},
+			Version:      APIVersion2,
+			TrimHostname: true,
+			TLSConfig:    tlsConfig,
+		},
+	}
+
+	if tlsConfig.InsecureSkipVerify {
+		// Insecure registries also get a plain-HTTP fallback endpoint.
+		endpoints = append(endpoints, APIEndpoint{
+			URL: &url.URL{
+				Scheme: "http",
+				Host:   hostname,
+			},
+			Version:      APIVersion2,
+			TrimHostname: true,
+			// used to check if supposed to be secure via InsecureSkipVerify
+			TLSConfig: tlsConfig,
+		})
+	}
+
+	return endpoints, nil
+}
diff --git a/vendor/github.com/docker/docker/registry/session.go b/vendor/github.com/docker/docker/registry/session.go
new file mode 100644
index 0000000000000000000000000000000000000000..72e286ab448f74d8a3ebae1743957146b3b4bbd1
--- /dev/null
+++ b/vendor/github.com/docker/docker/registry/session.go
@@ -0,0 +1,783 @@
+package registry
+
+import (
+	"bytes"
+	"crypto/sha256"
+	"errors"
+	"sync"
+	// this is required for some certificates
+	_ "crypto/sha512"
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/http/cookiejar"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/distribution/registry/api/errcode"
+	"github.com/docker/docker/api/types"
+	registrytypes "github.com/docker/docker/api/types/registry"
+	"github.com/docker/docker/pkg/httputils"
+	"github.com/docker/docker/pkg/ioutils"
+	"github.com/docker/docker/pkg/stringid"
+	"github.com/docker/docker/pkg/tarsum"
+	"github.com/docker/docker/reference"
+)
+
+var (
+	// ErrRepoNotFound is returned if the repository didn't exist on the
+	// remote side
+	ErrRepoNotFound = errors.New("Repository not found")
+)
+
+// A Session is used to communicate with a V1 registry
+type Session struct {
+	indexEndpoint *V1Endpoint
+	client        *http.Client
+	// TODO(tiborvass): remove authConfig
+	authConfig *types.AuthConfig
+	id         string
+}
+
+type authTransport struct {
+	http.RoundTripper
+	*types.AuthConfig
+
+	alwaysSetBasicAuth bool
+	token              []string
+
+	mu     sync.Mutex                      // guards modReq
+	modReq map[*http.Request]*http.Request // original -> modified
+}
+
+// AuthTransport handles the auth layer when communicating with a v1 registry (private or official)
+//
+// For private v1 registries, set alwaysSetBasicAuth to true.
+//
+// For the official v1 registry, if there isn't already an Authorization header in the request,
+// but there is an X-Docker-Token header set to true, then Basic Auth will be used to set the Authorization header.
+// After sending the request with the provided base http.RoundTripper, if an X-Docker-Token header, representing
+// a token, is present in the response, then it gets cached and sent in the Authorization header of all subsequent
+// requests.
+//
+// If the server sends a token without the client having requested it, it is ignored.
+//
+// This RoundTripper also has a CancelRequest method important for correct timeout handling.
+func AuthTransport(base http.RoundTripper, authConfig *types.AuthConfig, alwaysSetBasicAuth bool) http.RoundTripper {
+	if base == nil {
+		base = http.DefaultTransport
+	}
+	return &authTransport{
+		RoundTripper:       base,
+		AuthConfig:         authConfig,
+		alwaysSetBasicAuth: alwaysSetBasicAuth,
+		modReq:             make(map[*http.Request]*http.Request),
+	}
+}
+
+// cloneRequest returns a clone of the provided *http.Request.
+// The clone is a shallow copy of the struct and its Header map.
+func cloneRequest(r *http.Request) *http.Request {
+	// shallow copy of the struct
+	r2 := new(http.Request)
+	*r2 = *r
+	// deep copy of the Header
+	r2.Header = make(http.Header, len(r.Header))
+	for k, s := range r.Header {
+		r2.Header[k] = append([]string(nil), s...)
+	}
+
+	return r2
+}
+
+// RoundTrip changes an HTTP request's headers to add the necessary
+// authentication-related headers
+func (tr *authTransport) RoundTrip(orig *http.Request) (*http.Response, error) {
+	// Authorization should not be set on 302 redirect for untrusted locations.
+	// This logic mirrors the behavior in addRequiredHeadersToRedirectedRequests.
+	// As the authorization logic is currently implemented in RoundTrip,
+	// a 302 redirect is detected by looking at the Referer header as the go http package adds said header.
+	// This is safe as Docker doesn't set Referrer in other scenarios.
+	if orig.Header.Get("Referer") != "" && !trustedLocation(orig) {
+		return tr.RoundTripper.RoundTrip(orig)
+	}
+
+	req := cloneRequest(orig)
+	tr.mu.Lock()
+	tr.modReq[orig] = req
+	tr.mu.Unlock()
+
+	if tr.alwaysSetBasicAuth {
+		if tr.AuthConfig == nil {
+			return nil, errors.New("unexpected error: empty auth config")
+		}
+		req.SetBasicAuth(tr.Username, tr.Password)
+		return tr.RoundTripper.RoundTrip(req)
+	}
+
+	// Don't override
+	if req.Header.Get("Authorization") == "" {
+		if req.Header.Get("X-Docker-Token") == "true" && tr.AuthConfig != nil && len(tr.Username) > 0 {
+			req.SetBasicAuth(tr.Username, tr.Password)
+		} else if len(tr.token) > 0 {
+			req.Header.Set("Authorization", "Token "+strings.Join(tr.token, ","))
+		}
+	}
+	resp, err := tr.RoundTripper.RoundTrip(req)
+	if err != nil {
+		delete(tr.modReq, orig)
+		return nil, err
+	}
+	if len(resp.Header["X-Docker-Token"]) > 0 {
+		tr.token = resp.Header["X-Docker-Token"]
+	}
+	resp.Body = &ioutils.OnEOFReader{
+		Rc: resp.Body,
+		Fn: func() {
+			tr.mu.Lock()
+			delete(tr.modReq, orig)
+			tr.mu.Unlock()
+		},
+	}
+	return resp, nil
+}
+
+// CancelRequest cancels an in-flight request by closing its connection.
+func (tr *authTransport) CancelRequest(req *http.Request) {
+	type canceler interface {
+		CancelRequest(*http.Request)
+	}
+	if cr, ok := tr.RoundTripper.(canceler); ok {
+		tr.mu.Lock()
+		modReq := tr.modReq[req]
+		delete(tr.modReq, req)
+		tr.mu.Unlock()
+		cr.CancelRequest(modReq)
+	}
+}
+
+func authorizeClient(client *http.Client, authConfig *types.AuthConfig, endpoint *V1Endpoint) error {
+	var alwaysSetBasicAuth bool
+
+	// If we're working with a standalone private registry over HTTPS, send Basic Auth headers
+	// alongside all our requests.
+	if endpoint.String() != IndexServer && endpoint.URL.Scheme == "https" {
+		info, err := endpoint.Ping()
+		if err != nil {
+			return err
+		}
+		if info.Standalone && authConfig != nil {
+			logrus.Debugf("Endpoint %s is eligible for private registry. Enabling decorator.", endpoint.String())
+			alwaysSetBasicAuth = true
+		}
+	}
+
+	// Annotate the transport unconditionally so that v2 can
+	// properly fallback on v1 when an image is not found.
+	client.Transport = AuthTransport(client.Transport, authConfig, alwaysSetBasicAuth)
+
+	jar, err := cookiejar.New(nil)
+	if err != nil {
+		return errors.New("cookiejar.New is not supposed to return an error")
+	}
+	client.Jar = jar
+
+	return nil
+}
+
+func newSession(client *http.Client, authConfig *types.AuthConfig, endpoint *V1Endpoint) *Session {
+	return &Session{
+		authConfig:    authConfig,
+		client:        client,
+		indexEndpoint: endpoint,
+		id:            stringid.GenerateRandomID(),
+	}
+}
+
+// NewSession creates a new session
+// TODO(tiborvass): remove authConfig param once registry client v2 is vendored
+func NewSession(client *http.Client, authConfig *types.AuthConfig, endpoint *V1Endpoint) (*Session, error) {
+	if err := authorizeClient(client, authConfig, endpoint); err != nil {
+		return nil, err
+	}
+
+	return newSession(client, authConfig, endpoint), nil
+}
+
+// ID returns this registry session's ID.
+func (r *Session) ID() string {
+	return r.id
+}
+
+// GetRemoteHistory retrieves the history of a given image from the registry.
+// It returns a list of the parent's JSON files (including the requested image).
+func (r *Session) GetRemoteHistory(imgID, registry string) ([]string, error) {
+	res, err := r.client.Get(registry + "images/" + imgID + "/ancestry")
+	if err != nil {
+		return nil, err
+	}
+	defer res.Body.Close()
+	if res.StatusCode != 200 {
+		if res.StatusCode == 401 {
+			return nil, errcode.ErrorCodeUnauthorized.WithArgs()
+		}
+		return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to fetch remote history for %s", res.StatusCode, imgID), res)
+	}
+
+	var history []string
+	if err := json.NewDecoder(res.Body).Decode(&history); err != nil {
+		return nil, fmt.Errorf("Error while reading the http response: %v", err)
+	}
+
+	logrus.Debugf("Ancestry: %v", history)
+	return history, nil
+}
+
+// LookupRemoteImage checks if an image exists in the registry
+func (r *Session) LookupRemoteImage(imgID, registry string) error {
+	res, err := r.client.Get(registry + "images/" + imgID + "/json")
+	if err != nil {
+		return err
+	}
+	res.Body.Close()
+	if res.StatusCode != 200 {
+		return httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d", res.StatusCode), res)
+	}
+	return nil
+}
+
+// GetRemoteImageJSON retrieves an image's JSON metadata from the registry.
+func (r *Session) GetRemoteImageJSON(imgID, registry string) ([]byte, int64, error) {
+	res, err := r.client.Get(registry + "images/" + imgID + "/json")
+	if err != nil {
+		return nil, -1, fmt.Errorf("Failed to download json: %s", err)
+	}
+	defer res.Body.Close()
+	if res.StatusCode != 200 {
+		return nil, -1, httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d", res.StatusCode), res)
+	}
+	// if the size header is not present, then set it to '-1'
+	imageSize := int64(-1)
+	if hdr := res.Header.Get("X-Docker-Size"); hdr != "" {
+		imageSize, err = strconv.ParseInt(hdr, 10, 64)
+		if err != nil {
+			return nil, -1, err
+		}
+	}
+
+	jsonString, err := ioutil.ReadAll(res.Body)
+	if err != nil {
+		return nil, -1, fmt.Errorf("Failed to parse downloaded json: %v (%s)", err, jsonString)
+	}
+	return jsonString, imageSize, nil
+}
+
+// GetRemoteImageLayer retrieves an image layer from the registry
+func (r *Session) GetRemoteImageLayer(imgID, registry string, imgSize int64) (io.ReadCloser, error) {
+	var (
+		statusCode = 0
+		res        *http.Response
+		err        error
+		imageURL   = fmt.Sprintf("%simages/%s/layer", registry, imgID)
+	)
+
+	req, err := http.NewRequest("GET", imageURL, nil)
+	if err != nil {
+		return nil, fmt.Errorf("Error while getting from the server: %v", err)
+	}
+	statusCode = 0
+	res, err = r.client.Do(req)
+	if err != nil {
+		logrus.Debugf("Error contacting registry %s: %v", registry, err)
+		// the only case err != nil && res != nil is https://golang.org/src/net/http/client.go#L515
+		if res != nil {
+			if res.Body != nil {
+				res.Body.Close()
+			}
+			statusCode = res.StatusCode
+		}
+		return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)",
+			statusCode, imgID)
+	}
+
+	if res.StatusCode != 200 {
+		res.Body.Close()
+		return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)",
+			res.StatusCode, imgID)
+	}
+
+	if res.Header.Get("Accept-Ranges") == "bytes" && imgSize > 0 {
+		logrus.Debug("server supports resume")
+		return httputils.ResumableRequestReaderWithInitialResponse(r.client, req, 5, imgSize, res), nil
+	}
+	logrus.Debug("server doesn't support resume")
+	return res.Body, nil
+}
+
+// GetRemoteTag retrieves the tag named in the askedTag argument from the given
+// repository. It queries each of the registries supplied in the registries
+// argument, and returns data from the first one that answers the query
+// successfully.
+func (r *Session) GetRemoteTag(registries []string, repositoryRef reference.Named, askedTag string) (string, error) {
+	repository := repositoryRef.RemoteName()
+
+	if strings.Count(repository, "/") == 0 {
+		// This will be removed once the registry supports auto-resolution on
+		// the "library" namespace
+		repository = "library/" + repository
+	}
+	for _, host := range registries {
+		endpoint := fmt.Sprintf("%srepositories/%s/tags/%s", host, repository, askedTag)
+		res, err := r.client.Get(endpoint)
+		if err != nil {
+			return "", err
+		}
+
+		logrus.Debugf("Got status code %d from %s", res.StatusCode, endpoint)
+		defer res.Body.Close()
+
+		if res.StatusCode == 404 {
+			return "", ErrRepoNotFound
+		}
+		if res.StatusCode != 200 {
+			continue
+		}
+
+		var tagID string
+		if err := json.NewDecoder(res.Body).Decode(&tagID); err != nil {
+			return "", err
+		}
+		return tagID, nil
+	}
+	return "", fmt.Errorf("Could not reach any registry endpoint")
+}
+
+// GetRemoteTags retrieves all tags from the given repository. It queries each
+// of the registries supplied in the registries argument, and returns data from
+// the first one that answers the query successfully. It returns a map with
+// tag names as the keys and image IDs as the values.
+func (r *Session) GetRemoteTags(registries []string, repositoryRef reference.Named) (map[string]string, error) {
+	repository := repositoryRef.RemoteName()
+
+	if strings.Count(repository, "/") == 0 {
+		// This will be removed once the registry supports auto-resolution on
+		// the "library" namespace
+		repository = "library/" + repository
+	}
+	for _, host := range registries {
+		endpoint := fmt.Sprintf("%srepositories/%s/tags", host, repository)
+		res, err := r.client.Get(endpoint)
+		if err != nil {
+			return nil, err
+		}
+
+		logrus.Debugf("Got status code %d from %s", res.StatusCode, endpoint)
+		defer res.Body.Close()
+
+		if res.StatusCode == 404 {
+			return nil, ErrRepoNotFound
+		}
+		if res.StatusCode != 200 {
+			continue
+		}
+
+		result := make(map[string]string)
+		if err := json.NewDecoder(res.Body).Decode(&result); err != nil {
+			return nil, err
+		}
+		return result, nil
+	}
+	return nil, fmt.Errorf("Could not reach any registry endpoint")
+}
+
+func buildEndpointsList(headers []string, indexEp string) ([]string, error) {
+	var endpoints []string
+	parsedURL, err := url.Parse(indexEp)
+	if err != nil {
+		return nil, err
+	}
+	var urlScheme = parsedURL.Scheme
+	// The registry's URL scheme has to match the Index's.
+	for _, ep := range headers {
+		epList := strings.Split(ep, ",")
+		for _, epListElement := range epList {
+			endpoints = append(
+				endpoints,
+				fmt.Sprintf("%s://%s/v1/", urlScheme, strings.TrimSpace(epListElement)))
+		}
+	}
+	return endpoints, nil
+}
+
+// GetRepositoryData returns lists of images and endpoints for the repository
+func (r *Session) GetRepositoryData(name reference.Named) (*RepositoryData, error) {
+	repositoryTarget := fmt.Sprintf("%srepositories/%s/images", r.indexEndpoint.String(), name.RemoteName())
+
+	logrus.Debugf("[registry] Calling GET %s", repositoryTarget)
+
+	req, err := http.NewRequest("GET", repositoryTarget, nil)
+	if err != nil {
+		return nil, err
+	}
+	// this will set basic auth in r.client.Transport and send cached X-Docker-Token headers for all subsequent requests
+	req.Header.Set("X-Docker-Token", "true")
+	res, err := r.client.Do(req)
+	if err != nil {
+		// check if the error is because of i/o timeout
+		// and return a non-obtuse error message for users
+		// "Get https://index.docker.io/v1/repositories/library/busybox/images: i/o timeout"
+		// was a top search on the docker user forum
+		if isTimeout(err) {
+			return nil, fmt.Errorf("Network timed out while trying to connect to %s. You may want to check your internet connection or if you are behind a proxy.", repositoryTarget)
+		}
+		return nil, fmt.Errorf("Error while pulling image: %v", err)
+	}
+	defer res.Body.Close()
+	if res.StatusCode == 401 {
+		return nil, errcode.ErrorCodeUnauthorized.WithArgs()
+	}
+	// TODO: Right now we're ignoring checksums in the response body.
+	// In the future, we need to use them to check image validity.
+	if res.StatusCode == 404 {
+		return nil, httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code: %d", res.StatusCode), res)
+	} else if res.StatusCode != 200 {
+		errBody, err := ioutil.ReadAll(res.Body)
+		if err != nil {
+			logrus.Debugf("Error reading response body: %s", err)
+		}
+		return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to pull repository %s: %q", res.StatusCode, name.RemoteName(), errBody), res)
+	}
+
+	var endpoints []string
+	if res.Header.Get("X-Docker-Endpoints") != "" {
+		endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.String())
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		// Assume the endpoint is on the same host
+		endpoints = append(endpoints, fmt.Sprintf("%s://%s/v1/", r.indexEndpoint.URL.Scheme, req.URL.Host))
+	}
+
+	remoteChecksums := []*ImgData{}
+	if err := json.NewDecoder(res.Body).Decode(&remoteChecksums); err != nil {
+		return nil, err
+	}
+
+	// Forge a better object from the retrieved data
+	imgsData := make(map[string]*ImgData, len(remoteChecksums))
+	for _, elem := range remoteChecksums {
+		imgsData[elem.ID] = elem
+	}
+
+	return &RepositoryData{
+		ImgList:   imgsData,
+		Endpoints: endpoints,
+	}, nil
+}
+
+// PushImageChecksumRegistry uploads checksums for an image
+func (r *Session) PushImageChecksumRegistry(imgData *ImgData, registry string) error {
+	u := registry + "images/" + imgData.ID + "/checksum"
+
+	logrus.Debugf("[registry] Calling PUT %s", u)
+
+	req, err := http.NewRequest("PUT", u, nil)
+	if err != nil {
+		return err
+	}
+	req.Header.Set("X-Docker-Checksum", imgData.Checksum)
+	req.Header.Set("X-Docker-Checksum-Payload", imgData.ChecksumPayload)
+
+	res, err := r.client.Do(req)
+	if err != nil {
+		return fmt.Errorf("Failed to upload metadata: %v", err)
+	}
+	defer res.Body.Close()
+	if len(res.Cookies()) > 0 {
+		r.client.Jar.SetCookies(req.URL, res.Cookies())
+	}
+	if res.StatusCode != 200 {
+		errBody, err := ioutil.ReadAll(res.Body)
+		if err != nil {
+			return fmt.Errorf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err)
+		}
+		var jsonBody map[string]string
+		if err := json.Unmarshal(errBody, &jsonBody); err != nil {
+			errBody = []byte(err.Error())
+		} else if jsonBody["error"] == "Image already exists" {
+			return ErrAlreadyExists
+		}
+		return fmt.Errorf("HTTP code %d while uploading metadata: %q", res.StatusCode, errBody)
+	}
+	return nil
+}
+
+// PushImageJSONRegistry pushes JSON metadata for a local image to the registry
+func (r *Session) PushImageJSONRegistry(imgData *ImgData, jsonRaw []byte, registry string) error {
+
+	u := registry + "images/" + imgData.ID + "/json"
+
+	logrus.Debugf("[registry] Calling PUT %s", u)
+
+	req, err := http.NewRequest("PUT", u, bytes.NewReader(jsonRaw))
+	if err != nil {
+		return err
+	}
+	req.Header.Add("Content-type", "application/json")
+
+	res, err := r.client.Do(req)
+	if err != nil {
+		return fmt.Errorf("Failed to upload metadata: %s", err)
+	}
+	defer res.Body.Close()
+	if res.StatusCode == 401 && strings.HasPrefix(registry, "http://") {
+		return httputils.NewHTTPRequestError("HTTP code 401, Docker will not send auth headers over HTTP.", res)
+	}
+	if res.StatusCode != 200 {
+		errBody, err := ioutil.ReadAll(res.Body)
+		if err != nil {
+			return httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res)
+		}
+		var jsonBody map[string]string
+		if err := json.Unmarshal(errBody, &jsonBody); err != nil {
+			errBody = []byte(err.Error())
+		} else if jsonBody["error"] == "Image already exists" {
+			return ErrAlreadyExists
+		}
+		return httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata: %q", res.StatusCode, errBody), res)
+	}
+	return nil
+}
+
+// PushImageLayerRegistry sends the checksum of an image layer to the registry
+func (r *Session) PushImageLayerRegistry(imgID string, layer io.Reader, registry string, jsonRaw []byte) (checksum string, checksumPayload string, err error) {
+	u := registry + "images/" + imgID + "/layer"
+
+	logrus.Debugf("[registry] Calling PUT %s", u)
+
+	tarsumLayer, err := tarsum.NewTarSum(layer, false, tarsum.Version0)
+	if err != nil {
+		return "", "", err
+	}
+	h := sha256.New()
+	h.Write(jsonRaw)
+	h.Write([]byte{'\n'})
+	checksumLayer := io.TeeReader(tarsumLayer, h)
+
+	req, err := http.NewRequest("PUT", u, checksumLayer)
+	if err != nil {
+		return "", "", err
+	}
+	req.Header.Add("Content-Type", "application/octet-stream")
+	req.ContentLength = -1
+	req.TransferEncoding = []string{"chunked"}
+	res, err := r.client.Do(req)
+	if err != nil {
+		return "", "", fmt.Errorf("Failed to upload layer: %v", err)
+	}
+	if rc, ok := layer.(io.Closer); ok {
+		if err := rc.Close(); err != nil {
+			return "", "", err
+		}
+	}
+	defer res.Body.Close()
+
+	if res.StatusCode != 200 {
+		errBody, err := ioutil.ReadAll(res.Body)
+		if err != nil {
+			return "", "", httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res)
+		}
+		return "", "", httputils.NewHTTPRequestError(fmt.Sprintf("Received HTTP code %d while uploading layer: %q", res.StatusCode, errBody), res)
+	}
+
+	checksumPayload = "sha256:" + hex.EncodeToString(h.Sum(nil))
+	return tarsumLayer.Sum(jsonRaw), checksumPayload, nil
+}
+
+// PushRegistryTag pushes a tag on the registry.
+// Remote has the format '<user>/<repo>'
+func (r *Session) PushRegistryTag(remote reference.Named, revision, tag, registry string) error {
+	// "jsonify" the string
+	revision = "\"" + revision + "\""
+	path := fmt.Sprintf("repositories/%s/tags/%s", remote.RemoteName(), tag)
+
+	req, err := http.NewRequest("PUT", registry+path, strings.NewReader(revision))
+	if err != nil {
+		return err
+	}
+	req.Header.Add("Content-type", "application/json")
+	req.ContentLength = int64(len(revision))
+	res, err := r.client.Do(req)
+	if err != nil {
+		return err
+	}
+	res.Body.Close()
+	if res.StatusCode != 200 && res.StatusCode != 201 {
+		return httputils.NewHTTPRequestError(fmt.Sprintf("Internal server error: %d trying to push tag %s on %s", res.StatusCode, tag, remote.RemoteName()), res)
+	}
+	return nil
+}
+
+// PushImageJSONIndex uploads an image list to the repository
+func (r *Session) PushImageJSONIndex(remote reference.Named, imgList []*ImgData, validate bool, regs []string) (*RepositoryData, error) {
+	cleanImgList := []*ImgData{}
+	if validate {
+		for _, elem := range imgList {
+			if elem.Checksum != "" {
+				cleanImgList = append(cleanImgList, elem)
+			}
+		}
+	} else {
+		cleanImgList = imgList
+	}
+
+	imgListJSON, err := json.Marshal(cleanImgList)
+	if err != nil {
+		return nil, err
+	}
+	var suffix string
+	if validate {
+		suffix = "images"
+	}
+	u := fmt.Sprintf("%srepositories/%s/%s", r.indexEndpoint.String(), remote.RemoteName(), suffix)
+	logrus.Debugf("[registry] PUT %s", u)
+	logrus.Debugf("Image list pushed to index:\n%s", imgListJSON)
+	headers := map[string][]string{
+		"Content-type": {"application/json"},
+		// this will set basic auth in r.client.Transport and send cached X-Docker-Token headers for all subsequent requests
+		"X-Docker-Token": {"true"},
+	}
+	if validate {
+		headers["X-Docker-Endpoints"] = regs
+	}
+
+	// Redirect if necessary
+	var res *http.Response
+	for {
+		if res, err = r.putImageRequest(u, headers, imgListJSON); err != nil {
+			return nil, err
+		}
+		if !shouldRedirect(res) {
+			break
+		}
+		res.Body.Close()
+		u = res.Header.Get("Location")
+		logrus.Debugf("Redirected to %s", u)
+	}
+	defer res.Body.Close()
+
+	if res.StatusCode == 401 {
+		return nil, errcode.ErrorCodeUnauthorized.WithArgs()
+	}
+
+	var tokens, endpoints []string
+	if !validate {
+		if res.StatusCode != 200 && res.StatusCode != 201 {
+			errBody, err := ioutil.ReadAll(res.Body)
+			if err != nil {
+				logrus.Debugf("Error reading response body: %s", err)
+			}
+			return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push repository %s: %q", res.StatusCode, remote.RemoteName(), errBody), res)
+		}
+		tokens = res.Header["X-Docker-Token"]
+		logrus.Debugf("Auth token: %v", tokens)
+
+		if res.Header.Get("X-Docker-Endpoints") == "" {
+			return nil, fmt.Errorf("Index response didn't contain any endpoints")
+		}
+		endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.String())
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		if res.StatusCode != 204 {
+			errBody, err := ioutil.ReadAll(res.Body)
+			if err != nil {
+				logrus.Debugf("Error reading response body: %s", err)
+			}
+			return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push checksums %s: %q", res.StatusCode, remote.RemoteName(), errBody), res)
+		}
+	}
+
+	return &RepositoryData{
+		Endpoints: endpoints,
+	}, nil
+}
+
+func (r *Session) putImageRequest(u string, headers map[string][]string, body []byte) (*http.Response, error) {
+	req, err := http.NewRequest("PUT", u, bytes.NewReader(body))
+	if err != nil {
+		return nil, err
+	}
+	req.ContentLength = int64(len(body))
+	for k, v := range headers {
+		req.Header[k] = v
+	}
+	response, err := r.client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	return response, nil
+}
+
+func shouldRedirect(response *http.Response) bool {
+	return response.StatusCode >= 300 && response.StatusCode < 400
+}
+
+// SearchRepositories performs a search against the remote repository
+func (r *Session) SearchRepositories(term string, limit int) (*registrytypes.SearchResults, error) {
+	if limit < 1 || limit > 100 {
+		return nil, fmt.Errorf("Limit %d is outside the range of [1, 100]", limit)
+	}
+	logrus.Debugf("Index server: %s", r.indexEndpoint)
+	u := r.indexEndpoint.String() + "search?q=" + url.QueryEscape(term) + "&n=" + url.QueryEscape(fmt.Sprintf("%d", limit))
+
+	req, err := http.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, fmt.Errorf("Error while getting from the server: %v", err)
+	}
+	// Have the AuthTransport send authentication, when logged in.
+	req.Header.Set("X-Docker-Token", "true")
+	res, err := r.client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer res.Body.Close()
+	if res.StatusCode != 200 {
+		return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Unexpected status code %d", res.StatusCode), res)
+	}
+	result := new(registrytypes.SearchResults)
+	return result, json.NewDecoder(res.Body).Decode(result)
+}
+
+// GetAuthConfig returns the authentication settings for a session
+// TODO(tiborvass): remove this once registry client v2 is vendored
+func (r *Session) GetAuthConfig(withPasswd bool) *types.AuthConfig {
+	password := ""
+	if withPasswd {
+		password = r.authConfig.Password
+	}
+	return &types.AuthConfig{
+		Username: r.authConfig.Username,
+		Password: password,
+	}
+}
+
+func isTimeout(err error) bool {
+	type timeout interface {
+		Timeout() bool
+	}
+	e := err
+	switch urlErr := err.(type) {
+	case *url.Error:
+		e = urlErr.Err
+	}
+	t, ok := e.(timeout)
+	return ok && t.Timeout()
+}
diff --git a/vendor/github.com/docker/docker/registry/types.go b/vendor/github.com/docker/docker/registry/types.go
new file mode 100644
index 0000000000000000000000000000000000000000..49c123a3e2cfcba3ce48509b223769a83600baeb
--- /dev/null
+++ b/vendor/github.com/docker/docker/registry/types.go
@@ -0,0 +1,73 @@
+package registry
+
+import (
+	registrytypes "github.com/docker/docker/api/types/registry"
+	"github.com/docker/docker/reference"
+)
+
+// RepositoryData tracks the image list, list of endpoints, and list of tokens
+// for a repository
+type RepositoryData struct {
+	// ImgList is a list of images in the repository
+	ImgList map[string]*ImgData
+	// Endpoints is a list of endpoints returned in X-Docker-Endpoints
+	Endpoints []string
+	// Tokens is currently unused (remove it?)
+	Tokens []string
+}
+
+// ImgData is used to transfer image checksums to and from the registry
+type ImgData struct {
+	// ID is an opaque string that identifies the image
+	ID              string `json:"id"`
+	Checksum        string `json:"checksum,omitempty"`
+	ChecksumPayload string `json:"-"`
+	Tag             string `json:",omitempty"`
+}
+
+// PingResult contains the information returned when pinging a registry. It
+// indicates the registry's version and whether the registry claims to be a
+// standalone registry.
+type PingResult struct {
+	// Version is the registry version supplied by the registry in an HTTP
+	// header
+	Version string `json:"version"`
+	// Standalone is set to true if the registry indicates it is a
+	// standalone registry in the X-Docker-Registry-Standalone
+	// header
+	Standalone bool `json:"standalone"`
+}
+
+// APIVersion is an integral representation of an API version (presently
+// either 1 or 2)
+type APIVersion int
+
+func (av APIVersion) String() string {
+	return apiVersions[av]
+}
+
+// API Version identifiers.
+const (
+	_                      = iota
+	APIVersion1 APIVersion = iota
+	APIVersion2
+)
+
+var apiVersions = map[APIVersion]string{
+	APIVersion1: "v1",
+	APIVersion2: "v2",
+}
+
+// RepositoryInfo describes a repository
+type RepositoryInfo struct {
+	reference.Named
+	// Index points to registry information
+	Index *registrytypes.IndexInfo
+	// Official indicates whether the repository is considered official.
+	// If the registry is official, and the normalized name does not
+	// contain a '/' (e.g. "foo"), then it is considered an official repo.
+	Official bool
+	// Class represents the class of the repository, such as "plugin"
+	// or "image".
+	Class string
+}
diff --git a/vendor/github.com/docker/go-connections/nat/nat.go b/vendor/github.com/docker/go-connections/nat/nat.go
index e19c73c378d6c1424e9f4a0385700d58880881b7..4d5f5ae63afdf2ae53b3521071cbb14192b897dc 100644
--- a/vendor/github.com/docker/go-connections/nat/nat.go
+++ b/vendor/github.com/docker/go-connections/nat/nat.go
@@ -155,33 +155,36 @@ type PortMapping struct {
 	Binding PortBinding
 }
 
+func splitParts(rawport string) (string, string, string) {
+	parts := strings.Split(rawport, ":")
+	n := len(parts)
+	containerport := parts[n-1]
+
+	switch n {
+	case 1:
+		return "", "", containerport
+	case 2:
+		return "", parts[0], containerport
+	case 3:
+		return parts[0], parts[1], containerport
+	default:
+		return strings.Join(parts[:n-2], ":"), parts[n-2], containerport
+	}
+}
+
 // ParsePortSpec parses a port specification string into a slice of PortMappings
 func ParsePortSpec(rawPort string) ([]PortMapping, error) {
-	proto := "tcp"
-
-	if i := strings.LastIndex(rawPort, "/"); i != -1 {
-		proto = rawPort[i+1:]
-		rawPort = rawPort[:i]
-	}
-	if !strings.Contains(rawPort, ":") {
-		rawPort = fmt.Sprintf("::%s", rawPort)
-	} else if len(strings.Split(rawPort, ":")) == 2 {
-		rawPort = fmt.Sprintf(":%s", rawPort)
-	}
+	var proto string
+	rawIP, hostPort, containerPort := splitParts(rawPort)
+	proto, containerPort = SplitProtoPort(containerPort)
 
-	parts, err := PartParser(portSpecTemplate, rawPort)
+	// Strip [] from IPV6 addresses
+	ip, _, err := net.SplitHostPort(rawIP + ":")
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("Invalid ip address %v: %s", rawIP, err)
 	}
-
-	var (
-		containerPort = parts["containerPort"]
-		rawIP         = parts["ip"]
-		hostPort      = parts["hostPort"]
-	)
-
-	if rawIP != "" && net.ParseIP(rawIP) == nil {
-		return nil, fmt.Errorf("Invalid ip address: %s", rawIP)
+	if ip != "" && net.ParseIP(ip) == nil {
+		return nil, fmt.Errorf("Invalid ip address: %s", ip)
 	}
 	if containerPort == "" {
 		return nil, fmt.Errorf("No port specified: %s<empty>", rawPort)
@@ -230,7 +233,7 @@ func ParsePortSpec(rawPort string) ([]PortMapping, error) {
 		}
 
 		binding := PortBinding{
-			HostIP:   rawIP,
+			HostIP:   ip,
 			HostPort: hostPort,
 		}
 		ports = append(ports, PortMapping{Port: port, Binding: binding})
diff --git a/vendor/github.com/docker/go-connections/nat/parse.go b/vendor/github.com/docker/go-connections/nat/parse.go
index 872050205f4952b12afa7287b7e69d59f6e8d19e..892adf8c6673e36fc1994aefba42a91283703da8 100644
--- a/vendor/github.com/docker/go-connections/nat/parse.go
+++ b/vendor/github.com/docker/go-connections/nat/parse.go
@@ -8,6 +8,7 @@ import (
 
 // PartParser parses and validates the specified string (data) using the specified template
 // e.g. ip:public:private -> 192.168.0.1:80:8000
+// DEPRECATED: do not use, this function may be removed in a future version
 func PartParser(template, data string) (map[string]string, error) {
 	// ip:public:private
 	var (
diff --git a/vendor/github.com/docker/go-connections/sockets/sockets.go b/vendor/github.com/docker/go-connections/sockets/sockets.go
index 1739cecf2a56fc7ea51dae4ea069d1fba58975ca..a1d7beb4d80595445aeaea928208d923c2ecce71 100644
--- a/vendor/github.com/docker/go-connections/sockets/sockets.go
+++ b/vendor/github.com/docker/go-connections/sockets/sockets.go
@@ -2,6 +2,7 @@
 package sockets
 
 import (
+	"errors"
 	"net"
 	"net/http"
 	"time"
@@ -10,6 +11,9 @@ import (
 // Why 32? See https://github.com/docker/docker/pull/8035.
 const defaultTimeout = 32 * time.Second
 
+// ErrProtocolNotAvailable is returned when a given transport protocol is not provided by the operating system.
+var ErrProtocolNotAvailable = errors.New("protocol not available")
+
 // ConfigureTransport configures the specified Transport according to the
 // specified proto and addr.
 // If the proto is unix (using a unix socket to communicate) or npipe the
@@ -17,17 +21,9 @@ const defaultTimeout = 32 * time.Second
 func ConfigureTransport(tr *http.Transport, proto, addr string) error {
 	switch proto {
 	case "unix":
-		// No need for compression in local communications.
-		tr.DisableCompression = true
-		tr.Dial = func(_, _ string) (net.Conn, error) {
-			return net.DialTimeout(proto, addr, defaultTimeout)
-		}
+		return configureUnixTransport(tr, proto, addr)
 	case "npipe":
-		// No need for compression in local communications.
-		tr.DisableCompression = true
-		tr.Dial = func(_, _ string) (net.Conn, error) {
-			return DialPipe(addr, defaultTimeout)
-		}
+		return configureNpipeTransport(tr, proto, addr)
 	default:
 		tr.Proxy = http.ProxyFromEnvironment
 		dialer, err := DialerFromEnvironment(&net.Dialer{
diff --git a/vendor/github.com/docker/go-connections/sockets/sockets_unix.go b/vendor/github.com/docker/go-connections/sockets/sockets_unix.go
index b255ac9ac7afde42ed45801607d65e52e8942fb7..386cf0dbbdecb12396e9c2c3e6d5d3c36ab5a552 100644
--- a/vendor/github.com/docker/go-connections/sockets/sockets_unix.go
+++ b/vendor/github.com/docker/go-connections/sockets/sockets_unix.go
@@ -3,11 +3,31 @@
 package sockets
 
 import (
+	"fmt"
 	"net"
+	"net/http"
 	"syscall"
 	"time"
 )
 
+const maxUnixSocketPathSize = len(syscall.RawSockaddrUnix{}.Path)
+
+func configureUnixTransport(tr *http.Transport, proto, addr string) error {
+	if len(addr) > maxUnixSocketPathSize {
+		return fmt.Errorf("Unix socket path %q is too long", addr)
+	}
+	// No need for compression in local communications.
+	tr.DisableCompression = true
+	tr.Dial = func(_, _ string) (net.Conn, error) {
+		return net.DialTimeout(proto, addr, defaultTimeout)
+	}
+	return nil
+}
+
+func configureNpipeTransport(tr *http.Transport, proto, addr string) error {
+	return ErrProtocolNotAvailable
+}
+
 // DialPipe connects to a Windows named pipe.
 // This is not supported on other OSes.
 func DialPipe(_ string, _ time.Duration) (net.Conn, error) {
diff --git a/vendor/github.com/docker/go-connections/sockets/sockets_windows.go b/vendor/github.com/docker/go-connections/sockets/sockets_windows.go
index 1f3540b2fe87a39fd17d54d7da807cdbe4a09fd0..5c21644e1fe7b76a04f758d6d48bad6f86dbe64e 100644
--- a/vendor/github.com/docker/go-connections/sockets/sockets_windows.go
+++ b/vendor/github.com/docker/go-connections/sockets/sockets_windows.go
@@ -2,11 +2,25 @@ package sockets
 
 import (
 	"net"
+	"net/http"
 	"time"
 
 	"github.com/Microsoft/go-winio"
 )
 
+func configureUnixTransport(tr *http.Transport, proto, addr string) error {
+	return ErrProtocolNotAvailable
+}
+
+func configureNpipeTransport(tr *http.Transport, proto, addr string) error {
+	// No need for compression in local communications.
+	tr.DisableCompression = true
+	tr.Dial = func(_, _ string) (net.Conn, error) {
+		return DialPipe(addr, defaultTimeout)
+	}
+	return nil
+}
+
 // DialPipe connects to a Windows named pipe.
 func DialPipe(addr string, timeout time.Duration) (net.Conn, error) {
 	return winio.DialPipe(addr, &timeout)
diff --git a/vendor/github.com/docker/go-connections/sockets/tcp_socket.go b/vendor/github.com/docker/go-connections/sockets/tcp_socket.go
index 8a82727df00a2891c5260110d2f692ca322bb85c..53cbb6c79e476280d2ec6a7ddd2e520c856b05cb 100644
--- a/vendor/github.com/docker/go-connections/sockets/tcp_socket.go
+++ b/vendor/github.com/docker/go-connections/sockets/tcp_socket.go
@@ -7,7 +7,7 @@ import (
 )
 
 // NewTCPSocket creates a TCP socket listener with the specified address and
-// and the specified tls configuration. If TLSConfig is set, will encapsulate the
+// the specified tls configuration. If TLSConfig is set, will encapsulate the
 // TCP listener inside a TLS one.
 func NewTCPSocket(addr string, tlsConfig *tls.Config) (net.Listener, error) {
 	l, err := net.Listen("tcp", addr)
diff --git a/vendor/github.com/docker/go-connections/sockets/unix_socket.go b/vendor/github.com/docker/go-connections/sockets/unix_socket.go
index d1627349f826c0f02fe93bc19d69fba15676b604..a8b5dbb6fdc0472c9b3471e03d574c1bed4aaa42 100644
--- a/vendor/github.com/docker/go-connections/sockets/unix_socket.go
+++ b/vendor/github.com/docker/go-connections/sockets/unix_socket.go
@@ -1,30 +1,26 @@
-// +build linux freebsd solaris
+// +build !windows
 
 package sockets
 
 import (
-	"fmt"
 	"net"
 	"os"
-	"strconv"
 	"syscall"
-
-	"github.com/Sirupsen/logrus"
-	"github.com/opencontainers/runc/libcontainer/user"
 )
 
 // NewUnixSocket creates a unix socket with the specified path and group.
-func NewUnixSocket(path, group string) (net.Listener, error) {
+func NewUnixSocket(path string, gid int) (net.Listener, error) {
 	if err := syscall.Unlink(path); err != nil && !os.IsNotExist(err) {
 		return nil, err
 	}
 	mask := syscall.Umask(0777)
 	defer syscall.Umask(mask)
+
 	l, err := net.Listen("unix", path)
 	if err != nil {
 		return nil, err
 	}
-	if err := setSocketGroup(path, group); err != nil {
+	if err := os.Chown(path, 0, gid); err != nil {
 		l.Close()
 		return nil, err
 	}
@@ -34,47 +30,3 @@ func NewUnixSocket(path, group string) (net.Listener, error) {
 	}
 	return l, nil
 }
-
-func setSocketGroup(path, group string) error {
-	if group == "" {
-		return nil
-	}
-	if err := changeGroup(path, group); err != nil {
-		if group != "docker" {
-			return err
-		}
-		logrus.Debugf("Warning: could not change group %s to docker: %v", path, err)
-	}
-	return nil
-}
-
-func changeGroup(path string, nameOrGid string) error {
-	gid, err := lookupGidByName(nameOrGid)
-	if err != nil {
-		return err
-	}
-	logrus.Debugf("%s group found. gid: %d", nameOrGid, gid)
-	return os.Chown(path, 0, gid)
-}
-
-func lookupGidByName(nameOrGid string) (int, error) {
-	groupFile, err := user.GetGroupPath()
-	if err != nil {
-		return -1, err
-	}
-	groups, err := user.ParseGroupFileFilter(groupFile, func(g user.Group) bool {
-		return g.Name == nameOrGid || strconv.Itoa(g.Gid) == nameOrGid
-	})
-	if err != nil {
-		return -1, err
-	}
-	if groups != nil && len(groups) > 0 {
-		return groups[0].Gid, nil
-	}
-	gid, err := strconv.Atoi(nameOrGid)
-	if err == nil {
-		logrus.Warnf("Could not find GID %d", gid)
-		return gid, nil
-	}
-	return -1, fmt.Errorf("Group %s not found", nameOrGid)
-}
diff --git a/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go b/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go
new file mode 100644
index 0000000000000000000000000000000000000000..1ca0965e06ea5549d2cb24356f0e6d406a6371a4
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go
@@ -0,0 +1,18 @@
+// +build go1.7
+
+package tlsconfig
+
+import (
+	"crypto/x509"
+	"runtime"
+)
+
+// SystemCertPool returns a copy of the system cert pool,
+// returns an error if failed to load or empty pool on windows.
+func SystemCertPool() (*x509.CertPool, error) {
+	certpool, err := x509.SystemCertPool()
+	if err != nil && runtime.GOOS == "windows" {
+		return x509.NewCertPool(), nil
+	}
+	return certpool, err
+}
diff --git a/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go b/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go
new file mode 100644
index 0000000000000000000000000000000000000000..9ca974539a774083728f717d37f86125393991f1
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go
@@ -0,0 +1,14 @@
+// +build !go1.7
+
+package tlsconfig
+
+import (
+	"crypto/x509"
+
+)
+
+// SystemCertPool returns a new empty cert pool,
+// accessing system cert pool is supported in go 1.7
+func SystemCertPool() (*x509.CertPool, error) {
+	return x509.NewCertPool(), nil
+}
diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config.go b/vendor/github.com/docker/go-connections/tlsconfig/config.go
index 2cd50a62ca986f6b0ea03378696ec547ba140776..1b31bbb8b1b96c9929b4e9d69408cf3d9bf67469 100644
--- a/vendor/github.com/docker/go-connections/tlsconfig/config.go
+++ b/vendor/github.com/docker/go-connections/tlsconfig/config.go
@@ -8,11 +8,12 @@ package tlsconfig
 import (
 	"crypto/tls"
 	"crypto/x509"
+	"encoding/pem"
 	"fmt"
 	"io/ioutil"
 	"os"
 
-	"github.com/Sirupsen/logrus"
+	"github.com/pkg/errors"
 )
 
 // Options represents the information needed to create client and server TLS configurations.
@@ -29,6 +30,14 @@ type Options struct {
 	InsecureSkipVerify bool
 	// server-only option
 	ClientAuth tls.ClientAuthType
+	// If ExclusiveRootPools is set, then if a CA file is provided, the root pool used for TLS
+	// creds will include exclusively the roots in that CA file.  If no CA file is provided,
+	// the system pool will be used.
+	ExclusiveRootPools bool
+	MinVersion         uint16
+	// If Passphrase is set, it will be used to decrypt a TLS private key
+	// if the key is encrypted
+	Passphrase string
 }
 
 // Extra (server-side) accepted CBC cipher suites - will phase out in the future
@@ -46,6 +55,15 @@ var acceptedCBCCiphers = []uint16{
 // known weak algorithms removed.
 var DefaultServerAcceptedCiphers = append(clientCipherSuites, acceptedCBCCiphers...)
 
+// allTLSVersions lists all the TLS versions and is used by the code that validates
+// a uint16 value as a TLS version.
+var allTLSVersions = map[uint16]struct{}{
+	tls.VersionSSL30: {},
+	tls.VersionTLS10: {},
+	tls.VersionTLS11: {},
+	tls.VersionTLS12: {},
+}
+
 // ServerDefault returns a secure-enough TLS configuration for the server TLS configuration.
 func ServerDefault() *tls.Config {
 	return &tls.Config{
@@ -66,38 +84,133 @@ func ClientDefault() *tls.Config {
 }
 
 // certPool returns an X.509 certificate pool from `caFile`, the certificate file.
-func certPool(caFile string) (*x509.CertPool, error) {
+func certPool(caFile string, exclusivePool bool) (*x509.CertPool, error) {
 	// If we should verify the server, we need to load a trusted ca
-	certPool := x509.NewCertPool()
+	var (
+		certPool *x509.CertPool
+		err      error
+	)
+	if exclusivePool {
+		certPool = x509.NewCertPool()
+	} else {
+		certPool, err = SystemCertPool()
+		if err != nil {
+			return nil, fmt.Errorf("failed to read system certificates: %v", err)
+		}
+	}
 	pem, err := ioutil.ReadFile(caFile)
 	if err != nil {
-		return nil, fmt.Errorf("Could not read CA certificate %q: %v", caFile, err)
+		return nil, fmt.Errorf("could not read CA certificate %q: %v", caFile, err)
 	}
 	if !certPool.AppendCertsFromPEM(pem) {
 		return nil, fmt.Errorf("failed to append certificates from PEM file: %q", caFile)
 	}
-	logrus.Debugf("Trusting %d certs", len(certPool.Subjects()))
 	return certPool, nil
 }
 
+// isValidMinVersion checks that the input value is a valid tls minimum version
+func isValidMinVersion(version uint16) bool {
+	_, ok := allTLSVersions[version]
+	return ok
+}
+
+// adjustMinVersion sets the MinVersion on `config`, the input configuration.
+// It assumes the current MinVersion on the `config` is the lowest allowed.
+func adjustMinVersion(options Options, config *tls.Config) error {
+	if options.MinVersion > 0 {
+		if !isValidMinVersion(options.MinVersion) {
+			return fmt.Errorf("Invalid minimum TLS version: %x", options.MinVersion)
+		}
+		if options.MinVersion < config.MinVersion {
+			return fmt.Errorf("Requested minimum TLS version is too low. Should be at-least: %x", config.MinVersion)
+		}
+		config.MinVersion = options.MinVersion
+	}
+
+	return nil
+}
+
+// IsErrEncryptedKey returns true if the 'err' is an error of incorrect
+// password when trying to decrypt a TLS private key
+func IsErrEncryptedKey(err error) bool {
+	return errors.Cause(err) == x509.IncorrectPasswordError
+}
+
+// getPrivateKey returns the private key in 'keyBytes', in PEM-encoded format.
+// If the private key is encrypted, 'passphrase' is used to decrypt the
+// private key.
+func getPrivateKey(keyBytes []byte, passphrase string) ([]byte, error) {
+	// this section makes some small changes to code from notary/tuf/utils/x509.go
+	pemBlock, _ := pem.Decode(keyBytes)
+	if pemBlock == nil {
+		return nil, fmt.Errorf("no valid private key found")
+	}
+
+	var err error
+	if x509.IsEncryptedPEMBlock(pemBlock) {
+		keyBytes, err = x509.DecryptPEMBlock(pemBlock, []byte(passphrase))
+		if err != nil {
+			return nil, errors.Wrap(err, "private key is encrypted, but could not decrypt it")
+		}
+		keyBytes = pem.EncodeToMemory(&pem.Block{Type: pemBlock.Type, Bytes: keyBytes})
+	}
+
+	return keyBytes, nil
+}
+
+// getCert returns a Certificate from the CertFile and KeyFile in 'options',
+// if the key is encrypted, the Passphrase in 'options' will be used to
+// decrypt it.
+func getCert(options Options) ([]tls.Certificate, error) {
+	if options.CertFile == "" && options.KeyFile == "" {
+		return nil, nil
+	}
+
+	errMessage := "Could not load X509 key pair"
+
+	cert, err := ioutil.ReadFile(options.CertFile)
+	if err != nil {
+		return nil, errors.Wrap(err, errMessage)
+	}
+
+	prKeyBytes, err := ioutil.ReadFile(options.KeyFile)
+	if err != nil {
+		return nil, errors.Wrap(err, errMessage)
+	}
+
+	prKeyBytes, err = getPrivateKey(prKeyBytes, options.Passphrase)
+	if err != nil {
+		return nil, errors.Wrap(err, errMessage)
+	}
+
+	tlsCert, err := tls.X509KeyPair(cert, prKeyBytes)
+	if err != nil {
+		return nil, errors.Wrap(err, errMessage)
+	}
+
+	return []tls.Certificate{tlsCert}, nil
+}
+
 // Client returns a TLS configuration meant to be used by a client.
 func Client(options Options) (*tls.Config, error) {
 	tlsConfig := ClientDefault()
 	tlsConfig.InsecureSkipVerify = options.InsecureSkipVerify
 	if !options.InsecureSkipVerify && options.CAFile != "" {
-		CAs, err := certPool(options.CAFile)
+		CAs, err := certPool(options.CAFile, options.ExclusiveRootPools)
 		if err != nil {
 			return nil, err
 		}
 		tlsConfig.RootCAs = CAs
 	}
 
-	if options.CertFile != "" || options.KeyFile != "" {
-		tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile)
-		if err != nil {
-			return nil, fmt.Errorf("Could not load X509 key pair: %v. Make sure the key is not encrypted", err)
-		}
-		tlsConfig.Certificates = []tls.Certificate{tlsCert}
+	tlsCerts, err := getCert(options)
+	if err != nil {
+		return nil, err
+	}
+	tlsConfig.Certificates = tlsCerts
+
+	if err := adjustMinVersion(options, tlsConfig); err != nil {
+		return nil, err
 	}
 
 	return tlsConfig, nil
@@ -115,12 +228,17 @@ func Server(options Options) (*tls.Config, error) {
 		return nil, fmt.Errorf("Error reading X509 key pair (cert: %q, key: %q): %v. Make sure the key is not encrypted.", options.CertFile, options.KeyFile, err)
 	}
 	tlsConfig.Certificates = []tls.Certificate{tlsCert}
-	if options.ClientAuth >= tls.VerifyClientCertIfGiven {
-		CAs, err := certPool(options.CAFile)
+	if options.ClientAuth >= tls.VerifyClientCertIfGiven && options.CAFile != "" {
+		CAs, err := certPool(options.CAFile, options.ExclusiveRootPools)
 		if err != nil {
 			return nil, err
 		}
 		tlsConfig.ClientCAs = CAs
 	}
+
+	if err := adjustMinVersion(options, tlsConfig); err != nil {
+		return nil, err
+	}
+
 	return tlsConfig, nil
 }
diff --git a/vendor/github.com/gorilla/context/LICENSE b/vendor/github.com/gorilla/context/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..0e5fb872800da9557f75a5650bb9d80c1c2cf715
--- /dev/null
+++ b/vendor/github.com/gorilla/context/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 Rodrigo Moraes. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+	 * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+	 * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+	 * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/gorilla/context/README.md b/vendor/github.com/gorilla/context/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..08f86693bcd88a09bacde0bfffbd2bbf32bad9f8
--- /dev/null
+++ b/vendor/github.com/gorilla/context/README.md
@@ -0,0 +1,10 @@
+context
+=======
+[![Build Status](https://travis-ci.org/gorilla/context.png?branch=master)](https://travis-ci.org/gorilla/context)
+
+gorilla/context is a general purpose registry for global request variables.
+
+> Note: gorilla/context, having been born well before `context.Context` existed, does not play well
+> with the shallow copying of the request that [`http.Request.WithContext`](https://golang.org/pkg/net/http/#Request.WithContext) (added to net/http Go 1.7 onwards) performs. You should either use *just* gorilla/context, or moving forward, the new `http.Request.Context()`.
+
+Read the full documentation here: http://www.gorillatoolkit.org/pkg/context
diff --git a/vendor/github.com/gorilla/context/context.go b/vendor/github.com/gorilla/context/context.go
new file mode 100644
index 0000000000000000000000000000000000000000..81cb128b19cad7e0a9ee89d7626746d1cf859564
--- /dev/null
+++ b/vendor/github.com/gorilla/context/context.go
@@ -0,0 +1,143 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package context
+
+import (
+	"net/http"
+	"sync"
+	"time"
+)
+
+var (
+	mutex sync.RWMutex
+	data  = make(map[*http.Request]map[interface{}]interface{})
+	datat = make(map[*http.Request]int64)
+)
+
+// Set stores a value for a given key in a given request.
+func Set(r *http.Request, key, val interface{}) {
+	mutex.Lock()
+	if data[r] == nil {
+		data[r] = make(map[interface{}]interface{})
+		datat[r] = time.Now().Unix()
+	}
+	data[r][key] = val
+	mutex.Unlock()
+}
+
+// Get returns a value stored for a given key in a given request.
+func Get(r *http.Request, key interface{}) interface{} {
+	mutex.RLock()
+	if ctx := data[r]; ctx != nil {
+		value := ctx[key]
+		mutex.RUnlock()
+		return value
+	}
+	mutex.RUnlock()
+	return nil
+}
+
+// GetOk returns stored value and presence state like multi-value return of map access.
+func GetOk(r *http.Request, key interface{}) (interface{}, bool) {
+	mutex.RLock()
+	if _, ok := data[r]; ok {
+		value, ok := data[r][key]
+		mutex.RUnlock()
+		return value, ok
+	}
+	mutex.RUnlock()
+	return nil, false
+}
+
+// GetAll returns all stored values for the request as a map. Nil is returned for invalid requests.
+func GetAll(r *http.Request) map[interface{}]interface{} {
+	mutex.RLock()
+	if context, ok := data[r]; ok {
+		result := make(map[interface{}]interface{}, len(context))
+		for k, v := range context {
+			result[k] = v
+		}
+		mutex.RUnlock()
+		return result
+	}
+	mutex.RUnlock()
+	return nil
+}
+
+// GetAllOk returns all stored values for the request as a map and a boolean value that indicates if
+// the request was registered.
+func GetAllOk(r *http.Request) (map[interface{}]interface{}, bool) {
+	mutex.RLock()
+	context, ok := data[r]
+	result := make(map[interface{}]interface{}, len(context))
+	for k, v := range context {
+		result[k] = v
+	}
+	mutex.RUnlock()
+	return result, ok
+}
+
+// Delete removes a value stored for a given key in a given request.
+func Delete(r *http.Request, key interface{}) {
+	mutex.Lock()
+	if data[r] != nil {
+		delete(data[r], key)
+	}
+	mutex.Unlock()
+}
+
+// Clear removes all values stored for a given request.
+//
+// This is usually called by a handler wrapper to clean up request
+// variables at the end of a request lifetime. See ClearHandler().
+func Clear(r *http.Request) {
+	mutex.Lock()
+	clear(r)
+	mutex.Unlock()
+}
+
+// clear is Clear without the lock.
+func clear(r *http.Request) {
+	delete(data, r)
+	delete(datat, r)
+}
+
+// Purge removes request data stored for longer than maxAge, in seconds.
+// It returns the amount of requests removed.
+//
+// If maxAge <= 0, all request data is removed.
+//
+// This is only used for sanity check: in case context cleaning was not
+// properly set some request data can be kept forever, consuming an increasing
+// amount of memory. In case this is detected, Purge() must be called
+// periodically until the problem is fixed.
+func Purge(maxAge int) int {
+	mutex.Lock()
+	count := 0
+	if maxAge <= 0 {
+		count = len(data)
+		data = make(map[*http.Request]map[interface{}]interface{})
+		datat = make(map[*http.Request]int64)
+	} else {
+		min := time.Now().Unix() - int64(maxAge)
+		for r := range data {
+			if datat[r] < min {
+				clear(r)
+				count++
+			}
+		}
+	}
+	mutex.Unlock()
+	return count
+}
+
+// ClearHandler wraps an http.Handler and clears request values at the end
+// of a request lifetime.
+func ClearHandler(h http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		defer Clear(r)
+		h.ServeHTTP(w, r)
+	})
+}
diff --git a/vendor/github.com/gorilla/context/doc.go b/vendor/github.com/gorilla/context/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..448d1bfcac6e1cf1d410abdc7de800f915276fa6
--- /dev/null
+++ b/vendor/github.com/gorilla/context/doc.go
@@ -0,0 +1,88 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package context stores values shared during a request lifetime.
+
+Note: gorilla/context, having been born well before `context.Context` existed,
+does not play well with the shallow copying of the request that
+[`http.Request.WithContext`](https://golang.org/pkg/net/http/#Request.WithContext)
+(added to net/http Go 1.7 onwards) performs. You should either use *just*
+gorilla/context, or moving forward, the new `http.Request.Context()`.
+
+For example, a router can set variables extracted from the URL and later
+application handlers can access those values, or it can be used to store
+sessions values to be saved at the end of a request. There are several
+others common uses.
+
+The idea was posted by Brad Fitzpatrick to the go-nuts mailing list:
+
+	http://groups.google.com/group/golang-nuts/msg/e2d679d303aa5d53
+
+Here's the basic usage: first define the keys that you will need. The key
+type is interface{} so a key can be of any type that supports equality.
+Here we define a key using a custom int type to avoid name collisions:
+
+	package foo
+
+	import (
+		"github.com/gorilla/context"
+	)
+
+	type key int
+
+	const MyKey key = 0
+
+Then set a variable. Variables are bound to an http.Request object, so you
+need a request instance to set a value:
+
+	context.Set(r, MyKey, "bar")
+
+The application can later access the variable using the same key you provided:
+
+	func MyHandler(w http.ResponseWriter, r *http.Request) {
+		// val is "bar".
+		val := context.Get(r, foo.MyKey)
+
+		// returns ("bar", true)
+		val, ok := context.GetOk(r, foo.MyKey)
+		// ...
+	}
+
+And that's all about the basic usage. We discuss some other ideas below.
+
+Any type can be stored in the context. To enforce a given type, make the key
+private and wrap Get() and Set() to accept and return values of a specific
+type:
+
+	type key int
+
+	const mykey key = 0
+
+	// GetMyKey returns a value for this package from the request values.
+	func GetMyKey(r *http.Request) SomeType {
+		if rv := context.Get(r, mykey); rv != nil {
+			return rv.(SomeType)
+		}
+		return nil
+	}
+
+	// SetMyKey sets a value for this package in the request values.
+	func SetMyKey(r *http.Request, val SomeType) {
+		context.Set(r, mykey, val)
+	}
+
+Variables must be cleared at the end of a request, to remove all values
+that were stored. This can be done in an http.Handler, after a request was
+served. Just call Clear() passing the request:
+
+	context.Clear(r)
+
+...or use ClearHandler(), which conveniently wraps an http.Handler to clear
+variables at the end of a request lifetime.
+
+The Routers from the packages gorilla/mux and gorilla/pat call Clear()
+so if you are using either of them you don't need to clear the context manually.
+*/
+package context
diff --git a/vendor/github.com/gorilla/mux/LICENSE b/vendor/github.com/gorilla/mux/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..0e5fb872800da9557f75a5650bb9d80c1c2cf715
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 Rodrigo Moraes. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+	 * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+	 * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+	 * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/gorilla/mux/README.md b/vendor/github.com/gorilla/mux/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..cdab8784d11dbe7afca91183ff67a6b0208f3863
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/README.md
@@ -0,0 +1,340 @@
+gorilla/mux
+===
+[![GoDoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux)
+[![Build Status](https://travis-ci.org/gorilla/mux.svg?branch=master)](https://travis-ci.org/gorilla/mux)
+[![Sourcegraph](https://sourcegraph.com/github.com/gorilla/mux/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/mux?badge)
+
+![Gorilla Logo](http://www.gorillatoolkit.org/static/images/gorilla-icon-64.png)
+
+http://www.gorillatoolkit.org/pkg/mux
+
+Package `gorilla/mux` implements a request router and dispatcher for matching incoming requests to
+their respective handler.
+
+The name mux stands for "HTTP request multiplexer". Like the standard `http.ServeMux`, `mux.Router` matches incoming requests against a list of registered routes and calls a handler for the route that matches the URL or other conditions. The main features are:
+
+* It implements the `http.Handler` interface so it is compatible with the standard `http.ServeMux`.
+* Requests can be matched based on URL host, path, path prefix, schemes, header and query values, HTTP methods or using custom matchers.
+* URL hosts and paths can have variables with an optional regular expression.
+* Registered URLs can be built, or "reversed", which helps maintaining references to resources.
+* Routes can be used as subrouters: nested routes are only tested if the parent route matches. This is useful to define groups of routes that share common conditions like a host, a path prefix or other repeated attributes. As a bonus, this optimizes request matching.
+
+---
+
+* [Install](#install)
+* [Examples](#examples)
+* [Matching Routes](#matching-routes)
+* [Listing Routes](#listing-routes)
+* [Static Files](#static-files)
+* [Registered URLs](#registered-urls)
+* [Full Example](#full-example)
+
+---
+
+## Install
+
+With a [correctly configured](https://golang.org/doc/install#testing) Go toolchain:
+
+```sh
+go get -u github.com/gorilla/mux
+```
+
+## Examples
+
+Let's start registering a couple of URL paths and handlers:
+
+```go
+func main() {
+	r := mux.NewRouter()
+	r.HandleFunc("/", HomeHandler)
+	r.HandleFunc("/products", ProductsHandler)
+	r.HandleFunc("/articles", ArticlesHandler)
+	http.Handle("/", r)
+}
+```
+
+Here we register three routes mapping URL paths to handlers. This is equivalent to how `http.HandleFunc()` works: if an incoming request URL matches one of the paths, the corresponding handler is called passing (`http.ResponseWriter`, `*http.Request`) as parameters.
+
+Paths can have variables. They are defined using the format `{name}` or `{name:pattern}`. If a regular expression pattern is not defined, the matched variable will be anything until the next slash. For example:
+
+```go
+r := mux.NewRouter()
+r.HandleFunc("/products/{key}", ProductHandler)
+r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler)
+r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+```
+
+The names are used to create a map of route variables which can be retrieved calling `mux.Vars()`:
+
+```go
+func ArticlesCategoryHandler(w http.ResponseWriter, r *http.Request) {
+	vars := mux.Vars(r)
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprintf(w, "Category: %v\n", vars["category"])
+}
+```
+
+And this is all you need to know about the basic usage. More advanced options are explained below.
+
+### Matching Routes
+
+Routes can also be restricted to a domain or subdomain. Just define a host pattern to be matched. They can also have variables:
+
+```go
+r := mux.NewRouter()
+// Only matches if domain is "www.example.com".
+r.Host("www.example.com")
+// Matches a dynamic subdomain.
+r.Host("{subdomain:[a-z]+}.domain.com")
+```
+
+There are several other matchers that can be added. To match path prefixes:
+
+```go
+r.PathPrefix("/products/")
+```
+
+...or HTTP methods:
+
+```go
+r.Methods("GET", "POST")
+```
+
+...or URL schemes:
+
+```go
+r.Schemes("https")
+```
+
+...or header values:
+
+```go
+r.Headers("X-Requested-With", "XMLHttpRequest")
+```
+
+...or query values:
+
+```go
+r.Queries("key", "value")
+```
+
+...or to use a custom matcher function:
+
+```go
+r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool {
+	return r.ProtoMajor == 0
+})
+```
+
+...and finally, it is possible to combine several matchers in a single route:
+
+```go
+r.HandleFunc("/products", ProductsHandler).
+  Host("www.example.com").
+  Methods("GET").
+  Schemes("http")
+```
+
+Setting the same matching conditions again and again can be boring, so we have a way to group several routes that share the same requirements. We call it "subrouting".
+
+For example, let's say we have several URLs that should only match when the host is `www.example.com`. Create a route for that host and get a "subrouter" from it:
+
+```go
+r := mux.NewRouter()
+s := r.Host("www.example.com").Subrouter()
+```
+
+Then register routes in the subrouter:
+
+```go
+s.HandleFunc("/products/", ProductsHandler)
+s.HandleFunc("/products/{key}", ProductHandler)
+s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+```
+
+The three URL paths we registered above will only be tested if the domain is `www.example.com`, because the subrouter is tested first. This is not only convenient, but also optimizes request matching. You can create subrouters combining any attribute matchers accepted by a route.
+
+Subrouters can be used to create domain or path "namespaces": you define subrouters in a central place and then parts of the app can register its paths relatively to a given subrouter.
+
+There's one more thing about subroutes. When a subrouter has a path prefix, the inner routes use it as base for their paths:
+
+```go
+r := mux.NewRouter()
+s := r.PathPrefix("/products").Subrouter()
+// "/products/"
+s.HandleFunc("/", ProductsHandler)
+// "/products/{key}/"
+s.HandleFunc("/{key}/", ProductHandler)
+// "/products/{key}/details"
+s.HandleFunc("/{key}/details", ProductDetailsHandler)
+```
+
+### Listing Routes
+
+Routes on a mux can be listed using the Router.Walk method—useful for generating documentation:
+
+```go
+package main
+
+import (
+    "fmt"
+    "net/http"
+
+    "github.com/gorilla/mux"
+)
+
+func handler(w http.ResponseWriter, r *http.Request) {
+    return
+}
+
+func main() {
+    r := mux.NewRouter()
+    r.HandleFunc("/", handler)
+    r.HandleFunc("/products", handler)
+    r.HandleFunc("/articles", handler)
+    r.HandleFunc("/articles/{id}", handler)
+    r.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error {
+        t, err := route.GetPathTemplate()
+        if err != nil {
+            return err
+        }
+        fmt.Println(t)
+        return nil
+    })
+    http.Handle("/", r)
+}
+```
+
+### Static Files
+
+Note that the path provided to `PathPrefix()` represents a "wildcard": calling
+`PathPrefix("/static/").Handler(...)` means that the handler will be passed any
+request that matches "/static/*". This makes it easy to serve static files with mux:
+
+```go
+func main() {
+	var dir string
+
+	flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir")
+	flag.Parse()
+	r := mux.NewRouter()
+
+	// This will serve files under http://localhost:8000/static/<filename>
+	r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir))))
+
+	srv := &http.Server{
+		Handler:      r,
+		Addr:         "127.0.0.1:8000",
+		// Good practice: enforce timeouts for servers you create!
+		WriteTimeout: 15 * time.Second,
+		ReadTimeout:  15 * time.Second,
+	}
+
+	log.Fatal(srv.ListenAndServe())
+}
+```
+
+### Registered URLs
+
+Now let's see how to build registered URLs.
+
+Routes can be named. All routes that define a name can have their URLs built, or "reversed". We define a name calling `Name()` on a route. For example:
+
+```go
+r := mux.NewRouter()
+r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+  Name("article")
+```
+
+To build a URL, get the route and call the `URL()` method, passing a sequence of key/value pairs for the route variables. For the previous route, we would do:
+
+```go
+url, err := r.Get("article").URL("category", "technology", "id", "42")
+```
+
+...and the result will be a `url.URL` with the following path:
+
+```
+"/articles/technology/42"
+```
+
+This also works for host variables:
+
+```go
+r := mux.NewRouter()
+r.Host("{subdomain}.domain.com").
+  Path("/articles/{category}/{id:[0-9]+}").
+  HandlerFunc(ArticleHandler).
+  Name("article")
+
+// url.String() will be "http://news.domain.com/articles/technology/42"
+url, err := r.Get("article").URL("subdomain", "news",
+                                 "category", "technology",
+                                 "id", "42")
+```
+
+All variables defined in the route are required, and their values must conform to the corresponding patterns. These requirements guarantee that a generated URL will always match a registered route -- the only exception is for explicitly defined "build-only" routes which never match.
+
+Regex support also exists for matching Headers within a route. For example, we could do:
+
+```go
+r.HeadersRegexp("Content-Type", "application/(text|json)")
+```
+
+...and the route will match both requests with a Content-Type of `application/json` as well as `application/text`.
+
+There's also a way to build only the URL host or path for a route: use the methods `URLHost()` or `URLPath()` instead. For the previous route, we would do:
+
+```go
+// "http://news.domain.com/"
+host, err := r.Get("article").URLHost("subdomain", "news")
+
+// "/articles/technology/42"
+path, err := r.Get("article").URLPath("category", "technology", "id", "42")
+```
+
+And if you use subrouters, host and path defined separately can be built as well:
+
+```go
+r := mux.NewRouter()
+s := r.Host("{subdomain}.domain.com").Subrouter()
+s.Path("/articles/{category}/{id:[0-9]+}").
+  HandlerFunc(ArticleHandler).
+  Name("article")
+
+// "http://news.domain.com/articles/technology/42"
+url, err := r.Get("article").URL("subdomain", "news",
+                                 "category", "technology",
+                                 "id", "42")
+```
+
+## Full Example
+
+Here's a complete, runnable example of a small `mux` based server:
+
+```go
+package main
+
+import (
+	"net/http"
+	"log"
+	"github.com/gorilla/mux"
+)
+
+func YourHandler(w http.ResponseWriter, r *http.Request) {
+	w.Write([]byte("Gorilla!\n"))
+}
+
+func main() {
+	r := mux.NewRouter()
+	// Routes consist of a path and a handler function.
+	r.HandleFunc("/", YourHandler)
+
+	// Bind to a port and pass our router in
+	log.Fatal(http.ListenAndServe(":8000", r))
+}
+```
+
+## License
+
+BSD licensed. See the LICENSE file for details.
diff --git a/vendor/github.com/gorilla/mux/context_gorilla.go b/vendor/github.com/gorilla/mux/context_gorilla.go
new file mode 100644
index 0000000000000000000000000000000000000000..d7adaa8fad4fa8ce62d18a7058d10723ff2288af
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/context_gorilla.go
@@ -0,0 +1,26 @@
+// +build !go1.7
+
+package mux
+
+import (
+	"net/http"
+
+	"github.com/gorilla/context"
+)
+
+func contextGet(r *http.Request, key interface{}) interface{} {
+	return context.Get(r, key)
+}
+
+func contextSet(r *http.Request, key, val interface{}) *http.Request {
+	if val == nil {
+		return r
+	}
+
+	context.Set(r, key, val)
+	return r
+}
+
+func contextClear(r *http.Request) {
+	context.Clear(r)
+}
diff --git a/vendor/github.com/gorilla/mux/context_native.go b/vendor/github.com/gorilla/mux/context_native.go
new file mode 100644
index 0000000000000000000000000000000000000000..209cbea7d66170098a4c743f6e8746595e59f60f
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/context_native.go
@@ -0,0 +1,24 @@
+// +build go1.7
+
+package mux
+
+import (
+	"context"
+	"net/http"
+)
+
+func contextGet(r *http.Request, key interface{}) interface{} {
+	return r.Context().Value(key)
+}
+
+func contextSet(r *http.Request, key, val interface{}) *http.Request {
+	if val == nil {
+		return r
+	}
+
+	return r.WithContext(context.WithValue(r.Context(), key, val))
+}
+
+func contextClear(r *http.Request) {
+	return
+}
diff --git a/vendor/github.com/gorilla/mux/doc.go b/vendor/github.com/gorilla/mux/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..00daf4a721c8ef91b71eaca08e5287c309915914
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/doc.go
@@ -0,0 +1,240 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package mux implements a request router and dispatcher.
+
+The name mux stands for "HTTP request multiplexer". Like the standard
+http.ServeMux, mux.Router matches incoming requests against a list of
+registered routes and calls a handler for the route that matches the URL
+or other conditions. The main features are:
+
+	* Requests can be matched based on URL host, path, path prefix, schemes,
+	  header and query values, HTTP methods or using custom matchers.
+	* URL hosts and paths can have variables with an optional regular
+	  expression.
+	* Registered URLs can be built, or "reversed", which helps maintaining
+	  references to resources.
+	* Routes can be used as subrouters: nested routes are only tested if the
+	  parent route matches. This is useful to define groups of routes that
+	  share common conditions like a host, a path prefix or other repeated
+	  attributes. As a bonus, this optimizes request matching.
+	* It implements the http.Handler interface so it is compatible with the
+	  standard http.ServeMux.
+
+Let's start registering a couple of URL paths and handlers:
+
+	func main() {
+		r := mux.NewRouter()
+		r.HandleFunc("/", HomeHandler)
+		r.HandleFunc("/products", ProductsHandler)
+		r.HandleFunc("/articles", ArticlesHandler)
+		http.Handle("/", r)
+	}
+
+Here we register three routes mapping URL paths to handlers. This is
+equivalent to how http.HandleFunc() works: if an incoming request URL matches
+one of the paths, the corresponding handler is called passing
+(http.ResponseWriter, *http.Request) as parameters.
+
+Paths can have variables. They are defined using the format {name} or
+{name:pattern}. If a regular expression pattern is not defined, the matched
+variable will be anything until the next slash. For example:
+
+	r := mux.NewRouter()
+	r.HandleFunc("/products/{key}", ProductHandler)
+	r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler)
+	r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+
+Groups can be used inside patterns, as long as they are non-capturing (?:re). For example:
+
+	r.HandleFunc("/articles/{category}/{sort:(?:asc|desc|new)}", ArticlesCategoryHandler)
+
+The names are used to create a map of route variables which can be retrieved
+calling mux.Vars():
+
+	vars := mux.Vars(request)
+	category := vars["category"]
+
+Note that if any capturing groups are present, mux will panic() during parsing. To prevent
+this, convert any capturing groups to non-capturing, e.g. change "/{sort:(asc|desc)}" to
+"/{sort:(?:asc|desc)}". This is a change from prior versions which behaved unpredictably
+when capturing groups were present.
+
+And this is all you need to know about the basic usage. More advanced options
+are explained below.
+
+Routes can also be restricted to a domain or subdomain. Just define a host
+pattern to be matched. They can also have variables:
+
+	r := mux.NewRouter()
+	// Only matches if domain is "www.example.com".
+	r.Host("www.example.com")
+	// Matches a dynamic subdomain.
+	r.Host("{subdomain:[a-z]+}.domain.com")
+
+There are several other matchers that can be added. To match path prefixes:
+
+	r.PathPrefix("/products/")
+
+...or HTTP methods:
+
+	r.Methods("GET", "POST")
+
+...or URL schemes:
+
+	r.Schemes("https")
+
+...or header values:
+
+	r.Headers("X-Requested-With", "XMLHttpRequest")
+
+...or query values:
+
+	r.Queries("key", "value")
+
+...or to use a custom matcher function:
+
+	r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool {
+		return r.ProtoMajor == 0
+	})
+
+...and finally, it is possible to combine several matchers in a single route:
+
+	r.HandleFunc("/products", ProductsHandler).
+	  Host("www.example.com").
+	  Methods("GET").
+	  Schemes("http")
+
+Setting the same matching conditions again and again can be boring, so we have
+a way to group several routes that share the same requirements.
+We call it "subrouting".
+
+For example, let's say we have several URLs that should only match when the
+host is "www.example.com". Create a route for that host and get a "subrouter"
+from it:
+
+	r := mux.NewRouter()
+	s := r.Host("www.example.com").Subrouter()
+
+Then register routes in the subrouter:
+
+	s.HandleFunc("/products/", ProductsHandler)
+	s.HandleFunc("/products/{key}", ProductHandler)
+	s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+
+The three URL paths we registered above will only be tested if the domain is
+"www.example.com", because the subrouter is tested first. This is not
+only convenient, but also optimizes request matching. You can create
+subrouters combining any attribute matchers accepted by a route.
+
+Subrouters can be used to create domain or path "namespaces": you define
+subrouters in a central place and then parts of the app can register its
+paths relatively to a given subrouter.
+
+There's one more thing about subroutes. When a subrouter has a path prefix,
+the inner routes use it as base for their paths:
+
+	r := mux.NewRouter()
+	s := r.PathPrefix("/products").Subrouter()
+	// "/products/"
+	s.HandleFunc("/", ProductsHandler)
+	// "/products/{key}/"
+	s.HandleFunc("/{key}/", ProductHandler)
+	// "/products/{key}/details"
+	s.HandleFunc("/{key}/details", ProductDetailsHandler)
+
+Note that the path provided to PathPrefix() represents a "wildcard": calling
+PathPrefix("/static/").Handler(...) means that the handler will be passed any
+request that matches "/static/*". This makes it easy to serve static files with mux:
+
+	func main() {
+		var dir string
+
+		flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir")
+		flag.Parse()
+		r := mux.NewRouter()
+
+		// This will serve files under http://localhost:8000/static/<filename>
+		r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir))))
+
+		srv := &http.Server{
+			Handler:      r,
+			Addr:         "127.0.0.1:8000",
+			// Good practice: enforce timeouts for servers you create!
+			WriteTimeout: 15 * time.Second,
+			ReadTimeout:  15 * time.Second,
+		}
+
+		log.Fatal(srv.ListenAndServe())
+	}
+
+Now let's see how to build registered URLs.
+
+Routes can be named. All routes that define a name can have their URLs built,
+or "reversed". We define a name calling Name() on a route. For example:
+
+	r := mux.NewRouter()
+	r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+	  Name("article")
+
+To build a URL, get the route and call the URL() method, passing a sequence of
+key/value pairs for the route variables. For the previous route, we would do:
+
+	url, err := r.Get("article").URL("category", "technology", "id", "42")
+
+...and the result will be a url.URL with the following path:
+
+	"/articles/technology/42"
+
+This also works for host variables:
+
+	r := mux.NewRouter()
+	r.Host("{subdomain}.domain.com").
+	  Path("/articles/{category}/{id:[0-9]+}").
+	  HandlerFunc(ArticleHandler).
+	  Name("article")
+
+	// url.String() will be "http://news.domain.com/articles/technology/42"
+	url, err := r.Get("article").URL("subdomain", "news",
+	                                 "category", "technology",
+	                                 "id", "42")
+
+All variables defined in the route are required, and their values must
+conform to the corresponding patterns. These requirements guarantee that a
+generated URL will always match a registered route -- the only exception is
+for explicitly defined "build-only" routes which never match.
+
+Regex support also exists for matching Headers within a route. For example, we could do:
+
+	r.HeadersRegexp("Content-Type", "application/(text|json)")
+
+...and the route will match both requests with a Content-Type of `application/json` as well as
+`application/text`
+
+There's also a way to build only the URL host or path for a route:
+use the methods URLHost() or URLPath() instead. For the previous route,
+we would do:
+
+	// "http://news.domain.com/"
+	host, err := r.Get("article").URLHost("subdomain", "news")
+
+	// "/articles/technology/42"
+	path, err := r.Get("article").URLPath("category", "technology", "id", "42")
+
+And if you use subrouters, host and path defined separately can be built
+as well:
+
+	r := mux.NewRouter()
+	s := r.Host("{subdomain}.domain.com").Subrouter()
+	s.Path("/articles/{category}/{id:[0-9]+}").
+	  HandlerFunc(ArticleHandler).
+	  Name("article")
+
+	// "http://news.domain.com/articles/technology/42"
+	url, err := r.Get("article").URL("subdomain", "news",
+	                                 "category", "technology",
+	                                 "id", "42")
+*/
+package mux
diff --git a/vendor/github.com/gorilla/mux/mux.go b/vendor/github.com/gorilla/mux/mux.go
new file mode 100644
index 0000000000000000000000000000000000000000..d66ec38415fd30f0cadc75d2d550525c34e1ac57
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/mux.go
@@ -0,0 +1,542 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import (
+	"errors"
+	"fmt"
+	"net/http"
+	"path"
+	"regexp"
+	"strings"
+)
+
+// NewRouter returns a new router instance.
+func NewRouter() *Router {
+	return &Router{namedRoutes: make(map[string]*Route), KeepContext: false}
+}
+
+// Router registers routes to be matched and dispatches a handler.
+//
+// It implements the http.Handler interface, so it can be registered to serve
+// requests:
+//
+//     var router = mux.NewRouter()
+//
+//     func main() {
+//         http.Handle("/", router)
+//     }
+//
+// Or, for Google App Engine, register it in a init() function:
+//
+//     func init() {
+//         http.Handle("/", router)
+//     }
+//
+// This will send all incoming requests to the router.
+type Router struct {
+	// Configurable Handler to be used when no route matches.
+	NotFoundHandler http.Handler
+	// Parent route, if this is a subrouter.
+	parent parentRoute
+	// Routes to be matched, in order.
+	routes []*Route
+	// Routes by name for URL building.
+	namedRoutes map[string]*Route
+	// See Router.StrictSlash(). This defines the flag for new routes.
+	strictSlash bool
+	// See Router.SkipClean(). This defines the flag for new routes.
+	skipClean bool
+	// If true, do not clear the request context after handling the request.
+	// This has no effect when go1.7+ is used, since the context is stored
+	// on the request itself.
+	KeepContext bool
+	// see Router.UseEncodedPath(). This defines a flag for all routes.
+	useEncodedPath bool
+}
+
+// Match matches registered routes against the request.
+func (r *Router) Match(req *http.Request, match *RouteMatch) bool {
+	for _, route := range r.routes {
+		if route.Match(req, match) {
+			return true
+		}
+	}
+
+	// Closest match for a router (includes sub-routers)
+	if r.NotFoundHandler != nil {
+		match.Handler = r.NotFoundHandler
+		return true
+	}
+	return false
+}
+
+// ServeHTTP dispatches the handler registered in the matched route.
+//
+// When there is a match, the route variables can be retrieved calling
+// mux.Vars(request).
+func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+	if !r.skipClean {
+		path := req.URL.Path
+		if r.useEncodedPath {
+			path = getPath(req)
+		}
+		// Clean path to canonical form and redirect.
+		if p := cleanPath(path); p != path {
+
+			// Added 3 lines (Philip Schlump) - It was dropping the query string and #whatever from query.
+			// This matches with fix in go 1.2 r.c. 4 for same problem.  Go Issue:
+			// http://code.google.com/p/go/issues/detail?id=5252
+			url := *req.URL
+			url.Path = p
+			p = url.String()
+
+			w.Header().Set("Location", p)
+			w.WriteHeader(http.StatusMovedPermanently)
+			return
+		}
+	}
+	var match RouteMatch
+	var handler http.Handler
+	if r.Match(req, &match) {
+		handler = match.Handler
+		req = setVars(req, match.Vars)
+		req = setCurrentRoute(req, match.Route)
+	}
+	if handler == nil {
+		handler = http.NotFoundHandler()
+	}
+	if !r.KeepContext {
+		defer contextClear(req)
+	}
+	handler.ServeHTTP(w, req)
+}
+
+// Get returns a route registered with the given name.
+func (r *Router) Get(name string) *Route {
+	return r.getNamedRoutes()[name]
+}
+
+// GetRoute returns a route registered with the given name. This method
+// was renamed to Get() and remains here for backwards compatibility.
+func (r *Router) GetRoute(name string) *Route {
+	return r.getNamedRoutes()[name]
+}
+
+// StrictSlash defines the trailing slash behavior for new routes. The initial
+// value is false.
+//
+// When true, if the route path is "/path/", accessing "/path" will redirect
+// to the former and vice versa. In other words, your application will always
+// see the path as specified in the route.
+//
+// When false, if the route path is "/path", accessing "/path/" will not match
+// this route and vice versa.
+//
+// Special case: when a route sets a path prefix using the PathPrefix() method,
+// strict slash is ignored for that route because the redirect behavior can't
+// be determined from a prefix alone. However, any subrouters created from that
+// route inherit the original StrictSlash setting.
+func (r *Router) StrictSlash(value bool) *Router {
+	r.strictSlash = value
+	return r
+}
+
+// SkipClean defines the path cleaning behaviour for new routes. The initial
+// value is false. Users should be careful about which routes are not cleaned
+//
+// When true, if the route path is "/path//to", it will remain with the double
+// slash. This is helpful if you have a route like: /fetch/http://xkcd.com/534/
+//
+// When false, the path will be cleaned, so /fetch/http://xkcd.com/534/ will
+// become /fetch/http/xkcd.com/534
+func (r *Router) SkipClean(value bool) *Router {
+	r.skipClean = value
+	return r
+}
+
+// UseEncodedPath tells the router to match the encoded original path
+// to the routes.
+// For eg. "/path/foo%2Fbar/to" will match the path "/path/{var}/to".
+// This behavior has the drawback of needing to match routes against
+// r.RequestURI instead of r.URL.Path. Any modifications (such as http.StripPrefix)
+// to r.URL.Path will not affect routing when this flag is on and thus may
+// induce unintended behavior.
+//
+// If not called, the router will match the unencoded path to the routes.
+// For eg. "/path/foo%2Fbar/to" will match the path "/path/foo/bar/to"
+func (r *Router) UseEncodedPath() *Router {
+	r.useEncodedPath = true
+	return r
+}
+
+// ----------------------------------------------------------------------------
+// parentRoute
+// ----------------------------------------------------------------------------
+
+// getNamedRoutes returns the map where named routes are registered.
+func (r *Router) getNamedRoutes() map[string]*Route {
+	if r.namedRoutes == nil {
+		if r.parent != nil {
+			r.namedRoutes = r.parent.getNamedRoutes()
+		} else {
+			r.namedRoutes = make(map[string]*Route)
+		}
+	}
+	return r.namedRoutes
+}
+
+// getRegexpGroup returns regexp definitions from the parent route, if any.
+func (r *Router) getRegexpGroup() *routeRegexpGroup {
+	if r.parent != nil {
+		return r.parent.getRegexpGroup()
+	}
+	return nil
+}
+
+func (r *Router) buildVars(m map[string]string) map[string]string {
+	if r.parent != nil {
+		m = r.parent.buildVars(m)
+	}
+	return m
+}
+
+// ----------------------------------------------------------------------------
+// Route factories
+// ----------------------------------------------------------------------------
+
+// NewRoute registers an empty route.
+func (r *Router) NewRoute() *Route {
+	route := &Route{parent: r, strictSlash: r.strictSlash, skipClean: r.skipClean, useEncodedPath: r.useEncodedPath}
+	r.routes = append(r.routes, route)
+	return route
+}
+
+// Handle registers a new route with a matcher for the URL path.
+// See Route.Path() and Route.Handler().
+func (r *Router) Handle(path string, handler http.Handler) *Route {
+	return r.NewRoute().Path(path).Handler(handler)
+}
+
+// HandleFunc registers a new route with a matcher for the URL path.
+// See Route.Path() and Route.HandlerFunc().
+func (r *Router) HandleFunc(path string, f func(http.ResponseWriter,
+	*http.Request)) *Route {
+	return r.NewRoute().Path(path).HandlerFunc(f)
+}
+
+// Headers registers a new route with a matcher for request header values.
+// See Route.Headers().
+func (r *Router) Headers(pairs ...string) *Route {
+	return r.NewRoute().Headers(pairs...)
+}
+
+// Host registers a new route with a matcher for the URL host.
+// See Route.Host().
+func (r *Router) Host(tpl string) *Route {
+	return r.NewRoute().Host(tpl)
+}
+
+// MatcherFunc registers a new route with a custom matcher function.
+// See Route.MatcherFunc().
+func (r *Router) MatcherFunc(f MatcherFunc) *Route {
+	return r.NewRoute().MatcherFunc(f)
+}
+
+// Methods registers a new route with a matcher for HTTP methods.
+// See Route.Methods().
+func (r *Router) Methods(methods ...string) *Route {
+	return r.NewRoute().Methods(methods...)
+}
+
+// Path registers a new route with a matcher for the URL path.
+// See Route.Path().
+func (r *Router) Path(tpl string) *Route {
+	return r.NewRoute().Path(tpl)
+}
+
+// PathPrefix registers a new route with a matcher for the URL path prefix.
+// See Route.PathPrefix().
+func (r *Router) PathPrefix(tpl string) *Route {
+	return r.NewRoute().PathPrefix(tpl)
+}
+
+// Queries registers a new route with a matcher for URL query values.
+// See Route.Queries().
+func (r *Router) Queries(pairs ...string) *Route {
+	return r.NewRoute().Queries(pairs...)
+}
+
+// Schemes registers a new route with a matcher for URL schemes.
+// See Route.Schemes().
+func (r *Router) Schemes(schemes ...string) *Route {
+	return r.NewRoute().Schemes(schemes...)
+}
+
+// BuildVarsFunc registers a new route with a custom function for modifying
+// route variables before building a URL.
+func (r *Router) BuildVarsFunc(f BuildVarsFunc) *Route {
+	return r.NewRoute().BuildVarsFunc(f)
+}
+
+// Walk walks the router and all its sub-routers, calling walkFn for each route
+// in the tree. The routes are walked in the order they were added. Sub-routers
+// are explored depth-first.
+func (r *Router) Walk(walkFn WalkFunc) error {
+	return r.walk(walkFn, []*Route{})
+}
+
+// SkipRouter is used as a return value from WalkFuncs to indicate that the
+// router that walk is about to descend down to should be skipped.
+var SkipRouter = errors.New("skip this router")
+
+// WalkFunc is the type of the function called for each route visited by Walk.
+// At every invocation, it is given the current route, and the current router,
+// and a list of ancestor routes that lead to the current route.
+type WalkFunc func(route *Route, router *Router, ancestors []*Route) error
+
+func (r *Router) walk(walkFn WalkFunc, ancestors []*Route) error {
+	for _, t := range r.routes {
+		if t.regexp == nil || t.regexp.path == nil || t.regexp.path.template == "" {
+			continue
+		}
+
+		err := walkFn(t, r, ancestors)
+		if err == SkipRouter {
+			continue
+		}
+		if err != nil {
+			return err
+		}
+		for _, sr := range t.matchers {
+			if h, ok := sr.(*Router); ok {
+				err := h.walk(walkFn, ancestors)
+				if err != nil {
+					return err
+				}
+			}
+		}
+		if h, ok := t.handler.(*Router); ok {
+			ancestors = append(ancestors, t)
+			err := h.walk(walkFn, ancestors)
+			if err != nil {
+				return err
+			}
+			ancestors = ancestors[:len(ancestors)-1]
+		}
+	}
+	return nil
+}
+
+// ----------------------------------------------------------------------------
+// Context
+// ----------------------------------------------------------------------------
+
// RouteMatch stores information about a matched route.
type RouteMatch struct {
	Route   *Route
	Handler http.Handler
	Vars    map[string]string
}

// contextKey is a private type for request-context keys, preventing
// collisions with keys defined by other packages.
type contextKey int

const (
	varsKey contextKey = iota
	routeKey
)

// Vars returns the route variables for the current request, if any.
func Vars(r *http.Request) map[string]string {
	if rv := contextGet(r, varsKey); rv != nil {
		return rv.(map[string]string)
	}
	return nil
}

// CurrentRoute returns the matched route for the current request, if any.
// This only works when called inside the handler of the matched route
// because the matched route is stored in the request context which is cleared
// after the handler returns, unless the KeepContext option is set on the
// Router.
func CurrentRoute(r *http.Request) *Route {
	if rv := contextGet(r, routeKey); rv != nil {
		return rv.(*Route)
	}
	return nil
}

// setVars stores the route variables in the request context.
func setVars(r *http.Request, val interface{}) *http.Request {
	return contextSet(r, varsKey, val)
}

// setCurrentRoute stores the matched route in the request context.
func setCurrentRoute(r *http.Request, val interface{}) *http.Request {
	return contextSet(r, routeKey, val)
}
+
+// ----------------------------------------------------------------------------
+// Helpers
+// ----------------------------------------------------------------------------
+
+// getPath returns the escaped path if possible; doing what URL.EscapedPath()
+// which was added in go1.5 does
+func getPath(req *http.Request) string {
+	if req.RequestURI != "" {
+		// Extract the path from RequestURI (which is escaped unlike URL.Path)
+		// as detailed here as detailed in https://golang.org/pkg/net/url/#URL
+		// for < 1.5 server side workaround
+		// http://localhost/path/here?v=1 -> /path/here
+		path := req.RequestURI
+		path = strings.TrimPrefix(path, req.URL.Scheme+`://`)
+		path = strings.TrimPrefix(path, req.URL.Host)
+		if i := strings.LastIndex(path, "?"); i > -1 {
+			path = path[:i]
+		}
+		if i := strings.LastIndex(path, "#"); i > -1 {
+			path = path[:i]
+		}
+		return path
+	}
+	return req.URL.Path
+}
+
// cleanPath returns the canonical form of p, collapsing "." and ".."
// elements. Adapted from the net/http package.
func cleanPath(p string) string {
	switch {
	case p == "":
		return "/"
	case p[0] != '/':
		p = "/" + p
	}
	np := path.Clean(p)
	// path.Clean strips a trailing slash (except for the root path);
	// restore it so "/a/b/" stays distinct from "/a/b".
	if strings.HasSuffix(p, "/") && np != "/" {
		np += "/"
	}
	return np
}
+
// uniqueVars returns an error if the two slices share any string; the first
// duplicate found (scanning s1 in order) is reported.
func uniqueVars(s1, s2 []string) error {
	for _, a := range s1 {
		for _, b := range s2 {
			if a != b {
				continue
			}
			return fmt.Errorf("mux: duplicated route variable %q", b)
		}
	}
	return nil
}
+
// checkPairs returns the number of strings passed in, and an error when that
// number is odd (parameters must come in key/value pairs).
func checkPairs(pairs ...string) (int, error) {
	n := len(pairs)
	if n%2 == 0 {
		return n, nil
	}
	return n, fmt.Errorf(
		"mux: number of parameters must be multiple of 2, got %v", pairs)
}
+
// mapFromPairsToString converts a flat key/value list into a string-to-string
// map. An odd-length list yields an error.
func mapFromPairsToString(pairs ...string) (map[string]string, error) {
	length := len(pairs)
	if length%2 != 0 {
		return nil, fmt.Errorf(
			"mux: number of parameters must be multiple of 2, got %v", pairs)
	}
	m := make(map[string]string, length/2)
	for i := 1; i < length; i += 2 {
		m[pairs[i-1]] = pairs[i]
	}
	return m, nil
}
+
// mapFromPairsToRegex converts a flat key/pattern list into a map of compiled
// regexps keyed by name. An odd-length list or an invalid pattern yields an
// error.
func mapFromPairsToRegex(pairs ...string) (map[string]*regexp.Regexp, error) {
	length := len(pairs)
	if length%2 != 0 {
		return nil, fmt.Errorf(
			"mux: number of parameters must be multiple of 2, got %v", pairs)
	}
	m := make(map[string]*regexp.Regexp, length/2)
	for i := 1; i < length; i += 2 {
		re, err := regexp.Compile(pairs[i])
		if err != nil {
			return nil, err
		}
		m[pairs[i-1]] = re
	}
	return m, nil
}
+
// matchInArray reports whether value appears in arr.
func matchInArray(arr []string, value string) bool {
	for i := range arr {
		if arr[i] == value {
			return true
		}
	}
	return false
}
+
// matchMapWithString reports whether every key in toCheck exists in toMatch
// and, when the expected value is non-empty, whether at least one candidate
// value equals it. An empty expected value only requires key presence.
// canonicalKey canonicalizes keys as HTTP header names before lookup.
func matchMapWithString(toCheck map[string]string, toMatch map[string][]string, canonicalKey bool) bool {
	for key, want := range toCheck {
		if canonicalKey {
			key = http.CanonicalHeaderKey(key)
		}
		values := toMatch[key]
		if values == nil {
			return false
		}
		if want == "" {
			// Presence of the key is enough.
			continue
		}
		found := false
		for _, got := range values {
			if got == want {
				found = true
				break
			}
		}
		if !found {
			return false
		}
	}
	return true
}
+
+// matchMapWithRegex returns true if the given key/value pairs exist in a given map compiled against
+// the given regex
+func matchMapWithRegex(toCheck map[string]*regexp.Regexp, toMatch map[string][]string, canonicalKey bool) bool {
+	for k, v := range toCheck {
+		// Check if key exists.
+		if canonicalKey {
+			k = http.CanonicalHeaderKey(k)
+		}
+		if values := toMatch[k]; values == nil {
+			return false
+		} else if v != nil {
+			// If value was defined as an empty string we only check that the
+			// key exists. Otherwise we also check for equality.
+			valueExists := false
+			for _, value := range values {
+				if v.MatchString(value) {
+					valueExists = true
+					break
+				}
+			}
+			if !valueExists {
+				return false
+			}
+		}
+	}
+	return true
+}
diff --git a/vendor/github.com/gorilla/mux/regexp.go b/vendor/github.com/gorilla/mux/regexp.go
new file mode 100644
index 0000000000000000000000000000000000000000..0189ad346fd1c2b4e26850af12f4f8659cd863b7
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/regexp.go
@@ -0,0 +1,323 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import (
+	"bytes"
+	"fmt"
+	"net/http"
+	"net/url"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
// newRouteRegexp parses a route template and returns a routeRegexp,
// used to match a host, a path or a query string.
//
// It will extract named variables, assemble a regexp to be matched, create
// a "reverse" template to build URLs and compile regexps to validate variable
// values used in URL building.
//
// Previously we accepted only Python-like identifiers for variable
// names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that
// name and pattern can't be empty, and names can't contain a colon.
func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash, useEncodedPath bool) (*routeRegexp, error) {
	// Check if it is well-formed.
	idxs, errBraces := braceIndices(tpl)
	if errBraces != nil {
		return nil, errBraces
	}
	// Backup the original.
	template := tpl
	// Now let's parse it.
	// Default pattern for a {name}-only variable depends on what is being
	// matched: a path segment, a query value, or a host label.
	defaultPattern := "[^/]+"
	if matchQuery {
		defaultPattern = "[^?&]*"
	} else if matchHost {
		defaultPattern = "[^.]+"
		matchPrefix = false
	}
	// Only match strict slash if not matching
	if matchPrefix || matchHost || matchQuery {
		strictSlash = false
	}
	// Set a flag for strictSlash.
	endSlash := false
	if strictSlash && strings.HasSuffix(tpl, "/") {
		tpl = tpl[:len(tpl)-1]
		endSlash = true
	}
	varsN := make([]string, len(idxs)/2)
	varsR := make([]*regexp.Regexp, len(idxs)/2)
	pattern := bytes.NewBufferString("")
	pattern.WriteByte('^')
	reverse := bytes.NewBufferString("")
	var end int
	var err error
	// Walk the {name[:pattern]} groups reported by braceIndices, building in
	// lock-step: the matching regexp, the reverse (URL-building) template,
	// and the per-variable validators.
	for i := 0; i < len(idxs); i += 2 {
		// Set all values we are interested in.
		raw := tpl[end:idxs[i]]
		end = idxs[i+1]
		parts := strings.SplitN(tpl[idxs[i]+1:end-1], ":", 2)
		name := parts[0]
		patt := defaultPattern
		if len(parts) == 2 {
			patt = parts[1]
		}
		// Name or pattern can't be empty.
		if name == "" || patt == "" {
			return nil, fmt.Errorf("mux: missing name or pattern in %q",
				tpl[idxs[i]:end])
		}
		// Build the regexp pattern.
		fmt.Fprintf(pattern, "%s(?P<%s>%s)", regexp.QuoteMeta(raw), varGroupName(i/2), patt)

		// Build the reverse template.
		fmt.Fprintf(reverse, "%s%%s", raw)

		// Append variable name and compiled pattern.
		varsN[i/2] = name
		varsR[i/2], err = regexp.Compile(fmt.Sprintf("^%s$", patt))
		if err != nil {
			return nil, err
		}
	}
	// Add the remaining.
	raw := tpl[end:]
	pattern.WriteString(regexp.QuoteMeta(raw))
	if strictSlash {
		pattern.WriteString("[/]?")
	}
	if matchQuery {
		// Add the default pattern if the query value is empty
		if queryVal := strings.SplitN(template, "=", 2)[1]; queryVal == "" {
			pattern.WriteString(defaultPattern)
		}
	}
	if !matchPrefix {
		pattern.WriteByte('$')
	}
	reverse.WriteString(raw)
	if endSlash {
		reverse.WriteByte('/')
	}
	// Compile full regexp.
	reg, errCompile := regexp.Compile(pattern.String())
	if errCompile != nil {
		return nil, errCompile
	}

	// Check for capturing groups which used to work in older versions
	if reg.NumSubexp() != len(idxs)/2 {
		panic(fmt.Sprintf("route %s contains capture groups in its regexp. ", template) +
			"Only non-capturing groups are accepted: e.g. (?:pattern) instead of (pattern)")
	}

	// Done!
	return &routeRegexp{
		template:       template,
		matchHost:      matchHost,
		matchQuery:     matchQuery,
		strictSlash:    strictSlash,
		useEncodedPath: useEncodedPath,
		regexp:         reg,
		reverse:        reverse.String(),
		varsN:          varsN,
		varsR:          varsR,
	}, nil
}
+
// routeRegexp stores a regexp to match a host or path and information to
// collect and validate route variables. Instances are produced by
// newRouteRegexp and used both for matching requests and building URLs.
type routeRegexp struct {
	// The unmodified template.
	template string
	// True for host match, false for path or query string match.
	matchHost bool
	// True for query string match, false for path and host match.
	matchQuery bool
	// The strictSlash value defined on the route, but disabled if PathPrefix was used.
	strictSlash bool
	// Determines whether to use encoded path from getPath function or unencoded
	// req.URL.Path for path matching
	useEncodedPath bool
	// Expanded regexp.
	regexp *regexp.Regexp
	// Reverse template.
	reverse string
	// Variable names.
	varsN []string
	// Variable regexps (validators).
	varsR []*regexp.Regexp
}
+
+// Match matches the regexp against the URL host or path.
+func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool {
+	if !r.matchHost {
+		if r.matchQuery {
+			return r.matchQueryString(req)
+		}
+		path := req.URL.Path
+		if r.useEncodedPath {
+			path = getPath(req)
+		}
+		return r.regexp.MatchString(path)
+	}
+
+	return r.regexp.MatchString(getHost(req))
+}
+
+// url builds a URL part using the given values.
+func (r *routeRegexp) url(values map[string]string) (string, error) {
+	urlValues := make([]interface{}, len(r.varsN))
+	for k, v := range r.varsN {
+		value, ok := values[v]
+		if !ok {
+			return "", fmt.Errorf("mux: missing route variable %q", v)
+		}
+		urlValues[k] = value
+	}
+	rv := fmt.Sprintf(r.reverse, urlValues...)
+	if !r.regexp.MatchString(rv) {
+		// The URL is checked against the full regexp, instead of checking
+		// individual variables. This is faster but to provide a good error
+		// message, we check individual regexps if the URL doesn't match.
+		for k, v := range r.varsN {
+			if !r.varsR[k].MatchString(values[v]) {
+				return "", fmt.Errorf(
+					"mux: variable %q doesn't match, expected %q", values[v],
+					r.varsR[k].String())
+			}
+		}
+	}
+	return rv, nil
+}
+
+// getURLQuery returns a single query parameter from a request URL.
+// For a URL with foo=bar&baz=ding, we return only the relevant key
+// value pair for the routeRegexp.
+func (r *routeRegexp) getURLQuery(req *http.Request) string {
+	if !r.matchQuery {
+		return ""
+	}
+	templateKey := strings.SplitN(r.template, "=", 2)[0]
+	for key, vals := range req.URL.Query() {
+		if key == templateKey && len(vals) > 0 {
+			return key + "=" + vals[0]
+		}
+	}
+	return ""
+}
+
+func (r *routeRegexp) matchQueryString(req *http.Request) bool {
+	return r.regexp.MatchString(r.getURLQuery(req))
+}
+
// braceIndices returns the start and end (exclusive) offsets of every
// top-level {...} group in s, flattened into a single slice. It returns an
// error when the braces are unbalanced.
func braceIndices(s string) ([]int, error) {
	var (
		depth int
		start int
		idxs  []int
	)
	for i := 0; i < len(s); i++ {
		switch s[i] {
		case '{':
			depth++
			if depth == 1 {
				start = i
			}
		case '}':
			depth--
			if depth == 0 {
				idxs = append(idxs, start, i+1)
			} else if depth < 0 {
				return nil, fmt.Errorf("mux: unbalanced braces in %q", s)
			}
		}
	}
	if depth != 0 {
		return nil, fmt.Errorf("mux: unbalanced braces in %q", s)
	}
	return idxs, nil
}
+
// varGroupName builds the capturing group name ("v0", "v1", ...) used for
// the idx-th variable in the assembled route regexp.
func varGroupName(idx int) string {
	return "v" + strconv.Itoa(idx)
}
+
// ----------------------------------------------------------------------------
// routeRegexpGroup
// ----------------------------------------------------------------------------

// routeRegexpGroup groups the route matchers that carry variables: at most
// one host matcher, at most one path matcher, and any number of query
// matchers.
type routeRegexpGroup struct {
	host    *routeRegexp
	path    *routeRegexp
	queries []*routeRegexp
}
+
// setMatch extracts the variables from the URL once a route matches,
// populating m.Vars with the host, path and query variables. When the route
// was built with strictSlash, it may also install a 301 redirect handler to
// normalize a trailing slash mismatch.
func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) {
	// Store host variables.
	if v.host != nil {
		host := getHost(req)
		matches := v.host.regexp.FindStringSubmatchIndex(host)
		if len(matches) > 0 {
			extractVars(host, matches, v.host.varsN, m.Vars)
		}
	}
	path := req.URL.Path
	if r.useEncodedPath {
		path = getPath(req)
	}
	// Store path variables.
	if v.path != nil {
		matches := v.path.regexp.FindStringSubmatchIndex(path)
		if len(matches) > 0 {
			extractVars(path, matches, v.path.varsN, m.Vars)
			// Check if we should redirect: with strictSlash, a trailing-slash
			// mismatch between the request path and the template triggers a
			// permanent redirect to the canonical form.
			if v.path.strictSlash {
				p1 := strings.HasSuffix(path, "/")
				p2 := strings.HasSuffix(v.path.template, "/")
				if p1 != p2 {
					u, _ := url.Parse(req.URL.String())
					if p1 {
						u.Path = u.Path[:len(u.Path)-1]
					} else {
						u.Path += "/"
					}
					m.Handler = http.RedirectHandler(u.String(), 301)
				}
			}
		}
	}
	// Store query string variables.
	for _, q := range v.queries {
		queryURL := q.getURLQuery(req)
		matches := q.regexp.FindStringSubmatchIndex(queryURL)
		if len(matches) > 0 {
			extractVars(queryURL, matches, q.varsN, m.Vars)
		}
	}
}
+
+// getHost tries its best to return the request host.
+func getHost(r *http.Request) string {
+	if r.URL.IsAbs() {
+		return r.URL.Host
+	}
+	host := r.Host
+	// Slice off any port information.
+	if i := strings.Index(host, ":"); i != -1 {
+		host = host[:i]
+	}
+	return host
+
+}
+
// extractVars copies each named submatch of input into output, using a
// FindStringSubmatchIndex-style matches slice: group i occupies positions
// 2i+2 and 2i+3 (positions 0 and 1 hold the full match).
func extractVars(input string, matches []int, names []string, output map[string]string) {
	for i := range names {
		start, end := matches[2*i+2], matches[2*i+3]
		output[names[i]] = input[start:end]
	}
}
diff --git a/vendor/github.com/gorilla/mux/route.go b/vendor/github.com/gorilla/mux/route.go
new file mode 100644
index 0000000000000000000000000000000000000000..5544c1fd6ba4f9f4abc801c114cca215ad5135c3
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/route.go
@@ -0,0 +1,636 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import (
+	"errors"
+	"fmt"
+	"net/http"
+	"net/url"
+	"regexp"
+	"strings"
+)
+
// Route stores information to match a request and build URLs.
type Route struct {
	// Parent where the route was registered (a Router).
	parent parentRoute
	// Request handler for the route.
	handler http.Handler
	// List of matchers.
	matchers []matcher
	// Manager for the variables from host and path.
	regexp *routeRegexpGroup
	// If true, when the path pattern is "/path/", accessing "/path" will
	// redirect to the former and vice versa.
	strictSlash bool
	// If true, when the path pattern is "/path//to", accessing "/path//to"
	// will not redirect
	skipClean bool
	// If true, "/path/foo%2Fbar/to" will match the path "/path/{var}/to"
	useEncodedPath bool
	// If true, this route never matches: it is only used to build URLs.
	buildOnly bool
	// The name used to build URLs.
	name string
	// Error resulted from building a route.
	err error

	// Optional hook to modify variables before a URL is built.
	buildVarsFunc BuildVarsFunc
}

// SkipClean reports whether path cleaning is disabled for this route.
func (r *Route) SkipClean() bool {
	return r.skipClean
}
+
// Match matches the route against the request. On success it records the
// route, handler and extracted variables in match (unless a more specific
// value was already recorded) and returns true.
func (r *Route) Match(req *http.Request, match *RouteMatch) bool {
	// Build-only routes and routes with a build error never match.
	if r.buildOnly || r.err != nil {
		return false
	}
	// Match everything: every registered matcher must accept the request.
	for _, m := range r.matchers {
		if matched := m.Match(req, match); !matched {
			return false
		}
	}
	// Yay, we have a match. Let's collect some info about it.
	if match.Route == nil {
		match.Route = r
	}
	if match.Handler == nil {
		match.Handler = r.handler
	}
	if match.Vars == nil {
		match.Vars = make(map[string]string)
	}
	// Set variables extracted from the host, path and query string.
	if r.regexp != nil {
		r.regexp.setMatch(req, match, r)
	}
	return true
}
+
// ----------------------------------------------------------------------------
// Route attributes
// ----------------------------------------------------------------------------

// GetError returns the error resulting from building the route, if any.
func (r *Route) GetError() error {
	return r.err
}

// BuildOnly sets the route to never match: it is only used to build URLs.
func (r *Route) BuildOnly() *Route {
	r.buildOnly = true
	return r
}

// Handler --------------------------------------------------------------------

// Handler sets a handler for the route.
func (r *Route) Handler(handler http.Handler) *Route {
	if r.err == nil {
		r.handler = handler
	}
	return r
}

// HandlerFunc sets a handler function for the route.
func (r *Route) HandlerFunc(f func(http.ResponseWriter, *http.Request)) *Route {
	return r.Handler(http.HandlerFunc(f))
}

// GetHandler returns the handler for the route, if any.
func (r *Route) GetHandler() http.Handler {
	return r.handler
}

// Name -----------------------------------------------------------------------

// Name sets the name for the route, used to build URLs.
// Setting a name on a route that already has one records an error on the
// route instead of renaming it.
func (r *Route) Name(name string) *Route {
	if r.name != "" {
		r.err = fmt.Errorf("mux: route already has name %q, can't set %q",
			r.name, name)
	}
	if r.err == nil {
		r.name = name
		r.getNamedRoutes()[name] = r
	}
	return r
}

// GetName returns the name for the route, if any.
func (r *Route) GetName() string {
	return r.name
}

// ----------------------------------------------------------------------------
// Matchers
// ----------------------------------------------------------------------------

// matcher types try to match a request.
type matcher interface {
	Match(*http.Request, *RouteMatch) bool
}

// addMatcher adds a matcher to the route. It is a no-op when the route
// already carries a build error.
func (r *Route) addMatcher(m matcher) *Route {
	if r.err == nil {
		r.matchers = append(r.matchers, m)
	}
	return r
}
+
// addRegexpMatcher adds a host, path or query matcher (and URL builder) to a
// route. Path templates are concatenated onto any inherited parent path, and
// variable names are checked for uniqueness across host, path and queries.
func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix, matchQuery bool) error {
	if r.err != nil {
		return r.err
	}
	r.regexp = r.getRegexpGroup()
	if !matchHost && !matchQuery {
		if len(tpl) > 0 && tpl[0] != '/' {
			return fmt.Errorf("mux: path must start with a slash, got %q", tpl)
		}
		// Prepend the parent's path template so subrouter paths compose.
		if r.regexp.path != nil {
			tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl
		}
	}
	rr, err := newRouteRegexp(tpl, matchHost, matchPrefix, matchQuery, r.strictSlash, r.useEncodedPath)
	if err != nil {
		return err
	}
	// A variable name may appear only once across all matchers of the route.
	for _, q := range r.regexp.queries {
		if err = uniqueVars(rr.varsN, q.varsN); err != nil {
			return err
		}
	}
	if matchHost {
		if r.regexp.path != nil {
			if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil {
				return err
			}
		}
		r.regexp.host = rr
	} else {
		if r.regexp.host != nil {
			if err = uniqueVars(rr.varsN, r.regexp.host.varsN); err != nil {
				return err
			}
		}
		if matchQuery {
			r.regexp.queries = append(r.regexp.queries, rr)
		} else {
			r.regexp.path = rr
		}
	}
	r.addMatcher(rr)
	return nil
}
+
// Headers --------------------------------------------------------------------

// headerMatcher matches the request against header values.
type headerMatcher map[string]string

func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool {
	return matchMapWithString(m, r.Header, true)
}

// Headers adds a matcher for request header values.
// It accepts a sequence of key/value pairs to be matched. For example:
//
//     r := mux.NewRouter()
//     r.Headers("Content-Type", "application/json",
//               "X-Requested-With", "XMLHttpRequest")
//
// The above route will only match if both request header values match.
// If the value is an empty string, it will match any value if the key is set.
func (r *Route) Headers(pairs ...string) *Route {
	if r.err == nil {
		var headers map[string]string
		headers, r.err = mapFromPairsToString(pairs...)
		return r.addMatcher(headerMatcher(headers))
	}
	return r
}

// headerRegexMatcher matches the request against the route given a regex for the header
type headerRegexMatcher map[string]*regexp.Regexp

func (m headerRegexMatcher) Match(r *http.Request, match *RouteMatch) bool {
	return matchMapWithRegex(m, r.Header, true)
}

// HeadersRegexp accepts a sequence of key/value pairs, where the value has
// regex support. For example:
//
//     r := mux.NewRouter()
//     r.HeadersRegexp("Content-Type", "application/(text|json)",
//               "X-Requested-With", "XMLHttpRequest")
//
// The above route will only match if the request headers match both regular
// expressions.
// If the value is an empty string, it will match any value if the key is set.
func (r *Route) HeadersRegexp(pairs ...string) *Route {
	if r.err == nil {
		var headers map[string]*regexp.Regexp
		headers, r.err = mapFromPairsToRegex(pairs...)
		return r.addMatcher(headerRegexMatcher(headers))
	}
	return r
}
+
// Host -----------------------------------------------------------------------

// Host adds a matcher for the URL host.
// It accepts a template with zero or more URL variables enclosed by {}.
// Variables can define an optional regexp pattern to be matched:
//
// - {name} matches anything until the next dot.
//
// - {name:pattern} matches the given regexp pattern.
//
// For example:
//
//     r := mux.NewRouter()
//     r.Host("www.example.com")
//     r.Host("{subdomain}.domain.com")
//     r.Host("{subdomain:[a-z]+}.domain.com")
//
// Variable names must be unique in a given route. They can be retrieved
// calling mux.Vars(request).
func (r *Route) Host(tpl string) *Route {
	r.err = r.addRegexpMatcher(tpl, true, false, false)
	return r
}

// MatcherFunc ----------------------------------------------------------------

// MatcherFunc is the function signature used by custom matchers.
type MatcherFunc func(*http.Request, *RouteMatch) bool

// Match returns the match for a given request.
func (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool {
	return m(r, match)
}

// MatcherFunc adds a custom function to be used as request matcher.
func (r *Route) MatcherFunc(f MatcherFunc) *Route {
	return r.addMatcher(f)
}
+
+// Methods --------------------------------------------------------------------
+
+// methodMatcher matches the request against HTTP methods.
+type methodMatcher []string
+
+func (m methodMatcher) Match(r *http.Request, match *RouteMatch) bool {
+	return matchInArray(m, r.Method)
+}
+
+// Methods adds a matcher for HTTP methods.
+// It accepts a sequence of one or more methods to be matched, e.g.:
+// "GET", "POST", "PUT".
+func (r *Route) Methods(methods ...string) *Route {
+	for k, v := range methods {
+		methods[k] = strings.ToUpper(v)
+	}
+	return r.addMatcher(methodMatcher(methods))
+}
+
// Path -----------------------------------------------------------------------

// Path adds a matcher for the URL path.
// It accepts a template with zero or more URL variables enclosed by {}. The
// template must start with a "/".
// Variables can define an optional regexp pattern to be matched:
//
// - {name} matches anything until the next slash.
//
// - {name:pattern} matches the given regexp pattern.
//
// For example:
//
//     r := mux.NewRouter()
//     r.Path("/products/").Handler(ProductsHandler)
//     r.Path("/products/{key}").Handler(ProductsHandler)
//     r.Path("/articles/{category}/{id:[0-9]+}").
//       Handler(ArticleHandler)
//
// Variable names must be unique in a given route. They can be retrieved
// calling mux.Vars(request).
func (r *Route) Path(tpl string) *Route {
	r.err = r.addRegexpMatcher(tpl, false, false, false)
	return r
}

// PathPrefix -----------------------------------------------------------------

// PathPrefix adds a matcher for the URL path prefix. This matches if the given
// template is a prefix of the full URL path. See Route.Path() for details on
// the tpl argument.
//
// Note that it does not treat slashes specially ("/foobar/" will be matched by
// the prefix "/foo") so you may want to use a trailing slash here.
//
// Also note that the setting of Router.StrictSlash() has no effect on routes
// with a PathPrefix matcher.
func (r *Route) PathPrefix(tpl string) *Route {
	r.err = r.addRegexpMatcher(tpl, false, true, false)
	return r
}
+
+// Query ----------------------------------------------------------------------
+
+// Queries adds a matcher for URL query values.
+// It accepts a sequence of key/value pairs. Values may define variables.
+// For example:
+//
+//     r := mux.NewRouter()
+//     r.Queries("foo", "bar", "id", "{id:[0-9]+}")
+//
+// The above route will only match if the URL contains the defined queries
+// values, e.g.: ?foo=bar&id=42.
+//
+// It the value is an empty string, it will match any value if the key is set.
+//
+// Variables can define an optional regexp pattern to be matched:
+//
+// - {name} matches anything until the next slash.
+//
+// - {name:pattern} matches the given regexp pattern.
+func (r *Route) Queries(pairs ...string) *Route {
+	length := len(pairs)
+	if length%2 != 0 {
+		r.err = fmt.Errorf(
+			"mux: number of parameters must be multiple of 2, got %v", pairs)
+		return nil
+	}
+	for i := 0; i < length; i += 2 {
+		if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], false, false, true); r.err != nil {
+			return r
+		}
+	}
+
+	return r
+}
+
+// Schemes --------------------------------------------------------------------
+
+// schemeMatcher matches the request against URL schemes.
+type schemeMatcher []string
+
+func (m schemeMatcher) Match(r *http.Request, match *RouteMatch) bool {
+	return matchInArray(m, r.URL.Scheme)
+}
+
+// Schemes adds a matcher for URL schemes.
+// It accepts a sequence of schemes to be matched, e.g.: "http", "https".
+func (r *Route) Schemes(schemes ...string) *Route {
+	for k, v := range schemes {
+		schemes[k] = strings.ToLower(v)
+	}
+	return r.addMatcher(schemeMatcher(schemes))
+}
+
// BuildVarsFunc --------------------------------------------------------------

// BuildVarsFunc is the function signature used by custom build variable
// functions (which can modify route variables before a route's URL is built).
type BuildVarsFunc func(map[string]string) map[string]string

// BuildVarsFunc adds a custom function to be used to modify build variables
// before a route's URL is built.
func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route {
	r.buildVarsFunc = f
	return r
}
+
// Subrouter ------------------------------------------------------------------

// Subrouter creates a subrouter for the route.
//
// It will test the inner routes only if the parent route matched. For example:
//
//     r := mux.NewRouter()
//     s := r.Host("www.example.com").Subrouter()
//     s.HandleFunc("/products/", ProductsHandler)
//     s.HandleFunc("/products/{key}", ProductHandler)
//     s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
//
// Here, the routes registered in the subrouter won't be tested if the host
// doesn't match.
func (r *Route) Subrouter() *Router {
	router := &Router{parent: r, strictSlash: r.strictSlash}
	r.addMatcher(router)
	return router
}
+
// ----------------------------------------------------------------------------
// URL building
// ----------------------------------------------------------------------------

// URL builds a URL for the route.
//
// It accepts a sequence of key/value pairs for the route variables. For
// example, given this route:
//
//     r := mux.NewRouter()
//     r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
//       Name("article")
//
// ...a URL for it can be built using:
//
//     url, err := r.Get("article").URL("category", "technology", "id", "42")
//
// ...which will return an url.URL with the following path:
//
//     "/articles/technology/42"
//
// This also works for host variables:
//
//     r := mux.NewRouter()
//     r.Host("{subdomain}.domain.com").
//       HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
//       Name("article")
//
//     // url.String() will be "http://news.domain.com/articles/technology/42"
//     url, err := r.Get("article").URL("subdomain", "news",
//                                      "category", "technology",
//                                      "id", "42")
//
// All variables defined in the route are required, and their values must
// conform to the corresponding patterns.
func (r *Route) URL(pairs ...string) (*url.URL, error) {
	if r.err != nil {
		return nil, r.err
	}
	if r.regexp == nil {
		return nil, errors.New("mux: route doesn't have a host or path")
	}
	values, err := r.prepareVars(pairs...)
	if err != nil {
		return nil, err
	}
	var scheme, host, path string
	if r.regexp.host != nil {
		// Set a default scheme; a host without a scheme is not a usable URL.
		scheme = "http"
		if host, err = r.regexp.host.url(values); err != nil {
			return nil, err
		}
	}
	if r.regexp.path != nil {
		if path, err = r.regexp.path.url(values); err != nil {
			return nil, err
		}
	}
	return &url.URL{
		Scheme: scheme,
		Host:   host,
		Path:   path,
	}, nil
}
+
// URLHost builds the host part of the URL for a route. See Route.URL().
//
// The route must have a host defined.
func (r *Route) URLHost(pairs ...string) (*url.URL, error) {
	if r.err != nil {
		return nil, r.err
	}
	if r.regexp == nil || r.regexp.host == nil {
		return nil, errors.New("mux: route doesn't have a host")
	}
	values, err := r.prepareVars(pairs...)
	if err != nil {
		return nil, err
	}
	host, err := r.regexp.host.url(values)
	if err != nil {
		return nil, err
	}
	// Default to "http" so the result is an absolute, usable URL.
	return &url.URL{
		Scheme: "http",
		Host:   host,
	}, nil
}

// URLPath builds the path part of the URL for a route. See Route.URL().
//
// The route must have a path defined.
func (r *Route) URLPath(pairs ...string) (*url.URL, error) {
	if r.err != nil {
		return nil, r.err
	}
	if r.regexp == nil || r.regexp.path == nil {
		return nil, errors.New("mux: route doesn't have a path")
	}
	values, err := r.prepareVars(pairs...)
	if err != nil {
		return nil, err
	}
	path, err := r.regexp.path.url(values)
	if err != nil {
		return nil, err
	}
	return &url.URL{
		Path: path,
	}, nil
}
+
// GetPathTemplate returns the template used to build the
// route match.
// This is useful for building simple REST API documentation and for instrumentation
// against third-party services.
// An error will be returned if the route does not define a path.
func (r *Route) GetPathTemplate() (string, error) {
	if r.err != nil {
		return "", r.err
	}
	if r.regexp == nil || r.regexp.path == nil {
		return "", errors.New("mux: route doesn't have a path")
	}
	return r.regexp.path.template, nil
}

// GetHostTemplate returns the template used to build the
// route match.
// This is useful for building simple REST API documentation and for instrumentation
// against third-party services.
// An error will be returned if the route does not define a host.
func (r *Route) GetHostTemplate() (string, error) {
	if r.err != nil {
		return "", r.err
	}
	if r.regexp == nil || r.regexp.host == nil {
		return "", errors.New("mux: route doesn't have a host")
	}
	return r.regexp.host.template, nil
}
+
// prepareVars converts the route variable pairs into a map. If the route has a
// BuildVarsFunc, it is invoked.
func (r *Route) prepareVars(pairs ...string) (map[string]string, error) {
	m, err := mapFromPairsToString(pairs...)
	if err != nil {
		return nil, err
	}
	return r.buildVars(m), nil
}

// buildVars applies the build-variable hooks from the root of the route tree
// down to this route (parents first), returning the transformed map.
func (r *Route) buildVars(m map[string]string) map[string]string {
	if r.parent != nil {
		m = r.parent.buildVars(m)
	}
	if r.buildVarsFunc != nil {
		m = r.buildVarsFunc(m)
	}
	return m
}
+
// ----------------------------------------------------------------------------
// parentRoute
// ----------------------------------------------------------------------------

// parentRoute allows routes to know about parent host and path definitions.
// Both Router and Route implement it, forming a chain up to the root router.
type parentRoute interface {
	getNamedRoutes() map[string]*Route
	getRegexpGroup() *routeRegexpGroup
	buildVars(map[string]string) map[string]string
}

// getNamedRoutes returns the map where named routes are registered, delegating
// to the parent (ultimately the root router).
func (r *Route) getNamedRoutes() map[string]*Route {
	if r.parent == nil {
		// During tests router is not always set.
		r.parent = NewRouter()
	}
	return r.parent.getNamedRoutes()
}
+
+// getRegexpGroup returns regexp definitions from this route.
+func (r *Route) getRegexpGroup() *routeRegexpGroup {
+	if r.regexp == nil {
+		if r.parent == nil {
+			// During tests router is not always set.
+			r.parent = NewRouter()
+		}
+		regexp := r.parent.getRegexpGroup()
+		if regexp == nil {
+			r.regexp = new(routeRegexpGroup)
+		} else {
+			// Copy.
+			r.regexp = &routeRegexpGroup{
+				host:    regexp.host,
+				path:    regexp.path,
+				queries: regexp.queries,
+			}
+		}
+	}
+	return r.regexp
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/devices/devices_unix.go b/vendor/github.com/opencontainers/runc/libcontainer/devices/devices_unix.go
new file mode 100644
index 0000000000000000000000000000000000000000..c02b73e3e6a39939a4bb6f82869bef270795e925
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/devices/devices_unix.go
@@ -0,0 +1,102 @@
+// +build linux freebsd
+
+package devices
+
+import (
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"syscall"
+
+	"github.com/opencontainers/runc/libcontainer/configs"
+)
+
+var (
+	ErrNotADevice = errors.New("not a device node")
+)
+
+// Testing dependencies
+var (
+	osLstat       = os.Lstat
+	ioutilReadDir = ioutil.ReadDir
+)
+
+// Given the path to a device and its cgroup_permissions (which cannot be easily queried), look up the information about a Linux device and return that information as a Device struct.
+func DeviceFromPath(path, permissions string) (*configs.Device, error) {
+	fileInfo, err := osLstat(path)
+	if err != nil {
+		return nil, err
+	}
+	var (
+		devType                rune
+		mode                   = fileInfo.Mode()
+		fileModePermissionBits = os.FileMode.Perm(mode)
+	)
+	switch {
+	case mode&os.ModeDevice == 0:
+		return nil, ErrNotADevice
+	case mode&os.ModeCharDevice != 0:
+		fileModePermissionBits |= syscall.S_IFCHR
+		devType = 'c'
+	default:
+		fileModePermissionBits |= syscall.S_IFBLK
+		devType = 'b'
+	}
+	stat_t, ok := fileInfo.Sys().(*syscall.Stat_t)
+	if !ok {
+		return nil, fmt.Errorf("cannot determine the device number for device %s", path)
+	}
+	devNumber := int(stat_t.Rdev)
+	return &configs.Device{
+		Type:        devType,
+		Path:        path,
+		Major:       Major(devNumber),
+		Minor:       Minor(devNumber),
+		Permissions: permissions,
+		FileMode:    fileModePermissionBits,
+		Uid:         stat_t.Uid,
+		Gid:         stat_t.Gid,
+	}, nil
+}
+
+func HostDevices() ([]*configs.Device, error) {
+	return getDevices("/dev")
+}
+
+func getDevices(path string) ([]*configs.Device, error) {
+	files, err := ioutilReadDir(path)
+	if err != nil {
+		return nil, err
+	}
+	out := []*configs.Device{}
+	for _, f := range files {
+		switch {
+		case f.IsDir():
+			switch f.Name() {
+			case "pts", "shm", "fd", "mqueue":
+				continue
+			default:
+				sub, err := getDevices(filepath.Join(path, f.Name()))
+				if err != nil {
+					return nil, err
+				}
+
+				out = append(out, sub...)
+				continue
+			}
+		case f.Name() == "console":
+			continue
+		}
+		device, err := DeviceFromPath(filepath.Join(path, f.Name()), "rwm")
+		if err != nil {
+			if err == ErrNotADevice {
+				continue
+			}
+			return nil, err
+		}
+		out = append(out, device)
+	}
+	return out, nil
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/devices/devices_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/devices/devices_unsupported.go
new file mode 100644
index 0000000000000000000000000000000000000000..1e84033daff8297f2e183a1dad5dace4094f3c2c
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/devices/devices_unsupported.go
@@ -0,0 +1,3 @@
+// +build windows
+
+package devices
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/devices/number.go b/vendor/github.com/opencontainers/runc/libcontainer/devices/number.go
new file mode 100644
index 0000000000000000000000000000000000000000..885b6e5dd9363d5a746b6753bee90e394d81bfbd
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/devices/number.go
@@ -0,0 +1,24 @@
+// +build linux freebsd
+
+package devices
+
+/*
+
+This code provides support for manipulating linux device numbers.  It should be replaced by normal syscall functions once http://code.google.com/p/go/issues/detail?id=8106 is solved.
+
+You can read what they are here:
+
+ - http://www.makelinux.net/ldd3/chp-3-sect-2
+ - http://www.linux-tutorial.info/modules.php?name=MContent&pageid=94
+
+Note! These are NOT the same as the MAJOR(dev_t device);, MINOR(dev_t device); and MKDEV(int major, int minor); functions as defined in <linux/kdev_t.h>, as the representation of device numbers used by Go is different from the one used internally by the kernel! - https://github.com/torvalds/linux/blob/master/include/linux/kdev_t.h#L9
+
+*/
+
+func Major(devNumber int) int64 {
+	return int64((devNumber >> 8) & 0xfff)
+}
+
+func Minor(devNumber int) int64 {
+	return int64((devNumber & 0xff) | ((devNumber >> 12) & 0xfff00))
+}
diff --git a/vendor/github.com/opencontainers/runtime-spec/LICENSE b/vendor/github.com/opencontainers/runtime-spec/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..bdc403653e0a053959f28d1d83f72aab72ff20ac
--- /dev/null
+++ b/vendor/github.com/opencontainers/runtime-spec/LICENSE
@@ -0,0 +1,191 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   Copyright 2015 The Linux Foundation.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go
new file mode 100644
index 0000000000000000000000000000000000000000..4733a1a6261dafa4673ca306495d6de835e1bbc0
--- /dev/null
+++ b/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go
@@ -0,0 +1,471 @@
+package specs
+
+import "os"
+
+// Spec is the base configuration for the container.
+type Spec struct {
+	// Version of the Open Container Runtime Specification with which the bundle complies.
+	Version string `json:"ociVersion"`
+	// Platform specifies the configuration's target platform.
+	Platform Platform `json:"platform"`
+	// Process configures the container process.
+	Process Process `json:"process"`
+	// Root configures the container's root filesystem.
+	Root Root `json:"root"`
+	// Hostname configures the container's hostname.
+	Hostname string `json:"hostname,omitempty"`
+	// Mounts configures additional mounts (on top of Root).
+	Mounts []Mount `json:"mounts,omitempty"`
+	// Hooks configures callbacks for container lifecycle events.
+	Hooks Hooks `json:"hooks"`
+	// Annotations contains arbitrary metadata for the container.
+	Annotations map[string]string `json:"annotations,omitempty"`
+
+	// Linux is platform specific configuration for Linux based containers.
+	Linux *Linux `json:"linux,omitempty" platform:"linux"`
+	// Solaris is platform specific configuration for Solaris containers.
+	Solaris *Solaris `json:"solaris,omitempty" platform:"solaris"`
+}
+
+// Process contains information to start a specific application inside the container.
+type Process struct {
+	// Terminal creates an interactive terminal for the container.
+	Terminal bool `json:"terminal,omitempty"`
+	// User specifies user information for the process.
+	User User `json:"user"`
+	// Args specifies the binary and arguments for the application to execute.
+	Args []string `json:"args"`
+	// Env populates the process environment for the process.
+	Env []string `json:"env,omitempty"`
+	// Cwd is the current working directory for the process and must be
+	// relative to the container's root.
+	Cwd string `json:"cwd"`
+	// Capabilities are Linux capabilities that are kept for the container.
+	Capabilities []string `json:"capabilities,omitempty" platform:"linux"`
+	// Rlimits specifies rlimit options to apply to the process.
+	Rlimits []Rlimit `json:"rlimits,omitempty" platform:"linux"`
+	// NoNewPrivileges controls whether additional privileges could be gained by processes in the container.
+	NoNewPrivileges bool `json:"noNewPrivileges,omitempty" platform:"linux"`
+	// ApparmorProfile specifies the apparmor profile for the container.
+	ApparmorProfile string `json:"apparmorProfile,omitempty" platform:"linux"`
+	// SelinuxLabel specifies the selinux context that the container process is run as.
+	SelinuxLabel string `json:"selinuxLabel,omitempty" platform:"linux"`
+}
+
+// User specifies specific user (and group) information for the container process.
+type User struct {
+	// UID is the user id.
+	UID uint32 `json:"uid" platform:"linux,solaris"`
+	// GID is the group id.
+	GID uint32 `json:"gid" platform:"linux,solaris"`
+	// AdditionalGids are additional group ids set for the container's process.
+	AdditionalGids []uint32 `json:"additionalGids,omitempty" platform:"linux,solaris"`
+	// Username is the user name.
+	Username string `json:"username,omitempty" platform:"windows"`
+}
+
+// Root contains information about the container's root filesystem on the host.
+type Root struct {
+	// Path is the absolute path to the container's root filesystem.
+	Path string `json:"path"`
+	// Readonly makes the root filesystem for the container readonly before the process is executed.
+	Readonly bool `json:"readonly,omitempty"`
+}
+
+// Platform specifies OS and arch information for the host system that the container
+// is created for.
+type Platform struct {
+	// OS is the operating system.
+	OS string `json:"os"`
+	// Arch is the architecture.
+	Arch string `json:"arch"`
+}
+
+// Mount specifies a mount for a container.
+type Mount struct {
+	// Destination is the path where the mount will be placed relative to the container's root.  The path and child directories MUST exist, a runtime MUST NOT create directories automatically to a mount point.
+	Destination string `json:"destination"`
+	// Type specifies the mount kind.
+	Type string `json:"type"`
+	// Source specifies the source path of the mount.  In the case of bind mounts on
+	// Linux based systems this would be the file on the host.
+	Source string `json:"source"`
+	// Options are fstab style mount options.
+	Options []string `json:"options,omitempty"`
+}
+
+// Hook specifies a command that is run at a particular event in the lifecycle of a container
+type Hook struct {
+	Path    string   `json:"path"`
+	Args    []string `json:"args,omitempty"`
+	Env     []string `json:"env,omitempty"`
+	Timeout *int     `json:"timeout,omitempty"`
+}
+
+// Hooks for container setup and teardown
+type Hooks struct {
+	// Prestart is a list of hooks to be run before the container process is executed.
+	// On Linux, they are run after the container namespaces are created.
+	Prestart []Hook `json:"prestart,omitempty"`
+	// Poststart is a list of hooks to be run after the container process is started.
+	Poststart []Hook `json:"poststart,omitempty"`
+	// Poststop is a list of hooks to be run after the container process exits.
+	Poststop []Hook `json:"poststop,omitempty"`
+}
+
+// Linux contains platform specific configuration for Linux based containers.
+type Linux struct {
+	// UIDMapping specifies user mappings for supporting user namespaces on Linux.
+	UIDMappings []IDMapping `json:"uidMappings,omitempty"`
+	// GIDMapping specifies group mappings for supporting user namespaces on Linux.
+	GIDMappings []IDMapping `json:"gidMappings,omitempty"`
+	// Sysctl are a set of key value pairs that are set for the container on start
+	Sysctl map[string]string `json:"sysctl,omitempty"`
+	// Resources contain cgroup information for handling resource constraints
+	// for the container
+	Resources *Resources `json:"resources,omitempty"`
+	// CgroupsPath specifies the path to cgroups that are created and/or joined by the container.
+	// The path is expected to be relative to the cgroups mountpoint.
+	// If resources are specified, the cgroups at CgroupsPath will be updated based on resources.
+	CgroupsPath *string `json:"cgroupsPath,omitempty"`
+	// Namespaces contains the namespaces that are created and/or joined by the container
+	Namespaces []Namespace `json:"namespaces,omitempty"`
+	// Devices are a list of device nodes that are created for the container
+	Devices []Device `json:"devices,omitempty"`
+	// Seccomp specifies the seccomp security settings for the container.
+	Seccomp *Seccomp `json:"seccomp,omitempty"`
+	// RootfsPropagation is the rootfs mount propagation mode for the container.
+	RootfsPropagation string `json:"rootfsPropagation,omitempty"`
+	// MaskedPaths masks over the provided paths inside the container.
+	MaskedPaths []string `json:"maskedPaths,omitempty"`
+	// ReadonlyPaths sets the provided paths as RO inside the container.
+	ReadonlyPaths []string `json:"readonlyPaths,omitempty"`
+	// MountLabel specifies the selinux context for the mounts in the container.
+	MountLabel string `json:"mountLabel,omitempty"`
+}
+
+// Namespace is the configuration for a Linux namespace
+type Namespace struct {
+	// Type is the type of Linux namespace
+	Type NamespaceType `json:"type"`
+	// Path is a path to an existing namespace persisted on disk that can be joined
+	// and is of the same type
+	Path string `json:"path,omitempty"`
+}
+
+// NamespaceType is one of the Linux namespaces
+type NamespaceType string
+
+const (
+	// PIDNamespace for isolating process IDs
+	PIDNamespace NamespaceType = "pid"
+	// NetworkNamespace for isolating network devices, stacks, ports, etc
+	NetworkNamespace = "network"
+	// MountNamespace for isolating mount points
+	MountNamespace = "mount"
+	// IPCNamespace for isolating System V IPC, POSIX message queues
+	IPCNamespace = "ipc"
+	// UTSNamespace for isolating hostname and NIS domain name
+	UTSNamespace = "uts"
+	// UserNamespace for isolating user and group IDs
+	UserNamespace = "user"
+	// CgroupNamespace for isolating cgroup hierarchies
+	CgroupNamespace = "cgroup"
+)
+
+// IDMapping specifies UID/GID mappings
+type IDMapping struct {
+	// HostID is the UID/GID of the host user or group
+	HostID uint32 `json:"hostID"`
+	// ContainerID is the UID/GID of the container's user or group
+	ContainerID uint32 `json:"containerID"`
+	// Size is the length of the range of IDs mapped between the two namespaces
+	Size uint32 `json:"size"`
+}
+
+// Rlimit type and restrictions
+type Rlimit struct {
+	// Type of the rlimit to set
+	Type string `json:"type"`
+	// Hard is the hard limit for the specified type
+	Hard uint64 `json:"hard"`
+	// Soft is the soft limit for the specified type
+	Soft uint64 `json:"soft"`
+}
+
+// HugepageLimit structure corresponds to limiting kernel hugepages
+type HugepageLimit struct {
+	// Pagesize is the hugepage size
+	Pagesize *string `json:"pageSize,omitempty"`
+	// Limit is the limit of "hugepagesize" hugetlb usage
+	Limit *uint64 `json:"limit,omitempty"`
+}
+
+// InterfacePriority for network interfaces
+type InterfacePriority struct {
+	// Name is the name of the network interface
+	Name string `json:"name"`
+	// Priority for the interface
+	Priority uint32 `json:"priority"`
+}
+
+// blockIODevice holds major:minor format supported in blkio cgroup
+type blockIODevice struct {
+	// Major is the device's major number.
+	Major int64 `json:"major"`
+	// Minor is the device's minor number.
+	Minor int64 `json:"minor"`
+}
+
+// WeightDevice struct holds a `major:minor weight` pair for blkioWeightDevice
+type WeightDevice struct {
+	blockIODevice
+	// Weight is the bandwidth rate for the device, range is from 10 to 1000
+	Weight *uint16 `json:"weight,omitempty"`
+	// LeafWeight is the bandwidth rate for the device while competing with the cgroup's child cgroups, range is from 10 to 1000, CFQ scheduler only
+	LeafWeight *uint16 `json:"leafWeight,omitempty"`
+}
+
+// ThrottleDevice struct holds a `major:minor rate_per_second` pair
+type ThrottleDevice struct {
+	blockIODevice
+	// Rate is the IO rate limit per cgroup per device
+	Rate *uint64 `json:"rate,omitempty"`
+}
+
+// BlockIO for Linux cgroup 'blkio' resource management
+type BlockIO struct {
+	// Specifies per cgroup weight, range is from 10 to 1000
+	Weight *uint16 `json:"blkioWeight,omitempty"`
+	// Specifies tasks' weight in the given cgroup while competing with the cgroup's child cgroups, range is from 10 to 1000, CFQ scheduler only
+	LeafWeight *uint16 `json:"blkioLeafWeight,omitempty"`
+	// Weight per cgroup per device, can override BlkioWeight
+	WeightDevice []WeightDevice `json:"blkioWeightDevice,omitempty"`
+	// IO read rate limit per cgroup per device, bytes per second
+	ThrottleReadBpsDevice []ThrottleDevice `json:"blkioThrottleReadBpsDevice,omitempty"`
+	// IO write rate limit per cgroup per device, bytes per second
+	ThrottleWriteBpsDevice []ThrottleDevice `json:"blkioThrottleWriteBpsDevice,omitempty"`
+	// IO read rate limit per cgroup per device, IO per second
+	ThrottleReadIOPSDevice []ThrottleDevice `json:"blkioThrottleReadIOPSDevice,omitempty"`
+	// IO write rate limit per cgroup per device, IO per second
+	ThrottleWriteIOPSDevice []ThrottleDevice `json:"blkioThrottleWriteIOPSDevice,omitempty"`
+}
+
+// Memory for Linux cgroup 'memory' resource management
+type Memory struct {
+	// Memory limit (in bytes).
+	Limit *uint64 `json:"limit,omitempty"`
+	// Memory reservation or soft_limit (in bytes).
+	Reservation *uint64 `json:"reservation,omitempty"`
+	// Total memory limit (memory + swap).
+	Swap *uint64 `json:"swap,omitempty"`
+	// Kernel memory limit (in bytes).
+	Kernel *uint64 `json:"kernel,omitempty"`
+	// Kernel memory limit for tcp (in bytes)
+	KernelTCP *uint64 `json:"kernelTCP,omitempty"`
+	// How aggressive the kernel will swap memory pages. Range from 0 to 100.
+	Swappiness *uint64 `json:"swappiness,omitempty"`
+}
+
+// CPU for Linux cgroup 'cpu' resource management
+type CPU struct {
+	// CPU shares (relative weight (ratio) vs. other cgroups with cpu shares).
+	Shares *uint64 `json:"shares,omitempty"`
+	// CPU hardcap limit (in usecs). Allowed cpu time in a given period.
+	Quota *uint64 `json:"quota,omitempty"`
+	// CPU period to be used for hardcapping (in usecs).
+	Period *uint64 `json:"period,omitempty"`
+	// How much time realtime scheduling may use (in usecs).
+	RealtimeRuntime *uint64 `json:"realtimeRuntime,omitempty"`
+	// CPU period to be used for realtime scheduling (in usecs).
+	RealtimePeriod *uint64 `json:"realtimePeriod,omitempty"`
+	// CPUs to use within the cpuset. Default is to use any CPU available.
+	Cpus *string `json:"cpus,omitempty"`
+	// List of memory nodes in the cpuset. Default is to use any available memory node.
+	Mems *string `json:"mems,omitempty"`
+}
+
+// Pids for Linux cgroup 'pids' resource management (Linux 4.3)
+type Pids struct {
+	// Maximum number of PIDs. Default is "no limit".
+	Limit *int64 `json:"limit,omitempty"`
+}
+
+// Network identification and priority configuration
+type Network struct {
+	// Set class identifier for container's network packets
+	ClassID *uint32 `json:"classID,omitempty"`
+	// Set priority of network traffic for container
+	Priorities []InterfacePriority `json:"priorities,omitempty"`
+}
+
+// Resources has container runtime resource constraints
+type Resources struct {
+	// Devices configures the device whitelist.
+	Devices []DeviceCgroup `json:"devices,omitempty"`
+	// DisableOOMKiller disables the OOM killer for out of memory conditions
+	DisableOOMKiller *bool `json:"disableOOMKiller,omitempty"`
+	// Specify an oom_score_adj for the container.
+	OOMScoreAdj *int `json:"oomScoreAdj,omitempty"`
+	// Memory restriction configuration
+	Memory *Memory `json:"memory,omitempty"`
+	// CPU resource restriction configuration
+	CPU *CPU `json:"cpu,omitempty"`
+	// Task resource restriction configuration.
+	Pids *Pids `json:"pids,omitempty"`
+	// BlockIO restriction configuration
+	BlockIO *BlockIO `json:"blockIO,omitempty"`
+	// Hugetlb limit (in bytes)
+	HugepageLimits []HugepageLimit `json:"hugepageLimits,omitempty"`
+	// Network restriction configuration
+	Network *Network `json:"network,omitempty"`
+}
+
+// Device represents the mknod information for a Linux special device file
+type Device struct {
+	// Path to the device.
+	Path string `json:"path"`
+	// Device type, block, char, etc.
+	Type string `json:"type"`
+	// Major is the device's major number.
+	Major int64 `json:"major"`
+	// Minor is the device's minor number.
+	Minor int64 `json:"minor"`
+	// FileMode permission bits for the device.
+	FileMode *os.FileMode `json:"fileMode,omitempty"`
+	// UID of the device.
+	UID *uint32 `json:"uid,omitempty"`
+	// Gid of the device.
+	GID *uint32 `json:"gid,omitempty"`
+}
+
+// DeviceCgroup represents a device rule for the whitelist controller
+type DeviceCgroup struct {
+	// Allow or deny
+	Allow bool `json:"allow"`
+	// Device type, block, char, etc.
+	Type *string `json:"type,omitempty"`
+	// Major is the device's major number.
+	Major *int64 `json:"major,omitempty"`
+	// Minor is the device's minor number.
+	Minor *int64 `json:"minor,omitempty"`
+	// Cgroup access permissions format, rwm.
+	Access *string `json:"access,omitempty"`
+}
+
+// Seccomp represents syscall restrictions
+type Seccomp struct {
+	DefaultAction Action    `json:"defaultAction"`
+	Architectures []Arch    `json:"architectures"`
+	Syscalls      []Syscall `json:"syscalls,omitempty"`
+}
+
+// Solaris contains platform specific configuration for Solaris application containers.
+type Solaris struct {
+	// SMF FMRI which should go "online" before we start the container process.
+	Milestone string `json:"milestone,omitempty"`
+	// Maximum set of privileges any process in this container can obtain.
+	LimitPriv string `json:"limitpriv,omitempty"`
+	// The maximum amount of shared memory allowed for this container.
+	MaxShmMemory string `json:"maxShmMemory,omitempty"`
+	// Specification for automatic creation of network resources for this container.
+	Anet []Anet `json:"anet,omitempty"`
+	// Set limit on the amount of CPU time that can be used by container.
+	CappedCPU *CappedCPU `json:"cappedCPU,omitempty"`
+	// The physical and swap caps on the memory that can be used by this container.
+	CappedMemory *CappedMemory `json:"cappedMemory,omitempty"`
+}
+
+// CappedCPU allows users to set limit on the amount of CPU time that can be used by container.
+type CappedCPU struct {
+	Ncpus string `json:"ncpus,omitempty"`
+}
+
+// CappedMemory allows users to set the physical and swap caps on the memory that can be used by this container.
+type CappedMemory struct {
+	Physical string `json:"physical,omitempty"`
+	Swap     string `json:"swap,omitempty"`
+}
+
+// Anet provides the specification for automatic creation of network resources for this container.
+type Anet struct {
+	// Specify a name for the automatically created VNIC datalink.
+	Linkname string `json:"linkname,omitempty"`
+	// Specify the link over which the VNIC will be created.
+	Lowerlink string `json:"lowerLink,omitempty"`
+	// The set of IP addresses that the container can use.
+	Allowedaddr string `json:"allowedAddress,omitempty"`
+	// Specifies whether allowedAddress limitation is to be applied to the VNIC.
+	Configallowedaddr string `json:"configureAllowedAddress,omitempty"`
+	// The value of the optional default router.
+	Defrouter string `json:"defrouter,omitempty"`
+	// Enable one or more types of link protection.
+	Linkprotection string `json:"linkProtection,omitempty"`
+	// Set the VNIC's macAddress
+	Macaddress string `json:"macAddress,omitempty"`
+}
+
+// Arch used for additional architectures
+type Arch string
+
+// Additional architectures permitted to be used for system calls
+// By default only the native architecture of the kernel is permitted
+const (
+	ArchX86         Arch = "SCMP_ARCH_X86"
+	ArchX86_64      Arch = "SCMP_ARCH_X86_64"
+	ArchX32         Arch = "SCMP_ARCH_X32"
+	ArchARM         Arch = "SCMP_ARCH_ARM"
+	ArchAARCH64     Arch = "SCMP_ARCH_AARCH64"
+	ArchMIPS        Arch = "SCMP_ARCH_MIPS"
+	ArchMIPS64      Arch = "SCMP_ARCH_MIPS64"
+	ArchMIPS64N32   Arch = "SCMP_ARCH_MIPS64N32"
+	ArchMIPSEL      Arch = "SCMP_ARCH_MIPSEL"
+	ArchMIPSEL64    Arch = "SCMP_ARCH_MIPSEL64"
+	ArchMIPSEL64N32 Arch = "SCMP_ARCH_MIPSEL64N32"
+	ArchPPC         Arch = "SCMP_ARCH_PPC"
+	ArchPPC64       Arch = "SCMP_ARCH_PPC64"
+	ArchPPC64LE     Arch = "SCMP_ARCH_PPC64LE"
+	ArchS390        Arch = "SCMP_ARCH_S390"
+	ArchS390X       Arch = "SCMP_ARCH_S390X"
+)
+
+// Action taken upon Seccomp rule match
+type Action string
+
+// Define actions for Seccomp rules
+const (
+	ActKill  Action = "SCMP_ACT_KILL"
+	ActTrap  Action = "SCMP_ACT_TRAP"
+	ActErrno Action = "SCMP_ACT_ERRNO"
+	ActTrace Action = "SCMP_ACT_TRACE"
+	ActAllow Action = "SCMP_ACT_ALLOW"
+)
+
+// Operator used to match syscall arguments in Seccomp
+type Operator string
+
+// Define operators for syscall arguments in Seccomp
+const (
+	OpNotEqual     Operator = "SCMP_CMP_NE"
+	OpLessThan     Operator = "SCMP_CMP_LT"
+	OpLessEqual    Operator = "SCMP_CMP_LE"
+	OpEqualTo      Operator = "SCMP_CMP_EQ"
+	OpGreaterEqual Operator = "SCMP_CMP_GE"
+	OpGreaterThan  Operator = "SCMP_CMP_GT"
+	OpMaskedEqual  Operator = "SCMP_CMP_MASKED_EQ"
+)
+
+// Arg used for matching specific syscall arguments in Seccomp
+type Arg struct {
+	Index    uint     `json:"index"`
+	Value    uint64   `json:"value"`
+	ValueTwo uint64   `json:"valueTwo"`
+	Op       Operator `json:"op"`
+}
+
+// Syscall is used to match a syscall in Seccomp
+type Syscall struct {
+	Name   string `json:"name"`
+	Action Action `json:"action"`
+	Args   []Arg  `json:"args,omitempty"`
+}
diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/state.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/state.go
new file mode 100644
index 0000000000000000000000000000000000000000..ad31b893bbf70f58c13ee363c1c431b1df2915ad
--- /dev/null
+++ b/vendor/github.com/opencontainers/runtime-spec/specs-go/state.go
@@ -0,0 +1,17 @@
+package specs
+
+// State holds information about the runtime state of the container.
+type State struct {
+	// Version is the version of the specification that is supported.
+	Version string `json:"version"`
+	// ID is the container ID
+	ID string `json:"id"`
+	// Status is the runtime state of the container.
+	Status string `json:"status"`
+	// Pid is the process ID for the container process.
+	Pid int `json:"pid"`
+	// BundlePath is the path to the container's bundle directory.
+	BundlePath string `json:"bundlePath"`
+	// Annotations are the annotations associated with the container.
+	Annotations map[string]string `json:"annotations"`
+}
diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go
new file mode 100644
index 0000000000000000000000000000000000000000..8b5ec89089e53408763ea80cc66779dfd99aa48b
--- /dev/null
+++ b/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go
@@ -0,0 +1,18 @@
+package specs
+
+import "fmt"
+
+const (
+	// VersionMajor is for an API incompatible changes
+	VersionMajor = 1
+	// VersionMinor is for functionality in a backwards-compatible manner
+	VersionMinor = 0
+	// VersionPatch is for backwards-compatible bug fixes
+	VersionPatch = 0
+
+	// VersionDev indicates development branch. Releases will be empty string.
+	VersionDev = "-rc2-dev"
+)
+
+// Version is the specification version that the package types support.
+var Version = fmt.Sprintf("%d.%d.%d%s", VersionMajor, VersionMinor, VersionPatch, VersionDev)
diff --git a/vendor/github.com/vbatts/tar-split/LICENSE b/vendor/github.com/vbatts/tar-split/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..ca03685b1586d241b05f98f51deb59fbd1e450b3
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2015 Vincent Batts, Raleigh, NC, USA
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+this list of conditions and the following disclaimer in the documentation
+and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its contributors
+may be used to endorse or promote products derived from this software without
+specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/common.go b/vendor/github.com/vbatts/tar-split/archive/tar/common.go
new file mode 100644
index 0000000000000000000000000000000000000000..36f4e23980930c82f55749aef4e00779527ccb5c
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/archive/tar/common.go
@@ -0,0 +1,340 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package tar implements access to tar archives.
+// It aims to cover most of the variations, including those produced
+// by GNU and BSD tars.
+//
+// References:
+//   http://www.freebsd.org/cgi/man.cgi?query=tar&sektion=5
+//   http://www.gnu.org/software/tar/manual/html_node/Standard.html
+//   http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html
+package tar
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"os"
+	"path"
+	"time"
+)
+
+const (
+	blockSize = 512
+
+	// Types
+	TypeReg           = '0'    // regular file
+	TypeRegA          = '\x00' // regular file
+	TypeLink          = '1'    // hard link
+	TypeSymlink       = '2'    // symbolic link
+	TypeChar          = '3'    // character device node
+	TypeBlock         = '4'    // block device node
+	TypeDir           = '5'    // directory
+	TypeFifo          = '6'    // fifo node
+	TypeCont          = '7'    // reserved
+	TypeXHeader       = 'x'    // extended header
+	TypeXGlobalHeader = 'g'    // global extended header
+	TypeGNULongName   = 'L'    // Next file has a long name
+	TypeGNULongLink   = 'K'    // Next file symlinks to a file w/ a long name
+	TypeGNUSparse     = 'S'    // sparse file
+)
+
+// A Header represents a single header in a tar archive.
+// Some fields may not be populated.
+type Header struct {
+	Name       string    // name of header file entry
+	Mode       int64     // permission and mode bits
+	Uid        int       // user id of owner
+	Gid        int       // group id of owner
+	Size       int64     // length in bytes
+	ModTime    time.Time // modified time
+	Typeflag   byte      // type of header entry
+	Linkname   string    // target name of link
+	Uname      string    // user name of owner
+	Gname      string    // group name of owner
+	Devmajor   int64     // major number of character or block device
+	Devminor   int64     // minor number of character or block device
+	AccessTime time.Time // access time
+	ChangeTime time.Time // status change time
+	Xattrs     map[string]string
+}
+
+// File name constants from the tar spec.
+const (
+	fileNameSize       = 100 // Maximum number of bytes in a standard tar name.
+	fileNamePrefixSize = 155 // Maximum number of ustar extension bytes.
+)
+
+// FileInfo returns an os.FileInfo for the Header.
+func (h *Header) FileInfo() os.FileInfo {
+	return headerFileInfo{h}
+}
+
+// headerFileInfo implements os.FileInfo.
+type headerFileInfo struct {
+	h *Header
+}
+
+func (fi headerFileInfo) Size() int64        { return fi.h.Size }
+func (fi headerFileInfo) IsDir() bool        { return fi.Mode().IsDir() }
+func (fi headerFileInfo) ModTime() time.Time { return fi.h.ModTime }
+func (fi headerFileInfo) Sys() interface{}   { return fi.h }
+
+// Name returns the base name of the file.
+func (fi headerFileInfo) Name() string {
+	if fi.IsDir() {
+		return path.Base(path.Clean(fi.h.Name))
+	}
+	return path.Base(fi.h.Name)
+}
+
+// Mode returns the permission and mode bits for the headerFileInfo.
+func (fi headerFileInfo) Mode() (mode os.FileMode) {
+	// Set file permission bits.
+	mode = os.FileMode(fi.h.Mode).Perm()
+
+	// Set setuid, setgid and sticky bits.
+	if fi.h.Mode&c_ISUID != 0 {
+		// setuid
+		mode |= os.ModeSetuid
+	}
+	if fi.h.Mode&c_ISGID != 0 {
+		// setgid
+		mode |= os.ModeSetgid
+	}
+	if fi.h.Mode&c_ISVTX != 0 {
+		// sticky
+		mode |= os.ModeSticky
+	}
+
+	// Set file mode bits.
+	// clear perm, setuid, setgid and sticky bits.
+	m := os.FileMode(fi.h.Mode) &^ 07777
+	if m == c_ISDIR {
+		// directory
+		mode |= os.ModeDir
+	}
+	if m == c_ISFIFO {
+		// named pipe (FIFO)
+		mode |= os.ModeNamedPipe
+	}
+	if m == c_ISLNK {
+		// symbolic link
+		mode |= os.ModeSymlink
+	}
+	if m == c_ISBLK {
+		// device file
+		mode |= os.ModeDevice
+	}
+	if m == c_ISCHR {
+		// Unix character device
+		mode |= os.ModeDevice
+		mode |= os.ModeCharDevice
+	}
+	if m == c_ISSOCK {
+		// Unix domain socket
+		mode |= os.ModeSocket
+	}
+
+	switch fi.h.Typeflag {
+	case TypeSymlink:
+		// symbolic link
+		mode |= os.ModeSymlink
+	case TypeChar:
+		// character device node
+		mode |= os.ModeDevice
+		mode |= os.ModeCharDevice
+	case TypeBlock:
+		// block device node
+		mode |= os.ModeDevice
+	case TypeDir:
+		// directory
+		mode |= os.ModeDir
+	case TypeFifo:
+		// fifo node
+		mode |= os.ModeNamedPipe
+	}
+
+	return mode
+}
+
+// sysStat, if non-nil, populates h from system-dependent fields of fi.
+var sysStat func(fi os.FileInfo, h *Header) error
+
+// Mode constants from the tar spec.
+const (
+	c_ISUID  = 04000   // Set uid
+	c_ISGID  = 02000   // Set gid
+	c_ISVTX  = 01000   // Save text (sticky bit)
+	c_ISDIR  = 040000  // Directory
+	c_ISFIFO = 010000  // FIFO
+	c_ISREG  = 0100000 // Regular file
+	c_ISLNK  = 0120000 // Symbolic link
+	c_ISBLK  = 060000  // Block special file
+	c_ISCHR  = 020000  // Character special file
+	c_ISSOCK = 0140000 // Socket
+)
+
+// Keywords for the PAX Extended Header
+const (
+	paxAtime    = "atime"
+	paxCharset  = "charset"
+	paxComment  = "comment"
+	paxCtime    = "ctime" // please note that ctime is not a valid pax header.
+	paxGid      = "gid"
+	paxGname    = "gname"
+	paxLinkpath = "linkpath"
+	paxMtime    = "mtime"
+	paxPath     = "path"
+	paxSize     = "size"
+	paxUid      = "uid"
+	paxUname    = "uname"
+	paxXattr    = "SCHILY.xattr."
+	paxNone     = ""
+)
+
+// FileInfoHeader creates a partially-populated Header from fi.
+// If fi describes a symlink, FileInfoHeader records link as the link target.
+// If fi describes a directory, a slash is appended to the name.
+// Because os.FileInfo's Name method returns only the base name of
+// the file it describes, it may be necessary to modify the Name field
+// of the returned header to provide the full path name of the file.
+func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) {
+	if fi == nil {
+		return nil, errors.New("tar: FileInfo is nil")
+	}
+	fm := fi.Mode()
+	h := &Header{
+		Name:    fi.Name(),
+		ModTime: fi.ModTime(),
+		Mode:    int64(fm.Perm()), // or'd with c_IS* constants later
+	}
+	switch {
+	case fm.IsRegular():
+		h.Mode |= c_ISREG
+		h.Typeflag = TypeReg
+		h.Size = fi.Size()
+	case fi.IsDir():
+		h.Typeflag = TypeDir
+		h.Mode |= c_ISDIR
+		h.Name += "/"
+	case fm&os.ModeSymlink != 0:
+		h.Typeflag = TypeSymlink
+		h.Mode |= c_ISLNK
+		h.Linkname = link
+	case fm&os.ModeDevice != 0:
+		if fm&os.ModeCharDevice != 0 {
+			h.Mode |= c_ISCHR
+			h.Typeflag = TypeChar
+		} else {
+			h.Mode |= c_ISBLK
+			h.Typeflag = TypeBlock
+		}
+	case fm&os.ModeNamedPipe != 0:
+		h.Typeflag = TypeFifo
+		h.Mode |= c_ISFIFO
+	case fm&os.ModeSocket != 0:
+		h.Mode |= c_ISSOCK
+	default:
+		return nil, fmt.Errorf("archive/tar: unknown file mode %v", fm)
+	}
+	if fm&os.ModeSetuid != 0 {
+		h.Mode |= c_ISUID
+	}
+	if fm&os.ModeSetgid != 0 {
+		h.Mode |= c_ISGID
+	}
+	if fm&os.ModeSticky != 0 {
+		h.Mode |= c_ISVTX
+	}
+	// If possible, populate additional fields from OS-specific
+	// FileInfo fields.
+	if sys, ok := fi.Sys().(*Header); ok {
+		// This FileInfo came from a Header (not the OS). Use the
+		// original Header to populate all remaining fields.
+		h.Uid = sys.Uid
+		h.Gid = sys.Gid
+		h.Uname = sys.Uname
+		h.Gname = sys.Gname
+		h.AccessTime = sys.AccessTime
+		h.ChangeTime = sys.ChangeTime
+		if sys.Xattrs != nil {
+			h.Xattrs = make(map[string]string)
+			for k, v := range sys.Xattrs {
+				h.Xattrs[k] = v
+			}
+		}
+		if sys.Typeflag == TypeLink {
+			// hard link
+			h.Typeflag = TypeLink
+			h.Size = 0
+			h.Linkname = sys.Linkname
+		}
+	}
+	if sysStat != nil {
+		return h, sysStat(fi, h)
+	}
+	return h, nil
+}
+
+var zeroBlock = make([]byte, blockSize)
+
+// POSIX specifies a sum of the unsigned byte values, but the Sun tar uses signed byte values.
+// We compute and return both.
+func checksum(header []byte) (unsigned int64, signed int64) {
+	for i := 0; i < len(header); i++ {
+		if i == 148 {
+			// The chksum field (header[148:156]) is special: it should be treated as space bytes.
+			unsigned += ' ' * 8
+			signed += ' ' * 8
+			i += 7
+			continue
+		}
+		unsigned += int64(header[i])
+		signed += int64(int8(header[i]))
+	}
+	return
+}
+
+type slicer []byte
+
+func (sp *slicer) next(n int) (b []byte) {
+	s := *sp
+	b, *sp = s[0:n], s[n:]
+	return
+}
+
+func isASCII(s string) bool {
+	for _, c := range s {
+		if c >= 0x80 {
+			return false
+		}
+	}
+	return true
+}
+
+func toASCII(s string) string {
+	if isASCII(s) {
+		return s
+	}
+	var buf bytes.Buffer
+	for _, c := range s {
+		if c < 0x80 {
+			buf.WriteByte(byte(c))
+		}
+	}
+	return buf.String()
+}
+
+// isHeaderOnlyType checks if the given type flag is of the type that has no
+// data section even if a size is specified.
+func isHeaderOnlyType(flag byte) bool {
+	switch flag {
+	case TypeLink, TypeSymlink, TypeChar, TypeBlock, TypeDir, TypeFifo:
+		return true
+	default:
+		return false
+	}
+}
diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/reader.go b/vendor/github.com/vbatts/tar-split/archive/tar/reader.go
new file mode 100644
index 0000000000000000000000000000000000000000..adf32122e1b97942f009ae2f728bc568172d8994
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/archive/tar/reader.go
@@ -0,0 +1,1064 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tar
+
+// TODO(dsymonds):
+//   - pax extensions
+
+import (
+	"bytes"
+	"errors"
+	"io"
+	"io/ioutil"
+	"math"
+	"os"
+	"strconv"
+	"strings"
+	"time"
+)
+
+var (
+	ErrHeader = errors.New("archive/tar: invalid tar header")
+)
+
+const maxNanoSecondIntSize = 9
+
+// A Reader provides sequential access to the contents of a tar archive.
+// A tar archive consists of a sequence of files.
+// The Next method advances to the next file in the archive (including the first),
+// and then it can be treated as an io.Reader to access the file's data.
+type Reader struct {
+	r       io.Reader
+	err     error
+	pad     int64           // amount of padding (ignored) after current file entry
+	curr    numBytesReader  // reader for current file entry
+	hdrBuff [blockSize]byte // buffer to use in readHeader
+
+	RawAccounting bool          // Whether to enable the access needed to reassemble the tar from raw bytes. Some performance/memory hit for this.
+	rawBytes      *bytes.Buffer // last raw bits
+}
+
+type parser struct {
+	err error // Last error seen
+}
+
+// RawBytes accesses the raw bytes of the archive, apart from the file payload itself.
+// This includes the header and padding.
+//
+// This call resets the current rawbytes buffer
+//
+// Only when RawAccounting is enabled, otherwise this returns nil
+func (tr *Reader) RawBytes() []byte {
+	if !tr.RawAccounting {
+		return nil
+	}
+	if tr.rawBytes == nil {
+		tr.rawBytes = bytes.NewBuffer(nil)
+	}
+	// if we've read them, then flush them.
+	defer tr.rawBytes.Reset()
+	return tr.rawBytes.Bytes()
+}
+
+// A numBytesReader is an io.Reader with a numBytes method, returning the number
+// of bytes remaining in the underlying encoded data.
+type numBytesReader interface {
+	io.Reader
+	numBytes() int64
+}
+
+// A regFileReader is a numBytesReader for reading file data from a tar archive.
+type regFileReader struct {
+	r  io.Reader // underlying reader
+	nb int64     // number of unread bytes for current file entry
+}
+
+// A sparseFileReader is a numBytesReader for reading sparse file data from a
+// tar archive.
+type sparseFileReader struct {
+	rfr   numBytesReader // Reads the sparse-encoded file data
+	sp    []sparseEntry  // The sparse map for the file
+	pos   int64          // Keeps track of file position
+	total int64          // Total size of the file
+}
+
+// A sparseEntry holds a single entry in a sparse file's sparse map.
+//
+// Sparse files are represented using a series of sparseEntrys.
+// Despite the name, a sparseEntry represents an actual data fragment that
+// references data found in the underlying archive stream. All regions not
+// covered by a sparseEntry are logically filled with zeros.
+//
+// For example, if the underlying raw file contains the 10-byte data:
+//	var compactData = "abcdefgh"
+//
+// And the sparse map has the following entries:
+//	var sp = []sparseEntry{
+//		{offset: 2,  numBytes: 5} // Data fragment for [2..7]
+//		{offset: 18, numBytes: 3} // Data fragment for [18..21]
+//	}
+//
+// Then the content of the resulting sparse file with a "real" size of 25 is:
+//	var sparseData = "\x00"*2 + "abcde" + "\x00"*11 + "fgh" + "\x00"*4
+type sparseEntry struct {
+	offset   int64 // Starting position of the fragment
+	numBytes int64 // Length of the fragment
+}
+
+// Keywords for GNU sparse files in a PAX extended header
+const (
+	paxGNUSparseNumBlocks = "GNU.sparse.numblocks"
+	paxGNUSparseOffset    = "GNU.sparse.offset"
+	paxGNUSparseNumBytes  = "GNU.sparse.numbytes"
+	paxGNUSparseMap       = "GNU.sparse.map"
+	paxGNUSparseName      = "GNU.sparse.name"
+	paxGNUSparseMajor     = "GNU.sparse.major"
+	paxGNUSparseMinor     = "GNU.sparse.minor"
+	paxGNUSparseSize      = "GNU.sparse.size"
+	paxGNUSparseRealSize  = "GNU.sparse.realsize"
+)
+
+// Keywords for old GNU sparse headers
+const (
+	oldGNUSparseMainHeaderOffset               = 386
+	oldGNUSparseMainHeaderIsExtendedOffset     = 482
+	oldGNUSparseMainHeaderNumEntries           = 4
+	oldGNUSparseExtendedHeaderIsExtendedOffset = 504
+	oldGNUSparseExtendedHeaderNumEntries       = 21
+	oldGNUSparseOffsetSize                     = 12
+	oldGNUSparseNumBytesSize                   = 12
+)
+
+// NewReader creates a new Reader reading from r.
+func NewReader(r io.Reader) *Reader { return &Reader{r: r} }
+
+// Next advances to the next entry in the tar archive.
+//
+// io.EOF is returned at the end of the input.
+func (tr *Reader) Next() (*Header, error) {
+	if tr.RawAccounting {
+		if tr.rawBytes == nil {
+			tr.rawBytes = bytes.NewBuffer(nil)
+		} else {
+			tr.rawBytes.Reset()
+		}
+	}
+
+	if tr.err != nil {
+		return nil, tr.err
+	}
+
+	var hdr *Header
+	var extHdrs map[string]string
+
+	// Externally, Next iterates through the tar archive as if it is a series of
+	// files. Internally, the tar format often uses fake "files" to add meta
+	// data that describes the next file. These meta data "files" should not
+	// normally be visible to the outside. As such, this loop iterates through
+	// one or more "header files" until it finds a "normal file".
+loop:
+	for {
+		tr.err = tr.skipUnread()
+		if tr.err != nil {
+			return nil, tr.err
+		}
+
+		hdr = tr.readHeader()
+		if tr.err != nil {
+			return nil, tr.err
+		}
+		// Check for PAX/GNU special headers and files.
+		switch hdr.Typeflag {
+		case TypeXHeader:
+			extHdrs, tr.err = parsePAX(tr)
+			if tr.err != nil {
+				return nil, tr.err
+			}
+			continue loop // This is a meta header affecting the next header
+		case TypeGNULongName, TypeGNULongLink:
+			var realname []byte
+			realname, tr.err = ioutil.ReadAll(tr)
+			if tr.err != nil {
+				return nil, tr.err
+			}
+
+			if tr.RawAccounting {
+				if _, tr.err = tr.rawBytes.Write(realname); tr.err != nil {
+					return nil, tr.err
+				}
+			}
+
+			// Convert GNU extensions to use PAX headers.
+			if extHdrs == nil {
+				extHdrs = make(map[string]string)
+			}
+			var p parser
+			switch hdr.Typeflag {
+			case TypeGNULongName:
+				extHdrs[paxPath] = p.parseString(realname)
+			case TypeGNULongLink:
+				extHdrs[paxLinkpath] = p.parseString(realname)
+			}
+			if p.err != nil {
+				tr.err = p.err
+				return nil, tr.err
+			}
+			continue loop // This is a meta header affecting the next header
+		default:
+			mergePAX(hdr, extHdrs)
+
+			// Check for a PAX format sparse file
+			sp, err := tr.checkForGNUSparsePAXHeaders(hdr, extHdrs)
+			if err != nil {
+				tr.err = err
+				return nil, err
+			}
+			if sp != nil {
+				// Current file is a PAX format GNU sparse file.
+				// Set the current file reader to a sparse file reader.
+				tr.curr, tr.err = newSparseFileReader(tr.curr, sp, hdr.Size)
+				if tr.err != nil {
+					return nil, tr.err
+				}
+			}
+			break loop // This is a file, so stop
+		}
+	}
+	return hdr, nil
+}
+
+// checkForGNUSparsePAXHeaders checks the PAX headers for GNU sparse headers. If they are found, then
+// this function reads the sparse map and returns it. Unknown sparse formats are ignored, causing the file to
+// be treated as a regular file.
+func (tr *Reader) checkForGNUSparsePAXHeaders(hdr *Header, headers map[string]string) ([]sparseEntry, error) {
+	var sparseFormat string
+
+	// Check for sparse format indicators
+	major, majorOk := headers[paxGNUSparseMajor]
+	minor, minorOk := headers[paxGNUSparseMinor]
+	sparseName, sparseNameOk := headers[paxGNUSparseName]
+	_, sparseMapOk := headers[paxGNUSparseMap]
+	sparseSize, sparseSizeOk := headers[paxGNUSparseSize]
+	sparseRealSize, sparseRealSizeOk := headers[paxGNUSparseRealSize]
+
+	// Identify which, if any, sparse format applies from which PAX headers are set
+	if majorOk && minorOk {
+		sparseFormat = major + "." + minor
+	} else if sparseNameOk && sparseMapOk {
+		sparseFormat = "0.1"
+	} else if sparseSizeOk {
+		sparseFormat = "0.0"
+	} else {
+		// Not a PAX format GNU sparse file.
+		return nil, nil
+	}
+
+	// Check for unknown sparse format
+	if sparseFormat != "0.0" && sparseFormat != "0.1" && sparseFormat != "1.0" {
+		return nil, nil
+	}
+
+	// Update hdr from GNU sparse PAX headers
+	if sparseNameOk {
+		hdr.Name = sparseName
+	}
+	if sparseSizeOk {
+		realSize, err := strconv.ParseInt(sparseSize, 10, 0)
+		if err != nil {
+			return nil, ErrHeader
+		}
+		hdr.Size = realSize
+	} else if sparseRealSizeOk {
+		realSize, err := strconv.ParseInt(sparseRealSize, 10, 0)
+		if err != nil {
+			return nil, ErrHeader
+		}
+		hdr.Size = realSize
+	}
+
+	// Set up the sparse map, according to the particular sparse format in use
+	var sp []sparseEntry
+	var err error
+	switch sparseFormat {
+	case "0.0", "0.1":
+		sp, err = readGNUSparseMap0x1(headers)
+	case "1.0":
+		sp, err = readGNUSparseMap1x0(tr.curr)
+	}
+	return sp, err
+}
+
+// mergePAX merges well known headers according to PAX standard.
+// In general headers with the same name as those found
+// in the header struct overwrite those found in the header
+// struct with higher precision or longer values. Esp. useful
+// for name and linkname fields.
+func mergePAX(hdr *Header, headers map[string]string) error {
+	for k, v := range headers {
+		switch k {
+		case paxPath:
+			hdr.Name = v
+		case paxLinkpath:
+			hdr.Linkname = v
+		case paxGname:
+			hdr.Gname = v
+		case paxUname:
+			hdr.Uname = v
+		case paxUid:
+			uid, err := strconv.ParseInt(v, 10, 0)
+			if err != nil {
+				return err
+			}
+			hdr.Uid = int(uid)
+		case paxGid:
+			gid, err := strconv.ParseInt(v, 10, 0)
+			if err != nil {
+				return err
+			}
+			hdr.Gid = int(gid)
+		case paxAtime:
+			t, err := parsePAXTime(v)
+			if err != nil {
+				return err
+			}
+			hdr.AccessTime = t
+		case paxMtime:
+			t, err := parsePAXTime(v)
+			if err != nil {
+				return err
+			}
+			hdr.ModTime = t
+		case paxCtime:
+			t, err := parsePAXTime(v)
+			if err != nil {
+				return err
+			}
+			hdr.ChangeTime = t
+		case paxSize:
+			size, err := strconv.ParseInt(v, 10, 0)
+			if err != nil {
+				return err
+			}
+			hdr.Size = int64(size)
+		default:
+			if strings.HasPrefix(k, paxXattr) {
+				if hdr.Xattrs == nil {
+					hdr.Xattrs = make(map[string]string)
+				}
+				hdr.Xattrs[k[len(paxXattr):]] = v
+			}
+		}
+	}
+	return nil
+}
+
+// parsePAXTime takes a string of the form %d.%d as described in
+// the PAX specification.
+func parsePAXTime(t string) (time.Time, error) {
+	buf := []byte(t)
+	pos := bytes.IndexByte(buf, '.')
+	var seconds, nanoseconds int64
+	var err error
+	if pos == -1 {
+		seconds, err = strconv.ParseInt(t, 10, 0)
+		if err != nil {
+			return time.Time{}, err
+		}
+	} else {
+		seconds, err = strconv.ParseInt(string(buf[:pos]), 10, 0)
+		if err != nil {
+			return time.Time{}, err
+		}
+		nano_buf := string(buf[pos+1:])
+		// Pad as needed before converting to a decimal.
+		// For example .030 -> .030000000 -> 30000000 nanoseconds
+		if len(nano_buf) < maxNanoSecondIntSize {
+			// Right pad
+			nano_buf += strings.Repeat("0", maxNanoSecondIntSize-len(nano_buf))
+		} else if len(nano_buf) > maxNanoSecondIntSize {
+			// Right truncate
+			nano_buf = nano_buf[:maxNanoSecondIntSize]
+		}
+		nanoseconds, err = strconv.ParseInt(string(nano_buf), 10, 0)
+		if err != nil {
+			return time.Time{}, err
+		}
+	}
+	ts := time.Unix(seconds, nanoseconds)
+	return ts, nil
+}
+
+// parsePAX parses PAX headers.
+// If an extended header (type 'x') is invalid, ErrHeader is returned
+func parsePAX(r io.Reader) (map[string]string, error) {
+	buf, err := ioutil.ReadAll(r)
+	if err != nil {
+		return nil, err
+	}
+	// leaving this function for io.Reader makes it more testable
+	if tr, ok := r.(*Reader); ok && tr.RawAccounting {
+		if _, err = tr.rawBytes.Write(buf); err != nil {
+			return nil, err
+		}
+	}
+	sbuf := string(buf)
+
+	// For GNU PAX sparse format 0.0 support.
+	// This function transforms the sparse format 0.0 headers into sparse format 0.1 headers.
+	var sparseMap bytes.Buffer
+
+	headers := make(map[string]string)
+	// Each record is constructed as
+	//     "%d %s=%s\n", length, keyword, value
+	for len(sbuf) > 0 {
+		key, value, residual, err := parsePAXRecord(sbuf)
+		if err != nil {
+			return nil, ErrHeader
+		}
+		sbuf = residual
+
+		keyStr := string(key)
+		if keyStr == paxGNUSparseOffset || keyStr == paxGNUSparseNumBytes {
+			// GNU sparse format 0.0 special key. Write to sparseMap instead of using the headers map.
+			sparseMap.WriteString(value)
+			sparseMap.Write([]byte{','})
+		} else {
+			// Normal key. Set the value in the headers map.
+			headers[keyStr] = string(value)
+		}
+	}
+	if sparseMap.Len() != 0 {
+		// Add sparse info to headers, chopping off the extra comma
+		sparseMap.Truncate(sparseMap.Len() - 1)
+		headers[paxGNUSparseMap] = sparseMap.String()
+	}
+	return headers, nil
+}
+
+// parsePAXRecord parses the input PAX record string into a key-value pair.
+// If parsing is successful, it will slice off the currently read record and
+// return the remainder as r.
+//
+// A PAX record is of the following form:
+//	"%d %s=%s\n" % (size, key, value)
+func parsePAXRecord(s string) (k, v, r string, err error) {
+	// The size field ends at the first space.
+	sp := strings.IndexByte(s, ' ')
+	if sp == -1 {
+		return "", "", s, ErrHeader
+	}
+
+	// Parse the first token as a decimal integer.
+	n, perr := strconv.ParseInt(s[:sp], 10, 0) // Intentionally parse as native int
+	if perr != nil || n < 5 || int64(len(s)) < n {
+		return "", "", s, ErrHeader
+	}
+
+	// Extract everything between the space and the final newline.
+	rec, nl, rem := s[sp+1:n-1], s[n-1:n], s[n:]
+	if nl != "\n" {
+		return "", "", s, ErrHeader
+	}
+
+	// The first equals separates the key from the value.
+	eq := strings.IndexByte(rec, '=')
+	if eq == -1 {
+		return "", "", s, ErrHeader
+	}
+	return rec[:eq], rec[eq+1:], rem, nil
+}
+
+// parseString parses bytes as a NUL-terminated C-style string.
+// If a NUL byte is not found then the whole slice is returned as a string.
+func (*parser) parseString(b []byte) string {
+	n := 0
+	for n < len(b) && b[n] != 0 {
+		n++
+	}
+	return string(b[0:n])
+}
+
+// parseNumeric parses the input as being encoded in either base-256 or octal.
+// This function may return negative numbers.
+// If parsing fails or an integer overflow occurs, err will be set.
+func (p *parser) parseNumeric(b []byte) int64 {
+	// Check for base-256 (binary) format first.
+	// If the first bit is set, then all following bits constitute a two's
+	// complement encoded number in big-endian byte order.
+	if len(b) > 0 && b[0]&0x80 != 0 {
+		// Handling negative numbers relies on the following identity:
+		//	-a-1 == ^a
+		//
+		// If the number is negative, we use an inversion mask to invert the
+		// data bytes and treat the value as an unsigned number.
+		var inv byte // 0x00 if positive or zero, 0xff if negative
+		if b[0]&0x40 != 0 {
+			inv = 0xff
+		}
+
+		var x uint64
+		for i, c := range b {
+			c ^= inv // Inverts c only if inv is 0xff, otherwise does nothing
+			if i == 0 {
+				c &= 0x7f // Ignore signal bit in first byte
+			}
+			if (x >> 56) > 0 {
+				p.err = ErrHeader // Integer overflow
+				return 0
+			}
+			x = x<<8 | uint64(c)
+		}
+		if (x >> 63) > 0 {
+			p.err = ErrHeader // Integer overflow
+			return 0
+		}
+		if inv == 0xff {
+			return ^int64(x)
+		}
+		return int64(x)
+	}
+
+	// Normal case is base-8 (octal) format.
+	return p.parseOctal(b)
+}
+
+func (p *parser) parseOctal(b []byte) int64 {
+	// Because unused fields are filled with NULs, we need
+	// to skip leading NULs. Fields may also be padded with
+	// spaces or NULs.
+	// So we remove leading and trailing NULs and spaces to
+	// be sure.
+	b = bytes.Trim(b, " \x00")
+
+	if len(b) == 0 {
+		return 0
+	}
+	x, perr := strconv.ParseUint(p.parseString(b), 8, 64)
+	if perr != nil {
+		p.err = ErrHeader
+	}
+	return int64(x)
+}
+
+// skipUnread skips any unread bytes in the existing file entry, as well as any
+// alignment padding. It returns io.ErrUnexpectedEOF if any io.EOF is
+// encountered in the data portion; it is okay to hit io.EOF in the padding.
+//
+// Note that this function still works properly even when sparse files are being
+// used since numBytes returns the bytes remaining in the underlying io.Reader.
+func (tr *Reader) skipUnread() error {
+	dataSkip := tr.numBytes()      // Number of data bytes to skip
+	totalSkip := dataSkip + tr.pad // Total number of bytes to skip
+	tr.curr, tr.pad = nil, 0
+	if tr.RawAccounting {
+		_, tr.err = io.CopyN(tr.rawBytes, tr.r, totalSkip)
+		return tr.err
+	}
+	// If possible, Seek to the last byte before the end of the data section.
+	// Do this because Seek is often lazy about reporting errors; this will mask
+	// the fact that the tar stream may be truncated. We can rely on the
+	// io.CopyN done shortly afterwards to trigger any IO errors.
+	var seekSkipped int64 // Number of bytes skipped via Seek
+	if sr, ok := tr.r.(io.Seeker); ok && dataSkip > 1 {
+		// Not all io.Seeker can actually Seek. For example, os.Stdin implements
+		// io.Seeker, but calling Seek always returns an error and performs
+		// no action. Thus, we try an innocent seek to the current position
+		// to see if Seek is really supported.
+		pos1, err := sr.Seek(0, os.SEEK_CUR)
+		if err == nil {
+			// Seek seems supported, so perform the real Seek.
+			pos2, err := sr.Seek(dataSkip-1, os.SEEK_CUR)
+			if err != nil {
+				tr.err = err
+				return tr.err
+			}
+			seekSkipped = pos2 - pos1
+		}
+	}
+
+	var copySkipped int64 // Number of bytes skipped via CopyN
+	copySkipped, tr.err = io.CopyN(ioutil.Discard, tr.r, totalSkip-seekSkipped)
+	if tr.err == io.EOF && seekSkipped+copySkipped < dataSkip {
+		tr.err = io.ErrUnexpectedEOF
+	}
+	return tr.err
+}
+
+func (tr *Reader) verifyChecksum(header []byte) bool {
+	if tr.err != nil {
+		return false
+	}
+
+	var p parser
+	given := p.parseOctal(header[148:156])
+	unsigned, signed := checksum(header)
+	return p.err == nil && (given == unsigned || given == signed)
+}
+
// readHeader reads the next block header and assumes that the underlying reader
// is already aligned to a block boundary.
//
// The err will be set to io.EOF only when one of the following occurs:
//	* Exactly 0 bytes are read and EOF is hit.
//	* Exactly 1 block of zeros is read and EOF is hit.
//	* At least 2 blocks of zeros are read.
func (tr *Reader) readHeader() *Header {
	header := tr.hdrBuff[:]
	copy(header, zeroBlock)

	if n, err := io.ReadFull(tr.r, header); err != nil {
		tr.err = err
		// because it could read some of the block, but reach EOF first
		if tr.err == io.EOF && tr.RawAccounting {
			if _, err := tr.rawBytes.Write(header[:n]); err != nil {
				tr.err = err
			}
		}
		return nil // io.EOF is okay here
	}
	// Mirror every consumed byte into rawBytes when raw accounting is on.
	if tr.RawAccounting {
		if _, tr.err = tr.rawBytes.Write(header); tr.err != nil {
			return nil
		}
	}

	// Two blocks of zero bytes marks the end of the archive.
	if bytes.Equal(header, zeroBlock[0:blockSize]) {
		if n, err := io.ReadFull(tr.r, header); err != nil {
			tr.err = err
			// because it could read some of the block, but reach EOF first
			if tr.err == io.EOF && tr.RawAccounting {
				if _, err := tr.rawBytes.Write(header[:n]); err != nil {
					tr.err = err
				}
			}
			return nil // io.EOF is okay here
		}
		if tr.RawAccounting {
			if _, tr.err = tr.rawBytes.Write(header); tr.err != nil {
				return nil
			}
		}
		if bytes.Equal(header, zeroBlock[0:blockSize]) {
			tr.err = io.EOF
		} else {
			tr.err = ErrHeader // zero block and then non-zero block
		}
		return nil
	}

	if !tr.verifyChecksum(header) {
		tr.err = ErrHeader
		return nil
	}

	// Unpack the common (v7) portion of the header.
	// p accumulates parse errors; they are checked once at the end.
	var p parser
	hdr := new(Header)
	s := slicer(header)

	hdr.Name = p.parseString(s.next(100))
	hdr.Mode = p.parseNumeric(s.next(8))
	hdr.Uid = int(p.parseNumeric(s.next(8)))
	hdr.Gid = int(p.parseNumeric(s.next(8)))
	hdr.Size = p.parseNumeric(s.next(12))
	hdr.ModTime = time.Unix(p.parseNumeric(s.next(12)), 0)
	s.next(8) // chksum
	hdr.Typeflag = s.next(1)[0]
	hdr.Linkname = p.parseString(s.next(100))

	// The remainder of the header depends on the value of magic.
	// The original (v7) version of tar had no explicit magic field,
	// so its magic bytes, like the rest of the block, are NULs.
	magic := string(s.next(8)) // contains version field as well.
	var format string
	switch {
	case magic[:6] == "ustar\x00": // POSIX tar (1003.1-1988)
		if string(header[508:512]) == "tar\x00" {
			format = "star"
		} else {
			format = "posix"
		}
	case magic == "ustar  \x00": // old GNU tar
		format = "gnu"
	}

	switch format {
	case "posix", "gnu", "star":
		hdr.Uname = p.parseString(s.next(32))
		hdr.Gname = p.parseString(s.next(32))
		devmajor := s.next(8)
		devminor := s.next(8)
		// Device numbers are only meaningful for character/block devices.
		if hdr.Typeflag == TypeChar || hdr.Typeflag == TypeBlock {
			hdr.Devmajor = p.parseNumeric(devmajor)
			hdr.Devminor = p.parseNumeric(devminor)
		}
		// The prefix field (if any) is joined in front of the name.
		var prefix string
		switch format {
		case "posix", "gnu":
			prefix = p.parseString(s.next(155))
		case "star":
			prefix = p.parseString(s.next(131))
			hdr.AccessTime = time.Unix(p.parseNumeric(s.next(12)), 0)
			hdr.ChangeTime = time.Unix(p.parseNumeric(s.next(12)), 0)
		}
		if len(prefix) > 0 {
			hdr.Name = prefix + "/" + hdr.Name
		}
	}

	if p.err != nil {
		tr.err = p.err
		return nil
	}

	// Header-only types (links, dirs, devices, ...) carry no data blocks
	// regardless of what Size claims.
	nb := hdr.Size
	if isHeaderOnlyType(hdr.Typeflag) {
		nb = 0
	}
	if nb < 0 {
		tr.err = ErrHeader
		return nil
	}

	// Set the current file reader.
	tr.pad = -nb & (blockSize - 1) // blockSize is a power of two
	tr.curr = &regFileReader{r: tr.r, nb: nb}

	// Check for old GNU sparse format entry.
	if hdr.Typeflag == TypeGNUSparse {
		// Get the real size of the file.
		hdr.Size = p.parseNumeric(header[483:495])
		if p.err != nil {
			tr.err = p.err
			return nil
		}

		// Read the sparse map.
		sp := tr.readOldGNUSparseMap(header)
		if tr.err != nil {
			return nil
		}

		// Current file is a GNU sparse file. Update the current file reader.
		tr.curr, tr.err = newSparseFileReader(tr.curr, sp, hdr.Size)
		if tr.err != nil {
			return nil
		}
	}

	return hdr
}
+
// readOldGNUSparseMap reads the sparse map as stored in the old GNU sparse format.
// The sparse map is stored in the tar header if it's small enough. If it's larger than four entries,
// then one or more extension headers are used to store the rest of the sparse map.
func (tr *Reader) readOldGNUSparseMap(header []byte) []sparseEntry {
	var p parser
	// The isExtended flag in the main header signals that at least one
	// extension block follows the header.
	isExtended := header[oldGNUSparseMainHeaderIsExtendedOffset] != 0
	spCap := oldGNUSparseMainHeaderNumEntries
	if isExtended {
		spCap += oldGNUSparseExtendedHeaderNumEntries
	}
	sp := make([]sparseEntry, 0, spCap)
	s := slicer(header[oldGNUSparseMainHeaderOffset:])

	// Read the four entries from the main tar header
	for i := 0; i < oldGNUSparseMainHeaderNumEntries; i++ {
		offset := p.parseNumeric(s.next(oldGNUSparseOffsetSize))
		numBytes := p.parseNumeric(s.next(oldGNUSparseNumBytesSize))
		if p.err != nil {
			tr.err = p.err
			return nil
		}
		// An all-zero entry terminates the list early.
		if offset == 0 && numBytes == 0 {
			break
		}
		sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
	}

	// Each extension block may itself flag another extension block.
	for isExtended {
		// There are more entries. Read an extension header and parse its entries.
		sparseHeader := make([]byte, blockSize)
		if _, tr.err = io.ReadFull(tr.r, sparseHeader); tr.err != nil {
			return nil
		}
		// Account for the extension block's raw bytes as well.
		if tr.RawAccounting {
			if _, tr.err = tr.rawBytes.Write(sparseHeader); tr.err != nil {
				return nil
			}
		}

		isExtended = sparseHeader[oldGNUSparseExtendedHeaderIsExtendedOffset] != 0
		s = slicer(sparseHeader)
		for i := 0; i < oldGNUSparseExtendedHeaderNumEntries; i++ {
			offset := p.parseNumeric(s.next(oldGNUSparseOffsetSize))
			numBytes := p.parseNumeric(s.next(oldGNUSparseNumBytesSize))
			if p.err != nil {
				tr.err = p.err
				return nil
			}
			if offset == 0 && numBytes == 0 {
				break
			}
			sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
		}
	}
	return sp
}
+
// readGNUSparseMap1x0 reads the sparse map as stored in GNU's PAX sparse format
// version 1.0. The format of the sparse map consists of a series of
// newline-terminated numeric fields. The first field is the number of entries
// and is always present. Following this are the entries, consisting of two
// fields (offset, numBytes). This function must stop reading at the end
// boundary of the block containing the last newline.
//
// Note that the GNU manual says that numeric values should be encoded in octal
// format. However, the GNU tar utility itself outputs these values in decimal.
// As such, this library treats values as being encoded in decimal.
func readGNUSparseMap1x0(r io.Reader) ([]sparseEntry, error) {
	var cntNewline int64 // newlines buffered but not yet consumed
	var buf bytes.Buffer // data read so far, whole blocks at a time
	var blk = make([]byte, blockSize)

	// feedTokens copies data in numBlock chunks from r into buf until there are
	// at least cnt newlines in buf. It will not read more blocks than needed,
	// which keeps the reader positioned at the block boundary after the map.
	var feedTokens = func(cnt int64) error {
		for cntNewline < cnt {
			if _, err := io.ReadFull(r, blk); err != nil {
				if err == io.EOF {
					err = io.ErrUnexpectedEOF
				}
				return err
			}
			buf.Write(blk)
			for _, c := range blk {
				if c == '\n' {
					cntNewline++
				}
			}
		}
		return nil
	}

	// nextToken gets the next token delimited by a newline. This assumes that
	// at least one newline exists in the buffer.
	var nextToken = func() string {
		cntNewline--
		tok, _ := buf.ReadString('\n')
		return tok[:len(tok)-1] // Cut off newline
	}

	// Parse for the number of entries.
	// Use integer overflow resistant math to check this.
	if err := feedTokens(1); err != nil {
		return nil, err
	}
	numEntries, err := strconv.ParseInt(nextToken(), 10, 0) // Intentionally parse as native int
	if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) {
		return nil, ErrHeader
	}

	// Parse for all member entries.
	// numEntries is trusted after this since a potential attacker must have
	// committed resources proportional to what this library used.
	if err := feedTokens(2 * numEntries); err != nil {
		return nil, err
	}
	sp := make([]sparseEntry, 0, numEntries)
	for i := int64(0); i < numEntries; i++ {
		// Each entry is an (offset, numBytes) pair of decimal fields.
		offset, err := strconv.ParseInt(nextToken(), 10, 64)
		if err != nil {
			return nil, ErrHeader
		}
		numBytes, err := strconv.ParseInt(nextToken(), 10, 64)
		if err != nil {
			return nil, ErrHeader
		}
		sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
	}
	return sp, nil
}
+
+// readGNUSparseMap0x1 reads the sparse map as stored in GNU's PAX sparse format
+// version 0.1. The sparse map is stored in the PAX headers.
+func readGNUSparseMap0x1(extHdrs map[string]string) ([]sparseEntry, error) {
+	// Get number of entries.
+	// Use integer overflow resistant math to check this.
+	numEntriesStr := extHdrs[paxGNUSparseNumBlocks]
+	numEntries, err := strconv.ParseInt(numEntriesStr, 10, 0) // Intentionally parse as native int
+	if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) {
+		return nil, ErrHeader
+	}
+
+	// There should be two numbers in sparseMap for each entry.
+	sparseMap := strings.Split(extHdrs[paxGNUSparseMap], ",")
+	if int64(len(sparseMap)) != 2*numEntries {
+		return nil, ErrHeader
+	}
+
+	// Loop through the entries in the sparse map.
+	// numEntries is trusted now.
+	sp := make([]sparseEntry, 0, numEntries)
+	for i := int64(0); i < numEntries; i++ {
+		offset, err := strconv.ParseInt(sparseMap[2*i], 10, 64)
+		if err != nil {
+			return nil, ErrHeader
+		}
+		numBytes, err := strconv.ParseInt(sparseMap[2*i+1], 10, 64)
+		if err != nil {
+			return nil, ErrHeader
+		}
+		sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
+	}
+	return sp, nil
+}
+
+// numBytes returns the number of bytes left to read in the current file's entry
+// in the tar archive, or 0 if there is no current file.
+func (tr *Reader) numBytes() int64 {
+	if tr.curr == nil {
+		// No current file, so no bytes
+		return 0
+	}
+	return tr.curr.numBytes()
+}
+
+// Read reads from the current entry in the tar archive.
+// It returns 0, io.EOF when it reaches the end of that entry,
+// until Next is called to advance to the next entry.
+//
+// Calling Read on special types like TypeLink, TypeSymLink, TypeChar,
+// TypeBlock, TypeDir, and TypeFifo returns 0, io.EOF regardless of what
+// the Header.Size claims.
+func (tr *Reader) Read(b []byte) (n int, err error) {
+	if tr.err != nil {
+		return 0, tr.err
+	}
+	if tr.curr == nil {
+		return 0, io.EOF
+	}
+
+	n, err = tr.curr.Read(b)
+	if err != nil && err != io.EOF {
+		tr.err = err
+	}
+	return
+}
+
+func (rfr *regFileReader) Read(b []byte) (n int, err error) {
+	if rfr.nb == 0 {
+		// file consumed
+		return 0, io.EOF
+	}
+	if int64(len(b)) > rfr.nb {
+		b = b[0:rfr.nb]
+	}
+	n, err = rfr.r.Read(b)
+	rfr.nb -= int64(n)
+
+	if err == io.EOF && rfr.nb > 0 {
+		err = io.ErrUnexpectedEOF
+	}
+	return
+}
+
// numBytes returns the number of bytes left to read in the file's data in the tar archive.
func (rfr *regFileReader) numBytes() int64 {
	// nb is decremented by Read as data is consumed.
	return rfr.nb
}
+
+// newSparseFileReader creates a new sparseFileReader, but validates all of the
+// sparse entries before doing so.
+func newSparseFileReader(rfr numBytesReader, sp []sparseEntry, total int64) (*sparseFileReader, error) {
+	if total < 0 {
+		return nil, ErrHeader // Total size cannot be negative
+	}
+
+	// Validate all sparse entries. These are the same checks as performed by
+	// the BSD tar utility.
+	for i, s := range sp {
+		switch {
+		case s.offset < 0 || s.numBytes < 0:
+			return nil, ErrHeader // Negative values are never okay
+		case s.offset > math.MaxInt64-s.numBytes:
+			return nil, ErrHeader // Integer overflow with large length
+		case s.offset+s.numBytes > total:
+			return nil, ErrHeader // Region extends beyond the "real" size
+		case i > 0 && sp[i-1].offset+sp[i-1].numBytes > s.offset:
+			return nil, ErrHeader // Regions can't overlap and must be in order
+		}
+	}
+	return &sparseFileReader{rfr: rfr, sp: sp, total: total}, nil
+}
+
+// readHole reads a sparse hole ending at endOffset.
+func (sfr *sparseFileReader) readHole(b []byte, endOffset int64) int {
+	n64 := endOffset - sfr.pos
+	if n64 > int64(len(b)) {
+		n64 = int64(len(b))
+	}
+	n := int(n64)
+	for i := 0; i < n; i++ {
+		b[i] = 0
+	}
+	sfr.pos += n64
+	return n
+}
+
// Read reads the sparse file data in expanded form.
// Holes between (and after) data fragments are synthesized as zeros;
// actual fragment data is read from the underlying reader.
func (sfr *sparseFileReader) Read(b []byte) (n int, err error) {
	// Skip past all empty fragments.
	for len(sfr.sp) > 0 && sfr.sp[0].numBytes == 0 {
		sfr.sp = sfr.sp[1:]
	}

	// If there are no more fragments, then it is possible that there
	// is one last sparse hole.
	if len(sfr.sp) == 0 {
		// This behavior matches the BSD tar utility.
		// However, GNU tar stops returning data even if sfr.total is unmet.
		if sfr.pos < sfr.total {
			return sfr.readHole(b, sfr.total), nil
		}
		return 0, io.EOF
	}

	// In front of a data fragment, so read a hole.
	if sfr.pos < sfr.sp[0].offset {
		return sfr.readHole(b, sfr.sp[0].offset), nil
	}

	// In a data fragment, so read from it.
	// This math is overflow free since we verify that offset and numBytes can
	// be safely added when creating the sparseFileReader.
	endPos := sfr.sp[0].offset + sfr.sp[0].numBytes // End offset of fragment
	bytesLeft := endPos - sfr.pos                   // Bytes left in fragment
	if int64(len(b)) > bytesLeft {
		b = b[:bytesLeft]
	}

	n, err = sfr.rfr.Read(b)
	sfr.pos += int64(n)
	if err == io.EOF {
		if sfr.pos < endPos {
			err = io.ErrUnexpectedEOF // There was supposed to be more data
		} else if sfr.pos < sfr.total {
			err = nil // There is still an implicit sparse hole at the end
		}
	}

	if sfr.pos == endPos {
		sfr.sp = sfr.sp[1:] // We are done with this fragment, so pop it
	}
	return n, err
}
+
// numBytes returns the number of bytes left to read in the sparse file's
// sparse-encoded data in the tar archive.
// Note: this is the compressed (on-archive) count, not the expanded size.
func (sfr *sparseFileReader) numBytes() int64 {
	return sfr.rfr.numBytes()
}
diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/stat_atim.go b/vendor/github.com/vbatts/tar-split/archive/tar/stat_atim.go
new file mode 100644
index 0000000000000000000000000000000000000000..cf9cc79c5915bace0af0e81fa4df961f3a68553c
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/archive/tar/stat_atim.go
@@ -0,0 +1,20 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux dragonfly openbsd solaris
+
+package tar
+
+import (
+	"syscall"
+	"time"
+)
+
// statAtime returns the last-access time recorded in st's Atim field
// (the field name used on linux/dragonfly/openbsd/solaris per the build tag).
func statAtime(st *syscall.Stat_t) time.Time {
	return time.Unix(st.Atim.Unix())
}
+
// statCtime returns the status-change time recorded in st's Ctim field
// (the field name used on linux/dragonfly/openbsd/solaris per the build tag).
func statCtime(st *syscall.Stat_t) time.Time {
	return time.Unix(st.Ctim.Unix())
}
diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/stat_atimespec.go b/vendor/github.com/vbatts/tar-split/archive/tar/stat_atimespec.go
new file mode 100644
index 0000000000000000000000000000000000000000..6f17dbe30725c120218885cc662587b8c7dcb4d2
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/archive/tar/stat_atimespec.go
@@ -0,0 +1,20 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin freebsd netbsd
+
+package tar
+
+import (
+	"syscall"
+	"time"
+)
+
// statAtime returns the last-access time recorded in st's Atimespec field
// (the field name used on darwin/freebsd/netbsd per the build tag).
func statAtime(st *syscall.Stat_t) time.Time {
	return time.Unix(st.Atimespec.Unix())
}
+
// statCtime returns the status-change time recorded in st's Ctimespec field
// (the field name used on darwin/freebsd/netbsd per the build tag).
func statCtime(st *syscall.Stat_t) time.Time {
	return time.Unix(st.Ctimespec.Unix())
}
diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/stat_unix.go b/vendor/github.com/vbatts/tar-split/archive/tar/stat_unix.go
new file mode 100644
index 0000000000000000000000000000000000000000..cb843db4cfd65815830128cc0aa285dee36f7123
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/archive/tar/stat_unix.go
@@ -0,0 +1,32 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux darwin dragonfly freebsd openbsd netbsd solaris
+
+package tar
+
+import (
+	"os"
+	"syscall"
+)
+
// init registers statUnix as this package's stat hook on Unix-like
// platforms (sysStat is consumed elsewhere in the package).
func init() {
	sysStat = statUnix
}
+
// statUnix fills in h's system-dependent fields (Uid, Gid, AccessTime,
// ChangeTime) from fi's underlying *syscall.Stat_t. It is a no-op when
// fi.Sys() is not a *syscall.Stat_t.
func statUnix(fi os.FileInfo, h *Header) error {
	sys, ok := fi.Sys().(*syscall.Stat_t)
	if !ok {
		return nil
	}
	h.Uid = int(sys.Uid)
	h.Gid = int(sys.Gid)
	// TODO(bradfitz): populate username & group.  os/user
	// doesn't cache LookupId lookups, and lacks group
	// lookup functions.
	h.AccessTime = statAtime(sys)
	h.ChangeTime = statCtime(sys)
	// TODO(bradfitz): major/minor device numbers?
	return nil
}
diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/writer.go b/vendor/github.com/vbatts/tar-split/archive/tar/writer.go
new file mode 100644
index 0000000000000000000000000000000000000000..042638175cd84a75fa7b56b806f87c88612f45cc
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/archive/tar/writer.go
@@ -0,0 +1,416 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tar
+
+// TODO(dsymonds):
+// - catch more errors (no first header, etc.)
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"path"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+)
+
var (
	// ErrWriteTooLong is returned by Write when more than hdr.Size bytes
	// are written to the current entry.
	ErrWriteTooLong    = errors.New("archive/tar: write too long")
	// ErrFieldTooLong is recorded when a header field does not fit in its
	// fixed-size slot.
	ErrFieldTooLong    = errors.New("archive/tar: header field too long")
	// ErrWriteAfterClose is returned by Write and WriteHeader once Close
	// has been called.
	ErrWriteAfterClose = errors.New("archive/tar: write after close")
	// errInvalidHeader is returned by writeHeader when PAX records would be
	// required but allowPax is false.
	errInvalidHeader   = errors.New("archive/tar: header field too long or contains invalid values")
)
+
// A Writer provides sequential writing of a tar archive in POSIX.1 format.
// A tar archive consists of a sequence of files.
// Call WriteHeader to begin a new file, and then call Write to supply that file's data,
// writing at most hdr.Size bytes in total.
type Writer struct {
	w          io.Writer
	err        error // last error seen; checked by writeHeader, Flush, and Close
	nb         int64 // number of unwritten bytes for current file entry
	pad        int64 // amount of padding to write after current file entry
	closed     bool  // set by Close; further WriteHeader/Write calls fail
	usedBinary bool            // whether the binary numeric field extension was used
	preferPax  bool            // use pax header instead of binary numeric header
	hdrBuff    [blockSize]byte // buffer to use in writeHeader when writing a regular header
	paxHdrBuff [blockSize]byte // buffer to use in writeHeader when writing a pax header
}
+
// formatter encodes strings and numbers into fixed-size header fields.
// Encoding errors accumulate in err instead of being returned per call.
type formatter struct {
	err error // Last error seen
}
+
// NewWriter creates a new Writer writing to w.
// The caller must call Close to emit the trailing zero blocks.
func NewWriter(w io.Writer) *Writer { return &Writer{w: w} }
+
+// Flush finishes writing the current file (optional).
+func (tw *Writer) Flush() error {
+	if tw.nb > 0 {
+		tw.err = fmt.Errorf("archive/tar: missed writing %d bytes", tw.nb)
+		return tw.err
+	}
+
+	n := tw.nb + tw.pad
+	for n > 0 && tw.err == nil {
+		nr := n
+		if nr > blockSize {
+			nr = blockSize
+		}
+		var nw int
+		nw, tw.err = tw.w.Write(zeroBlock[0:nr])
+		n -= int64(nw)
+	}
+	tw.nb = 0
+	tw.pad = 0
+	return tw.err
+}
+
+// Write s into b, terminating it with a NUL if there is room.
+func (f *formatter) formatString(b []byte, s string) {
+	if len(s) > len(b) {
+		f.err = ErrFieldTooLong
+		return
+	}
+	ascii := toASCII(s)
+	copy(b, ascii)
+	if len(ascii) < len(b) {
+		b[len(ascii)] = 0
+	}
+}
+
+// Encode x as an octal ASCII string and write it into b with leading zeros.
+func (f *formatter) formatOctal(b []byte, x int64) {
+	s := strconv.FormatInt(x, 8)
+	// leading zeros, but leave room for a NUL.
+	for len(s)+1 < len(b) {
+		s = "0" + s
+	}
+	f.formatString(b, s)
+}
+
// fitsInBase256 reports whether x can be encoded into n bytes using base-256
// encoding. Unlike octal encoding, base-256 encoding does not require that the
// string ends with a NUL character. Thus, all n bytes are available for output.
//
// If operating in binary mode, this assumes strict GNU binary mode; which means
// that the first byte can only be either 0x80 or 0xff. Thus, the first byte is
// equivalent to the sign bit in two's complement form.
func fitsInBase256(n int, x int64) bool {
	// Nine or more bytes always fit: 8 payload bytes cover any int64.
	if n >= 9 {
		return true
	}
	// Otherwise n-1 bytes of two's-complement payload are available.
	bits := uint(n-1) * 8
	return x >= -1<<bits && x < 1<<bits
}
+
+// Write x into b, as binary (GNUtar/star extension).
+func (f *formatter) formatNumeric(b []byte, x int64) {
+	if fitsInBase256(len(b), x) {
+		for i := len(b) - 1; i >= 0; i-- {
+			b[i] = byte(x)
+			x >>= 8
+		}
+		b[0] |= 0x80 // Highest bit indicates binary format
+		return
+	}
+
+	f.formatOctal(b, 0) // Last resort, just write zero
+	f.err = ErrFieldTooLong
+}
+
var (
	// minTime is the Unix epoch; writeHeader writes 0 for any ModTime
	// outside [minTime, maxTime].
	minTime = time.Unix(0, 0)
	// There is room for 11 octal digits (33 bits) of mtime.
	maxTime = minTime.Add((1<<33 - 1) * time.Second)
)
+
// WriteHeader writes hdr and prepares to accept the file's contents.
// WriteHeader calls Flush if it is not the first header.
// Calling after a Close will return ErrWriteAfterClose.
func (tw *Writer) WriteHeader(hdr *Header) error {
	// allowPax=true: PAX records may be emitted for oversized fields.
	return tw.writeHeader(hdr, true)
}
+
// writeHeader writes hdr and prepares to accept the file's contents.
// It calls Flush if it is not the first header.
// Calling after a Close will return ErrWriteAfterClose.
// writePAXHeader calls this with allowPax=false to suppress recursive
// emission of PAX headers.
func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error {
	if tw.closed {
		return ErrWriteAfterClose
	}
	if tw.err == nil {
		tw.Flush()
	}
	if tw.err != nil {
		return tw.err
	}

	// a map to hold pax header records, if any are needed
	paxHeaders := make(map[string]string)

	// TODO(shanemhansen): we might want to use PAX headers for
	// subsecond time resolution, but for now let's just capture
	// too long fields or non ascii characters

	var f formatter
	var header []byte

	// We need to select which scratch buffer to use carefully,
	// since this method is called recursively to write PAX headers.
	// If allowPax is true, this is the non-recursive call, and we will use hdrBuff.
	// If allowPax is false, we are being called by writePAXHeader, and hdrBuff is
	// already being used by the non-recursive call, so we must use paxHdrBuff.
	header = tw.hdrBuff[:]
	if !allowPax {
		header = tw.paxHdrBuff[:]
	}
	copy(header, zeroBlock)
	s := slicer(header)

	// Wrappers around formatter that automatically sets paxHeaders if the
	// argument extends beyond the capacity of the input byte slice.
	var formatString = func(b []byte, s string, paxKeyword string) {
		needsPaxHeader := paxKeyword != paxNone && len(s) > len(b) || !isASCII(s)
		if needsPaxHeader {
			paxHeaders[paxKeyword] = s
			return
		}
		f.formatString(b, s)
	}
	var formatNumeric = func(b []byte, x int64, paxKeyword string) {
		// Try octal first.
		s := strconv.FormatInt(x, 8)
		if len(s) < len(b) {
			f.formatOctal(b, x)
			return
		}

		// If it is too long for octal, and PAX is preferred, use a PAX header.
		if paxKeyword != paxNone && tw.preferPax {
			f.formatOctal(b, 0)
			s := strconv.FormatInt(x, 10)
			paxHeaders[paxKeyword] = s
			return
		}

		// Fall back to the GNU binary (base-256) extension.
		tw.usedBinary = true
		f.formatNumeric(b, x)
	}

	// keep a reference to the filename to allow to overwrite it later if we detect that we can use ustar longnames instead of pax
	pathHeaderBytes := s.next(fileNameSize)

	formatString(pathHeaderBytes, hdr.Name, paxPath)

	// Handle out of range ModTime carefully.
	var modTime int64
	if !hdr.ModTime.Before(minTime) && !hdr.ModTime.After(maxTime) {
		modTime = hdr.ModTime.Unix()
	}

	f.formatOctal(s.next(8), hdr.Mode)               // 100:108
	formatNumeric(s.next(8), int64(hdr.Uid), paxUid) // 108:116
	formatNumeric(s.next(8), int64(hdr.Gid), paxGid) // 116:124
	formatNumeric(s.next(12), hdr.Size, paxSize)     // 124:136
	formatNumeric(s.next(12), modTime, paxNone)      // 136:148 --- consider using pax for finer granularity
	s.next(8)                                        // chksum (148:156)
	s.next(1)[0] = hdr.Typeflag                      // 156:157

	formatString(s.next(100), hdr.Linkname, paxLinkpath)

	copy(s.next(8), []byte("ustar\x0000"))          // 257:265
	formatString(s.next(32), hdr.Uname, paxUname)   // 265:297
	formatString(s.next(32), hdr.Gname, paxGname)   // 297:329
	formatNumeric(s.next(8), hdr.Devmajor, paxNone) // 329:337
	formatNumeric(s.next(8), hdr.Devminor, paxNone) // 337:345

	// keep a reference to the prefix to allow to overwrite it later if we detect that we can use ustar longnames instead of pax
	prefixHeaderBytes := s.next(155)
	formatString(prefixHeaderBytes, "", paxNone) // 345:500  prefix

	// Use the GNU magic instead of POSIX magic if we used any GNU extensions.
	if tw.usedBinary {
		copy(header[257:265], []byte("ustar  \x00"))
	}

	_, paxPathUsed := paxHeaders[paxPath]
	// try to use a ustar header when only the name is too long
	if !tw.preferPax && len(paxHeaders) == 1 && paxPathUsed {
		prefix, suffix, ok := splitUSTARPath(hdr.Name)
		if ok {
			// Since we can encode in USTAR format, disable PAX header.
			delete(paxHeaders, paxPath)

			// Update the path fields
			formatString(pathHeaderBytes, suffix, paxNone)
			formatString(prefixHeaderBytes, prefix, paxNone)
		}
	}

	// The chksum field is terminated by a NUL and a space.
	// This is different from the other octal fields.
	chksum, _ := checksum(header)
	f.formatOctal(header[148:155], chksum) // Never fails
	header[155] = ' '

	// Check if there were any formatting errors.
	if f.err != nil {
		tw.err = f.err
		return tw.err
	}

	// Extended attributes can only be carried in PAX records.
	if allowPax {
		for k, v := range hdr.Xattrs {
			paxHeaders[paxXattr+k] = v
		}
	}

	if len(paxHeaders) > 0 {
		if !allowPax {
			return errInvalidHeader
		}
		if err := tw.writePAXHeader(hdr, paxHeaders); err != nil {
			return err
		}
	}
	// Record how many data bytes the caller must write, plus the padding
	// needed to reach the next block boundary.
	tw.nb = int64(hdr.Size)
	tw.pad = (blockSize - (tw.nb % blockSize)) % blockSize

	_, tw.err = tw.w.Write(header)
	return tw.err
}
+
+// splitUSTARPath splits a path according to USTAR prefix and suffix rules.
+// If the path is not splittable, then it will return ("", "", false).
+func splitUSTARPath(name string) (prefix, suffix string, ok bool) {
+	length := len(name)
+	if length <= fileNameSize || !isASCII(name) {
+		return "", "", false
+	} else if length > fileNamePrefixSize+1 {
+		length = fileNamePrefixSize + 1
+	} else if name[length-1] == '/' {
+		length--
+	}
+
+	i := strings.LastIndex(name[:length], "/")
+	nlen := len(name) - i - 1 // nlen is length of suffix
+	plen := i                 // plen is length of prefix
+	if i <= 0 || nlen > fileNameSize || nlen == 0 || plen > fileNamePrefixSize {
+		return "", "", false
+	}
+	return name[:i], name[i+1:], true
+}
+
+// writePaxHeader writes an extended pax header to the
+// archive.
+func (tw *Writer) writePAXHeader(hdr *Header, paxHeaders map[string]string) error {
+	// Prepare extended header
+	ext := new(Header)
+	ext.Typeflag = TypeXHeader
+	// Setting ModTime is required for reader parsing to
+	// succeed, and seems harmless enough.
+	ext.ModTime = hdr.ModTime
+	// The spec asks that we namespace our pseudo files
+	// with the current pid.  However, this results in differing outputs
+	// for identical inputs.  As such, the constant 0 is now used instead.
+	// golang.org/issue/12358
+	dir, file := path.Split(hdr.Name)
+	fullName := path.Join(dir, "PaxHeaders.0", file)
+
+	ascii := toASCII(fullName)
+	if len(ascii) > 100 {
+		ascii = ascii[:100]
+	}
+	ext.Name = ascii
+	// Construct the body
+	var buf bytes.Buffer
+
+	// Keys are sorted before writing to body to allow deterministic output.
+	var keys []string
+	for k := range paxHeaders {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+
+	for _, k := range keys {
+		fmt.Fprint(&buf, formatPAXRecord(k, paxHeaders[k]))
+	}
+
+	ext.Size = int64(len(buf.Bytes()))
+	if err := tw.writeHeader(ext, false); err != nil {
+		return err
+	}
+	if _, err := tw.Write(buf.Bytes()); err != nil {
+		return err
+	}
+	if err := tw.Flush(); err != nil {
+		return err
+	}
+	return nil
+}
+
// formatPAXRecord formats a single PAX record, prefixing it with the
// appropriate length.
func formatPAXRecord(k, v string) string {
	const padding = 3 // Extra padding for ' ', '=', and '\n'

	// First estimate the total size, then re-derive it because writing the
	// decimal size field itself may lengthen the record by a digit.
	size := len(k) + len(v) + padding
	size += len(strconv.Itoa(size))
	record := fmt.Sprintf("%d %s=%s\n", size, k, v)
	if len(record) != size {
		size = len(record)
		record = fmt.Sprintf("%d %s=%s\n", size, k, v)
	}
	return record
}
+
+// Write writes to the current entry in the tar archive.
+// Write returns the error ErrWriteTooLong if more than
+// hdr.Size bytes are written after WriteHeader.
+func (tw *Writer) Write(b []byte) (n int, err error) {
+	if tw.closed {
+		err = ErrWriteAfterClose
+		return
+	}
+	overwrite := false
+	if int64(len(b)) > tw.nb {
+		b = b[0:tw.nb]
+		overwrite = true
+	}
+	n, err = tw.w.Write(b)
+	tw.nb -= int64(n)
+	if err == nil && overwrite {
+		err = ErrWriteTooLong
+		return
+	}
+	tw.err = err
+	return
+}
+
+// Close closes the tar archive, flushing any unwritten
+// data to the underlying writer.
+func (tw *Writer) Close() error {
+	if tw.err != nil || tw.closed {
+		return tw.err
+	}
+	tw.Flush()
+	tw.closed = true
+	if tw.err != nil {
+		return tw.err
+	}
+
+	// trailer: two zero blocks
+	for i := 0; i < 2; i++ {
+		_, tw.err = tw.w.Write(zeroBlock)
+		if tw.err != nil {
+			break
+		}
+	}
+	return tw.err
+}
diff --git a/vendor/github.com/vbatts/tar-split/tar/asm/README.md b/vendor/github.com/vbatts/tar-split/tar/asm/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..2a3a5b56a6246651f6a8bb3fe3519be3c4dd9675
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/tar/asm/README.md
@@ -0,0 +1,44 @@
+asm
+===
+
This library is for the assembly and disassembly of tar archives, facilitated by
`github.com/vbatts/tar-split/tar/storage`.
+
+
+Concerns
+--------
+
+For completely safe assembly/disassembly, there will need to be a Content
+Addressable Storage (CAS) directory, that maps to a checksum in the
+`storage.Entity` of `storage.FileType`.
+
+This is due to the fact that tar archives _can_ allow multiple records for the
+same path, but the last one effectively wins. Even if the prior records had a
+different payload. 
+
+In this way, when assembling an archive from relative paths, if the archive has
+multiple entries for the same path, then all payloads read in from a relative
+path would be identical.
+
+
+Thoughts
+--------
+
+Have a look-aside directory or storage. This way when a clobbering record is
+encountered from the tar stream, then the payload of the prior/existing file is
+stored to the CAS. This way the clobbering record's file payload can be
+extracted, but we'll have preserved the payload needed to reassemble a precise
+tar archive.
+
+clobbered/path/to/file.[0-N]
+
+*alternatively*
+
+We could just _not_ support tar streams that have clobbering file paths.
+Appending records to the archive is not incredibly common, and doesn't happen
+by default for most implementations.  Not supporting them wouldn't be a
+security concern either, as if it did occur, we would reassemble an archive
+that doesn't validate signature/checksum, so it shouldn't be trusted anyway.
+
+Otherwise, this will allow us to defer support for appended files as a FUTURE FEATURE.
+
diff --git a/vendor/github.com/vbatts/tar-split/tar/asm/assemble.go b/vendor/github.com/vbatts/tar-split/tar/asm/assemble.go
new file mode 100644
index 0000000000000000000000000000000000000000..d624450ab7d5bb8ceffbcb015fc885bc7ce5b852
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/tar/asm/assemble.go
@@ -0,0 +1,130 @@
+package asm
+
+import (
+	"bytes"
+	"fmt"
+	"hash"
+	"hash/crc64"
+	"io"
+	"sync"
+
+	"github.com/vbatts/tar-split/tar/storage"
+)
+
+// NewOutputTarStream returns an io.ReadCloser that is an assembled tar archive
+// stream.
+//
+// It takes a storage.FileGetter, for mapping the file payloads that are to be read in,
+// and a storage.Unpacker, which has access to the rawbytes and file order
+// metadata. With the combination of these two items, a precise assembled Tar
+// archive is possible.
+func NewOutputTarStream(fg storage.FileGetter, up storage.Unpacker) io.ReadCloser {
+	// ... Since these are interfaces, this is possible, so let's not have a nil pointer
+	if fg == nil || up == nil {
+		return nil
+	}
+	pr, pw := io.Pipe()
+	go func() {
+		err := WriteOutputTarStream(fg, up, pw)
+		if err != nil {
+			pw.CloseWithError(err)
+		} else {
+			pw.Close()
+		}
+	}()
+	return pr
+}
+
+// WriteOutputTarStream writes assembled tar archive to a writer.
+func WriteOutputTarStream(fg storage.FileGetter, up storage.Unpacker, w io.Writer) error {
+	// ... Since these are interfaces, this is possible, so let's not have a nil pointer
+	if fg == nil || up == nil {
+		return nil
+	}
+	var copyBuffer []byte
+	var crcHash hash.Hash
+	var crcSum []byte
+	var multiWriter io.Writer
+	for {
+		entry, err := up.Next()
+		if err != nil {
+			if err == io.EOF {
+				return nil
+			}
+			return err
+		}
+		switch entry.Type {
+		case storage.SegmentType:
+			if _, err := w.Write(entry.Payload); err != nil {
+				return err
+			}
+		case storage.FileType:
+			if entry.Size == 0 {
+				continue
+			}
+			fh, err := fg.Get(entry.GetName())
+			if err != nil {
+				return err
+			}
+			if crcHash == nil {
+				crcHash = crc64.New(storage.CRCTable)
+				crcSum = make([]byte, 8)
+				multiWriter = io.MultiWriter(w, crcHash)
+				copyBuffer = byteBufferPool.Get().([]byte)
+				defer byteBufferPool.Put(copyBuffer)
+			} else {
+				crcHash.Reset()
+			}
+
+			if _, err := copyWithBuffer(multiWriter, fh, copyBuffer); err != nil {
+				fh.Close()
+				return err
+			}
+
+			if !bytes.Equal(crcHash.Sum(crcSum[:0]), entry.Payload) {
+				// I would rather this be a comparable ErrInvalidChecksum or such,
+				// but since it's coming through the PipeReader, the context of
+				// _which_ file would be lost...
+				fh.Close()
+				return fmt.Errorf("file integrity checksum failed for %q", entry.GetName())
+			}
+			fh.Close()
+		}
+	}
+}
+
+var byteBufferPool = &sync.Pool{
+	New: func() interface{} {
+		return make([]byte, 32*1024)
+	},
+}
+
+// copyWithBuffer is taken from stdlib io.Copy implementation
+// https://github.com/golang/go/blob/go1.5.1/src/io/io.go#L367
+func copyWithBuffer(dst io.Writer, src io.Reader, buf []byte) (written int64, err error) {
+	for {
+		nr, er := src.Read(buf)
+		if nr > 0 {
+			nw, ew := dst.Write(buf[0:nr])
+			if nw > 0 {
+				written += int64(nw)
+			}
+			if ew != nil {
+				err = ew
+				break
+			}
+			if nr != nw {
+				err = io.ErrShortWrite
+				break
+			}
+		}
+		if er == io.EOF {
+			break
+		}
+		if er != nil {
+			err = er
+			break
+		}
+	}
+	return written, err
+}
diff --git a/vendor/github.com/vbatts/tar-split/tar/asm/disassemble.go b/vendor/github.com/vbatts/tar-split/tar/asm/disassemble.go
new file mode 100644
index 0000000000000000000000000000000000000000..54ef23aed3660d2b00befd1caff27d33b2ad6a9b
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/tar/asm/disassemble.go
@@ -0,0 +1,141 @@
+package asm
+
+import (
+	"io"
+	"io/ioutil"
+
+	"github.com/vbatts/tar-split/archive/tar"
+	"github.com/vbatts/tar-split/tar/storage"
+)
+
+// NewInputTarStream wraps the Reader stream of a tar archive and provides a
+// Reader stream of the same.
+//
+// In the middle it will pack the segments and file metadata to storage.Packer
+// `p`.
+//
+// The the storage.FilePutter is where payload of files in the stream are
+// stashed. If this stashing is not needed, you can provide a nil
+// storage.FilePutter. Since the checksumming is still needed, then a default
+// of NewDiscardFilePutter will be used internally
+func NewInputTarStream(r io.Reader, p storage.Packer, fp storage.FilePutter) (io.Reader, error) {
+	// What to do here... folks will want their own access to the Reader that is
+	// their tar archive stream, but we'll need that same stream to use our
+	// forked 'archive/tar'.
+	// Perhaps do an io.TeeReader that hands back an io.Reader for them to read
+	// from, and we'll MITM the stream to store metadata.
+	// We'll need a storage.FilePutter too ...
+
+	// Another concern, whether to do any storage.FilePutter operations, such that we
+	// don't extract any amount of the archive. But then again, we're not making
+	// files/directories, hardlinks, etc. Just writing the io to the storage.FilePutter.
+	// Perhaps we have a DiscardFilePutter that is a bit bucket.
+
+	// we'll return the pipe reader, since TeeReader does not buffer and will
+	// only read what the outputRdr Read's. Since Tar archives have padding on
+	// the end, we want to be the one reading the padding, even if the user's
+	// `archive/tar` doesn't care.
+	pR, pW := io.Pipe()
+	outputRdr := io.TeeReader(r, pW)
+
+	// we need a putter that will generate the crc64 sums of file payloads
+	if fp == nil {
+		fp = storage.NewDiscardFilePutter()
+	}
+
+	go func() {
+		tr := tar.NewReader(outputRdr)
+		tr.RawAccounting = true
+		for {
+			hdr, err := tr.Next()
+			if err != nil {
+				if err != io.EOF {
+					pW.CloseWithError(err)
+					return
+				}
+				// even when an EOF is reached, there is often 1024 null bytes on
+				// the end of an archive. Collect them too.
+				if b := tr.RawBytes(); len(b) > 0 {
+					_, err := p.AddEntry(storage.Entry{
+						Type:    storage.SegmentType,
+						Payload: b,
+					})
+					if err != nil {
+						pW.CloseWithError(err)
+						return
+					}
+				}
+				break // not return. We need the end of the reader.
+			}
+			if hdr == nil {
+				break // not return. We need the end of the reader.
+			}
+
+			if b := tr.RawBytes(); len(b) > 0 {
+				_, err := p.AddEntry(storage.Entry{
+					Type:    storage.SegmentType,
+					Payload: b,
+				})
+				if err != nil {
+					pW.CloseWithError(err)
+					return
+				}
+			}
+
+			var csum []byte
+			if hdr.Size > 0 {
+				var err error
+				_, csum, err = fp.Put(hdr.Name, tr)
+				if err != nil {
+					pW.CloseWithError(err)
+					return
+				}
+			}
+
+			entry := storage.Entry{
+				Type:    storage.FileType,
+				Size:    hdr.Size,
+				Payload: csum,
+			}
+			// For proper marshalling of non-utf8 characters
+			entry.SetName(hdr.Name)
+
+			// File entries added, regardless of size
+			_, err = p.AddEntry(entry)
+			if err != nil {
+				pW.CloseWithError(err)
+				return
+			}
+
+			if b := tr.RawBytes(); len(b) > 0 {
+				_, err = p.AddEntry(storage.Entry{
+					Type:    storage.SegmentType,
+					Payload: b,
+				})
+				if err != nil {
+					pW.CloseWithError(err)
+					return
+				}
+			}
+		}
+
+		// it is allowable, and not uncommon that there is further padding on the
+		// end of an archive, apart from the expected 1024 null bytes.
+		remainder, err := ioutil.ReadAll(outputRdr)
+		if err != nil && err != io.EOF {
+			pW.CloseWithError(err)
+			return
+		}
+		_, err = p.AddEntry(storage.Entry{
+			Type:    storage.SegmentType,
+			Payload: remainder,
+		})
+		if err != nil {
+			pW.CloseWithError(err)
+			return
+		}
+		pW.Close()
+	}()
+
+	return pR, nil
+}
diff --git a/vendor/github.com/vbatts/tar-split/tar/asm/doc.go b/vendor/github.com/vbatts/tar-split/tar/asm/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..4367b90220eba5051b0b0d482ff54e4fec6d00fb
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/tar/asm/doc.go
@@ -0,0 +1,9 @@
+/*
+Package asm provides the API for streaming assembly and disassembly of tar
+archives.
+
+Using the `github.com/vbatts/tar-split/tar/storage` for Packing/Unpacking the
+metadata for a stream, as well as an implementation of Getting/Putting the file
+entries' payload.
+*/
+package asm
diff --git a/vendor/github.com/vbatts/tar-split/tar/storage/doc.go b/vendor/github.com/vbatts/tar-split/tar/storage/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..83f7089ff156b055386352e56677cbd6aa313878
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/tar/storage/doc.go
@@ -0,0 +1,12 @@
+/*
+Package storage is for metadata of a tar archive.
+
+Packing and unpacking the Entries of the stream. The types of streams are
+either segments of raw bytes (for the raw headers and various padding) and for
+an entry marking a file payload.
+
+The raw bytes are stored precisely in the packed (marshalled) Entry, whereas
+the file payload marker include the name of the file, size, and crc64 checksum
+(for basic file integrity).
+*/
+package storage
diff --git a/vendor/github.com/vbatts/tar-split/tar/storage/entry.go b/vendor/github.com/vbatts/tar-split/tar/storage/entry.go
new file mode 100644
index 0000000000000000000000000000000000000000..c91e7ea1e823ca90084d83adc67837da182584cd
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/tar/storage/entry.go
@@ -0,0 +1,78 @@
+package storage
+
+import "unicode/utf8"
+
+// Entries is for sorting by Position
+type Entries []Entry
+
+func (e Entries) Len() int           { return len(e) }
+func (e Entries) Swap(i, j int)      { e[i], e[j] = e[j], e[i] }
+func (e Entries) Less(i, j int) bool { return e[i].Position < e[j].Position }
+
+// Type of Entry
+type Type int
+
+const (
+	// FileType represents a file payload from the tar stream.
+	//
+	// This will be used to map to relative paths on disk. Only Size > 0 will get
+	// read into a resulting output stream (due to hardlinks).
+	FileType Type = 1 + iota
+	// SegmentType represents a raw bytes segment from the archive stream. These raw
+	// byte segments consist of the raw headers and various padding.
+	//
+	// Its payload is to be marshalled base64 encoded.
+	SegmentType
+)
+
+// Entry is the structure for packing and unpacking the information read from
+// the Tar archive.
+//
+// FileType Payload checksum is using `hash/crc64` for basic file integrity,
+// _not_ for cryptography.
+// From http://www.backplane.com/matt/crc64.html, CRC32 has almost 40,000
+// collisions in a sample of 18.2 million, CRC64 had none.
+type Entry struct {
+	Type     Type   `json:"type"`
+	Name     string `json:"name,omitempty"`
+	NameRaw  []byte `json:"name_raw,omitempty"`
+	Size     int64  `json:"size,omitempty"`
+	Payload  []byte `json:"payload"` // SegmentType stores payload here; FileType stores crc64 checksum here;
+	Position int    `json:"position"`
+}
+
+// SetName will check name for valid UTF-8 string, and set the appropriate
+// field. See https://github.com/vbatts/tar-split/issues/17
+func (e *Entry) SetName(name string) {
+	if utf8.ValidString(name) {
+		e.Name = name
+	} else {
+		e.NameRaw = []byte(name)
+	}
+}
+
+// SetNameBytes will check name for valid UTF-8 string, and set the appropriate
+// field
+func (e *Entry) SetNameBytes(name []byte) {
+	if utf8.Valid(name) {
+		e.Name = string(name)
+	} else {
+		e.NameRaw = name
+	}
+}
+
+// GetName returns the string for the entry's name, regardless of the field stored in
+func (e *Entry) GetName() string {
+	if len(e.NameRaw) > 0 {
+		return string(e.NameRaw)
+	}
+	return e.Name
+}
+
+// GetNameBytes returns the bytes for the entry's name, regardless of the field stored in
+func (e *Entry) GetNameBytes() []byte {
+	if len(e.NameRaw) > 0 {
+		return e.NameRaw
+	}
+	return []byte(e.Name)
+}
diff --git a/vendor/github.com/vbatts/tar-split/tar/storage/getter.go b/vendor/github.com/vbatts/tar-split/tar/storage/getter.go
new file mode 100644
index 0000000000000000000000000000000000000000..ae11f8ffd4e8fa889812fd701adbde3b812bdbf8
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/tar/storage/getter.go
@@ -0,0 +1,104 @@
+package storage
+
+import (
+	"bytes"
+	"errors"
+	"hash/crc64"
+	"io"
+	"os"
+	"path/filepath"
+)
+
+// FileGetter is the interface for getting a stream of a file payload,
+// addressed by name/filename. Presumably, the names will be scoped to relative
+// file paths.
+type FileGetter interface {
+	// Get returns a stream for the provided file path
+	Get(filename string) (output io.ReadCloser, err error)
+}
+
+// FilePutter is the interface for storing a stream of a file payload,
+// addressed by name/filename.
+type FilePutter interface {
+	// Put returns the size of the stream received, and the crc64 checksum for
+	// the provided stream
+	Put(filename string, input io.Reader) (size int64, checksum []byte, err error)
+}
+
+// FileGetPutter is the interface that groups both Getting and Putting file
+// payloads.
+type FileGetPutter interface {
+	FileGetter
+	FilePutter
+}
+
+// NewPathFileGetter returns a FileGetter that is for files relative to path
+// relpath.
+func NewPathFileGetter(relpath string) FileGetter {
+	return &pathFileGetter{root: relpath}
+}
+
+type pathFileGetter struct {
+	root string
+}
+
+func (pfg pathFileGetter) Get(filename string) (io.ReadCloser, error) {
+	return os.Open(filepath.Join(pfg.root, filename))
+}
+
+type bufferFileGetPutter struct {
+	files map[string][]byte
+}
+
+func (bfgp bufferFileGetPutter) Get(name string) (io.ReadCloser, error) {
+	if _, ok := bfgp.files[name]; !ok {
+		return nil, errors.New("no such file")
+	}
+	b := bytes.NewBuffer(bfgp.files[name])
+	return &readCloserWrapper{b}, nil
+}
+
+func (bfgp *bufferFileGetPutter) Put(name string, r io.Reader) (int64, []byte, error) {
+	crc := crc64.New(CRCTable)
+	buf := bytes.NewBuffer(nil)
+	cw := io.MultiWriter(crc, buf)
+	i, err := io.Copy(cw, r)
+	if err != nil {
+		return 0, nil, err
+	}
+	bfgp.files[name] = buf.Bytes()
+	return i, crc.Sum(nil), nil
+}
+
+type readCloserWrapper struct {
+	io.Reader
+}
+
+func (w *readCloserWrapper) Close() error { return nil }
+
+// NewBufferFileGetPutter is a simple in-memory FileGetPutter
+//
+// Implication is this is memory intensive...
+// Probably best for testing or light weight cases.
+func NewBufferFileGetPutter() FileGetPutter {
+	return &bufferFileGetPutter{
+		files: map[string][]byte{},
+	}
+}
+
+// NewDiscardFilePutter is a bit bucket FilePutter
+func NewDiscardFilePutter() FilePutter {
+	return &bitBucketFilePutter{}
+}
+
+type bitBucketFilePutter struct {
+}
+
+func (bbfp *bitBucketFilePutter) Put(name string, r io.Reader) (int64, []byte, error) {
+	c := crc64.New(CRCTable)
+	i, err := io.Copy(c, r)
+	return i, c.Sum(nil), err
+}
+
+// CRCTable is the default table used for crc64 sum calculations
+var CRCTable = crc64.MakeTable(crc64.ISO)
diff --git a/vendor/github.com/vbatts/tar-split/tar/storage/packer.go b/vendor/github.com/vbatts/tar-split/tar/storage/packer.go
new file mode 100644
index 0000000000000000000000000000000000000000..aba694818549b65dddf999a719d5f38c108bde5f
--- /dev/null
+++ b/vendor/github.com/vbatts/tar-split/tar/storage/packer.go
@@ -0,0 +1,127 @@
+package storage
+
+import (
+	"encoding/json"
+	"errors"
+	"io"
+	"path/filepath"
+	"unicode/utf8"
+)
+
+// ErrDuplicatePath occurs when a tar archive has more than one entry for the
+// same file path
+var ErrDuplicatePath = errors.New("duplicates of file paths not supported")
+
+// Packer describes the methods to pack Entries to a storage destination
+type Packer interface {
+	// AddEntry packs the Entry and returns its position
+	AddEntry(e Entry) (int, error)
+}
+
+// Unpacker describes the methods to read Entries from a source
+type Unpacker interface {
+	// Next returns the next Entry being unpacked, or error, until io.EOF
+	Next() (*Entry, error)
+}
+
+/* TODO(vbatts) figure out a good model for this
+type PackUnpacker interface {
+	Packer
+	Unpacker
+}
+*/
+
+type jsonUnpacker struct {
+	seen seenNames
+	dec  *json.Decoder
+}
+
+func (jup *jsonUnpacker) Next() (*Entry, error) {
+	var e Entry
+	err := jup.dec.Decode(&e)
+	if err != nil {
+		return nil, err
+	}
+
+	// check for dup name
+	if e.Type == FileType {
+		cName := filepath.Clean(e.GetName())
+		if _, ok := jup.seen[cName]; ok {
+			return nil, ErrDuplicatePath
+		}
+		jup.seen[cName] = struct{}{}
+	}
+
+	return &e, err
+}
+
+// NewJSONUnpacker provides an Unpacker that reads Entries (SegmentType and
+// FileType) as a json document.
+//
+// Each Entry read are expected to be delimited by new line.
+func NewJSONUnpacker(r io.Reader) Unpacker {
+	return &jsonUnpacker{
+		dec:  json.NewDecoder(r),
+		seen: seenNames{},
+	}
+}
+
+type jsonPacker struct {
+	w    io.Writer
+	e    *json.Encoder
+	pos  int
+	seen seenNames
+}
+
+type seenNames map[string]struct{}
+
+func (jp *jsonPacker) AddEntry(e Entry) (int, error) {
+	// if Name is not valid utf8, switch it to raw first.
+	if e.Name != "" {
+		if !utf8.ValidString(e.Name) {
+			e.NameRaw = []byte(e.Name)
+			e.Name = ""
+		}
+	}
+
+	// check early for dup name
+	if e.Type == FileType {
+		cName := filepath.Clean(e.GetName())
+		if _, ok := jp.seen[cName]; ok {
+			return -1, ErrDuplicatePath
+		}
+		jp.seen[cName] = struct{}{}
+	}
+
+	e.Position = jp.pos
+	err := jp.e.Encode(e)
+	if err != nil {
+		return -1, err
+	}
+
+	// made it this far, increment now
+	jp.pos++
+	return e.Position, nil
+}
+
+// NewJSONPacker provides a Packer that writes each Entry (SegmentType and
+// FileType) as a json document.
+//
+// The Entries are delimited by new line.
+func NewJSONPacker(w io.Writer) Packer {
+	return &jsonPacker{
+		w:    w,
+		e:    json.NewEncoder(w),
+		seen: seenNames{},
+	}
+}
+
+/*
+TODO(vbatts) perhaps have a more compact packer/unpacker, maybe using msgapck
+(https://github.com/ugorji/go)
+
+
+Even though, since our jsonUnpacker and jsonPacker just take
+io.Reader/io.Writer, then we can get away with passing them a
+gzip.Reader/gzip.Writer
+*/
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s b/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s
new file mode 100644
index 0000000000000000000000000000000000000000..724e580c4ea7787476bbd2bf5cc9105808ac1c3d
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s
@@ -0,0 +1,28 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+// +build mips64 mips64le
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System calls for mips64, Linux
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT	·Syscall(SB),NOSPLIT,$0-56
+	JMP	syscall·Syscall(SB)
+
+TEXT	·Syscall6(SB),NOSPLIT,$0-80
+	JMP	syscall·Syscall6(SB)
+
+TEXT	·RawSyscall(SB),NOSPLIT,$0-56
+	JMP	syscall·RawSyscall(SB)
+
+TEXT	·RawSyscall6(SB),NOSPLIT,$0-80
+	JMP	syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh
index bba8f755e09d1917c77cd9974438ea8a11b80ea2..de95a4bbcf501f3b2392002cd87fe5993f531fb9 100755
--- a/vendor/golang.org/x/sys/unix/mkall.sh
+++ b/vendor/golang.org/x/sys/unix/mkall.sh
@@ -116,12 +116,12 @@ _* | *_ | _)
 darwin_386)
 	mkerrors="$mkerrors -m32"
 	mksyscall="./mksyscall.pl -l32"
-	mksysnum="./mksysnum_darwin.pl /usr/include/sys/syscall.h"
+	mksysnum="./mksysnum_darwin.pl $(xcrun --show-sdk-path --sdk macosx)/usr/include/sys/syscall.h"
 	mktypes="GOARCH=$GOARCH go tool cgo -godefs"
 	;;
 darwin_amd64)
 	mkerrors="$mkerrors -m64"
-	mksysnum="./mksysnum_darwin.pl /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.9.sdk/usr/include/sys/syscall.h"
+	mksysnum="./mksysnum_darwin.pl $(xcrun --show-sdk-path --sdk macosx)/usr/include/sys/syscall.h"
 	mktypes="GOARCH=$GOARCH go tool cgo -godefs"
 	;;
 darwin_arm)
@@ -131,7 +131,7 @@ darwin_arm)
 	;;
 darwin_arm64)
 	mkerrors="$mkerrors -m64"
-	mksysnum="./mksysnum_darwin.pl /usr/include/sys/syscall.h"
+	mksysnum="./mksysnum_darwin.pl $(xcrun --show-sdk-path --sdk iphoneos)/usr/include/sys/syscall.h"
 	mktypes="GOARCH=$GOARCH go tool cgo -godefs"
 	;;
 dragonfly_386)
@@ -267,5 +267,8 @@ esac
 	esac
 	if [ -n "$mksysctl" ]; then echo "$mksysctl |gofmt >$zsysctl"; fi
 	if [ -n "$mksysnum" ]; then echo "$mksysnum |gofmt >zsysnum_$GOOSARCH.go"; fi
-	if [ -n "$mktypes" ]; then echo "$mktypes types_$GOOS.go | gofmt >ztypes_$GOOSARCH.go"; fi
+	if [ -n "$mktypes" ]; then
+		echo "echo // +build $GOARCH,$GOOS > ztypes_$GOOSARCH.go";
+		echo "$mktypes types_$GOOS.go | gofmt >>ztypes_$GOOSARCH.go";
+	fi
 ) | $run
diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh
index 8e9670352c4dc8d8f5243f6a30517368e4e9a97c..c40d788c4ab3f5b6327bb21e11a73c1a57a64a19 100755
--- a/vendor/golang.org/x/sys/unix/mkerrors.sh
+++ b/vendor/golang.org/x/sys/unix/mkerrors.sh
@@ -12,11 +12,16 @@ export LC_ALL=C
 export LC_CTYPE=C
 
 if test -z "$GOARCH" -o -z "$GOOS"; then
-  echo 1>&2 "GOARCH or GOOS not defined in environment"
-  exit 1
+	echo 1>&2 "GOARCH or GOOS not defined in environment"
+	exit 1
 fi
 
-CC=${CC:-gcc}
+CC=${CC:-cc}
+
+if [[ "$GOOS" -eq "solaris" ]]; then
+	# Assumes GNU versions of utilities in PATH.
+	export PATH=/usr/gnu/bin:$PATH
+fi
 
 uname=$(uname)
 
@@ -38,7 +43,6 @@ includes_Darwin='
 #include <net/route.h>
 #include <netinet/in.h>
 #include <netinet/ip.h>
-#include <netinet/ip_mroute.h>
 #include <termios.h>
 '
 
@@ -124,7 +128,7 @@ includes_Linux='
 #include <linux/wait.h>
 #include <linux/icmpv6.h>
 #include <net/route.h>
-#include <termios.h>
+#include <asm/termbits.h>
 
 #ifndef MSG_FASTOPEN
 #define MSG_FASTOPEN    0x20000000
@@ -201,6 +205,7 @@ includes_OpenBSD='
 '
 
 includes_SunOS='
+#include <limits.h>
 #include <sys/types.h>
 #include <sys/socket.h>
 #include <sys/sockio.h>
@@ -271,21 +276,31 @@ ccflags="$@"
 		$2 !~ /^EXPR_/ &&
 		$2 ~ /^E[A-Z0-9_]+$/ ||
 		$2 ~ /^B[0-9_]+$/ ||
+		$2 == "BOTHER" ||
+		$2 ~ /^CI?BAUD(EX)?$/ ||
+		$2 == "IBSHIFT" ||
 		$2 ~ /^V[A-Z0-9]+$/ ||
 		$2 ~ /^CS[A-Z0-9]/ ||
-		$2 ~ /^I(SIG|CANON|CRNL|EXTEN|MAXBEL|STRIP|UTF8)$/ ||
+		$2 ~ /^I(SIG|CANON|CRNL|UCLC|EXTEN|MAXBEL|STRIP|UTF8)$/ ||
 		$2 ~ /^IGN/ ||
 		$2 ~ /^IX(ON|ANY|OFF)$/ ||
 		$2 ~ /^IN(LCR|PCK)$/ ||
 		$2 ~ /(^FLU?SH)|(FLU?SH$)/ ||
-		$2 ~ /^C(LOCAL|READ)$/ ||
+		$2 ~ /^C(LOCAL|READ|MSPAR|RTSCTS)$/ ||
 		$2 == "BRKINT" ||
 		$2 == "HUPCL" ||
 		$2 == "PENDIN" ||
 		$2 == "TOSTOP" ||
+		$2 == "XCASE" ||
+		$2 == "ALTWERASE" ||
+		$2 == "NOKERNINFO" ||
 		$2 ~ /^PAR/ ||
 		$2 ~ /^SIG[^_]/ ||
-		$2 ~ /^O[CNPFP][A-Z]+[^_][A-Z]+$/ ||
+		$2 ~ /^O[CNPFPL][A-Z]+[^_][A-Z]+$/ ||
+		$2 ~ /^(NL|CR|TAB|BS|VT|FF)DLY$/ ||
+		$2 ~ /^(NL|CR|TAB|BS|VT|FF)[0-9]$/ ||
+		$2 ~ /^O?XTABS$/ ||
+		$2 ~ /^TC[IO](ON|OFF)$/ ||
 		$2 ~ /^IN_/ ||
 		$2 ~ /^LOCK_(SH|EX|NB|UN)$/ ||
 		$2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|ICMP6|TCP|EVFILT|NOTE|EV|SHUT|PROT|MAP|PACKET|MSG|SCM|MCL|DT|MADV|PR)_/ ||
@@ -304,6 +319,9 @@ ccflags="$@"
 		$2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P)_/ ||
 		$2 ~ /^SIOC/ ||
 		$2 ~ /^TIOC/ ||
+		$2 ~ /^TCGET/ ||
+		$2 ~ /^TCSET/ ||
+		$2 ~ /^TC(FLSH|SBRKP?|XONC)$/ ||
 		$2 !~ "RTF_BITS" &&
 		$2 ~ /^(IFF|IFT|NET_RT|RTM|RTF|RTV|RTA|RTAX)_/ ||
 		$2 ~ /^BIOC/ ||
diff --git a/vendor/golang.org/x/sys/unix/mksyscall_solaris.pl b/vendor/golang.org/x/sys/unix/mksyscall_solaris.pl
index f17b6125b519ad06937addfade9051c4f733aeba..06bade76877612905c43023cd54011e311c908b4 100755
--- a/vendor/golang.org/x/sys/unix/mksyscall_solaris.pl
+++ b/vendor/golang.org/x/sys/unix/mksyscall_solaris.pl
@@ -110,9 +110,9 @@ while(<>) {
 	$sysname =~ y/A-Z/a-z/; # All libc functions are lowercase.
 
 	# Runtime import of function to allow cross-platform builds.
-	$dynimports .= "//go:cgo_import_dynamic ${modname}_${sysname} ${sysname} \"$modname.so\"\n";
+	$dynimports .= "//go:cgo_import_dynamic libc_${sysname} ${sysname} \"$modname.so\"\n";
 	# Link symbol to proc address variable.
-	$linknames .= "//go:linkname ${sysvarname} ${modname}_${sysname}\n";
+	$linknames .= "//go:linkname ${sysvarname} libc_${sysname}\n";
 	# Library proc address variable.
 	push @vars, $sysvarname;
 
diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_unix.go b/vendor/golang.org/x/sys/unix/sockcmsg_unix.go
index 6668bec711dd8d0249dc602e2a6c73e06656bc93..70af5a728e5ad08fbe8e821e2c0e04eaf9b4dc53 100644
--- a/vendor/golang.org/x/sys/unix/sockcmsg_unix.go
+++ b/vendor/golang.org/x/sys/unix/sockcmsg_unix.go
@@ -77,10 +77,10 @@ func UnixRights(fds ...int) []byte {
 	h.Level = SOL_SOCKET
 	h.Type = SCM_RIGHTS
 	h.SetLen(CmsgLen(datalen))
-	data := uintptr(cmsgData(h))
+	data := cmsgData(h)
 	for _, fd := range fds {
-		*(*int32)(unsafe.Pointer(data)) = int32(fd)
-		data += 4
+		*(*int32)(data) = int32(fd)
+		data = unsafe.Pointer(uintptr(data) + 4)
 	}
 	return b
 }
diff --git a/vendor/golang.org/x/sys/unix/syscall.go b/vendor/golang.org/x/sys/unix/syscall.go
index 6442a9939e21f49dd7e7baf03e3cb29d7f72e252..a0bcf842ca8fd36a11f4e589366e8a13d0182ab8 100644
--- a/vendor/golang.org/x/sys/unix/syscall.go
+++ b/vendor/golang.org/x/sys/unix/syscall.go
@@ -68,6 +68,8 @@ func (tv *Timeval) Nano() int64 {
 	return int64(tv.Sec)*1e9 + int64(tv.Usec)*1000
 }
 
+func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }
+
 // use is a no-op, but the compiler cannot see that it is.
 // Calling use(p) ensures that p is kept live until that point.
 //go:noescape
diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go
index 9679dec89824db517b0ce2ef1f471b718dc958c5..e9671764ccb4e4dfd8d49f2002a5abbdd6b1015b 100644
--- a/vendor/golang.org/x/sys/unix/syscall_bsd.go
+++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go
@@ -450,16 +450,34 @@ func Kevent(kq int, changes, events []Kevent_t, timeout *Timespec) (n int, err e
 
 //sys	sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL
 
-func Sysctl(name string) (value string, err error) {
+// sysctlmib translates name to mib number and appends any additional args.
+func sysctlmib(name string, args ...int) ([]_C_int, error) {
 	// Translate name to mib number.
 	mib, err := nametomib(name)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, a := range args {
+		mib = append(mib, _C_int(a))
+	}
+
+	return mib, nil
+}
+
+func Sysctl(name string) (string, error) {
+	return SysctlArgs(name)
+}
+
+func SysctlArgs(name string, args ...int) (string, error) {
+	mib, err := sysctlmib(name, args...)
 	if err != nil {
 		return "", err
 	}
 
 	// Find size.
 	n := uintptr(0)
-	if err = sysctl(mib, nil, &n, nil, 0); err != nil {
+	if err := sysctl(mib, nil, &n, nil, 0); err != nil {
 		return "", err
 	}
 	if n == 0 {
@@ -468,7 +486,7 @@ func Sysctl(name string) (value string, err error) {
 
 	// Read into buffer of that size.
 	buf := make([]byte, n)
-	if err = sysctl(mib, &buf[0], &n, nil, 0); err != nil {
+	if err := sysctl(mib, &buf[0], &n, nil, 0); err != nil {
 		return "", err
 	}
 
@@ -479,17 +497,19 @@ func Sysctl(name string) (value string, err error) {
 	return string(buf[0:n]), nil
 }
 
-func SysctlUint32(name string) (value uint32, err error) {
-	// Translate name to mib number.
-	mib, err := nametomib(name)
+func SysctlUint32(name string) (uint32, error) {
+	return SysctlUint32Args(name)
+}
+
+func SysctlUint32Args(name string, args ...int) (uint32, error) {
+	mib, err := sysctlmib(name, args...)
 	if err != nil {
 		return 0, err
 	}
 
-	// Read into buffer of that size.
 	n := uintptr(4)
 	buf := make([]byte, 4)
-	if err = sysctl(mib, &buf[0], &n, nil, 0); err != nil {
+	if err := sysctl(mib, &buf[0], &n, nil, 0); err != nil {
 		return 0, err
 	}
 	if n != 4 {
@@ -498,6 +518,49 @@ func SysctlUint32(name string) (value uint32, err error) {
 	return *(*uint32)(unsafe.Pointer(&buf[0])), nil
 }
 
+func SysctlUint64(name string, args ...int) (uint64, error) {
+	mib, err := sysctlmib(name, args...)
+	if err != nil {
+		return 0, err
+	}
+
+	n := uintptr(8)
+	buf := make([]byte, 8)
+	if err := sysctl(mib, &buf[0], &n, nil, 0); err != nil {
+		return 0, err
+	}
+	if n != 8 {
+		return 0, EIO
+	}
+	return *(*uint64)(unsafe.Pointer(&buf[0])), nil
+}
+
+func SysctlRaw(name string, args ...int) ([]byte, error) {
+	mib, err := sysctlmib(name, args...)
+	if err != nil {
+		return nil, err
+	}
+
+	// Find size.
+	n := uintptr(0)
+	if err := sysctl(mib, nil, &n, nil, 0); err != nil {
+		return nil, err
+	}
+	if n == 0 {
+		return nil, nil
+	}
+
+	// Read into buffer of that size.
+	buf := make([]byte, n)
+	if err := sysctl(mib, &buf[0], &n, nil, 0); err != nil {
+		return nil, err
+	}
+
+	// The actual call may return less than the original reported required
+	// size so ensure we deal with that.
+	return buf[:n], nil
+}
+
 //sys	utimes(path string, timeval *[2]Timeval) (err error)
 
 func Utimes(path string, tv []Timeval) error {
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_386.go b/vendor/golang.org/x/sys/unix/syscall_darwin_386.go
index 974078571da76b5968d25c2600143078021f4345..c172a3da5a3ab666e19ad71b1977c8a55cd3414a 100644
--- a/vendor/golang.org/x/sys/unix/syscall_darwin_386.go
+++ b/vendor/golang.org/x/sys/unix/syscall_darwin_386.go
@@ -21,8 +21,6 @@ func NsecToTimespec(nsec int64) (ts Timespec) {
 	return
 }
 
-func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }
-
 func NsecToTimeval(nsec int64) (tv Timeval) {
 	nsec += 999 // round up to microsecond
 	tv.Usec = int32(nsec % 1e9 / 1e3)
@@ -73,3 +71,7 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
 }
 
 func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
+
+// SYS___SYSCTL is used by syscall_bsd.go for all BSDs, but in modern versions
+// of darwin/386 the syscall is called sysctl instead of __sysctl.
+const SYS___SYSCTL = SYS_SYSCTL
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go
index 7b0cb07eab95b3b7c463a999ef5fc89a85f4f914..fc1e5a4a8257de048e8af82fdc9e49841785bae0 100644
--- a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go
@@ -11,6 +11,8 @@ import (
 	"unsafe"
 )
 
+//sys	Fchmodat(dirfd int, path string, mode uint32, flags int) (err error)
+
 func Getpagesize() int { return 4096 }
 
 func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
@@ -21,8 +23,6 @@ func NsecToTimespec(nsec int64) (ts Timespec) {
 	return
 }
 
-func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }
-
 func NsecToTimeval(nsec int64) (tv Timeval) {
 	nsec += 999 // round up to microsecond
 	tv.Usec = int32(nsec % 1e9 / 1e3)
@@ -73,3 +73,7 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
 }
 
 func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
+
+// SYS___SYSCTL is used by syscall_bsd.go for all BSDs, but in modern versions
+// of darwin/amd64 the syscall is called sysctl instead of __sysctl.
+const SYS___SYSCTL = SYS_SYSCTL
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go
index e47ffd7396735e968adf90307c9b6d54fd544213..d286cf408d802757371504aada45c504ecc600d1 100644
--- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go
+++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go
@@ -19,8 +19,6 @@ func NsecToTimespec(nsec int64) (ts Timespec) {
 	return
 }
 
-func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }
-
 func NsecToTimeval(nsec int64) (tv Timeval) {
 	nsec += 999 // round up to microsecond
 	tv.Usec = int32(nsec % 1e9 / 1e3)
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go
index ac8a493965184e107556ec3aadab167635f5d724..c33905cdcd962c71da781e587101b32fda04e973 100644
--- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go
@@ -21,8 +21,6 @@ func NsecToTimespec(nsec int64) (ts Timespec) {
 	return
 }
 
-func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }
-
 func NsecToTimeval(nsec int64) (tv Timeval) {
 	nsec += 999 // round up to microsecond
 	tv.Usec = int32(nsec % 1e9 / 1e3)
@@ -73,3 +71,7 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
 }
 
 func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) // sic
+
+// SYS___SYSCTL is used by syscall_bsd.go for all BSDs, but in modern versions
+// of darwin/arm64 the syscall is called sysctl instead of __sysctl.
+const SYS___SYSCTL = SYS_SYSCTL
diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly_386.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly_386.go
index 41c2e69782a6f835f5b0e35e5af99cf3d6846015..60fec8cecd96bf2456ba615e411235887555c37d 100644
--- a/vendor/golang.org/x/sys/unix/syscall_dragonfly_386.go
+++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly_386.go
@@ -21,8 +21,6 @@ func NsecToTimespec(nsec int64) (ts Timespec) {
 	return
 }
 
-func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }
-
 func NsecToTimeval(nsec int64) (tv Timeval) {
 	nsec += 999 // round up to microsecond
 	tv.Usec = int32(nsec % 1e9 / 1e3)
diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go
index 2ed92590e27908c904db36674308f66896694315..da7cb7982cddb54f21998e7043bb95a30fd74609 100644
--- a/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go
@@ -21,8 +21,6 @@ func NsecToTimespec(nsec int64) (ts Timespec) {
 	return
 }
 
-func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }
-
 func NsecToTimeval(nsec int64) (tv Timeval) {
 	nsec += 999 // round up to microsecond
 	tv.Usec = nsec % 1e9 / 1e3
diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go
index 6255d40ff860f7e743692459ac9af1edce75a489..6a0cd804d88275ee6f6107b9931ef90c8a2c4ddb 100644
--- a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go
+++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go
@@ -21,8 +21,6 @@ func NsecToTimespec(nsec int64) (ts Timespec) {
 	return
 }
 
-func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }
-
 func NsecToTimeval(nsec int64) (tv Timeval) {
 	nsec += 999 // round up to microsecond
 	tv.Usec = int32(nsec % 1e9 / 1e3)
diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go
index 8b395d596dcb5502d6f6e46e1a0cc16bdb58d65c..e142540efa428ac99f0eceb55f60655b9734ec32 100644
--- a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go
@@ -21,8 +21,6 @@ func NsecToTimespec(nsec int64) (ts Timespec) {
 	return
 }
 
-func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }
-
 func NsecToTimeval(nsec int64) (tv Timeval) {
 	nsec += 999 // round up to microsecond
 	tv.Usec = nsec % 1e9 / 1e3
diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go
index 4e72d46a81666ff4f3b7dbc4dcf0c3b4b008a849..5504cb1255941c90fdfa09ac8615de5729fb4adf 100644
--- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go
@@ -21,8 +21,6 @@ func NsecToTimespec(nsec int64) (ts Timespec) {
 	return
 }
 
-func TimevalToNsec(tv Timeval) int64 { return tv.Sec*1e9 + int64(tv.Usec)*1e3 }
-
 func NsecToTimeval(nsec int64) (tv Timeval) {
 	nsec += 999 // round up to microsecond
 	tv.Usec = int32(nsec % 1e9 / 1e3)
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go
index 9df719571d1e7bf475c53632b636138ba8d93f58..464344e5fc394d5111d5ef669d947f74719fc6d3 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux.go
@@ -36,10 +36,10 @@ func Creat(path string, mode uint32) (fd int, err error) {
 	return Open(path, O_CREAT|O_WRONLY|O_TRUNC, mode)
 }
 
-//sys	linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error)
+//sys	Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error)
 
 func Link(oldpath string, newpath string) (err error) {
-	return linkat(AT_FDCWD, oldpath, AT_FDCWD, newpath, 0)
+	return Linkat(AT_FDCWD, oldpath, AT_FDCWD, newpath, 0)
 }
 
 func Mkdir(path string, mode uint32) (err error) {
@@ -86,19 +86,30 @@ func Unlink(path string) error {
 
 //sys	unlinkat(dirfd int, path string, flags int) (err error)
 
-func Unlinkat(dirfd int, path string) error {
-	return unlinkat(dirfd, path, 0)
+func Unlinkat(dirfd int, path string, flags int) error {
+	return unlinkat(dirfd, path, flags)
 }
 
 //sys	utimes(path string, times *[2]Timeval) (err error)
 
-func Utimes(path string, tv []Timeval) (err error) {
+func Utimes(path string, tv []Timeval) error {
 	if tv == nil {
+		err := utimensat(AT_FDCWD, path, nil, 0)
+		if err != ENOSYS {
+			return err
+		}
 		return utimes(path, nil)
 	}
 	if len(tv) != 2 {
 		return EINVAL
 	}
+	var ts [2]Timespec
+	ts[0] = NsecToTimespec(TimevalToNsec(tv[0]))
+	ts[1] = NsecToTimespec(TimevalToNsec(tv[1]))
+	err := utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0)
+	if err != ENOSYS {
+		return err
+	}
 	return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0])))
 }
 
@@ -886,6 +897,7 @@ func Getpgrp() (pid int) {
 //sys	Pause() (err error)
 //sys	PivotRoot(newroot string, putold string) (err error) = SYS_PIVOT_ROOT
 //sysnb prlimit(pid int, resource int, old *Rlimit, newlimit *Rlimit) (err error) = SYS_PRLIMIT64
+//sys   Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error)
 //sys	read(fd int, p []byte) (n int, err error)
 //sys	Removexattr(path string, attr string) (err error)
 //sys	Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error)
@@ -894,6 +906,7 @@ func Getpgrp() (pid int) {
 //sysnb	Setpgid(pid int, pgid int) (err error)
 //sysnb	Setsid() (pid int, err error)
 //sysnb	Settimeofday(tv *Timeval) (err error)
+//sys	Setns(fd int, nstype int) (err error)
 
 // issue 1435.
 // On linux Setuid and Setgid only affects the current thread, not the process.
@@ -920,7 +933,6 @@ func Setgid(uid int) (err error) {
 //sys	Unmount(target string, flags int) (err error) = SYS_UMOUNT2
 //sys	Unshare(flags int) (err error)
 //sys	Ustat(dev int, ubuf *Ustat_t) (err error)
-//sys	Utime(path string, buf *Utimbuf) (err error)
 //sys	write(fd int, p []byte) (n int, err error)
 //sys	exitThread(code int) (err error) = SYS_EXIT
 //sys	readlen(fd int, p *byte, np int) (n int, err error) = SYS_READ
@@ -1022,7 +1034,6 @@ func Munmap(b []byte) (err error) {
 // Personality
 // Poll
 // Ppoll
-// Prctl
 // Pselect6
 // Ptrace
 // Putpmsg
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_386.go
index 7171219af7f303b2073a44952cdec7f2413c2442..d5dde1b9467dc23e999264c040bc670543e3c8a2 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_386.go
@@ -24,8 +24,6 @@ func NsecToTimespec(nsec int64) (ts Timespec) {
 	return
 }
 
-func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }
-
 func NsecToTimeval(nsec int64) (tv Timeval) {
 	nsec += 999 // round up to microsecond
 	tv.Sec = int32(nsec / 1e9)
@@ -181,6 +179,8 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) {
 //sysnb	Gettimeofday(tv *Timeval) (err error)
 //sysnb	Time(t *Time_t) (tt Time_t, err error)
 
+//sys	Utime(path string, buf *Utimbuf) (err error)
+
 // On x86 Linux, all the socket calls go through an extra indirection,
 // I think because the 5-register system call interface can't handle
 // the 6-argument calls like sendto and recvfrom.  Instead the
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go
index ae70c2afca0d7977b24171aa9f39f0c41cfcde2b..b7fa9e4f52c25e68483c39c5a9cfa7be48b8113c 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go
@@ -86,6 +86,8 @@ func Time(t *Time_t) (tt Time_t, err error) {
 	return Time_t(tv.Sec), nil
 }
 
+//sys	Utime(path string, buf *Utimbuf) (err error)
+
 func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
 
 func NsecToTimespec(nsec int64) (ts Timespec) {
@@ -94,8 +96,6 @@ func NsecToTimespec(nsec int64) (ts Timespec) {
 	return
 }
 
-func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }
-
 func NsecToTimeval(nsec int64) (tv Timeval) {
 	nsec += 999 // round up to microsecond
 	tv.Sec = nsec / 1e9
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go
index abc41c3ea5d7bdea0bc2f1f89bf0b0befd8f560f..3b4da2061b9d5181ff087fee28c017c026dfb09e 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go
@@ -108,7 +108,26 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) {
 
 // Vsyscalls on amd64.
 //sysnb	Gettimeofday(tv *Timeval) (err error)
-//sysnb	Time(t *Time_t) (tt Time_t, err error)
+
+func Time(t *Time_t) (Time_t, error) {
+	var tv Timeval
+	err := Gettimeofday(&tv)
+	if err != nil {
+		return 0, err
+	}
+	if t != nil {
+		*t = Time_t(tv.Sec)
+	}
+	return Time_t(tv.Sec), nil
+}
+
+func Utime(path string, buf *Utimbuf) error {
+	tv := []Timeval{
+		{Sec: buf.Actime},
+		{Sec: buf.Modtime},
+	}
+	return Utimes(path, tv)
+}
 
 //sys   Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
 //sys   Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
@@ -158,7 +177,7 @@ type rlimit32 struct {
 	Max uint32
 }
 
-//sysnb getrlimit(resource int, rlim *rlimit32) (err error) = SYS_GETRLIMIT
+//sysnb getrlimit(resource int, rlim *rlimit32) (err error) = SYS_UGETRLIMIT
 
 const rlimInf32 = ^uint32(0)
 const rlimInf64 = ^uint64(0)
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go
index f3d72dfd3016fb8dca36158e1368074b7d1fe75d..9e2e8b72f688dd5beed3c7a43a9ff9fdc7045f9b 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go
@@ -70,7 +70,6 @@ func Lstat(path string, stat *Stat_t) (err error) {
 func Getpagesize() int { return 65536 }
 
 //sysnb	Gettimeofday(tv *Timeval) (err error)
-//sysnb	Time(t *Time_t) (tt Time_t, err error)
 
 func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
 
@@ -80,8 +79,6 @@ func NsecToTimespec(nsec int64) (ts Timespec) {
 	return
 }
 
-func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }
-
 func NsecToTimeval(nsec int64) (tv Timeval) {
 	nsec += 999 // round up to microsecond
 	tv.Sec = nsec / 1e9
@@ -89,6 +86,26 @@ func NsecToTimeval(nsec int64) (tv Timeval) {
 	return
 }
 
+func Time(t *Time_t) (Time_t, error) {
+	var tv Timeval
+	err := Gettimeofday(&tv)
+	if err != nil {
+		return 0, err
+	}
+	if t != nil {
+		*t = Time_t(tv.Sec)
+	}
+	return Time_t(tv.Sec), nil
+}
+
+func Utime(path string, buf *Utimbuf) error {
+	tv := []Timeval{
+		{Sec: buf.Actime},
+		{Sec: buf.Modtime},
+	}
+	return Utimes(path, tv)
+}
+
 func Pipe(p []int) (err error) {
 	if len(p) != 2 {
 		return EINVAL
@@ -133,6 +150,10 @@ func InotifyInit() (fd int, err error) {
 	return InotifyInit1(0)
 }
 
+func Dup2(oldfd int, newfd int) (err error) {
+	return Dup3(oldfd, newfd, 0)
+}
+
 // TODO(dfc): constants that should be in zsysnum_linux_arm64.go, remove
 // these when the deprecated syscalls that the syscall package relies on
 // are removed.
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go
new file mode 100644
index 0000000000000000000000000000000000000000..ce7b420c9add233fb34d03aa549d208d7b75fb27
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go
@@ -0,0 +1,204 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+// +build mips64 mips64le
+
+package unix
+
+// Linux introduced getdents64 syscall for N64 ABI only in 3.10
+// (May 21 2013, rev dec33abaafc89bcbd78f85fad0513170415a26d5),
+// to support older kernels, we have to use getdents for mips64.
+// Also note that struct dirent is different for these two.
+// Lookup linux_dirent{,64} in kernel source code for details.
+const _SYS_getdents = SYS_GETDENTS
+
+//sys	Fchown(fd int, uid int, gid int) (err error)
+//sys	Fstatfs(fd int, buf *Statfs_t) (err error)
+//sys	Ftruncate(fd int, length int64) (err error)
+//sysnb	Getegid() (egid int)
+//sysnb	Geteuid() (euid int)
+//sysnb	Getgid() (gid int)
+//sysnb	Getrlimit(resource int, rlim *Rlimit) (err error)
+//sysnb	Getuid() (uid int)
+//sys	Lchown(path string, uid int, gid int) (err error)
+//sys	Listen(s int, n int) (err error)
+//sys	Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
+//sys	Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
+//sys	Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK
+//sys	Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS_PSELECT6
+//sys	sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
+//sys	Setfsgid(gid int) (err error)
+//sys	Setfsuid(uid int) (err error)
+//sysnb	Setregid(rgid int, egid int) (err error)
+//sysnb	Setresgid(rgid int, egid int, sgid int) (err error)
+//sysnb	Setresuid(ruid int, euid int, suid int) (err error)
+//sysnb	Setrlimit(resource int, rlim *Rlimit) (err error)
+//sysnb	Setreuid(ruid int, euid int) (err error)
+//sys	Shutdown(fd int, how int) (err error)
+//sys	Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)
+//sys	Statfs(path string, buf *Statfs_t) (err error)
+//sys	SyncFileRange(fd int, off int64, n int64, flags int) (err error)
+//sys	Truncate(path string, length int64) (err error)
+//sys	accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error)
+//sys	accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error)
+//sys	bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
+//sys	connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
+//sysnb	getgroups(n int, list *_Gid_t) (nn int, err error)
+//sysnb	setgroups(n int, list *_Gid_t) (err error)
+//sys	getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error)
+//sys	setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error)
+//sysnb	socket(domain int, typ int, proto int) (fd int, err error)
+//sysnb	socketpair(domain int, typ int, proto int, fd *[2]int32) (err error)
+//sysnb	getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error)
+//sysnb	getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error)
+//sys	recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error)
+//sys	sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error)
+//sys	recvmsg(s int, msg *Msghdr, flags int) (n int, err error)
+//sys	sendmsg(s int, msg *Msghdr, flags int) (n int, err error)
+//sys	mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error)
+
+func Getpagesize() int { return 65536 }
+
+//sysnb	Gettimeofday(tv *Timeval) (err error)
+
+func Time(t *Time_t) (tt Time_t, err error) {
+	var tv Timeval
+	err = Gettimeofday(&tv)
+	if err != nil {
+		return 0, err
+	}
+	if t != nil {
+		*t = Time_t(tv.Sec)
+	}
+	return Time_t(tv.Sec), nil
+}
+
+//sys	Utime(path string, buf *Utimbuf) (err error)
+
+func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
+
+func NsecToTimespec(nsec int64) (ts Timespec) {
+	ts.Sec = nsec / 1e9
+	ts.Nsec = nsec % 1e9
+	return
+}
+
+func NsecToTimeval(nsec int64) (tv Timeval) {
+	nsec += 999 // round up to microsecond
+	tv.Sec = nsec / 1e9
+	tv.Usec = nsec % 1e9 / 1e3
+	return
+}
+
+func Pipe(p []int) (err error) {
+	if len(p) != 2 {
+		return EINVAL
+	}
+	var pp [2]_C_int
+	err = pipe2(&pp, 0)
+	p[0] = int(pp[0])
+	p[1] = int(pp[1])
+	return
+}
+
+//sysnb pipe2(p *[2]_C_int, flags int) (err error)
+
+func Pipe2(p []int, flags int) (err error) {
+	if len(p) != 2 {
+		return EINVAL
+	}
+	var pp [2]_C_int
+	err = pipe2(&pp, flags)
+	p[0] = int(pp[0])
+	p[1] = int(pp[1])
+	return
+}
+
+func Ioperm(from int, num int, on int) (err error) {
+	return ENOSYS
+}
+
+func Iopl(level int) (err error) {
+	return ENOSYS
+}
+
+type stat_t struct {
+	Dev        uint32
+	Pad0       [3]int32
+	Ino        uint64
+	Mode       uint32
+	Nlink      uint32
+	Uid        uint32
+	Gid        uint32
+	Rdev       uint32
+	Pad1       [3]uint32
+	Size       int64
+	Atime      uint32
+	Atime_nsec uint32
+	Mtime      uint32
+	Mtime_nsec uint32
+	Ctime      uint32
+	Ctime_nsec uint32
+	Blksize    uint32
+	Pad2       uint32
+	Blocks     int64
+}
+
+//sys	fstat(fd int, st *stat_t) (err error)
+//sys	lstat(path string, st *stat_t) (err error)
+//sys	stat(path string, st *stat_t) (err error)
+
+func Fstat(fd int, s *Stat_t) (err error) {
+	st := &stat_t{}
+	err = fstat(fd, st)
+	fillStat_t(s, st)
+	return
+}
+
+func Lstat(path string, s *Stat_t) (err error) {
+	st := &stat_t{}
+	err = lstat(path, st)
+	fillStat_t(s, st)
+	return
+}
+
+func Stat(path string, s *Stat_t) (err error) {
+	st := &stat_t{}
+	err = stat(path, st)
+	fillStat_t(s, st)
+	return
+}
+
+func fillStat_t(s *Stat_t, st *stat_t) {
+	s.Dev = st.Dev
+	s.Ino = st.Ino
+	s.Mode = st.Mode
+	s.Nlink = st.Nlink
+	s.Uid = st.Uid
+	s.Gid = st.Gid
+	s.Rdev = st.Rdev
+	s.Size = st.Size
+	s.Atim = Timespec{int64(st.Atime), int64(st.Atime_nsec)}
+	s.Mtim = Timespec{int64(st.Mtime), int64(st.Mtime_nsec)}
+	s.Ctim = Timespec{int64(st.Ctime), int64(st.Ctime_nsec)}
+	s.Blksize = st.Blksize
+	s.Blocks = st.Blocks
+}
+
+func (r *PtraceRegs) PC() uint64 { return r.Regs[64] }
+
+func (r *PtraceRegs) SetPC(pc uint64) { r.Regs[64] = pc }
+
+func (iov *Iovec) SetLen(length int) {
+	iov.Len = uint64(length)
+}
+
+func (msghdr *Msghdr) SetControllen(length int) {
+	msghdr.Controllen = uint64(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+	cmsg.Len = uint64(length)
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go
index 67eed6334c4794dd02ae8ddb58964094a4fc3c2f..9560ffa6680a694035043bcc53a9cb1369239375 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go
@@ -7,6 +7,7 @@
 
 package unix
 
+//sys	Dup2(oldfd int, newfd int) (err error)
 //sys	Fchown(fd int, uid int, gid int) (err error)
 //sys	Fstat(fd int, stat *Stat_t) (err error)
 //sys	Fstatfs(fd int, buf *Statfs_t) (err error)
@@ -62,6 +63,8 @@ func Getpagesize() int { return 65536 }
 //sysnb	Gettimeofday(tv *Timeval) (err error)
 //sysnb	Time(t *Time_t) (tt Time_t, err error)
 
+//sys	Utime(path string, buf *Utimbuf) (err error)
+
 func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
 
 func NsecToTimespec(nsec int64) (ts Timespec) {
@@ -70,8 +73,6 @@ func NsecToTimespec(nsec int64) (ts Timespec) {
 	return
 }
 
-func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }
-
 func NsecToTimeval(nsec int64) (tv Timeval) {
 	nsec += 999 // round up to microsecond
 	tv.Sec = nsec / 1e9
diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go
index 1b0e1af1257018a08932877290bd7a537e59a6d3..afaca09838afe9133e961de8b10c7274309b6deb 100644
--- a/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go
+++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go
@@ -16,8 +16,6 @@ func NsecToTimespec(nsec int64) (ts Timespec) {
 	return
 }
 
-func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }
-
 func NsecToTimeval(nsec int64) (tv Timeval) {
 	nsec += 999 // round up to microsecond
 	tv.Usec = int32(nsec % 1e9 / 1e3)
diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go
index 1b6dcbe35d6ebfafa2115fcf5305454d05656d1f..a6ff04ce5bd2e28293722e69acf822ff13c79f41 100644
--- a/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go
@@ -16,8 +16,6 @@ func NsecToTimespec(nsec int64) (ts Timespec) {
 	return
 }
 
-func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }
-
 func NsecToTimeval(nsec int64) (tv Timeval) {
 	nsec += 999 // round up to microsecond
 	tv.Usec = int32(nsec % 1e9 / 1e3)
diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go
index 87d1d6fed1e1d26c3f114f1f30416d8e3cb67cf3..68a6969b285b5e6cf4365eb39bf6d2b3883e8537 100644
--- a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go
@@ -16,8 +16,6 @@ func NsecToTimespec(nsec int64) (ts Timespec) {
 	return
 }
 
-func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }
-
 func NsecToTimeval(nsec int64) (tv Timeval) {
 	nsec += 999 // round up to microsecond
 	tv.Usec = int32(nsec % 1e9 / 1e3)
diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go
index 9529b20e82ec1a5584d7a9f116214543fed9c0a7..a66ddc59ce9e70f5a097f305e75a991e08fe4caa 100644
--- a/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go
+++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go
@@ -16,8 +16,6 @@ func NsecToTimespec(nsec int64) (ts Timespec) {
 	return
 }
 
-func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }
-
 func NsecToTimeval(nsec int64) (tv Timeval) {
 	nsec += 999 // round up to microsecond
 	tv.Usec = int32(nsec % 1e9 / 1e3)
diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go
index fc6402946e3d3ca4a486cb53f9fa561b19c1d41f..0776c1faf98babcd2b9cdddcd0040f864505fa6f 100644
--- a/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go
@@ -16,8 +16,6 @@ func NsecToTimespec(nsec int64) (ts Timespec) {
 	return
 }
 
-func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }
-
 func NsecToTimeval(nsec int64) (tv Timeval) {
 	nsec += 999 // round up to microsecond
 	tv.Usec = nsec % 1e9 / 1e3
diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go
index ab54718f63dfa97c922d31607b2760bfff4fc153..eb489b159fd611ffebf511029c32b4da86bf79ad 100644
--- a/vendor/golang.org/x/sys/unix/syscall_solaris.go
+++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go
@@ -13,6 +13,7 @@
 package unix
 
 import (
+	"sync/atomic"
 	"syscall"
 	"unsafe"
 )
@@ -138,6 +139,8 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) {
 	return unsafe.Pointer(&sa.raw), sl, nil
 }
 
+//sys	getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) = libsocket.getsockname
+
 func Getsockname(fd int) (sa Sockaddr, err error) {
 	var rsa RawSockaddrAny
 	var len _Socklen = SizeofSockaddrAny
@@ -147,12 +150,23 @@ func Getsockname(fd int) (sa Sockaddr, err error) {
 	return anyToSockaddr(&rsa)
 }
 
-// The const provides a compile-time constant so clients
-// can adjust to whether there is a working Getwd and avoid
-// even linking this function into the binary.  See ../os/getwd.go.
-const ImplementsGetwd = false
+const ImplementsGetwd = true
 
-func Getwd() (string, error) { return "", ENOTSUP }
+//sys	Getcwd(buf []byte) (n int, err error)
+
+func Getwd() (wd string, err error) {
+	var buf [PathMax]byte
+	// Getcwd will return an error if it failed for any reason.
+	_, err = Getcwd(buf[0:])
+	if err != nil {
+		return "", err
+	}
+	n := clen(buf[:])
+	if n < 1 {
+		return "", EINVAL
+	}
+	return string(buf[:n]), nil
+}
 
 /*
  * Wrapped
@@ -163,21 +177,20 @@ func Getwd() (string, error) { return "", ENOTSUP }
 
 func Getgroups() (gids []int, err error) {
 	n, err := getgroups(0, nil)
-	if err != nil {
-		return nil, err
-	}
-	if n == 0 {
-		return nil, nil
-	}
-
-	// Sanity check group count.  Max is 16 on BSD.
-	if n < 0 || n > 1000 {
+	// Check for error and sanity check group count.  Newer versions of
+	// Solaris allow up to 1024 (NGROUPS_MAX).
+	if n < 0 || n > 1024 {
+		if err != nil {
+			return nil, err
+		}
 		return nil, EINVAL
+	} else if n == 0 {
+		return nil, nil
 	}
 
 	a := make([]_Gid_t, n)
 	n, err = getgroups(n, &a[0])
-	if err != nil {
+	if n == -1 {
 		return nil, err
 	}
 	gids = make([]int, n)
@@ -276,19 +289,38 @@ func Gethostname() (name string, err error) {
 	return name, err
 }
 
+//sys	utimes(path string, times *[2]Timeval) (err error)
+
+func Utimes(path string, tv []Timeval) (err error) {
+	if tv == nil {
+		return utimes(path, nil)
+	}
+	if len(tv) != 2 {
+		return EINVAL
+	}
+	return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0])))
+}
+
+//sys	utimensat(fd int, path string, times *[2]Timespec, flag int) (err error)
+
 func UtimesNano(path string, ts []Timespec) error {
 	if ts == nil {
-		return Utimes(path, nil)
+		return utimensat(AT_FDCWD, path, nil, 0)
 	}
 	if len(ts) != 2 {
 		return EINVAL
 	}
-	var tv [2]Timeval
-	for i := 0; i < 2; i++ {
-		tv[i].Sec = ts[i].Sec
-		tv[i].Usec = ts[i].Nsec / 1000
+	return utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0)
+}
+
+func UtimesNanoAt(dirfd int, path string, ts []Timespec, flags int) error {
+	if ts == nil {
+		return utimensat(dirfd, path, nil, flags)
+	}
+	if len(ts) != 2 {
+		return EINVAL
 	}
-	return Utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0])))
+	return utimensat(dirfd, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), flags)
 }
 
 //sys	fcntl(fd int, cmd int, arg int) (val int, err error)
@@ -302,6 +334,35 @@ func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error {
 	return nil
 }
 
+//sys	futimesat(fildes int, path *byte, times *[2]Timeval) (err error)
+
+func Futimesat(dirfd int, path string, tv []Timeval) error {
+	pathp, err := BytePtrFromString(path)
+	if err != nil {
+		return err
+	}
+	if tv == nil {
+		return futimesat(dirfd, pathp, nil)
+	}
+	if len(tv) != 2 {
+		return EINVAL
+	}
+	return futimesat(dirfd, pathp, (*[2]Timeval)(unsafe.Pointer(&tv[0])))
+}
+
+// Solaris doesn't have an futimes function because it allows NULL to be
+// specified as the path for futimesat.  However, Go doesn't like
+// NULL-style string interfaces, so this simple wrapper is provided.
+func Futimes(fd int, tv []Timeval) error {
+	if tv == nil {
+		return futimesat(fd, nil, nil)
+	}
+	if len(tv) != 2 {
+		return EINVAL
+	}
+	return futimesat(fd, nil, (*[2]Timeval)(unsafe.Pointer(&tv[0])))
+}
+
 func anyToSockaddr(rsa *RawSockaddrAny) (Sockaddr, error) {
 	switch rsa.Addr.Family {
 	case AF_UNIX:
@@ -350,7 +411,7 @@ func Accept(fd int) (nfd int, sa Sockaddr, err error) {
 	var rsa RawSockaddrAny
 	var len _Socklen = SizeofSockaddrAny
 	nfd, err = accept(fd, &rsa, &len)
-	if err != nil {
+	if nfd == -1 {
 		return
 	}
 	sa, err = anyToSockaddr(&rsa)
@@ -361,6 +422,8 @@ func Accept(fd int) (nfd int, sa Sockaddr, err error) {
 	return
 }
 
+//sys	recvmsg(s int, msg *Msghdr, flags int) (n int, err error) = libsocket.recvmsg
+
 func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) {
 	var msg Msghdr
 	var rsa RawSockaddrAny
@@ -382,7 +445,7 @@ func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from
 	}
 	msg.Iov = &iov
 	msg.Iovlen = 1
-	if n, err = recvmsg(fd, &msg, flags); err != nil {
+	if n, err = recvmsg(fd, &msg, flags); n == -1 {
 		return
 	}
 	oobn = int(msg.Accrightslen)
@@ -437,6 +500,67 @@ func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error)
 	return n, nil
 }
 
+//sys	acct(path *byte) (err error)
+
+func Acct(path string) (err error) {
+	if len(path) == 0 {
+		// Assume caller wants to disable accounting.
+		return acct(nil)
+	}
+
+	pathp, err := BytePtrFromString(path)
+	if err != nil {
+		return err
+	}
+	return acct(pathp)
+}
+
+/*
+ * Expose the ioctl function
+ */
+
+//sys	ioctl(fd int, req int, arg uintptr) (err error)
+
+func IoctlSetInt(fd int, req int, value int) (err error) {
+	return ioctl(fd, req, uintptr(value))
+}
+
+func IoctlSetWinsize(fd int, req int, value *Winsize) (err error) {
+	return ioctl(fd, req, uintptr(unsafe.Pointer(value)))
+}
+
+func IoctlSetTermios(fd int, req int, value *Termios) (err error) {
+	return ioctl(fd, req, uintptr(unsafe.Pointer(value)))
+}
+
+func IoctlSetTermio(fd int, req int, value *Termio) (err error) {
+	return ioctl(fd, req, uintptr(unsafe.Pointer(value)))
+}
+
+func IoctlGetInt(fd int, req int) (int, error) {
+	var value int
+	err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
+	return value, err
+}
+
+func IoctlGetWinsize(fd int, req int) (*Winsize, error) {
+	var value Winsize
+	err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
+	return &value, err
+}
+
+func IoctlGetTermios(fd int, req int) (*Termios, error) {
+	var value Termios
+	err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
+	return &value, err
+}
+
+func IoctlGetTermio(fd int, req int) (*Termio, error) {
+	var value Termio
+	err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
+	return &value, err
+}
+
 /*
  * Exposed directly
  */
@@ -447,21 +571,29 @@ func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error)
 //sys	Chown(path string, uid int, gid int) (err error)
 //sys	Chroot(path string) (err error)
 //sys	Close(fd int) (err error)
+//sys	Creat(path string, mode uint32) (fd int, err error)
 //sys	Dup(fd int) (nfd int, err error)
+//sys	Dup2(oldfd int, newfd int) (err error)
 //sys	Exit(code int)
 //sys	Fchdir(fd int) (err error)
 //sys	Fchmod(fd int, mode uint32) (err error)
+//sys	Fchmodat(dirfd int, path string, mode uint32, flags int) (err error)
 //sys	Fchown(fd int, uid int, gid int) (err error)
+//sys	Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error)
+//sys	Fdatasync(fd int) (err error)
 //sys	Fpathconf(fd int, name int) (val int, err error)
 //sys	Fstat(fd int, stat *Stat_t) (err error)
 //sys	Getdents(fd int, buf []byte, basep *uintptr) (n int, err error)
 //sysnb	Getgid() (gid int)
 //sysnb	Getpid() (pid int)
+//sysnb	Getpgid(pid int) (pgid int, err error)
+//sysnb	Getpgrp() (pgid int, err error)
 //sys	Geteuid() (euid int)
 //sys	Getegid() (egid int)
 //sys	Getppid() (ppid int)
 //sys	Getpriority(which int, who int) (n int, err error)
 //sysnb	Getrlimit(which int, lim *Rlimit) (err error)
+//sysnb	Getrusage(who int, rusage *Rusage) (err error)
 //sysnb	Gettimeofday(tv *Timeval) (err error)
 //sysnb	Getuid() (uid int)
 //sys	Kill(pid int, signum syscall.Signal) (err error)
@@ -471,20 +603,33 @@ func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error)
 //sys	Lstat(path string, stat *Stat_t) (err error)
 //sys	Madvise(b []byte, advice int) (err error)
 //sys	Mkdir(path string, mode uint32) (err error)
+//sys	Mkdirat(dirfd int, path string, mode uint32) (err error)
+//sys	Mkfifo(path string, mode uint32) (err error)
+//sys	Mkfifoat(dirfd int, path string, mode uint32) (err error)
 //sys	Mknod(path string, mode uint32, dev int) (err error)
+//sys	Mknodat(dirfd int, path string, mode uint32, dev int) (err error)
+//sys	Mlock(b []byte) (err error)
+//sys	Mlockall(flags int) (err error)
+//sys	Mprotect(b []byte, prot int) (err error)
+//sys	Munlock(b []byte) (err error)
+//sys	Munlockall() (err error)
 //sys	Nanosleep(time *Timespec, leftover *Timespec) (err error)
 //sys	Open(path string, mode int, perm uint32) (fd int, err error)
+//sys	Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error)
 //sys	Pathconf(path string, name int) (val int, err error)
+//sys	Pause() (err error)
 //sys	Pread(fd int, p []byte, offset int64) (n int, err error)
 //sys	Pwrite(fd int, p []byte, offset int64) (n int, err error)
 //sys	read(fd int, p []byte) (n int, err error)
 //sys	Readlink(path string, buf []byte) (n int, err error)
 //sys	Rename(from string, to string) (err error)
+//sys	Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error)
 //sys	Rmdir(path string) (err error)
 //sys	Seek(fd int, offset int64, whence int) (newoffset int64, err error) = lseek
 //sysnb	Setegid(egid int) (err error)
 //sysnb	Seteuid(euid int) (err error)
 //sysnb	Setgid(gid int) (err error)
+//sys	Sethostname(p []byte) (err error)
 //sysnb	Setpgid(pid int, pgid int) (err error)
 //sys	Setpriority(which int, who int, prio int) (err error)
 //sysnb	Setregid(rgid int, egid int) (err error)
@@ -496,12 +641,17 @@ func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error)
 //sys	Stat(path string, stat *Stat_t) (err error)
 //sys	Symlink(path string, link string) (err error)
 //sys	Sync() (err error)
+//sysnb	Times(tms *Tms) (ticks uintptr, err error)
 //sys	Truncate(path string, length int64) (err error)
 //sys	Fsync(fd int) (err error)
 //sys	Ftruncate(fd int, length int64) (err error)
-//sys	Umask(newmask int) (oldmask int)
+//sys	Umask(mask int) (oldmask int)
+//sysnb	Uname(buf *Utsname) (err error)
+//sys	Unmount(target string, flags int) (err error) = libc.umount
 //sys	Unlink(path string) (err error)
-//sys	Utimes(path string, times *[2]Timeval) (err error)
+//sys	Unlinkat(dirfd int, path string, flags int) (err error)
+//sys	Ustat(dev int, ubuf *Ustat_t) (err error)
+//sys	Utime(path string, buf *Utimbuf) (err error)
 //sys	bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) = libsocket.bind
 //sys	connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) = libsocket.connect
 //sys	mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error)
@@ -512,10 +662,8 @@ func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error)
 //sys	write(fd int, p []byte) (n int, err error)
 //sys	getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) = libsocket.getsockopt
 //sysnb	getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) = libsocket.getpeername
-//sys	getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) = libsocket.getsockname
 //sys	setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) = libsocket.setsockopt
 //sys	recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) = libsocket.recvfrom
-//sys	recvmsg(s int, msg *Msghdr, flags int) (n int, err error) = libsocket.recvmsg
 
 func readlen(fd int, buf *byte, nbuf int) (n int, err error) {
 	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procread)), 3, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf), 0, 0, 0)
@@ -548,3 +696,18 @@ func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, e
 func Munmap(b []byte) (err error) {
 	return mapper.Munmap(b)
 }
+
+//sys	sysconf(name int) (n int64, err error)
+
+// pageSize caches the value of Getpagesize, since it can't change
+// once the system is booted.
+var pageSize int64 // accessed atomically
+
+func Getpagesize() int {
+	n := atomic.LoadInt64(&pageSize)
+	if n == 0 {
+		n, _ = sysconf(_SC_PAGESIZE)
+		atomic.StoreInt64(&pageSize, n)
+	}
+	return int(n)
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go
index 9c173cd5f2b03ca256f724d242dabfef36355553..5aff62c3bbeac100bfdaa31af0087e4d473a908e 100644
--- a/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go
@@ -6,8 +6,6 @@
 
 package unix
 
-func Getpagesize() int { return 4096 }
-
 func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
 
 func NsecToTimespec(nsec int64) (ts Timespec) {
@@ -16,8 +14,6 @@ func NsecToTimespec(nsec int64) (ts Timespec) {
 	return
 }
 
-func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }
-
 func NsecToTimeval(nsec int64) (tv Timeval) {
 	nsec += 999 // round up to microsecond
 	tv.Usec = nsec % 1e9 / 1e3
diff --git a/vendor/golang.org/x/sys/unix/types_darwin.go b/vendor/golang.org/x/sys/unix/types_darwin.go
index 2bb15cb8b7ebbbda9193f2e214734cf86db2fe78..1153261822b22494e2596ea6f5fdd070f4fdda9a 100644
--- a/vendor/golang.org/x/sys/unix/types_darwin.go
+++ b/vendor/golang.org/x/sys/unix/types_darwin.go
@@ -241,3 +241,10 @@ type BpfHdr C.struct_bpf_hdr
 // Terminal handling
 
 type Termios C.struct_termios
+
+// fchmodat-like syscalls.
+
+const (
+	AT_FDCWD            = C.AT_FDCWD
+	AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
+)
diff --git a/vendor/golang.org/x/sys/unix/types_linux.go b/vendor/golang.org/x/sys/unix/types_linux.go
index 9b75633b37981da8054d9cd201fae31bc61ed88b..7b53590a377c1a9d5e21335e7d8bee1884d4baaf 100644
--- a/vendor/golang.org/x/sys/unix/types_linux.go
+++ b/vendor/golang.org/x/sys/unix/types_linux.go
@@ -50,12 +50,19 @@ package unix
 #include <linux/netlink.h>
 #include <linux/rtnetlink.h>
 #include <linux/icmpv6.h>
-#include <termios.h>
+#include <asm/termbits.h>
 #include <time.h>
 #include <unistd.h>
 #include <ustat.h>
 #include <utime.h>
 
+#ifdef TCSETS2
+// On systems that have "struct termios2" use this as type Termios.
+typedef struct termios2 termios_t;
+#else
+typedef struct termios termios_t;
+#endif
+
 enum {
 	sizeofPtr = sizeof(void*),
 };
@@ -91,6 +98,8 @@ typedef struct user_regs PtraceRegs;
 typedef struct user_pt_regs PtraceRegs;
 #elif defined(__powerpc64__)
 typedef struct pt_regs PtraceRegs;
+#elif defined(__mips__)
+typedef struct user PtraceRegs;
 #else
 typedef struct user_regs_struct PtraceRegs;
 #endif
@@ -391,9 +400,10 @@ type EpollEvent C.struct_my_epoll_event
 const (
 	AT_FDCWD            = C.AT_FDCWD
 	AT_REMOVEDIR        = C.AT_REMOVEDIR
+	AT_SYMLINK_FOLLOW   = C.AT_SYMLINK_FOLLOW
 	AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
 )
 
 // Terminal handling
 
-type Termios C.struct_termios
+type Termios C.termios_t
diff --git a/vendor/golang.org/x/sys/unix/types_solaris.go b/vendor/golang.org/x/sys/unix/types_solaris.go
index 753c7996b1decfaa654c90309f9218da9103f6da..6ad50eaba61c43d03a42eeb796ed3fe1b4fe57c8 100644
--- a/vendor/golang.org/x/sys/unix/types_solaris.go
+++ b/vendor/golang.org/x/sys/unix/types_solaris.go
@@ -15,10 +15,17 @@ package unix
 
 /*
 #define KERNEL
+// These defines ensure that builds done on newer versions of Solaris are
+// backwards-compatible with older versions of Solaris and
+// OpenSolaris-based derivatives.
+#define __USE_SUNOS_SOCKETS__          // msghdr
+#define __USE_LEGACY_PROTOTYPES__      // iovec
 #include <dirent.h>
 #include <fcntl.h>
+#include <limits.h>
 #include <signal.h>
 #include <termios.h>
+#include <termio.h>
 #include <stdio.h>
 #include <unistd.h>
 #include <sys/mman.h>
@@ -30,7 +37,9 @@ package unix
 #include <sys/socket.h>
 #include <sys/stat.h>
 #include <sys/time.h>
+#include <sys/times.h>
 #include <sys/types.h>
+#include <sys/utsname.h>
 #include <sys/un.h>
 #include <sys/wait.h>
 #include <net/bpf.h>
@@ -40,6 +49,8 @@ package unix
 #include <netinet/in.h>
 #include <netinet/icmp6.h>
 #include <netinet/tcp.h>
+#include <ustat.h>
+#include <utime.h>
 
 enum {
 	sizeofPtr = sizeof(void*),
@@ -69,6 +80,7 @@ const (
 	sizeofInt      = C.sizeof_int
 	sizeofLong     = C.sizeof_long
 	sizeofLongLong = C.sizeof_longlong
+	PathMax        = C.PATH_MAX
 )
 
 // Basic types
@@ -88,6 +100,10 @@ type Timeval C.struct_timeval
 
 type Timeval32 C.struct_timeval32
 
+type Tms C.struct_tms
+
+type Utimbuf C.struct_utimbuf
+
 // Processes
 
 type Rusage C.struct_rusage
@@ -175,6 +191,20 @@ const (
 
 type FdSet C.fd_set
 
+// Misc
+
+type Utsname C.struct_utsname
+
+type Ustat_t C.struct_ustat
+
+const (
+	AT_FDCWD            = C.AT_FDCWD
+	AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
+	AT_SYMLINK_FOLLOW   = C.AT_SYMLINK_FOLLOW
+	AT_REMOVEDIR        = C.AT_REMOVEDIR
+	AT_EACCESS          = C.AT_EACCESS
+)
+
 // Routing and interface messages
 
 const (
@@ -217,6 +247,14 @@ type BpfTimeval C.struct_bpf_timeval
 
 type BpfHdr C.struct_bpf_hdr
 
+// sysconf information
+
+const _SC_PAGESIZE = C._SC_PAGESIZE
+
 // Terminal handling
 
 type Termios C.struct_termios
+
+type Termio C.struct_termio
+
+type Winsize C.struct_winsize
diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go
index 0bff296c1806733f64d054ff1c27ae4feadf3901..8e63888351e6285cb4ebe710a819aa3812f118c3 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go
@@ -32,7 +32,7 @@ const (
 	AF_LAT                            = 0xe
 	AF_LINK                           = 0x12
 	AF_LOCAL                          = 0x1
-	AF_MAX                            = 0x26
+	AF_MAX                            = 0x28
 	AF_NATM                           = 0x1f
 	AF_NDRV                           = 0x1b
 	AF_NETBIOS                        = 0x21
@@ -47,6 +47,7 @@ const (
 	AF_SYSTEM                         = 0x20
 	AF_UNIX                           = 0x1
 	AF_UNSPEC                         = 0x0
+	AF_UTUN                           = 0x26
 	B0                                = 0x0
 	B110                              = 0x6e
 	B115200                           = 0x1c200
@@ -85,6 +86,7 @@ const (
 	BIOCSBLEN                         = 0xc0044266
 	BIOCSDLT                          = 0x80044278
 	BIOCSETF                          = 0x80084267
+	BIOCSETFNR                        = 0x8008427e
 	BIOCSETIF                         = 0x8020426c
 	BIOCSHDRCMPLT                     = 0x80044275
 	BIOCSRSIG                         = 0x80044273
@@ -151,33 +153,168 @@ const (
 	CSUSP                             = 0x1a
 	CTL_MAXNAME                       = 0xc
 	CTL_NET                           = 0x4
+	DLT_A429                          = 0xb8
+	DLT_A653_ICM                      = 0xb9
+	DLT_AIRONET_HEADER                = 0x78
+	DLT_AOS                           = 0xde
 	DLT_APPLE_IP_OVER_IEEE1394        = 0x8a
 	DLT_ARCNET                        = 0x7
+	DLT_ARCNET_LINUX                  = 0x81
 	DLT_ATM_CLIP                      = 0x13
 	DLT_ATM_RFC1483                   = 0xb
+	DLT_AURORA                        = 0x7e
 	DLT_AX25                          = 0x3
+	DLT_AX25_KISS                     = 0xca
+	DLT_BACNET_MS_TP                  = 0xa5
+	DLT_BLUETOOTH_HCI_H4              = 0xbb
+	DLT_BLUETOOTH_HCI_H4_WITH_PHDR    = 0xc9
+	DLT_CAN20B                        = 0xbe
+	DLT_CAN_SOCKETCAN                 = 0xe3
 	DLT_CHAOS                         = 0x5
 	DLT_CHDLC                         = 0x68
+	DLT_CISCO_IOS                     = 0x76
 	DLT_C_HDLC                        = 0x68
+	DLT_C_HDLC_WITH_DIR               = 0xcd
+	DLT_DBUS                          = 0xe7
+	DLT_DECT                          = 0xdd
+	DLT_DOCSIS                        = 0x8f
+	DLT_DVB_CI                        = 0xeb
+	DLT_ECONET                        = 0x73
 	DLT_EN10MB                        = 0x1
 	DLT_EN3MB                         = 0x2
+	DLT_ENC                           = 0x6d
+	DLT_ERF                           = 0xc5
+	DLT_ERF_ETH                       = 0xaf
+	DLT_ERF_POS                       = 0xb0
+	DLT_FC_2                          = 0xe0
+	DLT_FC_2_WITH_FRAME_DELIMS        = 0xe1
 	DLT_FDDI                          = 0xa
+	DLT_FLEXRAY                       = 0xd2
+	DLT_FRELAY                        = 0x6b
+	DLT_FRELAY_WITH_DIR               = 0xce
+	DLT_GCOM_SERIAL                   = 0xad
+	DLT_GCOM_T1E1                     = 0xac
+	DLT_GPF_F                         = 0xab
+	DLT_GPF_T                         = 0xaa
+	DLT_GPRS_LLC                      = 0xa9
+	DLT_GSMTAP_ABIS                   = 0xda
+	DLT_GSMTAP_UM                     = 0xd9
+	DLT_HHDLC                         = 0x79
+	DLT_IBM_SN                        = 0x92
+	DLT_IBM_SP                        = 0x91
 	DLT_IEEE802                       = 0x6
 	DLT_IEEE802_11                    = 0x69
 	DLT_IEEE802_11_RADIO              = 0x7f
 	DLT_IEEE802_11_RADIO_AVS          = 0xa3
+	DLT_IEEE802_15_4                  = 0xc3
+	DLT_IEEE802_15_4_LINUX            = 0xbf
+	DLT_IEEE802_15_4_NOFCS            = 0xe6
+	DLT_IEEE802_15_4_NONASK_PHY       = 0xd7
+	DLT_IEEE802_16_MAC_CPS            = 0xbc
+	DLT_IEEE802_16_MAC_CPS_RADIO      = 0xc1
+	DLT_IPFILTER                      = 0x74
+	DLT_IPMB                          = 0xc7
+	DLT_IPMB_LINUX                    = 0xd1
+	DLT_IPNET                         = 0xe2
+	DLT_IPOIB                         = 0xf2
+	DLT_IPV4                          = 0xe4
+	DLT_IPV6                          = 0xe5
+	DLT_IP_OVER_FC                    = 0x7a
+	DLT_JUNIPER_ATM1                  = 0x89
+	DLT_JUNIPER_ATM2                  = 0x87
+	DLT_JUNIPER_ATM_CEMIC             = 0xee
+	DLT_JUNIPER_CHDLC                 = 0xb5
+	DLT_JUNIPER_ES                    = 0x84
+	DLT_JUNIPER_ETHER                 = 0xb2
+	DLT_JUNIPER_FIBRECHANNEL          = 0xea
+	DLT_JUNIPER_FRELAY                = 0xb4
+	DLT_JUNIPER_GGSN                  = 0x85
+	DLT_JUNIPER_ISM                   = 0xc2
+	DLT_JUNIPER_MFR                   = 0x86
+	DLT_JUNIPER_MLFR                  = 0x83
+	DLT_JUNIPER_MLPPP                 = 0x82
+	DLT_JUNIPER_MONITOR               = 0xa4
+	DLT_JUNIPER_PIC_PEER              = 0xae
+	DLT_JUNIPER_PPP                   = 0xb3
+	DLT_JUNIPER_PPPOE                 = 0xa7
+	DLT_JUNIPER_PPPOE_ATM             = 0xa8
+	DLT_JUNIPER_SERVICES              = 0x88
+	DLT_JUNIPER_SRX_E2E               = 0xe9
+	DLT_JUNIPER_ST                    = 0xc8
+	DLT_JUNIPER_VP                    = 0xb7
+	DLT_JUNIPER_VS                    = 0xe8
+	DLT_LAPB_WITH_DIR                 = 0xcf
+	DLT_LAPD                          = 0xcb
+	DLT_LIN                           = 0xd4
+	DLT_LINUX_EVDEV                   = 0xd8
+	DLT_LINUX_IRDA                    = 0x90
+	DLT_LINUX_LAPD                    = 0xb1
+	DLT_LINUX_PPP_WITHDIRECTION       = 0xa6
 	DLT_LINUX_SLL                     = 0x71
 	DLT_LOOP                          = 0x6c
+	DLT_LTALK                         = 0x72
+	DLT_MATCHING_MAX                  = 0xf5
+	DLT_MATCHING_MIN                  = 0x68
+	DLT_MFR                           = 0xb6
+	DLT_MOST                          = 0xd3
+	DLT_MPEG_2_TS                     = 0xf3
+	DLT_MPLS                          = 0xdb
+	DLT_MTP2                          = 0x8c
+	DLT_MTP2_WITH_PHDR                = 0x8b
+	DLT_MTP3                          = 0x8d
+	DLT_MUX27010                      = 0xec
+	DLT_NETANALYZER                   = 0xf0
+	DLT_NETANALYZER_TRANSPARENT       = 0xf1
+	DLT_NFC_LLCP                      = 0xf5
+	DLT_NFLOG                         = 0xef
+	DLT_NG40                          = 0xf4
 	DLT_NULL                          = 0x0
+	DLT_PCI_EXP                       = 0x7d
 	DLT_PFLOG                         = 0x75
 	DLT_PFSYNC                        = 0x12
+	DLT_PPI                           = 0xc0
 	DLT_PPP                           = 0x9
 	DLT_PPP_BSDOS                     = 0x10
+	DLT_PPP_ETHER                     = 0x33
+	DLT_PPP_PPPD                      = 0xa6
 	DLT_PPP_SERIAL                    = 0x32
+	DLT_PPP_WITH_DIR                  = 0xcc
+	DLT_PPP_WITH_DIRECTION            = 0xa6
+	DLT_PRISM_HEADER                  = 0x77
 	DLT_PRONET                        = 0x4
+	DLT_RAIF1                         = 0xc6
 	DLT_RAW                           = 0xc
+	DLT_RIO                           = 0x7c
+	DLT_SCCP                          = 0x8e
+	DLT_SITA                          = 0xc4
 	DLT_SLIP                          = 0x8
 	DLT_SLIP_BSDOS                    = 0xf
+	DLT_STANAG_5066_D_PDU             = 0xed
+	DLT_SUNATM                        = 0x7b
+	DLT_SYMANTEC_FIREWALL             = 0x63
+	DLT_TZSP                          = 0x80
+	DLT_USB                           = 0xba
+	DLT_USB_LINUX                     = 0xbd
+	DLT_USB_LINUX_MMAPPED             = 0xdc
+	DLT_USER0                         = 0x93
+	DLT_USER1                         = 0x94
+	DLT_USER10                        = 0x9d
+	DLT_USER11                        = 0x9e
+	DLT_USER12                        = 0x9f
+	DLT_USER13                        = 0xa0
+	DLT_USER14                        = 0xa1
+	DLT_USER15                        = 0xa2
+	DLT_USER2                         = 0x95
+	DLT_USER3                         = 0x96
+	DLT_USER4                         = 0x97
+	DLT_USER5                         = 0x98
+	DLT_USER6                         = 0x99
+	DLT_USER7                         = 0x9a
+	DLT_USER8                         = 0x9b
+	DLT_USER9                         = 0x9c
+	DLT_WIHART                        = 0xdf
+	DLT_X2E_SERIAL                    = 0xd5
+	DLT_X2E_XORAYA                    = 0xd6
 	DT_BLK                            = 0x6
 	DT_CHR                            = 0x2
 	DT_DIR                            = 0x4
@@ -200,8 +337,8 @@ const (
 	EVFILT_PROC                       = -0x5
 	EVFILT_READ                       = -0x1
 	EVFILT_SIGNAL                     = -0x6
-	EVFILT_SYSCOUNT                   = 0xc
-	EVFILT_THREADMARKER               = 0xc
+	EVFILT_SYSCOUNT                   = 0xe
+	EVFILT_THREADMARKER               = 0xe
 	EVFILT_TIMER                      = -0x7
 	EVFILT_USER                       = -0xa
 	EVFILT_VM                         = -0xc
@@ -235,9 +372,11 @@ const (
 	F_CHKCLEAN                        = 0x29
 	F_DUPFD                           = 0x0
 	F_DUPFD_CLOEXEC                   = 0x43
+	F_FINDSIGS                        = 0x4e
 	F_FLUSH_DATA                      = 0x28
 	F_FREEZE_FS                       = 0x35
 	F_FULLFSYNC                       = 0x33
+	F_GETCODEDIR                      = 0x48
 	F_GETFD                           = 0x1
 	F_GETFL                           = 0x3
 	F_GETLK                           = 0x7
@@ -247,10 +386,10 @@ const (
 	F_GETPATH                         = 0x32
 	F_GETPATH_MTMINFO                 = 0x47
 	F_GETPROTECTIONCLASS              = 0x3f
+	F_GETPROTECTIONLEVEL              = 0x4d
 	F_GLOBAL_NOCACHE                  = 0x37
 	F_LOG2PHYS                        = 0x31
 	F_LOG2PHYS_EXT                    = 0x41
-	F_MARKDEPENDENCY                  = 0x3c
 	F_NOCACHE                         = 0x30
 	F_NODIRECT                        = 0x3e
 	F_OK                              = 0x0
@@ -260,20 +399,21 @@ const (
 	F_RDADVISE                        = 0x2c
 	F_RDAHEAD                         = 0x2d
 	F_RDLCK                           = 0x1
-	F_READBOOTSTRAP                   = 0x2e
 	F_SETBACKINGSTORE                 = 0x46
 	F_SETFD                           = 0x2
 	F_SETFL                           = 0x4
 	F_SETLK                           = 0x8
 	F_SETLKW                          = 0x9
+	F_SETLKWTIMEOUT                   = 0xa
 	F_SETNOSIGPIPE                    = 0x49
 	F_SETOWN                          = 0x6
 	F_SETPROTECTIONCLASS              = 0x40
 	F_SETSIZE                         = 0x2b
+	F_SINGLE_WRITER                   = 0x4c
 	F_THAW_FS                         = 0x36
+	F_TRANSCODEKEY                    = 0x4b
 	F_UNLCK                           = 0x2
 	F_VOLPOSMODE                      = 0x4
-	F_WRITEBOOTSTRAP                  = 0x2f
 	F_WRLCK                           = 0x3
 	HUPCL                             = 0x4000
 	ICANON                            = 0x100
@@ -343,6 +483,7 @@ const (
 	IFT_PDP                           = 0xff
 	IFT_PFLOG                         = 0xf5
 	IFT_PFSYNC                        = 0xf6
+	IFT_PKTAP                         = 0xfe
 	IFT_PPP                           = 0x17
 	IFT_PROPMUX                       = 0x36
 	IFT_PROPVIRTUAL                   = 0x35
@@ -511,7 +652,7 @@ const (
 	IPV6_FAITH                        = 0x1d
 	IPV6_FLOWINFO_MASK                = 0xffffff0f
 	IPV6_FLOWLABEL_MASK               = 0xffff0f00
-	IPV6_FRAGTTL                      = 0x78
+	IPV6_FRAGTTL                      = 0x3c
 	IPV6_FW_ADD                       = 0x1e
 	IPV6_FW_DEL                       = 0x1f
 	IPV6_FW_FLUSH                     = 0x20
@@ -681,11 +822,19 @@ const (
 	NOFLSH                            = 0x80000000
 	NOTE_ABSOLUTE                     = 0x8
 	NOTE_ATTRIB                       = 0x8
+	NOTE_BACKGROUND                   = 0x40
 	NOTE_CHILD                        = 0x4
+	NOTE_CRITICAL                     = 0x20
 	NOTE_DELETE                       = 0x1
 	NOTE_EXEC                         = 0x20000000
 	NOTE_EXIT                         = 0x80000000
 	NOTE_EXITSTATUS                   = 0x4000000
+	NOTE_EXIT_CSERROR                 = 0x40000
+	NOTE_EXIT_DECRYPTFAIL             = 0x10000
+	NOTE_EXIT_DETAIL                  = 0x2000000
+	NOTE_EXIT_DETAIL_MASK             = 0x70000
+	NOTE_EXIT_MEMORY                  = 0x20000
+	NOTE_EXIT_REPARENTED              = 0x80000
 	NOTE_EXTEND                       = 0x4
 	NOTE_FFAND                        = 0x40000000
 	NOTE_FFCOPY                       = 0xc0000000
@@ -694,6 +843,7 @@ const (
 	NOTE_FFNOP                        = 0x0
 	NOTE_FFOR                         = 0x80000000
 	NOTE_FORK                         = 0x40000000
+	NOTE_LEEWAY                       = 0x10
 	NOTE_LINK                         = 0x10
 	NOTE_LOWAT                        = 0x1
 	NOTE_NONE                         = 0x80
@@ -702,7 +852,6 @@ const (
 	NOTE_PDATAMASK                    = 0xfffff
 	NOTE_REAP                         = 0x10000000
 	NOTE_RENAME                       = 0x20
-	NOTE_RESOURCEEND                  = 0x2000000
 	NOTE_REVOKE                       = 0x40
 	NOTE_SECONDS                      = 0x1
 	NOTE_SIGNAL                       = 0x8000000
@@ -730,6 +879,7 @@ const (
 	O_CLOEXEC                         = 0x1000000
 	O_CREAT                           = 0x200
 	O_DIRECTORY                       = 0x100000
+	O_DP_GETRAWENCRYPTED              = 0x1
 	O_DSYNC                           = 0x400000
 	O_EVTONLY                         = 0x8000
 	O_EXCL                            = 0x800
@@ -779,6 +929,7 @@ const (
 	RLIMIT_AS                         = 0x5
 	RLIMIT_CORE                       = 0x4
 	RLIMIT_CPU                        = 0x0
+	RLIMIT_CPU_USAGE_MONITOR          = 0x2
 	RLIMIT_DATA                       = 0x2
 	RLIMIT_FSIZE                      = 0x1
 	RLIMIT_NOFILE                     = 0x8
@@ -816,12 +967,15 @@ const (
 	RTF_LOCAL                         = 0x200000
 	RTF_MODIFIED                      = 0x20
 	RTF_MULTICAST                     = 0x800000
+	RTF_NOIFREF                       = 0x2000
 	RTF_PINNED                        = 0x100000
 	RTF_PRCLONING                     = 0x10000
 	RTF_PROTO1                        = 0x8000
 	RTF_PROTO2                        = 0x4000
 	RTF_PROTO3                        = 0x40000
+	RTF_PROXY                         = 0x8000000
 	RTF_REJECT                        = 0x8
+	RTF_ROUTER                        = 0x10000000
 	RTF_STATIC                        = 0x800
 	RTF_UP                            = 0x1
 	RTF_WASCLONED                     = 0x20000
@@ -866,7 +1020,6 @@ const (
 	SHUT_WR                           = 0x1
 	SIOCADDMULTI                      = 0x80206931
 	SIOCAIFADDR                       = 0x8040691a
-	SIOCALIFADDR                      = 0x8118691d
 	SIOCARPIPLL                       = 0xc0206928
 	SIOCATMARK                        = 0x40047307
 	SIOCAUTOADDR                      = 0xc0206926
@@ -874,10 +1027,7 @@ const (
 	SIOCDELMULTI                      = 0x80206932
 	SIOCDIFADDR                       = 0x80206919
 	SIOCDIFPHYADDR                    = 0x80206941
-	SIOCDLIFADDR                      = 0x8118691f
 	SIOCGDRVSPEC                      = 0xc01c697b
-	SIOCGETSGCNT                      = 0xc014721c
-	SIOCGETVIFCNT                     = 0xc014721b
 	SIOCGETVLAN                       = 0xc020697f
 	SIOCGHIWAT                        = 0x40047301
 	SIOCGIFADDR                       = 0xc0206921
@@ -903,13 +1053,12 @@ const (
 	SIOCGIFSTATUS                     = 0xc331693d
 	SIOCGIFVLAN                       = 0xc020697f
 	SIOCGIFWAKEFLAGS                  = 0xc0206988
-	SIOCGLIFADDR                      = 0xc118691e
-	SIOCGLIFPHYADDR                   = 0xc1186943
 	SIOCGLOWAT                        = 0x40047303
 	SIOCGPGRP                         = 0x40047309
 	SIOCIFCREATE                      = 0xc0206978
 	SIOCIFCREATE2                     = 0xc020697a
 	SIOCIFDESTROY                     = 0x80206979
+	SIOCIFGCLONERS                    = 0xc00c6981
 	SIOCRSLVMULTI                     = 0xc008693b
 	SIOCSDRVSPEC                      = 0x801c697b
 	SIOCSETVLAN                       = 0x8020697e
@@ -933,7 +1082,6 @@ const (
 	SIOCSIFPHYADDR                    = 0x8040693e
 	SIOCSIFPHYS                       = 0x80206936
 	SIOCSIFVLAN                       = 0x8020697e
-	SIOCSLIFPHYADDR                   = 0x81186942
 	SIOCSLOWAT                        = 0x80047302
 	SIOCSPGRP                         = 0x80047308
 	SOCK_DGRAM                        = 0x2
@@ -960,6 +1108,7 @@ const (
 	SO_NOTIFYCONFLICT                 = 0x1026
 	SO_NP_EXTENSIONS                  = 0x1083
 	SO_NREAD                          = 0x1020
+	SO_NUMRCVPKT                      = 0x1112
 	SO_NWRITE                         = 0x1024
 	SO_OOBINLINE                      = 0x100
 	SO_PEERLABEL                      = 0x1011
@@ -967,10 +1116,6 @@ const (
 	SO_RCVBUF                         = 0x1002
 	SO_RCVLOWAT                       = 0x1004
 	SO_RCVTIMEO                       = 0x1006
-	SO_RESTRICTIONS                   = 0x1081
-	SO_RESTRICT_DENYIN                = 0x1
-	SO_RESTRICT_DENYOUT               = 0x2
-	SO_RESTRICT_DENYSET               = 0x80000000
 	SO_REUSEADDR                      = 0x4
 	SO_REUSEPORT                      = 0x200
 	SO_REUSESHAREUID                  = 0x1025
@@ -1016,21 +1161,25 @@ const (
 	TCIOFLUSH                         = 0x3
 	TCOFLUSH                          = 0x2
 	TCP_CONNECTIONTIMEOUT             = 0x20
+	TCP_ENABLE_ECN                    = 0x104
 	TCP_KEEPALIVE                     = 0x10
+	TCP_KEEPCNT                       = 0x102
+	TCP_KEEPINTVL                     = 0x101
 	TCP_MAXHLEN                       = 0x3c
 	TCP_MAXOLEN                       = 0x28
 	TCP_MAXSEG                        = 0x2
 	TCP_MAXWIN                        = 0xffff
-	TCP_MAX_SACK                      = 0x3
+	TCP_MAX_SACK                      = 0x4
 	TCP_MAX_WINSHIFT                  = 0xe
 	TCP_MINMSS                        = 0xd8
-	TCP_MINMSSOVERLOAD                = 0x3e8
 	TCP_MSS                           = 0x200
 	TCP_NODELAY                       = 0x1
 	TCP_NOOPT                         = 0x8
 	TCP_NOPUSH                        = 0x4
+	TCP_NOTSENT_LOWAT                 = 0x201
 	TCP_RXT_CONNDROPTIME              = 0x80
 	TCP_RXT_FINDROP                   = 0x100
+	TCP_SENDMOREACKS                  = 0x103
 	TCSAFLUSH                         = 0x2
 	TIOCCBRK                          = 0x2000747a
 	TIOCCDTR                          = 0x20007478
@@ -1174,7 +1323,7 @@ const (
 	EIO             = syscall.Errno(0x5)
 	EISCONN         = syscall.Errno(0x38)
 	EISDIR          = syscall.Errno(0x15)
-	ELAST           = syscall.Errno(0x69)
+	ELAST           = syscall.Errno(0x6a)
 	ELOOP           = syscall.Errno(0x3e)
 	EMFILE          = syscall.Errno(0x18)
 	EMLINK          = syscall.Errno(0x1f)
@@ -1225,6 +1374,7 @@ const (
 	EPROTONOSUPPORT = syscall.Errno(0x2b)
 	EPROTOTYPE      = syscall.Errno(0x29)
 	EPWROFF         = syscall.Errno(0x52)
+	EQFULL          = syscall.Errno(0x6a)
 	ERANGE          = syscall.Errno(0x22)
 	EREMOTE         = syscall.Errno(0x47)
 	EROFS           = syscall.Errno(0x1e)
@@ -1387,6 +1537,7 @@ var errors = [...]string{
 	103: "policy not found",
 	104: "state not recoverable",
 	105: "previous owner died",
+	106: "interface output queue is full",
 }
 
 // Signal table
diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go
index 1f41dc085dce41e784acf0ba627bbc45b29a5be4..9594f93817a341417086948cdb4fdc1bb93329b9 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go
@@ -376,6 +376,7 @@ const (
 	F_FLUSH_DATA                      = 0x28
 	F_FREEZE_FS                       = 0x35
 	F_FULLFSYNC                       = 0x33
+	F_GETCODEDIR                      = 0x48
 	F_GETFD                           = 0x1
 	F_GETFL                           = 0x3
 	F_GETLK                           = 0x7
@@ -1019,7 +1020,6 @@ const (
 	SHUT_WR                           = 0x1
 	SIOCADDMULTI                      = 0x80206931
 	SIOCAIFADDR                       = 0x8040691a
-	SIOCALIFADDR                      = 0x8118691d
 	SIOCARPIPLL                       = 0xc0206928
 	SIOCATMARK                        = 0x40047307
 	SIOCAUTOADDR                      = 0xc0206926
@@ -1027,10 +1027,7 @@ const (
 	SIOCDELMULTI                      = 0x80206932
 	SIOCDIFADDR                       = 0x80206919
 	SIOCDIFPHYADDR                    = 0x80206941
-	SIOCDLIFADDR                      = 0x8118691f
 	SIOCGDRVSPEC                      = 0xc028697b
-	SIOCGETSGCNT                      = 0xc014721c
-	SIOCGETVIFCNT                     = 0xc014721b
 	SIOCGETVLAN                       = 0xc020697f
 	SIOCGHIWAT                        = 0x40047301
 	SIOCGIFADDR                       = 0xc0206921
@@ -1056,8 +1053,6 @@ const (
 	SIOCGIFSTATUS                     = 0xc331693d
 	SIOCGIFVLAN                       = 0xc020697f
 	SIOCGIFWAKEFLAGS                  = 0xc0206988
-	SIOCGLIFADDR                      = 0xc118691e
-	SIOCGLIFPHYADDR                   = 0xc1186943
 	SIOCGLOWAT                        = 0x40047303
 	SIOCGPGRP                         = 0x40047309
 	SIOCIFCREATE                      = 0xc0206978
@@ -1087,7 +1082,6 @@ const (
 	SIOCSIFPHYADDR                    = 0x8040693e
 	SIOCSIFPHYS                       = 0x80206936
 	SIOCSIFVLAN                       = 0x8020697e
-	SIOCSLIFPHYADDR                   = 0x81186942
 	SIOCSLOWAT                        = 0x80047302
 	SIOCSPGRP                         = 0x80047308
 	SOCK_DGRAM                        = 0x2
@@ -1114,6 +1108,7 @@ const (
 	SO_NOTIFYCONFLICT                 = 0x1026
 	SO_NP_EXTENSIONS                  = 0x1083
 	SO_NREAD                          = 0x1020
+	SO_NUMRCVPKT                      = 0x1112
 	SO_NWRITE                         = 0x1024
 	SO_OOBINLINE                      = 0x100
 	SO_PEERLABEL                      = 0x1011
@@ -1166,6 +1161,7 @@ const (
 	TCIOFLUSH                         = 0x3
 	TCOFLUSH                          = 0x2
 	TCP_CONNECTIONTIMEOUT             = 0x20
+	TCP_ENABLE_ECN                    = 0x104
 	TCP_KEEPALIVE                     = 0x10
 	TCP_KEEPCNT                       = 0x102
 	TCP_KEEPINTVL                     = 0x101
@@ -1173,13 +1169,14 @@ const (
 	TCP_MAXOLEN                       = 0x28
 	TCP_MAXSEG                        = 0x2
 	TCP_MAXWIN                        = 0xffff
-	TCP_MAX_SACK                      = 0x3
+	TCP_MAX_SACK                      = 0x4
 	TCP_MAX_WINSHIFT                  = 0xe
 	TCP_MINMSS                        = 0xd8
 	TCP_MSS                           = 0x200
 	TCP_NODELAY                       = 0x1
 	TCP_NOOPT                         = 0x8
 	TCP_NOPUSH                        = 0x4
+	TCP_NOTSENT_LOWAT                 = 0x201
 	TCP_RXT_CONNDROPTIME              = 0x80
 	TCP_RXT_FINDROP                   = 0x100
 	TCP_SENDMOREACKS                  = 0x103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go
index 6f07be1654034960cff9d6f63cb3f03107737500..3189c6b34595fadf84b1ba29009993a9def18dc7 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go
@@ -86,6 +86,7 @@ const (
 	BIOCSBLEN                         = 0xc0044266
 	BIOCSDLT                          = 0x80044278
 	BIOCSETF                          = 0x80104267
+	BIOCSETFNR                        = 0x8010427e
 	BIOCSETIF                         = 0x8020426c
 	BIOCSHDRCMPLT                     = 0x80044275
 	BIOCSRSIG                         = 0x80044273
@@ -152,33 +153,168 @@ const (
 	CSUSP                             = 0x1a
 	CTL_MAXNAME                       = 0xc
 	CTL_NET                           = 0x4
+	DLT_A429                          = 0xb8
+	DLT_A653_ICM                      = 0xb9
+	DLT_AIRONET_HEADER                = 0x78
+	DLT_AOS                           = 0xde
 	DLT_APPLE_IP_OVER_IEEE1394        = 0x8a
 	DLT_ARCNET                        = 0x7
+	DLT_ARCNET_LINUX                  = 0x81
 	DLT_ATM_CLIP                      = 0x13
 	DLT_ATM_RFC1483                   = 0xb
+	DLT_AURORA                        = 0x7e
 	DLT_AX25                          = 0x3
+	DLT_AX25_KISS                     = 0xca
+	DLT_BACNET_MS_TP                  = 0xa5
+	DLT_BLUETOOTH_HCI_H4              = 0xbb
+	DLT_BLUETOOTH_HCI_H4_WITH_PHDR    = 0xc9
+	DLT_CAN20B                        = 0xbe
+	DLT_CAN_SOCKETCAN                 = 0xe3
 	DLT_CHAOS                         = 0x5
 	DLT_CHDLC                         = 0x68
+	DLT_CISCO_IOS                     = 0x76
 	DLT_C_HDLC                        = 0x68
+	DLT_C_HDLC_WITH_DIR               = 0xcd
+	DLT_DBUS                          = 0xe7
+	DLT_DECT                          = 0xdd
+	DLT_DOCSIS                        = 0x8f
+	DLT_DVB_CI                        = 0xeb
+	DLT_ECONET                        = 0x73
 	DLT_EN10MB                        = 0x1
 	DLT_EN3MB                         = 0x2
+	DLT_ENC                           = 0x6d
+	DLT_ERF                           = 0xc5
+	DLT_ERF_ETH                       = 0xaf
+	DLT_ERF_POS                       = 0xb0
+	DLT_FC_2                          = 0xe0
+	DLT_FC_2_WITH_FRAME_DELIMS        = 0xe1
 	DLT_FDDI                          = 0xa
+	DLT_FLEXRAY                       = 0xd2
+	DLT_FRELAY                        = 0x6b
+	DLT_FRELAY_WITH_DIR               = 0xce
+	DLT_GCOM_SERIAL                   = 0xad
+	DLT_GCOM_T1E1                     = 0xac
+	DLT_GPF_F                         = 0xab
+	DLT_GPF_T                         = 0xaa
+	DLT_GPRS_LLC                      = 0xa9
+	DLT_GSMTAP_ABIS                   = 0xda
+	DLT_GSMTAP_UM                     = 0xd9
+	DLT_HHDLC                         = 0x79
+	DLT_IBM_SN                        = 0x92
+	DLT_IBM_SP                        = 0x91
 	DLT_IEEE802                       = 0x6
 	DLT_IEEE802_11                    = 0x69
 	DLT_IEEE802_11_RADIO              = 0x7f
 	DLT_IEEE802_11_RADIO_AVS          = 0xa3
+	DLT_IEEE802_15_4                  = 0xc3
+	DLT_IEEE802_15_4_LINUX            = 0xbf
+	DLT_IEEE802_15_4_NOFCS            = 0xe6
+	DLT_IEEE802_15_4_NONASK_PHY       = 0xd7
+	DLT_IEEE802_16_MAC_CPS            = 0xbc
+	DLT_IEEE802_16_MAC_CPS_RADIO      = 0xc1
+	DLT_IPFILTER                      = 0x74
+	DLT_IPMB                          = 0xc7
+	DLT_IPMB_LINUX                    = 0xd1
+	DLT_IPNET                         = 0xe2
+	DLT_IPOIB                         = 0xf2
+	DLT_IPV4                          = 0xe4
+	DLT_IPV6                          = 0xe5
+	DLT_IP_OVER_FC                    = 0x7a
+	DLT_JUNIPER_ATM1                  = 0x89
+	DLT_JUNIPER_ATM2                  = 0x87
+	DLT_JUNIPER_ATM_CEMIC             = 0xee
+	DLT_JUNIPER_CHDLC                 = 0xb5
+	DLT_JUNIPER_ES                    = 0x84
+	DLT_JUNIPER_ETHER                 = 0xb2
+	DLT_JUNIPER_FIBRECHANNEL          = 0xea
+	DLT_JUNIPER_FRELAY                = 0xb4
+	DLT_JUNIPER_GGSN                  = 0x85
+	DLT_JUNIPER_ISM                   = 0xc2
+	DLT_JUNIPER_MFR                   = 0x86
+	DLT_JUNIPER_MLFR                  = 0x83
+	DLT_JUNIPER_MLPPP                 = 0x82
+	DLT_JUNIPER_MONITOR               = 0xa4
+	DLT_JUNIPER_PIC_PEER              = 0xae
+	DLT_JUNIPER_PPP                   = 0xb3
+	DLT_JUNIPER_PPPOE                 = 0xa7
+	DLT_JUNIPER_PPPOE_ATM             = 0xa8
+	DLT_JUNIPER_SERVICES              = 0x88
+	DLT_JUNIPER_SRX_E2E               = 0xe9
+	DLT_JUNIPER_ST                    = 0xc8
+	DLT_JUNIPER_VP                    = 0xb7
+	DLT_JUNIPER_VS                    = 0xe8
+	DLT_LAPB_WITH_DIR                 = 0xcf
+	DLT_LAPD                          = 0xcb
+	DLT_LIN                           = 0xd4
+	DLT_LINUX_EVDEV                   = 0xd8
+	DLT_LINUX_IRDA                    = 0x90
+	DLT_LINUX_LAPD                    = 0xb1
+	DLT_LINUX_PPP_WITHDIRECTION       = 0xa6
 	DLT_LINUX_SLL                     = 0x71
 	DLT_LOOP                          = 0x6c
+	DLT_LTALK                         = 0x72
+	DLT_MATCHING_MAX                  = 0xf5
+	DLT_MATCHING_MIN                  = 0x68
+	DLT_MFR                           = 0xb6
+	DLT_MOST                          = 0xd3
+	DLT_MPEG_2_TS                     = 0xf3
+	DLT_MPLS                          = 0xdb
+	DLT_MTP2                          = 0x8c
+	DLT_MTP2_WITH_PHDR                = 0x8b
+	DLT_MTP3                          = 0x8d
+	DLT_MUX27010                      = 0xec
+	DLT_NETANALYZER                   = 0xf0
+	DLT_NETANALYZER_TRANSPARENT       = 0xf1
+	DLT_NFC_LLCP                      = 0xf5
+	DLT_NFLOG                         = 0xef
+	DLT_NG40                          = 0xf4
 	DLT_NULL                          = 0x0
+	DLT_PCI_EXP                       = 0x7d
 	DLT_PFLOG                         = 0x75
 	DLT_PFSYNC                        = 0x12
+	DLT_PPI                           = 0xc0
 	DLT_PPP                           = 0x9
 	DLT_PPP_BSDOS                     = 0x10
+	DLT_PPP_ETHER                     = 0x33
+	DLT_PPP_PPPD                      = 0xa6
 	DLT_PPP_SERIAL                    = 0x32
+	DLT_PPP_WITH_DIR                  = 0xcc
+	DLT_PPP_WITH_DIRECTION            = 0xa6
+	DLT_PRISM_HEADER                  = 0x77
 	DLT_PRONET                        = 0x4
+	DLT_RAIF1                         = 0xc6
 	DLT_RAW                           = 0xc
+	DLT_RIO                           = 0x7c
+	DLT_SCCP                          = 0x8e
+	DLT_SITA                          = 0xc4
 	DLT_SLIP                          = 0x8
 	DLT_SLIP_BSDOS                    = 0xf
+	DLT_STANAG_5066_D_PDU             = 0xed
+	DLT_SUNATM                        = 0x7b
+	DLT_SYMANTEC_FIREWALL             = 0x63
+	DLT_TZSP                          = 0x80
+	DLT_USB                           = 0xba
+	DLT_USB_LINUX                     = 0xbd
+	DLT_USB_LINUX_MMAPPED             = 0xdc
+	DLT_USER0                         = 0x93
+	DLT_USER1                         = 0x94
+	DLT_USER10                        = 0x9d
+	DLT_USER11                        = 0x9e
+	DLT_USER12                        = 0x9f
+	DLT_USER13                        = 0xa0
+	DLT_USER14                        = 0xa1
+	DLT_USER15                        = 0xa2
+	DLT_USER2                         = 0x95
+	DLT_USER3                         = 0x96
+	DLT_USER4                         = 0x97
+	DLT_USER5                         = 0x98
+	DLT_USER6                         = 0x99
+	DLT_USER7                         = 0x9a
+	DLT_USER8                         = 0x9b
+	DLT_USER9                         = 0x9c
+	DLT_WIHART                        = 0xdf
+	DLT_X2E_SERIAL                    = 0xd5
+	DLT_X2E_XORAYA                    = 0xd6
 	DT_BLK                            = 0x6
 	DT_CHR                            = 0x2
 	DT_DIR                            = 0x4
@@ -347,6 +483,7 @@ const (
 	IFT_PDP                           = 0xff
 	IFT_PFLOG                         = 0xf5
 	IFT_PFSYNC                        = 0xf6
+	IFT_PKTAP                         = 0xfe
 	IFT_PPP                           = 0x17
 	IFT_PROPMUX                       = 0x36
 	IFT_PROPVIRTUAL                   = 0x35
@@ -515,7 +652,7 @@ const (
 	IPV6_FAITH                        = 0x1d
 	IPV6_FLOWINFO_MASK                = 0xffffff0f
 	IPV6_FLOWLABEL_MASK               = 0xffff0f00
-	IPV6_FRAGTTL                      = 0x78
+	IPV6_FRAGTTL                      = 0x3c
 	IPV6_FW_ADD                       = 0x1e
 	IPV6_FW_DEL                       = 0x1f
 	IPV6_FW_FLUSH                     = 0x20
@@ -830,6 +967,7 @@ const (
 	RTF_LOCAL                         = 0x200000
 	RTF_MODIFIED                      = 0x20
 	RTF_MULTICAST                     = 0x800000
+	RTF_NOIFREF                       = 0x2000
 	RTF_PINNED                        = 0x100000
 	RTF_PRCLONING                     = 0x10000
 	RTF_PROTO1                        = 0x8000
@@ -1291,3 +1429,148 @@ const (
 	SIGXCPU   = syscall.Signal(0x18)
 	SIGXFSZ   = syscall.Signal(0x19)
 )
+
+// Error table
+var errors = [...]string{
+	1:   "operation not permitted",
+	2:   "no such file or directory",
+	3:   "no such process",
+	4:   "interrupted system call",
+	5:   "input/output error",
+	6:   "device not configured",
+	7:   "argument list too long",
+	8:   "exec format error",
+	9:   "bad file descriptor",
+	10:  "no child processes",
+	11:  "resource deadlock avoided",
+	12:  "cannot allocate memory",
+	13:  "permission denied",
+	14:  "bad address",
+	15:  "block device required",
+	16:  "resource busy",
+	17:  "file exists",
+	18:  "cross-device link",
+	19:  "operation not supported by device",
+	20:  "not a directory",
+	21:  "is a directory",
+	22:  "invalid argument",
+	23:  "too many open files in system",
+	24:  "too many open files",
+	25:  "inappropriate ioctl for device",
+	26:  "text file busy",
+	27:  "file too large",
+	28:  "no space left on device",
+	29:  "illegal seek",
+	30:  "read-only file system",
+	31:  "too many links",
+	32:  "broken pipe",
+	33:  "numerical argument out of domain",
+	34:  "result too large",
+	35:  "resource temporarily unavailable",
+	36:  "operation now in progress",
+	37:  "operation already in progress",
+	38:  "socket operation on non-socket",
+	39:  "destination address required",
+	40:  "message too long",
+	41:  "protocol wrong type for socket",
+	42:  "protocol not available",
+	43:  "protocol not supported",
+	44:  "socket type not supported",
+	45:  "operation not supported",
+	46:  "protocol family not supported",
+	47:  "address family not supported by protocol family",
+	48:  "address already in use",
+	49:  "can't assign requested address",
+	50:  "network is down",
+	51:  "network is unreachable",
+	52:  "network dropped connection on reset",
+	53:  "software caused connection abort",
+	54:  "connection reset by peer",
+	55:  "no buffer space available",
+	56:  "socket is already connected",
+	57:  "socket is not connected",
+	58:  "can't send after socket shutdown",
+	59:  "too many references: can't splice",
+	60:  "operation timed out",
+	61:  "connection refused",
+	62:  "too many levels of symbolic links",
+	63:  "file name too long",
+	64:  "host is down",
+	65:  "no route to host",
+	66:  "directory not empty",
+	67:  "too many processes",
+	68:  "too many users",
+	69:  "disc quota exceeded",
+	70:  "stale NFS file handle",
+	71:  "too many levels of remote in path",
+	72:  "RPC struct is bad",
+	73:  "RPC version wrong",
+	74:  "RPC prog. not avail",
+	75:  "program version wrong",
+	76:  "bad procedure for program",
+	77:  "no locks available",
+	78:  "function not implemented",
+	79:  "inappropriate file type or format",
+	80:  "authentication error",
+	81:  "need authenticator",
+	82:  "device power is off",
+	83:  "device error",
+	84:  "value too large to be stored in data type",
+	85:  "bad executable (or shared library)",
+	86:  "bad CPU type in executable",
+	87:  "shared library version mismatch",
+	88:  "malformed Mach-o file",
+	89:  "operation canceled",
+	90:  "identifier removed",
+	91:  "no message of desired type",
+	92:  "illegal byte sequence",
+	93:  "attribute not found",
+	94:  "bad message",
+	95:  "EMULTIHOP (Reserved)",
+	96:  "no message available on STREAM",
+	97:  "ENOLINK (Reserved)",
+	98:  "no STREAM resources",
+	99:  "not a STREAM",
+	100: "protocol error",
+	101: "STREAM ioctl timeout",
+	102: "operation not supported on socket",
+	103: "policy not found",
+	104: "state not recoverable",
+	105: "previous owner died",
+	106: "interface output queue is full",
+}
+
+// Signal table
+var signals = [...]string{
+	1:  "hangup",
+	2:  "interrupt",
+	3:  "quit",
+	4:  "illegal instruction",
+	5:  "trace/BPT trap",
+	6:  "abort trap",
+	7:  "EMT trap",
+	8:  "floating point exception",
+	9:  "killed",
+	10: "bus error",
+	11: "segmentation fault",
+	12: "bad system call",
+	13: "broken pipe",
+	14: "alarm clock",
+	15: "terminated",
+	16: "urgent I/O condition",
+	17: "suspended (signal)",
+	18: "suspended",
+	19: "continued",
+	20: "child exited",
+	21: "stopped (tty input)",
+	22: "stopped (tty output)",
+	23: "I/O possible",
+	24: "cputime limit exceeded",
+	25: "filesize limit exceeded",
+	26: "virtual timer expired",
+	27: "profiling timer expired",
+	28: "window size changes",
+	29: "information request",
+	30: "user defined signal 1",
+	31: "user defined signal 2",
+}
diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go
index 3c2a5bfc2e7dd99e59e1ead84b1ec51db9a1274e..7b95751c3db36f742a398d049828989eb8f4d63a 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go
@@ -225,6 +225,20 @@ const (
 	BRKINT                            = 0x2
 	CFLUSH                            = 0xf
 	CLOCAL                            = 0x8000
+	CLOCK_MONOTONIC                   = 0x4
+	CLOCK_MONOTONIC_FAST              = 0xc
+	CLOCK_MONOTONIC_PRECISE           = 0xb
+	CLOCK_PROCESS_CPUTIME_ID          = 0xf
+	CLOCK_PROF                        = 0x2
+	CLOCK_REALTIME                    = 0x0
+	CLOCK_REALTIME_FAST               = 0xa
+	CLOCK_REALTIME_PRECISE            = 0x9
+	CLOCK_SECOND                      = 0xd
+	CLOCK_THREAD_CPUTIME_ID           = 0xe
+	CLOCK_UPTIME                      = 0x5
+	CLOCK_UPTIME_FAST                 = 0x8
+	CLOCK_UPTIME_PRECISE              = 0x7
+	CLOCK_VIRTUAL                     = 0x1
 	CREAD                             = 0x800
 	CS5                               = 0x0
 	CS6                               = 0x100
diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go
index 3b3f7a9d22d30e67269d79c303f7f3098cf68f55..e48e7799a1d673b55512095970aa2feb96057b26 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go
@@ -225,6 +225,20 @@ const (
 	BRKINT                            = 0x2
 	CFLUSH                            = 0xf
 	CLOCAL                            = 0x8000
+	CLOCK_MONOTONIC                   = 0x4
+	CLOCK_MONOTONIC_FAST              = 0xc
+	CLOCK_MONOTONIC_PRECISE           = 0xb
+	CLOCK_PROCESS_CPUTIME_ID          = 0xf
+	CLOCK_PROF                        = 0x2
+	CLOCK_REALTIME                    = 0x0
+	CLOCK_REALTIME_FAST               = 0xa
+	CLOCK_REALTIME_PRECISE            = 0x9
+	CLOCK_SECOND                      = 0xd
+	CLOCK_THREAD_CPUTIME_ID           = 0xe
+	CLOCK_UPTIME                      = 0x5
+	CLOCK_UPTIME_FAST                 = 0x8
+	CLOCK_UPTIME_PRECISE              = 0x7
+	CLOCK_VIRTUAL                     = 0x1
 	CREAD                             = 0x800
 	CS5                               = 0x0
 	CS6                               = 0x100
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
index 6fbef75226325619b3ed7aa7264eb63227facee7..80b73811dd8b4595a9f0d8dd434f1188b31aa24e 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
@@ -145,6 +145,7 @@ const (
 	B75                              = 0x2
 	B921600                          = 0x1007
 	B9600                            = 0xd
+	BOTHER                           = 0x1000
 	BPF_A                            = 0x10
 	BPF_ABS                          = 0x20
 	BPF_ADD                          = 0x0
@@ -186,7 +187,13 @@ const (
 	BPF_W                            = 0x0
 	BPF_X                            = 0x8
 	BRKINT                           = 0x2
+	BS0                              = 0x0
+	BS1                              = 0x2000
+	BSDLY                            = 0x2000
+	CBAUD                            = 0x100f
+	CBAUDEX                          = 0x1000
 	CFLUSH                           = 0xf
+	CIBAUD                           = 0x100f0000
 	CLOCAL                           = 0x800
 	CLOCK_BOOTTIME                   = 0x7
 	CLOCK_BOOTTIME_ALARM             = 0x9
@@ -225,7 +232,14 @@ const (
 	CLONE_UNTRACED                   = 0x800000
 	CLONE_VFORK                      = 0x4000
 	CLONE_VM                         = 0x100
+	CMSPAR                           = 0x40000000
+	CR0                              = 0x0
+	CR1                              = 0x200
+	CR2                              = 0x400
+	CR3                              = 0x600
+	CRDLY                            = 0x600
 	CREAD                            = 0x80
+	CRTSCTS                          = 0x80000000
 	CS5                              = 0x0
 	CS6                              = 0x10
 	CS7                              = 0x20
@@ -353,6 +367,9 @@ const (
 	EXTPROC                          = 0x10000
 	FD_CLOEXEC                       = 0x1
 	FD_SETSIZE                       = 0x400
+	FF0                              = 0x0
+	FF1                              = 0x8000
+	FFDLY                            = 0x8000
 	FLUSHO                           = 0x1000
 	F_DUPFD                          = 0x0
 	F_DUPFD_CLOEXEC                  = 0x406
@@ -388,6 +405,7 @@ const (
 	F_UNLCK                          = 0x2
 	F_WRLCK                          = 0x1
 	HUPCL                            = 0x400
+	IBSHIFT                          = 0x10
 	ICANON                           = 0x2
 	ICMPV6_FILTER                    = 0x1
 	ICRNL                            = 0x100
@@ -619,6 +637,7 @@ const (
 	IP_XFRM_POLICY                   = 0x11
 	ISIG                             = 0x1
 	ISTRIP                           = 0x20
+	IUCLC                            = 0x200
 	IUTF8                            = 0x4000
 	IXANY                            = 0x800
 	IXOFF                            = 0x1000
@@ -750,10 +769,13 @@ const (
 	NETLINK_UNUSED                   = 0x1
 	NETLINK_USERSOCK                 = 0x2
 	NETLINK_XFRM                     = 0x6
+	NL0                              = 0x0
+	NL1                              = 0x100
 	NLA_ALIGNTO                      = 0x4
 	NLA_F_NESTED                     = 0x8000
 	NLA_F_NET_BYTEORDER              = 0x4000
 	NLA_HDRLEN                       = 0x4
+	NLDLY                            = 0x100
 	NLMSG_ALIGNTO                    = 0x4
 	NLMSG_DONE                       = 0x3
 	NLMSG_ERROR                      = 0x2
@@ -778,6 +800,7 @@ const (
 	OCRNL                            = 0x8
 	OFDEL                            = 0x80
 	OFILL                            = 0x40
+	OLCUC                            = 0x2
 	ONLCR                            = 0x4
 	ONLRET                           = 0x20
 	ONOCR                            = 0x10
@@ -803,6 +826,7 @@ const (
 	O_RDWR                           = 0x2
 	O_RSYNC                          = 0x101000
 	O_SYNC                           = 0x101000
+	O_TMPFILE                        = 0x410000
 	O_TRUNC                          = 0x200
 	O_WRONLY                         = 0x1
 	PACKET_ADD_MEMBERSHIP            = 0x1
@@ -1275,10 +1299,23 @@ const (
 	S_IXGRP                          = 0x8
 	S_IXOTH                          = 0x1
 	S_IXUSR                          = 0x40
+	TAB0                             = 0x0
+	TAB1                             = 0x800
+	TAB2                             = 0x1000
+	TAB3                             = 0x1800
+	TABDLY                           = 0x1800
 	TCFLSH                           = 0x540b
+	TCGETA                           = 0x5405
+	TCGETS                           = 0x5401
+	TCGETS2                          = 0x802c542a
+	TCGETX                           = 0x5432
 	TCIFLUSH                         = 0x0
+	TCIOFF                           = 0x2
 	TCIOFLUSH                        = 0x2
+	TCION                            = 0x3
 	TCOFLUSH                         = 0x1
+	TCOOFF                           = 0x0
+	TCOON                            = 0x1
 	TCP_CONGESTION                   = 0xd
 	TCP_CORK                         = 0x3
 	TCP_DEFER_ACCEPT                 = 0x9
@@ -1298,14 +1335,32 @@ const (
 	TCP_SYNCNT                       = 0x7
 	TCP_WINDOW_CLAMP                 = 0xa
 	TCSAFLUSH                        = 0x2
+	TCSBRK                           = 0x5409
+	TCSBRKP                          = 0x5425
+	TCSETA                           = 0x5406
+	TCSETAF                          = 0x5408
+	TCSETAW                          = 0x5407
+	TCSETS                           = 0x5402
+	TCSETS2                          = 0x402c542b
+	TCSETSF                          = 0x5404
+	TCSETSF2                         = 0x402c542d
+	TCSETSW                          = 0x5403
+	TCSETSW2                         = 0x402c542c
+	TCSETX                           = 0x5433
+	TCSETXF                          = 0x5434
+	TCSETXW                          = 0x5435
+	TCXONC                           = 0x540a
 	TIOCCBRK                         = 0x5428
 	TIOCCONS                         = 0x541d
 	TIOCEXCL                         = 0x540c
 	TIOCGDEV                         = 0x80045432
 	TIOCGETD                         = 0x5424
+	TIOCGEXCL                        = 0x80045440
 	TIOCGICOUNT                      = 0x545d
 	TIOCGLCKTRMIOS                   = 0x5456
 	TIOCGPGRP                        = 0x540f
+	TIOCGPKT                         = 0x80045438
+	TIOCGPTLCK                       = 0x80045439
 	TIOCGPTN                         = 0x80045430
 	TIOCGRS485                       = 0x542e
 	TIOCGSERIAL                      = 0x541e
@@ -1411,6 +1466,8 @@ const (
 	WORDSIZE                         = 0x20
 	WSTOPPED                         = 0x2
 	WUNTRACED                        = 0x2
+	XCASE                            = 0x4
+	XTABS                            = 0x1800
 )
 
 // Errors
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
index b40ccb8d7e5167adf6872f34f3bdb061c5e21a82..64cc0b7d2f39ff04cb09a98d8eacafb1b5b333ec 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
@@ -145,6 +145,7 @@ const (
 	B75                              = 0x2
 	B921600                          = 0x1007
 	B9600                            = 0xd
+	BOTHER                           = 0x1000
 	BPF_A                            = 0x10
 	BPF_ABS                          = 0x20
 	BPF_ADD                          = 0x0
@@ -186,7 +187,13 @@ const (
 	BPF_W                            = 0x0
 	BPF_X                            = 0x8
 	BRKINT                           = 0x2
+	BS0                              = 0x0
+	BS1                              = 0x2000
+	BSDLY                            = 0x2000
+	CBAUD                            = 0x100f
+	CBAUDEX                          = 0x1000
 	CFLUSH                           = 0xf
+	CIBAUD                           = 0x100f0000
 	CLOCAL                           = 0x800
 	CLOCK_BOOTTIME                   = 0x7
 	CLOCK_BOOTTIME_ALARM             = 0x9
@@ -225,7 +232,14 @@ const (
 	CLONE_UNTRACED                   = 0x800000
 	CLONE_VFORK                      = 0x4000
 	CLONE_VM                         = 0x100
+	CMSPAR                           = 0x40000000
+	CR0                              = 0x0
+	CR1                              = 0x200
+	CR2                              = 0x400
+	CR3                              = 0x600
+	CRDLY                            = 0x600
 	CREAD                            = 0x80
+	CRTSCTS                          = 0x80000000
 	CS5                              = 0x0
 	CS6                              = 0x10
 	CS7                              = 0x20
@@ -353,6 +367,9 @@ const (
 	EXTPROC                          = 0x10000
 	FD_CLOEXEC                       = 0x1
 	FD_SETSIZE                       = 0x400
+	FF0                              = 0x0
+	FF1                              = 0x8000
+	FFDLY                            = 0x8000
 	FLUSHO                           = 0x1000
 	F_DUPFD                          = 0x0
 	F_DUPFD_CLOEXEC                  = 0x406
@@ -388,6 +405,7 @@ const (
 	F_UNLCK                          = 0x2
 	F_WRLCK                          = 0x1
 	HUPCL                            = 0x400
+	IBSHIFT                          = 0x10
 	ICANON                           = 0x2
 	ICMPV6_FILTER                    = 0x1
 	ICRNL                            = 0x100
@@ -619,6 +637,7 @@ const (
 	IP_XFRM_POLICY                   = 0x11
 	ISIG                             = 0x1
 	ISTRIP                           = 0x20
+	IUCLC                            = 0x200
 	IUTF8                            = 0x4000
 	IXANY                            = 0x800
 	IXOFF                            = 0x1000
@@ -750,10 +769,13 @@ const (
 	NETLINK_UNUSED                   = 0x1
 	NETLINK_USERSOCK                 = 0x2
 	NETLINK_XFRM                     = 0x6
+	NL0                              = 0x0
+	NL1                              = 0x100
 	NLA_ALIGNTO                      = 0x4
 	NLA_F_NESTED                     = 0x8000
 	NLA_F_NET_BYTEORDER              = 0x4000
 	NLA_HDRLEN                       = 0x4
+	NLDLY                            = 0x100
 	NLMSG_ALIGNTO                    = 0x4
 	NLMSG_DONE                       = 0x3
 	NLMSG_ERROR                      = 0x2
@@ -778,6 +800,7 @@ const (
 	OCRNL                            = 0x8
 	OFDEL                            = 0x80
 	OFILL                            = 0x40
+	OLCUC                            = 0x2
 	ONLCR                            = 0x4
 	ONLRET                           = 0x20
 	ONOCR                            = 0x10
@@ -803,6 +826,7 @@ const (
 	O_RDWR                           = 0x2
 	O_RSYNC                          = 0x101000
 	O_SYNC                           = 0x101000
+	O_TMPFILE                        = 0x410000
 	O_TRUNC                          = 0x200
 	O_WRONLY                         = 0x1
 	PACKET_ADD_MEMBERSHIP            = 0x1
@@ -1276,10 +1300,23 @@ const (
 	S_IXGRP                          = 0x8
 	S_IXOTH                          = 0x1
 	S_IXUSR                          = 0x40
+	TAB0                             = 0x0
+	TAB1                             = 0x800
+	TAB2                             = 0x1000
+	TAB3                             = 0x1800
+	TABDLY                           = 0x1800
 	TCFLSH                           = 0x540b
+	TCGETA                           = 0x5405
+	TCGETS                           = 0x5401
+	TCGETS2                          = 0x802c542a
+	TCGETX                           = 0x5432
 	TCIFLUSH                         = 0x0
+	TCIOFF                           = 0x2
 	TCIOFLUSH                        = 0x2
+	TCION                            = 0x3
 	TCOFLUSH                         = 0x1
+	TCOOFF                           = 0x0
+	TCOON                            = 0x1
 	TCP_CONGESTION                   = 0xd
 	TCP_CORK                         = 0x3
 	TCP_DEFER_ACCEPT                 = 0x9
@@ -1299,14 +1336,32 @@ const (
 	TCP_SYNCNT                       = 0x7
 	TCP_WINDOW_CLAMP                 = 0xa
 	TCSAFLUSH                        = 0x2
+	TCSBRK                           = 0x5409
+	TCSBRKP                          = 0x5425
+	TCSETA                           = 0x5406
+	TCSETAF                          = 0x5408
+	TCSETAW                          = 0x5407
+	TCSETS                           = 0x5402
+	TCSETS2                          = 0x402c542b
+	TCSETSF                          = 0x5404
+	TCSETSF2                         = 0x402c542d
+	TCSETSW                          = 0x5403
+	TCSETSW2                         = 0x402c542c
+	TCSETX                           = 0x5433
+	TCSETXF                          = 0x5434
+	TCSETXW                          = 0x5435
+	TCXONC                           = 0x540a
 	TIOCCBRK                         = 0x5428
 	TIOCCONS                         = 0x541d
 	TIOCEXCL                         = 0x540c
 	TIOCGDEV                         = 0x80045432
 	TIOCGETD                         = 0x5424
+	TIOCGEXCL                        = 0x80045440
 	TIOCGICOUNT                      = 0x545d
 	TIOCGLCKTRMIOS                   = 0x5456
 	TIOCGPGRP                        = 0x540f
+	TIOCGPKT                         = 0x80045438
+	TIOCGPTLCK                       = 0x80045439
 	TIOCGPTN                         = 0x80045430
 	TIOCGRS485                       = 0x542e
 	TIOCGSERIAL                      = 0x541e
@@ -1412,6 +1467,8 @@ const (
 	WORDSIZE                         = 0x40
 	WSTOPPED                         = 0x2
 	WUNTRACED                        = 0x2
+	XCASE                            = 0x4
+	XTABS                            = 0x1800
 )
 
 // Errors
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
index 4535b78b777aeef3b3d889d360374321e4025118..1cc76a78cf46dae9c0fcef6e254667cdf329c9e4 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
@@ -110,6 +110,38 @@ const (
 	ARPHRD_TUNNEL6                   = 0x301
 	ARPHRD_VOID                      = 0xffff
 	ARPHRD_X25                       = 0x10f
+	B0                               = 0x0
+	B1000000                         = 0x1008
+	B110                             = 0x3
+	B115200                          = 0x1002
+	B1152000                         = 0x1009
+	B1200                            = 0x9
+	B134                             = 0x4
+	B150                             = 0x5
+	B1500000                         = 0x100a
+	B1800                            = 0xa
+	B19200                           = 0xe
+	B200                             = 0x6
+	B2000000                         = 0x100b
+	B230400                          = 0x1003
+	B2400                            = 0xb
+	B2500000                         = 0x100c
+	B300                             = 0x7
+	B3000000                         = 0x100d
+	B3500000                         = 0x100e
+	B38400                           = 0xf
+	B4000000                         = 0x100f
+	B460800                          = 0x1004
+	B4800                            = 0xc
+	B50                              = 0x1
+	B500000                          = 0x1005
+	B57600                           = 0x1001
+	B576000                          = 0x1006
+	B600                             = 0x8
+	B75                              = 0x2
+	B921600                          = 0x1007
+	B9600                            = 0xd
+	BOTHER                           = 0x1000
 	BPF_A                            = 0x10
 	BPF_ABS                          = 0x20
 	BPF_ADD                          = 0x0
@@ -150,6 +182,15 @@ const (
 	BPF_TXA                          = 0x80
 	BPF_W                            = 0x0
 	BPF_X                            = 0x8
+	BRKINT                           = 0x2
+	BS0                              = 0x0
+	BS1                              = 0x2000
+	BSDLY                            = 0x2000
+	CBAUD                            = 0x100f
+	CBAUDEX                          = 0x1000
+	CFLUSH                           = 0xf
+	CIBAUD                           = 0x100f0000
+	CLOCAL                           = 0x800
 	CLOCK_BOOTTIME                   = 0x7
 	CLOCK_BOOTTIME_ALARM             = 0x9
 	CLOCK_DEFAULT                    = 0x0
@@ -187,6 +228,25 @@ const (
 	CLONE_UNTRACED                   = 0x800000
 	CLONE_VFORK                      = 0x4000
 	CLONE_VM                         = 0x100
+	CMSPAR                           = 0x40000000
+	CR0                              = 0x0
+	CR1                              = 0x200
+	CR2                              = 0x400
+	CR3                              = 0x600
+	CRDLY                            = 0x600
+	CREAD                            = 0x80
+	CRTSCTS                          = 0x80000000
+	CS5                              = 0x0
+	CS6                              = 0x10
+	CS7                              = 0x20
+	CS8                              = 0x30
+	CSIGNAL                          = 0xff
+	CSIZE                            = 0x30
+	CSTART                           = 0x11
+	CSTATUS                          = 0x0
+	CSTOP                            = 0x13
+	CSTOPB                           = 0x40
+	CSUSP                            = 0x1a
 	DT_BLK                           = 0x6
 	DT_CHR                           = 0x2
 	DT_DIR                           = 0x4
@@ -198,6 +258,13 @@ const (
 	DT_WHT                           = 0xe
 	ELF_NGREG                        = 0x12
 	ELF_PRARGSZ                      = 0x50
+	ECHO                             = 0x8
+	ECHOCTL                          = 0x200
+	ECHOE                            = 0x10
+	ECHOK                            = 0x20
+	ECHOKE                           = 0x800
+	ECHONL                           = 0x40
+	ECHOPRT                          = 0x400
 	EPOLLERR                         = 0x8
 	EPOLLET                          = -0x80000000
 	EPOLLHUP                         = 0x10
@@ -280,8 +347,15 @@ const (
 	ETH_P_WAN_PPP                    = 0x7
 	ETH_P_WCCP                       = 0x883e
 	ETH_P_X25                        = 0x805
+	EXTA                             = 0xe
+	EXTB                             = 0xf
+	EXTPROC                          = 0x10000
 	FD_CLOEXEC                       = 0x1
 	FD_SETSIZE                       = 0x400
+	FF0                              = 0x0
+	FF1                              = 0x8000
+	FFDLY                            = 0x8000
+	FLUSHO                           = 0x1000
 	F_DUPFD                          = 0x0
 	F_DUPFD_CLOEXEC                  = 0x406
 	F_EXLCK                          = 0x4
@@ -315,7 +389,12 @@ const (
 	F_ULOCK                          = 0x0
 	F_UNLCK                          = 0x2
 	F_WRLCK                          = 0x1
+	HUPCL                            = 0x400
+	IBSHIFT                          = 0x10
+	ICANON                           = 0x2
 	ICMPV6_FILTER                    = 0x1
+	ICRNL                            = 0x100
+	IEXTEN                           = 0x8000
 	IFA_F_DADFAILED                  = 0x8
 	IFA_F_DEPRECATED                 = 0x20
 	IFA_F_HOMEADDRESS                = 0x10
@@ -349,6 +428,12 @@ const (
 	IFF_UP                           = 0x1
 	IFF_VNET_HDR                     = 0x4000
 	IFNAMSIZ                         = 0x10
+	IGNBRK                           = 0x1
+	IGNCR                            = 0x80
+	IGNPAR                           = 0x4
+	IMAXBEL                          = 0x2000
+	INLCR                            = 0x40
+	INPCK                            = 0x10
 	IN_ACCESS                        = 0x1
 	IN_ALL_EVENTS                    = 0xfff
 	IN_ATTRIB                        = 0x4
@@ -512,6 +597,13 @@ const (
 	IP_TTL                           = 0x2
 	IP_UNBLOCK_SOURCE                = 0x25
 	IP_XFRM_POLICY                   = 0x11
+	ISIG                             = 0x1
+	ISTRIP                           = 0x20
+	IUCLC                            = 0x200
+	IUTF8                            = 0x4000
+	IXANY                            = 0x800
+	IXOFF                            = 0x1000
+	IXON                             = 0x400
 	LINUX_REBOOT_CMD_CAD_OFF         = 0x0
 	LINUX_REBOOT_CMD_CAD_ON          = 0x89abcdef
 	LINUX_REBOOT_CMD_HALT            = 0xcdef0123
@@ -635,10 +727,13 @@ const (
 	NETLINK_UNUSED                   = 0x1
 	NETLINK_USERSOCK                 = 0x2
 	NETLINK_XFRM                     = 0x6
+	NL0                              = 0x0
+	NL1                              = 0x100
 	NLA_ALIGNTO                      = 0x4
 	NLA_F_NESTED                     = 0x8000
 	NLA_F_NET_BYTEORDER              = 0x4000
 	NLA_HDRLEN                       = 0x4
+	NLDLY                            = 0x100
 	NLMSG_ALIGNTO                    = 0x4
 	NLMSG_DONE                       = 0x3
 	NLMSG_ERROR                      = 0x2
@@ -658,6 +753,15 @@ const (
 	NLM_F_REPLACE                    = 0x100
 	NLM_F_REQUEST                    = 0x1
 	NLM_F_ROOT                       = 0x100
+	NOFLSH                           = 0x80
+	OCRNL                            = 0x8
+	OFDEL                            = 0x80
+	OFILL                            = 0x40
+	OLCUC                            = 0x2
+	ONLCR                            = 0x4
+	ONLRET                           = 0x20
+	ONOCR                            = 0x10
+	OPOST                            = 0x1
 	O_ACCMODE                        = 0x3
 	O_APPEND                         = 0x400
 	O_ASYNC                          = 0x2000
@@ -674,6 +778,7 @@ const (
 	O_NOCTTY                         = 0x100
 	O_NOFOLLOW                       = 0x8000
 	O_NONBLOCK                       = 0x800
+	O_PATH                           = 0x200000
 	O_RDONLY                         = 0x0
 	O_RDWR                           = 0x2
 	O_RSYNC                          = 0x1000
@@ -695,6 +800,10 @@ const (
 	PACKET_RECV_OUTPUT               = 0x3
 	PACKET_RX_RING                   = 0x5
 	PACKET_STATISTICS                = 0x6
+	PARENB                           = 0x100
+	PARMRK                           = 0x8
+	PARODD                           = 0x200
+	PENDIN                           = 0x4000
 	PRIO_PGRP                        = 0x1
 	PRIO_PROCESS                     = 0x0
 	PRIO_USER                        = 0x2
@@ -1114,9 +1223,23 @@ const (
 	S_IXGRP                          = 0x8
 	S_IXOTH                          = 0x1
 	S_IXUSR                          = 0x40
+	TAB0                             = 0x0
+	TAB1                             = 0x800
+	TAB2                             = 0x1000
+	TAB3                             = 0x1800
+	TABDLY                           = 0x1800
+	TCFLSH                           = 0x540b
+	TCGETA                           = 0x5405
+	TCGETS                           = 0x5401
+	TCGETS2                          = 0x802c542a
+	TCGETX                           = 0x5432
 	TCIFLUSH                         = 0x0
+	TCIOFF                           = 0x2
 	TCIOFLUSH                        = 0x2
+	TCION                            = 0x3
 	TCOFLUSH                         = 0x1
+	TCOOFF                           = 0x0
+	TCOON                            = 0x1
 	TCP_CONGESTION                   = 0xd
 	TCP_CORK                         = 0x3
 	TCP_DEFER_ACCEPT                 = 0x9
@@ -1135,14 +1258,33 @@ const (
 	TCP_QUICKACK                     = 0xc
 	TCP_SYNCNT                       = 0x7
 	TCP_WINDOW_CLAMP                 = 0xa
+	TCSAFLUSH                        = 0x2
+	TCSBRK                           = 0x5409
+	TCSBRKP                          = 0x5425
+	TCSETA                           = 0x5406
+	TCSETAF                          = 0x5408
+	TCSETAW                          = 0x5407
+	TCSETS                           = 0x5402
+	TCSETS2                          = 0x402c542b
+	TCSETSF                          = 0x5404
+	TCSETSF2                         = 0x402c542d
+	TCSETSW                          = 0x5403
+	TCSETSW2                         = 0x402c542c
+	TCSETX                           = 0x5433
+	TCSETXF                          = 0x5434
+	TCSETXW                          = 0x5435
+	TCXONC                           = 0x540a
 	TIOCCBRK                         = 0x5428
 	TIOCCONS                         = 0x541d
 	TIOCEXCL                         = 0x540c
 	TIOCGDEV                         = 0x80045432
 	TIOCGETD                         = 0x5424
+	TIOCGEXCL                        = 0x80045440
 	TIOCGICOUNT                      = 0x545d
 	TIOCGLCKTRMIOS                   = 0x5456
 	TIOCGPGRP                        = 0x540f
+	TIOCGPKT                         = 0x80045438
+	TIOCGPTLCK                       = 0x80045439
 	TIOCGPTN                         = 0x80045430
 	TIOCGRS485                       = 0x542e
 	TIOCGSERIAL                      = 0x541e
@@ -1200,6 +1342,7 @@ const (
 	TIOCSTI                          = 0x5412
 	TIOCSWINSZ                       = 0x5414
 	TIOCVHANGUP                      = 0x5437
+	TOSTOP                           = 0x100
 	TUNATTACHFILTER                  = 0x400854d5
 	TUNDETACHFILTER                  = 0x400854d6
 	TUNGETFEATURES                   = 0x800454cf
@@ -1217,6 +1360,26 @@ const (
 	TUNSETSNDBUF                     = 0x400454d4
 	TUNSETTXFILTER                   = 0x400454d1
 	TUNSETVNETHDRSZ                  = 0x400454d8
+	VDISCARD                         = 0xd
+	VEOF                             = 0x4
+	VEOL                             = 0xb
+	VEOL2                            = 0x10
+	VERASE                           = 0x2
+	VINTR                            = 0x0
+	VKILL                            = 0x3
+	VLNEXT                           = 0xf
+	VMIN                             = 0x6
+	VQUIT                            = 0x1
+	VREPRINT                         = 0xc
+	VSTART                           = 0x8
+	VSTOP                            = 0x9
+	VSUSP                            = 0xa
+	VSWTC                            = 0x7
+	VT0                              = 0x0
+	VT1                              = 0x4000
+	VTDLY                            = 0x4000
+	VTIME                            = 0x5
+	VWERASE                          = 0xe
 	WALL                             = 0x40000000
 	WCLONE                           = 0x80000000
 	WCONTINUED                       = 0x8
@@ -1227,6 +1390,8 @@ const (
 	WORDSIZE                         = 0x20
 	WSTOPPED                         = 0x2
 	WUNTRACED                        = 0x2
+	XCASE                            = 0x4
+	XTABS                            = 0x1800
 )
 
 // Errors
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
index 165073f1326c967eb1b8f616123bbc2ebefb1ec7..47027b79c9d01aa28b1b85df3501513b8ff08cf7 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
@@ -149,6 +149,7 @@ const (
 	B75                              = 0x2
 	B921600                          = 0x1007
 	B9600                            = 0xd
+	BOTHER                           = 0x1000
 	BPF_A                            = 0x10
 	BPF_ABS                          = 0x20
 	BPF_ADD                          = 0x0
@@ -192,7 +193,13 @@ const (
 	BPF_X                            = 0x8
 	BPF_XOR                          = 0xa0
 	BRKINT                           = 0x2
+	BS0                              = 0x0
+	BS1                              = 0x2000
+	BSDLY                            = 0x2000
+	CBAUD                            = 0x100f
+	CBAUDEX                          = 0x1000
 	CFLUSH                           = 0xf
+	CIBAUD                           = 0x100f0000
 	CLOCAL                           = 0x800
 	CLOCK_BOOTTIME                   = 0x7
 	CLOCK_BOOTTIME_ALARM             = 0x9
@@ -231,7 +238,14 @@ const (
 	CLONE_UNTRACED                   = 0x800000
 	CLONE_VFORK                      = 0x4000
 	CLONE_VM                         = 0x100
+	CMSPAR                           = 0x40000000
+	CR0                              = 0x0
+	CR1                              = 0x200
+	CR2                              = 0x400
+	CR3                              = 0x600
+	CRDLY                            = 0x600
 	CREAD                            = 0x80
+	CRTSCTS                          = 0x80000000
 	CS5                              = 0x0
 	CS6                              = 0x10
 	CS7                              = 0x20
@@ -367,6 +381,9 @@ const (
 	EXTPROC                          = 0x10000
 	FD_CLOEXEC                       = 0x1
 	FD_SETSIZE                       = 0x400
+	FF0                              = 0x0
+	FF1                              = 0x8000
+	FFDLY                            = 0x8000
 	FLUSHO                           = 0x1000
 	F_DUPFD                          = 0x0
 	F_DUPFD_CLOEXEC                  = 0x406
@@ -402,6 +419,7 @@ const (
 	F_UNLCK                          = 0x2
 	F_WRLCK                          = 0x1
 	HUPCL                            = 0x400
+	IBSHIFT                          = 0x10
 	ICANON                           = 0x2
 	ICMPV6_FILTER                    = 0x1
 	ICRNL                            = 0x100
@@ -645,6 +663,7 @@ const (
 	IP_XFRM_POLICY                   = 0x11
 	ISIG                             = 0x1
 	ISTRIP                           = 0x20
+	IUCLC                            = 0x200
 	IUTF8                            = 0x4000
 	IXANY                            = 0x800
 	IXOFF                            = 0x1000
@@ -782,10 +801,13 @@ const (
 	NETLINK_UNUSED                   = 0x1
 	NETLINK_USERSOCK                 = 0x2
 	NETLINK_XFRM                     = 0x6
+	NL0                              = 0x0
+	NL1                              = 0x100
 	NLA_ALIGNTO                      = 0x4
 	NLA_F_NESTED                     = 0x8000
 	NLA_F_NET_BYTEORDER              = 0x4000
 	NLA_HDRLEN                       = 0x4
+	NLDLY                            = 0x100
 	NLMSG_ALIGNTO                    = 0x4
 	NLMSG_DONE                       = 0x3
 	NLMSG_ERROR                      = 0x2
@@ -810,6 +832,7 @@ const (
 	OCRNL                            = 0x8
 	OFDEL                            = 0x80
 	OFILL                            = 0x40
+	OLCUC                            = 0x2
 	ONLCR                            = 0x4
 	ONLRET                           = 0x20
 	ONOCR                            = 0x10
@@ -1332,10 +1355,23 @@ const (
 	S_IXGRP                          = 0x8
 	S_IXOTH                          = 0x1
 	S_IXUSR                          = 0x40
+	TAB0                             = 0x0
+	TAB1                             = 0x800
+	TAB2                             = 0x1000
+	TAB3                             = 0x1800
+	TABDLY                           = 0x1800
 	TCFLSH                           = 0x540b
+	TCGETA                           = 0x5405
+	TCGETS                           = 0x5401
+	TCGETS2                          = 0x802c542a
+	TCGETX                           = 0x5432
 	TCIFLUSH                         = 0x0
+	TCIOFF                           = 0x2
 	TCIOFLUSH                        = 0x2
+	TCION                            = 0x3
 	TCOFLUSH                         = 0x1
+	TCOOFF                           = 0x0
+	TCOON                            = 0x1
 	TCP_CONGESTION                   = 0xd
 	TCP_COOKIE_IN_ALWAYS             = 0x1
 	TCP_COOKIE_MAX                   = 0x10
@@ -1374,6 +1410,21 @@ const (
 	TCP_USER_TIMEOUT                 = 0x12
 	TCP_WINDOW_CLAMP                 = 0xa
 	TCSAFLUSH                        = 0x2
+	TCSBRK                           = 0x5409
+	TCSBRKP                          = 0x5425
+	TCSETA                           = 0x5406
+	TCSETAF                          = 0x5408
+	TCSETAW                          = 0x5407
+	TCSETS                           = 0x5402
+	TCSETS2                          = 0x402c542b
+	TCSETSF                          = 0x5404
+	TCSETSF2                         = 0x402c542d
+	TCSETSW                          = 0x5403
+	TCSETSW2                         = 0x402c542c
+	TCSETX                           = 0x5433
+	TCSETXF                          = 0x5434
+	TCSETXW                          = 0x5435
+	TCXONC                           = 0x540a
 	TIOCCBRK                         = 0x5428
 	TIOCCONS                         = 0x541d
 	TIOCEXCL                         = 0x540c
@@ -1493,6 +1544,8 @@ const (
 	WORDSIZE                         = 0x40
 	WSTOPPED                         = 0x2
 	WUNTRACED                        = 0x2
+	XCASE                            = 0x4
+	XTABS                            = 0x1800
 )
 
 // Errors
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
new file mode 100644
index 0000000000000000000000000000000000000000..98056fe207b2f3c08f8078b6ff3609bd0d6389fd
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
@@ -0,0 +1,1916 @@
+// mkerrors.sh
+// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
+
+// +build mips64,linux
+
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs -- _const.go
+
+package unix
+
+import "syscall"
+
+const (
+	AF_ALG                           = 0x26
+	AF_APPLETALK                     = 0x5
+	AF_ASH                           = 0x12
+	AF_ATMPVC                        = 0x8
+	AF_ATMSVC                        = 0x14
+	AF_AX25                          = 0x3
+	AF_BLUETOOTH                     = 0x1f
+	AF_BRIDGE                        = 0x7
+	AF_CAIF                          = 0x25
+	AF_CAN                           = 0x1d
+	AF_DECnet                        = 0xc
+	AF_ECONET                        = 0x13
+	AF_FILE                          = 0x1
+	AF_IB                            = 0x1b
+	AF_IEEE802154                    = 0x24
+	AF_INET                          = 0x2
+	AF_INET6                         = 0xa
+	AF_IPX                           = 0x4
+	AF_IRDA                          = 0x17
+	AF_ISDN                          = 0x22
+	AF_IUCV                          = 0x20
+	AF_KEY                           = 0xf
+	AF_LLC                           = 0x1a
+	AF_LOCAL                         = 0x1
+	AF_MAX                           = 0x29
+	AF_MPLS                          = 0x1c
+	AF_NETBEUI                       = 0xd
+	AF_NETLINK                       = 0x10
+	AF_NETROM                        = 0x6
+	AF_NFC                           = 0x27
+	AF_PACKET                        = 0x11
+	AF_PHONET                        = 0x23
+	AF_PPPOX                         = 0x18
+	AF_RDS                           = 0x15
+	AF_ROSE                          = 0xb
+	AF_ROUTE                         = 0x10
+	AF_RXRPC                         = 0x21
+	AF_SECURITY                      = 0xe
+	AF_SNA                           = 0x16
+	AF_TIPC                          = 0x1e
+	AF_UNIX                          = 0x1
+	AF_UNSPEC                        = 0x0
+	AF_VSOCK                         = 0x28
+	AF_WANPIPE                       = 0x19
+	AF_X25                           = 0x9
+	ARPHRD_6LOWPAN                   = 0x339
+	ARPHRD_ADAPT                     = 0x108
+	ARPHRD_APPLETLK                  = 0x8
+	ARPHRD_ARCNET                    = 0x7
+	ARPHRD_ASH                       = 0x30d
+	ARPHRD_ATM                       = 0x13
+	ARPHRD_AX25                      = 0x3
+	ARPHRD_BIF                       = 0x307
+	ARPHRD_CAIF                      = 0x336
+	ARPHRD_CAN                       = 0x118
+	ARPHRD_CHAOS                     = 0x5
+	ARPHRD_CISCO                     = 0x201
+	ARPHRD_CSLIP                     = 0x101
+	ARPHRD_CSLIP6                    = 0x103
+	ARPHRD_DDCMP                     = 0x205
+	ARPHRD_DLCI                      = 0xf
+	ARPHRD_ECONET                    = 0x30e
+	ARPHRD_EETHER                    = 0x2
+	ARPHRD_ETHER                     = 0x1
+	ARPHRD_EUI64                     = 0x1b
+	ARPHRD_FCAL                      = 0x311
+	ARPHRD_FCFABRIC                  = 0x313
+	ARPHRD_FCPL                      = 0x312
+	ARPHRD_FCPP                      = 0x310
+	ARPHRD_FDDI                      = 0x306
+	ARPHRD_FRAD                      = 0x302
+	ARPHRD_HDLC                      = 0x201
+	ARPHRD_HIPPI                     = 0x30c
+	ARPHRD_HWX25                     = 0x110
+	ARPHRD_IEEE1394                  = 0x18
+	ARPHRD_IEEE802                   = 0x6
+	ARPHRD_IEEE80211                 = 0x321
+	ARPHRD_IEEE80211_PRISM           = 0x322
+	ARPHRD_IEEE80211_RADIOTAP        = 0x323
+	ARPHRD_IEEE802154                = 0x324
+	ARPHRD_IEEE802154_MONITOR        = 0x325
+	ARPHRD_IEEE802_TR                = 0x320
+	ARPHRD_INFINIBAND                = 0x20
+	ARPHRD_IP6GRE                    = 0x337
+	ARPHRD_IPDDP                     = 0x309
+	ARPHRD_IPGRE                     = 0x30a
+	ARPHRD_IRDA                      = 0x30f
+	ARPHRD_LAPB                      = 0x204
+	ARPHRD_LOCALTLK                  = 0x305
+	ARPHRD_LOOPBACK                  = 0x304
+	ARPHRD_METRICOM                  = 0x17
+	ARPHRD_NETLINK                   = 0x338
+	ARPHRD_NETROM                    = 0x0
+	ARPHRD_NONE                      = 0xfffe
+	ARPHRD_PHONET                    = 0x334
+	ARPHRD_PHONET_PIPE               = 0x335
+	ARPHRD_PIMREG                    = 0x30b
+	ARPHRD_PPP                       = 0x200
+	ARPHRD_PRONET                    = 0x4
+	ARPHRD_RAWHDLC                   = 0x206
+	ARPHRD_ROSE                      = 0x10e
+	ARPHRD_RSRVD                     = 0x104
+	ARPHRD_SIT                       = 0x308
+	ARPHRD_SKIP                      = 0x303
+	ARPHRD_SLIP                      = 0x100
+	ARPHRD_SLIP6                     = 0x102
+	ARPHRD_TUNNEL                    = 0x300
+	ARPHRD_TUNNEL6                   = 0x301
+	ARPHRD_VOID                      = 0xffff
+	ARPHRD_X25                       = 0x10f
+	B0                               = 0x0
+	B1000000                         = 0x1008
+	B110                             = 0x3
+	B115200                          = 0x1002
+	B1152000                         = 0x1009
+	B1200                            = 0x9
+	B134                             = 0x4
+	B150                             = 0x5
+	B1500000                         = 0x100a
+	B1800                            = 0xa
+	B19200                           = 0xe
+	B200                             = 0x6
+	B2000000                         = 0x100b
+	B230400                          = 0x1003
+	B2400                            = 0xb
+	B2500000                         = 0x100c
+	B300                             = 0x7
+	B3000000                         = 0x100d
+	B3500000                         = 0x100e
+	B38400                           = 0xf
+	B4000000                         = 0x100f
+	B460800                          = 0x1004
+	B4800                            = 0xc
+	B50                              = 0x1
+	B500000                          = 0x1005
+	B57600                           = 0x1001
+	B576000                          = 0x1006
+	B600                             = 0x8
+	B75                              = 0x2
+	B921600                          = 0x1007
+	B9600                            = 0xd
+	BPF_A                            = 0x10
+	BPF_ABS                          = 0x20
+	BPF_ADD                          = 0x0
+	BPF_ALU                          = 0x4
+	BPF_AND                          = 0x50
+	BPF_B                            = 0x10
+	BPF_DIV                          = 0x30
+	BPF_H                            = 0x8
+	BPF_IMM                          = 0x0
+	BPF_IND                          = 0x40
+	BPF_JA                           = 0x0
+	BPF_JEQ                          = 0x10
+	BPF_JGE                          = 0x30
+	BPF_JGT                          = 0x20
+	BPF_JMP                          = 0x5
+	BPF_JSET                         = 0x40
+	BPF_K                            = 0x0
+	BPF_LD                           = 0x0
+	BPF_LDX                          = 0x1
+	BPF_LEN                          = 0x80
+	BPF_LL_OFF                       = -0x200000
+	BPF_LSH                          = 0x60
+	BPF_MAJOR_VERSION                = 0x1
+	BPF_MAXINSNS                     = 0x1000
+	BPF_MEM                          = 0x60
+	BPF_MEMWORDS                     = 0x10
+	BPF_MINOR_VERSION                = 0x1
+	BPF_MISC                         = 0x7
+	BPF_MOD                          = 0x90
+	BPF_MSH                          = 0xa0
+	BPF_MUL                          = 0x20
+	BPF_NEG                          = 0x80
+	BPF_NET_OFF                      = -0x100000
+	BPF_OR                           = 0x40
+	BPF_RET                          = 0x6
+	BPF_RSH                          = 0x70
+	BPF_ST                           = 0x2
+	BPF_STX                          = 0x3
+	BPF_SUB                          = 0x10
+	BPF_TAX                          = 0x0
+	BPF_TXA                          = 0x80
+	BPF_W                            = 0x0
+	BPF_X                            = 0x8
+	BPF_XOR                          = 0xa0
+	BRKINT                           = 0x2
+	CFLUSH                           = 0xf
+	CLOCAL                           = 0x800
+	CLOCK_BOOTTIME                   = 0x7
+	CLOCK_BOOTTIME_ALARM             = 0x9
+	CLOCK_DEFAULT                    = 0x0
+	CLOCK_EXT                        = 0x1
+	CLOCK_INT                        = 0x2
+	CLOCK_MONOTONIC                  = 0x1
+	CLOCK_MONOTONIC_COARSE           = 0x6
+	CLOCK_MONOTONIC_RAW              = 0x4
+	CLOCK_PROCESS_CPUTIME_ID         = 0x2
+	CLOCK_REALTIME                   = 0x0
+	CLOCK_REALTIME_ALARM             = 0x8
+	CLOCK_REALTIME_COARSE            = 0x5
+	CLOCK_TAI                        = 0xb
+	CLOCK_THREAD_CPUTIME_ID          = 0x3
+	CLOCK_TXFROMRX                   = 0x4
+	CLOCK_TXINT                      = 0x3
+	CLONE_CHILD_CLEARTID             = 0x200000
+	CLONE_CHILD_SETTID               = 0x1000000
+	CLONE_DETACHED                   = 0x400000
+	CLONE_FILES                      = 0x400
+	CLONE_FS                         = 0x200
+	CLONE_IO                         = 0x80000000
+	CLONE_NEWIPC                     = 0x8000000
+	CLONE_NEWNET                     = 0x40000000
+	CLONE_NEWNS                      = 0x20000
+	CLONE_NEWPID                     = 0x20000000
+	CLONE_NEWUSER                    = 0x10000000
+	CLONE_NEWUTS                     = 0x4000000
+	CLONE_PARENT                     = 0x8000
+	CLONE_PARENT_SETTID              = 0x100000
+	CLONE_PTRACE                     = 0x2000
+	CLONE_SETTLS                     = 0x80000
+	CLONE_SIGHAND                    = 0x800
+	CLONE_SYSVSEM                    = 0x40000
+	CLONE_THREAD                     = 0x10000
+	CLONE_UNTRACED                   = 0x800000
+	CLONE_VFORK                      = 0x4000
+	CLONE_VM                         = 0x100
+	CREAD                            = 0x80
+	CS5                              = 0x0
+	CS6                              = 0x10
+	CS7                              = 0x20
+	CS8                              = 0x30
+	CSIGNAL                          = 0xff
+	CSIZE                            = 0x30
+	CSTART                           = 0x11
+	CSTATUS                          = 0x0
+	CSTOP                            = 0x13
+	CSTOPB                           = 0x40
+	CSUSP                            = 0x1a
+	DT_BLK                           = 0x6
+	DT_CHR                           = 0x2
+	DT_DIR                           = 0x4
+	DT_FIFO                          = 0x1
+	DT_LNK                           = 0xa
+	DT_REG                           = 0x8
+	DT_SOCK                          = 0xc
+	DT_UNKNOWN                       = 0x0
+	DT_WHT                           = 0xe
+	ECHO                             = 0x8
+	ECHOCTL                          = 0x200
+	ECHOE                            = 0x10
+	ECHOK                            = 0x20
+	ECHOKE                           = 0x800
+	ECHONL                           = 0x40
+	ECHOPRT                          = 0x400
+	ENCODING_DEFAULT                 = 0x0
+	ENCODING_FM_MARK                 = 0x3
+	ENCODING_FM_SPACE                = 0x4
+	ENCODING_MANCHESTER              = 0x5
+	ENCODING_NRZ                     = 0x1
+	ENCODING_NRZI                    = 0x2
+	EPOLLERR                         = 0x8
+	EPOLLET                          = 0x80000000
+	EPOLLHUP                         = 0x10
+	EPOLLIN                          = 0x1
+	EPOLLMSG                         = 0x400
+	EPOLLONESHOT                     = 0x40000000
+	EPOLLOUT                         = 0x4
+	EPOLLPRI                         = 0x2
+	EPOLLRDBAND                      = 0x80
+	EPOLLRDHUP                       = 0x2000
+	EPOLLRDNORM                      = 0x40
+	EPOLLWAKEUP                      = 0x20000000
+	EPOLLWRBAND                      = 0x200
+	EPOLLWRNORM                      = 0x100
+	EPOLL_CLOEXEC                    = 0x80000
+	EPOLL_CTL_ADD                    = 0x1
+	EPOLL_CTL_DEL                    = 0x2
+	EPOLL_CTL_MOD                    = 0x3
+	ETH_P_1588                       = 0x88f7
+	ETH_P_8021AD                     = 0x88a8
+	ETH_P_8021AH                     = 0x88e7
+	ETH_P_8021Q                      = 0x8100
+	ETH_P_80221                      = 0x8917
+	ETH_P_802_2                      = 0x4
+	ETH_P_802_3                      = 0x1
+	ETH_P_802_3_MIN                  = 0x600
+	ETH_P_802_EX1                    = 0x88b5
+	ETH_P_AARP                       = 0x80f3
+	ETH_P_AF_IUCV                    = 0xfbfb
+	ETH_P_ALL                        = 0x3
+	ETH_P_AOE                        = 0x88a2
+	ETH_P_ARCNET                     = 0x1a
+	ETH_P_ARP                        = 0x806
+	ETH_P_ATALK                      = 0x809b
+	ETH_P_ATMFATE                    = 0x8884
+	ETH_P_ATMMPOA                    = 0x884c
+	ETH_P_AX25                       = 0x2
+	ETH_P_BATMAN                     = 0x4305
+	ETH_P_BPQ                        = 0x8ff
+	ETH_P_CAIF                       = 0xf7
+	ETH_P_CAN                        = 0xc
+	ETH_P_CANFD                      = 0xd
+	ETH_P_CONTROL                    = 0x16
+	ETH_P_CUST                       = 0x6006
+	ETH_P_DDCMP                      = 0x6
+	ETH_P_DEC                        = 0x6000
+	ETH_P_DIAG                       = 0x6005
+	ETH_P_DNA_DL                     = 0x6001
+	ETH_P_DNA_RC                     = 0x6002
+	ETH_P_DNA_RT                     = 0x6003
+	ETH_P_DSA                        = 0x1b
+	ETH_P_ECONET                     = 0x18
+	ETH_P_EDSA                       = 0xdada
+	ETH_P_FCOE                       = 0x8906
+	ETH_P_FIP                        = 0x8914
+	ETH_P_HDLC                       = 0x19
+	ETH_P_IEEE802154                 = 0xf6
+	ETH_P_IEEEPUP                    = 0xa00
+	ETH_P_IEEEPUPAT                  = 0xa01
+	ETH_P_IP                         = 0x800
+	ETH_P_IPV6                       = 0x86dd
+	ETH_P_IPX                        = 0x8137
+	ETH_P_IRDA                       = 0x17
+	ETH_P_LAT                        = 0x6004
+	ETH_P_LINK_CTL                   = 0x886c
+	ETH_P_LOCALTALK                  = 0x9
+	ETH_P_LOOP                       = 0x60
+	ETH_P_LOOPBACK                   = 0x9000
+	ETH_P_MOBITEX                    = 0x15
+	ETH_P_MPLS_MC                    = 0x8848
+	ETH_P_MPLS_UC                    = 0x8847
+	ETH_P_MVRP                       = 0x88f5
+	ETH_P_PAE                        = 0x888e
+	ETH_P_PAUSE                      = 0x8808
+	ETH_P_PHONET                     = 0xf5
+	ETH_P_PPPTALK                    = 0x10
+	ETH_P_PPP_DISC                   = 0x8863
+	ETH_P_PPP_MP                     = 0x8
+	ETH_P_PPP_SES                    = 0x8864
+	ETH_P_PRP                        = 0x88fb
+	ETH_P_PUP                        = 0x200
+	ETH_P_PUPAT                      = 0x201
+	ETH_P_QINQ1                      = 0x9100
+	ETH_P_QINQ2                      = 0x9200
+	ETH_P_QINQ3                      = 0x9300
+	ETH_P_RARP                       = 0x8035
+	ETH_P_SCA                        = 0x6007
+	ETH_P_SLOW                       = 0x8809
+	ETH_P_SNAP                       = 0x5
+	ETH_P_TDLS                       = 0x890d
+	ETH_P_TEB                        = 0x6558
+	ETH_P_TIPC                       = 0x88ca
+	ETH_P_TRAILER                    = 0x1c
+	ETH_P_TR_802_2                   = 0x11
+	ETH_P_TSN                        = 0x22f0
+	ETH_P_WAN_PPP                    = 0x7
+	ETH_P_WCCP                       = 0x883e
+	ETH_P_X25                        = 0x805
+	ETH_P_XDSA                       = 0xf8
+	EXTA                             = 0xe
+	EXTB                             = 0xf
+	EXTPROC                          = 0x10000
+	FD_CLOEXEC                       = 0x1
+	FD_SETSIZE                       = 0x400
+	FLUSHO                           = 0x2000
+	F_DUPFD                          = 0x0
+	F_DUPFD_CLOEXEC                  = 0x406
+	F_EXLCK                          = 0x4
+	F_GETFD                          = 0x1
+	F_GETFL                          = 0x3
+	F_GETLEASE                       = 0x401
+	F_GETLK                          = 0xe
+	F_GETLK64                        = 0xe
+	F_GETOWN                         = 0x17
+	F_GETOWN_EX                      = 0x10
+	F_GETPIPE_SZ                     = 0x408
+	F_GETSIG                         = 0xb
+	F_LOCK                           = 0x1
+	F_NOTIFY                         = 0x402
+	F_OFD_GETLK                      = 0x24
+	F_OFD_SETLK                      = 0x25
+	F_OFD_SETLKW                     = 0x26
+	F_OK                             = 0x0
+	F_RDLCK                          = 0x0
+	F_SETFD                          = 0x2
+	F_SETFL                          = 0x4
+	F_SETLEASE                       = 0x400
+	F_SETLK                          = 0x6
+	F_SETLK64                        = 0x6
+	F_SETLKW                         = 0x7
+	F_SETLKW64                       = 0x7
+	F_SETOWN                         = 0x18
+	F_SETOWN_EX                      = 0xf
+	F_SETPIPE_SZ                     = 0x407
+	F_SETSIG                         = 0xa
+	F_SHLCK                          = 0x8
+	F_TEST                           = 0x3
+	F_TLOCK                          = 0x2
+	F_ULOCK                          = 0x0
+	F_UNLCK                          = 0x2
+	F_WRLCK                          = 0x1
+	HUPCL                            = 0x400
+	ICANON                           = 0x2
+	ICMPV6_FILTER                    = 0x1
+	ICRNL                            = 0x100
+	IEXTEN                           = 0x100
+	IFA_F_DADFAILED                  = 0x8
+	IFA_F_DEPRECATED                 = 0x20
+	IFA_F_HOMEADDRESS                = 0x10
+	IFA_F_MANAGETEMPADDR             = 0x100
+	IFA_F_MCAUTOJOIN                 = 0x400
+	IFA_F_NODAD                      = 0x2
+	IFA_F_NOPREFIXROUTE              = 0x200
+	IFA_F_OPTIMISTIC                 = 0x4
+	IFA_F_PERMANENT                  = 0x80
+	IFA_F_SECONDARY                  = 0x1
+	IFA_F_STABLE_PRIVACY             = 0x800
+	IFA_F_TEMPORARY                  = 0x1
+	IFA_F_TENTATIVE                  = 0x40
+	IFA_MAX                          = 0x8
+	IFF_ALLMULTI                     = 0x200
+	IFF_ATTACH_QUEUE                 = 0x200
+	IFF_AUTOMEDIA                    = 0x4000
+	IFF_BROADCAST                    = 0x2
+	IFF_DEBUG                        = 0x4
+	IFF_DETACH_QUEUE                 = 0x400
+	IFF_DORMANT                      = 0x20000
+	IFF_DYNAMIC                      = 0x8000
+	IFF_ECHO                         = 0x40000
+	IFF_LOOPBACK                     = 0x8
+	IFF_LOWER_UP                     = 0x10000
+	IFF_MASTER                       = 0x400
+	IFF_MULTICAST                    = 0x1000
+	IFF_MULTI_QUEUE                  = 0x100
+	IFF_NOARP                        = 0x80
+	IFF_NOFILTER                     = 0x1000
+	IFF_NOTRAILERS                   = 0x20
+	IFF_NO_PI                        = 0x1000
+	IFF_ONE_QUEUE                    = 0x2000
+	IFF_PERSIST                      = 0x800
+	IFF_POINTOPOINT                  = 0x10
+	IFF_PORTSEL                      = 0x2000
+	IFF_PROMISC                      = 0x100
+	IFF_RUNNING                      = 0x40
+	IFF_SLAVE                        = 0x800
+	IFF_TAP                          = 0x2
+	IFF_TUN                          = 0x1
+	IFF_TUN_EXCL                     = 0x8000
+	IFF_UP                           = 0x1
+	IFF_VNET_HDR                     = 0x4000
+	IFF_VOLATILE                     = 0x70c5a
+	IFNAMSIZ                         = 0x10
+	IGNBRK                           = 0x1
+	IGNCR                            = 0x80
+	IGNPAR                           = 0x4
+	IMAXBEL                          = 0x2000
+	INLCR                            = 0x40
+	INPCK                            = 0x10
+	IN_ACCESS                        = 0x1
+	IN_ALL_EVENTS                    = 0xfff
+	IN_ATTRIB                        = 0x4
+	IN_CLASSA_HOST                   = 0xffffff
+	IN_CLASSA_MAX                    = 0x80
+	IN_CLASSA_NET                    = 0xff000000
+	IN_CLASSA_NSHIFT                 = 0x18
+	IN_CLASSB_HOST                   = 0xffff
+	IN_CLASSB_MAX                    = 0x10000
+	IN_CLASSB_NET                    = 0xffff0000
+	IN_CLASSB_NSHIFT                 = 0x10
+	IN_CLASSC_HOST                   = 0xff
+	IN_CLASSC_NET                    = 0xffffff00
+	IN_CLASSC_NSHIFT                 = 0x8
+	IN_CLOEXEC                       = 0x80000
+	IN_CLOSE                         = 0x18
+	IN_CLOSE_NOWRITE                 = 0x10
+	IN_CLOSE_WRITE                   = 0x8
+	IN_CREATE                        = 0x100
+	IN_DELETE                        = 0x200
+	IN_DELETE_SELF                   = 0x400
+	IN_DONT_FOLLOW                   = 0x2000000
+	IN_EXCL_UNLINK                   = 0x4000000
+	IN_IGNORED                       = 0x8000
+	IN_ISDIR                         = 0x40000000
+	IN_LOOPBACKNET                   = 0x7f
+	IN_MASK_ADD                      = 0x20000000
+	IN_MODIFY                        = 0x2
+	IN_MOVE                          = 0xc0
+	IN_MOVED_FROM                    = 0x40
+	IN_MOVED_TO                      = 0x80
+	IN_MOVE_SELF                     = 0x800
+	IN_NONBLOCK                      = 0x80
+	IN_ONESHOT                       = 0x80000000
+	IN_ONLYDIR                       = 0x1000000
+	IN_OPEN                          = 0x20
+	IN_Q_OVERFLOW                    = 0x4000
+	IN_UNMOUNT                       = 0x2000
+	IPPROTO_AH                       = 0x33
+	IPPROTO_BEETPH                   = 0x5e
+	IPPROTO_COMP                     = 0x6c
+	IPPROTO_DCCP                     = 0x21
+	IPPROTO_DSTOPTS                  = 0x3c
+	IPPROTO_EGP                      = 0x8
+	IPPROTO_ENCAP                    = 0x62
+	IPPROTO_ESP                      = 0x32
+	IPPROTO_FRAGMENT                 = 0x2c
+	IPPROTO_GRE                      = 0x2f
+	IPPROTO_HOPOPTS                  = 0x0
+	IPPROTO_ICMP                     = 0x1
+	IPPROTO_ICMPV6                   = 0x3a
+	IPPROTO_IDP                      = 0x16
+	IPPROTO_IGMP                     = 0x2
+	IPPROTO_IP                       = 0x0
+	IPPROTO_IPIP                     = 0x4
+	IPPROTO_IPV6                     = 0x29
+	IPPROTO_MH                       = 0x87
+	IPPROTO_MTP                      = 0x5c
+	IPPROTO_NONE                     = 0x3b
+	IPPROTO_PIM                      = 0x67
+	IPPROTO_PUP                      = 0xc
+	IPPROTO_RAW                      = 0xff
+	IPPROTO_ROUTING                  = 0x2b
+	IPPROTO_RSVP                     = 0x2e
+	IPPROTO_SCTP                     = 0x84
+	IPPROTO_TCP                      = 0x6
+	IPPROTO_TP                       = 0x1d
+	IPPROTO_UDP                      = 0x11
+	IPPROTO_UDPLITE                  = 0x88
+	IPV6_2292DSTOPTS                 = 0x4
+	IPV6_2292HOPLIMIT                = 0x8
+	IPV6_2292HOPOPTS                 = 0x3
+	IPV6_2292PKTINFO                 = 0x2
+	IPV6_2292PKTOPTIONS              = 0x6
+	IPV6_2292RTHDR                   = 0x5
+	IPV6_ADDRFORM                    = 0x1
+	IPV6_ADD_MEMBERSHIP              = 0x14
+	IPV6_AUTHHDR                     = 0xa
+	IPV6_CHECKSUM                    = 0x7
+	IPV6_DONTFRAG                    = 0x3e
+	IPV6_DROP_MEMBERSHIP             = 0x15
+	IPV6_DSTOPTS                     = 0x3b
+	IPV6_HOPLIMIT                    = 0x34
+	IPV6_HOPOPTS                     = 0x36
+	IPV6_IPSEC_POLICY                = 0x22
+	IPV6_JOIN_ANYCAST                = 0x1b
+	IPV6_JOIN_GROUP                  = 0x14
+	IPV6_LEAVE_ANYCAST               = 0x1c
+	IPV6_LEAVE_GROUP                 = 0x15
+	IPV6_MTU                         = 0x18
+	IPV6_MTU_DISCOVER                = 0x17
+	IPV6_MULTICAST_HOPS              = 0x12
+	IPV6_MULTICAST_IF                = 0x11
+	IPV6_MULTICAST_LOOP              = 0x13
+	IPV6_NEXTHOP                     = 0x9
+	IPV6_PATHMTU                     = 0x3d
+	IPV6_PKTINFO                     = 0x32
+	IPV6_PMTUDISC_DO                 = 0x2
+	IPV6_PMTUDISC_DONT               = 0x0
+	IPV6_PMTUDISC_INTERFACE          = 0x4
+	IPV6_PMTUDISC_OMIT               = 0x5
+	IPV6_PMTUDISC_PROBE              = 0x3
+	IPV6_PMTUDISC_WANT               = 0x1
+	IPV6_RECVDSTOPTS                 = 0x3a
+	IPV6_RECVERR                     = 0x19
+	IPV6_RECVHOPLIMIT                = 0x33
+	IPV6_RECVHOPOPTS                 = 0x35
+	IPV6_RECVPATHMTU                 = 0x3c
+	IPV6_RECVPKTINFO                 = 0x31
+	IPV6_RECVRTHDR                   = 0x38
+	IPV6_RECVTCLASS                  = 0x42
+	IPV6_ROUTER_ALERT                = 0x16
+	IPV6_RTHDR                       = 0x39
+	IPV6_RTHDRDSTOPTS                = 0x37
+	IPV6_RTHDR_LOOSE                 = 0x0
+	IPV6_RTHDR_STRICT                = 0x1
+	IPV6_RTHDR_TYPE_0                = 0x0
+	IPV6_RXDSTOPTS                   = 0x3b
+	IPV6_RXHOPOPTS                   = 0x36
+	IPV6_TCLASS                      = 0x43
+	IPV6_UNICAST_HOPS                = 0x10
+	IPV6_V6ONLY                      = 0x1a
+	IPV6_XFRM_POLICY                 = 0x23
+	IP_ADD_MEMBERSHIP                = 0x23
+	IP_ADD_SOURCE_MEMBERSHIP         = 0x27
+	IP_BLOCK_SOURCE                  = 0x26
+	IP_CHECKSUM                      = 0x17
+	IP_DEFAULT_MULTICAST_LOOP        = 0x1
+	IP_DEFAULT_MULTICAST_TTL         = 0x1
+	IP_DF                            = 0x4000
+	IP_DROP_MEMBERSHIP               = 0x24
+	IP_DROP_SOURCE_MEMBERSHIP        = 0x28
+	IP_FREEBIND                      = 0xf
+	IP_HDRINCL                       = 0x3
+	IP_IPSEC_POLICY                  = 0x10
+	IP_MAXPACKET                     = 0xffff
+	IP_MAX_MEMBERSHIPS               = 0x14
+	IP_MF                            = 0x2000
+	IP_MINTTL                        = 0x15
+	IP_MSFILTER                      = 0x29
+	IP_MSS                           = 0x240
+	IP_MTU                           = 0xe
+	IP_MTU_DISCOVER                  = 0xa
+	IP_MULTICAST_ALL                 = 0x31
+	IP_MULTICAST_IF                  = 0x20
+	IP_MULTICAST_LOOP                = 0x22
+	IP_MULTICAST_TTL                 = 0x21
+	IP_NODEFRAG                      = 0x16
+	IP_OFFMASK                       = 0x1fff
+	IP_OPTIONS                       = 0x4
+	IP_ORIGDSTADDR                   = 0x14
+	IP_PASSSEC                       = 0x12
+	IP_PKTINFO                       = 0x8
+	IP_PKTOPTIONS                    = 0x9
+	IP_PMTUDISC                      = 0xa
+	IP_PMTUDISC_DO                   = 0x2
+	IP_PMTUDISC_DONT                 = 0x0
+	IP_PMTUDISC_INTERFACE            = 0x4
+	IP_PMTUDISC_OMIT                 = 0x5
+	IP_PMTUDISC_PROBE                = 0x3
+	IP_PMTUDISC_WANT                 = 0x1
+	IP_RECVERR                       = 0xb
+	IP_RECVOPTS                      = 0x6
+	IP_RECVORIGDSTADDR               = 0x14
+	IP_RECVRETOPTS                   = 0x7
+	IP_RECVTOS                       = 0xd
+	IP_RECVTTL                       = 0xc
+	IP_RETOPTS                       = 0x7
+	IP_RF                            = 0x8000
+	IP_ROUTER_ALERT                  = 0x5
+	IP_TOS                           = 0x1
+	IP_TRANSPARENT                   = 0x13
+	IP_TTL                           = 0x2
+	IP_UNBLOCK_SOURCE                = 0x25
+	IP_UNICAST_IF                    = 0x32
+	IP_XFRM_POLICY                   = 0x11
+	ISIG                             = 0x1
+	ISTRIP                           = 0x20
+	IUTF8                            = 0x4000
+	IXANY                            = 0x800
+	IXOFF                            = 0x1000
+	IXON                             = 0x400
+	LINUX_REBOOT_CMD_CAD_OFF         = 0x0
+	LINUX_REBOOT_CMD_CAD_ON          = 0x89abcdef
+	LINUX_REBOOT_CMD_HALT            = 0xcdef0123
+	LINUX_REBOOT_CMD_KEXEC           = 0x45584543
+	LINUX_REBOOT_CMD_POWER_OFF       = 0x4321fedc
+	LINUX_REBOOT_CMD_RESTART         = 0x1234567
+	LINUX_REBOOT_CMD_RESTART2        = 0xa1b2c3d4
+	LINUX_REBOOT_CMD_SW_SUSPEND      = 0xd000fce2
+	LINUX_REBOOT_MAGIC1              = 0xfee1dead
+	LINUX_REBOOT_MAGIC2              = 0x28121969
+	LOCK_EX                          = 0x2
+	LOCK_NB                          = 0x4
+	LOCK_SH                          = 0x1
+	LOCK_UN                          = 0x8
+	MADV_DODUMP                      = 0x11
+	MADV_DOFORK                      = 0xb
+	MADV_DONTDUMP                    = 0x10
+	MADV_DONTFORK                    = 0xa
+	MADV_DONTNEED                    = 0x4
+	MADV_HUGEPAGE                    = 0xe
+	MADV_HWPOISON                    = 0x64
+	MADV_MERGEABLE                   = 0xc
+	MADV_NOHUGEPAGE                  = 0xf
+	MADV_NORMAL                      = 0x0
+	MADV_RANDOM                      = 0x1
+	MADV_REMOVE                      = 0x9
+	MADV_SEQUENTIAL                  = 0x2
+	MADV_UNMERGEABLE                 = 0xd
+	MADV_WILLNEED                    = 0x3
+	MAP_ANON                         = 0x800
+	MAP_ANONYMOUS                    = 0x800
+	MAP_DENYWRITE                    = 0x2000
+	MAP_EXECUTABLE                   = 0x4000
+	MAP_FILE                         = 0x0
+	MAP_FIXED                        = 0x10
+	MAP_GROWSDOWN                    = 0x1000
+	MAP_HUGETLB                      = 0x80000
+	MAP_HUGE_MASK                    = 0x3f
+	MAP_HUGE_SHIFT                   = 0x1a
+	MAP_LOCKED                       = 0x8000
+	MAP_NONBLOCK                     = 0x20000
+	MAP_NORESERVE                    = 0x400
+	MAP_POPULATE                     = 0x10000
+	MAP_PRIVATE                      = 0x2
+	MAP_RENAME                       = 0x800
+	MAP_SHARED                       = 0x1
+	MAP_STACK                        = 0x40000
+	MAP_TYPE                         = 0xf
+	MCL_CURRENT                      = 0x1
+	MCL_FUTURE                       = 0x2
+	MNT_DETACH                       = 0x2
+	MNT_EXPIRE                       = 0x4
+	MNT_FORCE                        = 0x1
+	MSG_CMSG_CLOEXEC                 = 0x40000000
+	MSG_CONFIRM                      = 0x800
+	MSG_CTRUNC                       = 0x8
+	MSG_DONTROUTE                    = 0x4
+	MSG_DONTWAIT                     = 0x40
+	MSG_EOR                          = 0x80
+	MSG_ERRQUEUE                     = 0x2000
+	MSG_FASTOPEN                     = 0x20000000
+	MSG_FIN                          = 0x200
+	MSG_MORE                         = 0x8000
+	MSG_NOSIGNAL                     = 0x4000
+	MSG_OOB                          = 0x1
+	MSG_PEEK                         = 0x2
+	MSG_PROXY                        = 0x10
+	MSG_RST                          = 0x1000
+	MSG_SYN                          = 0x400
+	MSG_TRUNC                        = 0x20
+	MSG_TRYHARD                      = 0x4
+	MSG_WAITALL                      = 0x100
+	MSG_WAITFORONE                   = 0x10000
+	MS_ACTIVE                        = 0x40000000
+	MS_ASYNC                         = 0x1
+	MS_BIND                          = 0x1000
+	MS_DIRSYNC                       = 0x80
+	MS_INVALIDATE                    = 0x2
+	MS_I_VERSION                     = 0x800000
+	MS_KERNMOUNT                     = 0x400000
+	MS_LAZYTIME                      = 0x2000000
+	MS_MANDLOCK                      = 0x40
+	MS_MGC_MSK                       = 0xffff0000
+	MS_MGC_VAL                       = 0xc0ed0000
+	MS_MOVE                          = 0x2000
+	MS_NOATIME                       = 0x400
+	MS_NODEV                         = 0x4
+	MS_NODIRATIME                    = 0x800
+	MS_NOEXEC                        = 0x8
+	MS_NOSUID                        = 0x2
+	MS_NOUSER                        = -0x80000000
+	MS_POSIXACL                      = 0x10000
+	MS_PRIVATE                       = 0x40000
+	MS_RDONLY                        = 0x1
+	MS_REC                           = 0x4000
+	MS_RELATIME                      = 0x200000
+	MS_REMOUNT                       = 0x20
+	MS_RMT_MASK                      = 0x2800051
+	MS_SHARED                        = 0x100000
+	MS_SILENT                        = 0x8000
+	MS_SLAVE                         = 0x80000
+	MS_STRICTATIME                   = 0x1000000
+	MS_SYNC                          = 0x4
+	MS_SYNCHRONOUS                   = 0x10
+	MS_UNBINDABLE                    = 0x20000
+	NAME_MAX                         = 0xff
+	NETLINK_ADD_MEMBERSHIP           = 0x1
+	NETLINK_AUDIT                    = 0x9
+	NETLINK_BROADCAST_ERROR          = 0x4
+	NETLINK_CAP_ACK                  = 0xa
+	NETLINK_CONNECTOR                = 0xb
+	NETLINK_CRYPTO                   = 0x15
+	NETLINK_DNRTMSG                  = 0xe
+	NETLINK_DROP_MEMBERSHIP          = 0x2
+	NETLINK_ECRYPTFS                 = 0x13
+	NETLINK_FIB_LOOKUP               = 0xa
+	NETLINK_FIREWALL                 = 0x3
+	NETLINK_GENERIC                  = 0x10
+	NETLINK_INET_DIAG                = 0x4
+	NETLINK_IP6_FW                   = 0xd
+	NETLINK_ISCSI                    = 0x8
+	NETLINK_KOBJECT_UEVENT           = 0xf
+	NETLINK_LISTEN_ALL_NSID          = 0x8
+	NETLINK_LIST_MEMBERSHIPS         = 0x9
+	NETLINK_NETFILTER                = 0xc
+	NETLINK_NFLOG                    = 0x5
+	NETLINK_NO_ENOBUFS               = 0x5
+	NETLINK_PKTINFO                  = 0x3
+	NETLINK_RDMA                     = 0x14
+	NETLINK_ROUTE                    = 0x0
+	NETLINK_RX_RING                  = 0x6
+	NETLINK_SCSITRANSPORT            = 0x12
+	NETLINK_SELINUX                  = 0x7
+	NETLINK_SOCK_DIAG                = 0x4
+	NETLINK_TX_RING                  = 0x7
+	NETLINK_UNUSED                   = 0x1
+	NETLINK_USERSOCK                 = 0x2
+	NETLINK_XFRM                     = 0x6
+	NLA_ALIGNTO                      = 0x4
+	NLA_F_NESTED                     = 0x8000
+	NLA_F_NET_BYTEORDER              = 0x4000
+	NLA_HDRLEN                       = 0x4
+	NLMSG_ALIGNTO                    = 0x4
+	NLMSG_DONE                       = 0x3
+	NLMSG_ERROR                      = 0x2
+	NLMSG_HDRLEN                     = 0x10
+	NLMSG_MIN_TYPE                   = 0x10
+	NLMSG_NOOP                       = 0x1
+	NLMSG_OVERRUN                    = 0x4
+	NLM_F_ACK                        = 0x4
+	NLM_F_APPEND                     = 0x800
+	NLM_F_ATOMIC                     = 0x400
+	NLM_F_CREATE                     = 0x400
+	NLM_F_DUMP                       = 0x300
+	NLM_F_DUMP_INTR                  = 0x10
+	NLM_F_ECHO                       = 0x8
+	NLM_F_EXCL                       = 0x200
+	NLM_F_MATCH                      = 0x200
+	NLM_F_MULTI                      = 0x2
+	NLM_F_REPLACE                    = 0x100
+	NLM_F_REQUEST                    = 0x1
+	NLM_F_ROOT                       = 0x100
+	NOFLSH                           = 0x80
+	OCRNL                            = 0x8
+	OFDEL                            = 0x80
+	OFILL                            = 0x40
+	ONLCR                            = 0x4
+	ONLRET                           = 0x20
+	ONOCR                            = 0x10
+	OPOST                            = 0x1
+	O_ACCMODE                        = 0x3
+	O_APPEND                         = 0x8
+	O_ASYNC                          = 0x1000
+	O_CLOEXEC                        = 0x80000
+	O_CREAT                          = 0x100
+	O_DIRECT                         = 0x8000
+	O_DIRECTORY                      = 0x10000
+	O_DSYNC                          = 0x10
+	O_EXCL                           = 0x400
+	O_FSYNC                          = 0x4010
+	O_LARGEFILE                      = 0x0
+	O_NDELAY                         = 0x80
+	O_NOATIME                        = 0x40000
+	O_NOCTTY                         = 0x800
+	O_NOFOLLOW                       = 0x20000
+	O_NONBLOCK                       = 0x80
+	O_PATH                           = 0x200000
+	O_RDONLY                         = 0x0
+	O_RDWR                           = 0x2
+	O_RSYNC                          = 0x4010
+	O_SYNC                           = 0x4010
+	O_TMPFILE                        = 0x410000
+	O_TRUNC                          = 0x200
+	O_WRONLY                         = 0x1
+	PACKET_ADD_MEMBERSHIP            = 0x1
+	PACKET_AUXDATA                   = 0x8
+	PACKET_BROADCAST                 = 0x1
+	PACKET_COPY_THRESH               = 0x7
+	PACKET_DROP_MEMBERSHIP           = 0x2
+	PACKET_FANOUT                    = 0x12
+	PACKET_FANOUT_CBPF               = 0x6
+	PACKET_FANOUT_CPU                = 0x2
+	PACKET_FANOUT_DATA               = 0x16
+	PACKET_FANOUT_EBPF               = 0x7
+	PACKET_FANOUT_FLAG_DEFRAG        = 0x8000
+	PACKET_FANOUT_FLAG_ROLLOVER      = 0x1000
+	PACKET_FANOUT_HASH               = 0x0
+	PACKET_FANOUT_LB                 = 0x1
+	PACKET_FANOUT_QM                 = 0x5
+	PACKET_FANOUT_RND                = 0x4
+	PACKET_FANOUT_ROLLOVER           = 0x3
+	PACKET_FASTROUTE                 = 0x6
+	PACKET_HDRLEN                    = 0xb
+	PACKET_HOST                      = 0x0
+	PACKET_KERNEL                    = 0x7
+	PACKET_LOOPBACK                  = 0x5
+	PACKET_LOSS                      = 0xe
+	PACKET_MR_ALLMULTI               = 0x2
+	PACKET_MR_MULTICAST              = 0x0
+	PACKET_MR_PROMISC                = 0x1
+	PACKET_MR_UNICAST                = 0x3
+	PACKET_MULTICAST                 = 0x2
+	PACKET_ORIGDEV                   = 0x9
+	PACKET_OTHERHOST                 = 0x3
+	PACKET_OUTGOING                  = 0x4
+	PACKET_QDISC_BYPASS              = 0x14
+	PACKET_RECV_OUTPUT               = 0x3
+	PACKET_RESERVE                   = 0xc
+	PACKET_ROLLOVER_STATS            = 0x15
+	PACKET_RX_RING                   = 0x5
+	PACKET_STATISTICS                = 0x6
+	PACKET_TIMESTAMP                 = 0x11
+	PACKET_TX_HAS_OFF                = 0x13
+	PACKET_TX_RING                   = 0xd
+	PACKET_TX_TIMESTAMP              = 0x10
+	PACKET_USER                      = 0x6
+	PACKET_VERSION                   = 0xa
+	PACKET_VNET_HDR                  = 0xf
+	PARENB                           = 0x100
+	PARITY_CRC16_PR0                 = 0x2
+	PARITY_CRC16_PR0_CCITT           = 0x4
+	PARITY_CRC16_PR1                 = 0x3
+	PARITY_CRC16_PR1_CCITT           = 0x5
+	PARITY_CRC32_PR0_CCITT           = 0x6
+	PARITY_CRC32_PR1_CCITT           = 0x7
+	PARITY_DEFAULT                   = 0x0
+	PARITY_NONE                      = 0x1
+	PARMRK                           = 0x8
+	PARODD                           = 0x200
+	PENDIN                           = 0x4000
+	PRIO_PGRP                        = 0x1
+	PRIO_PROCESS                     = 0x0
+	PRIO_USER                        = 0x2
+	PROT_EXEC                        = 0x4
+	PROT_GROWSDOWN                   = 0x1000000
+	PROT_GROWSUP                     = 0x2000000
+	PROT_NONE                        = 0x0
+	PROT_READ                        = 0x1
+	PROT_WRITE                       = 0x2
+	PR_CAPBSET_DROP                  = 0x18
+	PR_CAPBSET_READ                  = 0x17
+	PR_CAP_AMBIENT                   = 0x2f
+	PR_CAP_AMBIENT_CLEAR_ALL         = 0x4
+	PR_CAP_AMBIENT_IS_SET            = 0x1
+	PR_CAP_AMBIENT_LOWER             = 0x3
+	PR_CAP_AMBIENT_RAISE             = 0x2
+	PR_ENDIAN_BIG                    = 0x0
+	PR_ENDIAN_LITTLE                 = 0x1
+	PR_ENDIAN_PPC_LITTLE             = 0x2
+	PR_FPEMU_NOPRINT                 = 0x1
+	PR_FPEMU_SIGFPE                  = 0x2
+	PR_FP_EXC_ASYNC                  = 0x2
+	PR_FP_EXC_DISABLED               = 0x0
+	PR_FP_EXC_DIV                    = 0x10000
+	PR_FP_EXC_INV                    = 0x100000
+	PR_FP_EXC_NONRECOV               = 0x1
+	PR_FP_EXC_OVF                    = 0x20000
+	PR_FP_EXC_PRECISE                = 0x3
+	PR_FP_EXC_RES                    = 0x80000
+	PR_FP_EXC_SW_ENABLE              = 0x80
+	PR_FP_EXC_UND                    = 0x40000
+	PR_FP_MODE_FR                    = 0x1
+	PR_FP_MODE_FRE                   = 0x2
+	PR_GET_CHILD_SUBREAPER           = 0x25
+	PR_GET_DUMPABLE                  = 0x3
+	PR_GET_ENDIAN                    = 0x13
+	PR_GET_FPEMU                     = 0x9
+	PR_GET_FPEXC                     = 0xb
+	PR_GET_FP_MODE                   = 0x2e
+	PR_GET_KEEPCAPS                  = 0x7
+	PR_GET_NAME                      = 0x10
+	PR_GET_NO_NEW_PRIVS              = 0x27
+	PR_GET_PDEATHSIG                 = 0x2
+	PR_GET_SECCOMP                   = 0x15
+	PR_GET_SECUREBITS                = 0x1b
+	PR_GET_THP_DISABLE               = 0x2a
+	PR_GET_TID_ADDRESS               = 0x28
+	PR_GET_TIMERSLACK                = 0x1e
+	PR_GET_TIMING                    = 0xd
+	PR_GET_TSC                       = 0x19
+	PR_GET_UNALIGN                   = 0x5
+	PR_MCE_KILL                      = 0x21
+	PR_MCE_KILL_CLEAR                = 0x0
+	PR_MCE_KILL_DEFAULT              = 0x2
+	PR_MCE_KILL_EARLY                = 0x1
+	PR_MCE_KILL_GET                  = 0x22
+	PR_MCE_KILL_LATE                 = 0x0
+	PR_MCE_KILL_SET                  = 0x1
+	PR_MPX_DISABLE_MANAGEMENT        = 0x2c
+	PR_MPX_ENABLE_MANAGEMENT         = 0x2b
+	PR_SET_CHILD_SUBREAPER           = 0x24
+	PR_SET_DUMPABLE                  = 0x4
+	PR_SET_ENDIAN                    = 0x14
+	PR_SET_FPEMU                     = 0xa
+	PR_SET_FPEXC                     = 0xc
+	PR_SET_FP_MODE                   = 0x2d
+	PR_SET_KEEPCAPS                  = 0x8
+	PR_SET_MM                        = 0x23
+	PR_SET_MM_ARG_END                = 0x9
+	PR_SET_MM_ARG_START              = 0x8
+	PR_SET_MM_AUXV                   = 0xc
+	PR_SET_MM_BRK                    = 0x7
+	PR_SET_MM_END_CODE               = 0x2
+	PR_SET_MM_END_DATA               = 0x4
+	PR_SET_MM_ENV_END                = 0xb
+	PR_SET_MM_ENV_START              = 0xa
+	PR_SET_MM_EXE_FILE               = 0xd
+	PR_SET_MM_MAP                    = 0xe
+	PR_SET_MM_MAP_SIZE               = 0xf
+	PR_SET_MM_START_BRK              = 0x6
+	PR_SET_MM_START_CODE             = 0x1
+	PR_SET_MM_START_DATA             = 0x3
+	PR_SET_MM_START_STACK            = 0x5
+	PR_SET_NAME                      = 0xf
+	PR_SET_NO_NEW_PRIVS              = 0x26
+	PR_SET_PDEATHSIG                 = 0x1
+	PR_SET_PTRACER                   = 0x59616d61
+	PR_SET_PTRACER_ANY               = -0x1
+	PR_SET_SECCOMP                   = 0x16
+	PR_SET_SECUREBITS                = 0x1c
+	PR_SET_THP_DISABLE               = 0x29
+	PR_SET_TIMERSLACK                = 0x1d
+	PR_SET_TIMING                    = 0xe
+	PR_SET_TSC                       = 0x1a
+	PR_SET_UNALIGN                   = 0x6
+	PR_TASK_PERF_EVENTS_DISABLE      = 0x1f
+	PR_TASK_PERF_EVENTS_ENABLE       = 0x20
+	PR_TIMING_STATISTICAL            = 0x0
+	PR_TIMING_TIMESTAMP              = 0x1
+	PR_TSC_ENABLE                    = 0x1
+	PR_TSC_SIGSEGV                   = 0x2
+	PR_UNALIGN_NOPRINT               = 0x1
+	PR_UNALIGN_SIGBUS                = 0x2
+	PTRACE_ATTACH                    = 0x10
+	PTRACE_CONT                      = 0x7
+	PTRACE_DETACH                    = 0x11
+	PTRACE_EVENT_CLONE               = 0x3
+	PTRACE_EVENT_EXEC                = 0x4
+	PTRACE_EVENT_EXIT                = 0x6
+	PTRACE_EVENT_FORK                = 0x1
+	PTRACE_EVENT_SECCOMP             = 0x7
+	PTRACE_EVENT_STOP                = 0x80
+	PTRACE_EVENT_VFORK               = 0x2
+	PTRACE_EVENT_VFORK_DONE          = 0x5
+	PTRACE_GETEVENTMSG               = 0x4201
+	PTRACE_GETFPREGS                 = 0xe
+	PTRACE_GETREGS                   = 0xc
+	PTRACE_GETREGSET                 = 0x4204
+	PTRACE_GETSIGINFO                = 0x4202
+	PTRACE_GETSIGMASK                = 0x420a
+	PTRACE_GET_THREAD_AREA           = 0x19
+	PTRACE_GET_THREAD_AREA_3264      = 0xc4
+	PTRACE_GET_WATCH_REGS            = 0xd0
+	PTRACE_INTERRUPT                 = 0x4207
+	PTRACE_KILL                      = 0x8
+	PTRACE_LISTEN                    = 0x4208
+	PTRACE_OLDSETOPTIONS             = 0x15
+	PTRACE_O_EXITKILL                = 0x100000
+	PTRACE_O_MASK                    = 0x3000ff
+	PTRACE_O_SUSPEND_SECCOMP         = 0x200000
+	PTRACE_O_TRACECLONE              = 0x8
+	PTRACE_O_TRACEEXEC               = 0x10
+	PTRACE_O_TRACEEXIT               = 0x40
+	PTRACE_O_TRACEFORK               = 0x2
+	PTRACE_O_TRACESECCOMP            = 0x80
+	PTRACE_O_TRACESYSGOOD            = 0x1
+	PTRACE_O_TRACEVFORK              = 0x4
+	PTRACE_O_TRACEVFORKDONE          = 0x20
+	PTRACE_PEEKDATA                  = 0x2
+	PTRACE_PEEKDATA_3264             = 0xc1
+	PTRACE_PEEKSIGINFO               = 0x4209
+	PTRACE_PEEKSIGINFO_SHARED        = 0x1
+	PTRACE_PEEKTEXT                  = 0x1
+	PTRACE_PEEKTEXT_3264             = 0xc0
+	PTRACE_PEEKUSR                   = 0x3
+	PTRACE_POKEDATA                  = 0x5
+	PTRACE_POKEDATA_3264             = 0xc3
+	PTRACE_POKETEXT                  = 0x4
+	PTRACE_POKETEXT_3264             = 0xc2
+	PTRACE_POKEUSR                   = 0x6
+	PTRACE_SEIZE                     = 0x4206
+	PTRACE_SETFPREGS                 = 0xf
+	PTRACE_SETOPTIONS                = 0x4200
+	PTRACE_SETREGS                   = 0xd
+	PTRACE_SETREGSET                 = 0x4205
+	PTRACE_SETSIGINFO                = 0x4203
+	PTRACE_SETSIGMASK                = 0x420b
+	PTRACE_SET_THREAD_AREA           = 0x1a
+	PTRACE_SET_WATCH_REGS            = 0xd1
+	PTRACE_SINGLESTEP                = 0x9
+	PTRACE_SYSCALL                   = 0x18
+	PTRACE_TRACEME                   = 0x0
+	RLIMIT_AS                        = 0x6
+	RLIMIT_CORE                      = 0x4
+	RLIMIT_CPU                       = 0x0
+	RLIMIT_DATA                      = 0x2
+	RLIMIT_FSIZE                     = 0x1
+	RLIMIT_NOFILE                    = 0x5
+	RLIMIT_STACK                     = 0x3
+	RLIM_INFINITY                    = -0x1
+	RTAX_ADVMSS                      = 0x8
+	RTAX_CC_ALGO                     = 0x10
+	RTAX_CWND                        = 0x7
+	RTAX_FEATURES                    = 0xc
+	RTAX_FEATURE_ALLFRAG             = 0x8
+	RTAX_FEATURE_ECN                 = 0x1
+	RTAX_FEATURE_MASK                = 0xf
+	RTAX_FEATURE_SACK                = 0x2
+	RTAX_FEATURE_TIMESTAMP           = 0x4
+	RTAX_HOPLIMIT                    = 0xa
+	RTAX_INITCWND                    = 0xb
+	RTAX_INITRWND                    = 0xe
+	RTAX_LOCK                        = 0x1
+	RTAX_MAX                         = 0x10
+	RTAX_MTU                         = 0x2
+	RTAX_QUICKACK                    = 0xf
+	RTAX_REORDERING                  = 0x9
+	RTAX_RTO_MIN                     = 0xd
+	RTAX_RTT                         = 0x4
+	RTAX_RTTVAR                      = 0x5
+	RTAX_SSTHRESH                    = 0x6
+	RTAX_UNSPEC                      = 0x0
+	RTAX_WINDOW                      = 0x3
+	RTA_ALIGNTO                      = 0x4
+	RTA_MAX                          = 0x16
+	RTCF_DIRECTSRC                   = 0x4000000
+	RTCF_DOREDIRECT                  = 0x1000000
+	RTCF_LOG                         = 0x2000000
+	RTCF_MASQ                        = 0x400000
+	RTCF_NAT                         = 0x800000
+	RTCF_VALVE                       = 0x200000
+	RTF_ADDRCLASSMASK                = 0xf8000000
+	RTF_ADDRCONF                     = 0x40000
+	RTF_ALLONLINK                    = 0x20000
+	RTF_BROADCAST                    = 0x10000000
+	RTF_CACHE                        = 0x1000000
+	RTF_DEFAULT                      = 0x10000
+	RTF_DYNAMIC                      = 0x10
+	RTF_FLOW                         = 0x2000000
+	RTF_GATEWAY                      = 0x2
+	RTF_HOST                         = 0x4
+	RTF_INTERFACE                    = 0x40000000
+	RTF_IRTT                         = 0x100
+	RTF_LINKRT                       = 0x100000
+	RTF_LOCAL                        = 0x80000000
+	RTF_MODIFIED                     = 0x20
+	RTF_MSS                          = 0x40
+	RTF_MTU                          = 0x40
+	RTF_MULTICAST                    = 0x20000000
+	RTF_NAT                          = 0x8000000
+	RTF_NOFORWARD                    = 0x1000
+	RTF_NONEXTHOP                    = 0x200000
+	RTF_NOPMTUDISC                   = 0x4000
+	RTF_POLICY                       = 0x4000000
+	RTF_REINSTATE                    = 0x8
+	RTF_REJECT                       = 0x200
+	RTF_STATIC                       = 0x400
+	RTF_THROW                        = 0x2000
+	RTF_UP                           = 0x1
+	RTF_WINDOW                       = 0x80
+	RTF_XRESOLVE                     = 0x800
+	RTM_BASE                         = 0x10
+	RTM_DELACTION                    = 0x31
+	RTM_DELADDR                      = 0x15
+	RTM_DELADDRLABEL                 = 0x49
+	RTM_DELLINK                      = 0x11
+	RTM_DELMDB                       = 0x55
+	RTM_DELNEIGH                     = 0x1d
+	RTM_DELNSID                      = 0x59
+	RTM_DELQDISC                     = 0x25
+	RTM_DELROUTE                     = 0x19
+	RTM_DELRULE                      = 0x21
+	RTM_DELTCLASS                    = 0x29
+	RTM_DELTFILTER                   = 0x2d
+	RTM_F_CLONED                     = 0x200
+	RTM_F_EQUALIZE                   = 0x400
+	RTM_F_NOTIFY                     = 0x100
+	RTM_F_PREFIX                     = 0x800
+	RTM_GETACTION                    = 0x32
+	RTM_GETADDR                      = 0x16
+	RTM_GETADDRLABEL                 = 0x4a
+	RTM_GETANYCAST                   = 0x3e
+	RTM_GETDCB                       = 0x4e
+	RTM_GETLINK                      = 0x12
+	RTM_GETMDB                       = 0x56
+	RTM_GETMULTICAST                 = 0x3a
+	RTM_GETNEIGH                     = 0x1e
+	RTM_GETNEIGHTBL                  = 0x42
+	RTM_GETNETCONF                   = 0x52
+	RTM_GETNSID                      = 0x5a
+	RTM_GETQDISC                     = 0x26
+	RTM_GETROUTE                     = 0x1a
+	RTM_GETRULE                      = 0x22
+	RTM_GETTCLASS                    = 0x2a
+	RTM_GETTFILTER                   = 0x2e
+	RTM_MAX                          = 0x5b
+	RTM_NEWACTION                    = 0x30
+	RTM_NEWADDR                      = 0x14
+	RTM_NEWADDRLABEL                 = 0x48
+	RTM_NEWLINK                      = 0x10
+	RTM_NEWMDB                       = 0x54
+	RTM_NEWNDUSEROPT                 = 0x44
+	RTM_NEWNEIGH                     = 0x1c
+	RTM_NEWNEIGHTBL                  = 0x40
+	RTM_NEWNETCONF                   = 0x50
+	RTM_NEWNSID                      = 0x58
+	RTM_NEWPREFIX                    = 0x34
+	RTM_NEWQDISC                     = 0x24
+	RTM_NEWROUTE                     = 0x18
+	RTM_NEWRULE                      = 0x20
+	RTM_NEWTCLASS                    = 0x28
+	RTM_NEWTFILTER                   = 0x2c
+	RTM_NR_FAMILIES                  = 0x13
+	RTM_NR_MSGTYPES                  = 0x4c
+	RTM_SETDCB                       = 0x4f
+	RTM_SETLINK                      = 0x13
+	RTM_SETNEIGHTBL                  = 0x43
+	RTNH_ALIGNTO                     = 0x4
+	RTNH_COMPARE_MASK                = 0x11
+	RTNH_F_DEAD                      = 0x1
+	RTNH_F_LINKDOWN                  = 0x10
+	RTNH_F_OFFLOAD                   = 0x8
+	RTNH_F_ONLINK                    = 0x4
+	RTNH_F_PERVASIVE                 = 0x2
+	RTN_MAX                          = 0xb
+	RTPROT_BABEL                     = 0x2a
+	RTPROT_BIRD                      = 0xc
+	RTPROT_BOOT                      = 0x3
+	RTPROT_DHCP                      = 0x10
+	RTPROT_DNROUTED                  = 0xd
+	RTPROT_GATED                     = 0x8
+	RTPROT_KERNEL                    = 0x2
+	RTPROT_MROUTED                   = 0x11
+	RTPROT_MRT                       = 0xa
+	RTPROT_NTK                       = 0xf
+	RTPROT_RA                        = 0x9
+	RTPROT_REDIRECT                  = 0x1
+	RTPROT_STATIC                    = 0x4
+	RTPROT_UNSPEC                    = 0x0
+	RTPROT_XORP                      = 0xe
+	RTPROT_ZEBRA                     = 0xb
+	RT_CLASS_DEFAULT                 = 0xfd
+	RT_CLASS_LOCAL                   = 0xff
+	RT_CLASS_MAIN                    = 0xfe
+	RT_CLASS_MAX                     = 0xff
+	RT_CLASS_UNSPEC                  = 0x0
+	RUSAGE_CHILDREN                  = -0x1
+	RUSAGE_SELF                      = 0x0
+	RUSAGE_THREAD                    = 0x1
+	SCM_CREDENTIALS                  = 0x2
+	SCM_RIGHTS                       = 0x1
+	SCM_TIMESTAMP                    = 0x1d
+	SCM_TIMESTAMPING                 = 0x25
+	SCM_TIMESTAMPNS                  = 0x23
+	SCM_WIFI_STATUS                  = 0x29
+	SHUT_RD                          = 0x0
+	SHUT_RDWR                        = 0x2
+	SHUT_WR                          = 0x1
+	SIOCADDDLCI                      = 0x8980
+	SIOCADDMULTI                     = 0x8931
+	SIOCADDRT                        = 0x890b
+	SIOCATMARK                       = 0x40047307
+	SIOCDARP                         = 0x8953
+	SIOCDELDLCI                      = 0x8981
+	SIOCDELMULTI                     = 0x8932
+	SIOCDELRT                        = 0x890c
+	SIOCDEVPRIVATE                   = 0x89f0
+	SIOCDIFADDR                      = 0x8936
+	SIOCDRARP                        = 0x8960
+	SIOCGARP                         = 0x8954
+	SIOCGIFADDR                      = 0x8915
+	SIOCGIFBR                        = 0x8940
+	SIOCGIFBRDADDR                   = 0x8919
+	SIOCGIFCONF                      = 0x8912
+	SIOCGIFCOUNT                     = 0x8938
+	SIOCGIFDSTADDR                   = 0x8917
+	SIOCGIFENCAP                     = 0x8925
+	SIOCGIFFLAGS                     = 0x8913
+	SIOCGIFHWADDR                    = 0x8927
+	SIOCGIFINDEX                     = 0x8933
+	SIOCGIFMAP                       = 0x8970
+	SIOCGIFMEM                       = 0x891f
+	SIOCGIFMETRIC                    = 0x891d
+	SIOCGIFMTU                       = 0x8921
+	SIOCGIFNAME                      = 0x8910
+	SIOCGIFNETMASK                   = 0x891b
+	SIOCGIFPFLAGS                    = 0x8935
+	SIOCGIFSLAVE                     = 0x8929
+	SIOCGIFTXQLEN                    = 0x8942
+	SIOCGPGRP                        = 0x40047309
+	SIOCGRARP                        = 0x8961
+	SIOCGSTAMP                       = 0x8906
+	SIOCGSTAMPNS                     = 0x8907
+	SIOCPROTOPRIVATE                 = 0x89e0
+	SIOCRTMSG                        = 0x890d
+	SIOCSARP                         = 0x8955
+	SIOCSIFADDR                      = 0x8916
+	SIOCSIFBR                        = 0x8941
+	SIOCSIFBRDADDR                   = 0x891a
+	SIOCSIFDSTADDR                   = 0x8918
+	SIOCSIFENCAP                     = 0x8926
+	SIOCSIFFLAGS                     = 0x8914
+	SIOCSIFHWADDR                    = 0x8924
+	SIOCSIFHWBROADCAST               = 0x8937
+	SIOCSIFLINK                      = 0x8911
+	SIOCSIFMAP                       = 0x8971
+	SIOCSIFMEM                       = 0x8920
+	SIOCSIFMETRIC                    = 0x891e
+	SIOCSIFMTU                       = 0x8922
+	SIOCSIFNAME                      = 0x8923
+	SIOCSIFNETMASK                   = 0x891c
+	SIOCSIFPFLAGS                    = 0x8934
+	SIOCSIFSLAVE                     = 0x8930
+	SIOCSIFTXQLEN                    = 0x8943
+	SIOCSPGRP                        = 0x80047308
+	SIOCSRARP                        = 0x8962
+	SOCK_CLOEXEC                     = 0x80000
+	SOCK_DCCP                        = 0x6
+	SOCK_DGRAM                       = 0x1
+	SOCK_NONBLOCK                    = 0x80
+	SOCK_PACKET                      = 0xa
+	SOCK_RAW                         = 0x3
+	SOCK_RDM                         = 0x4
+	SOCK_SEQPACKET                   = 0x5
+	SOCK_STREAM                      = 0x2
+	SOL_AAL                          = 0x109
+	SOL_ATM                          = 0x108
+	SOL_DECNET                       = 0x105
+	SOL_ICMPV6                       = 0x3a
+	SOL_IP                           = 0x0
+	SOL_IPV6                         = 0x29
+	SOL_IRDA                         = 0x10a
+	SOL_PACKET                       = 0x107
+	SOL_RAW                          = 0xff
+	SOL_SOCKET                       = 0xffff
+	SOL_TCP                          = 0x6
+	SOL_X25                          = 0x106
+	SOMAXCONN                        = 0x80
+	SO_ACCEPTCONN                    = 0x1009
+	SO_ATTACH_BPF                    = 0x32
+	SO_ATTACH_FILTER                 = 0x1a
+	SO_BINDTODEVICE                  = 0x19
+	SO_BPF_EXTENSIONS                = 0x30
+	SO_BROADCAST                     = 0x20
+	SO_BSDCOMPAT                     = 0xe
+	SO_BUSY_POLL                     = 0x2e
+	SO_DEBUG                         = 0x1
+	SO_DETACH_BPF                    = 0x1b
+	SO_DETACH_FILTER                 = 0x1b
+	SO_DOMAIN                        = 0x1029
+	SO_DONTROUTE                     = 0x10
+	SO_ERROR                         = 0x1007
+	SO_GET_FILTER                    = 0x1a
+	SO_INCOMING_CPU                  = 0x31
+	SO_KEEPALIVE                     = 0x8
+	SO_LINGER                        = 0x80
+	SO_LOCK_FILTER                   = 0x2c
+	SO_MARK                          = 0x24
+	SO_MAX_PACING_RATE               = 0x2f
+	SO_NOFCS                         = 0x2b
+	SO_NO_CHECK                      = 0xb
+	SO_OOBINLINE                     = 0x100
+	SO_PASSCRED                      = 0x11
+	SO_PASSSEC                       = 0x22
+	SO_PEEK_OFF                      = 0x2a
+	SO_PEERCRED                      = 0x12
+	SO_PEERNAME                      = 0x1c
+	SO_PEERSEC                       = 0x1e
+	SO_PRIORITY                      = 0xc
+	SO_PROTOCOL                      = 0x1028
+	SO_RCVBUF                        = 0x1002
+	SO_RCVBUFFORCE                   = 0x21
+	SO_RCVLOWAT                      = 0x1004
+	SO_RCVTIMEO                      = 0x1006
+	SO_REUSEADDR                     = 0x4
+	SO_REUSEPORT                     = 0x200
+	SO_RXQ_OVFL                      = 0x28
+	SO_SECURITY_AUTHENTICATION       = 0x16
+	SO_SECURITY_ENCRYPTION_NETWORK   = 0x18
+	SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17
+	SO_SELECT_ERR_QUEUE              = 0x2d
+	SO_SNDBUF                        = 0x1001
+	SO_SNDBUFFORCE                   = 0x1f
+	SO_SNDLOWAT                      = 0x1003
+	SO_SNDTIMEO                      = 0x1005
+	SO_STYLE                         = 0x1008
+	SO_TIMESTAMP                     = 0x1d
+	SO_TIMESTAMPING                  = 0x25
+	SO_TIMESTAMPNS                   = 0x23
+	SO_TYPE                          = 0x1008
+	SO_WIFI_STATUS                   = 0x29
+	S_BLKSIZE                        = 0x200
+	S_IEXEC                          = 0x40
+	S_IFBLK                          = 0x6000
+	S_IFCHR                          = 0x2000
+	S_IFDIR                          = 0x4000
+	S_IFIFO                          = 0x1000
+	S_IFLNK                          = 0xa000
+	S_IFMT                           = 0xf000
+	S_IFREG                          = 0x8000
+	S_IFSOCK                         = 0xc000
+	S_IREAD                          = 0x100
+	S_IRGRP                          = 0x20
+	S_IROTH                          = 0x4
+	S_IRUSR                          = 0x100
+	S_IRWXG                          = 0x38
+	S_IRWXO                          = 0x7
+	S_IRWXU                          = 0x1c0
+	S_ISGID                          = 0x400
+	S_ISUID                          = 0x800
+	S_ISVTX                          = 0x200
+	S_IWGRP                          = 0x10
+	S_IWOTH                          = 0x2
+	S_IWRITE                         = 0x80
+	S_IWUSR                          = 0x80
+	S_IXGRP                          = 0x8
+	S_IXOTH                          = 0x1
+	S_IXUSR                          = 0x40
+	TCFLSH                           = 0x5407
+	TCIFLUSH                         = 0x0
+	TCIOFLUSH                        = 0x2
+	TCOFLUSH                         = 0x1
+	TCP_CONGESTION                   = 0xd
+	TCP_COOKIE_IN_ALWAYS             = 0x1
+	TCP_COOKIE_MAX                   = 0x10
+	TCP_COOKIE_MIN                   = 0x8
+	TCP_COOKIE_OUT_NEVER             = 0x2
+	TCP_COOKIE_PAIR_SIZE             = 0x20
+	TCP_COOKIE_TRANSACTIONS          = 0xf
+	TCP_CORK                         = 0x3
+	TCP_DEFER_ACCEPT                 = 0x9
+	TCP_FASTOPEN                     = 0x17
+	TCP_INFO                         = 0xb
+	TCP_KEEPCNT                      = 0x6
+	TCP_KEEPIDLE                     = 0x4
+	TCP_KEEPINTVL                    = 0x5
+	TCP_LINGER2                      = 0x8
+	TCP_MAXSEG                       = 0x2
+	TCP_MAXWIN                       = 0xffff
+	TCP_MAX_WINSHIFT                 = 0xe
+	TCP_MD5SIG                       = 0xe
+	TCP_MD5SIG_MAXKEYLEN             = 0x50
+	TCP_MSS                          = 0x200
+	TCP_MSS_DEFAULT                  = 0x218
+	TCP_MSS_DESIRED                  = 0x4c4
+	TCP_NODELAY                      = 0x1
+	TCP_QUEUE_SEQ                    = 0x15
+	TCP_QUICKACK                     = 0xc
+	TCP_REPAIR                       = 0x13
+	TCP_REPAIR_OPTIONS               = 0x16
+	TCP_REPAIR_QUEUE                 = 0x14
+	TCP_SYNCNT                       = 0x7
+	TCP_S_DATA_IN                    = 0x4
+	TCP_S_DATA_OUT                   = 0x8
+	TCP_THIN_DUPACK                  = 0x11
+	TCP_THIN_LINEAR_TIMEOUTS         = 0x10
+	TCP_TIMESTAMP                    = 0x18
+	TCP_USER_TIMEOUT                 = 0x12
+	TCP_WINDOW_CLAMP                 = 0xa
+	TCSAFLUSH                        = 0x5410
+	TCSBRK                           = 0x5405
+	TCXONC                           = 0x5406
+	TIOCCBRK                         = 0x5428
+	TIOCCONS                         = 0x80047478
+	TIOCEXCL                         = 0x740d
+	TIOCGDEV                         = 0x40045432
+	TIOCGETD                         = 0x7400
+	TIOCGETP                         = 0x7408
+	TIOCGEXCL                        = 0x40045440
+	TIOCGICOUNT                      = 0x5492
+	TIOCGLCKTRMIOS                   = 0x548b
+	TIOCGLTC                         = 0x7474
+	TIOCGPGRP                        = 0x40047477
+	TIOCGPKT                         = 0x40045438
+	TIOCGPTLCK                       = 0x40045439
+	TIOCGPTN                         = 0x40045430
+	TIOCGRS485                       = 0x4020542e
+	TIOCGSERIAL                      = 0x5484
+	TIOCGSID                         = 0x7416
+	TIOCGSOFTCAR                     = 0x5481
+	TIOCGWINSZ                       = 0x40087468
+	TIOCINQ                          = 0x467f
+	TIOCLINUX                        = 0x5483
+	TIOCMBIC                         = 0x741c
+	TIOCMBIS                         = 0x741b
+	TIOCMGET                         = 0x741d
+	TIOCMIWAIT                       = 0x5491
+	TIOCMSET                         = 0x741a
+	TIOCM_CAR                        = 0x100
+	TIOCM_CD                         = 0x100
+	TIOCM_CTS                        = 0x40
+	TIOCM_DSR                        = 0x400
+	TIOCM_DTR                        = 0x2
+	TIOCM_LE                         = 0x1
+	TIOCM_RI                         = 0x200
+	TIOCM_RNG                        = 0x200
+	TIOCM_RTS                        = 0x4
+	TIOCM_SR                         = 0x20
+	TIOCM_ST                         = 0x10
+	TIOCNOTTY                        = 0x5471
+	TIOCNXCL                         = 0x740e
+	TIOCOUTQ                         = 0x7472
+	TIOCPKT                          = 0x5470
+	TIOCPKT_DATA                     = 0x0
+	TIOCPKT_DOSTOP                   = 0x20
+	TIOCPKT_FLUSHREAD                = 0x1
+	TIOCPKT_FLUSHWRITE               = 0x2
+	TIOCPKT_IOCTL                    = 0x40
+	TIOCPKT_NOSTOP                   = 0x10
+	TIOCPKT_START                    = 0x8
+	TIOCPKT_STOP                     = 0x4
+	TIOCSBRK                         = 0x5427
+	TIOCSCTTY                        = 0x5480
+	TIOCSERCONFIG                    = 0x5488
+	TIOCSERGETLSR                    = 0x548e
+	TIOCSERGETMULTI                  = 0x548f
+	TIOCSERGSTRUCT                   = 0x548d
+	TIOCSERGWILD                     = 0x5489
+	TIOCSERSETMULTI                  = 0x5490
+	TIOCSERSWILD                     = 0x548a
+	TIOCSER_TEMT                     = 0x1
+	TIOCSETD                         = 0x7401
+	TIOCSETN                         = 0x740a
+	TIOCSETP                         = 0x7409
+	TIOCSIG                          = 0x80045436
+	TIOCSLCKTRMIOS                   = 0x548c
+	TIOCSLTC                         = 0x7475
+	TIOCSPGRP                        = 0x80047476
+	TIOCSPTLCK                       = 0x80045431
+	TIOCSRS485                       = 0xc020542f
+	TIOCSSERIAL                      = 0x5485
+	TIOCSSOFTCAR                     = 0x5482
+	TIOCSTI                          = 0x5472
+	TIOCSWINSZ                       = 0x80087467
+	TIOCVHANGUP                      = 0x5437
+	TOSTOP                           = 0x8000
+	TUNATTACHFILTER                  = 0x801054d5
+	TUNDETACHFILTER                  = 0x801054d6
+	TUNGETFEATURES                   = 0x400454cf
+	TUNGETFILTER                     = 0x401054db
+	TUNGETIFF                        = 0x400454d2
+	TUNGETSNDBUF                     = 0x400454d3
+	TUNGETVNETBE                     = 0x400454df
+	TUNGETVNETHDRSZ                  = 0x400454d7
+	TUNGETVNETLE                     = 0x400454dd
+	TUNSETDEBUG                      = 0x800454c9
+	TUNSETGROUP                      = 0x800454ce
+	TUNSETIFF                        = 0x800454ca
+	TUNSETIFINDEX                    = 0x800454da
+	TUNSETLINK                       = 0x800454cd
+	TUNSETNOCSUM                     = 0x800454c8
+	TUNSETOFFLOAD                    = 0x800454d0
+	TUNSETOWNER                      = 0x800454cc
+	TUNSETPERSIST                    = 0x800454cb
+	TUNSETQUEUE                      = 0x800454d9
+	TUNSETSNDBUF                     = 0x800454d4
+	TUNSETTXFILTER                   = 0x800454d1
+	TUNSETVNETBE                     = 0x800454de
+	TUNSETVNETHDRSZ                  = 0x800454d8
+	TUNSETVNETLE                     = 0x800454dc
+	VDISCARD                         = 0xd
+	VEOF                             = 0x10
+	VEOL                             = 0x11
+	VEOL2                            = 0x6
+	VERASE                           = 0x2
+	VINTR                            = 0x0
+	VKILL                            = 0x3
+	VLNEXT                           = 0xf
+	VMIN                             = 0x4
+	VQUIT                            = 0x1
+	VREPRINT                         = 0xc
+	VSTART                           = 0x8
+	VSTOP                            = 0x9
+	VSUSP                            = 0xa
+	VSWTC                            = 0x7
+	VSWTCH                           = 0x7
+	VT0                              = 0x0
+	VT1                              = 0x4000
+	VTDLY                            = 0x4000
+	VTIME                            = 0x5
+	VWERASE                          = 0xe
+	WALL                             = 0x40000000
+	WCLONE                           = 0x80000000
+	WCONTINUED                       = 0x8
+	WEXITED                          = 0x4
+	WNOHANG                          = 0x1
+	WNOTHREAD                        = 0x20000000
+	WNOWAIT                          = 0x1000000
+	WORDSIZE                         = 0x40
+	WSTOPPED                         = 0x2
+	WUNTRACED                        = 0x2
+)
+
+// Errors
+//
+// Raw errno values for this generated Linux port. The numbering follows
+// the MIPS-family layout rather than the common asm-generic one (note
+// EAGAIN=0xb but EADDRINUSE=0x7d, EDQUOT=0x46d, and the deliberate
+// aliases ENOTSUP==EOPNOTSUPP=0x7a and EWOULDBLOCK==EAGAIN=0xb; unlike
+// asm-generic, EDEADLK=0x2d and EDEADLOCK=0x38 are distinct here).
+// NOTE(review): this file appears machine generated (mkerrors.sh, like
+// the sibling zerrors_linux_*.go files) — regenerate, don't hand-edit.
+const (
+	E2BIG           = syscall.Errno(0x7)
+	EACCES          = syscall.Errno(0xd)
+	EADDRINUSE      = syscall.Errno(0x7d)
+	EADDRNOTAVAIL   = syscall.Errno(0x7e)
+	EADV            = syscall.Errno(0x44)
+	EAFNOSUPPORT    = syscall.Errno(0x7c)
+	EAGAIN          = syscall.Errno(0xb)
+	EALREADY        = syscall.Errno(0x95)
+	EBADE           = syscall.Errno(0x32)
+	EBADF           = syscall.Errno(0x9)
+	EBADFD          = syscall.Errno(0x51)
+	EBADMSG         = syscall.Errno(0x4d)
+	EBADR           = syscall.Errno(0x33)
+	EBADRQC         = syscall.Errno(0x36)
+	EBADSLT         = syscall.Errno(0x37)
+	EBFONT          = syscall.Errno(0x3b)
+	EBUSY           = syscall.Errno(0x10)
+	ECANCELED       = syscall.Errno(0x9e)
+	ECHILD          = syscall.Errno(0xa)
+	ECHRNG          = syscall.Errno(0x25)
+	ECOMM           = syscall.Errno(0x46)
+	ECONNABORTED    = syscall.Errno(0x82)
+	ECONNREFUSED    = syscall.Errno(0x92)
+	ECONNRESET      = syscall.Errno(0x83)
+	EDEADLK         = syscall.Errno(0x2d)
+	EDEADLOCK       = syscall.Errno(0x38)
+	EDESTADDRREQ    = syscall.Errno(0x60)
+	EDOM            = syscall.Errno(0x21)
+	EDOTDOT         = syscall.Errno(0x49)
+	EDQUOT          = syscall.Errno(0x46d)
+	EEXIST          = syscall.Errno(0x11)
+	EFAULT          = syscall.Errno(0xe)
+	EFBIG           = syscall.Errno(0x1b)
+	EHOSTDOWN       = syscall.Errno(0x93)
+	EHOSTUNREACH    = syscall.Errno(0x94)
+	EHWPOISON       = syscall.Errno(0xa8)
+	EIDRM           = syscall.Errno(0x24)
+	EILSEQ          = syscall.Errno(0x58)
+	EINIT           = syscall.Errno(0x8d)
+	EINPROGRESS     = syscall.Errno(0x96)
+	EINTR           = syscall.Errno(0x4)
+	EINVAL          = syscall.Errno(0x16)
+	EIO             = syscall.Errno(0x5)
+	EISCONN         = syscall.Errno(0x85)
+	EISDIR          = syscall.Errno(0x15)
+	EISNAM          = syscall.Errno(0x8b)
+	EKEYEXPIRED     = syscall.Errno(0xa2)
+	EKEYREJECTED    = syscall.Errno(0xa4)
+	EKEYREVOKED     = syscall.Errno(0xa3)
+	EL2HLT          = syscall.Errno(0x2c)
+	EL2NSYNC        = syscall.Errno(0x26)
+	EL3HLT          = syscall.Errno(0x27)
+	EL3RST          = syscall.Errno(0x28)
+	ELIBACC         = syscall.Errno(0x53)
+	ELIBBAD         = syscall.Errno(0x54)
+	ELIBEXEC        = syscall.Errno(0x57)
+	ELIBMAX         = syscall.Errno(0x56)
+	ELIBSCN         = syscall.Errno(0x55)
+	ELNRNG          = syscall.Errno(0x29)
+	ELOOP           = syscall.Errno(0x5a)
+	EMEDIUMTYPE     = syscall.Errno(0xa0)
+	EMFILE          = syscall.Errno(0x18)
+	EMLINK          = syscall.Errno(0x1f)
+	EMSGSIZE        = syscall.Errno(0x61)
+	EMULTIHOP       = syscall.Errno(0x4a)
+	ENAMETOOLONG    = syscall.Errno(0x4e)
+	ENAVAIL         = syscall.Errno(0x8a)
+	ENETDOWN        = syscall.Errno(0x7f)
+	ENETRESET       = syscall.Errno(0x81)
+	ENETUNREACH     = syscall.Errno(0x80)
+	ENFILE          = syscall.Errno(0x17)
+	ENOANO          = syscall.Errno(0x35)
+	ENOBUFS         = syscall.Errno(0x84)
+	ENOCSI          = syscall.Errno(0x2b)
+	ENODATA         = syscall.Errno(0x3d)
+	ENODEV          = syscall.Errno(0x13)
+	ENOENT          = syscall.Errno(0x2)
+	ENOEXEC         = syscall.Errno(0x8)
+	ENOKEY          = syscall.Errno(0xa1)
+	ENOLCK          = syscall.Errno(0x2e)
+	ENOLINK         = syscall.Errno(0x43)
+	ENOMEDIUM       = syscall.Errno(0x9f)
+	ENOMEM          = syscall.Errno(0xc)
+	ENOMSG          = syscall.Errno(0x23)
+	ENONET          = syscall.Errno(0x40)
+	ENOPKG          = syscall.Errno(0x41)
+	ENOPROTOOPT     = syscall.Errno(0x63)
+	ENOSPC          = syscall.Errno(0x1c)
+	ENOSR           = syscall.Errno(0x3f)
+	ENOSTR          = syscall.Errno(0x3c)
+	ENOSYS          = syscall.Errno(0x59)
+	ENOTBLK         = syscall.Errno(0xf)
+	ENOTCONN        = syscall.Errno(0x86)
+	ENOTDIR         = syscall.Errno(0x14)
+	ENOTEMPTY       = syscall.Errno(0x5d)
+	ENOTNAM         = syscall.Errno(0x89)
+	ENOTRECOVERABLE = syscall.Errno(0xa6)
+	ENOTSOCK        = syscall.Errno(0x5f)
+	ENOTSUP         = syscall.Errno(0x7a)
+	ENOTTY          = syscall.Errno(0x19)
+	ENOTUNIQ        = syscall.Errno(0x50)
+	ENXIO           = syscall.Errno(0x6)
+	EOPNOTSUPP      = syscall.Errno(0x7a)
+	EOVERFLOW       = syscall.Errno(0x4f)
+	EOWNERDEAD      = syscall.Errno(0xa5)
+	EPERM           = syscall.Errno(0x1)
+	EPFNOSUPPORT    = syscall.Errno(0x7b)
+	EPIPE           = syscall.Errno(0x20)
+	EPROTO          = syscall.Errno(0x47)
+	EPROTONOSUPPORT = syscall.Errno(0x78)
+	EPROTOTYPE      = syscall.Errno(0x62)
+	ERANGE          = syscall.Errno(0x22)
+	EREMCHG         = syscall.Errno(0x52)
+	EREMDEV         = syscall.Errno(0x8e)
+	EREMOTE         = syscall.Errno(0x42)
+	EREMOTEIO       = syscall.Errno(0x8c)
+	ERESTART        = syscall.Errno(0x5b)
+	ERFKILL         = syscall.Errno(0xa7)
+	EROFS           = syscall.Errno(0x1e)
+	ESHUTDOWN       = syscall.Errno(0x8f)
+	ESOCKTNOSUPPORT = syscall.Errno(0x79)
+	ESPIPE          = syscall.Errno(0x1d)
+	ESRCH           = syscall.Errno(0x3)
+	ESRMNT          = syscall.Errno(0x45)
+	ESTALE          = syscall.Errno(0x97)
+	ESTRPIPE        = syscall.Errno(0x5c)
+	ETIME           = syscall.Errno(0x3e)
+	ETIMEDOUT       = syscall.Errno(0x91)
+	ETOOMANYREFS    = syscall.Errno(0x90)
+	ETXTBSY         = syscall.Errno(0x1a)
+	EUCLEAN         = syscall.Errno(0x87)
+	EUNATCH         = syscall.Errno(0x2a)
+	EUSERS          = syscall.Errno(0x5e)
+	EWOULDBLOCK     = syscall.Errno(0xb)
+	EXDEV           = syscall.Errno(0x12)
+	EXFULL          = syscall.Errno(0x34)
+)
+
+// Signals
+//
+// Signal numbers for this generated Linux port, using the MIPS layout
+// (SIGBUS=0xa, SIGUSR1=0x10, SIGCHLD=0x12, SIGSTOP=0x17) rather than
+// the asm-generic numbering. SIGCLD/SIGCHLD and SIGIO/SIGPOLL are
+// intentional aliases, as is SIGIOT for SIGABRT.
+// NOTE(review): machine generated — regenerate rather than hand-edit.
+const (
+	SIGABRT   = syscall.Signal(0x6)
+	SIGALRM   = syscall.Signal(0xe)
+	SIGBUS    = syscall.Signal(0xa)
+	SIGCHLD   = syscall.Signal(0x12)
+	SIGCLD    = syscall.Signal(0x12)
+	SIGCONT   = syscall.Signal(0x19)
+	SIGEMT    = syscall.Signal(0x7)
+	SIGFPE    = syscall.Signal(0x8)
+	SIGHUP    = syscall.Signal(0x1)
+	SIGILL    = syscall.Signal(0x4)
+	SIGINT    = syscall.Signal(0x2)
+	SIGIO     = syscall.Signal(0x16)
+	SIGIOT    = syscall.Signal(0x6)
+	SIGKILL   = syscall.Signal(0x9)
+	SIGPIPE   = syscall.Signal(0xd)
+	SIGPOLL   = syscall.Signal(0x16)
+	SIGPROF   = syscall.Signal(0x1d)
+	SIGPWR    = syscall.Signal(0x13)
+	SIGQUIT   = syscall.Signal(0x3)
+	SIGSEGV   = syscall.Signal(0xb)
+	SIGSTOP   = syscall.Signal(0x17)
+	SIGSYS    = syscall.Signal(0xc)
+	SIGTERM   = syscall.Signal(0xf)
+	SIGTRAP   = syscall.Signal(0x5)
+	SIGTSTP   = syscall.Signal(0x18)
+	SIGTTIN   = syscall.Signal(0x1a)
+	SIGTTOU   = syscall.Signal(0x1b)
+	SIGURG    = syscall.Signal(0x15)
+	SIGUSR1   = syscall.Signal(0x10)
+	SIGUSR2   = syscall.Signal(0x11)
+	SIGVTALRM = syscall.Signal(0x1c)
+	SIGWINCH  = syscall.Signal(0x14)
+	SIGXCPU   = syscall.Signal(0x1e)
+	SIGXFSZ   = syscall.Signal(0x1f)
+)
+
+// Error table
+//
+// Sparse lookup table of human-readable messages indexed by errno value.
+// Gaps (errno values unused on this port, e.g. 47-49, 100-119, 152-157)
+// are zero-value "" entries; the lone high entry at 1133 (EDQUOT=0x46d)
+// sets the array's length. Message strings are runtime output and must
+// stay byte-exact. NOTE(review): presumably consumed by the package's
+// Errno-to-string formatting — the consumer is outside this chunk.
+var errors = [...]string{
+	1:    "operation not permitted",
+	2:    "no such file or directory",
+	3:    "no such process",
+	4:    "interrupted system call",
+	5:    "input/output error",
+	6:    "no such device or address",
+	7:    "argument list too long",
+	8:    "exec format error",
+	9:    "bad file descriptor",
+	10:   "no child processes",
+	11:   "resource temporarily unavailable",
+	12:   "cannot allocate memory",
+	13:   "permission denied",
+	14:   "bad address",
+	15:   "block device required",
+	16:   "device or resource busy",
+	17:   "file exists",
+	18:   "invalid cross-device link",
+	19:   "no such device",
+	20:   "not a directory",
+	21:   "is a directory",
+	22:   "invalid argument",
+	23:   "too many open files in system",
+	24:   "too many open files",
+	25:   "inappropriate ioctl for device",
+	26:   "text file busy",
+	27:   "file too large",
+	28:   "no space left on device",
+	29:   "illegal seek",
+	30:   "read-only file system",
+	31:   "too many links",
+	32:   "broken pipe",
+	33:   "numerical argument out of domain",
+	34:   "numerical result out of range",
+	35:   "no message of desired type",
+	36:   "identifier removed",
+	37:   "channel number out of range",
+	38:   "level 2 not synchronized",
+	39:   "level 3 halted",
+	40:   "level 3 reset",
+	41:   "link number out of range",
+	42:   "protocol driver not attached",
+	43:   "no CSI structure available",
+	44:   "level 2 halted",
+	45:   "resource deadlock avoided",
+	46:   "no locks available",
+	50:   "invalid exchange",
+	51:   "invalid request descriptor",
+	52:   "exchange full",
+	53:   "no anode",
+	54:   "invalid request code",
+	55:   "invalid slot",
+	56:   "file locking deadlock error",
+	59:   "bad font file format",
+	60:   "device not a stream",
+	61:   "no data available",
+	62:   "timer expired",
+	63:   "out of streams resources",
+	64:   "machine is not on the network",
+	65:   "package not installed",
+	66:   "object is remote",
+	67:   "link has been severed",
+	68:   "advertise error",
+	69:   "srmount error",
+	70:   "communication error on send",
+	71:   "protocol error",
+	73:   "RFS specific error",
+	74:   "multihop attempted",
+	77:   "bad message",
+	78:   "file name too long",
+	79:   "value too large for defined data type",
+	80:   "name not unique on network",
+	81:   "file descriptor in bad state",
+	82:   "remote address changed",
+	83:   "can not access a needed shared library",
+	84:   "accessing a corrupted shared library",
+	85:   ".lib section in a.out corrupted",
+	86:   "attempting to link in too many shared libraries",
+	87:   "cannot exec a shared library directly",
+	88:   "invalid or incomplete multibyte or wide character",
+	89:   "function not implemented",
+	90:   "too many levels of symbolic links",
+	91:   "interrupted system call should be restarted",
+	92:   "streams pipe error",
+	93:   "directory not empty",
+	94:   "too many users",
+	95:   "socket operation on non-socket",
+	96:   "destination address required",
+	97:   "message too long",
+	98:   "protocol wrong type for socket",
+	99:   "protocol not available",
+	120:  "protocol not supported",
+	121:  "socket type not supported",
+	122:  "operation not supported",
+	123:  "protocol family not supported",
+	124:  "address family not supported by protocol",
+	125:  "address already in use",
+	126:  "cannot assign requested address",
+	127:  "network is down",
+	128:  "network is unreachable",
+	129:  "network dropped connection on reset",
+	130:  "software caused connection abort",
+	131:  "connection reset by peer",
+	132:  "no buffer space available",
+	133:  "transport endpoint is already connected",
+	134:  "transport endpoint is not connected",
+	135:  "structure needs cleaning",
+	137:  "not a XENIX named type file",
+	138:  "no XENIX semaphores available",
+	139:  "is a named type file",
+	140:  "remote I/O error",
+	141:  "unknown error 141",
+	142:  "unknown error 142",
+	143:  "cannot send after transport endpoint shutdown",
+	144:  "too many references: cannot splice",
+	145:  "connection timed out",
+	146:  "connection refused",
+	147:  "host is down",
+	148:  "no route to host",
+	149:  "operation already in progress",
+	150:  "operation now in progress",
+	151:  "stale file handle",
+	158:  "operation canceled",
+	159:  "no medium found",
+	160:  "wrong medium type",
+	161:  "required key not available",
+	162:  "key has expired",
+	163:  "key has been revoked",
+	164:  "key was rejected by service",
+	165:  "owner died",
+	166:  "state not recoverable",
+	167:  "operation not possible due to RF-kill",
+	168:  "memory page has hardware error",
+	1133: "disk quota exceeded",
+}
+
+// Signal table
+//
+// Human-readable names indexed by signal number for signals 1..31
+// (matching the MIPS numbering above: 10 is "bus error", 16/17 are the
+// user-defined signals, 18 is "child exited"). Index 0 is the unused ""
+// zero value. Strings are runtime output and must stay byte-exact.
+// NOTE(review): presumably consumed by Signal-to-string formatting —
+// the consumer is outside this chunk.
+var signals = [...]string{
+	1:  "hangup",
+	2:  "interrupt",
+	3:  "quit",
+	4:  "illegal instruction",
+	5:  "trace/breakpoint trap",
+	6:  "aborted",
+	7:  "EMT trap",
+	8:  "floating point exception",
+	9:  "killed",
+	10: "bus error",
+	11: "segmentation fault",
+	12: "bad system call",
+	13: "broken pipe",
+	14: "alarm clock",
+	15: "terminated",
+	16: "user defined signal 1",
+	17: "user defined signal 2",
+	18: "child exited",
+	19: "power failure",
+	20: "window changed",
+	21: "urgent I/O condition",
+	22: "I/O possible",
+	23: "stopped (signal)",
+	24: "stopped",
+	25: "continued",
+	26: "stopped (tty input)",
+	27: "stopped (tty output)",
+	28: "virtual timer expired",
+	29: "profiling timer expired",
+	30: "CPU time limit exceeded",
+	31: "file size limit exceeded",
+}
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
new file mode 100644
index 0000000000000000000000000000000000000000..e5debb688712bb7311cb8b49a1eb8700579fcf52
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
@@ -0,0 +1,1916 @@
+// mkerrors.sh
+// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
+
+// +build mips64le,linux
+
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs -- _const.go
+
+package unix
+
+import "syscall"
+
+const (
+	AF_ALG                           = 0x26
+	AF_APPLETALK                     = 0x5
+	AF_ASH                           = 0x12
+	AF_ATMPVC                        = 0x8
+	AF_ATMSVC                        = 0x14
+	AF_AX25                          = 0x3
+	AF_BLUETOOTH                     = 0x1f
+	AF_BRIDGE                        = 0x7
+	AF_CAIF                          = 0x25
+	AF_CAN                           = 0x1d
+	AF_DECnet                        = 0xc
+	AF_ECONET                        = 0x13
+	AF_FILE                          = 0x1
+	AF_IB                            = 0x1b
+	AF_IEEE802154                    = 0x24
+	AF_INET                          = 0x2
+	AF_INET6                         = 0xa
+	AF_IPX                           = 0x4
+	AF_IRDA                          = 0x17
+	AF_ISDN                          = 0x22
+	AF_IUCV                          = 0x20
+	AF_KEY                           = 0xf
+	AF_LLC                           = 0x1a
+	AF_LOCAL                         = 0x1
+	AF_MAX                           = 0x29
+	AF_MPLS                          = 0x1c
+	AF_NETBEUI                       = 0xd
+	AF_NETLINK                       = 0x10
+	AF_NETROM                        = 0x6
+	AF_NFC                           = 0x27
+	AF_PACKET                        = 0x11
+	AF_PHONET                        = 0x23
+	AF_PPPOX                         = 0x18
+	AF_RDS                           = 0x15
+	AF_ROSE                          = 0xb
+	AF_ROUTE                         = 0x10
+	AF_RXRPC                         = 0x21
+	AF_SECURITY                      = 0xe
+	AF_SNA                           = 0x16
+	AF_TIPC                          = 0x1e
+	AF_UNIX                          = 0x1
+	AF_UNSPEC                        = 0x0
+	AF_VSOCK                         = 0x28
+	AF_WANPIPE                       = 0x19
+	AF_X25                           = 0x9
+	ARPHRD_6LOWPAN                   = 0x339
+	ARPHRD_ADAPT                     = 0x108
+	ARPHRD_APPLETLK                  = 0x8
+	ARPHRD_ARCNET                    = 0x7
+	ARPHRD_ASH                       = 0x30d
+	ARPHRD_ATM                       = 0x13
+	ARPHRD_AX25                      = 0x3
+	ARPHRD_BIF                       = 0x307
+	ARPHRD_CAIF                      = 0x336
+	ARPHRD_CAN                       = 0x118
+	ARPHRD_CHAOS                     = 0x5
+	ARPHRD_CISCO                     = 0x201
+	ARPHRD_CSLIP                     = 0x101
+	ARPHRD_CSLIP6                    = 0x103
+	ARPHRD_DDCMP                     = 0x205
+	ARPHRD_DLCI                      = 0xf
+	ARPHRD_ECONET                    = 0x30e
+	ARPHRD_EETHER                    = 0x2
+	ARPHRD_ETHER                     = 0x1
+	ARPHRD_EUI64                     = 0x1b
+	ARPHRD_FCAL                      = 0x311
+	ARPHRD_FCFABRIC                  = 0x313
+	ARPHRD_FCPL                      = 0x312
+	ARPHRD_FCPP                      = 0x310
+	ARPHRD_FDDI                      = 0x306
+	ARPHRD_FRAD                      = 0x302
+	ARPHRD_HDLC                      = 0x201
+	ARPHRD_HIPPI                     = 0x30c
+	ARPHRD_HWX25                     = 0x110
+	ARPHRD_IEEE1394                  = 0x18
+	ARPHRD_IEEE802                   = 0x6
+	ARPHRD_IEEE80211                 = 0x321
+	ARPHRD_IEEE80211_PRISM           = 0x322
+	ARPHRD_IEEE80211_RADIOTAP        = 0x323
+	ARPHRD_IEEE802154                = 0x324
+	ARPHRD_IEEE802154_MONITOR        = 0x325
+	ARPHRD_IEEE802_TR                = 0x320
+	ARPHRD_INFINIBAND                = 0x20
+	ARPHRD_IP6GRE                    = 0x337
+	ARPHRD_IPDDP                     = 0x309
+	ARPHRD_IPGRE                     = 0x30a
+	ARPHRD_IRDA                      = 0x30f
+	ARPHRD_LAPB                      = 0x204
+	ARPHRD_LOCALTLK                  = 0x305
+	ARPHRD_LOOPBACK                  = 0x304
+	ARPHRD_METRICOM                  = 0x17
+	ARPHRD_NETLINK                   = 0x338
+	ARPHRD_NETROM                    = 0x0
+	ARPHRD_NONE                      = 0xfffe
+	ARPHRD_PHONET                    = 0x334
+	ARPHRD_PHONET_PIPE               = 0x335
+	ARPHRD_PIMREG                    = 0x30b
+	ARPHRD_PPP                       = 0x200
+	ARPHRD_PRONET                    = 0x4
+	ARPHRD_RAWHDLC                   = 0x206
+	ARPHRD_ROSE                      = 0x10e
+	ARPHRD_RSRVD                     = 0x104
+	ARPHRD_SIT                       = 0x308
+	ARPHRD_SKIP                      = 0x303
+	ARPHRD_SLIP                      = 0x100
+	ARPHRD_SLIP6                     = 0x102
+	ARPHRD_TUNNEL                    = 0x300
+	ARPHRD_TUNNEL6                   = 0x301
+	ARPHRD_VOID                      = 0xffff
+	ARPHRD_X25                       = 0x10f
+	B0                               = 0x0
+	B1000000                         = 0x1008
+	B110                             = 0x3
+	B115200                          = 0x1002
+	B1152000                         = 0x1009
+	B1200                            = 0x9
+	B134                             = 0x4
+	B150                             = 0x5
+	B1500000                         = 0x100a
+	B1800                            = 0xa
+	B19200                           = 0xe
+	B200                             = 0x6
+	B2000000                         = 0x100b
+	B230400                          = 0x1003
+	B2400                            = 0xb
+	B2500000                         = 0x100c
+	B300                             = 0x7
+	B3000000                         = 0x100d
+	B3500000                         = 0x100e
+	B38400                           = 0xf
+	B4000000                         = 0x100f
+	B460800                          = 0x1004
+	B4800                            = 0xc
+	B50                              = 0x1
+	B500000                          = 0x1005
+	B57600                           = 0x1001
+	B576000                          = 0x1006
+	B600                             = 0x8
+	B75                              = 0x2
+	B921600                          = 0x1007
+	B9600                            = 0xd
+	BPF_A                            = 0x10
+	BPF_ABS                          = 0x20
+	BPF_ADD                          = 0x0
+	BPF_ALU                          = 0x4
+	BPF_AND                          = 0x50
+	BPF_B                            = 0x10
+	BPF_DIV                          = 0x30
+	BPF_H                            = 0x8
+	BPF_IMM                          = 0x0
+	BPF_IND                          = 0x40
+	BPF_JA                           = 0x0
+	BPF_JEQ                          = 0x10
+	BPF_JGE                          = 0x30
+	BPF_JGT                          = 0x20
+	BPF_JMP                          = 0x5
+	BPF_JSET                         = 0x40
+	BPF_K                            = 0x0
+	BPF_LD                           = 0x0
+	BPF_LDX                          = 0x1
+	BPF_LEN                          = 0x80
+	BPF_LL_OFF                       = -0x200000
+	BPF_LSH                          = 0x60
+	BPF_MAJOR_VERSION                = 0x1
+	BPF_MAXINSNS                     = 0x1000
+	BPF_MEM                          = 0x60
+	BPF_MEMWORDS                     = 0x10
+	BPF_MINOR_VERSION                = 0x1
+	BPF_MISC                         = 0x7
+	BPF_MOD                          = 0x90
+	BPF_MSH                          = 0xa0
+	BPF_MUL                          = 0x20
+	BPF_NEG                          = 0x80
+	BPF_NET_OFF                      = -0x100000
+	BPF_OR                           = 0x40
+	BPF_RET                          = 0x6
+	BPF_RSH                          = 0x70
+	BPF_ST                           = 0x2
+	BPF_STX                          = 0x3
+	BPF_SUB                          = 0x10
+	BPF_TAX                          = 0x0
+	BPF_TXA                          = 0x80
+	BPF_W                            = 0x0
+	BPF_X                            = 0x8
+	BPF_XOR                          = 0xa0
+	BRKINT                           = 0x2
+	CFLUSH                           = 0xf
+	CLOCAL                           = 0x800
+	CLOCK_BOOTTIME                   = 0x7
+	CLOCK_BOOTTIME_ALARM             = 0x9
+	CLOCK_DEFAULT                    = 0x0
+	CLOCK_EXT                        = 0x1
+	CLOCK_INT                        = 0x2
+	CLOCK_MONOTONIC                  = 0x1
+	CLOCK_MONOTONIC_COARSE           = 0x6
+	CLOCK_MONOTONIC_RAW              = 0x4
+	CLOCK_PROCESS_CPUTIME_ID         = 0x2
+	CLOCK_REALTIME                   = 0x0
+	CLOCK_REALTIME_ALARM             = 0x8
+	CLOCK_REALTIME_COARSE            = 0x5
+	CLOCK_TAI                        = 0xb
+	CLOCK_THREAD_CPUTIME_ID          = 0x3
+	CLOCK_TXFROMRX                   = 0x4
+	CLOCK_TXINT                      = 0x3
+	CLONE_CHILD_CLEARTID             = 0x200000
+	CLONE_CHILD_SETTID               = 0x1000000
+	CLONE_DETACHED                   = 0x400000
+	CLONE_FILES                      = 0x400
+	CLONE_FS                         = 0x200
+	CLONE_IO                         = 0x80000000
+	CLONE_NEWIPC                     = 0x8000000
+	CLONE_NEWNET                     = 0x40000000
+	CLONE_NEWNS                      = 0x20000
+	CLONE_NEWPID                     = 0x20000000
+	CLONE_NEWUSER                    = 0x10000000
+	CLONE_NEWUTS                     = 0x4000000
+	CLONE_PARENT                     = 0x8000
+	CLONE_PARENT_SETTID              = 0x100000
+	CLONE_PTRACE                     = 0x2000
+	CLONE_SETTLS                     = 0x80000
+	CLONE_SIGHAND                    = 0x800
+	CLONE_SYSVSEM                    = 0x40000
+	CLONE_THREAD                     = 0x10000
+	CLONE_UNTRACED                   = 0x800000
+	CLONE_VFORK                      = 0x4000
+	CLONE_VM                         = 0x100
+	CREAD                            = 0x80
+	CS5                              = 0x0
+	CS6                              = 0x10
+	CS7                              = 0x20
+	CS8                              = 0x30
+	CSIGNAL                          = 0xff
+	CSIZE                            = 0x30
+	CSTART                           = 0x11
+	CSTATUS                          = 0x0
+	CSTOP                            = 0x13
+	CSTOPB                           = 0x40
+	CSUSP                            = 0x1a
+	DT_BLK                           = 0x6
+	DT_CHR                           = 0x2
+	DT_DIR                           = 0x4
+	DT_FIFO                          = 0x1
+	DT_LNK                           = 0xa
+	DT_REG                           = 0x8
+	DT_SOCK                          = 0xc
+	DT_UNKNOWN                       = 0x0
+	DT_WHT                           = 0xe
+	ECHO                             = 0x8
+	ECHOCTL                          = 0x200
+	ECHOE                            = 0x10
+	ECHOK                            = 0x20
+	ECHOKE                           = 0x800
+	ECHONL                           = 0x40
+	ECHOPRT                          = 0x400
+	ENCODING_DEFAULT                 = 0x0
+	ENCODING_FM_MARK                 = 0x3
+	ENCODING_FM_SPACE                = 0x4
+	ENCODING_MANCHESTER              = 0x5
+	ENCODING_NRZ                     = 0x1
+	ENCODING_NRZI                    = 0x2
+	EPOLLERR                         = 0x8
+	EPOLLET                          = 0x80000000
+	EPOLLHUP                         = 0x10
+	EPOLLIN                          = 0x1
+	EPOLLMSG                         = 0x400
+	EPOLLONESHOT                     = 0x40000000
+	EPOLLOUT                         = 0x4
+	EPOLLPRI                         = 0x2
+	EPOLLRDBAND                      = 0x80
+	EPOLLRDHUP                       = 0x2000
+	EPOLLRDNORM                      = 0x40
+	EPOLLWAKEUP                      = 0x20000000
+	EPOLLWRBAND                      = 0x200
+	EPOLLWRNORM                      = 0x100
+	EPOLL_CLOEXEC                    = 0x80000
+	EPOLL_CTL_ADD                    = 0x1
+	EPOLL_CTL_DEL                    = 0x2
+	EPOLL_CTL_MOD                    = 0x3
+	ETH_P_1588                       = 0x88f7
+	ETH_P_8021AD                     = 0x88a8
+	ETH_P_8021AH                     = 0x88e7
+	ETH_P_8021Q                      = 0x8100
+	ETH_P_80221                      = 0x8917
+	ETH_P_802_2                      = 0x4
+	ETH_P_802_3                      = 0x1
+	ETH_P_802_3_MIN                  = 0x600
+	ETH_P_802_EX1                    = 0x88b5
+	ETH_P_AARP                       = 0x80f3
+	ETH_P_AF_IUCV                    = 0xfbfb
+	ETH_P_ALL                        = 0x3
+	ETH_P_AOE                        = 0x88a2
+	ETH_P_ARCNET                     = 0x1a
+	ETH_P_ARP                        = 0x806
+	ETH_P_ATALK                      = 0x809b
+	ETH_P_ATMFATE                    = 0x8884
+	ETH_P_ATMMPOA                    = 0x884c
+	ETH_P_AX25                       = 0x2
+	ETH_P_BATMAN                     = 0x4305
+	ETH_P_BPQ                        = 0x8ff
+	ETH_P_CAIF                       = 0xf7
+	ETH_P_CAN                        = 0xc
+	ETH_P_CANFD                      = 0xd
+	ETH_P_CONTROL                    = 0x16
+	ETH_P_CUST                       = 0x6006
+	ETH_P_DDCMP                      = 0x6
+	ETH_P_DEC                        = 0x6000
+	ETH_P_DIAG                       = 0x6005
+	ETH_P_DNA_DL                     = 0x6001
+	ETH_P_DNA_RC                     = 0x6002
+	ETH_P_DNA_RT                     = 0x6003
+	ETH_P_DSA                        = 0x1b
+	ETH_P_ECONET                     = 0x18
+	ETH_P_EDSA                       = 0xdada
+	ETH_P_FCOE                       = 0x8906
+	ETH_P_FIP                        = 0x8914
+	ETH_P_HDLC                       = 0x19
+	ETH_P_IEEE802154                 = 0xf6
+	ETH_P_IEEEPUP                    = 0xa00
+	ETH_P_IEEEPUPAT                  = 0xa01
+	ETH_P_IP                         = 0x800
+	ETH_P_IPV6                       = 0x86dd
+	ETH_P_IPX                        = 0x8137
+	ETH_P_IRDA                       = 0x17
+	ETH_P_LAT                        = 0x6004
+	ETH_P_LINK_CTL                   = 0x886c
+	ETH_P_LOCALTALK                  = 0x9
+	ETH_P_LOOP                       = 0x60
+	ETH_P_LOOPBACK                   = 0x9000
+	ETH_P_MOBITEX                    = 0x15
+	ETH_P_MPLS_MC                    = 0x8848
+	ETH_P_MPLS_UC                    = 0x8847
+	ETH_P_MVRP                       = 0x88f5
+	ETH_P_PAE                        = 0x888e
+	ETH_P_PAUSE                      = 0x8808
+	ETH_P_PHONET                     = 0xf5
+	ETH_P_PPPTALK                    = 0x10
+	ETH_P_PPP_DISC                   = 0x8863
+	ETH_P_PPP_MP                     = 0x8
+	ETH_P_PPP_SES                    = 0x8864
+	ETH_P_PRP                        = 0x88fb
+	ETH_P_PUP                        = 0x200
+	ETH_P_PUPAT                      = 0x201
+	ETH_P_QINQ1                      = 0x9100
+	ETH_P_QINQ2                      = 0x9200
+	ETH_P_QINQ3                      = 0x9300
+	ETH_P_RARP                       = 0x8035
+	ETH_P_SCA                        = 0x6007
+	ETH_P_SLOW                       = 0x8809
+	ETH_P_SNAP                       = 0x5
+	ETH_P_TDLS                       = 0x890d
+	ETH_P_TEB                        = 0x6558
+	ETH_P_TIPC                       = 0x88ca
+	ETH_P_TRAILER                    = 0x1c
+	ETH_P_TR_802_2                   = 0x11
+	ETH_P_TSN                        = 0x22f0
+	ETH_P_WAN_PPP                    = 0x7
+	ETH_P_WCCP                       = 0x883e
+	ETH_P_X25                        = 0x805
+	ETH_P_XDSA                       = 0xf8
+	EXTA                             = 0xe
+	EXTB                             = 0xf
+	EXTPROC                          = 0x10000
+	FD_CLOEXEC                       = 0x1
+	FD_SETSIZE                       = 0x400
+	FLUSHO                           = 0x2000
+	F_DUPFD                          = 0x0
+	F_DUPFD_CLOEXEC                  = 0x406
+	F_EXLCK                          = 0x4
+	F_GETFD                          = 0x1
+	F_GETFL                          = 0x3
+	F_GETLEASE                       = 0x401
+	F_GETLK                          = 0xe
+	F_GETLK64                        = 0xe
+	F_GETOWN                         = 0x17
+	F_GETOWN_EX                      = 0x10
+	F_GETPIPE_SZ                     = 0x408
+	F_GETSIG                         = 0xb
+	F_LOCK                           = 0x1
+	F_NOTIFY                         = 0x402
+	F_OFD_GETLK                      = 0x24
+	F_OFD_SETLK                      = 0x25
+	F_OFD_SETLKW                     = 0x26
+	F_OK                             = 0x0
+	F_RDLCK                          = 0x0
+	F_SETFD                          = 0x2
+	F_SETFL                          = 0x4
+	F_SETLEASE                       = 0x400
+	F_SETLK                          = 0x6
+	F_SETLK64                        = 0x6
+	F_SETLKW                         = 0x7
+	F_SETLKW64                       = 0x7
+	F_SETOWN                         = 0x18
+	F_SETOWN_EX                      = 0xf
+	F_SETPIPE_SZ                     = 0x407
+	F_SETSIG                         = 0xa
+	F_SHLCK                          = 0x8
+	F_TEST                           = 0x3
+	F_TLOCK                          = 0x2
+	F_ULOCK                          = 0x0
+	F_UNLCK                          = 0x2
+	F_WRLCK                          = 0x1
+	HUPCL                            = 0x400
+	ICANON                           = 0x2
+	ICMPV6_FILTER                    = 0x1
+	ICRNL                            = 0x100
+	IEXTEN                           = 0x100
+	IFA_F_DADFAILED                  = 0x8
+	IFA_F_DEPRECATED                 = 0x20
+	IFA_F_HOMEADDRESS                = 0x10
+	IFA_F_MANAGETEMPADDR             = 0x100
+	IFA_F_MCAUTOJOIN                 = 0x400
+	IFA_F_NODAD                      = 0x2
+	IFA_F_NOPREFIXROUTE              = 0x200
+	IFA_F_OPTIMISTIC                 = 0x4
+	IFA_F_PERMANENT                  = 0x80
+	IFA_F_SECONDARY                  = 0x1
+	IFA_F_STABLE_PRIVACY             = 0x800
+	IFA_F_TEMPORARY                  = 0x1
+	IFA_F_TENTATIVE                  = 0x40
+	IFA_MAX                          = 0x8
+	IFF_ALLMULTI                     = 0x200
+	IFF_ATTACH_QUEUE                 = 0x200
+	IFF_AUTOMEDIA                    = 0x4000
+	IFF_BROADCAST                    = 0x2
+	IFF_DEBUG                        = 0x4
+	IFF_DETACH_QUEUE                 = 0x400
+	IFF_DORMANT                      = 0x20000
+	IFF_DYNAMIC                      = 0x8000
+	IFF_ECHO                         = 0x40000
+	IFF_LOOPBACK                     = 0x8
+	IFF_LOWER_UP                     = 0x10000
+	IFF_MASTER                       = 0x400
+	IFF_MULTICAST                    = 0x1000
+	IFF_MULTI_QUEUE                  = 0x100
+	IFF_NOARP                        = 0x80
+	IFF_NOFILTER                     = 0x1000
+	IFF_NOTRAILERS                   = 0x20
+	IFF_NO_PI                        = 0x1000
+	IFF_ONE_QUEUE                    = 0x2000
+	IFF_PERSIST                      = 0x800
+	IFF_POINTOPOINT                  = 0x10
+	IFF_PORTSEL                      = 0x2000
+	IFF_PROMISC                      = 0x100
+	IFF_RUNNING                      = 0x40
+	IFF_SLAVE                        = 0x800
+	IFF_TAP                          = 0x2
+	IFF_TUN                          = 0x1
+	IFF_TUN_EXCL                     = 0x8000
+	IFF_UP                           = 0x1
+	IFF_VNET_HDR                     = 0x4000
+	IFF_VOLATILE                     = 0x70c5a
+	IFNAMSIZ                         = 0x10
+	IGNBRK                           = 0x1
+	IGNCR                            = 0x80
+	IGNPAR                           = 0x4
+	IMAXBEL                          = 0x2000
+	INLCR                            = 0x40
+	INPCK                            = 0x10
+	IN_ACCESS                        = 0x1
+	IN_ALL_EVENTS                    = 0xfff
+	IN_ATTRIB                        = 0x4
+	IN_CLASSA_HOST                   = 0xffffff
+	IN_CLASSA_MAX                    = 0x80
+	IN_CLASSA_NET                    = 0xff000000
+	IN_CLASSA_NSHIFT                 = 0x18
+	IN_CLASSB_HOST                   = 0xffff
+	IN_CLASSB_MAX                    = 0x10000
+	IN_CLASSB_NET                    = 0xffff0000
+	IN_CLASSB_NSHIFT                 = 0x10
+	IN_CLASSC_HOST                   = 0xff
+	IN_CLASSC_NET                    = 0xffffff00
+	IN_CLASSC_NSHIFT                 = 0x8
+	IN_CLOEXEC                       = 0x80000
+	IN_CLOSE                         = 0x18
+	IN_CLOSE_NOWRITE                 = 0x10
+	IN_CLOSE_WRITE                   = 0x8
+	IN_CREATE                        = 0x100
+	IN_DELETE                        = 0x200
+	IN_DELETE_SELF                   = 0x400
+	IN_DONT_FOLLOW                   = 0x2000000
+	IN_EXCL_UNLINK                   = 0x4000000
+	IN_IGNORED                       = 0x8000
+	IN_ISDIR                         = 0x40000000
+	IN_LOOPBACKNET                   = 0x7f
+	IN_MASK_ADD                      = 0x20000000
+	IN_MODIFY                        = 0x2
+	IN_MOVE                          = 0xc0
+	IN_MOVED_FROM                    = 0x40
+	IN_MOVED_TO                      = 0x80
+	IN_MOVE_SELF                     = 0x800
+	IN_NONBLOCK                      = 0x80
+	IN_ONESHOT                       = 0x80000000
+	IN_ONLYDIR                       = 0x1000000
+	IN_OPEN                          = 0x20
+	IN_Q_OVERFLOW                    = 0x4000
+	IN_UNMOUNT                       = 0x2000
+	IPPROTO_AH                       = 0x33
+	IPPROTO_BEETPH                   = 0x5e
+	IPPROTO_COMP                     = 0x6c
+	IPPROTO_DCCP                     = 0x21
+	IPPROTO_DSTOPTS                  = 0x3c
+	IPPROTO_EGP                      = 0x8
+	IPPROTO_ENCAP                    = 0x62
+	IPPROTO_ESP                      = 0x32
+	IPPROTO_FRAGMENT                 = 0x2c
+	IPPROTO_GRE                      = 0x2f
+	IPPROTO_HOPOPTS                  = 0x0
+	IPPROTO_ICMP                     = 0x1
+	IPPROTO_ICMPV6                   = 0x3a
+	IPPROTO_IDP                      = 0x16
+	IPPROTO_IGMP                     = 0x2
+	IPPROTO_IP                       = 0x0
+	IPPROTO_IPIP                     = 0x4
+	IPPROTO_IPV6                     = 0x29
+	IPPROTO_MH                       = 0x87
+	IPPROTO_MTP                      = 0x5c
+	IPPROTO_NONE                     = 0x3b
+	IPPROTO_PIM                      = 0x67
+	IPPROTO_PUP                      = 0xc
+	IPPROTO_RAW                      = 0xff
+	IPPROTO_ROUTING                  = 0x2b
+	IPPROTO_RSVP                     = 0x2e
+	IPPROTO_SCTP                     = 0x84
+	IPPROTO_TCP                      = 0x6
+	IPPROTO_TP                       = 0x1d
+	IPPROTO_UDP                      = 0x11
+	IPPROTO_UDPLITE                  = 0x88
+	IPV6_2292DSTOPTS                 = 0x4
+	IPV6_2292HOPLIMIT                = 0x8
+	IPV6_2292HOPOPTS                 = 0x3
+	IPV6_2292PKTINFO                 = 0x2
+	IPV6_2292PKTOPTIONS              = 0x6
+	IPV6_2292RTHDR                   = 0x5
+	IPV6_ADDRFORM                    = 0x1
+	IPV6_ADD_MEMBERSHIP              = 0x14
+	IPV6_AUTHHDR                     = 0xa
+	IPV6_CHECKSUM                    = 0x7
+	IPV6_DONTFRAG                    = 0x3e
+	IPV6_DROP_MEMBERSHIP             = 0x15
+	IPV6_DSTOPTS                     = 0x3b
+	IPV6_HOPLIMIT                    = 0x34
+	IPV6_HOPOPTS                     = 0x36
+	IPV6_IPSEC_POLICY                = 0x22
+	IPV6_JOIN_ANYCAST                = 0x1b
+	IPV6_JOIN_GROUP                  = 0x14
+	IPV6_LEAVE_ANYCAST               = 0x1c
+	IPV6_LEAVE_GROUP                 = 0x15
+	IPV6_MTU                         = 0x18
+	IPV6_MTU_DISCOVER                = 0x17
+	IPV6_MULTICAST_HOPS              = 0x12
+	IPV6_MULTICAST_IF                = 0x11
+	IPV6_MULTICAST_LOOP              = 0x13
+	IPV6_NEXTHOP                     = 0x9
+	IPV6_PATHMTU                     = 0x3d
+	IPV6_PKTINFO                     = 0x32
+	IPV6_PMTUDISC_DO                 = 0x2
+	IPV6_PMTUDISC_DONT               = 0x0
+	IPV6_PMTUDISC_INTERFACE          = 0x4
+	IPV6_PMTUDISC_OMIT               = 0x5
+	IPV6_PMTUDISC_PROBE              = 0x3
+	IPV6_PMTUDISC_WANT               = 0x1
+	IPV6_RECVDSTOPTS                 = 0x3a
+	IPV6_RECVERR                     = 0x19
+	IPV6_RECVHOPLIMIT                = 0x33
+	IPV6_RECVHOPOPTS                 = 0x35
+	IPV6_RECVPATHMTU                 = 0x3c
+	IPV6_RECVPKTINFO                 = 0x31
+	IPV6_RECVRTHDR                   = 0x38
+	IPV6_RECVTCLASS                  = 0x42
+	IPV6_ROUTER_ALERT                = 0x16
+	IPV6_RTHDR                       = 0x39
+	IPV6_RTHDRDSTOPTS                = 0x37
+	IPV6_RTHDR_LOOSE                 = 0x0
+	IPV6_RTHDR_STRICT                = 0x1
+	IPV6_RTHDR_TYPE_0                = 0x0
+	IPV6_RXDSTOPTS                   = 0x3b
+	IPV6_RXHOPOPTS                   = 0x36
+	IPV6_TCLASS                      = 0x43
+	IPV6_UNICAST_HOPS                = 0x10
+	IPV6_V6ONLY                      = 0x1a
+	IPV6_XFRM_POLICY                 = 0x23
+	IP_ADD_MEMBERSHIP                = 0x23
+	IP_ADD_SOURCE_MEMBERSHIP         = 0x27
+	IP_BLOCK_SOURCE                  = 0x26
+	IP_CHECKSUM                      = 0x17
+	IP_DEFAULT_MULTICAST_LOOP        = 0x1
+	IP_DEFAULT_MULTICAST_TTL         = 0x1
+	IP_DF                            = 0x4000
+	IP_DROP_MEMBERSHIP               = 0x24
+	IP_DROP_SOURCE_MEMBERSHIP        = 0x28
+	IP_FREEBIND                      = 0xf
+	IP_HDRINCL                       = 0x3
+	IP_IPSEC_POLICY                  = 0x10
+	IP_MAXPACKET                     = 0xffff
+	IP_MAX_MEMBERSHIPS               = 0x14
+	IP_MF                            = 0x2000
+	IP_MINTTL                        = 0x15
+	IP_MSFILTER                      = 0x29
+	IP_MSS                           = 0x240
+	IP_MTU                           = 0xe
+	IP_MTU_DISCOVER                  = 0xa
+	IP_MULTICAST_ALL                 = 0x31
+	IP_MULTICAST_IF                  = 0x20
+	IP_MULTICAST_LOOP                = 0x22
+	IP_MULTICAST_TTL                 = 0x21
+	IP_NODEFRAG                      = 0x16
+	IP_OFFMASK                       = 0x1fff
+	IP_OPTIONS                       = 0x4
+	IP_ORIGDSTADDR                   = 0x14
+	IP_PASSSEC                       = 0x12
+	IP_PKTINFO                       = 0x8
+	IP_PKTOPTIONS                    = 0x9
+	IP_PMTUDISC                      = 0xa
+	IP_PMTUDISC_DO                   = 0x2
+	IP_PMTUDISC_DONT                 = 0x0
+	IP_PMTUDISC_INTERFACE            = 0x4
+	IP_PMTUDISC_OMIT                 = 0x5
+	IP_PMTUDISC_PROBE                = 0x3
+	IP_PMTUDISC_WANT                 = 0x1
+	IP_RECVERR                       = 0xb
+	IP_RECVOPTS                      = 0x6
+	IP_RECVORIGDSTADDR               = 0x14
+	IP_RECVRETOPTS                   = 0x7
+	IP_RECVTOS                       = 0xd
+	IP_RECVTTL                       = 0xc
+	IP_RETOPTS                       = 0x7
+	IP_RF                            = 0x8000
+	IP_ROUTER_ALERT                  = 0x5
+	IP_TOS                           = 0x1
+	IP_TRANSPARENT                   = 0x13
+	IP_TTL                           = 0x2
+	IP_UNBLOCK_SOURCE                = 0x25
+	IP_UNICAST_IF                    = 0x32
+	IP_XFRM_POLICY                   = 0x11
+	ISIG                             = 0x1
+	ISTRIP                           = 0x20
+	IUTF8                            = 0x4000
+	IXANY                            = 0x800
+	IXOFF                            = 0x1000
+	IXON                             = 0x400
+	LINUX_REBOOT_CMD_CAD_OFF         = 0x0
+	LINUX_REBOOT_CMD_CAD_ON          = 0x89abcdef
+	LINUX_REBOOT_CMD_HALT            = 0xcdef0123
+	LINUX_REBOOT_CMD_KEXEC           = 0x45584543
+	LINUX_REBOOT_CMD_POWER_OFF       = 0x4321fedc
+	LINUX_REBOOT_CMD_RESTART         = 0x1234567
+	LINUX_REBOOT_CMD_RESTART2        = 0xa1b2c3d4
+	LINUX_REBOOT_CMD_SW_SUSPEND      = 0xd000fce2
+	LINUX_REBOOT_MAGIC1              = 0xfee1dead
+	LINUX_REBOOT_MAGIC2              = 0x28121969
+	LOCK_EX                          = 0x2
+	LOCK_NB                          = 0x4
+	LOCK_SH                          = 0x1
+	LOCK_UN                          = 0x8
+	MADV_DODUMP                      = 0x11
+	MADV_DOFORK                      = 0xb
+	MADV_DONTDUMP                    = 0x10
+	MADV_DONTFORK                    = 0xa
+	MADV_DONTNEED                    = 0x4
+	MADV_HUGEPAGE                    = 0xe
+	MADV_HWPOISON                    = 0x64
+	MADV_MERGEABLE                   = 0xc
+	MADV_NOHUGEPAGE                  = 0xf
+	MADV_NORMAL                      = 0x0
+	MADV_RANDOM                      = 0x1
+	MADV_REMOVE                      = 0x9
+	MADV_SEQUENTIAL                  = 0x2
+	MADV_UNMERGEABLE                 = 0xd
+	MADV_WILLNEED                    = 0x3
+	MAP_ANON                         = 0x800
+	MAP_ANONYMOUS                    = 0x800
+	MAP_DENYWRITE                    = 0x2000
+	MAP_EXECUTABLE                   = 0x4000
+	MAP_FILE                         = 0x0
+	MAP_FIXED                        = 0x10
+	MAP_GROWSDOWN                    = 0x1000
+	MAP_HUGETLB                      = 0x80000
+	MAP_HUGE_MASK                    = 0x3f
+	MAP_HUGE_SHIFT                   = 0x1a
+	MAP_LOCKED                       = 0x8000
+	MAP_NONBLOCK                     = 0x20000
+	MAP_NORESERVE                    = 0x400
+	MAP_POPULATE                     = 0x10000
+	MAP_PRIVATE                      = 0x2
+	MAP_RENAME                       = 0x800
+	MAP_SHARED                       = 0x1
+	MAP_STACK                        = 0x40000
+	MAP_TYPE                         = 0xf
+	MCL_CURRENT                      = 0x1
+	MCL_FUTURE                       = 0x2
+	MNT_DETACH                       = 0x2
+	MNT_EXPIRE                       = 0x4
+	MNT_FORCE                        = 0x1
+	MSG_CMSG_CLOEXEC                 = 0x40000000
+	MSG_CONFIRM                      = 0x800
+	MSG_CTRUNC                       = 0x8
+	MSG_DONTROUTE                    = 0x4
+	MSG_DONTWAIT                     = 0x40
+	MSG_EOR                          = 0x80
+	MSG_ERRQUEUE                     = 0x2000
+	MSG_FASTOPEN                     = 0x20000000
+	MSG_FIN                          = 0x200
+	MSG_MORE                         = 0x8000
+	MSG_NOSIGNAL                     = 0x4000
+	MSG_OOB                          = 0x1
+	MSG_PEEK                         = 0x2
+	MSG_PROXY                        = 0x10
+	MSG_RST                          = 0x1000
+	MSG_SYN                          = 0x400
+	MSG_TRUNC                        = 0x20
+	MSG_TRYHARD                      = 0x4
+	MSG_WAITALL                      = 0x100
+	MSG_WAITFORONE                   = 0x10000
+	MS_ACTIVE                        = 0x40000000
+	MS_ASYNC                         = 0x1
+	MS_BIND                          = 0x1000
+	MS_DIRSYNC                       = 0x80
+	MS_INVALIDATE                    = 0x2
+	MS_I_VERSION                     = 0x800000
+	MS_KERNMOUNT                     = 0x400000
+	MS_LAZYTIME                      = 0x2000000
+	MS_MANDLOCK                      = 0x40
+	MS_MGC_MSK                       = 0xffff0000
+	MS_MGC_VAL                       = 0xc0ed0000
+	MS_MOVE                          = 0x2000
+	MS_NOATIME                       = 0x400
+	MS_NODEV                         = 0x4
+	MS_NODIRATIME                    = 0x800
+	MS_NOEXEC                        = 0x8
+	MS_NOSUID                        = 0x2
+	MS_NOUSER                        = -0x80000000
+	MS_POSIXACL                      = 0x10000
+	MS_PRIVATE                       = 0x40000
+	MS_RDONLY                        = 0x1
+	MS_REC                           = 0x4000
+	MS_RELATIME                      = 0x200000
+	MS_REMOUNT                       = 0x20
+	MS_RMT_MASK                      = 0x2800051
+	MS_SHARED                        = 0x100000
+	MS_SILENT                        = 0x8000
+	MS_SLAVE                         = 0x80000
+	MS_STRICTATIME                   = 0x1000000
+	MS_SYNC                          = 0x4
+	MS_SYNCHRONOUS                   = 0x10
+	MS_UNBINDABLE                    = 0x20000
+	NAME_MAX                         = 0xff
+	NETLINK_ADD_MEMBERSHIP           = 0x1
+	NETLINK_AUDIT                    = 0x9
+	NETLINK_BROADCAST_ERROR          = 0x4
+	NETLINK_CAP_ACK                  = 0xa
+	NETLINK_CONNECTOR                = 0xb
+	NETLINK_CRYPTO                   = 0x15
+	NETLINK_DNRTMSG                  = 0xe
+	NETLINK_DROP_MEMBERSHIP          = 0x2
+	NETLINK_ECRYPTFS                 = 0x13
+	NETLINK_FIB_LOOKUP               = 0xa
+	NETLINK_FIREWALL                 = 0x3
+	NETLINK_GENERIC                  = 0x10
+	NETLINK_INET_DIAG                = 0x4
+	NETLINK_IP6_FW                   = 0xd
+	NETLINK_ISCSI                    = 0x8
+	NETLINK_KOBJECT_UEVENT           = 0xf
+	NETLINK_LISTEN_ALL_NSID          = 0x8
+	NETLINK_LIST_MEMBERSHIPS         = 0x9
+	NETLINK_NETFILTER                = 0xc
+	NETLINK_NFLOG                    = 0x5
+	NETLINK_NO_ENOBUFS               = 0x5
+	NETLINK_PKTINFO                  = 0x3
+	NETLINK_RDMA                     = 0x14
+	NETLINK_ROUTE                    = 0x0
+	NETLINK_RX_RING                  = 0x6
+	NETLINK_SCSITRANSPORT            = 0x12
+	NETLINK_SELINUX                  = 0x7
+	NETLINK_SOCK_DIAG                = 0x4
+	NETLINK_TX_RING                  = 0x7
+	NETLINK_UNUSED                   = 0x1
+	NETLINK_USERSOCK                 = 0x2
+	NETLINK_XFRM                     = 0x6
+	NLA_ALIGNTO                      = 0x4
+	NLA_F_NESTED                     = 0x8000
+	NLA_F_NET_BYTEORDER              = 0x4000
+	NLA_HDRLEN                       = 0x4
+	NLMSG_ALIGNTO                    = 0x4
+	NLMSG_DONE                       = 0x3
+	NLMSG_ERROR                      = 0x2
+	NLMSG_HDRLEN                     = 0x10
+	NLMSG_MIN_TYPE                   = 0x10
+	NLMSG_NOOP                       = 0x1
+	NLMSG_OVERRUN                    = 0x4
+	NLM_F_ACK                        = 0x4
+	NLM_F_APPEND                     = 0x800
+	NLM_F_ATOMIC                     = 0x400
+	NLM_F_CREATE                     = 0x400
+	NLM_F_DUMP                       = 0x300
+	NLM_F_DUMP_INTR                  = 0x10
+	NLM_F_ECHO                       = 0x8
+	NLM_F_EXCL                       = 0x200
+	NLM_F_MATCH                      = 0x200
+	NLM_F_MULTI                      = 0x2
+	NLM_F_REPLACE                    = 0x100
+	NLM_F_REQUEST                    = 0x1
+	NLM_F_ROOT                       = 0x100
+	NOFLSH                           = 0x80
+	OCRNL                            = 0x8
+	OFDEL                            = 0x80
+	OFILL                            = 0x40
+	ONLCR                            = 0x4
+	ONLRET                           = 0x20
+	ONOCR                            = 0x10
+	OPOST                            = 0x1
+	O_ACCMODE                        = 0x3
+	O_APPEND                         = 0x8
+	O_ASYNC                          = 0x1000
+	O_CLOEXEC                        = 0x80000
+	O_CREAT                          = 0x100
+	O_DIRECT                         = 0x8000
+	O_DIRECTORY                      = 0x10000
+	O_DSYNC                          = 0x10
+	O_EXCL                           = 0x400
+	O_FSYNC                          = 0x4010
+	O_LARGEFILE                      = 0x0
+	O_NDELAY                         = 0x80
+	O_NOATIME                        = 0x40000
+	O_NOCTTY                         = 0x800
+	O_NOFOLLOW                       = 0x20000
+	O_NONBLOCK                       = 0x80
+	O_PATH                           = 0x200000
+	O_RDONLY                         = 0x0
+	O_RDWR                           = 0x2
+	O_RSYNC                          = 0x4010
+	O_SYNC                           = 0x4010
+	O_TMPFILE                        = 0x410000
+	O_TRUNC                          = 0x200
+	O_WRONLY                         = 0x1
+	PACKET_ADD_MEMBERSHIP            = 0x1
+	PACKET_AUXDATA                   = 0x8
+	PACKET_BROADCAST                 = 0x1
+	PACKET_COPY_THRESH               = 0x7
+	PACKET_DROP_MEMBERSHIP           = 0x2
+	PACKET_FANOUT                    = 0x12
+	PACKET_FANOUT_CBPF               = 0x6
+	PACKET_FANOUT_CPU                = 0x2
+	PACKET_FANOUT_DATA               = 0x16
+	PACKET_FANOUT_EBPF               = 0x7
+	PACKET_FANOUT_FLAG_DEFRAG        = 0x8000
+	PACKET_FANOUT_FLAG_ROLLOVER      = 0x1000
+	PACKET_FANOUT_HASH               = 0x0
+	PACKET_FANOUT_LB                 = 0x1
+	PACKET_FANOUT_QM                 = 0x5
+	PACKET_FANOUT_RND                = 0x4
+	PACKET_FANOUT_ROLLOVER           = 0x3
+	PACKET_FASTROUTE                 = 0x6
+	PACKET_HDRLEN                    = 0xb
+	PACKET_HOST                      = 0x0
+	PACKET_KERNEL                    = 0x7
+	PACKET_LOOPBACK                  = 0x5
+	PACKET_LOSS                      = 0xe
+	PACKET_MR_ALLMULTI               = 0x2
+	PACKET_MR_MULTICAST              = 0x0
+	PACKET_MR_PROMISC                = 0x1
+	PACKET_MR_UNICAST                = 0x3
+	PACKET_MULTICAST                 = 0x2
+	PACKET_ORIGDEV                   = 0x9
+	PACKET_OTHERHOST                 = 0x3
+	PACKET_OUTGOING                  = 0x4
+	PACKET_QDISC_BYPASS              = 0x14
+	PACKET_RECV_OUTPUT               = 0x3
+	PACKET_RESERVE                   = 0xc
+	PACKET_ROLLOVER_STATS            = 0x15
+	PACKET_RX_RING                   = 0x5
+	PACKET_STATISTICS                = 0x6
+	PACKET_TIMESTAMP                 = 0x11
+	PACKET_TX_HAS_OFF                = 0x13
+	PACKET_TX_RING                   = 0xd
+	PACKET_TX_TIMESTAMP              = 0x10
+	PACKET_USER                      = 0x6
+	PACKET_VERSION                   = 0xa
+	PACKET_VNET_HDR                  = 0xf
+	PARENB                           = 0x100
+	PARITY_CRC16_PR0                 = 0x2
+	PARITY_CRC16_PR0_CCITT           = 0x4
+	PARITY_CRC16_PR1                 = 0x3
+	PARITY_CRC16_PR1_CCITT           = 0x5
+	PARITY_CRC32_PR0_CCITT           = 0x6
+	PARITY_CRC32_PR1_CCITT           = 0x7
+	PARITY_DEFAULT                   = 0x0
+	PARITY_NONE                      = 0x1
+	PARMRK                           = 0x8
+	PARODD                           = 0x200
+	PENDIN                           = 0x4000
+	PRIO_PGRP                        = 0x1
+	PRIO_PROCESS                     = 0x0
+	PRIO_USER                        = 0x2
+	PROT_EXEC                        = 0x4
+	PROT_GROWSDOWN                   = 0x1000000
+	PROT_GROWSUP                     = 0x2000000
+	PROT_NONE                        = 0x0
+	PROT_READ                        = 0x1
+	PROT_WRITE                       = 0x2
+	PR_CAPBSET_DROP                  = 0x18
+	PR_CAPBSET_READ                  = 0x17
+	PR_CAP_AMBIENT                   = 0x2f
+	PR_CAP_AMBIENT_CLEAR_ALL         = 0x4
+	PR_CAP_AMBIENT_IS_SET            = 0x1
+	PR_CAP_AMBIENT_LOWER             = 0x3
+	PR_CAP_AMBIENT_RAISE             = 0x2
+	PR_ENDIAN_BIG                    = 0x0
+	PR_ENDIAN_LITTLE                 = 0x1
+	PR_ENDIAN_PPC_LITTLE             = 0x2
+	PR_FPEMU_NOPRINT                 = 0x1
+	PR_FPEMU_SIGFPE                  = 0x2
+	PR_FP_EXC_ASYNC                  = 0x2
+	PR_FP_EXC_DISABLED               = 0x0
+	PR_FP_EXC_DIV                    = 0x10000
+	PR_FP_EXC_INV                    = 0x100000
+	PR_FP_EXC_NONRECOV               = 0x1
+	PR_FP_EXC_OVF                    = 0x20000
+	PR_FP_EXC_PRECISE                = 0x3
+	PR_FP_EXC_RES                    = 0x80000
+	PR_FP_EXC_SW_ENABLE              = 0x80
+	PR_FP_EXC_UND                    = 0x40000
+	PR_FP_MODE_FR                    = 0x1
+	PR_FP_MODE_FRE                   = 0x2
+	PR_GET_CHILD_SUBREAPER           = 0x25
+	PR_GET_DUMPABLE                  = 0x3
+	PR_GET_ENDIAN                    = 0x13
+	PR_GET_FPEMU                     = 0x9
+	PR_GET_FPEXC                     = 0xb
+	PR_GET_FP_MODE                   = 0x2e
+	PR_GET_KEEPCAPS                  = 0x7
+	PR_GET_NAME                      = 0x10
+	PR_GET_NO_NEW_PRIVS              = 0x27
+	PR_GET_PDEATHSIG                 = 0x2
+	PR_GET_SECCOMP                   = 0x15
+	PR_GET_SECUREBITS                = 0x1b
+	PR_GET_THP_DISABLE               = 0x2a
+	PR_GET_TID_ADDRESS               = 0x28
+	PR_GET_TIMERSLACK                = 0x1e
+	PR_GET_TIMING                    = 0xd
+	PR_GET_TSC                       = 0x19
+	PR_GET_UNALIGN                   = 0x5
+	PR_MCE_KILL                      = 0x21
+	PR_MCE_KILL_CLEAR                = 0x0
+	PR_MCE_KILL_DEFAULT              = 0x2
+	PR_MCE_KILL_EARLY                = 0x1
+	PR_MCE_KILL_GET                  = 0x22
+	PR_MCE_KILL_LATE                 = 0x0
+	PR_MCE_KILL_SET                  = 0x1
+	PR_MPX_DISABLE_MANAGEMENT        = 0x2c
+	PR_MPX_ENABLE_MANAGEMENT         = 0x2b
+	PR_SET_CHILD_SUBREAPER           = 0x24
+	PR_SET_DUMPABLE                  = 0x4
+	PR_SET_ENDIAN                    = 0x14
+	PR_SET_FPEMU                     = 0xa
+	PR_SET_FPEXC                     = 0xc
+	PR_SET_FP_MODE                   = 0x2d
+	PR_SET_KEEPCAPS                  = 0x8
+	PR_SET_MM                        = 0x23
+	PR_SET_MM_ARG_END                = 0x9
+	PR_SET_MM_ARG_START              = 0x8
+	PR_SET_MM_AUXV                   = 0xc
+	PR_SET_MM_BRK                    = 0x7
+	PR_SET_MM_END_CODE               = 0x2
+	PR_SET_MM_END_DATA               = 0x4
+	PR_SET_MM_ENV_END                = 0xb
+	PR_SET_MM_ENV_START              = 0xa
+	PR_SET_MM_EXE_FILE               = 0xd
+	PR_SET_MM_MAP                    = 0xe
+	PR_SET_MM_MAP_SIZE               = 0xf
+	PR_SET_MM_START_BRK              = 0x6
+	PR_SET_MM_START_CODE             = 0x1
+	PR_SET_MM_START_DATA             = 0x3
+	PR_SET_MM_START_STACK            = 0x5
+	PR_SET_NAME                      = 0xf
+	PR_SET_NO_NEW_PRIVS              = 0x26
+	PR_SET_PDEATHSIG                 = 0x1
+	PR_SET_PTRACER                   = 0x59616d61
+	PR_SET_PTRACER_ANY               = -0x1
+	PR_SET_SECCOMP                   = 0x16
+	PR_SET_SECUREBITS                = 0x1c
+	PR_SET_THP_DISABLE               = 0x29
+	PR_SET_TIMERSLACK                = 0x1d
+	PR_SET_TIMING                    = 0xe
+	PR_SET_TSC                       = 0x1a
+	PR_SET_UNALIGN                   = 0x6
+	PR_TASK_PERF_EVENTS_DISABLE      = 0x1f
+	PR_TASK_PERF_EVENTS_ENABLE       = 0x20
+	PR_TIMING_STATISTICAL            = 0x0
+	PR_TIMING_TIMESTAMP              = 0x1
+	PR_TSC_ENABLE                    = 0x1
+	PR_TSC_SIGSEGV                   = 0x2
+	PR_UNALIGN_NOPRINT               = 0x1
+	PR_UNALIGN_SIGBUS                = 0x2
+	PTRACE_ATTACH                    = 0x10
+	PTRACE_CONT                      = 0x7
+	PTRACE_DETACH                    = 0x11
+	PTRACE_EVENT_CLONE               = 0x3
+	PTRACE_EVENT_EXEC                = 0x4
+	PTRACE_EVENT_EXIT                = 0x6
+	PTRACE_EVENT_FORK                = 0x1
+	PTRACE_EVENT_SECCOMP             = 0x7
+	PTRACE_EVENT_STOP                = 0x80
+	PTRACE_EVENT_VFORK               = 0x2
+	PTRACE_EVENT_VFORK_DONE          = 0x5
+	PTRACE_GETEVENTMSG               = 0x4201
+	PTRACE_GETFPREGS                 = 0xe
+	PTRACE_GETREGS                   = 0xc
+	PTRACE_GETREGSET                 = 0x4204
+	PTRACE_GETSIGINFO                = 0x4202
+	PTRACE_GETSIGMASK                = 0x420a
+	PTRACE_GET_THREAD_AREA           = 0x19
+	PTRACE_GET_THREAD_AREA_3264      = 0xc4
+	PTRACE_GET_WATCH_REGS            = 0xd0
+	PTRACE_INTERRUPT                 = 0x4207
+	PTRACE_KILL                      = 0x8
+	PTRACE_LISTEN                    = 0x4208
+	PTRACE_OLDSETOPTIONS             = 0x15
+	PTRACE_O_EXITKILL                = 0x100000
+	PTRACE_O_MASK                    = 0x3000ff
+	PTRACE_O_SUSPEND_SECCOMP         = 0x200000
+	PTRACE_O_TRACECLONE              = 0x8
+	PTRACE_O_TRACEEXEC               = 0x10
+	PTRACE_O_TRACEEXIT               = 0x40
+	PTRACE_O_TRACEFORK               = 0x2
+	PTRACE_O_TRACESECCOMP            = 0x80
+	PTRACE_O_TRACESYSGOOD            = 0x1
+	PTRACE_O_TRACEVFORK              = 0x4
+	PTRACE_O_TRACEVFORKDONE          = 0x20
+	PTRACE_PEEKDATA                  = 0x2
+	PTRACE_PEEKDATA_3264             = 0xc1
+	PTRACE_PEEKSIGINFO               = 0x4209
+	PTRACE_PEEKSIGINFO_SHARED        = 0x1
+	PTRACE_PEEKTEXT                  = 0x1
+	PTRACE_PEEKTEXT_3264             = 0xc0
+	PTRACE_PEEKUSR                   = 0x3
+	PTRACE_POKEDATA                  = 0x5
+	PTRACE_POKEDATA_3264             = 0xc3
+	PTRACE_POKETEXT                  = 0x4
+	PTRACE_POKETEXT_3264             = 0xc2
+	PTRACE_POKEUSR                   = 0x6
+	PTRACE_SEIZE                     = 0x4206
+	PTRACE_SETFPREGS                 = 0xf
+	PTRACE_SETOPTIONS                = 0x4200
+	PTRACE_SETREGS                   = 0xd
+	PTRACE_SETREGSET                 = 0x4205
+	PTRACE_SETSIGINFO                = 0x4203
+	PTRACE_SETSIGMASK                = 0x420b
+	PTRACE_SET_THREAD_AREA           = 0x1a
+	PTRACE_SET_WATCH_REGS            = 0xd1
+	PTRACE_SINGLESTEP                = 0x9
+	PTRACE_SYSCALL                   = 0x18
+	PTRACE_TRACEME                   = 0x0
+	RLIMIT_AS                        = 0x6
+	RLIMIT_CORE                      = 0x4
+	RLIMIT_CPU                       = 0x0
+	RLIMIT_DATA                      = 0x2
+	RLIMIT_FSIZE                     = 0x1
+	RLIMIT_NOFILE                    = 0x5
+	RLIMIT_STACK                     = 0x3
+	RLIM_INFINITY                    = -0x1
+	RTAX_ADVMSS                      = 0x8
+	RTAX_CC_ALGO                     = 0x10
+	RTAX_CWND                        = 0x7
+	RTAX_FEATURES                    = 0xc
+	RTAX_FEATURE_ALLFRAG             = 0x8
+	RTAX_FEATURE_ECN                 = 0x1
+	RTAX_FEATURE_MASK                = 0xf
+	RTAX_FEATURE_SACK                = 0x2
+	RTAX_FEATURE_TIMESTAMP           = 0x4
+	RTAX_HOPLIMIT                    = 0xa
+	RTAX_INITCWND                    = 0xb
+	RTAX_INITRWND                    = 0xe
+	RTAX_LOCK                        = 0x1
+	RTAX_MAX                         = 0x10
+	RTAX_MTU                         = 0x2
+	RTAX_QUICKACK                    = 0xf
+	RTAX_REORDERING                  = 0x9
+	RTAX_RTO_MIN                     = 0xd
+	RTAX_RTT                         = 0x4
+	RTAX_RTTVAR                      = 0x5
+	RTAX_SSTHRESH                    = 0x6
+	RTAX_UNSPEC                      = 0x0
+	RTAX_WINDOW                      = 0x3
+	RTA_ALIGNTO                      = 0x4
+	RTA_MAX                          = 0x16
+	RTCF_DIRECTSRC                   = 0x4000000
+	RTCF_DOREDIRECT                  = 0x1000000
+	RTCF_LOG                         = 0x2000000
+	RTCF_MASQ                        = 0x400000
+	RTCF_NAT                         = 0x800000
+	RTCF_VALVE                       = 0x200000
+	RTF_ADDRCLASSMASK                = 0xf8000000
+	RTF_ADDRCONF                     = 0x40000
+	RTF_ALLONLINK                    = 0x20000
+	RTF_BROADCAST                    = 0x10000000
+	RTF_CACHE                        = 0x1000000
+	RTF_DEFAULT                      = 0x10000
+	RTF_DYNAMIC                      = 0x10
+	RTF_FLOW                         = 0x2000000
+	RTF_GATEWAY                      = 0x2
+	RTF_HOST                         = 0x4
+	RTF_INTERFACE                    = 0x40000000
+	RTF_IRTT                         = 0x100
+	RTF_LINKRT                       = 0x100000
+	RTF_LOCAL                        = 0x80000000
+	RTF_MODIFIED                     = 0x20
+	RTF_MSS                          = 0x40
+	RTF_MTU                          = 0x40
+	RTF_MULTICAST                    = 0x20000000
+	RTF_NAT                          = 0x8000000
+	RTF_NOFORWARD                    = 0x1000
+	RTF_NONEXTHOP                    = 0x200000
+	RTF_NOPMTUDISC                   = 0x4000
+	RTF_POLICY                       = 0x4000000
+	RTF_REINSTATE                    = 0x8
+	RTF_REJECT                       = 0x200
+	RTF_STATIC                       = 0x400
+	RTF_THROW                        = 0x2000
+	RTF_UP                           = 0x1
+	RTF_WINDOW                       = 0x80
+	RTF_XRESOLVE                     = 0x800
+	RTM_BASE                         = 0x10
+	RTM_DELACTION                    = 0x31
+	RTM_DELADDR                      = 0x15
+	RTM_DELADDRLABEL                 = 0x49
+	RTM_DELLINK                      = 0x11
+	RTM_DELMDB                       = 0x55
+	RTM_DELNEIGH                     = 0x1d
+	RTM_DELNSID                      = 0x59
+	RTM_DELQDISC                     = 0x25
+	RTM_DELROUTE                     = 0x19
+	RTM_DELRULE                      = 0x21
+	RTM_DELTCLASS                    = 0x29
+	RTM_DELTFILTER                   = 0x2d
+	RTM_F_CLONED                     = 0x200
+	RTM_F_EQUALIZE                   = 0x400
+	RTM_F_NOTIFY                     = 0x100
+	RTM_F_PREFIX                     = 0x800
+	RTM_GETACTION                    = 0x32
+	RTM_GETADDR                      = 0x16
+	RTM_GETADDRLABEL                 = 0x4a
+	RTM_GETANYCAST                   = 0x3e
+	RTM_GETDCB                       = 0x4e
+	RTM_GETLINK                      = 0x12
+	RTM_GETMDB                       = 0x56
+	RTM_GETMULTICAST                 = 0x3a
+	RTM_GETNEIGH                     = 0x1e
+	RTM_GETNEIGHTBL                  = 0x42
+	RTM_GETNETCONF                   = 0x52
+	RTM_GETNSID                      = 0x5a
+	RTM_GETQDISC                     = 0x26
+	RTM_GETROUTE                     = 0x1a
+	RTM_GETRULE                      = 0x22
+	RTM_GETTCLASS                    = 0x2a
+	RTM_GETTFILTER                   = 0x2e
+	RTM_MAX                          = 0x5b
+	RTM_NEWACTION                    = 0x30
+	RTM_NEWADDR                      = 0x14
+	RTM_NEWADDRLABEL                 = 0x48
+	RTM_NEWLINK                      = 0x10
+	RTM_NEWMDB                       = 0x54
+	RTM_NEWNDUSEROPT                 = 0x44
+	RTM_NEWNEIGH                     = 0x1c
+	RTM_NEWNEIGHTBL                  = 0x40
+	RTM_NEWNETCONF                   = 0x50
+	RTM_NEWNSID                      = 0x58
+	RTM_NEWPREFIX                    = 0x34
+	RTM_NEWQDISC                     = 0x24
+	RTM_NEWROUTE                     = 0x18
+	RTM_NEWRULE                      = 0x20
+	RTM_NEWTCLASS                    = 0x28
+	RTM_NEWTFILTER                   = 0x2c
+	RTM_NR_FAMILIES                  = 0x13
+	RTM_NR_MSGTYPES                  = 0x4c
+	RTM_SETDCB                       = 0x4f
+	RTM_SETLINK                      = 0x13
+	RTM_SETNEIGHTBL                  = 0x43
+	RTNH_ALIGNTO                     = 0x4
+	RTNH_COMPARE_MASK                = 0x11
+	RTNH_F_DEAD                      = 0x1
+	RTNH_F_LINKDOWN                  = 0x10
+	RTNH_F_OFFLOAD                   = 0x8
+	RTNH_F_ONLINK                    = 0x4
+	RTNH_F_PERVASIVE                 = 0x2
+	RTN_MAX                          = 0xb
+	RTPROT_BABEL                     = 0x2a
+	RTPROT_BIRD                      = 0xc
+	RTPROT_BOOT                      = 0x3
+	RTPROT_DHCP                      = 0x10
+	RTPROT_DNROUTED                  = 0xd
+	RTPROT_GATED                     = 0x8
+	RTPROT_KERNEL                    = 0x2
+	RTPROT_MROUTED                   = 0x11
+	RTPROT_MRT                       = 0xa
+	RTPROT_NTK                       = 0xf
+	RTPROT_RA                        = 0x9
+	RTPROT_REDIRECT                  = 0x1
+	RTPROT_STATIC                    = 0x4
+	RTPROT_UNSPEC                    = 0x0
+	RTPROT_XORP                      = 0xe
+	RTPROT_ZEBRA                     = 0xb
+	RT_CLASS_DEFAULT                 = 0xfd
+	RT_CLASS_LOCAL                   = 0xff
+	RT_CLASS_MAIN                    = 0xfe
+	RT_CLASS_MAX                     = 0xff
+	RT_CLASS_UNSPEC                  = 0x0
+	RUSAGE_CHILDREN                  = -0x1
+	RUSAGE_SELF                      = 0x0
+	RUSAGE_THREAD                    = 0x1
+	SCM_CREDENTIALS                  = 0x2
+	SCM_RIGHTS                       = 0x1
+	SCM_TIMESTAMP                    = 0x1d
+	SCM_TIMESTAMPING                 = 0x25
+	SCM_TIMESTAMPNS                  = 0x23
+	SCM_WIFI_STATUS                  = 0x29
+	SHUT_RD                          = 0x0
+	SHUT_RDWR                        = 0x2
+	SHUT_WR                          = 0x1
+	SIOCADDDLCI                      = 0x8980
+	SIOCADDMULTI                     = 0x8931
+	SIOCADDRT                        = 0x890b
+	SIOCATMARK                       = 0x40047307
+	SIOCDARP                         = 0x8953
+	SIOCDELDLCI                      = 0x8981
+	SIOCDELMULTI                     = 0x8932
+	SIOCDELRT                        = 0x890c
+	SIOCDEVPRIVATE                   = 0x89f0
+	SIOCDIFADDR                      = 0x8936
+	SIOCDRARP                        = 0x8960
+	SIOCGARP                         = 0x8954
+	SIOCGIFADDR                      = 0x8915
+	SIOCGIFBR                        = 0x8940
+	SIOCGIFBRDADDR                   = 0x8919
+	SIOCGIFCONF                      = 0x8912
+	SIOCGIFCOUNT                     = 0x8938
+	SIOCGIFDSTADDR                   = 0x8917
+	SIOCGIFENCAP                     = 0x8925
+	SIOCGIFFLAGS                     = 0x8913
+	SIOCGIFHWADDR                    = 0x8927
+	SIOCGIFINDEX                     = 0x8933
+	SIOCGIFMAP                       = 0x8970
+	SIOCGIFMEM                       = 0x891f
+	SIOCGIFMETRIC                    = 0x891d
+	SIOCGIFMTU                       = 0x8921
+	SIOCGIFNAME                      = 0x8910
+	SIOCGIFNETMASK                   = 0x891b
+	SIOCGIFPFLAGS                    = 0x8935
+	SIOCGIFSLAVE                     = 0x8929
+	SIOCGIFTXQLEN                    = 0x8942
+	SIOCGPGRP                        = 0x40047309
+	SIOCGRARP                        = 0x8961
+	SIOCGSTAMP                       = 0x8906
+	SIOCGSTAMPNS                     = 0x8907
+	SIOCPROTOPRIVATE                 = 0x89e0
+	SIOCRTMSG                        = 0x890d
+	SIOCSARP                         = 0x8955
+	SIOCSIFADDR                      = 0x8916
+	SIOCSIFBR                        = 0x8941
+	SIOCSIFBRDADDR                   = 0x891a
+	SIOCSIFDSTADDR                   = 0x8918
+	SIOCSIFENCAP                     = 0x8926
+	SIOCSIFFLAGS                     = 0x8914
+	SIOCSIFHWADDR                    = 0x8924
+	SIOCSIFHWBROADCAST               = 0x8937
+	SIOCSIFLINK                      = 0x8911
+	SIOCSIFMAP                       = 0x8971
+	SIOCSIFMEM                       = 0x8920
+	SIOCSIFMETRIC                    = 0x891e
+	SIOCSIFMTU                       = 0x8922
+	SIOCSIFNAME                      = 0x8923
+	SIOCSIFNETMASK                   = 0x891c
+	SIOCSIFPFLAGS                    = 0x8934
+	SIOCSIFSLAVE                     = 0x8930
+	SIOCSIFTXQLEN                    = 0x8943
+	SIOCSPGRP                        = 0x80047308
+	SIOCSRARP                        = 0x8962
+	SOCK_CLOEXEC                     = 0x80000
+	SOCK_DCCP                        = 0x6
+	SOCK_DGRAM                       = 0x1
+	SOCK_NONBLOCK                    = 0x80
+	SOCK_PACKET                      = 0xa
+	SOCK_RAW                         = 0x3
+	SOCK_RDM                         = 0x4
+	SOCK_SEQPACKET                   = 0x5
+	SOCK_STREAM                      = 0x2
+	SOL_AAL                          = 0x109
+	SOL_ATM                          = 0x108
+	SOL_DECNET                       = 0x105
+	SOL_ICMPV6                       = 0x3a
+	SOL_IP                           = 0x0
+	SOL_IPV6                         = 0x29
+	SOL_IRDA                         = 0x10a
+	SOL_PACKET                       = 0x107
+	SOL_RAW                          = 0xff
+	SOL_SOCKET                       = 0xffff
+	SOL_TCP                          = 0x6
+	SOL_X25                          = 0x106
+	SOMAXCONN                        = 0x80
+	SO_ACCEPTCONN                    = 0x1009
+	SO_ATTACH_BPF                    = 0x32
+	SO_ATTACH_FILTER                 = 0x1a
+	SO_BINDTODEVICE                  = 0x19
+	SO_BPF_EXTENSIONS                = 0x30
+	SO_BROADCAST                     = 0x20
+	SO_BSDCOMPAT                     = 0xe
+	SO_BUSY_POLL                     = 0x2e
+	SO_DEBUG                         = 0x1
+	SO_DETACH_BPF                    = 0x1b
+	SO_DETACH_FILTER                 = 0x1b
+	SO_DOMAIN                        = 0x1029
+	SO_DONTROUTE                     = 0x10
+	SO_ERROR                         = 0x1007
+	SO_GET_FILTER                    = 0x1a
+	SO_INCOMING_CPU                  = 0x31
+	SO_KEEPALIVE                     = 0x8
+	SO_LINGER                        = 0x80
+	SO_LOCK_FILTER                   = 0x2c
+	SO_MARK                          = 0x24
+	SO_MAX_PACING_RATE               = 0x2f
+	SO_NOFCS                         = 0x2b
+	SO_NO_CHECK                      = 0xb
+	SO_OOBINLINE                     = 0x100
+	SO_PASSCRED                      = 0x11
+	SO_PASSSEC                       = 0x22
+	SO_PEEK_OFF                      = 0x2a
+	SO_PEERCRED                      = 0x12
+	SO_PEERNAME                      = 0x1c
+	SO_PEERSEC                       = 0x1e
+	SO_PRIORITY                      = 0xc
+	SO_PROTOCOL                      = 0x1028
+	SO_RCVBUF                        = 0x1002
+	SO_RCVBUFFORCE                   = 0x21
+	SO_RCVLOWAT                      = 0x1004
+	SO_RCVTIMEO                      = 0x1006
+	SO_REUSEADDR                     = 0x4
+	SO_REUSEPORT                     = 0x200
+	SO_RXQ_OVFL                      = 0x28
+	SO_SECURITY_AUTHENTICATION       = 0x16
+	SO_SECURITY_ENCRYPTION_NETWORK   = 0x18
+	SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17
+	SO_SELECT_ERR_QUEUE              = 0x2d
+	SO_SNDBUF                        = 0x1001
+	SO_SNDBUFFORCE                   = 0x1f
+	SO_SNDLOWAT                      = 0x1003
+	SO_SNDTIMEO                      = 0x1005
+	SO_STYLE                         = 0x1008
+	SO_TIMESTAMP                     = 0x1d
+	SO_TIMESTAMPING                  = 0x25
+	SO_TIMESTAMPNS                   = 0x23
+	SO_TYPE                          = 0x1008
+	SO_WIFI_STATUS                   = 0x29
+	S_BLKSIZE                        = 0x200
+	S_IEXEC                          = 0x40
+	S_IFBLK                          = 0x6000
+	S_IFCHR                          = 0x2000
+	S_IFDIR                          = 0x4000
+	S_IFIFO                          = 0x1000
+	S_IFLNK                          = 0xa000
+	S_IFMT                           = 0xf000
+	S_IFREG                          = 0x8000
+	S_IFSOCK                         = 0xc000
+	S_IREAD                          = 0x100
+	S_IRGRP                          = 0x20
+	S_IROTH                          = 0x4
+	S_IRUSR                          = 0x100
+	S_IRWXG                          = 0x38
+	S_IRWXO                          = 0x7
+	S_IRWXU                          = 0x1c0
+	S_ISGID                          = 0x400
+	S_ISUID                          = 0x800
+	S_ISVTX                          = 0x200
+	S_IWGRP                          = 0x10
+	S_IWOTH                          = 0x2
+	S_IWRITE                         = 0x80
+	S_IWUSR                          = 0x80
+	S_IXGRP                          = 0x8
+	S_IXOTH                          = 0x1
+	S_IXUSR                          = 0x40
+	TCFLSH                           = 0x5407
+	TCIFLUSH                         = 0x0
+	TCIOFLUSH                        = 0x2
+	TCOFLUSH                         = 0x1
+	TCP_CONGESTION                   = 0xd
+	TCP_COOKIE_IN_ALWAYS             = 0x1
+	TCP_COOKIE_MAX                   = 0x10
+	TCP_COOKIE_MIN                   = 0x8
+	TCP_COOKIE_OUT_NEVER             = 0x2
+	TCP_COOKIE_PAIR_SIZE             = 0x20
+	TCP_COOKIE_TRANSACTIONS          = 0xf
+	TCP_CORK                         = 0x3
+	TCP_DEFER_ACCEPT                 = 0x9
+	TCP_FASTOPEN                     = 0x17
+	TCP_INFO                         = 0xb
+	TCP_KEEPCNT                      = 0x6
+	TCP_KEEPIDLE                     = 0x4
+	TCP_KEEPINTVL                    = 0x5
+	TCP_LINGER2                      = 0x8
+	TCP_MAXSEG                       = 0x2
+	TCP_MAXWIN                       = 0xffff
+	TCP_MAX_WINSHIFT                 = 0xe
+	TCP_MD5SIG                       = 0xe
+	TCP_MD5SIG_MAXKEYLEN             = 0x50
+	TCP_MSS                          = 0x200
+	TCP_MSS_DEFAULT                  = 0x218
+	TCP_MSS_DESIRED                  = 0x4c4
+	TCP_NODELAY                      = 0x1
+	TCP_QUEUE_SEQ                    = 0x15
+	TCP_QUICKACK                     = 0xc
+	TCP_REPAIR                       = 0x13
+	TCP_REPAIR_OPTIONS               = 0x16
+	TCP_REPAIR_QUEUE                 = 0x14
+	TCP_SYNCNT                       = 0x7
+	TCP_S_DATA_IN                    = 0x4
+	TCP_S_DATA_OUT                   = 0x8
+	TCP_THIN_DUPACK                  = 0x11
+	TCP_THIN_LINEAR_TIMEOUTS         = 0x10
+	TCP_TIMESTAMP                    = 0x18
+	TCP_USER_TIMEOUT                 = 0x12
+	TCP_WINDOW_CLAMP                 = 0xa
+	TCSAFLUSH                        = 0x5410
+	TCSBRK                           = 0x5405
+	TCXONC                           = 0x5406
+	TIOCCBRK                         = 0x5428
+	TIOCCONS                         = 0x80047478
+	TIOCEXCL                         = 0x740d
+	TIOCGDEV                         = 0x40045432
+	TIOCGETD                         = 0x7400
+	TIOCGETP                         = 0x7408
+	TIOCGEXCL                        = 0x40045440
+	TIOCGICOUNT                      = 0x5492
+	TIOCGLCKTRMIOS                   = 0x548b
+	TIOCGLTC                         = 0x7474
+	TIOCGPGRP                        = 0x40047477
+	TIOCGPKT                         = 0x40045438
+	TIOCGPTLCK                       = 0x40045439
+	TIOCGPTN                         = 0x40045430
+	TIOCGRS485                       = 0x4020542e
+	TIOCGSERIAL                      = 0x5484
+	TIOCGSID                         = 0x7416
+	TIOCGSOFTCAR                     = 0x5481
+	TIOCGWINSZ                       = 0x40087468
+	TIOCINQ                          = 0x467f
+	TIOCLINUX                        = 0x5483
+	TIOCMBIC                         = 0x741c
+	TIOCMBIS                         = 0x741b
+	TIOCMGET                         = 0x741d
+	TIOCMIWAIT                       = 0x5491
+	TIOCMSET                         = 0x741a
+	TIOCM_CAR                        = 0x100
+	TIOCM_CD                         = 0x100
+	TIOCM_CTS                        = 0x40
+	TIOCM_DSR                        = 0x400
+	TIOCM_DTR                        = 0x2
+	TIOCM_LE                         = 0x1
+	TIOCM_RI                         = 0x200
+	TIOCM_RNG                        = 0x200
+	TIOCM_RTS                        = 0x4
+	TIOCM_SR                         = 0x20
+	TIOCM_ST                         = 0x10
+	TIOCNOTTY                        = 0x5471
+	TIOCNXCL                         = 0x740e
+	TIOCOUTQ                         = 0x7472
+	TIOCPKT                          = 0x5470
+	TIOCPKT_DATA                     = 0x0
+	TIOCPKT_DOSTOP                   = 0x20
+	TIOCPKT_FLUSHREAD                = 0x1
+	TIOCPKT_FLUSHWRITE               = 0x2
+	TIOCPKT_IOCTL                    = 0x40
+	TIOCPKT_NOSTOP                   = 0x10
+	TIOCPKT_START                    = 0x8
+	TIOCPKT_STOP                     = 0x4
+	TIOCSBRK                         = 0x5427
+	TIOCSCTTY                        = 0x5480
+	TIOCSERCONFIG                    = 0x5488
+	TIOCSERGETLSR                    = 0x548e
+	TIOCSERGETMULTI                  = 0x548f
+	TIOCSERGSTRUCT                   = 0x548d
+	TIOCSERGWILD                     = 0x5489
+	TIOCSERSETMULTI                  = 0x5490
+	TIOCSERSWILD                     = 0x548a
+	TIOCSER_TEMT                     = 0x1
+	TIOCSETD                         = 0x7401
+	TIOCSETN                         = 0x740a
+	TIOCSETP                         = 0x7409
+	TIOCSIG                          = 0x80045436
+	TIOCSLCKTRMIOS                   = 0x548c
+	TIOCSLTC                         = 0x7475
+	TIOCSPGRP                        = 0x80047476
+	TIOCSPTLCK                       = 0x80045431
+	TIOCSRS485                       = 0xc020542f
+	TIOCSSERIAL                      = 0x5485
+	TIOCSSOFTCAR                     = 0x5482
+	TIOCSTI                          = 0x5472
+	TIOCSWINSZ                       = 0x80087467
+	TIOCVHANGUP                      = 0x5437
+	TOSTOP                           = 0x8000
+	TUNATTACHFILTER                  = 0x801054d5
+	TUNDETACHFILTER                  = 0x801054d6
+	TUNGETFEATURES                   = 0x400454cf
+	TUNGETFILTER                     = 0x401054db
+	TUNGETIFF                        = 0x400454d2
+	TUNGETSNDBUF                     = 0x400454d3
+	TUNGETVNETBE                     = 0x400454df
+	TUNGETVNETHDRSZ                  = 0x400454d7
+	TUNGETVNETLE                     = 0x400454dd
+	TUNSETDEBUG                      = 0x800454c9
+	TUNSETGROUP                      = 0x800454ce
+	TUNSETIFF                        = 0x800454ca
+	TUNSETIFINDEX                    = 0x800454da
+	TUNSETLINK                       = 0x800454cd
+	TUNSETNOCSUM                     = 0x800454c8
+	TUNSETOFFLOAD                    = 0x800454d0
+	TUNSETOWNER                      = 0x800454cc
+	TUNSETPERSIST                    = 0x800454cb
+	TUNSETQUEUE                      = 0x800454d9
+	TUNSETSNDBUF                     = 0x800454d4
+	TUNSETTXFILTER                   = 0x800454d1
+	TUNSETVNETBE                     = 0x800454de
+	TUNSETVNETHDRSZ                  = 0x800454d8
+	TUNSETVNETLE                     = 0x800454dc
+	VDISCARD                         = 0xd
+	VEOF                             = 0x10
+	VEOL                             = 0x11
+	VEOL2                            = 0x6
+	VERASE                           = 0x2
+	VINTR                            = 0x0
+	VKILL                            = 0x3
+	VLNEXT                           = 0xf
+	VMIN                             = 0x4
+	VQUIT                            = 0x1
+	VREPRINT                         = 0xc
+	VSTART                           = 0x8
+	VSTOP                            = 0x9
+	VSUSP                            = 0xa
+	VSWTC                            = 0x7
+	VSWTCH                           = 0x7
+	VT0                              = 0x0
+	VT1                              = 0x4000
+	VTDLY                            = 0x4000
+	VTIME                            = 0x5
+	VWERASE                          = 0xe
+	WALL                             = 0x40000000
+	WCLONE                           = 0x80000000
+	WCONTINUED                       = 0x8
+	WEXITED                          = 0x4
+	WNOHANG                          = 0x1
+	WNOTHREAD                        = 0x20000000
+	WNOWAIT                          = 0x1000000
+	WORDSIZE                         = 0x40
+	WSTOPPED                         = 0x2
+	WUNTRACED                        = 0x2
+)
+
+// Errors
+const (
+	E2BIG           = syscall.Errno(0x7)
+	EACCES          = syscall.Errno(0xd)
+	EADDRINUSE      = syscall.Errno(0x7d)
+	EADDRNOTAVAIL   = syscall.Errno(0x7e)
+	EADV            = syscall.Errno(0x44)
+	EAFNOSUPPORT    = syscall.Errno(0x7c)
+	EAGAIN          = syscall.Errno(0xb)
+	EALREADY        = syscall.Errno(0x95)
+	EBADE           = syscall.Errno(0x32)
+	EBADF           = syscall.Errno(0x9)
+	EBADFD          = syscall.Errno(0x51)
+	EBADMSG         = syscall.Errno(0x4d)
+	EBADR           = syscall.Errno(0x33)
+	EBADRQC         = syscall.Errno(0x36)
+	EBADSLT         = syscall.Errno(0x37)
+	EBFONT          = syscall.Errno(0x3b)
+	EBUSY           = syscall.Errno(0x10)
+	ECANCELED       = syscall.Errno(0x9e)
+	ECHILD          = syscall.Errno(0xa)
+	ECHRNG          = syscall.Errno(0x25)
+	ECOMM           = syscall.Errno(0x46)
+	ECONNABORTED    = syscall.Errno(0x82)
+	ECONNREFUSED    = syscall.Errno(0x92)
+	ECONNRESET      = syscall.Errno(0x83)
+	EDEADLK         = syscall.Errno(0x2d)
+	EDEADLOCK       = syscall.Errno(0x38)
+	EDESTADDRREQ    = syscall.Errno(0x60)
+	EDOM            = syscall.Errno(0x21)
+	EDOTDOT         = syscall.Errno(0x49)
+	EDQUOT          = syscall.Errno(0x46d)
+	EEXIST          = syscall.Errno(0x11)
+	EFAULT          = syscall.Errno(0xe)
+	EFBIG           = syscall.Errno(0x1b)
+	EHOSTDOWN       = syscall.Errno(0x93)
+	EHOSTUNREACH    = syscall.Errno(0x94)
+	EHWPOISON       = syscall.Errno(0xa8)
+	EIDRM           = syscall.Errno(0x24)
+	EILSEQ          = syscall.Errno(0x58)
+	EINIT           = syscall.Errno(0x8d)
+	EINPROGRESS     = syscall.Errno(0x96)
+	EINTR           = syscall.Errno(0x4)
+	EINVAL          = syscall.Errno(0x16)
+	EIO             = syscall.Errno(0x5)
+	EISCONN         = syscall.Errno(0x85)
+	EISDIR          = syscall.Errno(0x15)
+	EISNAM          = syscall.Errno(0x8b)
+	EKEYEXPIRED     = syscall.Errno(0xa2)
+	EKEYREJECTED    = syscall.Errno(0xa4)
+	EKEYREVOKED     = syscall.Errno(0xa3)
+	EL2HLT          = syscall.Errno(0x2c)
+	EL2NSYNC        = syscall.Errno(0x26)
+	EL3HLT          = syscall.Errno(0x27)
+	EL3RST          = syscall.Errno(0x28)
+	ELIBACC         = syscall.Errno(0x53)
+	ELIBBAD         = syscall.Errno(0x54)
+	ELIBEXEC        = syscall.Errno(0x57)
+	ELIBMAX         = syscall.Errno(0x56)
+	ELIBSCN         = syscall.Errno(0x55)
+	ELNRNG          = syscall.Errno(0x29)
+	ELOOP           = syscall.Errno(0x5a)
+	EMEDIUMTYPE     = syscall.Errno(0xa0)
+	EMFILE          = syscall.Errno(0x18)
+	EMLINK          = syscall.Errno(0x1f)
+	EMSGSIZE        = syscall.Errno(0x61)
+	EMULTIHOP       = syscall.Errno(0x4a)
+	ENAMETOOLONG    = syscall.Errno(0x4e)
+	ENAVAIL         = syscall.Errno(0x8a)
+	ENETDOWN        = syscall.Errno(0x7f)
+	ENETRESET       = syscall.Errno(0x81)
+	ENETUNREACH     = syscall.Errno(0x80)
+	ENFILE          = syscall.Errno(0x17)
+	ENOANO          = syscall.Errno(0x35)
+	ENOBUFS         = syscall.Errno(0x84)
+	ENOCSI          = syscall.Errno(0x2b)
+	ENODATA         = syscall.Errno(0x3d)
+	ENODEV          = syscall.Errno(0x13)
+	ENOENT          = syscall.Errno(0x2)
+	ENOEXEC         = syscall.Errno(0x8)
+	ENOKEY          = syscall.Errno(0xa1)
+	ENOLCK          = syscall.Errno(0x2e)
+	ENOLINK         = syscall.Errno(0x43)
+	ENOMEDIUM       = syscall.Errno(0x9f)
+	ENOMEM          = syscall.Errno(0xc)
+	ENOMSG          = syscall.Errno(0x23)
+	ENONET          = syscall.Errno(0x40)
+	ENOPKG          = syscall.Errno(0x41)
+	ENOPROTOOPT     = syscall.Errno(0x63)
+	ENOSPC          = syscall.Errno(0x1c)
+	ENOSR           = syscall.Errno(0x3f)
+	ENOSTR          = syscall.Errno(0x3c)
+	ENOSYS          = syscall.Errno(0x59)
+	ENOTBLK         = syscall.Errno(0xf)
+	ENOTCONN        = syscall.Errno(0x86)
+	ENOTDIR         = syscall.Errno(0x14)
+	ENOTEMPTY       = syscall.Errno(0x5d)
+	ENOTNAM         = syscall.Errno(0x89)
+	ENOTRECOVERABLE = syscall.Errno(0xa6)
+	ENOTSOCK        = syscall.Errno(0x5f)
+	ENOTSUP         = syscall.Errno(0x7a)
+	ENOTTY          = syscall.Errno(0x19)
+	ENOTUNIQ        = syscall.Errno(0x50)
+	ENXIO           = syscall.Errno(0x6)
+	EOPNOTSUPP      = syscall.Errno(0x7a)
+	EOVERFLOW       = syscall.Errno(0x4f)
+	EOWNERDEAD      = syscall.Errno(0xa5)
+	EPERM           = syscall.Errno(0x1)
+	EPFNOSUPPORT    = syscall.Errno(0x7b)
+	EPIPE           = syscall.Errno(0x20)
+	EPROTO          = syscall.Errno(0x47)
+	EPROTONOSUPPORT = syscall.Errno(0x78)
+	EPROTOTYPE      = syscall.Errno(0x62)
+	ERANGE          = syscall.Errno(0x22)
+	EREMCHG         = syscall.Errno(0x52)
+	EREMDEV         = syscall.Errno(0x8e)
+	EREMOTE         = syscall.Errno(0x42)
+	EREMOTEIO       = syscall.Errno(0x8c)
+	ERESTART        = syscall.Errno(0x5b)
+	ERFKILL         = syscall.Errno(0xa7)
+	EROFS           = syscall.Errno(0x1e)
+	ESHUTDOWN       = syscall.Errno(0x8f)
+	ESOCKTNOSUPPORT = syscall.Errno(0x79)
+	ESPIPE          = syscall.Errno(0x1d)
+	ESRCH           = syscall.Errno(0x3)
+	ESRMNT          = syscall.Errno(0x45)
+	ESTALE          = syscall.Errno(0x97)
+	ESTRPIPE        = syscall.Errno(0x5c)
+	ETIME           = syscall.Errno(0x3e)
+	ETIMEDOUT       = syscall.Errno(0x91)
+	ETOOMANYREFS    = syscall.Errno(0x90)
+	ETXTBSY         = syscall.Errno(0x1a)
+	EUCLEAN         = syscall.Errno(0x87)
+	EUNATCH         = syscall.Errno(0x2a)
+	EUSERS          = syscall.Errno(0x5e)
+	EWOULDBLOCK     = syscall.Errno(0xb)
+	EXDEV           = syscall.Errno(0x12)
+	EXFULL          = syscall.Errno(0x34)
+)
+
+// Signals
+const (
+	SIGABRT   = syscall.Signal(0x6)
+	SIGALRM   = syscall.Signal(0xe)
+	SIGBUS    = syscall.Signal(0xa)
+	SIGCHLD   = syscall.Signal(0x12)
+	SIGCLD    = syscall.Signal(0x12)
+	SIGCONT   = syscall.Signal(0x19)
+	SIGEMT    = syscall.Signal(0x7)
+	SIGFPE    = syscall.Signal(0x8)
+	SIGHUP    = syscall.Signal(0x1)
+	SIGILL    = syscall.Signal(0x4)
+	SIGINT    = syscall.Signal(0x2)
+	SIGIO     = syscall.Signal(0x16)
+	SIGIOT    = syscall.Signal(0x6)
+	SIGKILL   = syscall.Signal(0x9)
+	SIGPIPE   = syscall.Signal(0xd)
+	SIGPOLL   = syscall.Signal(0x16)
+	SIGPROF   = syscall.Signal(0x1d)
+	SIGPWR    = syscall.Signal(0x13)
+	SIGQUIT   = syscall.Signal(0x3)
+	SIGSEGV   = syscall.Signal(0xb)
+	SIGSTOP   = syscall.Signal(0x17)
+	SIGSYS    = syscall.Signal(0xc)
+	SIGTERM   = syscall.Signal(0xf)
+	SIGTRAP   = syscall.Signal(0x5)
+	SIGTSTP   = syscall.Signal(0x18)
+	SIGTTIN   = syscall.Signal(0x1a)
+	SIGTTOU   = syscall.Signal(0x1b)
+	SIGURG    = syscall.Signal(0x15)
+	SIGUSR1   = syscall.Signal(0x10)
+	SIGUSR2   = syscall.Signal(0x11)
+	SIGVTALRM = syscall.Signal(0x1c)
+	SIGWINCH  = syscall.Signal(0x14)
+	SIGXCPU   = syscall.Signal(0x1e)
+	SIGXFSZ   = syscall.Signal(0x1f)
+)
+
+// Error table
+var errors = [...]string{
+	1:    "operation not permitted",
+	2:    "no such file or directory",
+	3:    "no such process",
+	4:    "interrupted system call",
+	5:    "input/output error",
+	6:    "no such device or address",
+	7:    "argument list too long",
+	8:    "exec format error",
+	9:    "bad file descriptor",
+	10:   "no child processes",
+	11:   "resource temporarily unavailable",
+	12:   "cannot allocate memory",
+	13:   "permission denied",
+	14:   "bad address",
+	15:   "block device required",
+	16:   "device or resource busy",
+	17:   "file exists",
+	18:   "invalid cross-device link",
+	19:   "no such device",
+	20:   "not a directory",
+	21:   "is a directory",
+	22:   "invalid argument",
+	23:   "too many open files in system",
+	24:   "too many open files",
+	25:   "inappropriate ioctl for device",
+	26:   "text file busy",
+	27:   "file too large",
+	28:   "no space left on device",
+	29:   "illegal seek",
+	30:   "read-only file system",
+	31:   "too many links",
+	32:   "broken pipe",
+	33:   "numerical argument out of domain",
+	34:   "numerical result out of range",
+	35:   "no message of desired type",
+	36:   "identifier removed",
+	37:   "channel number out of range",
+	38:   "level 2 not synchronized",
+	39:   "level 3 halted",
+	40:   "level 3 reset",
+	41:   "link number out of range",
+	42:   "protocol driver not attached",
+	43:   "no CSI structure available",
+	44:   "level 2 halted",
+	45:   "resource deadlock avoided",
+	46:   "no locks available",
+	50:   "invalid exchange",
+	51:   "invalid request descriptor",
+	52:   "exchange full",
+	53:   "no anode",
+	54:   "invalid request code",
+	55:   "invalid slot",
+	56:   "file locking deadlock error",
+	59:   "bad font file format",
+	60:   "device not a stream",
+	61:   "no data available",
+	62:   "timer expired",
+	63:   "out of streams resources",
+	64:   "machine is not on the network",
+	65:   "package not installed",
+	66:   "object is remote",
+	67:   "link has been severed",
+	68:   "advertise error",
+	69:   "srmount error",
+	70:   "communication error on send",
+	71:   "protocol error",
+	73:   "RFS specific error",
+	74:   "multihop attempted",
+	77:   "bad message",
+	78:   "file name too long",
+	79:   "value too large for defined data type",
+	80:   "name not unique on network",
+	81:   "file descriptor in bad state",
+	82:   "remote address changed",
+	83:   "can not access a needed shared library",
+	84:   "accessing a corrupted shared library",
+	85:   ".lib section in a.out corrupted",
+	86:   "attempting to link in too many shared libraries",
+	87:   "cannot exec a shared library directly",
+	88:   "invalid or incomplete multibyte or wide character",
+	89:   "function not implemented",
+	90:   "too many levels of symbolic links",
+	91:   "interrupted system call should be restarted",
+	92:   "streams pipe error",
+	93:   "directory not empty",
+	94:   "too many users",
+	95:   "socket operation on non-socket",
+	96:   "destination address required",
+	97:   "message too long",
+	98:   "protocol wrong type for socket",
+	99:   "protocol not available",
+	120:  "protocol not supported",
+	121:  "socket type not supported",
+	122:  "operation not supported",
+	123:  "protocol family not supported",
+	124:  "address family not supported by protocol",
+	125:  "address already in use",
+	126:  "cannot assign requested address",
+	127:  "network is down",
+	128:  "network is unreachable",
+	129:  "network dropped connection on reset",
+	130:  "software caused connection abort",
+	131:  "connection reset by peer",
+	132:  "no buffer space available",
+	133:  "transport endpoint is already connected",
+	134:  "transport endpoint is not connected",
+	135:  "structure needs cleaning",
+	137:  "not a XENIX named type file",
+	138:  "no XENIX semaphores available",
+	139:  "is a named type file",
+	140:  "remote I/O error",
+	141:  "unknown error 141",
+	142:  "unknown error 142",
+	143:  "cannot send after transport endpoint shutdown",
+	144:  "too many references: cannot splice",
+	145:  "connection timed out",
+	146:  "connection refused",
+	147:  "host is down",
+	148:  "no route to host",
+	149:  "operation already in progress",
+	150:  "operation now in progress",
+	151:  "stale file handle",
+	158:  "operation canceled",
+	159:  "no medium found",
+	160:  "wrong medium type",
+	161:  "required key not available",
+	162:  "key has expired",
+	163:  "key has been revoked",
+	164:  "key was rejected by service",
+	165:  "owner died",
+	166:  "state not recoverable",
+	167:  "operation not possible due to RF-kill",
+	168:  "memory page has hardware error",
+	1133: "disk quota exceeded",
+}
+
+// Signal table
+var signals = [...]string{
+	1:  "hangup",
+	2:  "interrupt",
+	3:  "quit",
+	4:  "illegal instruction",
+	5:  "trace/breakpoint trap",
+	6:  "aborted",
+	7:  "EMT trap",
+	8:  "floating point exception",
+	9:  "killed",
+	10: "bus error",
+	11: "segmentation fault",
+	12: "bad system call",
+	13: "broken pipe",
+	14: "alarm clock",
+	15: "terminated",
+	16: "user defined signal 1",
+	17: "user defined signal 2",
+	18: "child exited",
+	19: "power failure",
+	20: "window changed",
+	21: "urgent I/O condition",
+	22: "I/O possible",
+	23: "stopped (signal)",
+	24: "stopped",
+	25: "continued",
+	26: "stopped (tty input)",
+	27: "stopped (tty output)",
+	28: "virtual timer expired",
+	29: "profiling timer expired",
+	30: "CPU time limit exceeded",
+	31: "file size limit exceeded",
+}
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
index 9d908d7199f597bd49068dd34445c4b7c27a322c..5b90d07ed2371e81b558331cef16d762afbadfbb 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
@@ -150,6 +150,7 @@ const (
 	B75                              = 0x2
 	B921600                          = 0x16
 	B9600                            = 0xd
+	BOTHER                           = 0x1f
 	BPF_A                            = 0x10
 	BPF_ABS                          = 0x20
 	BPF_ADD                          = 0x0
@@ -193,7 +194,13 @@ const (
 	BPF_X                            = 0x8
 	BPF_XOR                          = 0xa0
 	BRKINT                           = 0x2
+	BS0                              = 0x0
+	BS1                              = 0x8000
+	BSDLY                            = 0x8000
+	CBAUD                            = 0xff
+	CBAUDEX                          = 0x0
 	CFLUSH                           = 0xf
+	CIBAUD                           = 0xff0000
 	CLOCAL                           = 0x8000
 	CLOCK_BOOTTIME                   = 0x7
 	CLOCK_BOOTTIME_ALARM             = 0x9
@@ -232,7 +239,14 @@ const (
 	CLONE_UNTRACED                   = 0x800000
 	CLONE_VFORK                      = 0x4000
 	CLONE_VM                         = 0x100
+	CMSPAR                           = 0x40000000
+	CR0                              = 0x0
+	CR1                              = 0x1000
+	CR2                              = 0x2000
+	CR3                              = 0x3000
+	CRDLY                            = 0x3000
 	CREAD                            = 0x800
+	CRTSCTS                          = 0x80000000
 	CS5                              = 0x0
 	CS6                              = 0x100
 	CS7                              = 0x200
@@ -369,6 +383,9 @@ const (
 	EXTPROC                          = 0x10000000
 	FD_CLOEXEC                       = 0x1
 	FD_SETSIZE                       = 0x400
+	FF0                              = 0x0
+	FF1                              = 0x4000
+	FFDLY                            = 0x4000
 	FLUSHO                           = 0x800000
 	F_DUPFD                          = 0x0
 	F_DUPFD_CLOEXEC                  = 0x406
@@ -407,6 +424,7 @@ const (
 	F_UNLCK                          = 0x2
 	F_WRLCK                          = 0x1
 	HUPCL                            = 0x4000
+	IBSHIFT                          = 0x10
 	ICANON                           = 0x100
 	ICMPV6_FILTER                    = 0x1
 	ICRNL                            = 0x100
@@ -635,6 +653,7 @@ const (
 	IP_XFRM_POLICY                   = 0x11
 	ISIG                             = 0x80
 	ISTRIP                           = 0x20
+	IUCLC                            = 0x1000
 	IUTF8                            = 0x4000
 	IXANY                            = 0x800
 	IXOFF                            = 0x400
@@ -772,10 +791,15 @@ const (
 	NETLINK_UNUSED                   = 0x1
 	NETLINK_USERSOCK                 = 0x2
 	NETLINK_XFRM                     = 0x6
+	NL0                              = 0x0
+	NL1                              = 0x100
+	NL2                              = 0x200
+	NL3                              = 0x300
 	NLA_ALIGNTO                      = 0x4
 	NLA_F_NESTED                     = 0x8000
 	NLA_F_NET_BYTEORDER              = 0x4000
 	NLA_HDRLEN                       = 0x4
+	NLDLY                            = 0x300
 	NLMSG_ALIGNTO                    = 0x4
 	NLMSG_DONE                       = 0x3
 	NLMSG_ERROR                      = 0x2
@@ -800,6 +824,7 @@ const (
 	OCRNL                            = 0x8
 	OFDEL                            = 0x80
 	OFILL                            = 0x40
+	OLCUC                            = 0x4
 	ONLCR                            = 0x2
 	ONLRET                           = 0x20
 	ONOCR                            = 0x10
@@ -1398,10 +1423,21 @@ const (
 	S_IXGRP                          = 0x8
 	S_IXOTH                          = 0x1
 	S_IXUSR                          = 0x40
+	TAB0                             = 0x0
+	TAB1                             = 0x400
+	TAB2                             = 0x800
+	TAB3                             = 0xc00
+	TABDLY                           = 0xc00
 	TCFLSH                           = 0x2000741f
+	TCGETA                           = 0x40147417
+	TCGETS                           = 0x402c7413
 	TCIFLUSH                         = 0x0
+	TCIOFF                           = 0x2
 	TCIOFLUSH                        = 0x2
+	TCION                            = 0x3
 	TCOFLUSH                         = 0x1
+	TCOOFF                           = 0x0
+	TCOON                            = 0x1
 	TCP_CONGESTION                   = 0xd
 	TCP_COOKIE_IN_ALWAYS             = 0x1
 	TCP_COOKIE_MAX                   = 0x10
@@ -1440,6 +1476,15 @@ const (
 	TCP_USER_TIMEOUT                 = 0x12
 	TCP_WINDOW_CLAMP                 = 0xa
 	TCSAFLUSH                        = 0x2
+	TCSBRK                           = 0x2000741d
+	TCSBRKP                          = 0x5425
+	TCSETA                           = 0x80147418
+	TCSETAF                          = 0x8014741c
+	TCSETAW                          = 0x80147419
+	TCSETS                           = 0x802c7414
+	TCSETSF                          = 0x802c7416
+	TCSETSW                          = 0x802c7415
+	TCXONC                           = 0x2000741e
 	TIOCCBRK                         = 0x5428
 	TIOCCONS                         = 0x541d
 	TIOCEXCL                         = 0x540c
@@ -1571,6 +1616,8 @@ const (
 	WORDSIZE                         = 0x40
 	WSTOPPED                         = 0x2
 	WUNTRACED                        = 0x2
+	XCASE                            = 0x4000
+	XTABS                            = 0xc00
 )
 
 // Errors
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
index ccf05a27415f59c0b0eb703c25735112b148f380..0861bd5666bbec81e3e6dc290ae626d56c76d66a 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
@@ -149,6 +149,7 @@ const (
 	B75                              = 0x2
 	B921600                          = 0x16
 	B9600                            = 0xd
+	BOTHER                           = 0x1f
 	BPF_A                            = 0x10
 	BPF_ABS                          = 0x20
 	BPF_ADD                          = 0x0
@@ -192,7 +193,13 @@ const (
 	BPF_X                            = 0x8
 	BPF_XOR                          = 0xa0
 	BRKINT                           = 0x2
+	BS0                              = 0x0
+	BS1                              = 0x8000
+	BSDLY                            = 0x8000
+	CBAUD                            = 0xff
+	CBAUDEX                          = 0x0
 	CFLUSH                           = 0xf
+	CIBAUD                           = 0xff0000
 	CLOCAL                           = 0x8000
 	CLOCK_BOOTTIME                   = 0x7
 	CLOCK_BOOTTIME_ALARM             = 0x9
@@ -231,7 +238,14 @@ const (
 	CLONE_UNTRACED                   = 0x800000
 	CLONE_VFORK                      = 0x4000
 	CLONE_VM                         = 0x100
+	CMSPAR                           = 0x40000000
+	CR0                              = 0x0
+	CR1                              = 0x1000
+	CR2                              = 0x2000
+	CR3                              = 0x3000
+	CRDLY                            = 0x3000
 	CREAD                            = 0x800
+	CRTSCTS                          = 0x80000000
 	CS5                              = 0x0
 	CS6                              = 0x100
 	CS7                              = 0x200
@@ -365,6 +379,9 @@ const (
 	EXTPROC                          = 0x10000000
 	FD_CLOEXEC                       = 0x1
 	FD_SETSIZE                       = 0x400
+	FF0                              = 0x0
+	FF1                              = 0x4000
+	FFDLY                            = 0x4000
 	FLUSHO                           = 0x800000
 	F_DUPFD                          = 0x0
 	F_DUPFD_CLOEXEC                  = 0x406
@@ -400,6 +417,7 @@ const (
 	F_UNLCK                          = 0x2
 	F_WRLCK                          = 0x1
 	HUPCL                            = 0x4000
+	IBSHIFT                          = 0x10
 	ICANON                           = 0x100
 	ICMPV6_FILTER                    = 0x1
 	ICRNL                            = 0x100
@@ -643,6 +661,7 @@ const (
 	IP_XFRM_POLICY                   = 0x11
 	ISIG                             = 0x80
 	ISTRIP                           = 0x20
+	IUCLC                            = 0x1000
 	IUTF8                            = 0x4000
 	IXANY                            = 0x800
 	IXOFF                            = 0x400
@@ -780,10 +799,15 @@ const (
 	NETLINK_UNUSED                   = 0x1
 	NETLINK_USERSOCK                 = 0x2
 	NETLINK_XFRM                     = 0x6
+	NL0                              = 0x0
+	NL1                              = 0x100
+	NL2                              = 0x200
+	NL3                              = 0x300
 	NLA_ALIGNTO                      = 0x4
 	NLA_F_NESTED                     = 0x8000
 	NLA_F_NET_BYTEORDER              = 0x4000
 	NLA_HDRLEN                       = 0x4
+	NLDLY                            = 0x300
 	NLMSG_ALIGNTO                    = 0x4
 	NLMSG_DONE                       = 0x3
 	NLMSG_ERROR                      = 0x2
@@ -808,6 +832,7 @@ const (
 	OCRNL                            = 0x8
 	OFDEL                            = 0x80
 	OFILL                            = 0x40
+	OLCUC                            = 0x4
 	ONLCR                            = 0x2
 	ONLRET                           = 0x20
 	ONOCR                            = 0x10
@@ -1397,10 +1422,21 @@ const (
 	S_IXGRP                          = 0x8
 	S_IXOTH                          = 0x1
 	S_IXUSR                          = 0x40
+	TAB0                             = 0x0
+	TAB1                             = 0x400
+	TAB2                             = 0x800
+	TAB3                             = 0xc00
+	TABDLY                           = 0xc00
 	TCFLSH                           = 0x2000741f
+	TCGETA                           = 0x40147417
+	TCGETS                           = 0x402c7413
 	TCIFLUSH                         = 0x0
+	TCIOFF                           = 0x2
 	TCIOFLUSH                        = 0x2
+	TCION                            = 0x3
 	TCOFLUSH                         = 0x1
+	TCOOFF                           = 0x0
+	TCOON                            = 0x1
 	TCP_CONGESTION                   = 0xd
 	TCP_COOKIE_IN_ALWAYS             = 0x1
 	TCP_COOKIE_MAX                   = 0x10
@@ -1439,6 +1475,15 @@ const (
 	TCP_USER_TIMEOUT                 = 0x12
 	TCP_WINDOW_CLAMP                 = 0xa
 	TCSAFLUSH                        = 0x2
+	TCSBRK                           = 0x2000741d
+	TCSBRKP                          = 0x5425
+	TCSETA                           = 0x80147418
+	TCSETAF                          = 0x8014741c
+	TCSETAW                          = 0x80147419
+	TCSETS                           = 0x802c7414
+	TCSETSF                          = 0x802c7416
+	TCSETSW                          = 0x802c7415
+	TCXONC                           = 0x2000741e
 	TIOCCBRK                         = 0x5428
 	TIOCCONS                         = 0x541d
 	TIOCEXCL                         = 0x540c
@@ -1570,6 +1615,8 @@ const (
 	WORDSIZE                         = 0x40
 	WSTOPPED                         = 0x2
 	WUNTRACED                        = 0x2
+	XCASE                            = 0x4000
+	XTABS                            = 0xc00
 )
 
 // Errors
diff --git a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go
index afdf7c565ed863c5ab27019b2736a5485f69437e..a08922b9818d5b8043ebb0e9d561e2d56006a4ff 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go
@@ -161,6 +161,14 @@ const (
 	BRKINT                        = 0x2
 	CFLUSH                        = 0xf
 	CLOCAL                        = 0x800
+	CLOCK_HIGHRES                 = 0x4
+	CLOCK_LEVEL                   = 0xa
+	CLOCK_MONOTONIC               = 0x4
+	CLOCK_PROCESS_CPUTIME_ID      = 0x5
+	CLOCK_PROF                    = 0x2
+	CLOCK_REALTIME                = 0x3
+	CLOCK_THREAD_CPUTIME_ID       = 0x2
+	CLOCK_VIRTUAL                 = 0x1
 	CREAD                         = 0x80
 	CS5                           = 0x0
 	CS6                           = 0x10
@@ -168,6 +176,7 @@ const (
 	CS8                           = 0x30
 	CSIZE                         = 0x30
 	CSTART                        = 0x11
+	CSTATUS                       = 0x14
 	CSTOP                         = 0x13
 	CSTOPB                        = 0x40
 	CSUSP                         = 0x1a
@@ -757,9 +766,7 @@ const (
 	SIOCDARP                      = -0x7fdb96e0
 	SIOCDELMULTI                  = -0x7fdf96ce
 	SIOCDELRT                     = -0x7fcf8df5
-	SIOCDIPSECONFIG               = -0x7ffb9669
 	SIOCDXARP                     = -0x7fff9658
-	SIOCFIPSECONFIG               = -0x7ffb966b
 	SIOCGARP                      = -0x3fdb96e1
 	SIOCGDSTINFO                  = -0x3fff965c
 	SIOCGENADDR                   = -0x3fdf96ab
@@ -821,7 +828,6 @@ const (
 	SIOCLIFGETND                  = -0x3f879672
 	SIOCLIFREMOVEIF               = -0x7f879692
 	SIOCLIFSETND                  = -0x7f879671
-	SIOCLIPSECONFIG               = -0x7ffb9668
 	SIOCLOWER                     = -0x7fdf96d7
 	SIOCSARP                      = -0x7fdb96e2
 	SIOCSCTPGOPT                  = -0x3fef9653
@@ -844,7 +850,6 @@ const (
 	SIOCSIFNETMASK                = -0x7fdf96e6
 	SIOCSIP6ADDRPOLICY            = -0x7fff965d
 	SIOCSIPMSFILTER               = -0x7ffb964b
-	SIOCSIPSECONFIG               = -0x7ffb966a
 	SIOCSLGETREQ                  = -0x3fdf96b9
 	SIOCSLIFADDR                  = -0x7f879690
 	SIOCSLIFBRDADDR               = -0x7f879684
@@ -951,6 +956,8 @@ const (
 	SO_VRRP                       = 0x1017
 	SO_WROFF                      = 0x2
 	TCFLSH                        = 0x5407
+	TCGETA                        = 0x5401
+	TCGETS                        = 0x540d
 	TCIFLUSH                      = 0x0
 	TCIOFLUSH                     = 0x2
 	TCOFLUSH                      = 0x1
@@ -977,6 +984,14 @@ const (
 	TCP_RTO_MAX                   = 0x1b
 	TCP_RTO_MIN                   = 0x1a
 	TCSAFLUSH                     = 0x5410
+	TCSBRK                        = 0x5405
+	TCSETA                        = 0x5402
+	TCSETAF                       = 0x5404
+	TCSETAW                       = 0x5403
+	TCSETS                        = 0x540e
+	TCSETSF                       = 0x5410
+	TCSETSW                       = 0x540f
+	TCXONC                        = 0x5406
 	TIOC                          = 0x5400
 	TIOCCBRK                      = 0x747a
 	TIOCCDTR                      = 0x7478
@@ -1052,6 +1067,7 @@ const (
 	VQUIT                         = 0x1
 	VREPRINT                      = 0xc
 	VSTART                        = 0x8
+	VSTATUS                       = 0x10
 	VSTOP                         = 0x9
 	VSUSP                         = 0xa
 	VSWTCH                        = 0x7
@@ -1215,6 +1231,7 @@ const (
 	SIGFREEZE  = syscall.Signal(0x22)
 	SIGHUP     = syscall.Signal(0x1)
 	SIGILL     = syscall.Signal(0x4)
+	SIGINFO    = syscall.Signal(0x29)
 	SIGINT     = syscall.Signal(0x2)
 	SIGIO      = syscall.Signal(0x16)
 	SIGIOT     = syscall.Signal(0x6)
@@ -1415,4 +1432,5 @@ var signals = [...]string{
 	38: "resource Control Exceeded",
 	39: "reserved for JVM 1",
 	40: "reserved for JVM 2",
+	41: "information Request",
 }
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
index e28b044e9fe73d83d5f1f01a185627f87342a282..74606b2f49925f9cf55d693ecf6bbfe0e5501fbb 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
@@ -1415,6 +1415,22 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func gettimeofday(tp *Timeval) (sec int64, usec int32, err error) {
 	r0, r1, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0)
 	sec = int64(r0)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go
index 81ae498a38f5fddea8e8f87637e64cebd76e8366..fe1f1dd240e9d42d7a35073b2bcce9b80e622fff 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go
@@ -14,7 +14,7 @@ var _ syscall.Errno
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) {
+func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(oldpath)
 	if err != nil {
@@ -788,6 +788,16 @@ func prlimit(pid int, resource int, old *Rlimit, newlimit *Rlimit) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) {
+	_, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func read(fd int, p []byte) (n int, err error) {
 	var _p0 unsafe.Pointer
 	if len(p) > 0 {
@@ -912,6 +922,16 @@ func Settimeofday(tv *Timeval) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Setns(fd int, nstype int) (err error) {
+	_, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Setpriority(which int, who int, prio int) (err error) {
 	_, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio))
 	if e1 != 0 {
@@ -1053,22 +1073,6 @@ func Ustat(dev int, ubuf *Ustat_t) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Utime(path string, buf *Utimbuf) (err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(path)
-	if err != nil {
-		return
-	}
-	_, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0)
-	use(unsafe.Pointer(_p0))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func write(fd int, p []byte) (n int, err error) {
 	var _p0 unsafe.Pointer
 	if len(p) > 0 {
@@ -1616,3 +1620,19 @@ func Time(t *Time_t) (tt Time_t, err error) {
 	}
 	return
 }
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Utime(path string, buf *Utimbuf) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go
index 2adb9284a34e2fd7335e7151775ac9af81749ccc..510cb1bb16f040b93471044cdc2289412371cb3b 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go
@@ -14,7 +14,7 @@ var _ syscall.Errno
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) {
+func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(oldpath)
 	if err != nil {
@@ -788,6 +788,16 @@ func prlimit(pid int, resource int, old *Rlimit, newlimit *Rlimit) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) {
+	_, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func read(fd int, p []byte) (n int, err error) {
 	var _p0 unsafe.Pointer
 	if len(p) > 0 {
@@ -912,6 +922,16 @@ func Settimeofday(tv *Timeval) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Setns(fd int, nstype int) (err error) {
+	_, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Setpriority(which int, who int, prio int) (err error) {
 	_, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio))
 	if e1 != 0 {
@@ -1053,22 +1073,6 @@ func Ustat(dev int, ubuf *Ustat_t) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Utime(path string, buf *Utimbuf) (err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(path)
-	if err != nil {
-		return
-	}
-	_, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0)
-	use(unsafe.Pointer(_p0))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func write(fd int, p []byte) (n int, err error) {
 	var _p0 unsafe.Pointer
 	if len(p) > 0 {
@@ -1793,6 +1797,22 @@ func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int6
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Utime(path string, buf *Utimbuf) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func pipe(p *[2]_C_int) (err error) {
 	_, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0)
 	if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go
index ca00ed3dfb9c3804f1eb67a9fd53b10afa805d98..28c720fbc1906d2feb10bc577180e4593d222820 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go
@@ -14,7 +14,7 @@ var _ syscall.Errno
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) {
+func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(oldpath)
 	if err != nil {
@@ -788,6 +788,16 @@ func prlimit(pid int, resource int, old *Rlimit, newlimit *Rlimit) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) {
+	_, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func read(fd int, p []byte) (n int, err error) {
 	var _p0 unsafe.Pointer
 	if len(p) > 0 {
@@ -912,6 +922,16 @@ func Settimeofday(tv *Timeval) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Setns(fd int, nstype int) (err error) {
+	_, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Setpriority(which int, who int, prio int) (err error) {
 	_, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio))
 	if e1 != 0 {
@@ -1053,22 +1073,6 @@ func Ustat(dev int, ubuf *Ustat_t) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Utime(path string, buf *Utimbuf) (err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(path)
-	if err != nil {
-		return
-	}
-	_, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0)
-	use(unsafe.Pointer(_p0))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func write(fd int, p []byte) (n int, err error) {
 	var _p0 unsafe.Pointer
 	if len(p) > 0 {
@@ -1645,17 +1649,6 @@ func Gettimeofday(tv *Timeval) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Time(t *Time_t) (tt Time_t, err error) {
-	r0, _, e1 := RawSyscall(SYS_TIME, uintptr(unsafe.Pointer(t)), 0, 0)
-	tt = Time_t(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Pread(fd int, p []byte, offset int64) (n int, err error) {
 	var _p0 unsafe.Pointer
 	if len(p) > 0 {
@@ -1728,7 +1721,7 @@ func mmap2(addr uintptr, length uintptr, prot int, flags int, fd int, pageOffset
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
 func getrlimit(resource int, rlim *rlimit32) (err error) {
-	_, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0)
+	_, _, e1 := RawSyscall(SYS_UGETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0)
 	if e1 != 0 {
 		err = errnoErr(e1)
 	}
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go
index 8eafcebcbfdd338d42a7a8dcce7734f102ffa43b..1ac54217adca4c354fa38acbe7e0051f5beaf873 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go
@@ -14,7 +14,7 @@ var _ syscall.Errno
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) {
+func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(oldpath)
 	if err != nil {
@@ -788,6 +788,16 @@ func prlimit(pid int, resource int, old *Rlimit, newlimit *Rlimit) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) {
+	_, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func read(fd int, p []byte) (n int, err error) {
 	var _p0 unsafe.Pointer
 	if len(p) > 0 {
@@ -912,6 +922,16 @@ func Settimeofday(tv *Timeval) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Setns(fd int, nstype int) (err error) {
+	_, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Setpriority(which int, who int, prio int) (err error) {
 	_, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio))
 	if e1 != 0 {
@@ -1053,22 +1073,6 @@ func Ustat(dev int, ubuf *Ustat_t) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Utime(path string, buf *Utimbuf) (err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(path)
-	if err != nil {
-		return
-	}
-	_, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0)
-	use(unsafe.Pointer(_p0))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func write(fd int, p []byte) (n int, err error) {
 	var _p0 unsafe.Pointer
 	if len(p) > 0 {
@@ -1720,17 +1724,6 @@ func Gettimeofday(tv *Timeval) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Time(t *Time_t) (tt Time_t, err error) {
-	r0, _, e1 := RawSyscall(SYS_TIME, uintptr(unsafe.Pointer(t)), 0, 0)
-	tt = Time_t(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func pipe2(p *[2]_C_int, flags int) (err error) {
 	_, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0)
 	if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go
new file mode 100644
index 0000000000000000000000000000000000000000..724700583d079271917f8acd54efb76e2479fc3a
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go
@@ -0,0 +1,1781 @@
+// mksyscall.pl syscall_linux.go syscall_linux_mips64x.go
+// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
+
+// +build mips64,linux
+
+package unix
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+var _ syscall.Errno
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(oldpath)
+	if err != nil {
+		return
+	}
+	var _p1 *byte
+	_p1, err = BytePtrFromString(newpath)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0)
+	use(unsafe.Pointer(_p0))
+	use(unsafe.Pointer(_p1))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0)
+	use(unsafe.Pointer(_p0))
+	fd = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func readlinkat(dirfd int, path string, buf []byte) (n int, err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	var _p1 unsafe.Pointer
+	if len(buf) > 0 {
+		_p1 = unsafe.Pointer(&buf[0])
+	} else {
+		_p1 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0)
+	use(unsafe.Pointer(_p0))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func symlinkat(oldpath string, newdirfd int, newpath string) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(oldpath)
+	if err != nil {
+		return
+	}
+	var _p1 *byte
+	_p1, err = BytePtrFromString(newpath)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)))
+	use(unsafe.Pointer(_p0))
+	use(unsafe.Pointer(_p1))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func unlinkat(dirfd int, path string, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags))
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func utimes(path string, times *[2]Timeval) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) {
+	_, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times)))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getcwd(buf []byte) (n int, err error) {
+	var _p0 unsafe.Pointer
+	if len(buf) > 0 {
+		_p0 = unsafe.Pointer(&buf[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0)
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) {
+	r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0)
+	wpid = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) {
+	_, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(arg)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(source)
+	if err != nil {
+		return
+	}
+	var _p1 *byte
+	_p1, err = BytePtrFromString(target)
+	if err != nil {
+		return
+	}
+	var _p2 *byte
+	_p2, err = BytePtrFromString(fstype)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0)
+	use(unsafe.Pointer(_p0))
+	use(unsafe.Pointer(_p1))
+	use(unsafe.Pointer(_p2))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Acct(path string) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Adjtimex(buf *Timex) (state int, err error) {
+	r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0)
+	state = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Chdir(path string) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Chroot(path string) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ClockGettime(clockid int32, time *Timespec) (err error) {
+	_, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Close(fd int) (err error) {
+	_, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Dup(oldfd int) (fd int, err error) {
+	r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0)
+	fd = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Dup3(oldfd int, newfd int, flags int) (err error) {
+	_, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func EpollCreate(size int) (fd int, err error) {
+	r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0)
+	fd = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func EpollCreate1(flag int) (fd int, err error) {
+	r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0)
+	fd = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) {
+	_, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) {
+	var _p0 unsafe.Pointer
+	if len(events) > 0 {
+		_p0 = unsafe.Pointer(&events[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0)
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Exit(code int) {
+	Syscall(SYS_EXIT_GROUP, uintptr(code), 0, 0)
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fallocate(fd int, mode uint32, off int64, len int64) (err error) {
+	_, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchdir(fd int) (err error) {
+	_, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchmod(fd int, mode uint32) (err error) {
+	_, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fcntl(fd int, cmd int, arg int) (val int, err error) {
+	r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg))
+	val = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fdatasync(fd int) (err error) {
+	_, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Flock(fd int, how int) (err error) {
+	_, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fsync(fd int) (err error) {
+	_, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getdents(fd int, buf []byte) (n int, err error) {
+	var _p0 unsafe.Pointer
+	if len(buf) > 0 {
+		_p0 = unsafe.Pointer(&buf[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf)))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getpgid(pid int) (pgid int, err error) {
+	r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0)
+	pgid = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getpid() (pid int) {
+	r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0)
+	pid = int(r0)
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getppid() (ppid int) {
+	r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0)
+	ppid = int(r0)
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getpriority(which int, who int) (prio int, err error) {
+	r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0)
+	prio = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getrusage(who int, rusage *Rusage) (err error) {
+	_, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Gettid() (tid int) {
+	r0, _, _ := RawSyscall(SYS_GETTID, 0, 0, 0)
+	tid = int(r0)
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getxattr(path string, attr string, dest []byte) (sz int, err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	var _p1 *byte
+	_p1, err = BytePtrFromString(attr)
+	if err != nil {
+		return
+	}
+	var _p2 unsafe.Pointer
+	if len(dest) > 0 {
+		_p2 = unsafe.Pointer(&dest[0])
+	} else {
+		_p2 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0)
+	use(unsafe.Pointer(_p0))
+	use(unsafe.Pointer(_p1))
+	sz = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(pathname)
+	if err != nil {
+		return
+	}
+	r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask))
+	use(unsafe.Pointer(_p0))
+	watchdesc = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func InotifyInit1(flags int) (fd int, err error) {
+	r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0)
+	fd = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) {
+	r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0)
+	success = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Kill(pid int, sig syscall.Signal) (err error) {
+	_, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Klogctl(typ int, buf []byte) (n int, err error) {
+	var _p0 unsafe.Pointer
+	if len(buf) > 0 {
+		_p0 = unsafe.Pointer(&buf[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf)))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Listxattr(path string, dest []byte) (sz int, err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	var _p1 unsafe.Pointer
+	if len(dest) > 0 {
+		_p1 = unsafe.Pointer(&dest[0])
+	} else {
+		_p1 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)))
+	use(unsafe.Pointer(_p0))
+	sz = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mkdirat(dirfd int, path string, mode uint32) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode))
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Nanosleep(time *Timespec, leftover *Timespec) (err error) {
+	_, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Pause() (err error) {
+	_, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func PivotRoot(newroot string, putold string) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(newroot)
+	if err != nil {
+		return
+	}
+	var _p1 *byte
+	_p1, err = BytePtrFromString(putold)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
+	use(unsafe.Pointer(_p0))
+	use(unsafe.Pointer(_p1))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func prlimit(pid int, resource int, old *Rlimit, newlimit *Rlimit) (err error) {
+	_, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(newlimit)), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) {
+	_, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func read(fd int, p []byte) (n int, err error) {
+	var _p0 unsafe.Pointer
+	if len(p) > 0 {
+		_p0 = unsafe.Pointer(&p[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p)))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Removexattr(path string, attr string) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	var _p1 *byte
+	_p1, err = BytePtrFromString(attr)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
+	use(unsafe.Pointer(_p0))
+	use(unsafe.Pointer(_p1))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(oldpath)
+	if err != nil {
+		return
+	}
+	var _p1 *byte
+	_p1, err = BytePtrFromString(newpath)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0)
+	use(unsafe.Pointer(_p0))
+	use(unsafe.Pointer(_p1))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setdomainname(p []byte) (err error) {
+	var _p0 unsafe.Pointer
+	if len(p) > 0 {
+		_p0 = unsafe.Pointer(&p[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	_, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Sethostname(p []byte) (err error) {
+	var _p0 unsafe.Pointer
+	if len(p) > 0 {
+		_p0 = unsafe.Pointer(&p[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	_, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setpgid(pid int, pgid int) (err error) {
+	_, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setsid() (pid int, err error) {
+	r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0)
+	pid = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Settimeofday(tv *Timeval) (err error) {
+	_, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setns(fd int, nstype int) (err error) {
+	_, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setpriority(which int, who int, prio int) (err error) {
+	_, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setxattr(path string, attr string, data []byte, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	var _p1 *byte
+	_p1, err = BytePtrFromString(attr)
+	if err != nil {
+		return
+	}
+	var _p2 unsafe.Pointer
+	if len(data) > 0 {
+		_p2 = unsafe.Pointer(&data[0])
+	} else {
+		_p2 = unsafe.Pointer(&_zero)
+	}
+	_, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0)
+	use(unsafe.Pointer(_p0))
+	use(unsafe.Pointer(_p1))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Sync() {
+	Syscall(SYS_SYNC, 0, 0, 0)
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Sysinfo(info *Sysinfo_t) (err error) {
+	_, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) {
+	r0, _, e1 := Syscall6(SYS_TEE, uintptr(rfd), uintptr(wfd), uintptr(len), uintptr(flags), 0, 0)
+	n = int64(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) {
+	_, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Times(tms *Tms) (ticks uintptr, err error) {
+	r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0)
+	ticks = uintptr(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Umask(mask int) (oldmask int) {
+	r0, _, _ := RawSyscall(SYS_UMASK, uintptr(mask), 0, 0)
+	oldmask = int(r0)
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Uname(buf *Utsname) (err error) {
+	_, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Unmount(target string, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(target)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Unshare(flags int) (err error) {
+	_, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Ustat(dev int, ubuf *Ustat_t) (err error) {
+	_, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func write(fd int, p []byte) (n int, err error) {
+	var _p0 unsafe.Pointer
+	if len(p) > 0 {
+		_p0 = unsafe.Pointer(&p[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func exitThread(code int) (err error) {
+	_, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func readlen(fd int, p *byte, np int) (n int, err error) {
+	r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func writelen(fd int, p *byte, np int) (n int, err error) {
+	r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func munmap(addr uintptr, length uintptr) (err error) {
+	_, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Madvise(b []byte, advice int) (err error) {
+	var _p0 unsafe.Pointer
+	if len(b) > 0 {
+		_p0 = unsafe.Pointer(&b[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	_, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mprotect(b []byte, prot int) (err error) {
+	var _p0 unsafe.Pointer
+	if len(b) > 0 {
+		_p0 = unsafe.Pointer(&b[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	_, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mlock(b []byte) (err error) {
+	var _p0 unsafe.Pointer
+	if len(b) > 0 {
+		_p0 = unsafe.Pointer(&b[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	_, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Munlock(b []byte) (err error) {
+	var _p0 unsafe.Pointer
+	if len(b) > 0 {
+		_p0 = unsafe.Pointer(&b[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	_, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mlockall(flags int) (err error) {
+	_, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Munlockall() (err error) {
+	_, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchown(fd int, uid int, gid int) (err error) {
+	_, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fstatfs(fd int, buf *Statfs_t) (err error) {
+	_, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(buf)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Ftruncate(fd int, length int64) (err error) {
+	_, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getegid() (egid int) {
+	r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0)
+	egid = int(r0)
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Geteuid() (euid int) {
+	r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0)
+	euid = int(r0)
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getgid() (gid int) {
+	r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0)
+	gid = int(r0)
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getrlimit(resource int, rlim *Rlimit) (err error) {
+	_, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getuid() (uid int) {
+	r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0)
+	uid = int(r0)
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Lchown(path string, uid int, gid int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid))
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Listen(s int, n int) (err error) {
+	_, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(n), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Pread(fd int, p []byte, offset int64) (n int, err error) {
+	var _p0 unsafe.Pointer
+	if len(p) > 0 {
+		_p0 = unsafe.Pointer(&p[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := Syscall6(SYS_PREAD64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0)
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Pwrite(fd int, p []byte, offset int64) (n int, err error) {
+	var _p0 unsafe.Pointer
+	if len(p) > 0 {
+		_p0 = unsafe.Pointer(&p[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := Syscall6(SYS_PWRITE64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0)
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Seek(fd int, offset int64, whence int) (off int64, err error) {
+	r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence))
+	off = int64(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) {
+	r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0)
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
+	r0, _, e1 := Syscall6(SYS_SENDFILE, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0)
+	written = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setfsgid(gid int) (err error) {
+	_, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setfsuid(uid int) (err error) {
+	_, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setregid(rgid int, egid int) (err error) {
+	_, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setresgid(rgid int, egid int, sgid int) (err error) {
+	_, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setresuid(ruid int, euid int, suid int) (err error) {
+	_, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setrlimit(resource int, rlim *Rlimit) (err error) {
+	_, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setreuid(ruid int, euid int) (err error) {
+	_, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Shutdown(fd int, how int) (err error) {
+	_, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) {
+	r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags))
+	n = int64(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Statfs(path string, buf *Statfs_t) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func SyncFileRange(fd int, off int64, n int64, flags int) (err error) {
+	_, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE, uintptr(fd), uintptr(off), uintptr(n), uintptr(flags), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Truncate(path string, length int64) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) {
+	r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
+	fd = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) {
+	r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0)
+	fd = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
+	_, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
+	_, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getgroups(n int, list *_Gid_t) (nn int, err error) {
+	r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0)
+	nn = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func setgroups(n int, list *_Gid_t) (err error) {
+	_, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) {
+	_, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) {
+	_, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func socket(domain int, typ int, proto int) (fd int, err error) {
+	r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto))
+	fd = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) {
+	_, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
+	_, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
+	_, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) {
+	var _p0 unsafe.Pointer
+	if len(p) > 0 {
+		_p0 = unsafe.Pointer(&p[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) {
+	var _p0 unsafe.Pointer
+	if len(buf) > 0 {
+		_p0 = unsafe.Pointer(&buf[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	_, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) {
+	r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) {
+	r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) {
+	r0, _, e1 := Syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flags), uintptr(fd), uintptr(offset))
+	xaddr = uintptr(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Gettimeofday(tv *Timeval) (err error) {
+	_, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Utime(path string, buf *Utimbuf) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func pipe2(p *[2]_C_int, flags int) (err error) {
+	_, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fstat(fd int, st *stat_t) (err error) {
+	_, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(st)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func lstat(path string, st *stat_t) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(st)), 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func stat(path string, st *stat_t) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(st)), 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go
new file mode 100644
index 0000000000000000000000000000000000000000..1b7fb64ea9e3cdbf5c62651346d68784c6b654cd
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go
@@ -0,0 +1,1781 @@
+// mksyscall.pl syscall_linux.go syscall_linux_mips64x.go
+// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
+
+// +build mips64le,linux
+
+package unix
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+var _ syscall.Errno
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(oldpath)
+	if err != nil {
+		return
+	}
+	var _p1 *byte
+	_p1, err = BytePtrFromString(newpath)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0)
+	use(unsafe.Pointer(_p0))
+	use(unsafe.Pointer(_p1))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0)
+	use(unsafe.Pointer(_p0))
+	fd = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func readlinkat(dirfd int, path string, buf []byte) (n int, err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	var _p1 unsafe.Pointer
+	if len(buf) > 0 {
+		_p1 = unsafe.Pointer(&buf[0])
+	} else {
+		_p1 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0)
+	use(unsafe.Pointer(_p0))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func symlinkat(oldpath string, newdirfd int, newpath string) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(oldpath)
+	if err != nil {
+		return
+	}
+	var _p1 *byte
+	_p1, err = BytePtrFromString(newpath)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)))
+	use(unsafe.Pointer(_p0))
+	use(unsafe.Pointer(_p1))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func unlinkat(dirfd int, path string, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags))
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func utimes(path string, times *[2]Timeval) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) {
+	_, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times)))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getcwd(buf []byte) (n int, err error) {
+	var _p0 unsafe.Pointer
+	if len(buf) > 0 {
+		_p0 = unsafe.Pointer(&buf[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0)
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) {
+	r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0)
+	wpid = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) {
+	_, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(arg)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(source)
+	if err != nil {
+		return
+	}
+	var _p1 *byte
+	_p1, err = BytePtrFromString(target)
+	if err != nil {
+		return
+	}
+	var _p2 *byte
+	_p2, err = BytePtrFromString(fstype)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0)
+	use(unsafe.Pointer(_p0))
+	use(unsafe.Pointer(_p1))
+	use(unsafe.Pointer(_p2))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Acct(path string) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Adjtimex(buf *Timex) (state int, err error) {
+	r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0)
+	state = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Chdir(path string) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Chroot(path string) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ClockGettime(clockid int32, time *Timespec) (err error) {
+	_, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Close(fd int) (err error) {
+	_, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Dup(oldfd int) (fd int, err error) {
+	r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0)
+	fd = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Dup3(oldfd int, newfd int, flags int) (err error) {
+	_, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func EpollCreate(size int) (fd int, err error) {
+	r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0)
+	fd = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func EpollCreate1(flag int) (fd int, err error) {
+	r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0)
+	fd = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) {
+	_, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) {
+	var _p0 unsafe.Pointer
+	if len(events) > 0 {
+		_p0 = unsafe.Pointer(&events[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0)
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Exit(code int) {
+	Syscall(SYS_EXIT_GROUP, uintptr(code), 0, 0)
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fallocate(fd int, mode uint32, off int64, len int64) (err error) {
+	_, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchdir(fd int) (err error) {
+	_, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchmod(fd int, mode uint32) (err error) {
+	_, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fcntl(fd int, cmd int, arg int) (val int, err error) {
+	r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg))
+	val = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fdatasync(fd int) (err error) {
+	_, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Flock(fd int, how int) (err error) {
+	_, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fsync(fd int) (err error) {
+	_, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getdents(fd int, buf []byte) (n int, err error) {
+	var _p0 unsafe.Pointer
+	if len(buf) > 0 {
+		_p0 = unsafe.Pointer(&buf[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf)))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getpgid(pid int) (pgid int, err error) {
+	r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0)
+	pgid = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getpid() (pid int) {
+	r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0)
+	pid = int(r0)
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getppid() (ppid int) {
+	r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0)
+	ppid = int(r0)
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getpriority(which int, who int) (prio int, err error) {
+	r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0)
+	prio = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getrusage(who int, rusage *Rusage) (err error) {
+	_, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Gettid() (tid int) {
+	r0, _, _ := RawSyscall(SYS_GETTID, 0, 0, 0)
+	tid = int(r0)
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getxattr(path string, attr string, dest []byte) (sz int, err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	var _p1 *byte
+	_p1, err = BytePtrFromString(attr)
+	if err != nil {
+		return
+	}
+	var _p2 unsafe.Pointer
+	if len(dest) > 0 {
+		_p2 = unsafe.Pointer(&dest[0])
+	} else {
+		_p2 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0)
+	use(unsafe.Pointer(_p0))
+	use(unsafe.Pointer(_p1))
+	sz = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(pathname)
+	if err != nil {
+		return
+	}
+	r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask))
+	use(unsafe.Pointer(_p0))
+	watchdesc = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func InotifyInit1(flags int) (fd int, err error) {
+	r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0)
+	fd = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) {
+	r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0)
+	success = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Kill(pid int, sig syscall.Signal) (err error) {
+	_, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Klogctl(typ int, buf []byte) (n int, err error) {
+	var _p0 unsafe.Pointer
+	if len(buf) > 0 {
+		_p0 = unsafe.Pointer(&buf[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf)))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Listxattr(path string, dest []byte) (sz int, err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	var _p1 unsafe.Pointer
+	if len(dest) > 0 {
+		_p1 = unsafe.Pointer(&dest[0])
+	} else {
+		_p1 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)))
+	use(unsafe.Pointer(_p0))
+	sz = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mkdirat(dirfd int, path string, mode uint32) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode))
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Nanosleep(time *Timespec, leftover *Timespec) (err error) {
+	_, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Pause() (err error) {
+	_, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func PivotRoot(newroot string, putold string) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(newroot)
+	if err != nil {
+		return
+	}
+	var _p1 *byte
+	_p1, err = BytePtrFromString(putold)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
+	use(unsafe.Pointer(_p0))
+	use(unsafe.Pointer(_p1))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func prlimit(pid int, resource int, old *Rlimit, newlimit *Rlimit) (err error) {
+	_, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(newlimit)), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) {
+	_, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func read(fd int, p []byte) (n int, err error) {
+	var _p0 unsafe.Pointer
+	if len(p) > 0 {
+		_p0 = unsafe.Pointer(&p[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p)))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Removexattr(path string, attr string) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	var _p1 *byte
+	_p1, err = BytePtrFromString(attr)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
+	use(unsafe.Pointer(_p0))
+	use(unsafe.Pointer(_p1))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(oldpath)
+	if err != nil {
+		return
+	}
+	var _p1 *byte
+	_p1, err = BytePtrFromString(newpath)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0)
+	use(unsafe.Pointer(_p0))
+	use(unsafe.Pointer(_p1))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setdomainname(p []byte) (err error) {
+	var _p0 unsafe.Pointer
+	if len(p) > 0 {
+		_p0 = unsafe.Pointer(&p[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	_, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Sethostname(p []byte) (err error) {
+	var _p0 unsafe.Pointer
+	if len(p) > 0 {
+		_p0 = unsafe.Pointer(&p[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	_, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setpgid(pid int, pgid int) (err error) {
+	_, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setsid() (pid int, err error) {
+	r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0)
+	pid = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Settimeofday(tv *Timeval) (err error) {
+	_, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setns(fd int, nstype int) (err error) {
+	_, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setpriority(which int, who int, prio int) (err error) {
+	_, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setxattr(path string, attr string, data []byte, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	var _p1 *byte
+	_p1, err = BytePtrFromString(attr)
+	if err != nil {
+		return
+	}
+	var _p2 unsafe.Pointer
+	if len(data) > 0 {
+		_p2 = unsafe.Pointer(&data[0])
+	} else {
+		_p2 = unsafe.Pointer(&_zero)
+	}
+	_, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0)
+	use(unsafe.Pointer(_p0))
+	use(unsafe.Pointer(_p1))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Sync() {
+	Syscall(SYS_SYNC, 0, 0, 0)
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Sysinfo(info *Sysinfo_t) (err error) {
+	_, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) {
+	r0, _, e1 := Syscall6(SYS_TEE, uintptr(rfd), uintptr(wfd), uintptr(len), uintptr(flags), 0, 0)
+	n = int64(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) {
+	_, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Times(tms *Tms) (ticks uintptr, err error) {
+	r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0)
+	ticks = uintptr(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Umask(mask int) (oldmask int) {
+	r0, _, _ := RawSyscall(SYS_UMASK, uintptr(mask), 0, 0)
+	oldmask = int(r0)
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Uname(buf *Utsname) (err error) {
+	_, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Unmount(target string, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(target)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Unshare(flags int) (err error) {
+	_, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Ustat(dev int, ubuf *Ustat_t) (err error) {
+	_, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func write(fd int, p []byte) (n int, err error) {
+	var _p0 unsafe.Pointer
+	if len(p) > 0 {
+		_p0 = unsafe.Pointer(&p[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func exitThread(code int) (err error) {
+	_, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func readlen(fd int, p *byte, np int) (n int, err error) {
+	r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func writelen(fd int, p *byte, np int) (n int, err error) {
+	r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func munmap(addr uintptr, length uintptr) (err error) {
+	_, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Madvise(b []byte, advice int) (err error) {
+	var _p0 unsafe.Pointer
+	if len(b) > 0 {
+		_p0 = unsafe.Pointer(&b[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	_, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mprotect(b []byte, prot int) (err error) {
+	var _p0 unsafe.Pointer
+	if len(b) > 0 {
+		_p0 = unsafe.Pointer(&b[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	_, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mlock(b []byte) (err error) {
+	var _p0 unsafe.Pointer
+	if len(b) > 0 {
+		_p0 = unsafe.Pointer(&b[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	_, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Munlock(b []byte) (err error) {
+	var _p0 unsafe.Pointer
+	if len(b) > 0 {
+		_p0 = unsafe.Pointer(&b[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	_, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mlockall(flags int) (err error) {
+	_, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Munlockall() (err error) {
+	_, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchown(fd int, uid int, gid int) (err error) {
+	_, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fstatfs(fd int, buf *Statfs_t) (err error) {
+	_, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(buf)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Ftruncate(fd int, length int64) (err error) {
+	_, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getegid() (egid int) {
+	r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0)
+	egid = int(r0)
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Geteuid() (euid int) {
+	r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0)
+	euid = int(r0)
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getgid() (gid int) {
+	r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0)
+	gid = int(r0)
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getrlimit(resource int, rlim *Rlimit) (err error) {
+	_, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getuid() (uid int) {
+	r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0)
+	uid = int(r0)
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Lchown(path string, uid int, gid int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid))
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Listen(s int, n int) (err error) {
+	_, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(n), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Pread(fd int, p []byte, offset int64) (n int, err error) {
+	var _p0 unsafe.Pointer
+	if len(p) > 0 {
+		_p0 = unsafe.Pointer(&p[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := Syscall6(SYS_PREAD64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0)
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Pwrite(fd int, p []byte, offset int64) (n int, err error) {
+	var _p0 unsafe.Pointer
+	if len(p) > 0 {
+		_p0 = unsafe.Pointer(&p[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := Syscall6(SYS_PWRITE64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0)
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Seek(fd int, offset int64, whence int) (off int64, err error) {
+	r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence))
+	off = int64(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) {
+	r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0)
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
+	r0, _, e1 := Syscall6(SYS_SENDFILE, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0)
+	written = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setfsgid(gid int) (err error) {
+	_, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setfsuid(uid int) (err error) {
+	_, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setregid(rgid int, egid int) (err error) {
+	_, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setresgid(rgid int, egid int, sgid int) (err error) {
+	_, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setresuid(ruid int, euid int, suid int) (err error) {
+	_, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setrlimit(resource int, rlim *Rlimit) (err error) {
+	_, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setreuid(ruid int, euid int) (err error) {
+	_, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Shutdown(fd int, how int) (err error) {
+	_, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) {
+	r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags))
+	n = int64(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Statfs(path string, buf *Statfs_t) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func SyncFileRange(fd int, off int64, n int64, flags int) (err error) {
+	_, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE, uintptr(fd), uintptr(off), uintptr(n), uintptr(flags), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Truncate(path string, length int64) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) {
+	r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
+	fd = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) {
+	r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0)
+	fd = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
+	_, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
+	_, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getgroups(n int, list *_Gid_t) (nn int, err error) {
+	r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0)
+	nn = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func setgroups(n int, list *_Gid_t) (err error) {
+	_, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) {
+	_, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) {
+	_, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func socket(domain int, typ int, proto int) (fd int, err error) {
+	r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto))
+	fd = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) {
+	_, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
+	_, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
+	_, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) {
+	var _p0 unsafe.Pointer
+	if len(p) > 0 {
+		_p0 = unsafe.Pointer(&p[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) {
+	var _p0 unsafe.Pointer
+	if len(buf) > 0 {
+		_p0 = unsafe.Pointer(&buf[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	_, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) {
+	r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) {
+	r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) {
+	r0, _, e1 := Syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flags), uintptr(fd), uintptr(offset))
+	xaddr = uintptr(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Gettimeofday(tv *Timeval) (err error) {
+	_, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Utime(path string, buf *Utimbuf) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func pipe2(p *[2]_C_int, flags int) (err error) {
+	_, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fstat(fd int, st *stat_t) (err error) {
+	_, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(st)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func lstat(path string, st *stat_t) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(st)), 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func stat(path string, st *stat_t) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(st)), 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go
index 008a52638b9f7c6de081e123542c8e684183035f..2b4cd7dfba0d32d6551aefed4e56beb2b7d0ba48 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go
@@ -14,7 +14,7 @@ var _ syscall.Errno
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) {
+func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(oldpath)
 	if err != nil {
@@ -788,6 +788,16 @@ func prlimit(pid int, resource int, old *Rlimit, newlimit *Rlimit) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) {
+	_, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func read(fd int, p []byte) (n int, err error) {
 	var _p0 unsafe.Pointer
 	if len(p) > 0 {
@@ -912,6 +922,16 @@ func Settimeofday(tv *Timeval) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Setns(fd int, nstype int) (err error) {
+	_, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Setpriority(which int, who int, prio int) (err error) {
 	_, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio))
 	if e1 != 0 {
@@ -1053,22 +1073,6 @@ func Ustat(dev int, ubuf *Ustat_t) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Utime(path string, buf *Utimbuf) (err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(path)
-	if err != nil {
-		return
-	}
-	_, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0)
-	use(unsafe.Pointer(_p0))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func write(fd int, p []byte) (n int, err error) {
 	var _p0 unsafe.Pointer
 	if len(p) > 0 {
@@ -1212,6 +1216,16 @@ func Munlockall() (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Dup2(oldfd int, newfd int) (err error) {
+	_, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Fchown(fd int, uid int, gid int) (err error) {
 	_, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid))
 	if e1 != 0 {
@@ -1780,3 +1794,19 @@ func Time(t *Time_t) (tt Time_t, err error) {
 	}
 	return
 }
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Utime(path string, buf *Utimbuf) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go
index d91f763af9d31ecc245950c5c227902aff5ac4fd..7e1708ded84f0f0ccfb8d273fef7e04582b9f7b6 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go
@@ -14,7 +14,7 @@ var _ syscall.Errno
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) {
+func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(oldpath)
 	if err != nil {
@@ -788,6 +788,16 @@ func prlimit(pid int, resource int, old *Rlimit, newlimit *Rlimit) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) {
+	_, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func read(fd int, p []byte) (n int, err error) {
 	var _p0 unsafe.Pointer
 	if len(p) > 0 {
@@ -912,6 +922,16 @@ func Settimeofday(tv *Timeval) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Setns(fd int, nstype int) (err error) {
+	_, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Setpriority(which int, who int, prio int) (err error) {
 	_, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio))
 	if e1 != 0 {
@@ -1053,22 +1073,6 @@ func Ustat(dev int, ubuf *Ustat_t) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Utime(path string, buf *Utimbuf) (err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(path)
-	if err != nil {
-		return
-	}
-	_, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0)
-	use(unsafe.Pointer(_p0))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func write(fd int, p []byte) (n int, err error) {
 	var _p0 unsafe.Pointer
 	if len(p) > 0 {
@@ -1212,6 +1216,16 @@ func Munlockall() (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Dup2(oldfd int, newfd int) (err error) {
+	_, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Fchown(fd int, uid int, gid int) (err error) {
 	_, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid))
 	if e1 != 0 {
@@ -1780,3 +1794,19 @@ func Time(t *Time_t) (tt Time_t, err error) {
 	}
 	return
 }
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Utime(path string, buf *Utimbuf) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
index 95cb1f65f7e65c8ad657f99cf90062b763eb57f1..4326427817e0c1b991c5098ac7a06f35ffc6fa1b 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
@@ -10,11 +10,19 @@ import (
 	"unsafe"
 )
 
+//go:cgo_import_dynamic libc_getsockname getsockname "libsocket.so"
+//go:cgo_import_dynamic libc_getcwd getcwd "libc.so"
 //go:cgo_import_dynamic libc_getgroups getgroups "libc.so"
 //go:cgo_import_dynamic libc_setgroups setgroups "libc.so"
+//go:cgo_import_dynamic libc_utimes utimes "libc.so"
+//go:cgo_import_dynamic libc_utimensat utimensat "libc.so"
 //go:cgo_import_dynamic libc_fcntl fcntl "libc.so"
-//go:cgo_import_dynamic libsocket_accept accept "libsocket.so"
-//go:cgo_import_dynamic libsocket_sendmsg sendmsg "libsocket.so"
+//go:cgo_import_dynamic libc_futimesat futimesat "libc.so"
+//go:cgo_import_dynamic libc_accept accept "libsocket.so"
+//go:cgo_import_dynamic libc_recvmsg recvmsg "libsocket.so"
+//go:cgo_import_dynamic libc_sendmsg sendmsg "libsocket.so"
+//go:cgo_import_dynamic libc_acct acct "libc.so"
+//go:cgo_import_dynamic libc_ioctl ioctl "libc.so"
 //go:cgo_import_dynamic libc_access access "libc.so"
 //go:cgo_import_dynamic libc_adjtime adjtime "libc.so"
 //go:cgo_import_dynamic libc_chdir chdir "libc.so"
@@ -22,44 +30,65 @@ import (
 //go:cgo_import_dynamic libc_chown chown "libc.so"
 //go:cgo_import_dynamic libc_chroot chroot "libc.so"
 //go:cgo_import_dynamic libc_close close "libc.so"
+//go:cgo_import_dynamic libc_creat creat "libc.so"
 //go:cgo_import_dynamic libc_dup dup "libc.so"
+//go:cgo_import_dynamic libc_dup2 dup2 "libc.so"
 //go:cgo_import_dynamic libc_exit exit "libc.so"
 //go:cgo_import_dynamic libc_fchdir fchdir "libc.so"
 //go:cgo_import_dynamic libc_fchmod fchmod "libc.so"
+//go:cgo_import_dynamic libc_fchmodat fchmodat "libc.so"
 //go:cgo_import_dynamic libc_fchown fchown "libc.so"
+//go:cgo_import_dynamic libc_fchownat fchownat "libc.so"
+//go:cgo_import_dynamic libc_fdatasync fdatasync "libc.so"
 //go:cgo_import_dynamic libc_fpathconf fpathconf "libc.so"
 //go:cgo_import_dynamic libc_fstat fstat "libc.so"
 //go:cgo_import_dynamic libc_getdents getdents "libc.so"
 //go:cgo_import_dynamic libc_getgid getgid "libc.so"
 //go:cgo_import_dynamic libc_getpid getpid "libc.so"
+//go:cgo_import_dynamic libc_getpgid getpgid "libc.so"
+//go:cgo_import_dynamic libc_getpgrp getpgrp "libc.so"
 //go:cgo_import_dynamic libc_geteuid geteuid "libc.so"
 //go:cgo_import_dynamic libc_getegid getegid "libc.so"
 //go:cgo_import_dynamic libc_getppid getppid "libc.so"
 //go:cgo_import_dynamic libc_getpriority getpriority "libc.so"
 //go:cgo_import_dynamic libc_getrlimit getrlimit "libc.so"
+//go:cgo_import_dynamic libc_getrusage getrusage "libc.so"
 //go:cgo_import_dynamic libc_gettimeofday gettimeofday "libc.so"
 //go:cgo_import_dynamic libc_getuid getuid "libc.so"
 //go:cgo_import_dynamic libc_kill kill "libc.so"
 //go:cgo_import_dynamic libc_lchown lchown "libc.so"
 //go:cgo_import_dynamic libc_link link "libc.so"
-//go:cgo_import_dynamic libsocket_listen listen "libsocket.so"
+//go:cgo_import_dynamic libc_listen listen "libsocket.so"
 //go:cgo_import_dynamic libc_lstat lstat "libc.so"
 //go:cgo_import_dynamic libc_madvise madvise "libc.so"
 //go:cgo_import_dynamic libc_mkdir mkdir "libc.so"
+//go:cgo_import_dynamic libc_mkdirat mkdirat "libc.so"
+//go:cgo_import_dynamic libc_mkfifo mkfifo "libc.so"
+//go:cgo_import_dynamic libc_mkfifoat mkfifoat "libc.so"
 //go:cgo_import_dynamic libc_mknod mknod "libc.so"
+//go:cgo_import_dynamic libc_mknodat mknodat "libc.so"
+//go:cgo_import_dynamic libc_mlock mlock "libc.so"
+//go:cgo_import_dynamic libc_mlockall mlockall "libc.so"
+//go:cgo_import_dynamic libc_mprotect mprotect "libc.so"
+//go:cgo_import_dynamic libc_munlock munlock "libc.so"
+//go:cgo_import_dynamic libc_munlockall munlockall "libc.so"
 //go:cgo_import_dynamic libc_nanosleep nanosleep "libc.so"
 //go:cgo_import_dynamic libc_open open "libc.so"
+//go:cgo_import_dynamic libc_openat openat "libc.so"
 //go:cgo_import_dynamic libc_pathconf pathconf "libc.so"
+//go:cgo_import_dynamic libc_pause pause "libc.so"
 //go:cgo_import_dynamic libc_pread pread "libc.so"
 //go:cgo_import_dynamic libc_pwrite pwrite "libc.so"
 //go:cgo_import_dynamic libc_read read "libc.so"
 //go:cgo_import_dynamic libc_readlink readlink "libc.so"
 //go:cgo_import_dynamic libc_rename rename "libc.so"
+//go:cgo_import_dynamic libc_renameat renameat "libc.so"
 //go:cgo_import_dynamic libc_rmdir rmdir "libc.so"
 //go:cgo_import_dynamic libc_lseek lseek "libc.so"
 //go:cgo_import_dynamic libc_setegid setegid "libc.so"
 //go:cgo_import_dynamic libc_seteuid seteuid "libc.so"
 //go:cgo_import_dynamic libc_setgid setgid "libc.so"
+//go:cgo_import_dynamic libc_sethostname sethostname "libc.so"
 //go:cgo_import_dynamic libc_setpgid setpgid "libc.so"
 //go:cgo_import_dynamic libc_setpriority setpriority "libc.so"
 //go:cgo_import_dynamic libc_setregid setregid "libc.so"
@@ -67,36 +96,48 @@ import (
 //go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so"
 //go:cgo_import_dynamic libc_setsid setsid "libc.so"
 //go:cgo_import_dynamic libc_setuid setuid "libc.so"
-//go:cgo_import_dynamic libsocket_shutdown shutdown "libsocket.so"
+//go:cgo_import_dynamic libc_shutdown shutdown "libsocket.so"
 //go:cgo_import_dynamic libc_stat stat "libc.so"
 //go:cgo_import_dynamic libc_symlink symlink "libc.so"
 //go:cgo_import_dynamic libc_sync sync "libc.so"
+//go:cgo_import_dynamic libc_times times "libc.so"
 //go:cgo_import_dynamic libc_truncate truncate "libc.so"
 //go:cgo_import_dynamic libc_fsync fsync "libc.so"
 //go:cgo_import_dynamic libc_ftruncate ftruncate "libc.so"
 //go:cgo_import_dynamic libc_umask umask "libc.so"
+//go:cgo_import_dynamic libc_uname uname "libc.so"
+//go:cgo_import_dynamic libc_umount umount "libc.so"
 //go:cgo_import_dynamic libc_unlink unlink "libc.so"
-//go:cgo_import_dynamic libc_utimes utimes "libc.so"
-//go:cgo_import_dynamic libsocket_bind bind "libsocket.so"
-//go:cgo_import_dynamic libsocket_connect connect "libsocket.so"
+//go:cgo_import_dynamic libc_unlinkat unlinkat "libc.so"
+//go:cgo_import_dynamic libc_ustat ustat "libc.so"
+//go:cgo_import_dynamic libc_utime utime "libc.so"
+//go:cgo_import_dynamic libc_bind bind "libsocket.so"
+//go:cgo_import_dynamic libc_connect connect "libsocket.so"
 //go:cgo_import_dynamic libc_mmap mmap "libc.so"
 //go:cgo_import_dynamic libc_munmap munmap "libc.so"
-//go:cgo_import_dynamic libsocket_sendto sendto "libsocket.so"
-//go:cgo_import_dynamic libsocket_socket socket "libsocket.so"
-//go:cgo_import_dynamic libsocket_socketpair socketpair "libsocket.so"
+//go:cgo_import_dynamic libc_sendto sendto "libsocket.so"
+//go:cgo_import_dynamic libc_socket socket "libsocket.so"
+//go:cgo_import_dynamic libc_socketpair socketpair "libsocket.so"
 //go:cgo_import_dynamic libc_write write "libc.so"
-//go:cgo_import_dynamic libsocket_getsockopt getsockopt "libsocket.so"
-//go:cgo_import_dynamic libsocket_getpeername getpeername "libsocket.so"
-//go:cgo_import_dynamic libsocket_getsockname getsockname "libsocket.so"
-//go:cgo_import_dynamic libsocket_setsockopt setsockopt "libsocket.so"
-//go:cgo_import_dynamic libsocket_recvfrom recvfrom "libsocket.so"
-//go:cgo_import_dynamic libsocket_recvmsg recvmsg "libsocket.so"
-
+//go:cgo_import_dynamic libc_getsockopt getsockopt "libsocket.so"
+//go:cgo_import_dynamic libc_getpeername getpeername "libsocket.so"
+//go:cgo_import_dynamic libc_setsockopt setsockopt "libsocket.so"
+//go:cgo_import_dynamic libc_recvfrom recvfrom "libsocket.so"
+//go:cgo_import_dynamic libc_sysconf sysconf "libc.so"
+
+//go:linkname procgetsockname libc_getsockname
+//go:linkname procGetcwd libc_getcwd
 //go:linkname procgetgroups libc_getgroups
 //go:linkname procsetgroups libc_setgroups
+//go:linkname procutimes libc_utimes
+//go:linkname procutimensat libc_utimensat
 //go:linkname procfcntl libc_fcntl
-//go:linkname procaccept libsocket_accept
-//go:linkname procsendmsg libsocket_sendmsg
+//go:linkname procfutimesat libc_futimesat
+//go:linkname procaccept libc_accept
+//go:linkname procrecvmsg libc_recvmsg
+//go:linkname procsendmsg libc_sendmsg
+//go:linkname procacct libc_acct
+//go:linkname procioctl libc_ioctl
 //go:linkname procAccess libc_access
 //go:linkname procAdjtime libc_adjtime
 //go:linkname procChdir libc_chdir
@@ -104,44 +145,65 @@ import (
 //go:linkname procChown libc_chown
 //go:linkname procChroot libc_chroot
 //go:linkname procClose libc_close
+//go:linkname procCreat libc_creat
 //go:linkname procDup libc_dup
+//go:linkname procDup2 libc_dup2
 //go:linkname procExit libc_exit
 //go:linkname procFchdir libc_fchdir
 //go:linkname procFchmod libc_fchmod
+//go:linkname procFchmodat libc_fchmodat
 //go:linkname procFchown libc_fchown
+//go:linkname procFchownat libc_fchownat
+//go:linkname procFdatasync libc_fdatasync
 //go:linkname procFpathconf libc_fpathconf
 //go:linkname procFstat libc_fstat
 //go:linkname procGetdents libc_getdents
 //go:linkname procGetgid libc_getgid
 //go:linkname procGetpid libc_getpid
+//go:linkname procGetpgid libc_getpgid
+//go:linkname procGetpgrp libc_getpgrp
 //go:linkname procGeteuid libc_geteuid
 //go:linkname procGetegid libc_getegid
 //go:linkname procGetppid libc_getppid
 //go:linkname procGetpriority libc_getpriority
 //go:linkname procGetrlimit libc_getrlimit
+//go:linkname procGetrusage libc_getrusage
 //go:linkname procGettimeofday libc_gettimeofday
 //go:linkname procGetuid libc_getuid
 //go:linkname procKill libc_kill
 //go:linkname procLchown libc_lchown
 //go:linkname procLink libc_link
-//go:linkname proclisten libsocket_listen
+//go:linkname proclisten libc_listen
 //go:linkname procLstat libc_lstat
 //go:linkname procMadvise libc_madvise
 //go:linkname procMkdir libc_mkdir
+//go:linkname procMkdirat libc_mkdirat
+//go:linkname procMkfifo libc_mkfifo
+//go:linkname procMkfifoat libc_mkfifoat
 //go:linkname procMknod libc_mknod
+//go:linkname procMknodat libc_mknodat
+//go:linkname procMlock libc_mlock
+//go:linkname procMlockall libc_mlockall
+//go:linkname procMprotect libc_mprotect
+//go:linkname procMunlock libc_munlock
+//go:linkname procMunlockall libc_munlockall
 //go:linkname procNanosleep libc_nanosleep
 //go:linkname procOpen libc_open
+//go:linkname procOpenat libc_openat
 //go:linkname procPathconf libc_pathconf
+//go:linkname procPause libc_pause
 //go:linkname procPread libc_pread
 //go:linkname procPwrite libc_pwrite
 //go:linkname procread libc_read
 //go:linkname procReadlink libc_readlink
 //go:linkname procRename libc_rename
+//go:linkname procRenameat libc_renameat
 //go:linkname procRmdir libc_rmdir
 //go:linkname proclseek libc_lseek
 //go:linkname procSetegid libc_setegid
 //go:linkname procSeteuid libc_seteuid
 //go:linkname procSetgid libc_setgid
+//go:linkname procSethostname libc_sethostname
 //go:linkname procSetpgid libc_setpgid
 //go:linkname procSetpriority libc_setpriority
 //go:linkname procSetregid libc_setregid
@@ -149,37 +211,49 @@ import (
 //go:linkname procSetrlimit libc_setrlimit
 //go:linkname procSetsid libc_setsid
 //go:linkname procSetuid libc_setuid
-//go:linkname procshutdown libsocket_shutdown
+//go:linkname procshutdown libc_shutdown
 //go:linkname procStat libc_stat
 //go:linkname procSymlink libc_symlink
 //go:linkname procSync libc_sync
+//go:linkname procTimes libc_times
 //go:linkname procTruncate libc_truncate
 //go:linkname procFsync libc_fsync
 //go:linkname procFtruncate libc_ftruncate
 //go:linkname procUmask libc_umask
+//go:linkname procUname libc_uname
+//go:linkname procumount libc_umount
 //go:linkname procUnlink libc_unlink
-//go:linkname procUtimes libc_utimes
-//go:linkname procbind libsocket_bind
-//go:linkname procconnect libsocket_connect
+//go:linkname procUnlinkat libc_unlinkat
+//go:linkname procUstat libc_ustat
+//go:linkname procUtime libc_utime
+//go:linkname procbind libc_bind
+//go:linkname procconnect libc_connect
 //go:linkname procmmap libc_mmap
 //go:linkname procmunmap libc_munmap
-//go:linkname procsendto libsocket_sendto
-//go:linkname procsocket libsocket_socket
-//go:linkname procsocketpair libsocket_socketpair
+//go:linkname procsendto libc_sendto
+//go:linkname procsocket libc_socket
+//go:linkname procsocketpair libc_socketpair
 //go:linkname procwrite libc_write
-//go:linkname procgetsockopt libsocket_getsockopt
-//go:linkname procgetpeername libsocket_getpeername
-//go:linkname procgetsockname libsocket_getsockname
-//go:linkname procsetsockopt libsocket_setsockopt
-//go:linkname procrecvfrom libsocket_recvfrom
-//go:linkname procrecvmsg libsocket_recvmsg
+//go:linkname procgetsockopt libc_getsockopt
+//go:linkname procgetpeername libc_getpeername
+//go:linkname procsetsockopt libc_setsockopt
+//go:linkname procrecvfrom libc_recvfrom
+//go:linkname procsysconf libc_sysconf
 
 var (
+	procgetsockname,
+	procGetcwd,
 	procgetgroups,
 	procsetgroups,
+	procutimes,
+	procutimensat,
 	procfcntl,
+	procfutimesat,
 	procaccept,
+	procrecvmsg,
 	procsendmsg,
+	procacct,
+	procioctl,
 	procAccess,
 	procAdjtime,
 	procChdir,
@@ -187,21 +261,29 @@ var (
 	procChown,
 	procChroot,
 	procClose,
+	procCreat,
 	procDup,
+	procDup2,
 	procExit,
 	procFchdir,
 	procFchmod,
+	procFchmodat,
 	procFchown,
+	procFchownat,
+	procFdatasync,
 	procFpathconf,
 	procFstat,
 	procGetdents,
 	procGetgid,
 	procGetpid,
+	procGetpgid,
+	procGetpgrp,
 	procGeteuid,
 	procGetegid,
 	procGetppid,
 	procGetpriority,
 	procGetrlimit,
+	procGetrusage,
 	procGettimeofday,
 	procGetuid,
 	procKill,
@@ -211,20 +293,33 @@ var (
 	procLstat,
 	procMadvise,
 	procMkdir,
+	procMkdirat,
+	procMkfifo,
+	procMkfifoat,
 	procMknod,
+	procMknodat,
+	procMlock,
+	procMlockall,
+	procMprotect,
+	procMunlock,
+	procMunlockall,
 	procNanosleep,
 	procOpen,
+	procOpenat,
 	procPathconf,
+	procPause,
 	procPread,
 	procPwrite,
 	procread,
 	procReadlink,
 	procRename,
+	procRenameat,
 	procRmdir,
 	proclseek,
 	procSetegid,
 	procSeteuid,
 	procSetgid,
+	procSethostname,
 	procSetpgid,
 	procSetpriority,
 	procSetregid,
@@ -236,12 +331,17 @@ var (
 	procStat,
 	procSymlink,
 	procSync,
+	procTimes,
 	procTruncate,
 	procFsync,
 	procFtruncate,
 	procUmask,
+	procUname,
+	procumount,
 	procUnlink,
-	procUtimes,
+	procUnlinkat,
+	procUstat,
+	procUtime,
 	procbind,
 	procconnect,
 	procmmap,
@@ -252,12 +352,32 @@ var (
 	procwrite,
 	procgetsockopt,
 	procgetpeername,
-	procgetsockname,
 	procsetsockopt,
 	procrecvfrom,
-	procrecvmsg syscallFunc
+	procsysconf syscallFunc
 )
 
+func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
+	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgetsockname)), 3, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0)
+	if e1 != 0 {
+		err = e1
+	}
+	return
+}
+
+func Getcwd(buf []byte) (n int, err error) {
+	var _p0 *byte
+	if len(buf) > 0 {
+		_p0 = &buf[0]
+	}
+	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procGetcwd)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), 0, 0, 0, 0)
+	n = int(r0)
+	if e1 != 0 {
+		err = e1
+	}
+	return
+}
+
 func getgroups(ngid int, gid *_Gid_t) (n int, err error) {
 	r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procgetgroups)), 2, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0, 0, 0, 0)
 	n = int(r0)
@@ -275,6 +395,34 @@ func setgroups(ngid int, gid *_Gid_t) (err error) {
 	return
 }
 
+func utimes(path string, times *[2]Timeval) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procutimes)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0, 0, 0, 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = e1
+	}
+	return
+}
+
+func utimensat(fd int, path string, times *[2]Timespec, flag int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procutimensat)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flag), 0, 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = e1
+	}
+	return
+}
+
 func fcntl(fd int, cmd int, arg int) (val int, err error) {
 	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procfcntl)), 3, uintptr(fd), uintptr(cmd), uintptr(arg), 0, 0, 0)
 	val = int(r0)
@@ -284,6 +432,14 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) {
 	return
 }
 
+func futimesat(fildes int, path *byte, times *[2]Timeval) (err error) {
+	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procfutimesat)), 3, uintptr(fildes), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times)), 0, 0, 0)
+	if e1 != 0 {
+		err = e1
+	}
+	return
+}
+
 func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) {
 	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procaccept)), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0)
 	fd = int(r0)
@@ -293,6 +449,15 @@ func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) {
 	return
 }
 
+func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) {
+	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procrecvmsg)), 3, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags), 0, 0, 0)
+	n = int(r0)
+	if e1 != 0 {
+		err = e1
+	}
+	return
+}
+
 func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) {
 	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procsendmsg)), 3, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags), 0, 0, 0)
 	n = int(r0)
@@ -302,6 +467,22 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) {
 	return
 }
 
+func acct(path *byte) (err error) {
+	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procacct)), 1, uintptr(unsafe.Pointer(path)), 0, 0, 0, 0, 0)
+	if e1 != 0 {
+		err = e1
+	}
+	return
+}
+
+func ioctl(fd int, req int, arg uintptr) (err error) {
+	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procioctl)), 3, uintptr(fd), uintptr(req), uintptr(arg), 0, 0, 0)
+	if e1 != 0 {
+		err = e1
+	}
+	return
+}
+
 func Access(path string, mode uint32) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
@@ -388,6 +569,21 @@ func Close(fd int) (err error) {
 	return
 }
 
+func Creat(path string, mode uint32) (fd int, err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procCreat)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0, 0)
+	use(unsafe.Pointer(_p0))
+	fd = int(r0)
+	if e1 != 0 {
+		err = e1
+	}
+	return
+}
+
 func Dup(fd int) (nfd int, err error) {
 	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procDup)), 1, uintptr(fd), 0, 0, 0, 0, 0)
 	nfd = int(r0)
@@ -397,6 +593,14 @@ func Dup(fd int) (nfd int, err error) {
 	return
 }
 
+func Dup2(oldfd int, newfd int) (err error) {
+	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procDup2)), 2, uintptr(oldfd), uintptr(newfd), 0, 0, 0, 0)
+	if e1 != 0 {
+		err = e1
+	}
+	return
+}
+
 func Exit(code int) {
 	sysvicall6(uintptr(unsafe.Pointer(&procExit)), 1, uintptr(code), 0, 0, 0, 0, 0)
 	return
@@ -418,6 +622,20 @@ func Fchmod(fd int, mode uint32) (err error) {
 	return
 }
 
+func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFchmodat)), 4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = e1
+	}
+	return
+}
+
 func Fchown(fd int, uid int, gid int) (err error) {
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFchown)), 3, uintptr(fd), uintptr(uid), uintptr(gid), 0, 0, 0)
 	if e1 != 0 {
@@ -426,6 +644,28 @@ func Fchown(fd int, uid int, gid int) (err error) {
 	return
 }
 
+func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFchownat)), 5, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = e1
+	}
+	return
+}
+
+func Fdatasync(fd int) (err error) {
+	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFdatasync)), 1, uintptr(fd), 0, 0, 0, 0, 0)
+	if e1 != 0 {
+		err = e1
+	}
+	return
+}
+
 func Fpathconf(fd int, name int) (val int, err error) {
 	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFpathconf)), 2, uintptr(fd), uintptr(name), 0, 0, 0, 0)
 	val = int(r0)
@@ -468,6 +708,24 @@ func Getpid() (pid int) {
 	return
 }
 
+func Getpgid(pid int) (pgid int, err error) {
+	r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetpgid)), 1, uintptr(pid), 0, 0, 0, 0, 0)
+	pgid = int(r0)
+	if e1 != 0 {
+		err = e1
+	}
+	return
+}
+
+func Getpgrp() (pgid int, err error) {
+	r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetpgrp)), 0, 0, 0, 0, 0, 0, 0)
+	pgid = int(r0)
+	if e1 != 0 {
+		err = e1
+	}
+	return
+}
+
 func Geteuid() (euid int) {
 	r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procGeteuid)), 0, 0, 0, 0, 0, 0, 0)
 	euid = int(r0)
@@ -503,6 +761,14 @@ func Getrlimit(which int, lim *Rlimit) (err error) {
 	return
 }
 
+func Getrusage(who int, rusage *Rusage) (err error) {
+	_, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetrusage)), 2, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0, 0, 0, 0)
+	if e1 != 0 {
+		err = e1
+	}
+	return
+}
+
 func Gettimeofday(tv *Timeval) (err error) {
 	_, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGettimeofday)), 1, uintptr(unsafe.Pointer(tv)), 0, 0, 0, 0, 0)
 	if e1 != 0 {
@@ -607,6 +873,48 @@ func Mkdir(path string, mode uint32) (err error) {
 	return
 }
 
+func Mkdirat(dirfd int, path string, mode uint32) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMkdirat)), 3, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = e1
+	}
+	return
+}
+
+func Mkfifo(path string, mode uint32) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMkfifo)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0, 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = e1
+	}
+	return
+}
+
+func Mkfifoat(dirfd int, path string, mode uint32) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMkfifoat)), 3, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = e1
+	}
+	return
+}
+
 func Mknod(path string, mode uint32, dev int) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
@@ -621,6 +929,72 @@ func Mknod(path string, mode uint32, dev int) (err error) {
 	return
 }
 
+func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMknodat)), 4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = e1
+	}
+	return
+}
+
+func Mlock(b []byte) (err error) {
+	var _p0 *byte
+	if len(b) > 0 {
+		_p0 = &b[0]
+	}
+	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMlock)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), 0, 0, 0, 0)
+	if e1 != 0 {
+		err = e1
+	}
+	return
+}
+
+func Mlockall(flags int) (err error) {
+	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMlockall)), 1, uintptr(flags), 0, 0, 0, 0, 0)
+	if e1 != 0 {
+		err = e1
+	}
+	return
+}
+
+func Mprotect(b []byte, prot int) (err error) {
+	var _p0 *byte
+	if len(b) > 0 {
+		_p0 = &b[0]
+	}
+	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMprotect)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(prot), 0, 0, 0)
+	if e1 != 0 {
+		err = e1
+	}
+	return
+}
+
+func Munlock(b []byte) (err error) {
+	var _p0 *byte
+	if len(b) > 0 {
+		_p0 = &b[0]
+	}
+	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMunlock)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), 0, 0, 0, 0)
+	if e1 != 0 {
+		err = e1
+	}
+	return
+}
+
+func Munlockall() (err error) {
+	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMunlockall)), 0, 0, 0, 0, 0, 0, 0)
+	if e1 != 0 {
+		err = e1
+	}
+	return
+}
+
 func Nanosleep(time *Timespec, leftover *Timespec) (err error) {
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procNanosleep)), 2, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0, 0, 0, 0)
 	if e1 != 0 {
@@ -644,6 +1018,21 @@ func Open(path string, mode int, perm uint32) (fd int, err error) {
 	return
 }
 
+func Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procOpenat)), 4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0)
+	use(unsafe.Pointer(_p0))
+	fd = int(r0)
+	if e1 != 0 {
+		err = e1
+	}
+	return
+}
+
 func Pathconf(path string, name int) (val int, err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
@@ -659,6 +1048,14 @@ func Pathconf(path string, name int) (val int, err error) {
 	return
 }
 
+func Pause() (err error) {
+	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procPause)), 0, 0, 0, 0, 0, 0, 0)
+	if e1 != 0 {
+		err = e1
+	}
+	return
+}
+
 func Pread(fd int, p []byte, offset int64) (n int, err error) {
 	var _p0 *byte
 	if len(p) > 0 {
@@ -737,6 +1134,26 @@ func Rename(from string, to string) (err error) {
 	return
 }
 
+func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(oldpath)
+	if err != nil {
+		return
+	}
+	var _p1 *byte
+	_p1, err = BytePtrFromString(newpath)
+	if err != nil {
+		return
+	}
+	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procRenameat)), 4, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0)
+	use(unsafe.Pointer(_p0))
+	use(unsafe.Pointer(_p1))
+	if e1 != 0 {
+		err = e1
+	}
+	return
+}
+
 func Rmdir(path string) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
@@ -784,6 +1201,18 @@ func Setgid(gid int) (err error) {
 	return
 }
 
+func Sethostname(p []byte) (err error) {
+	var _p0 *byte
+	if len(p) > 0 {
+		_p0 = &p[0]
+	}
+	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSethostname)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), 0, 0, 0, 0)
+	if e1 != 0 {
+		err = e1
+	}
+	return
+}
+
 func Setpgid(pid int, pgid int) (err error) {
 	_, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetpgid)), 2, uintptr(pid), uintptr(pgid), 0, 0, 0, 0)
 	if e1 != 0 {
@@ -891,6 +1320,15 @@ func Sync() (err error) {
 	return
 }
 
+func Times(tms *Tms) (ticks uintptr, err error) {
+	r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procTimes)), 1, uintptr(unsafe.Pointer(tms)), 0, 0, 0, 0, 0)
+	ticks = uintptr(r0)
+	if e1 != 0 {
+		err = e1
+	}
+	return
+}
+
 func Truncate(path string, length int64) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
@@ -921,12 +1359,34 @@ func Ftruncate(fd int, length int64) (err error) {
 	return
 }
 
-func Umask(newmask int) (oldmask int) {
-	r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procUmask)), 1, uintptr(newmask), 0, 0, 0, 0, 0)
+func Umask(mask int) (oldmask int) {
+	r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procUmask)), 1, uintptr(mask), 0, 0, 0, 0, 0)
 	oldmask = int(r0)
 	return
 }
 
+func Uname(buf *Utsname) (err error) {
+	_, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procUname)), 1, uintptr(unsafe.Pointer(buf)), 0, 0, 0, 0, 0)
+	if e1 != 0 {
+		err = e1
+	}
+	return
+}
+
+func Unmount(target string, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(target)
+	if err != nil {
+		return
+	}
+	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procumount)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0, 0, 0, 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = e1
+	}
+	return
+}
+
 func Unlink(path string) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
@@ -941,13 +1401,35 @@ func Unlink(path string) (err error) {
 	return
 }
 
-func Utimes(path string, times *[2]Timeval) (err error) {
+func Unlinkat(dirfd int, path string, flags int) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
 	if err != nil {
 		return
 	}
-	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procUtimes)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0, 0, 0, 0)
+	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procUnlinkat)), 3, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0, 0, 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = e1
+	}
+	return
+}
+
+func Ustat(dev int, ubuf *Ustat_t) (err error) {
+	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procUstat)), 2, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0, 0, 0, 0)
+	if e1 != 0 {
+		err = e1
+	}
+	return
+}
+
+func Utime(path string, buf *Utimbuf) (err error) {
+	var _p0 *byte
+	_p0, err = BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procUtime)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0, 0, 0, 0)
 	use(unsafe.Pointer(_p0))
 	if e1 != 0 {
 		err = e1
@@ -1046,14 +1528,6 @@ func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
 	return
 }
 
-func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
-	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgetsockname)), 3, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0)
-	if e1 != 0 {
-		err = e1
-	}
-	return
-}
-
 func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) {
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procsetsockopt)), 5, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0)
 	if e1 != 0 {
@@ -1075,9 +1549,9 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl
 	return
 }
 
-func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) {
-	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procrecvmsg)), 3, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags), 0, 0, 0)
-	n = int(r0)
+func sysconf(name int) (n int64, err error) {
+	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procsysconf)), 1, uintptr(name), 0, 0, 0, 0, 0)
+	n = int64(r0)
 	if e1 != 0 {
 		err = e1
 	}
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go
index e32e90d9a2c30509d219c16e3c4cc21d2d4d8fd7..2786773ba3764b562584583d1f5481a908ef8142 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go
@@ -1,4 +1,4 @@
-// mksysnum_darwin.pl /usr/include/sys/unix.h
+// mksysnum_darwin.pl /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.10.sdk/usr/include/sys/syscall.h
 // MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT
 
 // +build 386,darwin
@@ -42,7 +42,6 @@ const (
 	SYS_DUP                            = 41
 	SYS_PIPE                           = 42
 	SYS_GETEGID                        = 43
-	SYS_PROFIL                         = 44
 	SYS_SIGACTION                      = 46
 	SYS_GETGID                         = 47
 	SYS_SIGPROCMASK                    = 48
@@ -120,8 +119,9 @@ const (
 	SYS_QUOTACTL                       = 165
 	SYS_MOUNT                          = 167
 	SYS_CSOPS                          = 169
+	SYS_CSOPS_AUDITTOKEN               = 170
 	SYS_WAITID                         = 173
-	SYS_ADD_PROFIL                     = 176
+	SYS_KDEBUG_TRACE64                 = 179
 	SYS_KDEBUG_TRACE                   = 180
 	SYS_SETGID                         = 181
 	SYS_SETEGID                        = 182
@@ -141,21 +141,11 @@ const (
 	SYS_LSEEK                          = 199
 	SYS_TRUNCATE                       = 200
 	SYS_FTRUNCATE                      = 201
-	SYS___SYSCTL                       = 202
+	SYS_SYSCTL                         = 202
 	SYS_MLOCK                          = 203
 	SYS_MUNLOCK                        = 204
 	SYS_UNDELETE                       = 205
-	SYS_ATSOCKET                       = 206
-	SYS_ATGETMSG                       = 207
-	SYS_ATPUTMSG                       = 208
-	SYS_ATPSNDREQ                      = 209
-	SYS_ATPSNDRSP                      = 210
-	SYS_ATPGETREQ                      = 211
-	SYS_ATPGETRSP                      = 212
-	SYS_MKCOMPLEX                      = 216
-	SYS_STATV                          = 217
-	SYS_LSTATV                         = 218
-	SYS_FSTATV                         = 219
+	SYS_OPEN_DPROTECTED_NP             = 216
 	SYS_GETATTRLIST                    = 220
 	SYS_SETATTRLIST                    = 221
 	SYS_GETDIRENTRIESATTR              = 222
@@ -206,9 +196,7 @@ const (
 	SYS_SEM_WAIT                       = 271
 	SYS_SEM_TRYWAIT                    = 272
 	SYS_SEM_POST                       = 273
-	SYS_SEM_GETVALUE                   = 274
-	SYS_SEM_INIT                       = 275
-	SYS_SEM_DESTROY                    = 276
+	SYS_SYSCTLBYNAME                   = 274
 	SYS_OPEN_EXTENDED                  = 277
 	SYS_UMASK_EXTENDED                 = 278
 	SYS_STAT_EXTENDED                  = 279
@@ -282,8 +270,6 @@ const (
 	SYS_AUDITON                        = 351
 	SYS_GETAUID                        = 353
 	SYS_SETAUID                        = 354
-	SYS_GETAUDIT                       = 355
-	SYS_SETAUDIT                       = 356
 	SYS_GETAUDIT_ADDR                  = 357
 	SYS_SETAUDIT_ADDR                  = 358
 	SYS_AUDITCTL                       = 359
@@ -300,6 +286,7 @@ const (
 	SYS___OLD_SEMWAIT_SIGNAL           = 370
 	SYS___OLD_SEMWAIT_SIGNAL_NOCANCEL  = 371
 	SYS_THREAD_SELFID                  = 372
+	SYS_LEDGER                         = 373
 	SYS___MAC_EXECVE                   = 380
 	SYS___MAC_SYSCALL                  = 381
 	SYS___MAC_GET_FILE                 = 382
@@ -358,5 +345,54 @@ const (
 	SYS_PID_HIBERNATE                  = 435
 	SYS_PID_SHUTDOWN_SOCKETS           = 436
 	SYS_SHARED_REGION_MAP_AND_SLIDE_NP = 438
-	SYS_MAXSYSCALL                     = 439
+	SYS_KAS_INFO                       = 439
+	SYS_MEMORYSTATUS_CONTROL           = 440
+	SYS_GUARDED_OPEN_NP                = 441
+	SYS_GUARDED_CLOSE_NP               = 442
+	SYS_GUARDED_KQUEUE_NP              = 443
+	SYS_CHANGE_FDGUARD_NP              = 444
+	SYS_PROC_RLIMIT_CONTROL            = 446
+	SYS_CONNECTX                       = 447
+	SYS_DISCONNECTX                    = 448
+	SYS_PEELOFF                        = 449
+	SYS_SOCKET_DELEGATE                = 450
+	SYS_TELEMETRY                      = 451
+	SYS_PROC_UUID_POLICY               = 452
+	SYS_MEMORYSTATUS_GET_LEVEL         = 453
+	SYS_SYSTEM_OVERRIDE                = 454
+	SYS_VFS_PURGE                      = 455
+	SYS_SFI_CTL                        = 456
+	SYS_SFI_PIDCTL                     = 457
+	SYS_COALITION                      = 458
+	SYS_COALITION_INFO                 = 459
+	SYS_NECP_MATCH_POLICY              = 460
+	SYS_GETATTRLISTBULK                = 461
+	SYS_OPENAT                         = 463
+	SYS_OPENAT_NOCANCEL                = 464
+	SYS_RENAMEAT                       = 465
+	SYS_FACCESSAT                      = 466
+	SYS_FCHMODAT                       = 467
+	SYS_FCHOWNAT                       = 468
+	SYS_FSTATAT                        = 469
+	SYS_FSTATAT64                      = 470
+	SYS_LINKAT                         = 471
+	SYS_UNLINKAT                       = 472
+	SYS_READLINKAT                     = 473
+	SYS_SYMLINKAT                      = 474
+	SYS_MKDIRAT                        = 475
+	SYS_GETATTRLISTAT                  = 476
+	SYS_PROC_TRACE_LOG                 = 477
+	SYS_BSDTHREAD_CTL                  = 478
+	SYS_OPENBYID_NP                    = 479
+	SYS_RECVMSG_X                      = 480
+	SYS_SENDMSG_X                      = 481
+	SYS_THREAD_SELFUSAGE               = 482
+	SYS_CSRCTL                         = 483
+	SYS_GUARDED_OPEN_DPROTECTED_NP     = 484
+	SYS_GUARDED_WRITE_NP               = 485
+	SYS_GUARDED_PWRITE_NP              = 486
+	SYS_GUARDED_WRITEV_NP              = 487
+	SYS_RENAME_EXT                     = 488
+	SYS_MREMAP_ENCRYPTED               = 489
+	SYS_MAXSYSCALL                     = 490
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go
index 42792d71900bf7878f193eaaf60400680c7af42a..09de240c8f8257c73e51afefd5adf26e4baeefae 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go
@@ -1,4 +1,4 @@
-// mksysnum_darwin.pl /usr/include/sys/unix.h
+// mksysnum_darwin.pl /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.10.sdk/usr/include/sys/syscall.h
 // MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT
 
 // +build amd64,darwin
@@ -42,7 +42,6 @@ const (
 	SYS_DUP                            = 41
 	SYS_PIPE                           = 42
 	SYS_GETEGID                        = 43
-	SYS_PROFIL                         = 44
 	SYS_SIGACTION                      = 46
 	SYS_GETGID                         = 47
 	SYS_SIGPROCMASK                    = 48
@@ -120,8 +119,9 @@ const (
 	SYS_QUOTACTL                       = 165
 	SYS_MOUNT                          = 167
 	SYS_CSOPS                          = 169
+	SYS_CSOPS_AUDITTOKEN               = 170
 	SYS_WAITID                         = 173
-	SYS_ADD_PROFIL                     = 176
+	SYS_KDEBUG_TRACE64                 = 179
 	SYS_KDEBUG_TRACE                   = 180
 	SYS_SETGID                         = 181
 	SYS_SETEGID                        = 182
@@ -141,21 +141,11 @@ const (
 	SYS_LSEEK                          = 199
 	SYS_TRUNCATE                       = 200
 	SYS_FTRUNCATE                      = 201
-	SYS___SYSCTL                       = 202
+	SYS_SYSCTL                         = 202
 	SYS_MLOCK                          = 203
 	SYS_MUNLOCK                        = 204
 	SYS_UNDELETE                       = 205
-	SYS_ATSOCKET                       = 206
-	SYS_ATGETMSG                       = 207
-	SYS_ATPUTMSG                       = 208
-	SYS_ATPSNDREQ                      = 209
-	SYS_ATPSNDRSP                      = 210
-	SYS_ATPGETREQ                      = 211
-	SYS_ATPGETRSP                      = 212
-	SYS_MKCOMPLEX                      = 216
-	SYS_STATV                          = 217
-	SYS_LSTATV                         = 218
-	SYS_FSTATV                         = 219
+	SYS_OPEN_DPROTECTED_NP             = 216
 	SYS_GETATTRLIST                    = 220
 	SYS_SETATTRLIST                    = 221
 	SYS_GETDIRENTRIESATTR              = 222
@@ -206,9 +196,7 @@ const (
 	SYS_SEM_WAIT                       = 271
 	SYS_SEM_TRYWAIT                    = 272
 	SYS_SEM_POST                       = 273
-	SYS_SEM_GETVALUE                   = 274
-	SYS_SEM_INIT                       = 275
-	SYS_SEM_DESTROY                    = 276
+	SYS_SYSCTLBYNAME                   = 274
 	SYS_OPEN_EXTENDED                  = 277
 	SYS_UMASK_EXTENDED                 = 278
 	SYS_STAT_EXTENDED                  = 279
@@ -282,8 +270,6 @@ const (
 	SYS_AUDITON                        = 351
 	SYS_GETAUID                        = 353
 	SYS_SETAUID                        = 354
-	SYS_GETAUDIT                       = 355
-	SYS_SETAUDIT                       = 356
 	SYS_GETAUDIT_ADDR                  = 357
 	SYS_SETAUDIT_ADDR                  = 358
 	SYS_AUDITCTL                       = 359
@@ -300,6 +286,7 @@ const (
 	SYS___OLD_SEMWAIT_SIGNAL           = 370
 	SYS___OLD_SEMWAIT_SIGNAL_NOCANCEL  = 371
 	SYS_THREAD_SELFID                  = 372
+	SYS_LEDGER                         = 373
 	SYS___MAC_EXECVE                   = 380
 	SYS___MAC_SYSCALL                  = 381
 	SYS___MAC_GET_FILE                 = 382
@@ -358,5 +345,54 @@ const (
 	SYS_PID_HIBERNATE                  = 435
 	SYS_PID_SHUTDOWN_SOCKETS           = 436
 	SYS_SHARED_REGION_MAP_AND_SLIDE_NP = 438
-	SYS_MAXSYSCALL                     = 439
+	SYS_KAS_INFO                       = 439
+	SYS_MEMORYSTATUS_CONTROL           = 440
+	SYS_GUARDED_OPEN_NP                = 441
+	SYS_GUARDED_CLOSE_NP               = 442
+	SYS_GUARDED_KQUEUE_NP              = 443
+	SYS_CHANGE_FDGUARD_NP              = 444
+	SYS_PROC_RLIMIT_CONTROL            = 446
+	SYS_CONNECTX                       = 447
+	SYS_DISCONNECTX                    = 448
+	SYS_PEELOFF                        = 449
+	SYS_SOCKET_DELEGATE                = 450
+	SYS_TELEMETRY                      = 451
+	SYS_PROC_UUID_POLICY               = 452
+	SYS_MEMORYSTATUS_GET_LEVEL         = 453
+	SYS_SYSTEM_OVERRIDE                = 454
+	SYS_VFS_PURGE                      = 455
+	SYS_SFI_CTL                        = 456
+	SYS_SFI_PIDCTL                     = 457
+	SYS_COALITION                      = 458
+	SYS_COALITION_INFO                 = 459
+	SYS_NECP_MATCH_POLICY              = 460
+	SYS_GETATTRLISTBULK                = 461
+	SYS_OPENAT                         = 463
+	SYS_OPENAT_NOCANCEL                = 464
+	SYS_RENAMEAT                       = 465
+	SYS_FACCESSAT                      = 466
+	SYS_FCHMODAT                       = 467
+	SYS_FCHOWNAT                       = 468
+	SYS_FSTATAT                        = 469
+	SYS_FSTATAT64                      = 470
+	SYS_LINKAT                         = 471
+	SYS_UNLINKAT                       = 472
+	SYS_READLINKAT                     = 473
+	SYS_SYMLINKAT                      = 474
+	SYS_MKDIRAT                        = 475
+	SYS_GETATTRLISTAT                  = 476
+	SYS_PROC_TRACE_LOG                 = 477
+	SYS_BSDTHREAD_CTL                  = 478
+	SYS_OPENBYID_NP                    = 479
+	SYS_RECVMSG_X                      = 480
+	SYS_SENDMSG_X                      = 481
+	SYS_THREAD_SELFUSAGE               = 482
+	SYS_CSRCTL                         = 483
+	SYS_GUARDED_OPEN_DPROTECTED_NP     = 484
+	SYS_GUARDED_WRITE_NP               = 485
+	SYS_GUARDED_PWRITE_NP              = 486
+	SYS_GUARDED_WRITEV_NP              = 487
+	SYS_RENAME_EXT                     = 488
+	SYS_MREMAP_ENCRYPTED               = 489
+	SYS_MAXSYSCALL                     = 490
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go
index b1271c34710d17cb90a175ffaf332e58eb1ba553..26677ebbf5b769ddd22e4f0cbcf35f5a2d4055c2 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go
@@ -1,4 +1,4 @@
-// mksysnum_darwin.pl /usr/include/sys/syscall.h
+// mksysnum_darwin.pl /Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS8.4.sdk/usr/include/sys/syscall.h
 // MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT
 
 // +build arm64,darwin
@@ -121,6 +121,7 @@ const (
 	SYS_CSOPS                          = 169
 	SYS_CSOPS_AUDITTOKEN               = 170
 	SYS_WAITID                         = 173
+	SYS_KDEBUG_TRACE64                 = 179
 	SYS_KDEBUG_TRACE                   = 180
 	SYS_SETGID                         = 181
 	SYS_SETEGID                        = 182
@@ -140,17 +141,10 @@ const (
 	SYS_LSEEK                          = 199
 	SYS_TRUNCATE                       = 200
 	SYS_FTRUNCATE                      = 201
-	SYS___SYSCTL                       = 202
+	SYS_SYSCTL                         = 202
 	SYS_MLOCK                          = 203
 	SYS_MUNLOCK                        = 204
 	SYS_UNDELETE                       = 205
-	SYS_ATSOCKET                       = 206
-	SYS_ATGETMSG                       = 207
-	SYS_ATPUTMSG                       = 208
-	SYS_ATPSNDREQ                      = 209
-	SYS_ATPSNDRSP                      = 210
-	SYS_ATPGETREQ                      = 211
-	SYS_ATPGETRSP                      = 212
 	SYS_OPEN_DPROTECTED_NP             = 216
 	SYS_GETATTRLIST                    = 220
 	SYS_SETATTRLIST                    = 221
@@ -202,9 +196,7 @@ const (
 	SYS_SEM_WAIT                       = 271
 	SYS_SEM_TRYWAIT                    = 272
 	SYS_SEM_POST                       = 273
-	SYS_SEM_GETVALUE                   = 274
-	SYS_SEM_INIT                       = 275
-	SYS_SEM_DESTROY                    = 276
+	SYS_SYSCTLBYNAME                   = 274
 	SYS_OPEN_EXTENDED                  = 277
 	SYS_UMASK_EXTENDED                 = 278
 	SYS_STAT_EXTENDED                  = 279
@@ -354,5 +346,53 @@ const (
 	SYS_PID_SHUTDOWN_SOCKETS           = 436
 	SYS_SHARED_REGION_MAP_AND_SLIDE_NP = 438
 	SYS_KAS_INFO                       = 439
-	SYS_MAXSYSCALL                     = 440
+	SYS_MEMORYSTATUS_CONTROL           = 440
+	SYS_GUARDED_OPEN_NP                = 441
+	SYS_GUARDED_CLOSE_NP               = 442
+	SYS_GUARDED_KQUEUE_NP              = 443
+	SYS_CHANGE_FDGUARD_NP              = 444
+	SYS_PROC_RLIMIT_CONTROL            = 446
+	SYS_CONNECTX                       = 447
+	SYS_DISCONNECTX                    = 448
+	SYS_PEELOFF                        = 449
+	SYS_SOCKET_DELEGATE                = 450
+	SYS_TELEMETRY                      = 451
+	SYS_PROC_UUID_POLICY               = 452
+	SYS_MEMORYSTATUS_GET_LEVEL         = 453
+	SYS_SYSTEM_OVERRIDE                = 454
+	SYS_VFS_PURGE                      = 455
+	SYS_SFI_CTL                        = 456
+	SYS_SFI_PIDCTL                     = 457
+	SYS_COALITION                      = 458
+	SYS_COALITION_INFO                 = 459
+	SYS_NECP_MATCH_POLICY              = 460
+	SYS_GETATTRLISTBULK                = 461
+	SYS_OPENAT                         = 463
+	SYS_OPENAT_NOCANCEL                = 464
+	SYS_RENAMEAT                       = 465
+	SYS_FACCESSAT                      = 466
+	SYS_FCHMODAT                       = 467
+	SYS_FCHOWNAT                       = 468
+	SYS_FSTATAT                        = 469
+	SYS_FSTATAT64                      = 470
+	SYS_LINKAT                         = 471
+	SYS_UNLINKAT                       = 472
+	SYS_READLINKAT                     = 473
+	SYS_SYMLINKAT                      = 474
+	SYS_MKDIRAT                        = 475
+	SYS_GETATTRLISTAT                  = 476
+	SYS_PROC_TRACE_LOG                 = 477
+	SYS_BSDTHREAD_CTL                  = 478
+	SYS_OPENBYID_NP                    = 479
+	SYS_RECVMSG_X                      = 480
+	SYS_SENDMSG_X                      = 481
+	SYS_THREAD_SELFUSAGE               = 482
+	SYS_CSRCTL                         = 483
+	SYS_GUARDED_OPEN_DPROTECTED_NP     = 484
+	SYS_GUARDED_WRITE_NP               = 485
+	SYS_GUARDED_PWRITE_NP              = 486
+	SYS_GUARDED_WRITEV_NP              = 487
+	SYS_RENAME_EXT                     = 488
+	SYS_MREMAP_ENCRYPTED               = 489
+	SYS_MAXSYSCALL                     = 490
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
new file mode 100644
index 0000000000000000000000000000000000000000..5ffe1c7191beb4f33024090a1dc23bd4bf6f9958
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
@@ -0,0 +1,327 @@
+// mksysnum_linux.pl /usr/include/asm/unistd.h
+// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT
+
+// +build mips64,linux
+
+package unix
+
+const (
+	SYS_READ                   = 5000
+	SYS_WRITE                  = 5001
+	SYS_OPEN                   = 5002
+	SYS_CLOSE                  = 5003
+	SYS_STAT                   = 5004
+	SYS_FSTAT                  = 5005
+	SYS_LSTAT                  = 5006
+	SYS_POLL                   = 5007
+	SYS_LSEEK                  = 5008
+	SYS_MMAP                   = 5009
+	SYS_MPROTECT               = 5010
+	SYS_MUNMAP                 = 5011
+	SYS_BRK                    = 5012
+	SYS_RT_SIGACTION           = 5013
+	SYS_RT_SIGPROCMASK         = 5014
+	SYS_IOCTL                  = 5015
+	SYS_PREAD64                = 5016
+	SYS_PWRITE64               = 5017
+	SYS_READV                  = 5018
+	SYS_WRITEV                 = 5019
+	SYS_ACCESS                 = 5020
+	SYS_PIPE                   = 5021
+	SYS__NEWSELECT             = 5022
+	SYS_SCHED_YIELD            = 5023
+	SYS_MREMAP                 = 5024
+	SYS_MSYNC                  = 5025
+	SYS_MINCORE                = 5026
+	SYS_MADVISE                = 5027
+	SYS_SHMGET                 = 5028
+	SYS_SHMAT                  = 5029
+	SYS_SHMCTL                 = 5030
+	SYS_DUP                    = 5031
+	SYS_DUP2                   = 5032
+	SYS_PAUSE                  = 5033
+	SYS_NANOSLEEP              = 5034
+	SYS_GETITIMER              = 5035
+	SYS_SETITIMER              = 5036
+	SYS_ALARM                  = 5037
+	SYS_GETPID                 = 5038
+	SYS_SENDFILE               = 5039
+	SYS_SOCKET                 = 5040
+	SYS_CONNECT                = 5041
+	SYS_ACCEPT                 = 5042
+	SYS_SENDTO                 = 5043
+	SYS_RECVFROM               = 5044
+	SYS_SENDMSG                = 5045
+	SYS_RECVMSG                = 5046
+	SYS_SHUTDOWN               = 5047
+	SYS_BIND                   = 5048
+	SYS_LISTEN                 = 5049
+	SYS_GETSOCKNAME            = 5050
+	SYS_GETPEERNAME            = 5051
+	SYS_SOCKETPAIR             = 5052
+	SYS_SETSOCKOPT             = 5053
+	SYS_GETSOCKOPT             = 5054
+	SYS_CLONE                  = 5055
+	SYS_FORK                   = 5056
+	SYS_EXECVE                 = 5057
+	SYS_EXIT                   = 5058
+	SYS_WAIT4                  = 5059
+	SYS_KILL                   = 5060
+	SYS_UNAME                  = 5061
+	SYS_SEMGET                 = 5062
+	SYS_SEMOP                  = 5063
+	SYS_SEMCTL                 = 5064
+	SYS_SHMDT                  = 5065
+	SYS_MSGGET                 = 5066
+	SYS_MSGSND                 = 5067
+	SYS_MSGRCV                 = 5068
+	SYS_MSGCTL                 = 5069
+	SYS_FCNTL                  = 5070
+	SYS_FLOCK                  = 5071
+	SYS_FSYNC                  = 5072
+	SYS_FDATASYNC              = 5073
+	SYS_TRUNCATE               = 5074
+	SYS_FTRUNCATE              = 5075
+	SYS_GETDENTS               = 5076
+	SYS_GETCWD                 = 5077
+	SYS_CHDIR                  = 5078
+	SYS_FCHDIR                 = 5079
+	SYS_RENAME                 = 5080
+	SYS_MKDIR                  = 5081
+	SYS_RMDIR                  = 5082
+	SYS_CREAT                  = 5083
+	SYS_LINK                   = 5084
+	SYS_UNLINK                 = 5085
+	SYS_SYMLINK                = 5086
+	SYS_READLINK               = 5087
+	SYS_CHMOD                  = 5088
+	SYS_FCHMOD                 = 5089
+	SYS_CHOWN                  = 5090
+	SYS_FCHOWN                 = 5091
+	SYS_LCHOWN                 = 5092
+	SYS_UMASK                  = 5093
+	SYS_GETTIMEOFDAY           = 5094
+	SYS_GETRLIMIT              = 5095
+	SYS_GETRUSAGE              = 5096
+	SYS_SYSINFO                = 5097
+	SYS_TIMES                  = 5098
+	SYS_PTRACE                 = 5099
+	SYS_GETUID                 = 5100
+	SYS_SYSLOG                 = 5101
+	SYS_GETGID                 = 5102
+	SYS_SETUID                 = 5103
+	SYS_SETGID                 = 5104
+	SYS_GETEUID                = 5105
+	SYS_GETEGID                = 5106
+	SYS_SETPGID                = 5107
+	SYS_GETPPID                = 5108
+	SYS_GETPGRP                = 5109
+	SYS_SETSID                 = 5110
+	SYS_SETREUID               = 5111
+	SYS_SETREGID               = 5112
+	SYS_GETGROUPS              = 5113
+	SYS_SETGROUPS              = 5114
+	SYS_SETRESUID              = 5115
+	SYS_GETRESUID              = 5116
+	SYS_SETRESGID              = 5117
+	SYS_GETRESGID              = 5118
+	SYS_GETPGID                = 5119
+	SYS_SETFSUID               = 5120
+	SYS_SETFSGID               = 5121
+	SYS_GETSID                 = 5122
+	SYS_CAPGET                 = 5123
+	SYS_CAPSET                 = 5124
+	SYS_RT_SIGPENDING          = 5125
+	SYS_RT_SIGTIMEDWAIT        = 5126
+	SYS_RT_SIGQUEUEINFO        = 5127
+	SYS_RT_SIGSUSPEND          = 5128
+	SYS_SIGALTSTACK            = 5129
+	SYS_UTIME                  = 5130
+	SYS_MKNOD                  = 5131
+	SYS_PERSONALITY            = 5132
+	SYS_USTAT                  = 5133
+	SYS_STATFS                 = 5134
+	SYS_FSTATFS                = 5135
+	SYS_SYSFS                  = 5136
+	SYS_GETPRIORITY            = 5137
+	SYS_SETPRIORITY            = 5138
+	SYS_SCHED_SETPARAM         = 5139
+	SYS_SCHED_GETPARAM         = 5140
+	SYS_SCHED_SETSCHEDULER     = 5141
+	SYS_SCHED_GETSCHEDULER     = 5142
+	SYS_SCHED_GET_PRIORITY_MAX = 5143
+	SYS_SCHED_GET_PRIORITY_MIN = 5144
+	SYS_SCHED_RR_GET_INTERVAL  = 5145
+	SYS_MLOCK                  = 5146
+	SYS_MUNLOCK                = 5147
+	SYS_MLOCKALL               = 5148
+	SYS_MUNLOCKALL             = 5149
+	SYS_VHANGUP                = 5150
+	SYS_PIVOT_ROOT             = 5151
+	SYS__SYSCTL                = 5152
+	SYS_PRCTL                  = 5153
+	SYS_ADJTIMEX               = 5154
+	SYS_SETRLIMIT              = 5155
+	SYS_CHROOT                 = 5156
+	SYS_SYNC                   = 5157
+	SYS_ACCT                   = 5158
+	SYS_SETTIMEOFDAY           = 5159
+	SYS_MOUNT                  = 5160
+	SYS_UMOUNT2                = 5161
+	SYS_SWAPON                 = 5162
+	SYS_SWAPOFF                = 5163
+	SYS_REBOOT                 = 5164
+	SYS_SETHOSTNAME            = 5165
+	SYS_SETDOMAINNAME          = 5166
+	SYS_CREATE_MODULE          = 5167
+	SYS_INIT_MODULE            = 5168
+	SYS_DELETE_MODULE          = 5169
+	SYS_GET_KERNEL_SYMS        = 5170
+	SYS_QUERY_MODULE           = 5171
+	SYS_QUOTACTL               = 5172
+	SYS_NFSSERVCTL             = 5173
+	SYS_GETPMSG                = 5174
+	SYS_PUTPMSG                = 5175
+	SYS_AFS_SYSCALL            = 5176
+	SYS_RESERVED177            = 5177
+	SYS_GETTID                 = 5178
+	SYS_READAHEAD              = 5179
+	SYS_SETXATTR               = 5180
+	SYS_LSETXATTR              = 5181
+	SYS_FSETXATTR              = 5182
+	SYS_GETXATTR               = 5183
+	SYS_LGETXATTR              = 5184
+	SYS_FGETXATTR              = 5185
+	SYS_LISTXATTR              = 5186
+	SYS_LLISTXATTR             = 5187
+	SYS_FLISTXATTR             = 5188
+	SYS_REMOVEXATTR            = 5189
+	SYS_LREMOVEXATTR           = 5190
+	SYS_FREMOVEXATTR           = 5191
+	SYS_TKILL                  = 5192
+	SYS_RESERVED193            = 5193
+	SYS_FUTEX                  = 5194
+	SYS_SCHED_SETAFFINITY      = 5195
+	SYS_SCHED_GETAFFINITY      = 5196
+	SYS_CACHEFLUSH             = 5197
+	SYS_CACHECTL               = 5198
+	SYS_SYSMIPS                = 5199
+	SYS_IO_SETUP               = 5200
+	SYS_IO_DESTROY             = 5201
+	SYS_IO_GETEVENTS           = 5202
+	SYS_IO_SUBMIT              = 5203
+	SYS_IO_CANCEL              = 5204
+	SYS_EXIT_GROUP             = 5205
+	SYS_LOOKUP_DCOOKIE         = 5206
+	SYS_EPOLL_CREATE           = 5207
+	SYS_EPOLL_CTL              = 5208
+	SYS_EPOLL_WAIT             = 5209
+	SYS_REMAP_FILE_PAGES       = 5210
+	SYS_RT_SIGRETURN           = 5211
+	SYS_SET_TID_ADDRESS        = 5212
+	SYS_RESTART_SYSCALL        = 5213
+	SYS_SEMTIMEDOP             = 5214
+	SYS_FADVISE64              = 5215
+	SYS_TIMER_CREATE           = 5216
+	SYS_TIMER_SETTIME          = 5217
+	SYS_TIMER_GETTIME          = 5218
+	SYS_TIMER_GETOVERRUN       = 5219
+	SYS_TIMER_DELETE           = 5220
+	SYS_CLOCK_SETTIME          = 5221
+	SYS_CLOCK_GETTIME          = 5222
+	SYS_CLOCK_GETRES           = 5223
+	SYS_CLOCK_NANOSLEEP        = 5224
+	SYS_TGKILL                 = 5225
+	SYS_UTIMES                 = 5226
+	SYS_MBIND                  = 5227
+	SYS_GET_MEMPOLICY          = 5228
+	SYS_SET_MEMPOLICY          = 5229
+	SYS_MQ_OPEN                = 5230
+	SYS_MQ_UNLINK              = 5231
+	SYS_MQ_TIMEDSEND           = 5232
+	SYS_MQ_TIMEDRECEIVE        = 5233
+	SYS_MQ_NOTIFY              = 5234
+	SYS_MQ_GETSETATTR          = 5235
+	SYS_VSERVER                = 5236
+	SYS_WAITID                 = 5237
+	SYS_ADD_KEY                = 5239
+	SYS_REQUEST_KEY            = 5240
+	SYS_KEYCTL                 = 5241
+	SYS_SET_THREAD_AREA        = 5242
+	SYS_INOTIFY_INIT           = 5243
+	SYS_INOTIFY_ADD_WATCH      = 5244
+	SYS_INOTIFY_RM_WATCH       = 5245
+	SYS_MIGRATE_PAGES          = 5246
+	SYS_OPENAT                 = 5247
+	SYS_MKDIRAT                = 5248
+	SYS_MKNODAT                = 5249
+	SYS_FCHOWNAT               = 5250
+	SYS_FUTIMESAT              = 5251
+	SYS_NEWFSTATAT             = 5252
+	SYS_UNLINKAT               = 5253
+	SYS_RENAMEAT               = 5254
+	SYS_LINKAT                 = 5255
+	SYS_SYMLINKAT              = 5256
+	SYS_READLINKAT             = 5257
+	SYS_FCHMODAT               = 5258
+	SYS_FACCESSAT              = 5259
+	SYS_PSELECT6               = 5260
+	SYS_PPOLL                  = 5261
+	SYS_UNSHARE                = 5262
+	SYS_SPLICE                 = 5263
+	SYS_SYNC_FILE_RANGE        = 5264
+	SYS_TEE                    = 5265
+	SYS_VMSPLICE               = 5266
+	SYS_MOVE_PAGES             = 5267
+	SYS_SET_ROBUST_LIST        = 5268
+	SYS_GET_ROBUST_LIST        = 5269
+	SYS_KEXEC_LOAD             = 5270
+	SYS_GETCPU                 = 5271
+	SYS_EPOLL_PWAIT            = 5272
+	SYS_IOPRIO_SET             = 5273
+	SYS_IOPRIO_GET             = 5274
+	SYS_UTIMENSAT              = 5275
+	SYS_SIGNALFD               = 5276
+	SYS_TIMERFD                = 5277
+	SYS_EVENTFD                = 5278
+	SYS_FALLOCATE              = 5279
+	SYS_TIMERFD_CREATE         = 5280
+	SYS_TIMERFD_GETTIME        = 5281
+	SYS_TIMERFD_SETTIME        = 5282
+	SYS_SIGNALFD4              = 5283
+	SYS_EVENTFD2               = 5284
+	SYS_EPOLL_CREATE1          = 5285
+	SYS_DUP3                   = 5286
+	SYS_PIPE2                  = 5287
+	SYS_INOTIFY_INIT1          = 5288
+	SYS_PREADV                 = 5289
+	SYS_PWRITEV                = 5290
+	SYS_RT_TGSIGQUEUEINFO      = 5291
+	SYS_PERF_EVENT_OPEN        = 5292
+	SYS_ACCEPT4                = 5293
+	SYS_RECVMMSG               = 5294
+	SYS_FANOTIFY_INIT          = 5295
+	SYS_FANOTIFY_MARK          = 5296
+	SYS_PRLIMIT64              = 5297
+	SYS_NAME_TO_HANDLE_AT      = 5298
+	SYS_OPEN_BY_HANDLE_AT      = 5299
+	SYS_CLOCK_ADJTIME          = 5300
+	SYS_SYNCFS                 = 5301
+	SYS_SENDMMSG               = 5302
+	SYS_SETNS                  = 5303
+	SYS_PROCESS_VM_READV       = 5304
+	SYS_PROCESS_VM_WRITEV      = 5305
+	SYS_KCMP                   = 5306
+	SYS_FINIT_MODULE           = 5307
+	SYS_GETDENTS64             = 5308
+	SYS_SCHED_SETATTR          = 5309
+	SYS_SCHED_GETATTR          = 5310
+	SYS_RENAMEAT2              = 5311
+	SYS_SECCOMP                = 5312
+	SYS_GETRANDOM              = 5313
+	SYS_MEMFD_CREATE           = 5314
+	SYS_BPF                    = 5315
+	SYS_EXECVEAT               = 5316
+	SYS_USERFAULTFD            = 5317
+	SYS_MEMBARRIER             = 5318
+)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
new file mode 100644
index 0000000000000000000000000000000000000000..d192b940ce311d075ae594eed1e5956b2bc7dd3c
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
@@ -0,0 +1,327 @@
+// mksysnum_linux.pl /usr/include/asm/unistd.h
+// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT
+
+// +build mips64le,linux
+
+package unix
+
+const (
+	SYS_READ                   = 5000
+	SYS_WRITE                  = 5001
+	SYS_OPEN                   = 5002
+	SYS_CLOSE                  = 5003
+	SYS_STAT                   = 5004
+	SYS_FSTAT                  = 5005
+	SYS_LSTAT                  = 5006
+	SYS_POLL                   = 5007
+	SYS_LSEEK                  = 5008
+	SYS_MMAP                   = 5009
+	SYS_MPROTECT               = 5010
+	SYS_MUNMAP                 = 5011
+	SYS_BRK                    = 5012
+	SYS_RT_SIGACTION           = 5013
+	SYS_RT_SIGPROCMASK         = 5014
+	SYS_IOCTL                  = 5015
+	SYS_PREAD64                = 5016
+	SYS_PWRITE64               = 5017
+	SYS_READV                  = 5018
+	SYS_WRITEV                 = 5019
+	SYS_ACCESS                 = 5020
+	SYS_PIPE                   = 5021
+	SYS__NEWSELECT             = 5022
+	SYS_SCHED_YIELD            = 5023
+	SYS_MREMAP                 = 5024
+	SYS_MSYNC                  = 5025
+	SYS_MINCORE                = 5026
+	SYS_MADVISE                = 5027
+	SYS_SHMGET                 = 5028
+	SYS_SHMAT                  = 5029
+	SYS_SHMCTL                 = 5030
+	SYS_DUP                    = 5031
+	SYS_DUP2                   = 5032
+	SYS_PAUSE                  = 5033
+	SYS_NANOSLEEP              = 5034
+	SYS_GETITIMER              = 5035
+	SYS_SETITIMER              = 5036
+	SYS_ALARM                  = 5037
+	SYS_GETPID                 = 5038
+	SYS_SENDFILE               = 5039
+	SYS_SOCKET                 = 5040
+	SYS_CONNECT                = 5041
+	SYS_ACCEPT                 = 5042
+	SYS_SENDTO                 = 5043
+	SYS_RECVFROM               = 5044
+	SYS_SENDMSG                = 5045
+	SYS_RECVMSG                = 5046
+	SYS_SHUTDOWN               = 5047
+	SYS_BIND                   = 5048
+	SYS_LISTEN                 = 5049
+	SYS_GETSOCKNAME            = 5050
+	SYS_GETPEERNAME            = 5051
+	SYS_SOCKETPAIR             = 5052
+	SYS_SETSOCKOPT             = 5053
+	SYS_GETSOCKOPT             = 5054
+	SYS_CLONE                  = 5055
+	SYS_FORK                   = 5056
+	SYS_EXECVE                 = 5057
+	SYS_EXIT                   = 5058
+	SYS_WAIT4                  = 5059
+	SYS_KILL                   = 5060
+	SYS_UNAME                  = 5061
+	SYS_SEMGET                 = 5062
+	SYS_SEMOP                  = 5063
+	SYS_SEMCTL                 = 5064
+	SYS_SHMDT                  = 5065
+	SYS_MSGGET                 = 5066
+	SYS_MSGSND                 = 5067
+	SYS_MSGRCV                 = 5068
+	SYS_MSGCTL                 = 5069
+	SYS_FCNTL                  = 5070
+	SYS_FLOCK                  = 5071
+	SYS_FSYNC                  = 5072
+	SYS_FDATASYNC              = 5073
+	SYS_TRUNCATE               = 5074
+	SYS_FTRUNCATE              = 5075
+	SYS_GETDENTS               = 5076
+	SYS_GETCWD                 = 5077
+	SYS_CHDIR                  = 5078
+	SYS_FCHDIR                 = 5079
+	SYS_RENAME                 = 5080
+	SYS_MKDIR                  = 5081
+	SYS_RMDIR                  = 5082
+	SYS_CREAT                  = 5083
+	SYS_LINK                   = 5084
+	SYS_UNLINK                 = 5085
+	SYS_SYMLINK                = 5086
+	SYS_READLINK               = 5087
+	SYS_CHMOD                  = 5088
+	SYS_FCHMOD                 = 5089
+	SYS_CHOWN                  = 5090
+	SYS_FCHOWN                 = 5091
+	SYS_LCHOWN                 = 5092
+	SYS_UMASK                  = 5093
+	SYS_GETTIMEOFDAY           = 5094
+	SYS_GETRLIMIT              = 5095
+	SYS_GETRUSAGE              = 5096
+	SYS_SYSINFO                = 5097
+	SYS_TIMES                  = 5098
+	SYS_PTRACE                 = 5099
+	SYS_GETUID                 = 5100
+	SYS_SYSLOG                 = 5101
+	SYS_GETGID                 = 5102
+	SYS_SETUID                 = 5103
+	SYS_SETGID                 = 5104
+	SYS_GETEUID                = 5105
+	SYS_GETEGID                = 5106
+	SYS_SETPGID                = 5107
+	SYS_GETPPID                = 5108
+	SYS_GETPGRP                = 5109
+	SYS_SETSID                 = 5110
+	SYS_SETREUID               = 5111
+	SYS_SETREGID               = 5112
+	SYS_GETGROUPS              = 5113
+	SYS_SETGROUPS              = 5114
+	SYS_SETRESUID              = 5115
+	SYS_GETRESUID              = 5116
+	SYS_SETRESGID              = 5117
+	SYS_GETRESGID              = 5118
+	SYS_GETPGID                = 5119
+	SYS_SETFSUID               = 5120
+	SYS_SETFSGID               = 5121
+	SYS_GETSID                 = 5122
+	SYS_CAPGET                 = 5123
+	SYS_CAPSET                 = 5124
+	SYS_RT_SIGPENDING          = 5125
+	SYS_RT_SIGTIMEDWAIT        = 5126
+	SYS_RT_SIGQUEUEINFO        = 5127
+	SYS_RT_SIGSUSPEND          = 5128
+	SYS_SIGALTSTACK            = 5129
+	SYS_UTIME                  = 5130
+	SYS_MKNOD                  = 5131
+	SYS_PERSONALITY            = 5132
+	SYS_USTAT                  = 5133
+	SYS_STATFS                 = 5134
+	SYS_FSTATFS                = 5135
+	SYS_SYSFS                  = 5136
+	SYS_GETPRIORITY            = 5137
+	SYS_SETPRIORITY            = 5138
+	SYS_SCHED_SETPARAM         = 5139
+	SYS_SCHED_GETPARAM         = 5140
+	SYS_SCHED_SETSCHEDULER     = 5141
+	SYS_SCHED_GETSCHEDULER     = 5142
+	SYS_SCHED_GET_PRIORITY_MAX = 5143
+	SYS_SCHED_GET_PRIORITY_MIN = 5144
+	SYS_SCHED_RR_GET_INTERVAL  = 5145
+	SYS_MLOCK                  = 5146
+	SYS_MUNLOCK                = 5147
+	SYS_MLOCKALL               = 5148
+	SYS_MUNLOCKALL             = 5149
+	SYS_VHANGUP                = 5150
+	SYS_PIVOT_ROOT             = 5151
+	SYS__SYSCTL                = 5152
+	SYS_PRCTL                  = 5153
+	SYS_ADJTIMEX               = 5154
+	SYS_SETRLIMIT              = 5155
+	SYS_CHROOT                 = 5156
+	SYS_SYNC                   = 5157
+	SYS_ACCT                   = 5158
+	SYS_SETTIMEOFDAY           = 5159
+	SYS_MOUNT                  = 5160
+	SYS_UMOUNT2                = 5161
+	SYS_SWAPON                 = 5162
+	SYS_SWAPOFF                = 5163
+	SYS_REBOOT                 = 5164
+	SYS_SETHOSTNAME            = 5165
+	SYS_SETDOMAINNAME          = 5166
+	SYS_CREATE_MODULE          = 5167
+	SYS_INIT_MODULE            = 5168
+	SYS_DELETE_MODULE          = 5169
+	SYS_GET_KERNEL_SYMS        = 5170
+	SYS_QUERY_MODULE           = 5171
+	SYS_QUOTACTL               = 5172
+	SYS_NFSSERVCTL             = 5173
+	SYS_GETPMSG                = 5174
+	SYS_PUTPMSG                = 5175
+	SYS_AFS_SYSCALL            = 5176
+	SYS_RESERVED177            = 5177
+	SYS_GETTID                 = 5178
+	SYS_READAHEAD              = 5179
+	SYS_SETXATTR               = 5180
+	SYS_LSETXATTR              = 5181
+	SYS_FSETXATTR              = 5182
+	SYS_GETXATTR               = 5183
+	SYS_LGETXATTR              = 5184
+	SYS_FGETXATTR              = 5185
+	SYS_LISTXATTR              = 5186
+	SYS_LLISTXATTR             = 5187
+	SYS_FLISTXATTR             = 5188
+	SYS_REMOVEXATTR            = 5189
+	SYS_LREMOVEXATTR           = 5190
+	SYS_FREMOVEXATTR           = 5191
+	SYS_TKILL                  = 5192
+	SYS_RESERVED193            = 5193
+	SYS_FUTEX                  = 5194
+	SYS_SCHED_SETAFFINITY      = 5195
+	SYS_SCHED_GETAFFINITY      = 5196
+	SYS_CACHEFLUSH             = 5197
+	SYS_CACHECTL               = 5198
+	SYS_SYSMIPS                = 5199
+	SYS_IO_SETUP               = 5200
+	SYS_IO_DESTROY             = 5201
+	SYS_IO_GETEVENTS           = 5202
+	SYS_IO_SUBMIT              = 5203
+	SYS_IO_CANCEL              = 5204
+	SYS_EXIT_GROUP             = 5205
+	SYS_LOOKUP_DCOOKIE         = 5206
+	SYS_EPOLL_CREATE           = 5207
+	SYS_EPOLL_CTL              = 5208
+	SYS_EPOLL_WAIT             = 5209
+	SYS_REMAP_FILE_PAGES       = 5210
+	SYS_RT_SIGRETURN           = 5211
+	SYS_SET_TID_ADDRESS        = 5212
+	SYS_RESTART_SYSCALL        = 5213
+	SYS_SEMTIMEDOP             = 5214
+	SYS_FADVISE64              = 5215
+	SYS_TIMER_CREATE           = 5216
+	SYS_TIMER_SETTIME          = 5217
+	SYS_TIMER_GETTIME          = 5218
+	SYS_TIMER_GETOVERRUN       = 5219
+	SYS_TIMER_DELETE           = 5220
+	SYS_CLOCK_SETTIME          = 5221
+	SYS_CLOCK_GETTIME          = 5222
+	SYS_CLOCK_GETRES           = 5223
+	SYS_CLOCK_NANOSLEEP        = 5224
+	SYS_TGKILL                 = 5225
+	SYS_UTIMES                 = 5226
+	SYS_MBIND                  = 5227
+	SYS_GET_MEMPOLICY          = 5228
+	SYS_SET_MEMPOLICY          = 5229
+	SYS_MQ_OPEN                = 5230
+	SYS_MQ_UNLINK              = 5231
+	SYS_MQ_TIMEDSEND           = 5232
+	SYS_MQ_TIMEDRECEIVE        = 5233
+	SYS_MQ_NOTIFY              = 5234
+	SYS_MQ_GETSETATTR          = 5235
+	SYS_VSERVER                = 5236
+	SYS_WAITID                 = 5237
+	SYS_ADD_KEY                = 5239
+	SYS_REQUEST_KEY            = 5240
+	SYS_KEYCTL                 = 5241
+	SYS_SET_THREAD_AREA        = 5242
+	SYS_INOTIFY_INIT           = 5243
+	SYS_INOTIFY_ADD_WATCH      = 5244
+	SYS_INOTIFY_RM_WATCH       = 5245
+	SYS_MIGRATE_PAGES          = 5246
+	SYS_OPENAT                 = 5247
+	SYS_MKDIRAT                = 5248
+	SYS_MKNODAT                = 5249
+	SYS_FCHOWNAT               = 5250
+	SYS_FUTIMESAT              = 5251
+	SYS_NEWFSTATAT             = 5252
+	SYS_UNLINKAT               = 5253
+	SYS_RENAMEAT               = 5254
+	SYS_LINKAT                 = 5255
+	SYS_SYMLINKAT              = 5256
+	SYS_READLINKAT             = 5257
+	SYS_FCHMODAT               = 5258
+	SYS_FACCESSAT              = 5259
+	SYS_PSELECT6               = 5260
+	SYS_PPOLL                  = 5261
+	SYS_UNSHARE                = 5262
+	SYS_SPLICE                 = 5263
+	SYS_SYNC_FILE_RANGE        = 5264
+	SYS_TEE                    = 5265
+	SYS_VMSPLICE               = 5266
+	SYS_MOVE_PAGES             = 5267
+	SYS_SET_ROBUST_LIST        = 5268
+	SYS_GET_ROBUST_LIST        = 5269
+	SYS_KEXEC_LOAD             = 5270
+	SYS_GETCPU                 = 5271
+	SYS_EPOLL_PWAIT            = 5272
+	SYS_IOPRIO_SET             = 5273
+	SYS_IOPRIO_GET             = 5274
+	SYS_UTIMENSAT              = 5275
+	SYS_SIGNALFD               = 5276
+	SYS_TIMERFD                = 5277
+	SYS_EVENTFD                = 5278
+	SYS_FALLOCATE              = 5279
+	SYS_TIMERFD_CREATE         = 5280
+	SYS_TIMERFD_GETTIME        = 5281
+	SYS_TIMERFD_SETTIME        = 5282
+	SYS_SIGNALFD4              = 5283
+	SYS_EVENTFD2               = 5284
+	SYS_EPOLL_CREATE1          = 5285
+	SYS_DUP3                   = 5286
+	SYS_PIPE2                  = 5287
+	SYS_INOTIFY_INIT1          = 5288
+	SYS_PREADV                 = 5289
+	SYS_PWRITEV                = 5290
+	SYS_RT_TGSIGQUEUEINFO      = 5291
+	SYS_PERF_EVENT_OPEN        = 5292
+	SYS_ACCEPT4                = 5293
+	SYS_RECVMMSG               = 5294
+	SYS_FANOTIFY_INIT          = 5295
+	SYS_FANOTIFY_MARK          = 5296
+	SYS_PRLIMIT64              = 5297
+	SYS_NAME_TO_HANDLE_AT      = 5298
+	SYS_OPEN_BY_HANDLE_AT      = 5299
+	SYS_CLOCK_ADJTIME          = 5300
+	SYS_SYNCFS                 = 5301
+	SYS_SENDMMSG               = 5302
+	SYS_SETNS                  = 5303
+	SYS_PROCESS_VM_READV       = 5304
+	SYS_PROCESS_VM_WRITEV      = 5305
+	SYS_KCMP                   = 5306
+	SYS_FINIT_MODULE           = 5307
+	SYS_GETDENTS64             = 5308
+	SYS_SCHED_SETATTR          = 5309
+	SYS_SCHED_GETATTR          = 5310
+	SYS_RENAMEAT2              = 5311
+	SYS_SECCOMP                = 5312
+	SYS_GETRANDOM              = 5313
+	SYS_MEMFD_CREATE           = 5314
+	SYS_BPF                    = 5315
+	SYS_EXECVEAT               = 5316
+	SYS_USERFAULTFD            = 5317
+	SYS_MEMBARRIER             = 5318
+)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go
index 7b1c2c1940ec4d7949419a02b53047ae812554da..2de1d44e281dd1b8b3822d6c7eac070d0e3e01fc 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go
@@ -1,8 +1,7 @@
+// +build 386,darwin
 // Created by cgo -godefs - DO NOT EDIT
 // cgo -godefs types_darwin.go
 
-// +build 386,darwin
-
 package unix
 
 const (
@@ -30,7 +29,7 @@ type Timeval struct {
 	Usec int32
 }
 
-type Timeval32 [0]byte
+type Timeval32 struct{}
 
 type Rusage struct {
 	Utime    Timeval
diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go
index 9d07ef6e39f032d0c237ccf750de3020dd1577f3..044657878c854f5c98d12b6e88364370667f8a7d 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go
@@ -1,8 +1,7 @@
+// +build amd64,darwin
 // Created by cgo -godefs - DO NOT EDIT
 // cgo -godefs types_darwin.go
 
-// +build amd64,darwin
-
 package unix
 
 const (
@@ -132,9 +131,9 @@ type Fbootstraptransfer_t struct {
 }
 
 type Log2phys_t struct {
-	Flags       uint32
-	Contigbytes int64
-	Devoffset   int64
+	Flags     uint32
+	Pad_cgo_0 [8]byte
+	Pad_cgo_1 [8]byte
 }
 
 type Fsid struct {
@@ -456,3 +455,8 @@ type Termios struct {
 	Ispeed    uint64
 	Ospeed    uint64
 }
+
+const (
+	AT_FDCWD            = -0x2
+	AT_SYMLINK_NOFOLLOW = 0x20
+)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go
index 7329d07d636990ad44200aafbcd3215f94d4a894..85d56eabd3fa4c1167ea86afdad3d125d90876db 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go
@@ -1,8 +1,7 @@
+// +build arm64,darwin
 // Created by cgo -godefs - DO NOT EDIT
 // cgo -godefs types_darwin.go
 
-// +build arm64,darwin
-
 package unix
 
 const (
@@ -132,9 +131,9 @@ type Fbootstraptransfer_t struct {
 }
 
 type Log2phys_t struct {
-	Flags       uint32
-	Contigbytes int64
-	Devoffset   int64
+	Flags     uint32
+	Pad_cgo_0 [8]byte
+	Pad_cgo_1 [8]byte
 }
 
 type Fsid struct {
diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go
index 330c0e6356f9604317b073a6895acabcd0a232ec..8cf30947b412c6debd938fa6cf1bd8334af7454f 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go
@@ -1,8 +1,7 @@
+// +build 386,freebsd
 // Created by cgo -godefs - DO NOT EDIT
 // cgo -godefs types_freebsd.go
 
-// +build 386,freebsd
-
 package unix
 
 const (
@@ -140,6 +139,15 @@ type Fsid struct {
 	Val [2]int32
 }
 
+const (
+	FADV_NORMAL     = 0x0
+	FADV_RANDOM     = 0x1
+	FADV_SEQUENTIAL = 0x2
+	FADV_WILLNEED   = 0x3
+	FADV_DONTNEED   = 0x4
+	FADV_NOREUSE    = 0x5
+)
+
 type RawSockaddrInet4 struct {
 	Len    uint8
 	Family uint8
diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go
index 93395924c48f9650e2da84e2176a45183137e827..e5feb207be60b399e534dd9c18e5b24ce2c088de 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go
@@ -1,8 +1,7 @@
+// +build amd64,freebsd
 // Created by cgo -godefs - DO NOT EDIT
 // cgo -godefs types_freebsd.go
 
-// +build amd64,freebsd
-
 package unix
 
 const (
@@ -140,6 +139,15 @@ type Fsid struct {
 	Val [2]int32
 }
 
+const (
+	FADV_NORMAL     = 0x0
+	FADV_RANDOM     = 0x1
+	FADV_SEQUENTIAL = 0x2
+	FADV_WILLNEED   = 0x3
+	FADV_DONTNEED   = 0x4
+	FADV_NOREUSE    = 0x5
+)
+
 type RawSockaddrInet4 struct {
 	Len    uint8
 	Family uint8
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
index 9a58381b4dca8a45cc455312093ab8ee96fed854..650bf22f8c10729df0543631e8daa20dba2cefa4 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
@@ -1,8 +1,7 @@
+// +build 386,linux
 // Created by cgo -godefs - DO NOT EDIT
 // cgo -godefs types_linux.go
 
-// +build 386,linux
-
 package unix
 
 const (
@@ -575,18 +574,18 @@ type EpollEvent struct {
 
 const (
 	AT_FDCWD            = -0x64
-	AT_SYMLINK_NOFOLLOW = 0x100
 	AT_REMOVEDIR        = 0x200
+	AT_SYMLINK_FOLLOW   = 0x400
+	AT_SYMLINK_NOFOLLOW = 0x100
 )
 
 type Termios struct {
-	Iflag     uint32
-	Oflag     uint32
-	Cflag     uint32
-	Lflag     uint32
-	Line      uint8
-	Cc        [32]uint8
-	Pad_cgo_0 [3]byte
-	Ispeed    uint32
-	Ospeed    uint32
+	Iflag  uint32
+	Oflag  uint32
+	Cflag  uint32
+	Lflag  uint32
+	Line   uint8
+	Cc     [19]uint8
+	Ispeed uint32
+	Ospeed uint32
 }
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
index f1937a627562e17106bb6af75520481b170c13e0..539ec3ca066b2c5aee25db62d9ff7c4ae50502f9 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
@@ -1,8 +1,7 @@
+// +build amd64,linux
 // Created by cgo -godefs - DO NOT EDIT
 // cgo -godefs types_linux.go
 
-// +build amd64,linux
-
 package unix
 
 const (
@@ -593,18 +592,18 @@ type EpollEvent struct {
 
 const (
 	AT_FDCWD            = -0x64
-	AT_SYMLINK_NOFOLLOW = 0x100
 	AT_REMOVEDIR        = 0x200
+	AT_SYMLINK_FOLLOW   = 0x400
+	AT_SYMLINK_NOFOLLOW = 0x100
 )
 
 type Termios struct {
-	Iflag     uint32
-	Oflag     uint32
-	Cflag     uint32
-	Lflag     uint32
-	Line      uint8
-	Cc        [32]uint8
-	Pad_cgo_0 [3]byte
-	Ispeed    uint32
-	Ospeed    uint32
+	Iflag  uint32
+	Oflag  uint32
+	Cflag  uint32
+	Lflag  uint32
+	Line   uint8
+	Cc     [19]uint8
+	Ispeed uint32
+	Ospeed uint32
 }
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
index c8a0de45adfb00e0c30dd51434d5c6c32b1d555c..f437a497946f2c356483a98b1f66d1b2e1172b94 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
@@ -1,8 +1,7 @@
+// +build arm,linux
 // Created by cgo -godefs - DO NOT EDIT
 // cgo -godefs types_linux.go
 
-// +build arm,linux
-
 package unix
 
 const (
@@ -564,120 +563,18 @@ type EpollEvent struct {
 
 const (
 	AT_FDCWD            = -0x64
-	AT_SYMLINK_NOFOLLOW = 0x100
 	AT_REMOVEDIR        = 0x200
+	AT_SYMLINK_FOLLOW   = 0x400
+	AT_SYMLINK_NOFOLLOW = 0x100
 )
 
 type Termios struct {
-	Iflag     uint32
-	Oflag     uint32
-	Cflag     uint32
-	Lflag     uint32
-	Line      uint8
-	Cc        [32]uint8
-	Pad_cgo_0 [3]byte
-	Ispeed    uint32
-	Ospeed    uint32
+	Iflag  uint32
+	Oflag  uint32
+	Cflag  uint32
+	Lflag  uint32
+	Line   uint8
+	Cc     [19]uint8
+	Ispeed uint32
+	Ospeed uint32
 }
-
-const (
-	VINTR    = 0x0
-	VQUIT    = 0x1
-	VERASE   = 0x2
-	VKILL    = 0x3
-	VEOF     = 0x4
-	VTIME    = 0x5
-	VMIN     = 0x6
-	VSWTC    = 0x7
-	VSTART   = 0x8
-	VSTOP    = 0x9
-	VSUSP    = 0xa
-	VEOL     = 0xb
-	VREPRINT = 0xc
-	VDISCARD = 0xd
-	VWERASE  = 0xe
-	VLNEXT   = 0xf
-	VEOL2    = 0x10
-	IGNBRK   = 0x1
-	BRKINT   = 0x2
-	IGNPAR   = 0x4
-	PARMRK   = 0x8
-	INPCK    = 0x10
-	ISTRIP   = 0x20
-	INLCR    = 0x40
-	IGNCR    = 0x80
-	ICRNL    = 0x100
-	IUCLC    = 0x200
-	IXON     = 0x400
-	IXANY    = 0x800
-	IXOFF    = 0x1000
-	IMAXBEL  = 0x2000
-	IUTF8    = 0x4000
-	OPOST    = 0x1
-	OLCUC    = 0x2
-	ONLCR    = 0x4
-	OCRNL    = 0x8
-	ONOCR    = 0x10
-	ONLRET   = 0x20
-	OFILL    = 0x40
-	OFDEL    = 0x80
-	B0       = 0x0
-	B50      = 0x1
-	B75      = 0x2
-	B110     = 0x3
-	B134     = 0x4
-	B150     = 0x5
-	B200     = 0x6
-	B300     = 0x7
-	B600     = 0x8
-	B1200    = 0x9
-	B1800    = 0xa
-	B2400    = 0xb
-	B4800    = 0xc
-	B9600    = 0xd
-	B19200   = 0xe
-	B38400   = 0xf
-	CSIZE    = 0x30
-	CS5      = 0x0
-	CS6      = 0x10
-	CS7      = 0x20
-	CS8      = 0x30
-	CSTOPB   = 0x40
-	CREAD    = 0x80
-	PARENB   = 0x100
-	PARODD   = 0x200
-	HUPCL    = 0x400
-	CLOCAL   = 0x800
-	B57600   = 0x1001
-	B115200  = 0x1002
-	B230400  = 0x1003
-	B460800  = 0x1004
-	B500000  = 0x1005
-	B576000  = 0x1006
-	B921600  = 0x1007
-	B1000000 = 0x1008
-	B1152000 = 0x1009
-	B1500000 = 0x100a
-	B2000000 = 0x100b
-	B2500000 = 0x100c
-	B3000000 = 0x100d
-	B3500000 = 0x100e
-	B4000000 = 0x100f
-	ISIG     = 0x1
-	ICANON   = 0x2
-	XCASE    = 0x4
-	ECHO     = 0x8
-	ECHOE    = 0x10
-	ECHOK    = 0x20
-	ECHONL   = 0x40
-	NOFLSH   = 0x80
-	TOSTOP   = 0x100
-	ECHOCTL  = 0x200
-	ECHOPRT  = 0x400
-	ECHOKE   = 0x800
-	FLUSHO   = 0x1000
-	PENDIN   = 0x4000
-	IEXTEN   = 0x8000
-	TCGETS   = 0x5401
-	TCSETS   = 0x5402
-)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
index f989a36052c116de585071b7f0f2f0e83f014244..1028fcd6e16b74b9dcdc47f6cca9383e40e47aef 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
@@ -1,8 +1,7 @@
+// +build arm64,linux
 // Created by cgo -godefs - DO NOT EDIT
 // cgo -godefs -- -fsigned-char types_linux.go
 
-// +build arm64,linux
-
 package unix
 
 const (
@@ -581,17 +580,17 @@ type EpollEvent struct {
 const (
 	AT_FDCWD            = -0x64
 	AT_REMOVEDIR        = 0x200
+	AT_SYMLINK_FOLLOW   = 0x400
 	AT_SYMLINK_NOFOLLOW = 0x100
 )
 
 type Termios struct {
-	Iflag     uint32
-	Oflag     uint32
-	Cflag     uint32
-	Lflag     uint32
-	Line      uint8
-	Cc        [32]uint8
-	Pad_cgo_0 [3]byte
-	Ispeed    uint32
-	Ospeed    uint32
+	Iflag  uint32
+	Oflag  uint32
+	Cflag  uint32
+	Lflag  uint32
+	Line   uint8
+	Cc     [19]uint8
+	Ispeed uint32
+	Ospeed uint32
 }
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
new file mode 100644
index 0000000000000000000000000000000000000000..f08b5314b11018aeacc9a36ff1a3ef099edb228a
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
@@ -0,0 +1,600 @@
+// +build mips64,linux
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types_linux.go
+
+package unix
+
+const (
+	sizeofPtr      = 0x8
+	sizeofShort    = 0x2
+	sizeofInt      = 0x4
+	sizeofLong     = 0x8
+	sizeofLongLong = 0x8
+	PathMax        = 0x1000
+)
+
+type (
+	_C_short     int16
+	_C_int       int32
+	_C_long      int64
+	_C_long_long int64
+)
+
+type Timespec struct {
+	Sec  int64
+	Nsec int64
+}
+
+type Timeval struct {
+	Sec  int64
+	Usec int64
+}
+
+type Timex struct {
+	Modes     uint32
+	Pad_cgo_0 [4]byte
+	Offset    int64
+	Freq      int64
+	Maxerror  int64
+	Esterror  int64
+	Status    int32
+	Pad_cgo_1 [4]byte
+	Constant  int64
+	Precision int64
+	Tolerance int64
+	Time      Timeval
+	Tick      int64
+	Ppsfreq   int64
+	Jitter    int64
+	Shift     int32
+	Pad_cgo_2 [4]byte
+	Stabil    int64
+	Jitcnt    int64
+	Calcnt    int64
+	Errcnt    int64
+	Stbcnt    int64
+	Tai       int32
+	Pad_cgo_3 [44]byte
+}
+
+type Time_t int64
+
+type Tms struct {
+	Utime  int64
+	Stime  int64
+	Cutime int64
+	Cstime int64
+}
+
+type Utimbuf struct {
+	Actime  int64
+	Modtime int64
+}
+
+type Rusage struct {
+	Utime    Timeval
+	Stime    Timeval
+	Maxrss   int64
+	Ixrss    int64
+	Idrss    int64
+	Isrss    int64
+	Minflt   int64
+	Majflt   int64
+	Nswap    int64
+	Inblock  int64
+	Oublock  int64
+	Msgsnd   int64
+	Msgrcv   int64
+	Nsignals int64
+	Nvcsw    int64
+	Nivcsw   int64
+}
+
+type Rlimit struct {
+	Cur uint64
+	Max uint64
+}
+
+type _Gid_t uint32
+
+type Stat_t struct {
+	Dev     uint32
+	Pad1    [3]int32
+	Ino     uint64
+	Mode    uint32
+	Nlink   uint32
+	Uid     uint32
+	Gid     uint32
+	Rdev    uint32
+	Pad2    [3]uint32
+	Size    int64
+	Atim    Timespec
+	Mtim    Timespec
+	Ctim    Timespec
+	Blksize uint32
+	Pad4    uint32
+	Blocks  int64
+}
+
+type Statfs_t struct {
+	Type    int64
+	Bsize   int64
+	Frsize  int64
+	Blocks  uint64
+	Bfree   uint64
+	Files   uint64
+	Ffree   uint64
+	Bavail  uint64
+	Fsid    Fsid
+	Namelen int64
+	Flags   int64
+	Spare   [5]int64
+}
+
+type Dirent struct {
+	Ino       uint64
+	Off       int64
+	Reclen    uint16
+	Type      uint8
+	Name      [256]int8
+	Pad_cgo_0 [5]byte
+}
+
+type Fsid struct {
+	X__val [2]int32
+}
+
+type Flock_t struct {
+	Type      int16
+	Whence    int16
+	Pad_cgo_0 [4]byte
+	Start     int64
+	Len       int64
+	Pid       int32
+	Pad_cgo_1 [4]byte
+}
+
+const (
+	FADV_NORMAL     = 0x0
+	FADV_RANDOM     = 0x1
+	FADV_SEQUENTIAL = 0x2
+	FADV_WILLNEED   = 0x3
+	FADV_DONTNEED   = 0x4
+	FADV_NOREUSE    = 0x5
+)
+
+type RawSockaddrInet4 struct {
+	Family uint16
+	Port   uint16
+	Addr   [4]byte /* in_addr */
+	Zero   [8]uint8
+}
+
+type RawSockaddrInet6 struct {
+	Family   uint16
+	Port     uint16
+	Flowinfo uint32
+	Addr     [16]byte /* in6_addr */
+	Scope_id uint32
+}
+
+type RawSockaddrUnix struct {
+	Family uint16
+	Path   [108]int8
+}
+
+type RawSockaddrLinklayer struct {
+	Family   uint16
+	Protocol uint16
+	Ifindex  int32
+	Hatype   uint16
+	Pkttype  uint8
+	Halen    uint8
+	Addr     [8]uint8
+}
+
+type RawSockaddrNetlink struct {
+	Family uint16
+	Pad    uint16
+	Pid    uint32
+	Groups uint32
+}
+
+type RawSockaddr struct {
+	Family uint16
+	Data   [14]int8
+}
+
+type RawSockaddrAny struct {
+	Addr RawSockaddr
+	Pad  [96]int8
+}
+
+type _Socklen uint32
+
+type Linger struct {
+	Onoff  int32
+	Linger int32
+}
+
+type Iovec struct {
+	Base *byte
+	Len  uint64
+}
+
+type IPMreq struct {
+	Multiaddr [4]byte /* in_addr */
+	Interface [4]byte /* in_addr */
+}
+
+type IPMreqn struct {
+	Multiaddr [4]byte /* in_addr */
+	Address   [4]byte /* in_addr */
+	Ifindex   int32
+}
+
+type IPv6Mreq struct {
+	Multiaddr [16]byte /* in6_addr */
+	Interface uint32
+}
+
+type Msghdr struct {
+	Name       *byte
+	Namelen    uint32
+	Pad_cgo_0  [4]byte
+	Iov        *Iovec
+	Iovlen     uint64
+	Control    *byte
+	Controllen uint64
+	Flags      int32
+	Pad_cgo_1  [4]byte
+}
+
+type Cmsghdr struct {
+	Len   uint64
+	Level int32
+	Type  int32
+}
+
+type Inet4Pktinfo struct {
+	Ifindex  int32
+	Spec_dst [4]byte /* in_addr */
+	Addr     [4]byte /* in_addr */
+}
+
+type Inet6Pktinfo struct {
+	Addr    [16]byte /* in6_addr */
+	Ifindex uint32
+}
+
+type IPv6MTUInfo struct {
+	Addr RawSockaddrInet6
+	Mtu  uint32
+}
+
+type ICMPv6Filter struct {
+	Data [8]uint32
+}
+
+type Ucred struct {
+	Pid int32
+	Uid uint32
+	Gid uint32
+}
+
+type TCPInfo struct {
+	State          uint8
+	Ca_state       uint8
+	Retransmits    uint8
+	Probes         uint8
+	Backoff        uint8
+	Options        uint8
+	Pad_cgo_0      [2]byte
+	Rto            uint32
+	Ato            uint32
+	Snd_mss        uint32
+	Rcv_mss        uint32
+	Unacked        uint32
+	Sacked         uint32
+	Lost           uint32
+	Retrans        uint32
+	Fackets        uint32
+	Last_data_sent uint32
+	Last_ack_sent  uint32
+	Last_data_recv uint32
+	Last_ack_recv  uint32
+	Pmtu           uint32
+	Rcv_ssthresh   uint32
+	Rtt            uint32
+	Rttvar         uint32
+	Snd_ssthresh   uint32
+	Snd_cwnd       uint32
+	Advmss         uint32
+	Reordering     uint32
+	Rcv_rtt        uint32
+	Rcv_space      uint32
+	Total_retrans  uint32
+}
+
+const (
+	SizeofSockaddrInet4     = 0x10
+	SizeofSockaddrInet6     = 0x1c
+	SizeofSockaddrAny       = 0x70
+	SizeofSockaddrUnix      = 0x6e
+	SizeofSockaddrLinklayer = 0x14
+	SizeofSockaddrNetlink   = 0xc
+	SizeofLinger            = 0x8
+	SizeofIPMreq            = 0x8
+	SizeofIPMreqn           = 0xc
+	SizeofIPv6Mreq          = 0x14
+	SizeofMsghdr            = 0x38
+	SizeofCmsghdr           = 0x10
+	SizeofInet4Pktinfo      = 0xc
+	SizeofInet6Pktinfo      = 0x14
+	SizeofIPv6MTUInfo       = 0x20
+	SizeofICMPv6Filter      = 0x20
+	SizeofUcred             = 0xc
+	SizeofTCPInfo           = 0x68
+)
+
+const (
+	IFA_UNSPEC          = 0x0
+	IFA_ADDRESS         = 0x1
+	IFA_LOCAL           = 0x2
+	IFA_LABEL           = 0x3
+	IFA_BROADCAST       = 0x4
+	IFA_ANYCAST         = 0x5
+	IFA_CACHEINFO       = 0x6
+	IFA_MULTICAST       = 0x7
+	IFLA_UNSPEC         = 0x0
+	IFLA_ADDRESS        = 0x1
+	IFLA_BROADCAST      = 0x2
+	IFLA_IFNAME         = 0x3
+	IFLA_MTU            = 0x4
+	IFLA_LINK           = 0x5
+	IFLA_QDISC          = 0x6
+	IFLA_STATS          = 0x7
+	IFLA_COST           = 0x8
+	IFLA_PRIORITY       = 0x9
+	IFLA_MASTER         = 0xa
+	IFLA_WIRELESS       = 0xb
+	IFLA_PROTINFO       = 0xc
+	IFLA_TXQLEN         = 0xd
+	IFLA_MAP            = 0xe
+	IFLA_WEIGHT         = 0xf
+	IFLA_OPERSTATE      = 0x10
+	IFLA_LINKMODE       = 0x11
+	IFLA_LINKINFO       = 0x12
+	IFLA_NET_NS_PID     = 0x13
+	IFLA_IFALIAS        = 0x14
+	IFLA_MAX            = 0x27
+	RT_SCOPE_UNIVERSE   = 0x0
+	RT_SCOPE_SITE       = 0xc8
+	RT_SCOPE_LINK       = 0xfd
+	RT_SCOPE_HOST       = 0xfe
+	RT_SCOPE_NOWHERE    = 0xff
+	RT_TABLE_UNSPEC     = 0x0
+	RT_TABLE_COMPAT     = 0xfc
+	RT_TABLE_DEFAULT    = 0xfd
+	RT_TABLE_MAIN       = 0xfe
+	RT_TABLE_LOCAL      = 0xff
+	RT_TABLE_MAX        = 0xffffffff
+	RTA_UNSPEC          = 0x0
+	RTA_DST             = 0x1
+	RTA_SRC             = 0x2
+	RTA_IIF             = 0x3
+	RTA_OIF             = 0x4
+	RTA_GATEWAY         = 0x5
+	RTA_PRIORITY        = 0x6
+	RTA_PREFSRC         = 0x7
+	RTA_METRICS         = 0x8
+	RTA_MULTIPATH       = 0x9
+	RTA_FLOW            = 0xb
+	RTA_CACHEINFO       = 0xc
+	RTA_TABLE           = 0xf
+	RTN_UNSPEC          = 0x0
+	RTN_UNICAST         = 0x1
+	RTN_LOCAL           = 0x2
+	RTN_BROADCAST       = 0x3
+	RTN_ANYCAST         = 0x4
+	RTN_MULTICAST       = 0x5
+	RTN_BLACKHOLE       = 0x6
+	RTN_UNREACHABLE     = 0x7
+	RTN_PROHIBIT        = 0x8
+	RTN_THROW           = 0x9
+	RTN_NAT             = 0xa
+	RTN_XRESOLVE        = 0xb
+	RTNLGRP_NONE        = 0x0
+	RTNLGRP_LINK        = 0x1
+	RTNLGRP_NOTIFY      = 0x2
+	RTNLGRP_NEIGH       = 0x3
+	RTNLGRP_TC          = 0x4
+	RTNLGRP_IPV4_IFADDR = 0x5
+	RTNLGRP_IPV4_MROUTE = 0x6
+	RTNLGRP_IPV4_ROUTE  = 0x7
+	RTNLGRP_IPV4_RULE   = 0x8
+	RTNLGRP_IPV6_IFADDR = 0x9
+	RTNLGRP_IPV6_MROUTE = 0xa
+	RTNLGRP_IPV6_ROUTE  = 0xb
+	RTNLGRP_IPV6_IFINFO = 0xc
+	RTNLGRP_IPV6_PREFIX = 0x12
+	RTNLGRP_IPV6_RULE   = 0x13
+	RTNLGRP_ND_USEROPT  = 0x14
+	SizeofNlMsghdr      = 0x10
+	SizeofNlMsgerr      = 0x14
+	SizeofRtGenmsg      = 0x1
+	SizeofNlAttr        = 0x4
+	SizeofRtAttr        = 0x4
+	SizeofIfInfomsg     = 0x10
+	SizeofIfAddrmsg     = 0x8
+	SizeofRtMsg         = 0xc
+	SizeofRtNexthop     = 0x8
+)
+
+type NlMsghdr struct {
+	Len   uint32
+	Type  uint16
+	Flags uint16
+	Seq   uint32
+	Pid   uint32
+}
+
+type NlMsgerr struct {
+	Error int32
+	Msg   NlMsghdr
+}
+
+type RtGenmsg struct {
+	Family uint8
+}
+
+type NlAttr struct {
+	Len  uint16
+	Type uint16
+}
+
+type RtAttr struct {
+	Len  uint16
+	Type uint16
+}
+
+type IfInfomsg struct {
+	Family     uint8
+	X__ifi_pad uint8
+	Type       uint16
+	Index      int32
+	Flags      uint32
+	Change     uint32
+}
+
+type IfAddrmsg struct {
+	Family    uint8
+	Prefixlen uint8
+	Flags     uint8
+	Scope     uint8
+	Index     uint32
+}
+
+type RtMsg struct {
+	Family   uint8
+	Dst_len  uint8
+	Src_len  uint8
+	Tos      uint8
+	Table    uint8
+	Protocol uint8
+	Scope    uint8
+	Type     uint8
+	Flags    uint32
+}
+
+type RtNexthop struct {
+	Len     uint16
+	Flags   uint8
+	Hops    uint8
+	Ifindex int32
+}
+
+const (
+	SizeofSockFilter = 0x8
+	SizeofSockFprog  = 0x10
+)
+
+type SockFilter struct {
+	Code uint16
+	Jt   uint8
+	Jf   uint8
+	K    uint32
+}
+
+type SockFprog struct {
+	Len       uint16
+	Pad_cgo_0 [6]byte
+	Filter    *SockFilter
+}
+
+type InotifyEvent struct {
+	Wd     int32
+	Mask   uint32
+	Cookie uint32
+	Len    uint32
+}
+
+const SizeofInotifyEvent = 0x10
+
+type PtraceRegs struct {
+	Regs        [102]uint64
+	U_tsize     uint64
+	U_dsize     uint64
+	U_ssize     uint64
+	Start_code  uint64
+	Start_data  uint64
+	Start_stack uint64
+	Signal      int64
+	U_ar0       uint64
+	Magic       uint64
+	U_comm      [32]int8
+}
+
+type FdSet struct {
+	Bits [16]int64
+}
+
+type Sysinfo_t struct {
+	Uptime    int64
+	Loads     [3]uint64
+	Totalram  uint64
+	Freeram   uint64
+	Sharedram uint64
+	Bufferram uint64
+	Totalswap uint64
+	Freeswap  uint64
+	Procs     uint16
+	Pad       uint16
+	Pad_cgo_0 [4]byte
+	Totalhigh uint64
+	Freehigh  uint64
+	Unit      uint32
+	X_f       [0]int8
+	Pad_cgo_1 [4]byte
+}
+
+type Utsname struct {
+	Sysname    [65]int8
+	Nodename   [65]int8
+	Release    [65]int8
+	Version    [65]int8
+	Machine    [65]int8
+	Domainname [65]int8
+}
+
+type Ustat_t struct {
+	Tfree     int32
+	Pad_cgo_0 [4]byte
+	Tinode    uint64
+	Fname     [6]int8
+	Fpack     [6]int8
+	Pad_cgo_1 [4]byte
+}
+
+type EpollEvent struct {
+	Events uint32
+	Fd     int32
+	Pad    int32
+}
+
+const (
+	AT_FDCWD            = -0x64
+	AT_REMOVEDIR        = 0x200
+	AT_SYMLINK_FOLLOW   = 0x400
+	AT_SYMLINK_NOFOLLOW = 0x100
+)
+
+type Termios struct {
+	Iflag     uint32
+	Oflag     uint32
+	Cflag     uint32
+	Lflag     uint32
+	Line      uint8
+	Cc        [32]uint8
+	Pad_cgo_0 [3]byte
+}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
new file mode 100644
index 0000000000000000000000000000000000000000..45e0df6b6f805ce971eea9fad3b6a400301e0182
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
@@ -0,0 +1,600 @@
+// +build mips64le,linux
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types_linux.go
+
+package unix
+
+const (
+	sizeofPtr      = 0x8
+	sizeofShort    = 0x2
+	sizeofInt      = 0x4
+	sizeofLong     = 0x8
+	sizeofLongLong = 0x8
+	PathMax        = 0x1000
+)
+
+type (
+	_C_short     int16
+	_C_int       int32
+	_C_long      int64
+	_C_long_long int64
+)
+
+type Timespec struct {
+	Sec  int64
+	Nsec int64
+}
+
+type Timeval struct {
+	Sec  int64
+	Usec int64
+}
+
+type Timex struct {
+	Modes     uint32
+	Pad_cgo_0 [4]byte
+	Offset    int64
+	Freq      int64
+	Maxerror  int64
+	Esterror  int64
+	Status    int32
+	Pad_cgo_1 [4]byte
+	Constant  int64
+	Precision int64
+	Tolerance int64
+	Time      Timeval
+	Tick      int64
+	Ppsfreq   int64
+	Jitter    int64
+	Shift     int32
+	Pad_cgo_2 [4]byte
+	Stabil    int64
+	Jitcnt    int64
+	Calcnt    int64
+	Errcnt    int64
+	Stbcnt    int64
+	Tai       int32
+	Pad_cgo_3 [44]byte
+}
+
+type Time_t int64
+
+type Tms struct {
+	Utime  int64
+	Stime  int64
+	Cutime int64
+	Cstime int64
+}
+
+type Utimbuf struct {
+	Actime  int64
+	Modtime int64
+}
+
+type Rusage struct {
+	Utime    Timeval
+	Stime    Timeval
+	Maxrss   int64
+	Ixrss    int64
+	Idrss    int64
+	Isrss    int64
+	Minflt   int64
+	Majflt   int64
+	Nswap    int64
+	Inblock  int64
+	Oublock  int64
+	Msgsnd   int64
+	Msgrcv   int64
+	Nsignals int64
+	Nvcsw    int64
+	Nivcsw   int64
+}
+
+type Rlimit struct {
+	Cur uint64
+	Max uint64
+}
+
+type _Gid_t uint32
+
+type Stat_t struct {
+	Dev     uint32
+	Pad1    [3]int32
+	Ino     uint64
+	Mode    uint32
+	Nlink   uint32
+	Uid     uint32
+	Gid     uint32
+	Rdev    uint32
+	Pad2    [3]uint32
+	Size    int64
+	Atim    Timespec
+	Mtim    Timespec
+	Ctim    Timespec
+	Blksize uint32
+	Pad4    uint32
+	Blocks  int64
+}
+
+type Statfs_t struct {
+	Type    int64
+	Bsize   int64
+	Frsize  int64
+	Blocks  uint64
+	Bfree   uint64
+	Files   uint64
+	Ffree   uint64
+	Bavail  uint64
+	Fsid    Fsid
+	Namelen int64
+	Flags   int64
+	Spare   [5]int64
+}
+
+type Dirent struct {
+	Ino       uint64
+	Off       int64
+	Reclen    uint16
+	Type      uint8
+	Name      [256]int8
+	Pad_cgo_0 [5]byte
+}
+
+type Fsid struct {
+	X__val [2]int32
+}
+
+type Flock_t struct {
+	Type      int16
+	Whence    int16
+	Pad_cgo_0 [4]byte
+	Start     int64
+	Len       int64
+	Pid       int32
+	Pad_cgo_1 [4]byte
+}
+
+const (
+	FADV_NORMAL     = 0x0
+	FADV_RANDOM     = 0x1
+	FADV_SEQUENTIAL = 0x2
+	FADV_WILLNEED   = 0x3
+	FADV_DONTNEED   = 0x4
+	FADV_NOREUSE    = 0x5
+)
+
+type RawSockaddrInet4 struct {
+	Family uint16
+	Port   uint16
+	Addr   [4]byte /* in_addr */
+	Zero   [8]uint8
+}
+
+type RawSockaddrInet6 struct {
+	Family   uint16
+	Port     uint16
+	Flowinfo uint32
+	Addr     [16]byte /* in6_addr */
+	Scope_id uint32
+}
+
+type RawSockaddrUnix struct {
+	Family uint16
+	Path   [108]int8
+}
+
+type RawSockaddrLinklayer struct {
+	Family   uint16
+	Protocol uint16
+	Ifindex  int32
+	Hatype   uint16
+	Pkttype  uint8
+	Halen    uint8
+	Addr     [8]uint8
+}
+
+type RawSockaddrNetlink struct {
+	Family uint16
+	Pad    uint16
+	Pid    uint32
+	Groups uint32
+}
+
+type RawSockaddr struct {
+	Family uint16
+	Data   [14]int8
+}
+
+type RawSockaddrAny struct {
+	Addr RawSockaddr
+	Pad  [96]int8
+}
+
+type _Socklen uint32
+
+type Linger struct {
+	Onoff  int32
+	Linger int32
+}
+
+type Iovec struct {
+	Base *byte
+	Len  uint64
+}
+
+type IPMreq struct {
+	Multiaddr [4]byte /* in_addr */
+	Interface [4]byte /* in_addr */
+}
+
+type IPMreqn struct {
+	Multiaddr [4]byte /* in_addr */
+	Address   [4]byte /* in_addr */
+	Ifindex   int32
+}
+
+type IPv6Mreq struct {
+	Multiaddr [16]byte /* in6_addr */
+	Interface uint32
+}
+
+type Msghdr struct {
+	Name       *byte
+	Namelen    uint32
+	Pad_cgo_0  [4]byte
+	Iov        *Iovec
+	Iovlen     uint64
+	Control    *byte
+	Controllen uint64
+	Flags      int32
+	Pad_cgo_1  [4]byte
+}
+
+type Cmsghdr struct {
+	Len   uint64
+	Level int32
+	Type  int32
+}
+
+type Inet4Pktinfo struct {
+	Ifindex  int32
+	Spec_dst [4]byte /* in_addr */
+	Addr     [4]byte /* in_addr */
+}
+
+type Inet6Pktinfo struct {
+	Addr    [16]byte /* in6_addr */
+	Ifindex uint32
+}
+
+type IPv6MTUInfo struct {
+	Addr RawSockaddrInet6
+	Mtu  uint32
+}
+
+type ICMPv6Filter struct {
+	Data [8]uint32
+}
+
+type Ucred struct {
+	Pid int32
+	Uid uint32
+	Gid uint32
+}
+
+type TCPInfo struct {
+	State          uint8
+	Ca_state       uint8
+	Retransmits    uint8
+	Probes         uint8
+	Backoff        uint8
+	Options        uint8
+	Pad_cgo_0      [2]byte
+	Rto            uint32
+	Ato            uint32
+	Snd_mss        uint32
+	Rcv_mss        uint32
+	Unacked        uint32
+	Sacked         uint32
+	Lost           uint32
+	Retrans        uint32
+	Fackets        uint32
+	Last_data_sent uint32
+	Last_ack_sent  uint32
+	Last_data_recv uint32
+	Last_ack_recv  uint32
+	Pmtu           uint32
+	Rcv_ssthresh   uint32
+	Rtt            uint32
+	Rttvar         uint32
+	Snd_ssthresh   uint32
+	Snd_cwnd       uint32
+	Advmss         uint32
+	Reordering     uint32
+	Rcv_rtt        uint32
+	Rcv_space      uint32
+	Total_retrans  uint32
+}
+
+const (
+	SizeofSockaddrInet4     = 0x10
+	SizeofSockaddrInet6     = 0x1c
+	SizeofSockaddrAny       = 0x70
+	SizeofSockaddrUnix      = 0x6e
+	SizeofSockaddrLinklayer = 0x14
+	SizeofSockaddrNetlink   = 0xc
+	SizeofLinger            = 0x8
+	SizeofIPMreq            = 0x8
+	SizeofIPMreqn           = 0xc
+	SizeofIPv6Mreq          = 0x14
+	SizeofMsghdr            = 0x38
+	SizeofCmsghdr           = 0x10
+	SizeofInet4Pktinfo      = 0xc
+	SizeofInet6Pktinfo      = 0x14
+	SizeofIPv6MTUInfo       = 0x20
+	SizeofICMPv6Filter      = 0x20
+	SizeofUcred             = 0xc
+	SizeofTCPInfo           = 0x68
+)
+
+const (
+	IFA_UNSPEC          = 0x0
+	IFA_ADDRESS         = 0x1
+	IFA_LOCAL           = 0x2
+	IFA_LABEL           = 0x3
+	IFA_BROADCAST       = 0x4
+	IFA_ANYCAST         = 0x5
+	IFA_CACHEINFO       = 0x6
+	IFA_MULTICAST       = 0x7
+	IFLA_UNSPEC         = 0x0
+	IFLA_ADDRESS        = 0x1
+	IFLA_BROADCAST      = 0x2
+	IFLA_IFNAME         = 0x3
+	IFLA_MTU            = 0x4
+	IFLA_LINK           = 0x5
+	IFLA_QDISC          = 0x6
+	IFLA_STATS          = 0x7
+	IFLA_COST           = 0x8
+	IFLA_PRIORITY       = 0x9
+	IFLA_MASTER         = 0xa
+	IFLA_WIRELESS       = 0xb
+	IFLA_PROTINFO       = 0xc
+	IFLA_TXQLEN         = 0xd
+	IFLA_MAP            = 0xe
+	IFLA_WEIGHT         = 0xf
+	IFLA_OPERSTATE      = 0x10
+	IFLA_LINKMODE       = 0x11
+	IFLA_LINKINFO       = 0x12
+	IFLA_NET_NS_PID     = 0x13
+	IFLA_IFALIAS        = 0x14
+	IFLA_MAX            = 0x27
+	RT_SCOPE_UNIVERSE   = 0x0
+	RT_SCOPE_SITE       = 0xc8
+	RT_SCOPE_LINK       = 0xfd
+	RT_SCOPE_HOST       = 0xfe
+	RT_SCOPE_NOWHERE    = 0xff
+	RT_TABLE_UNSPEC     = 0x0
+	RT_TABLE_COMPAT     = 0xfc
+	RT_TABLE_DEFAULT    = 0xfd
+	RT_TABLE_MAIN       = 0xfe
+	RT_TABLE_LOCAL      = 0xff
+	RT_TABLE_MAX        = 0xffffffff
+	RTA_UNSPEC          = 0x0
+	RTA_DST             = 0x1
+	RTA_SRC             = 0x2
+	RTA_IIF             = 0x3
+	RTA_OIF             = 0x4
+	RTA_GATEWAY         = 0x5
+	RTA_PRIORITY        = 0x6
+	RTA_PREFSRC         = 0x7
+	RTA_METRICS         = 0x8
+	RTA_MULTIPATH       = 0x9
+	RTA_FLOW            = 0xb
+	RTA_CACHEINFO       = 0xc
+	RTA_TABLE           = 0xf
+	RTN_UNSPEC          = 0x0
+	RTN_UNICAST         = 0x1
+	RTN_LOCAL           = 0x2
+	RTN_BROADCAST       = 0x3
+	RTN_ANYCAST         = 0x4
+	RTN_MULTICAST       = 0x5
+	RTN_BLACKHOLE       = 0x6
+	RTN_UNREACHABLE     = 0x7
+	RTN_PROHIBIT        = 0x8
+	RTN_THROW           = 0x9
+	RTN_NAT             = 0xa
+	RTN_XRESOLVE        = 0xb
+	RTNLGRP_NONE        = 0x0
+	RTNLGRP_LINK        = 0x1
+	RTNLGRP_NOTIFY      = 0x2
+	RTNLGRP_NEIGH       = 0x3
+	RTNLGRP_TC          = 0x4
+	RTNLGRP_IPV4_IFADDR = 0x5
+	RTNLGRP_IPV4_MROUTE = 0x6
+	RTNLGRP_IPV4_ROUTE  = 0x7
+	RTNLGRP_IPV4_RULE   = 0x8
+	RTNLGRP_IPV6_IFADDR = 0x9
+	RTNLGRP_IPV6_MROUTE = 0xa
+	RTNLGRP_IPV6_ROUTE  = 0xb
+	RTNLGRP_IPV6_IFINFO = 0xc
+	RTNLGRP_IPV6_PREFIX = 0x12
+	RTNLGRP_IPV6_RULE   = 0x13
+	RTNLGRP_ND_USEROPT  = 0x14
+	SizeofNlMsghdr      = 0x10
+	SizeofNlMsgerr      = 0x14
+	SizeofRtGenmsg      = 0x1
+	SizeofNlAttr        = 0x4
+	SizeofRtAttr        = 0x4
+	SizeofIfInfomsg     = 0x10
+	SizeofIfAddrmsg     = 0x8
+	SizeofRtMsg         = 0xc
+	SizeofRtNexthop     = 0x8
+)
+
+type NlMsghdr struct {
+	Len   uint32
+	Type  uint16
+	Flags uint16
+	Seq   uint32
+	Pid   uint32
+}
+
+type NlMsgerr struct {
+	Error int32
+	Msg   NlMsghdr
+}
+
+type RtGenmsg struct {
+	Family uint8
+}
+
+type NlAttr struct {
+	Len  uint16
+	Type uint16
+}
+
+type RtAttr struct {
+	Len  uint16
+	Type uint16
+}
+
+type IfInfomsg struct {
+	Family     uint8
+	X__ifi_pad uint8
+	Type       uint16
+	Index      int32
+	Flags      uint32
+	Change     uint32
+}
+
+type IfAddrmsg struct {
+	Family    uint8
+	Prefixlen uint8
+	Flags     uint8
+	Scope     uint8
+	Index     uint32
+}
+
+type RtMsg struct {
+	Family   uint8
+	Dst_len  uint8
+	Src_len  uint8
+	Tos      uint8
+	Table    uint8
+	Protocol uint8
+	Scope    uint8
+	Type     uint8
+	Flags    uint32
+}
+
+type RtNexthop struct {
+	Len     uint16
+	Flags   uint8
+	Hops    uint8
+	Ifindex int32
+}
+
+const (
+	SizeofSockFilter = 0x8
+	SizeofSockFprog  = 0x10
+)
+
+type SockFilter struct {
+	Code uint16
+	Jt   uint8
+	Jf   uint8
+	K    uint32
+}
+
+type SockFprog struct {
+	Len       uint16
+	Pad_cgo_0 [6]byte
+	Filter    *SockFilter
+}
+
+type InotifyEvent struct {
+	Wd     int32
+	Mask   uint32
+	Cookie uint32
+	Len    uint32
+}
+
+const SizeofInotifyEvent = 0x10
+
+type PtraceRegs struct {
+	Regs        [102]uint64
+	U_tsize     uint64
+	U_dsize     uint64
+	U_ssize     uint64
+	Start_code  uint64
+	Start_data  uint64
+	Start_stack uint64
+	Signal      int64
+	U_ar0       uint64
+	Magic       uint64
+	U_comm      [32]int8
+}
+
+type FdSet struct {
+	Bits [16]int64
+}
+
+type Sysinfo_t struct {
+	Uptime    int64
+	Loads     [3]uint64
+	Totalram  uint64
+	Freeram   uint64
+	Sharedram uint64
+	Bufferram uint64
+	Totalswap uint64
+	Freeswap  uint64
+	Procs     uint16
+	Pad       uint16
+	Pad_cgo_0 [4]byte
+	Totalhigh uint64
+	Freehigh  uint64
+	Unit      uint32
+	X_f       [0]int8
+	Pad_cgo_1 [4]byte
+}
+
+type Utsname struct {
+	Sysname    [65]int8
+	Nodename   [65]int8
+	Release    [65]int8
+	Version    [65]int8
+	Machine    [65]int8
+	Domainname [65]int8
+}
+
+type Ustat_t struct {
+	Tfree     int32
+	Pad_cgo_0 [4]byte
+	Tinode    uint64
+	Fname     [6]int8
+	Fpack     [6]int8
+	Pad_cgo_1 [4]byte
+}
+
+type EpollEvent struct {
+	Events uint32
+	Fd     int32
+	Pad    int32
+}
+
+const (
+	AT_FDCWD            = -0x64
+	AT_REMOVEDIR        = 0x200
+	AT_SYMLINK_FOLLOW   = 0x400
+	AT_SYMLINK_NOFOLLOW = 0x100
+)
+
+type Termios struct {
+	Iflag     uint32
+	Oflag     uint32
+	Cflag     uint32
+	Lflag     uint32
+	Line      uint8
+	Cc        [32]uint8
+	Pad_cgo_0 [3]byte
+}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
index 808203d071b2e999904008c769fbece2d8e379f4..2bc296ebb606689197da6e1c82b04855711986ff 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
@@ -1,8 +1,7 @@
+// +build ppc64,linux
 // Created by cgo -godefs - DO NOT EDIT
 // cgo -godefs types_linux.go
 
-// +build ppc64,linux
-
 package unix
 
 const (
@@ -591,17 +590,17 @@ type EpollEvent struct {
 const (
 	AT_FDCWD            = -0x64
 	AT_REMOVEDIR        = 0x200
+	AT_SYMLINK_FOLLOW   = 0x400
 	AT_SYMLINK_NOFOLLOW = 0x100
 )
 
 type Termios struct {
-	Iflag     uint32
-	Oflag     uint32
-	Cflag     uint32
-	Lflag     uint32
-	Line      uint8
-	Cc        [32]uint8
-	Pad_cgo_0 [3]byte
-	Ispeed    uint32
-	Ospeed    uint32
+	Iflag  uint32
+	Oflag  uint32
+	Cflag  uint32
+	Lflag  uint32
+	Cc     [19]uint8
+	Line   uint8
+	Ispeed uint32
+	Ospeed uint32
 }
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
index d4a689faffa0bf5dfec2c1c9144b713dcfeb9bfe..0a668931aab8c18baf7993b2b203f1fd200205b1 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
@@ -1,8 +1,7 @@
+// +build ppc64le,linux
 // Created by cgo -godefs - DO NOT EDIT
 // cgo -godefs types_linux.go
 
-// +build ppc64le,linux
-
 package unix
 
 const (
@@ -591,17 +590,17 @@ type EpollEvent struct {
 const (
 	AT_FDCWD            = -0x64
 	AT_REMOVEDIR        = 0x200
+	AT_SYMLINK_FOLLOW   = 0x400
 	AT_SYMLINK_NOFOLLOW = 0x100
 )
 
 type Termios struct {
-	Iflag     uint32
-	Oflag     uint32
-	Cflag     uint32
-	Lflag     uint32
-	Line      uint8
-	Cc        [32]uint8
-	Pad_cgo_0 [3]byte
-	Ispeed    uint32
-	Ospeed    uint32
+	Iflag  uint32
+	Oflag  uint32
+	Cflag  uint32
+	Lflag  uint32
+	Cc     [19]uint8
+	Line   uint8
+	Ispeed uint32
+	Ospeed uint32
 }
diff --git a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go
index 45e9f422217a40b97fdd5599a45a23128af7883a..b3b928a51e60148e9fb2e4af289502bc0c5c932b 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go
@@ -1,8 +1,7 @@
+// +build amd64,solaris
 // Created by cgo -godefs - DO NOT EDIT
 // cgo -godefs types_solaris.go
 
-// +build amd64,solaris
-
 package unix
 
 const (
@@ -11,6 +10,7 @@ const (
 	sizeofInt      = 0x4
 	sizeofLong     = 0x8
 	sizeofLongLong = 0x8
+	PathMax        = 0x400
 )
 
 type (
@@ -35,6 +35,18 @@ type Timeval32 struct {
 	Usec int32
 }
 
+type Tms struct {
+	Utime  int64
+	Stime  int64
+	Cutime int64
+	Cstime int64
+}
+
+type Utimbuf struct {
+	Actime  int64
+	Modtime int64
+}
+
 type Rusage struct {
 	Utime    Timeval
 	Stime    Timeval
@@ -230,6 +242,30 @@ type FdSet struct {
 	Bits [1024]int64
 }
 
+type Utsname struct {
+	Sysname  [257]int8
+	Nodename [257]int8
+	Release  [257]int8
+	Version  [257]int8
+	Machine  [257]int8
+}
+
+type Ustat_t struct {
+	Tfree     int64
+	Tinode    uint64
+	Fname     [6]int8
+	Fpack     [6]int8
+	Pad_cgo_0 [4]byte
+}
+
+const (
+	AT_FDCWD            = 0xffd19553
+	AT_SYMLINK_NOFOLLOW = 0x1000
+	AT_SYMLINK_FOLLOW   = 0x2000
+	AT_REMOVEDIR        = 0x1
+	AT_EACCESS          = 0x4
+)
+
 const (
 	SizeofIfMsghdr  = 0x54
 	SizeofIfData    = 0x44
@@ -357,6 +393,8 @@ type BpfHdr struct {
 	Pad_cgo_0 [2]byte
 }
 
+const _SC_PAGESIZE = 0xb
+
 type Termios struct {
 	Iflag     uint32
 	Oflag     uint32
@@ -365,3 +403,20 @@ type Termios struct {
 	Cc        [19]uint8
 	Pad_cgo_0 [1]byte
 }
+
+type Termio struct {
+	Iflag     uint16
+	Oflag     uint16
+	Cflag     uint16
+	Lflag     uint16
+	Line      int8
+	Cc        [8]uint8
+	Pad_cgo_0 [1]byte
+}
+
+type Winsize struct {
+	Row    uint16
+	Col    uint16
+	Xpixel uint16
+	Ypixel uint16
+}
diff --git a/vendor/golang.org/x/sys/windows/asm.s b/vendor/golang.org/x/sys/windows/asm.s
deleted file mode 100644
index d4ca868f17f98e7500163b292a1a139961448ca5..0000000000000000000000000000000000000000
--- a/vendor/golang.org/x/sys/windows/asm.s
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2014 The Go Authors.  All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include "textflag.h"
-
-TEXT ·use(SB),NOSPLIT,$0
-	RET
diff --git a/vendor/golang.org/x/sys/windows/dll_windows.go b/vendor/golang.org/x/sys/windows/dll_windows.go
index 7f9f05f934737091e2e8ef92b4685d292b744159..5f110679ea880d64bee189ee5d6debfac6f832d0 100644
--- a/vendor/golang.org/x/sys/windows/dll_windows.go
+++ b/vendor/golang.org/x/sys/windows/dll_windows.go
@@ -31,6 +31,10 @@ type DLL struct {
 }
 
 // LoadDLL loads DLL file into memory.
+//
+// Warning: using LoadDLL without an absolute path name is subject to
+// DLL preloading attacks. To safely load a system DLL, use LazyDLL
+// with System set to true, or use LoadLibraryEx directly.
 func LoadDLL(name string) (dll *DLL, err error) {
 	namep, err := UTF16PtrFromString(name)
 	if err != nil {
@@ -162,29 +166,48 @@ func (p *Proc) Call(a ...uintptr) (r1, r2 uintptr, lastErr error) {
 // call to its Handle method or to one of its
 // LazyProc's Addr method.
 type LazyDLL struct {
-	mu   sync.Mutex
-	dll  *DLL // non nil once DLL is loaded
 	Name string
+
+	// System determines whether the DLL must be loaded from the
+	// Windows System directory, bypassing the normal DLL search
+	// path.
+	System bool
+
+	mu  sync.Mutex
+	dll *DLL // non nil once DLL is loaded
 }
 
 // Load loads DLL file d.Name into memory. It returns an error if fails.
 // Load will not try to load DLL, if it is already loaded into memory.
 func (d *LazyDLL) Load() error {
 	// Non-racy version of:
-	// if d.dll == nil {
-	if atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&d.dll))) == nil {
-		d.mu.Lock()
-		defer d.mu.Unlock()
-		if d.dll == nil {
-			dll, e := LoadDLL(d.Name)
-			if e != nil {
-				return e
-			}
-			// Non-racy version of:
-			// d.dll = dll
-			atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&d.dll)), unsafe.Pointer(dll))
-		}
+	// if d.dll != nil {
+	if atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&d.dll))) != nil {
+		return nil
+	}
+	d.mu.Lock()
+	defer d.mu.Unlock()
+	if d.dll != nil {
+		return nil
+	}
+
+	// kernel32.dll is special, since it's where LoadLibraryEx comes from.
+	// The kernel already special-cases its name, so it's always
+	// loaded from system32.
+	var dll *DLL
+	var err error
+	if d.Name == "kernel32.dll" {
+		dll, err = LoadDLL(d.Name)
+	} else {
+		dll, err = loadLibraryEx(d.Name, d.System)
+	}
+	if err != nil {
+		return err
 	}
+
+	// Non-racy version of:
+	// d.dll = dll
+	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&d.dll)), unsafe.Pointer(dll))
 	return nil
 }
 
@@ -212,11 +235,19 @@ func NewLazyDLL(name string) *LazyDLL {
 	return &LazyDLL{Name: name}
 }
 
+// NewLazySystemDLL is like NewLazyDLL, but will only
+// search Windows System directory for the DLL if name is
+// a base name (like "advapi32.dll").
+func NewLazySystemDLL(name string) *LazyDLL {
+	return &LazyDLL{Name: name, System: true}
+}
+
 // A LazyProc implements access to a procedure inside a LazyDLL.
 // It delays the lookup until the Addr method is called.
 type LazyProc struct {
-	mu   sync.Mutex
 	Name string
+
+	mu   sync.Mutex
 	l    *LazyDLL
 	proc *Proc
 }
@@ -273,3 +304,71 @@ func (p *LazyProc) Call(a ...uintptr) (r1, r2 uintptr, lastErr error) {
 	p.mustFind()
 	return p.proc.Call(a...)
 }
+
+var canDoSearchSystem32Once struct {
+	sync.Once
+	v bool
+}
+
+func initCanDoSearchSystem32() {
+	// https://msdn.microsoft.com/en-us/library/ms684179(v=vs.85).aspx says:
+	// "Windows 7, Windows Server 2008 R2, Windows Vista, and Windows
+	// Server 2008: The LOAD_LIBRARY_SEARCH_* flags are available on
+	// systems that have KB2533623 installed. To determine whether the
+	// flags are available, use GetProcAddress to get the address of the
+	// AddDllDirectory, RemoveDllDirectory, or SetDefaultDllDirectories
+	// function. If GetProcAddress succeeds, the LOAD_LIBRARY_SEARCH_*
+	// flags can be used with LoadLibraryEx."
+	canDoSearchSystem32Once.v = (modkernel32.NewProc("AddDllDirectory").Find() == nil)
+}
+
+func canDoSearchSystem32() bool {
+	canDoSearchSystem32Once.Do(initCanDoSearchSystem32)
+	return canDoSearchSystem32Once.v
+}
+
+func isBaseName(name string) bool {
+	for _, c := range name {
+		if c == ':' || c == '/' || c == '\\' {
+			return false
+		}
+	}
+	return true
+}
+
+// loadLibraryEx wraps the Windows LoadLibraryEx function.
+//
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms684179(v=vs.85).aspx
+//
+// If name is not an absolute path, LoadLibraryEx searches for the DLL
+// in a variety of automatic locations unless constrained by flags.
+// See: https://msdn.microsoft.com/en-us/library/ff919712%28VS.85%29.aspx
+func loadLibraryEx(name string, system bool) (*DLL, error) {
+	loadDLL := name
+	var flags uintptr
+	if system {
+		if canDoSearchSystem32() {
+			const LOAD_LIBRARY_SEARCH_SYSTEM32 = 0x00000800
+			flags = LOAD_LIBRARY_SEARCH_SYSTEM32
+		} else if isBaseName(name) {
+			// WindowsXP or unpatched Windows machine
+			// trying to load "foo.dll" out of the system
+			// folder, but LoadLibraryEx doesn't support
+			// that yet on their system, so emulate it.
+			windir, _ := Getenv("WINDIR") // old var; apparently works on XP
+			if windir == "" {
+				return nil, errString("%WINDIR% not defined")
+			}
+			loadDLL = windir + "\\System32\\" + name
+		}
+	}
+	h, err := LoadLibraryEx(loadDLL, 0, flags)
+	if err != nil {
+		return nil, err
+	}
+	return &DLL{Name: name, Handle: h}, nil
+}
+
+type errString string
+
+func (s errString) Error() string { return string(s) }
diff --git a/vendor/golang.org/x/sys/windows/env_unset.go b/vendor/golang.org/x/sys/windows/env_unset.go
index 999ffac4345566ff51a4528de9d240e9a7de7899..4ed03aeefc07ef6cbc5148d929de4725a41b34dd 100644
--- a/vendor/golang.org/x/sys/windows/env_unset.go
+++ b/vendor/golang.org/x/sys/windows/env_unset.go
@@ -2,6 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
+// +build windows
 // +build go1.4
 
 package windows
diff --git a/vendor/golang.org/x/sys/windows/registry/syscall.go b/vendor/golang.org/x/sys/windows/registry/syscall.go
index 5426cae90963308313cc186026e9187dcd0b6b78..c8a986f44b18da92aae73763319faea7515f4e2f 100644
--- a/vendor/golang.org/x/sys/windows/registry/syscall.go
+++ b/vendor/golang.org/x/sys/windows/registry/syscall.go
@@ -8,7 +8,7 @@ package registry
 
 import "syscall"
 
-//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go syscall.go
+//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -xsys -output zsyscall_windows.go syscall.go
 
 const (
 	_REG_OPTION_NON_VOLATILE = 0
diff --git a/vendor/golang.org/x/sys/windows/registry/value.go b/vendor/golang.org/x/sys/windows/registry/value.go
index ac68810ecc091596bdfcdf8e5e2f602dc5ab3bdf..71d4e15bab16ea5492ab0c979d7357394ded8039 100644
--- a/vendor/golang.org/x/sys/windows/registry/value.go
+++ b/vendor/golang.org/x/sys/windows/registry/value.go
@@ -108,7 +108,7 @@ func (k Key) GetStringValue(name string) (val string, valtype uint32, err error)
 	if len(data) == 0 {
 		return "", typ, nil
 	}
-	u := (*[1 << 10]uint16)(unsafe.Pointer(&data[0]))[:]
+	u := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[:]
 	return syscall.UTF16ToString(u), typ, nil
 }
 
@@ -185,7 +185,7 @@ func ExpandString(value string) (string, error) {
 			return "", err
 		}
 		if n <= uint32(len(r)) {
-			u := (*[1 << 15]uint16)(unsafe.Pointer(&r[0]))[:]
+			u := (*[1 << 29]uint16)(unsafe.Pointer(&r[0]))[:]
 			return syscall.UTF16ToString(u), nil
 		}
 		r = make([]uint16, n)
@@ -208,7 +208,7 @@ func (k Key) GetStringsValue(name string) (val []string, valtype uint32, err err
 	if len(data) == 0 {
 		return nil, typ, nil
 	}
-	p := (*[1 << 24]uint16)(unsafe.Pointer(&data[0]))[:len(data)/2]
+	p := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[:len(data)/2]
 	if len(p) == 0 {
 		return nil, typ, nil
 	}
@@ -296,7 +296,7 @@ func (k Key) setStringValue(name string, valtype uint32, value string) error {
 	if err != nil {
 		return err
 	}
-	buf := (*[1 << 10]byte)(unsafe.Pointer(&v[0]))[:len(v)*2]
+	buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[:len(v)*2]
 	return k.setValue(name, valtype, buf)
 }
 
@@ -326,7 +326,7 @@ func (k Key) SetStringsValue(name string, value []string) error {
 		ss += s + "\x00"
 	}
 	v := utf16.Encode([]rune(ss + "\x00"))
-	buf := (*[1 << 10]byte)(unsafe.Pointer(&v[0]))[:len(v)*2]
+	buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[:len(v)*2]
 	return k.setValue(name, MULTI_SZ, buf)
 }
 
diff --git a/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go
index 9c17675a249a9c0e47eeb931cda9caba5b6b2cd2..0fa24c6db44a7f0ee27fe5bda2451a760014ec40 100644
--- a/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go
@@ -2,14 +2,17 @@
 
 package registry
 
-import "unsafe"
-import "syscall"
+import (
+	"golang.org/x/sys/windows"
+	"syscall"
+	"unsafe"
+)
 
 var _ unsafe.Pointer
 
 var (
-	modadvapi32 = syscall.NewLazyDLL("advapi32.dll")
-	modkernel32 = syscall.NewLazyDLL("kernel32.dll")
+	modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
+	modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
 
 	procRegCreateKeyExW           = modadvapi32.NewProc("RegCreateKeyExW")
 	procRegDeleteKeyW             = modadvapi32.NewProc("RegDeleteKeyW")
diff --git a/vendor/golang.org/x/sys/windows/svc/mgr/mgr.go b/vendor/golang.org/x/sys/windows/svc/mgr/mgr.go
index 4d7e72ec463c282187d0c0d05ba433a430b755cd..da8ceb6ed8b648e78a6a63e0e1c55f414a81ad5a 100644
--- a/vendor/golang.org/x/sys/windows/svc/mgr/mgr.go
+++ b/vendor/golang.org/x/sys/windows/svc/mgr/mgr.go
@@ -85,12 +85,15 @@ func (m *Mgr) CreateService(name, exepath string, c Config, args ...string) (*Se
 	if c.ErrorControl == 0 {
 		c.ErrorControl = ErrorNormal
 	}
+	if c.ServiceType == 0 {
+		c.ServiceType = windows.SERVICE_WIN32_OWN_PROCESS
+	}
 	s := syscall.EscapeArg(exepath)
 	for _, v := range args {
 		s += " " + syscall.EscapeArg(v)
 	}
 	h, err := windows.CreateService(m.Handle, toPtr(name), toPtr(c.DisplayName),
-		windows.SERVICE_ALL_ACCESS, windows.SERVICE_WIN32_OWN_PROCESS,
+		windows.SERVICE_ALL_ACCESS, c.ServiceType,
 		c.StartType, c.ErrorControl, toPtr(s), toPtr(c.LoadOrderGroup),
 		nil, toStringBlock(c.Dependencies), toPtr(c.ServiceStartName), toPtr(c.Password))
 	if err != nil {
diff --git a/vendor/golang.org/x/sys/windows/syscall.go b/vendor/golang.org/x/sys/windows/syscall.go
index a8cc609b322dbbfbe70e82e217d72831794bff9f..4e2fbe86e20c46df3486d538a78adb8eb17b00ee 100644
--- a/vendor/golang.org/x/sys/windows/syscall.go
+++ b/vendor/golang.org/x/sys/windows/syscall.go
@@ -23,7 +23,6 @@ package windows // import "golang.org/x/sys/windows"
 
 import (
 	"syscall"
-	"unsafe"
 )
 
 // ByteSliceFromString returns a NUL-terminated slice of bytes
@@ -70,8 +69,3 @@ func (ts *Timespec) Nano() int64 {
 func (tv *Timeval) Nano() int64 {
 	return int64(tv.Sec)*1e9 + int64(tv.Usec)*1000
 }
-
-// use is a no-op, but the compiler cannot see that it is.
-// Calling use(p) ensures that p is kept live until that point.
-//go:noescape
-func use(p unsafe.Pointer)
diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go
index 441c193cfe370e06efe16e7b7a6d435a2f49c45c..863f103e57956c78b02fafc7b86973f6083faab3 100644
--- a/vendor/golang.org/x/sys/windows/syscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/syscall_windows.go
@@ -14,7 +14,7 @@ import (
 	"unsafe"
 )
 
-//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go eventlog.go service.go syscall_windows.go security_windows.go
+//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -xsys -output zsyscall_windows.go eventlog.go service.go syscall_windows.go security_windows.go
 
 type Handle uintptr
 
@@ -84,6 +84,7 @@ func NewCallbackCDecl(fn interface{}) uintptr
 
 //sys	GetLastError() (lasterr error)
 //sys	LoadLibrary(libname string) (handle Handle, err error) = LoadLibraryW
+//sys	LoadLibraryEx(libname string, zero Handle, flags uintptr) (handle Handle, err error) = LoadLibraryExW
 //sys	FreeLibrary(handle Handle) (err error)
 //sys	GetProcAddress(module Handle, procname string) (proc uintptr, err error)
 //sys	GetVersion() (ver uint32, err error)
@@ -105,6 +106,7 @@ func NewCallbackCDecl(fn interface{}) uintptr
 //sys	RemoveDirectory(path *uint16) (err error) = RemoveDirectoryW
 //sys	DeleteFile(path *uint16) (err error) = DeleteFileW
 //sys	MoveFile(from *uint16, to *uint16) (err error) = MoveFileW
+//sys	MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) = MoveFileExW
 //sys	GetComputerName(buf *uint16, n *uint32) (err error) = GetComputerNameW
 //sys	GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) = GetComputerNameExW
 //sys	SetEndOfFile(handle Handle) (err error)
@@ -369,7 +371,7 @@ func Rename(oldpath, newpath string) (err error) {
 	if err != nil {
 		return err
 	}
-	return MoveFile(from, to)
+	return MoveFileEx(from, to, MOVEFILE_REPLACE_EXISTING)
 }
 
 func ComputerName() (name string, err error) {
@@ -528,6 +530,9 @@ const socket_error = uintptr(^uint32(0))
 //sys	GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) = iphlpapi.GetAdaptersInfo
 //sys	SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error) = kernel32.SetFileCompletionNotificationModes
 //sys	WSAEnumProtocols(protocols *int32, protocolBuffer *WSAProtocolInfo, bufferLength *uint32) (n int32, err error) [failretval==-1] = ws2_32.WSAEnumProtocolsW
+//sys	GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) = iphlpapi.GetAdaptersAddresses
+//sys	GetACP() (acp uint32) = kernel32.GetACP
+//sys	MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) = kernel32.MultiByteToWideChar
 
 // For testing: clients can set this flag to force
 // creation of IPv6 sockets to return EAFNOSUPPORT.
diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
index e130ddd0832c3f30b3e106a7d940e4bbac96d5f1..3ff8f52532c00065c2ae321d3b5fc85f3e6a1026 100644
--- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
@@ -2,23 +2,25 @@
 
 package windows
 
-import "unsafe"
-import "syscall"
+import (
+	"syscall"
+	"unsafe"
+)
 
 var _ unsafe.Pointer
 
 var (
-	modadvapi32 = syscall.NewLazyDLL("advapi32.dll")
-	modkernel32 = syscall.NewLazyDLL("kernel32.dll")
-	modshell32  = syscall.NewLazyDLL("shell32.dll")
-	modmswsock  = syscall.NewLazyDLL("mswsock.dll")
-	modcrypt32  = syscall.NewLazyDLL("crypt32.dll")
-	modws2_32   = syscall.NewLazyDLL("ws2_32.dll")
-	moddnsapi   = syscall.NewLazyDLL("dnsapi.dll")
-	modiphlpapi = syscall.NewLazyDLL("iphlpapi.dll")
-	modsecur32  = syscall.NewLazyDLL("secur32.dll")
-	modnetapi32 = syscall.NewLazyDLL("netapi32.dll")
-	moduserenv  = syscall.NewLazyDLL("userenv.dll")
+	modadvapi32 = NewLazySystemDLL("advapi32.dll")
+	modkernel32 = NewLazySystemDLL("kernel32.dll")
+	modshell32  = NewLazySystemDLL("shell32.dll")
+	modmswsock  = NewLazySystemDLL("mswsock.dll")
+	modcrypt32  = NewLazySystemDLL("crypt32.dll")
+	modws2_32   = NewLazySystemDLL("ws2_32.dll")
+	moddnsapi   = NewLazySystemDLL("dnsapi.dll")
+	modiphlpapi = NewLazySystemDLL("iphlpapi.dll")
+	modsecur32  = NewLazySystemDLL("secur32.dll")
+	modnetapi32 = NewLazySystemDLL("netapi32.dll")
+	moduserenv  = NewLazySystemDLL("userenv.dll")
 
 	procRegisterEventSourceW               = modadvapi32.NewProc("RegisterEventSourceW")
 	procDeregisterEventSource              = modadvapi32.NewProc("DeregisterEventSource")
@@ -39,6 +41,7 @@ var (
 	procQueryServiceConfig2W               = modadvapi32.NewProc("QueryServiceConfig2W")
 	procGetLastError                       = modkernel32.NewProc("GetLastError")
 	procLoadLibraryW                       = modkernel32.NewProc("LoadLibraryW")
+	procLoadLibraryExW                     = modkernel32.NewProc("LoadLibraryExW")
 	procFreeLibrary                        = modkernel32.NewProc("FreeLibrary")
 	procGetProcAddress                     = modkernel32.NewProc("GetProcAddress")
 	procGetVersion                         = modkernel32.NewProc("GetVersion")
@@ -60,6 +63,7 @@ var (
 	procRemoveDirectoryW                   = modkernel32.NewProc("RemoveDirectoryW")
 	procDeleteFileW                        = modkernel32.NewProc("DeleteFileW")
 	procMoveFileW                          = modkernel32.NewProc("MoveFileW")
+	procMoveFileExW                        = modkernel32.NewProc("MoveFileExW")
 	procGetComputerNameW                   = modkernel32.NewProc("GetComputerNameW")
 	procGetComputerNameExW                 = modkernel32.NewProc("GetComputerNameExW")
 	procSetEndOfFile                       = modkernel32.NewProc("SetEndOfFile")
@@ -169,6 +173,9 @@ var (
 	procGetAdaptersInfo                    = modiphlpapi.NewProc("GetAdaptersInfo")
 	procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes")
 	procWSAEnumProtocolsW                  = modws2_32.NewProc("WSAEnumProtocolsW")
+	procGetAdaptersAddresses               = modiphlpapi.NewProc("GetAdaptersAddresses")
+	procGetACP                             = modkernel32.NewProc("GetACP")
+	procMultiByteToWideChar                = modkernel32.NewProc("MultiByteToWideChar")
 	procTranslateNameW                     = modsecur32.NewProc("TranslateNameW")
 	procGetUserNameExW                     = modsecur32.NewProc("GetUserNameExW")
 	procNetUserGetInfo                     = modnetapi32.NewProc("NetUserGetInfo")
@@ -426,6 +433,28 @@ func _LoadLibrary(libname *uint16) (handle Handle, err error) {
 	return
 }
 
+func LoadLibraryEx(libname string, zero Handle, flags uintptr) (handle Handle, err error) {
+	var _p0 *uint16
+	_p0, err = syscall.UTF16PtrFromString(libname)
+	if err != nil {
+		return
+	}
+	return _LoadLibraryEx(_p0, zero, flags)
+}
+
+func _LoadLibraryEx(libname *uint16, zero Handle, flags uintptr) (handle Handle, err error) {
+	r0, _, e1 := syscall.Syscall(procLoadLibraryExW.Addr(), 3, uintptr(unsafe.Pointer(libname)), uintptr(zero), uintptr(flags))
+	handle = Handle(r0)
+	if handle == 0 {
+		if e1 != 0 {
+			err = error(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
 func FreeLibrary(handle Handle) (err error) {
 	r1, _, e1 := syscall.Syscall(procFreeLibrary.Addr(), 1, uintptr(handle), 0, 0)
 	if r1 == 0 {
@@ -700,6 +729,18 @@ func MoveFile(from *uint16, to *uint16) (err error) {
 	return
 }
 
+func MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) {
+	r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags))
+	if r1 == 0 {
+		if e1 != 0 {
+			err = error(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
 func GetComputerName(buf *uint16, n *uint32) (err error) {
 	r1, _, e1 := syscall.Syscall(procGetComputerNameW.Addr(), 2, uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n)), 0)
 	if r1 == 0 {
@@ -1996,6 +2037,33 @@ func WSAEnumProtocols(protocols *int32, protocolBuffer *WSAProtocolInfo, bufferL
 	return
 }
 
+func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) {
+	r0, _, _ := syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)), 0)
+	if r0 != 0 {
+		errcode = syscall.Errno(r0)
+	}
+	return
+}
+
+func GetACP() (acp uint32) {
+	r0, _, _ := syscall.Syscall(procGetACP.Addr(), 0, 0, 0, 0)
+	acp = uint32(r0)
+	return
+}
+
+func MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) {
+	r0, _, e1 := syscall.Syscall6(procMultiByteToWideChar.Addr(), 6, uintptr(codePage), uintptr(dwFlags), uintptr(unsafe.Pointer(str)), uintptr(nstr), uintptr(unsafe.Pointer(wchar)), uintptr(nwchar))
+	nwrite = int32(r0)
+	if nwrite == 0 {
+		if e1 != 0 {
+			err = error(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
 func TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint32, translatedName *uint16, nSize *uint32) (err error) {
 	r1, _, e1 := syscall.Syscall6(procTranslateNameW.Addr(), 5, uintptr(unsafe.Pointer(accName)), uintptr(accNameFormat), uintptr(desiredNameFormat), uintptr(unsafe.Pointer(translatedName)), uintptr(unsafe.Pointer(nSize)), 0)
 	if r1&0xff == 0 {
diff --git a/vendor/golang.org/x/sys/windows/ztypes_windows.go b/vendor/golang.org/x/sys/windows/ztypes_windows.go
index ea600f6a976030a047bbf7f80e6976e769befd6b..1fe19d1d7f3d70646bdb90b7b35c670589cc071e 100644
--- a/vendor/golang.org/x/sys/windows/ztypes_windows.go
+++ b/vendor/golang.org/x/sys/windows/ztypes_windows.go
@@ -1135,3 +1135,108 @@ const (
 	ComputerNamePhysicalDnsFullyQualified = 7
 	ComputerNameMax                       = 8
 )
+
+const (
+	MOVEFILE_REPLACE_EXISTING      = 0x1
+	MOVEFILE_COPY_ALLOWED          = 0x2
+	MOVEFILE_DELAY_UNTIL_REBOOT    = 0x4
+	MOVEFILE_WRITE_THROUGH         = 0x8
+	MOVEFILE_CREATE_HARDLINK       = 0x10
+	MOVEFILE_FAIL_IF_NOT_TRACKABLE = 0x20
+)
+
+const GAA_FLAG_INCLUDE_PREFIX = 0x00000010
+
+const (
+	IF_TYPE_OTHER              = 1
+	IF_TYPE_ETHERNET_CSMACD    = 6
+	IF_TYPE_ISO88025_TOKENRING = 9
+	IF_TYPE_PPP                = 23
+	IF_TYPE_SOFTWARE_LOOPBACK  = 24
+	IF_TYPE_ATM                = 37
+	IF_TYPE_IEEE80211          = 71
+	IF_TYPE_TUNNEL             = 131
+	IF_TYPE_IEEE1394           = 144
+)
+
+type SocketAddress struct {
+	Sockaddr       *syscall.RawSockaddrAny
+	SockaddrLength int32
+}
+
+type IpAdapterUnicastAddress struct {
+	Length             uint32
+	Flags              uint32
+	Next               *IpAdapterUnicastAddress
+	Address            SocketAddress
+	PrefixOrigin       int32
+	SuffixOrigin       int32
+	DadState           int32
+	ValidLifetime      uint32
+	PreferredLifetime  uint32
+	LeaseLifetime      uint32
+	OnLinkPrefixLength uint8
+}
+
+type IpAdapterAnycastAddress struct {
+	Length  uint32
+	Flags   uint32
+	Next    *IpAdapterAnycastAddress
+	Address SocketAddress
+}
+
+type IpAdapterMulticastAddress struct {
+	Length  uint32
+	Flags   uint32
+	Next    *IpAdapterMulticastAddress
+	Address SocketAddress
+}
+
+type IpAdapterDnsServerAdapter struct {
+	Length   uint32
+	Reserved uint32
+	Next     *IpAdapterDnsServerAdapter
+	Address  SocketAddress
+}
+
+type IpAdapterPrefix struct {
+	Length       uint32
+	Flags        uint32
+	Next         *IpAdapterPrefix
+	Address      SocketAddress
+	PrefixLength uint32
+}
+
+type IpAdapterAddresses struct {
+	Length                uint32
+	IfIndex               uint32
+	Next                  *IpAdapterAddresses
+	AdapterName           *byte
+	FirstUnicastAddress   *IpAdapterUnicastAddress
+	FirstAnycastAddress   *IpAdapterAnycastAddress
+	FirstMulticastAddress *IpAdapterMulticastAddress
+	FirstDnsServerAddress *IpAdapterDnsServerAdapter
+	DnsSuffix             *uint16
+	Description           *uint16
+	FriendlyName          *uint16
+	PhysicalAddress       [syscall.MAX_ADAPTER_ADDRESS_LENGTH]byte
+	PhysicalAddressLength uint32
+	Flags                 uint32
+	Mtu                   uint32
+	IfType                uint32
+	OperStatus            uint32
+	Ipv6IfIndex           uint32
+	ZoneIndices           [16]uint32
+	FirstPrefix           *IpAdapterPrefix
+	/* more fields might be present here. */
+}
+
+const (
+	IfOperStatusUp             = 1
+	IfOperStatusDown           = 2
+	IfOperStatusTesting        = 3
+	IfOperStatusUnknown        = 4
+	IfOperStatusDormant        = 5
+	IfOperStatusNotPresent     = 6
+	IfOperStatusLowerLayerDown = 7
+)
diff --git a/vendor/vendor.json b/vendor/vendor.json
index fdb2cdd6ff965c950ff28e9202012d087ad51182..cc73784b7f02ce753815b73d0b915a781d4530b0 100644
--- a/vendor/vendor.json
+++ b/vendor/vendor.json
@@ -14,6 +14,18 @@
 			"path": "cloud.google.com/go/internal",
 			"revision": "05253f6a829103296c351b643f6815aedd81a3fb"
 		},
+		{
+			"checksumSHA1": "23FJUX+AInYeEM2hoUMvYZtXZd4=",
+			"path": "github.com/Azure/go-ansiterm",
+			"revision": "fa152c58bc15761d0200cb75fe958b89a9d4888e",
+			"revisionTime": "2016-06-22T17:32:16Z"
+		},
+		{
+			"checksumSHA1": "jBimnggjIiFUjaImNoJhSVLtdzw=",
+			"path": "github.com/Azure/go-ansiterm/winterm",
+			"revision": "fa152c58bc15761d0200cb75fe958b89a9d4888e",
+			"revisionTime": "2016-06-22T17:32:16Z"
+		},
 		{
 			"checksumSHA1": "1IeSD7UyLXPqBP1nbaqpT9fSanQ=",
 			"comment": "v0.2.0",
@@ -109,6 +121,18 @@
 			"revision": "346938d642f2ec3594ed81d874461961cd0faa76",
 			"revisionTime": "2016-10-29T20:57:26Z"
 		},
+		{
+			"checksumSHA1": "tz1lZdR0AlIRg6Aqov+ccO3k+Ko=",
+			"path": "github.com/docker/distribution",
+			"revision": "325b0804fef3a66309d962357aac3c2ce3f4d329",
+			"revisionTime": "2017-01-18T00:16:05Z"
+		},
+		{
+			"checksumSHA1": "0au+tD+jymXNssdb1JgcctY7PN4=",
+			"path": "github.com/docker/distribution/context",
+			"revision": "325b0804fef3a66309d962357aac3c2ce3f4d329",
+			"revisionTime": "2017-01-18T00:16:05Z"
+		},
 		{
 			"checksumSHA1": "f1wARLDzsF/JoyN01yoxXEwFIp8=",
 			"comment": "v2.6.0",
@@ -121,119 +145,396 @@
 			"path": "github.com/docker/distribution/reference",
 			"revision": "325b0804fef3a66309d962357aac3c2ce3f4d329"
 		},
+		{
+			"checksumSHA1": "ClxxEM8HAe3DrneFwpUoIgoW+XA=",
+			"path": "github.com/docker/distribution/registry/api/errcode",
+			"revision": "325b0804fef3a66309d962357aac3c2ce3f4d329",
+			"revisionTime": "2017-01-18T00:16:05Z"
+		},
+		{
+			"checksumSHA1": "AdqP2O9atmZXE2SUf28oONdslxI=",
+			"path": "github.com/docker/distribution/registry/api/v2",
+			"revision": "325b0804fef3a66309d962357aac3c2ce3f4d329",
+			"revisionTime": "2017-01-18T00:16:05Z"
+		},
+		{
+			"checksumSHA1": "+BD1MapPtKWpc2NAMAVL9LzHErk=",
+			"path": "github.com/docker/distribution/registry/client",
+			"revision": "325b0804fef3a66309d962357aac3c2ce3f4d329",
+			"revisionTime": "2017-01-18T00:16:05Z"
+		},
+		{
+			"checksumSHA1": "g1oeZgaYxQTmd8bzYTNe4neymLY=",
+			"path": "github.com/docker/distribution/registry/client/auth",
+			"revision": "325b0804fef3a66309d962357aac3c2ce3f4d329",
+			"revisionTime": "2017-01-18T00:16:05Z"
+		},
+		{
+			"checksumSHA1": "HT3SwoOQunakEwoxg6rAvy94aLs=",
+			"path": "github.com/docker/distribution/registry/client/auth/challenge",
+			"revision": "325b0804fef3a66309d962357aac3c2ce3f4d329",
+			"revisionTime": "2017-01-18T00:16:05Z"
+		},
+		{
+			"checksumSHA1": "KjpG7FYMU5ugtc/fTfL1YqhdaV4=",
+			"path": "github.com/docker/distribution/registry/client/transport",
+			"revision": "325b0804fef3a66309d962357aac3c2ce3f4d329",
+			"revisionTime": "2017-01-18T00:16:05Z"
+		},
+		{
+			"checksumSHA1": "OfCHyYvzswfb+mAswNnEJmiQSq4=",
+			"path": "github.com/docker/distribution/registry/storage/cache",
+			"revision": "325b0804fef3a66309d962357aac3c2ce3f4d329",
+			"revisionTime": "2017-01-18T00:16:05Z"
+		},
+		{
+			"checksumSHA1": "ruzWihEQ6o3c2MIl+5bAXijBMSg=",
+			"path": "github.com/docker/distribution/registry/storage/cache/memory",
+			"revision": "325b0804fef3a66309d962357aac3c2ce3f4d329",
+			"revisionTime": "2017-01-18T00:16:05Z"
+		},
+		{
+			"checksumSHA1": "cNp7rNReJHvdSfrIetXS9RGsLSo=",
+			"path": "github.com/docker/distribution/uuid",
+			"revision": "325b0804fef3a66309d962357aac3c2ce3f4d329",
+			"revisionTime": "2017-01-18T00:16:05Z"
+		},
+		{
+			"checksumSHA1": "ul8vP70NJebrZ6HxWzYvXP+9dQw=",
+			"path": "github.com/docker/docker-credential-helpers/client",
+			"revision": "2d19ebb7f482fb33cc63c9965ccda98b89821637",
+			"revisionTime": "2017-06-15T09:31:05Z"
+		},
+		{
+			"checksumSHA1": "R6fu97962XXuRIIZL1AgJHEQbFI=",
+			"path": "github.com/docker/docker-credential-helpers/credentials",
+			"revision": "2d19ebb7f482fb33cc63c9965ccda98b89821637",
+			"revisionTime": "2017-06-15T09:31:05Z"
+		},
 		{
 			"checksumSHA1": "2Q0WHljjto/McA1/491ixAMi3vc=",
 			"comment": "v1.13.0",
 			"path": "github.com/docker/docker/api/types",
-			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178"
+			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178",
+			"revisionTime": "2017-01-17T09:00:55Z"
 		},
 		{
 			"checksumSHA1": "jVJDbe0IcyjoKc2xbohwzQr+FF0=",
 			"comment": "v1.13.0",
 			"path": "github.com/docker/docker/api/types/blkiodev",
-			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178"
+			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178",
+			"revisionTime": "2017-01-17T09:00:55Z"
 		},
 		{
 			"checksumSHA1": "kRUxG8BqWGMPA0TGu7I8LuFNISE=",
 			"comment": "v1.13.0",
 			"path": "github.com/docker/docker/api/types/container",
-			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178"
+			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178",
+			"revisionTime": "2017-01-17T09:00:55Z"
 		},
 		{
 			"checksumSHA1": "fzeGodcTcWuV18AT0BcvB4EFByo=",
 			"comment": "v1.13.0",
 			"path": "github.com/docker/docker/api/types/events",
-			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178"
+			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178",
+			"revisionTime": "2017-01-17T09:00:55Z"
 		},
 		{
 			"checksumSHA1": "U7BQrWTTXX/5UtI8SN6u4Hhbrw4=",
 			"comment": "v1.13.0",
 			"path": "github.com/docker/docker/api/types/filters",
-			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178"
+			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178",
+			"revisionTime": "2017-01-17T09:00:55Z"
 		},
 		{
 			"checksumSHA1": "4rxOgOCSZleDDi5HKWnhwR+PnK4=",
 			"comment": "v1.13.0",
 			"path": "github.com/docker/docker/api/types/mount",
-			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178"
+			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178",
+			"revisionTime": "2017-01-17T09:00:55Z"
 		},
 		{
 			"checksumSHA1": "vGvZtL6F5XsNlr1p/tmLO1b3dN0=",
 			"comment": "v1.13.0",
 			"path": "github.com/docker/docker/api/types/network",
-			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178"
+			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178",
+			"revisionTime": "2017-01-17T09:00:55Z"
 		},
 		{
 			"checksumSHA1": "BHWRYLuNJly1Asgi2s/OQiS+LJU=",
 			"comment": "v1.13.0",
 			"path": "github.com/docker/docker/api/types/reference",
-			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178"
+			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178",
+			"revisionTime": "2017-01-17T09:00:55Z"
 		},
 		{
 			"checksumSHA1": "XNedxDfkAxGCe5d8nLNPJRbWwDE=",
 			"comment": "v1.13.0",
 			"path": "github.com/docker/docker/api/types/registry",
-			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178"
+			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178",
+			"revisionTime": "2017-01-17T09:00:55Z"
 		},
 		{
 			"checksumSHA1": "VTxWyFud/RedrpllGdQonVtGM/A=",
 			"comment": "v1.13.0",
 			"path": "github.com/docker/docker/api/types/strslice",
-			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178"
+			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178",
+			"revisionTime": "2017-01-17T09:00:55Z"
 		},
 		{
 			"checksumSHA1": "+VZ2iq/v0b1G9CkjUOnuhreBB/k=",
 			"comment": "v1.13.0",
 			"path": "github.com/docker/docker/api/types/swarm",
-			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178"
+			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178",
+			"revisionTime": "2017-01-17T09:00:55Z"
 		},
 		{
 			"checksumSHA1": "XwEQCyDAwJhWtDk0njEv/qHlWvA=",
 			"comment": "v1.13.0",
 			"path": "github.com/docker/docker/api/types/time",
-			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178"
+			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178",
+			"revisionTime": "2017-01-17T09:00:55Z"
 		},
 		{
 			"checksumSHA1": "iRarXGLKK2hKs5sdktWlAcP6adM=",
 			"comment": "v1.13.0",
 			"path": "github.com/docker/docker/api/types/versions",
-			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178"
+			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178",
+			"revisionTime": "2017-01-17T09:00:55Z"
 		},
 		{
 			"checksumSHA1": "Aw4GIxyeak7ey5CjVhtlx0UVSug=",
 			"comment": "v1.13.0",
 			"path": "github.com/docker/docker/api/types/volume",
-			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178"
+			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178",
+			"revisionTime": "2017-01-17T09:00:55Z"
 		},
 		{
 			"checksumSHA1": "g7GYKRELcI6AU56MWKh+0ImdJ40=",
 			"comment": "v1.13.0",
 			"path": "github.com/docker/docker/cliconfig/configfile",
-			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178"
+			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178",
+			"revisionTime": "2017-01-17T09:00:55Z"
+		},
+		{
+			"checksumSHA1": "EW7BcX6QOFZ3prp5ehYF8j5lXKg=",
+			"path": "github.com/docker/docker/cliconfig/credentials",
+			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178",
+			"revisionTime": "2017-01-17T09:00:55Z"
 		},
 		{
 			"checksumSHA1": "aJHqMWaColZ6YPJXgIhTjuZ+MKI=",
 			"comment": "v1.13.0",
 			"path": "github.com/docker/docker/client",
-			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178"
+			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178",
+			"revisionTime": "2017-01-17T09:00:55Z"
+		},
+		{
+			"checksumSHA1": "d3dnZt0YviJwjWdXOkmAfezkjMw=",
+			"path": "github.com/docker/docker/daemon/graphdriver",
+			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178",
+			"revisionTime": "2017-01-17T09:00:55Z"
+		},
+		{
+			"checksumSHA1": "nmLjLexIsNe8FdxmELFB+enmsW8=",
+			"path": "github.com/docker/docker/image",
+			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178",
+			"revisionTime": "2017-01-17T09:00:55Z"
+		},
+		{
+			"checksumSHA1": "c3VDvvD6BFJr1zW2gyAil5qopGE=",
+			"path": "github.com/docker/docker/image/v1",
+			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178",
+			"revisionTime": "2017-01-17T09:00:55Z"
+		},
+		{
+			"checksumSHA1": "LPbzAHwPih/oy26E4fY03K6Zt8E=",
+			"path": "github.com/docker/docker/layer",
+			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178",
+			"revisionTime": "2017-01-17T09:00:55Z"
+		},
+		{
+			"checksumSHA1": "8YI8cNLL5x1mWD2xLegHOL2kQ50=",
+			"path": "github.com/docker/docker/oci",
+			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178",
+			"revisionTime": "2017-01-17T09:00:55Z"
+		},
+		{
+			"checksumSHA1": "uPCR9ik+hpdlmDpnONWhwVpypq4=",
+			"path": "github.com/docker/docker/opts",
+			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178",
+			"revisionTime": "2017-01-17T09:00:55Z"
+		},
+		{
+			"checksumSHA1": "nyiTxTOUETr1n3DOISNNwAQDMMo=",
+			"path": "github.com/docker/docker/pkg/archive",
+			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178",
+			"revisionTime": "2017-01-17T09:00:55Z"
+		},
+		{
+			"checksumSHA1": "g2t5enZCIOPQmEepA1IHv4T7JYs=",
+			"path": "github.com/docker/docker/pkg/chrootarchive",
+			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178",
+			"revisionTime": "2017-01-17T09:00:55Z"
+		},
+		{
+			"checksumSHA1": "G7KKPJF5ux2FijDVpd4ZVrFRw0U=",
+			"path": "github.com/docker/docker/pkg/fileutils",
+			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178",
+			"revisionTime": "2017-01-17T09:00:55Z"
 		},
 		{
 			"checksumSHA1": "p6Ud4Yf1ywWy20YxXF1RU4yhTio=",
 			"comment": "v1.13.0",
 			"path": "github.com/docker/docker/pkg/homedir",
-			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178"
+			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178",
+			"revisionTime": "2017-01-17T09:00:55Z"
+		},
+		{
+			"checksumSHA1": "BkiWzuIrq4LhbXJ4KQvcCAO21/4=",
+			"path": "github.com/docker/docker/pkg/httputils",
+			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178",
+			"revisionTime": "2017-01-17T09:00:55Z"
+		},
+		{
+			"checksumSHA1": "V2sRiFFOWRiu3TpB/8ouD+GkQSk=",
+			"path": "github.com/docker/docker/pkg/idtools",
+			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178",
+			"revisionTime": "2017-01-17T09:00:55Z"
+		},
+		{
+			"checksumSHA1": "MgSwrRRAfCQrrToxPAoMCmbieLE=",
+			"path": "github.com/docker/docker/pkg/ioutils",
+			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178",
+			"revisionTime": "2017-01-17T09:00:55Z"
+		},
+		{
+			"checksumSHA1": "BlFSSK7zUjPzPuxkLmM/0wpvku8=",
+			"path": "github.com/docker/docker/pkg/jsonlog",
+			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178",
+			"revisionTime": "2017-01-17T09:00:55Z"
+		},
+		{
+			"checksumSHA1": "ozBPOmsHDn6vMwIbk2iu7KaJBgY=",
+			"path": "github.com/docker/docker/pkg/jsonmessage",
+			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178",
+			"revisionTime": "2017-01-17T09:00:55Z"
+		},
+		{
+			"checksumSHA1": "ndnAFCfsGC3upNQ6jAEwzxcurww=",
+			"path": "github.com/docker/docker/pkg/longpath",
+			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178",
+			"revisionTime": "2017-01-17T09:00:55Z"
+		},
+		{
+			"checksumSHA1": "2LWFF3SAaqGw9SRWC3oq9cAYsxs=",
+			"path": "github.com/docker/docker/pkg/mount",
+			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178",
+			"revisionTime": "2017-01-17T09:00:55Z"
+		},
+		{
+			"checksumSHA1": "25gh2wyZiRvm8aY05JIBkfN7XD8=",
+			"path": "github.com/docker/docker/pkg/plugingetter",
+			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178",
+			"revisionTime": "2017-01-17T09:00:55Z"
+		},
+		{
+			"checksumSHA1": "ldI7xaoFkJhw8htm4sAxD8w2H6M=",
+			"path": "github.com/docker/docker/pkg/plugins",
+			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178",
+			"revisionTime": "2017-01-17T09:00:55Z"
+		},
+		{
+			"checksumSHA1": "7PpleyPctFC+y0OS4+kksrD6sJE=",
+			"path": "github.com/docker/docker/pkg/plugins/transport",
+			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178",
+			"revisionTime": "2017-01-17T09:00:55Z"
+		},
+		{
+			"checksumSHA1": "oHqQiESBEfIJ+uu1+ue7PHvJ6CI=",
+			"path": "github.com/docker/docker/pkg/pools",
+			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178",
+			"revisionTime": "2017-01-17T09:00:55Z"
+		},
+		{
+			"checksumSHA1": "txf3EORYff4hO6PEvwBm2lyh1MU=",
+			"path": "github.com/docker/docker/pkg/promise",
+			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178",
+			"revisionTime": "2017-01-17T09:00:55Z"
+		},
+		{
+			"checksumSHA1": "lThih54jzz9A4zHKEFb9SIV3Ed0=",
+			"path": "github.com/docker/docker/pkg/random",
+			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178",
+			"revisionTime": "2017-01-17T09:00:55Z"
+		},
+		{
+			"checksumSHA1": "B37RZZTMQB7Z6uS6PoIysI78Y+4=",
+			"path": "github.com/docker/docker/pkg/reexec",
+			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178",
+			"revisionTime": "2017-01-17T09:00:55Z"
 		},
 		{
 			"checksumSHA1": "FHOOypod1NsTgQffapTFhtZ49+8=",
 			"comment": "v1.13.0",
 			"path": "github.com/docker/docker/pkg/stdcopy",
-			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178"
+			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178",
+			"revisionTime": "2017-01-17T09:00:55Z"
+		},
+		{
+			"checksumSHA1": "/WHKetY1usDjm9D0VKgHgnr58Kk=",
+			"path": "github.com/docker/docker/pkg/stringid",
+			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178",
+			"revisionTime": "2017-01-17T09:00:55Z"
+		},
+		{
+			"checksumSHA1": "sjdNnoPZipse7blbn/DzfxVKpe4=",
+			"path": "github.com/docker/docker/pkg/system",
+			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178",
+			"revisionTime": "2017-01-17T09:00:55Z"
+		},
+		{
+			"checksumSHA1": "nbJLV/dLA2gV5fUsZS8Z3muCQGk=",
+			"path": "github.com/docker/docker/pkg/tarsum",
+			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178",
+			"revisionTime": "2017-01-17T09:00:55Z"
+		},
+		{
+			"checksumSHA1": "wJgY3xvAk7ixRUbhfzaAlKtCXRM=",
+			"path": "github.com/docker/docker/pkg/term",
+			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178",
+			"revisionTime": "2017-01-17T09:00:55Z"
+		},
+		{
+			"checksumSHA1": "EMjTiUTHNxqSsmdMB1V29hRvJbQ=",
+			"path": "github.com/docker/docker/pkg/term/windows",
+			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178",
+			"revisionTime": "2017-01-17T09:00:55Z"
 		},
 		{
 			"checksumSHA1": "8I0Ez+aUYGpsDEVZ8wN/Ztf6Zqs=",
 			"comment": "v1.13.0",
 			"path": "github.com/docker/docker/pkg/tlsconfig",
-			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178"
+			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178",
+			"revisionTime": "2017-01-17T09:00:55Z"
+		},
+		{
+			"checksumSHA1": "e7F9xGwV5bZsI9sxr0Sak07rPi4=",
+			"path": "github.com/docker/docker/plugin/v2",
+			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178",
+			"revisionTime": "2017-01-17T09:00:55Z"
+		},
+		{
+			"checksumSHA1": "QsI9CYC5YT0+oRVubpcfgvufKIU=",
+			"path": "github.com/docker/docker/reference",
+			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178",
+			"revisionTime": "2017-01-17T09:00:55Z"
+		},
+		{
+			"checksumSHA1": "RPHQTpwxxCgAoV//gNovwxt8d60=",
+			"path": "github.com/docker/docker/registry",
+			"revision": "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178",
+			"revisionTime": "2017-01-17T09:00:55Z"
 		},
 		{
 			"checksumSHA1": "GlyAlAfGRjTBbuqHgRNaRG2fAaw=",
@@ -296,22 +597,25 @@
 			"revision": "4290f40c056686fcaa5c9caf02eac1dde9315adf"
 		},
 		{
-			"checksumSHA1": "42YGW0xAQT+pnQ2s8A9tYgfQhAg=",
+			"checksumSHA1": "JbiWTzH699Sqz25XmDlsARpMN9w=",
 			"comment": "v0.2.1-4-g988efe9",
 			"path": "github.com/docker/go-connections/nat",
-			"revision": "988efe982fdecb46f01d53465878ff1f2ff411ce"
+			"revision": "3ede32e2033de7505e6500d6c868c2b9ed9f169d",
+			"revisionTime": "2017-06-23T20:36:43Z"
 		},
 		{
-			"checksumSHA1": "3sQ35fcqLJRnyTqaZn8Zb9l97uk=",
+			"checksumSHA1": "jUfDG3VQsA2UZHvvIXncgiddpYA=",
 			"comment": "v0.2.1-4-g988efe9",
 			"path": "github.com/docker/go-connections/sockets",
-			"revision": "988efe982fdecb46f01d53465878ff1f2ff411ce"
+			"revision": "3ede32e2033de7505e6500d6c868c2b9ed9f169d",
+			"revisionTime": "2017-06-23T20:36:43Z"
 		},
 		{
-			"checksumSHA1": "acyHmsV4QAeJGUQaOGsO9qHmMG0=",
+			"checksumSHA1": "c6lDGNwTm5mYq18IHP+lqYpk8xU=",
 			"comment": "v0.2.1-4-g988efe9",
 			"path": "github.com/docker/go-connections/tlsconfig",
-			"revision": "988efe982fdecb46f01d53465878ff1f2ff411ce"
+			"revision": "3ede32e2033de7505e6500d6c868c2b9ed9f169d",
+			"revisionTime": "2017-06-23T20:36:43Z"
 		},
 		{
 			"checksumSHA1": "KbWP8VsU9gVoZm9pCqe79AkfDmk=",
@@ -419,6 +723,18 @@
 			"path": "github.com/gorhill/cronexpr",
 			"revision": "f0984319b44273e83de132089ae42b1810f4933b"
 		},
+		{
+			"checksumSHA1": "g/V4qrXjUGG9B+e3hB+4NAYJ5Gs=",
+			"path": "github.com/gorilla/context",
+			"revision": "08b5f424b9271eedf6f9f0ce86cb9396ed337a42",
+			"revisionTime": "2016-08-17T18:46:32Z"
+		},
+		{
+			"checksumSHA1": "zmCk+lgIeiOf0Ng9aFP9aFy8ksE=",
+			"path": "github.com/gorilla/mux",
+			"revision": "599cba5e7b6137d46ddf58fb1765f5d928e69604",
+			"revisionTime": "2017-02-28T22:43:54Z"
+		},
 		{
 			"checksumSHA1": "hwGdeQbcfc2RvIQS5wAaYRKJDd4=",
 			"comment": "0.2.2-6-g50d4dbd",
@@ -477,6 +793,12 @@
 			"path": "github.com/opencontainers/runc/libcontainer/configs",
 			"revision": "94dc520a5732126985fec249f80c91b9e0601815"
 		},
+		{
+			"checksumSHA1": "IUQSNA2zZqenm6uAjy8txdY9mrw=",
+			"path": "github.com/opencontainers/runc/libcontainer/devices",
+			"revision": "4f601205d475e1995472d81b3787b197008c5dd7",
+			"revisionTime": "2016-02-05T20:37:56Z"
+		},
 		{
 			"checksumSHA1": "HN/wo0SqwzYkenW9uBWQ/3df8cY=",
 			"comment": "v0.0.9",
@@ -495,6 +817,12 @@
 			"path": "github.com/opencontainers/runc/libcontainer/utils",
 			"revision": "94dc520a5732126985fec249f80c91b9e0601815"
 		},
+		{
+			"checksumSHA1": "yvrnpBTTXzXgguz6xhUzURvxzQA=",
+			"path": "github.com/opencontainers/runtime-spec/specs-go",
+			"revision": "313f40bdfcc04c6b0f7b8a8c3e91a7b7a3a0ef4e",
+			"revisionTime": "2016-09-20T21:29:59Z"
+		},
 		{
 			"checksumSHA1": "xN14ZoFcgefABp24aowYtQVMDsc=",
 			"comment": "v1.0-11-gc55201b",
@@ -584,6 +912,24 @@
 			"path": "github.com/ugorji/go/codec",
 			"revision": "4a1cb5252a6951f715a85d0e4be334c2a2dbf2a2"
 		},
+		{
+			"checksumSHA1": "xMhZY52HabUCN/3v3xkP/w3bYsc=",
+			"path": "github.com/vbatts/tar-split/archive/tar",
+			"revision": "b9127a139315e57ebc26030e7decf72d0a20acb4",
+			"revisionTime": "2017-03-14T15:24:38Z"
+		},
+		{
+			"checksumSHA1": "5p0tRQ6s4VofapI7DZTqWzsKtIc=",
+			"path": "github.com/vbatts/tar-split/tar/asm",
+			"revision": "b9127a139315e57ebc26030e7decf72d0a20acb4",
+			"revisionTime": "2017-03-14T15:24:38Z"
+		},
+		{
+			"checksumSHA1": "Cu5bXApv9vuX7Ar2zEgAXX8EIKA=",
+			"path": "github.com/vbatts/tar-split/tar/storage",
+			"revision": "b9127a139315e57ebc26030e7decf72d0a20acb4",
+			"revisionTime": "2017-03-14T15:24:38Z"
+		},
 		{
 			"checksumSHA1": "A+G1gBUl5JkopfxqoL/ojs/zThE=",
 			"path": "gitlab.com/ayufan/golang-cli-helpers",
@@ -665,34 +1011,40 @@
 			"revision": "3b966c7f301c0c71c53d94dc632a62df0a682cd7"
 		},
 		{
-			"checksumSHA1": "ADczDfJ6IEStBdLQDkVcwVnj2j0=",
+			"checksumSHA1": "MXF5BCS3JjmGSfZXH1dWvyWXaSQ=",
 			"path": "golang.org/x/sys/unix",
-			"revision": "98fc11432b951eb53c62602ba4ae99d03c1fb7e2"
+			"revision": "042a8f53ce82bbe081222da955159491e32146a0",
+			"revisionTime": "2016-04-06T05:52:57Z"
 		},
 		{
-			"checksumSHA1": "eMNHNAV71hl2K8XAUDl5JM+LpU0=",
+			"checksumSHA1": "6aa7Y4gpUDxYNt48cOT95zMjp9E=",
 			"path": "golang.org/x/sys/windows",
-			"revision": "98fc11432b951eb53c62602ba4ae99d03c1fb7e2"
+			"revision": "042a8f53ce82bbe081222da955159491e32146a0",
+			"revisionTime": "2016-04-06T05:52:57Z"
 		},
 		{
-			"checksumSHA1": "npjha3mZrErG3/HdLowD1Ht5l4E=",
+			"checksumSHA1": "ZcvekRXq6FjnGCznsRqfVTjA+pc=",
 			"path": "golang.org/x/sys/windows/registry",
-			"revision": "98fc11432b951eb53c62602ba4ae99d03c1fb7e2"
+			"revision": "042a8f53ce82bbe081222da955159491e32146a0",
+			"revisionTime": "2016-04-06T05:52:57Z"
 		},
 		{
 			"checksumSHA1": "IRqLaXM/VQRzkbXPuiqOxTb2W0Y=",
 			"path": "golang.org/x/sys/windows/svc",
-			"revision": "98fc11432b951eb53c62602ba4ae99d03c1fb7e2"
+			"revision": "042a8f53ce82bbe081222da955159491e32146a0",
+			"revisionTime": "2016-04-06T05:52:57Z"
 		},
 		{
 			"checksumSHA1": "uVlUSSKplihZG7N+QJ6fzDZ4Kh8=",
 			"path": "golang.org/x/sys/windows/svc/eventlog",
-			"revision": "98fc11432b951eb53c62602ba4ae99d03c1fb7e2"
+			"revision": "042a8f53ce82bbe081222da955159491e32146a0",
+			"revisionTime": "2016-04-06T05:52:57Z"
 		},
 		{
-			"checksumSHA1": "KI6gK4mC68QfNu9ISn2nbN33svM=",
+			"checksumSHA1": "6dvXhF7BOHN87mMJ8SaAuFxai0Q=",
 			"path": "golang.org/x/sys/windows/svc/mgr",
-			"revision": "98fc11432b951eb53c62602ba4ae99d03c1fb7e2"
+			"revision": "042a8f53ce82bbe081222da955159491e32146a0",
+			"revisionTime": "2016-04-06T05:52:57Z"
 		},
 		{
 			"checksumSHA1": "xduDNbu9zEufcDc8UNWHoLrip5w=",