From 914c48ebfefc2ef3ccd2968e85bac7b66d45d965 Mon Sep 17 00:00:00 2001 From: Milos Zivkovic Date: Wed, 10 Nov 2021 16:11:27 +0100 Subject: [PATCH 01/19] Standardize the way secrets are retrieved, implement the local secrets manager --- command/ibft/ibft_init.go | 47 +++-- command/secretsManager/secret_manager.go | 26 +++ .../secrets_manager_generate.go | 165 ++++++++++++++++++ command/util/commands.go | 12 ++ crypto/crypto.go | 25 +++ helper/keystore/keystore.go | 12 +- network/keystore.go | 38 ++++ network/server_test.go | 8 +- secrets/config.go | 40 +++++ secrets/hashicorpVault/hashicorpVault.go | 2 + secrets/local/local.go | 133 ++++++++++++++ secrets/secrets.go | 93 ++++++++++ server/builtin.go | 9 + 13 files changed, 595 insertions(+), 15 deletions(-) create mode 100644 command/secretsManager/secret_manager.go create mode 100644 command/secretsManager/secrets_manager_generate.go create mode 100644 secrets/config.go create mode 100644 secrets/hashicorpVault/hashicorpVault.go create mode 100644 secrets/local/local.go create mode 100644 secrets/secrets.go diff --git a/command/ibft/ibft_init.go b/command/ibft/ibft_init.go index b1df8d33b2..e0582c93dd 100644 --- a/command/ibft/ibft_init.go +++ b/command/ibft/ibft_init.go @@ -6,9 +6,10 @@ import ( "path/filepath" "github.com/0xPolygon/polygon-sdk/command/helper" + "github.com/0xPolygon/polygon-sdk/secrets" + "github.com/0xPolygon/polygon-sdk/secrets/local" "github.com/libp2p/go-libp2p-core/peer" - "github.com/0xPolygon/polygon-sdk/consensus/ibft" "github.com/0xPolygon/polygon-sdk/crypto" "github.com/0xPolygon/polygon-sdk/network" "github.com/0xPolygon/polygon-sdk/server" @@ -93,22 +94,48 @@ func (p *IbftInit) Run(args []string) int { } } + // TODO branch for the type of secrets manager + + // Local (FS) SecretsManager if err := server.SetupDataDir(dataDir, []string{consensusDir, libp2pDir}); err != nil { p.UI.Error(err.Error()) return 1 } - // try to write the ibft private key - key, err := crypto.GenerateOrReadPrivateKey(filepath.Join(dataDir, consensusDir, ibft.IbftKeyName)) - if err != nil { - p.UI.Error(err.Error()) + localSecretsManager, factoryErr := local.SecretsManagerFactory(&secrets.SecretsManagerParams{ + Logger: nil, + Params: map[string]interface{}{ + secrets.Path: dataDir, + }, + }) + if factoryErr != nil { + p.UI.Error(factoryErr.Error()) return 1 } - // try to create also a libp2p address - libp2pKey, err := network.ReadLibp2pKey(filepath.Join(dataDir, libp2pDir)) - if err != nil { - p.UI.Error(err.Error()) + // Generate the IBFT validator private key + validatorKey, validatorKeyEncoded, keyErr := crypto.GenerateAndEncodePrivateKey() + if keyErr != nil { + p.UI.Error(keyErr.Error()) + return 1 + } + + // Write the validator private key to disk + if setErr := localSecretsManager.SetSecret(secrets.ValidatorKey, validatorKeyEncoded); setErr != nil { + p.UI.Error(setErr.Error()) + return 1 + } + + // Generate the libp2p private key + libp2pKey, libp2pKeyEncoded, keyErr := network.GenerateAndEncodeLibp2pKey() + if keyErr != nil { + p.UI.Error(keyErr.Error()) + return 1 + } + + // Write the networking private key to disk + if setErr := localSecretsManager.SetSecret(secrets.NetworkKey, libp2pKeyEncoded); setErr != nil { + p.UI.Error(setErr.Error()) return 1 } @@ -121,7 +148,7 @@ func (p *IbftInit) Run(args []string) int { output := "\n[IBFT INIT]\n" output += helper.FormatKV([]string{ - fmt.Sprintf("Public key (address)|%s", crypto.PubKeyToAddress(&key.PublicKey)), + fmt.Sprintf("Public key (address)|%s", 
crypto.PubKeyToAddress(&validatorKey.PublicKey)), fmt.Sprintf("Node ID|%s", nodeId.String()), }) diff --git a/command/secretsManager/secret_manager.go b/command/secretsManager/secret_manager.go new file mode 100644 index 0000000000..8dc2b4982d --- /dev/null +++ b/command/secretsManager/secret_manager.go @@ -0,0 +1,26 @@ +package secretsManager + +import "github.com/mitchellh/cli" + +// SecretManagerCommand is the top level secret manager command +type SecretManagerCommand struct { +} + +// Help implements the cli.Command interface +func (c *SecretManagerCommand) Help() string { + return c.Synopsis() +} + +func (c *SecretManagerCommand) GetBaseCommand() string { + return "secrets-manager" +} + +// Synopsis implements the cli.Command interface +func (c *SecretManagerCommand) Synopsis() string { + return "Top level SecretsManager command for interacting with secrets-manager functionality. Only accepts subcommands" +} + +// Run implements the cli.Command interface +func (c *SecretManagerCommand) Run(args []string) int { + return cli.RunResultHelp +} diff --git a/command/secretsManager/secrets_manager_generate.go b/command/secretsManager/secrets_manager_generate.go new file mode 100644 index 0000000000..876f3be95e --- /dev/null +++ b/command/secretsManager/secrets_manager_generate.go @@ -0,0 +1,165 @@ +package secretsManager + +import ( + "flag" + "fmt" + + "github.com/0xPolygon/polygon-sdk/command/helper" + "github.com/0xPolygon/polygon-sdk/secrets" +) + +// SecretsManagerGenerate is the command to generate a secrets manager configuration +type SecretsManagerGenerate struct { + helper.Meta +} + +const ( + defaultNodeName = "polygon-sdk-node" + defaultConfigFileName = "./secretsManagerConfig.json" +) + +func (i *SecretsManagerGenerate) DefineFlags() { + if i.FlagMap == nil { + // Flag map not initialized + i.FlagMap = make(map[string]helper.FlagDescriptor) + } + + i.FlagMap["dir"] = helper.FlagDescriptor{ + Description: fmt.Sprintf("Sets the directory for the secrets manager configuration file Default: %s", defaultConfigFileName), + Arguments: []string{ + "DIRECTORY", + }, + ArgumentsOptional: false, + FlagOptional: false, + } + + i.FlagMap["type"] = helper.FlagDescriptor{ + Description: fmt.Sprintf("Specifies the type of the secrets manager. Default: %s", secrets.HashicorpVault), + Arguments: []string{ + "TYPE", + }, + ArgumentsOptional: false, + FlagOptional: false, + } + + i.FlagMap["token"] = helper.FlagDescriptor{ + Description: "Specifies the access token for the service", + Arguments: []string{ + "TOKEN", + }, + ArgumentsOptional: false, + FlagOptional: false, + } + + i.FlagMap["server-url"] = helper.FlagDescriptor{ + Description: "Specifies the server URL for the service", + Arguments: []string{ + "SERVER_URL", + }, + ArgumentsOptional: false, + FlagOptional: false, + } + + i.FlagMap["name"] = helper.FlagDescriptor{ + Description: fmt.Sprintf("Specifies the name of the node for on-service record keeping. Default %s", defaultNodeName), + Arguments: []string{ + "SERVER_URL", + }, + ArgumentsOptional: false, + FlagOptional: false, + } +} + +// GetHelperText returns a simple description of the command +func (p *SecretsManagerGenerate) GetHelperText() string { + return "Initializes the secrets manager configuration in the provided directory. 
Used for Hashicorp Vault" +} + +// Help implements the cli.SecretsManagerGenerate interface +func (p *SecretsManagerGenerate) Help() string { + p.DefineFlags() + + return helper.GenerateHelp(p.Synopsis(), helper.GenerateUsage(p.GetBaseCommand(), p.FlagMap), p.FlagMap) +} + +// Synopsis implements the cli.SecretsManagerGenerate interface +func (p *SecretsManagerGenerate) Synopsis() string { + return p.GetHelperText() +} + +func (p *SecretsManagerGenerate) GetBaseCommand() string { + return "secrets-manager generate" +} + +// Run implements the cli.SecretsManagerGenerate interface +func (p *SecretsManagerGenerate) Run(args []string) int { + flags := flag.NewFlagSet(p.GetBaseCommand(), flag.ContinueOnError) + var path string + var token string + var serverURL string + var serviceType string + var name string + + flags.StringVar(&path, "dir", defaultConfigFileName, "") + flags.StringVar(&token, "token", "", "") + flags.StringVar(&serverURL, "server-url", "", "") + flags.StringVar(&serviceType, "type", string(secrets.HashicorpVault), "") + flags.StringVar(&name, "name", defaultNodeName, "") + + if err := flags.Parse(args); err != nil { + p.UI.Error(err.Error()) + return 1 + } + + // Safety checks + if path == "" { + p.UI.Error("required argument (path) not passed in") + return 1 + } + + if token == "" { + p.UI.Error("required argument (token) not passed in") + return 1 + } + + if serverURL == "" { + p.UI.Error("required argument (serverURL) not passed in") + return 1 + } + + if !secrets.SupportedServiceManager(secrets.SecretsManagerType(serviceType)) { + p.UI.Error("unsupported service manager type") + return 1 + } + + // Generate the configuration + config := &secrets.SecretsManagerConfig{ + Token: token, + ServerURL: serverURL, + Type: secrets.SecretsManagerType(serviceType), + Name: name, + Extra: nil, + } + + writeErr := config.WriteConfig(path) + if writeErr != nil { + p.UI.Error("unable to write configuration file") + return 1 + } + + output := "\n[SECRETS MANAGER GENERATE]\n" + + output += helper.FormatKV([]string{ + fmt.Sprintf("Service Type|%s", serviceType), + fmt.Sprintf("Server URL|%s", serverURL), + fmt.Sprintf("Access Token|%s", token), + fmt.Sprintf("Node Name|%s", name), + }) + + output += "\n\nCONFIGURATION GENERATED" + output += "\n" + + p.UI.Output(output) + + return 0 +} diff --git a/command/util/commands.go b/command/util/commands.go index 6e3fedd094..956703d9ba 100644 --- a/command/util/commands.go +++ b/command/util/commands.go @@ -9,6 +9,7 @@ import ( "github.com/0xPolygon/polygon-sdk/command/ibft" "github.com/0xPolygon/polygon-sdk/command/monitor" "github.com/0xPolygon/polygon-sdk/command/peers" + "github.com/0xPolygon/polygon-sdk/command/secretsManager" "github.com/0xPolygon/polygon-sdk/command/server" "github.com/0xPolygon/polygon-sdk/command/status" "github.com/0xPolygon/polygon-sdk/command/txpool" @@ -52,6 +53,9 @@ func Commands() map[string]cli.CommandFactory { txPoolAddCmd := txpool.TxPoolAdd{Meta: meta} txPoolStatusCmd := txpool.TxPoolStatus{Meta: meta} + smCmd := secretsManager.SecretManagerCommand{} + smGenerateCmd := secretsManager.SecretsManagerGenerate{Meta: meta} + return map[string]cli.CommandFactory{ // GENERIC SDK COMMANDS // @@ -126,5 +130,13 @@ func Commands() map[string]cli.CommandFactory { versionCmd.GetBaseCommand(): func() (cli.Command, error) { return &versionCmd, nil }, + + // SECRETS MANAGER COMMANDS // + smCmd.GetBaseCommand(): func() (cli.Command, error) { + return &smCmd, nil + }, + smGenerateCmd.GetBaseCommand(): func() (cli.Command, error) 
{ + return &smGenerateCmd, nil + }, } } diff --git a/crypto/crypto.go b/crypto/crypto.go index 27d1023ecb..87536bccd9 100644 --- a/crypto/crypto.go +++ b/crypto/crypto.go @@ -11,6 +11,7 @@ import ( "github.com/0xPolygon/polygon-sdk/helper/hex" "github.com/0xPolygon/polygon-sdk/helper/keystore" + "github.com/0xPolygon/polygon-sdk/secrets" "github.com/0xPolygon/polygon-sdk/types" "github.com/btcsuite/btcd/btcec" "golang.org/x/crypto/sha3" @@ -274,3 +275,27 @@ func GenerateOrReadPrivateKey(path string) (*ecdsa.PrivateKey, error) { return privateKey, nil } + +// GenerateAndEncodePrivateKey returns a newly generated private key and the Base64 encoding of that private key +func GenerateAndEncodePrivateKey() (*ecdsa.PrivateKey, []byte, error) { + keyBuff, err := keystore.CreatePrivateKey(generateKeyAndMarshal) + if err != nil { + return nil, nil, err + } + + privateKey, err := bytesToPrivateKey(keyBuff) + if err != nil { + return nil, nil, fmt.Errorf("unable to execute byte array -> private key conversion, %v", err) + } + + return privateKey, keyBuff, nil +} + +func ReadConsensusKey(manager secrets.SecretsManager) (*ecdsa.PrivateKey, error) { + validatorKey, err := manager.GetSecret(secrets.ValidatorKey) + if err != nil { + return nil, err + } + + return bytesToPrivateKey(validatorKey.([]byte)) +} diff --git a/helper/keystore/keystore.go b/helper/keystore/keystore.go index b8a3aae0e2..4473b3603c 100644 --- a/helper/keystore/keystore.go +++ b/helper/keystore/keystore.go @@ -20,7 +20,7 @@ func CreateIfNotExists(path string, create createFn) ([]byte, error) { var keyBuff []byte if !os.IsNotExist(err) { // Key exists - + keyBuff, err = ioutil.ReadFile(path) if err != nil { return nil, fmt.Errorf("unable to read private key from disk (%s), %v", path, err) @@ -43,3 +43,13 @@ func CreateIfNotExists(path string, create createFn) ([]byte, error) { return keyBuff, nil } + +func CreatePrivateKey(create createFn) ([]byte, error) { + keyBuff, err := create() + if err != nil { + return nil, fmt.Errorf("unable to generate private key, %v", err) + } + + // Encode it to a readable format (Base64) and return + return []byte(hex.EncodeToString(keyBuff)), nil +} diff --git a/network/keystore.go b/network/keystore.go index 978c5a32fd..ec2ca25aaa 100644 --- a/network/keystore.go +++ b/network/keystore.go @@ -7,6 +7,7 @@ import ( "os" "path/filepath" + "github.com/0xPolygon/polygon-sdk/secrets" "github.com/libp2p/go-libp2p-core/crypto" ) @@ -75,3 +76,40 @@ func ReadLibp2pKey(dataDir string) (crypto.PrivKey, error) { return key, nil } + +func ReadLibp2pKeyNEW(manager secrets.SecretsManager) (crypto.PrivKey, error) { + libp2pKey, err := manager.GetSecret(secrets.NetworkKey) + if err != nil { + return nil, err + } + + return ParseLibp2pKey(libp2pKey.([]byte)) +} + +func GenerateAndEncodeLibp2pKey() (crypto.PrivKey, []byte, error) { + priv, _, err := crypto.GenerateKeyPair(crypto.Secp256k1, 256) + if err != nil { + return nil, nil, err + } + + buf, err := crypto.MarshalPrivateKey(priv) + if err != nil { + return nil, nil, err + } + + return priv, []byte(hex.EncodeToString(buf)), nil +} + +func ParseLibp2pKey(key []byte) (crypto.PrivKey, error) { + buf, err := hex.DecodeString(string(key)) + if err != nil { + return nil, err + } + + libp2pKey, err := crypto.UnmarshalPrivateKey(buf) + if err != nil { + return nil, err + } + + return libp2pKey, nil +} diff --git a/network/server_test.go b/network/server_test.go index 6d038afb48..a451110601 100644 --- a/network/server_test.go +++ b/network/server_test.go @@ -16,7 +16,7 @@ 
import ( "github.com/stretchr/testify/assert" ) -func GenerateLibp2pKey(t *testing.T) (crypto.PrivKey, string) { +func GenerateTestLibp2pKey(t *testing.T) (crypto.PrivKey, string) { t.Helper() dir, err := ioutil.TempDir(os.TempDir(), "") @@ -376,8 +376,8 @@ func TestPeerReconnection(t *testing.T) { func TestReconnectionWithNewIP(t *testing.T) { natIP := "127.0.0.1" - _, dir0 := GenerateLibp2pKey(t) - _, dir1 := GenerateLibp2pKey(t) + _, dir0 := GenerateTestLibp2pKey(t) + _, dir1 := GenerateTestLibp2pKey(t) defaultConfig := func(c *Config) { c.NoDiscover = true @@ -426,7 +426,7 @@ func TestReconnectionWithNewIP(t *testing.T) { func TestSelfConnection_WithBootNodes(t *testing.T) { //Create a temporary directory for storing the key file - key, directoryName := GenerateLibp2pKey(t) + key, directoryName := GenerateTestLibp2pKey(t) peerId, err := peer.IDFromPrivateKey(key) assert.NoError(t, err) peerAddressInfo, err := StringToAddrInfo("/ip4/127.0.0.1/tcp/10001/p2p/16Uiu2HAmJxxH1tScDX2rLGSU9exnuvZKNM9SoK3v315azp68DLPW") diff --git a/secrets/config.go b/secrets/config.go new file mode 100644 index 0000000000..321cc20414 --- /dev/null +++ b/secrets/config.go @@ -0,0 +1,40 @@ +package secrets + +import ( + "encoding/json" + "io/ioutil" +) + +// SecretsManagerConfig is the configuration that gets +// written to a single configuration file +type SecretsManagerConfig struct { + Token string `json:"token"` // Access token to the instance + ServerURL string `json:"server_url"` // The URL of the running server + Type SecretsManagerType `json:"type"` // The type of SecretsManager + Name string `json:"name"` // The name of the current node + Extra map[string]interface{} `json:"extra"` // Any kind of arbitrary data +} + +// WriteConfig writes the current configuration to the specified path +func (c *SecretsManagerConfig) WriteConfig(path string) error { + jsonBytes, _ := json.MarshalIndent(c, "", " ") + + return ioutil.WriteFile(path, jsonBytes, 0600) +} + +// ReadConfig reads the SecretsManagerConfig from the specified path +func ReadConfig(path string) (*SecretsManagerConfig, error) { + configFile, readErr := ioutil.ReadFile(path) + if readErr != nil { + return nil, readErr + } + + config := &SecretsManagerConfig{} + + unmarshalErr := json.Unmarshal(configFile, &config) + if unmarshalErr != nil { + return nil, unmarshalErr + } + + return config, nil +} diff --git a/secrets/hashicorpVault/hashicorpVault.go b/secrets/hashicorpVault/hashicorpVault.go new file mode 100644 index 0000000000..514b965bc3 --- /dev/null +++ b/secrets/hashicorpVault/hashicorpVault.go @@ -0,0 +1,2 @@ +package hashicorpVault + diff --git a/secrets/local/local.go b/secrets/local/local.go new file mode 100644 index 0000000000..28cc4cb2c5 --- /dev/null +++ b/secrets/local/local.go @@ -0,0 +1,133 @@ +package local + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sync" + + "github.com/0xPolygon/polygon-sdk/secrets" +) + +// LocalSecretsManager is a SecretsManager that +// stores secrets locally on disk +type LocalSecretsManager struct { + // Path to the base working directory + path string + + // Map of known secrets and their paths + secretPathMap map[string]string + + // Mux for the secretPathMap + secretPathMapLock sync.RWMutex +} + +// SecretsManagerFactory implements the factory method +func SecretsManagerFactory( + config *secrets.SecretsManagerParams, +) (secrets.SecretsManager, error) { + // Set up the base object + localManager := &LocalSecretsManager{ + secretPathMap: make(map[string]string), + } + + // 
Grab the path to the working directory + path, ok := config.Params[secrets.Path] + if !ok { + return nil, errors.New("no path specified for local secrets manager") + } + localManager.path = path.(string) + + // Run the initial setup + _ = localManager.Setup() + + return localManager, nil +} + +// Setup sets up the local SecretsManager +func (l *LocalSecretsManager) Setup() error { + // The local SecretsManager initially handles only the + // validator and networking private keys + l.secretPathMapLock.Lock() + // baseDir/consensus/validator.key + l.secretPathMap[secrets.ValidatorKey] = filepath.Join( + l.path, + secrets.ConsensusFolderLocal, + secrets.ValidatorKeyLocal, + ) + + // baseDir/libp2p/libp2p.key + l.secretPathMap[secrets.NetworkKey] = filepath.Join( + l.path, + secrets.NetworkFolderLocal, + secrets.NetworkKeyLocal, + ) + + l.secretPathMapLock.Unlock() + + return nil +} + +// GetSecret gets the local SecretsManager's secret from disk +func (l *LocalSecretsManager) GetSecret(name string) (interface{}, error) { + l.secretPathMapLock.RLock() + secretPath, ok := l.secretPathMap[name] + l.secretPathMapLock.RUnlock() + + if !ok { + return nil, secrets.ErrSecretNotFound + } + + // Read the secret from disk + secret, err := ioutil.ReadFile(secretPath) + if err != nil { + return nil, fmt.Errorf( + "unable to read secret from disk (%s), %v", + secretPath, + err, + ) + } + + return secret, nil +} + +// SetSecret saves the local SecretsManager's secret to disk +func (l *LocalSecretsManager) SetSecret(name string, value interface{}) error { + l.secretPathMapLock.Lock() + secretPath, ok := l.secretPathMap[name] + l.secretPathMapLock.Unlock() + if !ok { + return secrets.ErrSecretNotFound + } + + // Write the secret to disk + if err := ioutil.WriteFile(secretPath, value.([]byte), 0600); err != nil { + return fmt.Errorf( + "unable to write secret to disk (%s), %v", + secretPath, + err, + ) + } + + return nil +} + +// RemoveSecret removes the local SecretsManager's secret from disk +func (l *LocalSecretsManager) RemoveSecret(name string) error { + l.secretPathMapLock.Lock() + secretPath, ok := l.secretPathMap[name] + defer l.secretPathMapLock.Unlock() + + if !ok { + return secrets.ErrSecretNotFound + } + delete(l.secretPathMap, name) + + if removeErr := os.Remove(secretPath); removeErr != nil { + return fmt.Errorf("unable to remove secret, %v", removeErr) + } + + return nil +} diff --git a/secrets/secrets.go b/secrets/secrets.go new file mode 100644 index 0000000000..4f667900d5 --- /dev/null +++ b/secrets/secrets.go @@ -0,0 +1,93 @@ +package secrets + +import ( + "errors" + + "github.com/hashicorp/go-hclog" +) + +// Define constant key names for SecretsManagerParams.Params +const ( + // Path is the path to the base working directory + Path = "path" + + // Token is the token used for authenticating with a KMS + Token = "token" + + // Server is the address of the KMS + Server = "server" +) + +// Define constant names for available secrets +const ( + // ValidatorKey is the private key secret of the validator node + ValidatorKey = "validator-key" + + // NetworkKey is the libp2p private key secret used for networking + NetworkKey = "network-key" +) + +// Define constant file names for the local StorageManager +const ( + ValidatorKeyLocal = "validator.key" + NetworkKeyLocal = "libp2p.key" +) + +// Define constant folder names for the local StorageManager +const ( + ConsensusFolderLocal = "consensus" + NetworkFolderLocal = "libp2p" +) + +var ( + ErrSecretNotFound = errors.New("secret not found") + 
ErrSecretInvalidInit = errors.New("invalid secret initialization request") +) + +type SecretsManagerType string + +// Define constant types of secrets managers +const ( + // Local pertains to the local FS [Default] + Local SecretsManagerType = "local" + + // HashicorpVault pertains to the Hashicorp Vault server + HashicorpVault SecretsManagerType = "hashicorp-vault" +) + +// SecretsManager defines the base public interface that all +// secret manager implementations should have +type SecretsManager interface { + // Setup performs secret manager-specific setup + Setup() error + + // GetSecret gets the secret by name + GetSecret(name string) (interface{}, error) + + // SetSecret sets the secret to a provided value + SetSecret(name string, value interface{}) error + + // RemoveSecret removes the secret from storage + RemoveSecret(name string) error +} + +// SecretsManagerParams defines the configuration params for the +// secrets manager +type SecretsManagerParams struct { + // Local logger object + Logger hclog.Logger + + // Params needed for the SecretsManager to function + Params map[string]interface{} +} + +// SecretsManagerFactory is the factory method for secrets managers +type SecretsManagerFactory func( + config *SecretsManagerParams, +) (SecretsManager, error) + +// SupportedServiceManager checks if the passed in service manager type is supported +func SupportedServiceManager(service SecretsManagerType) bool { + return service == HashicorpVault || + service == Local +} diff --git a/server/builtin.go b/server/builtin.go index dd4fc4a10f..095016c517 100644 --- a/server/builtin.go +++ b/server/builtin.go @@ -4,6 +4,8 @@ import ( consensusDev "github.com/0xPolygon/polygon-sdk/consensus/dev" consensusDummy "github.com/0xPolygon/polygon-sdk/consensus/dummy" consensusIBFT "github.com/0xPolygon/polygon-sdk/consensus/ibft" + "github.com/0xPolygon/polygon-sdk/secrets" + "github.com/0xPolygon/polygon-sdk/secrets/local" "github.com/0xPolygon/polygon-sdk/consensus" ) @@ -14,3 +16,10 @@ var consensusBackends = map[string]consensus.Factory{ "ibft": consensusIBFT.Factory, "dummy": consensusDummy.Factory, } + +// secretsManagerBackends defines the SecretManager factories for different +// secret management solutions +var secretsManagerBackends = map[string]secrets.SecretsManagerFactory{ + "local": local.SecretsManagerFactory, + //"hashicorp-vault": {}, +} From 9de503addbfa436de134a78f86a171ff802bc40f Mon Sep 17 00:00:00 2001 From: Milos Zivkovic Date: Thu, 11 Nov 2021 14:53:07 +0100 Subject: [PATCH 02/19] Make the consensus and networking modules use the secret manager --- command/ibft/ibft_init.go | 144 ++++++++++++++---- .../secrets_manager_generate.go | 2 +- consensus/consensus.go | 23 ++- consensus/dev/dev.go | 26 +--- consensus/dummy/dummy.go | 18 +-- consensus/ibft/ibft.go | 81 +++++----- helper/common/common.go | 38 +++++ network/server.go | 55 ++++++- network/testing.go | 7 - secrets/hashicorpVault/hashicorpVault.go | 2 - secrets/hashicorpvault/hashicorpVault.go | 74 +++++++++ secrets/local/local.go | 7 + secrets/secrets.go | 6 + server/builtin.go | 8 +- server/config.go | 3 + server/server.go | 133 +++++++++++----- 16 files changed, 472 insertions(+), 155 deletions(-) delete mode 100644 secrets/hashicorpVault/hashicorpVault.go create mode 100644 secrets/hashicorpvault/hashicorpVault.go diff --git a/command/ibft/ibft_init.go b/command/ibft/ibft_init.go index e0582c93dd..f4333db16e 100644 --- a/command/ibft/ibft_init.go +++ b/command/ibft/ibft_init.go @@ -1,18 +1,20 @@ package ibft import ( + 
"errors" "flag" "fmt" "path/filepath" "github.com/0xPolygon/polygon-sdk/command/helper" + "github.com/0xPolygon/polygon-sdk/helper/common" "github.com/0xPolygon/polygon-sdk/secrets" + "github.com/0xPolygon/polygon-sdk/secrets/hashicorpvault" "github.com/0xPolygon/polygon-sdk/secrets/local" "github.com/libp2p/go-libp2p-core/peer" "github.com/0xPolygon/polygon-sdk/crypto" "github.com/0xPolygon/polygon-sdk/network" - "github.com/0xPolygon/polygon-sdk/server" ) // IbftInit is the command to query the snapshot @@ -20,6 +22,10 @@ type IbftInit struct { helper.Meta } +var ( + ErrorInvalidSecretsConfig = errors.New("invalid secrets configuration file") +) + func (i *IbftInit) DefineFlags() { if i.FlagMap == nil { // Flag map not initialized @@ -34,6 +40,16 @@ func (i *IbftInit) DefineFlags() { ArgumentsOptional: false, FlagOptional: false, } + + i.FlagMap["secrets-config"] = helper.FlagDescriptor{ + Description: "Sets the path to the SecretsManager config file. Used for Hashicorp Vault. " + + "If omitted, the local FS secrets manager is used", + Arguments: []string{ + "SECRETS_CONFIG", + }, + ArgumentsOptional: false, + FlagOptional: true, + } } // GetHelperText returns a simple description of the command @@ -60,8 +76,14 @@ func (p *IbftInit) GetBaseCommand() string { // generateAlreadyInitializedError generates an output for when the IBFT directory // has already been initialized in the past func generateAlreadyInitializedError(directory string) string { + return fmt.Sprintf("Directory %s has previously initialized IBFT data\n", directory) +} + +// constructInitError is a wrapper function for the error output to CLI +func constructInitError(err string) string { output := "\n[IBFT INIT ERROR]\n" - output += fmt.Sprintf("Directory %s has previously initialized IBFT data\n", directory) + output += err + return output } @@ -70,11 +92,72 @@ var ( libp2pDir = "libp2p" ) +// setupLocalSM is a helper method for boilerplate local secrets manager setup +func setupLocalSM(dataDir string) (secrets.SecretsManager, error) { + // Check if the sub-directories exist / are already populated + for _, subDirectory := range []string{consensusDir, libp2pDir} { + if helper.DirectoryExists(filepath.Join(dataDir, subDirectory)) { + return nil, errors.New(generateAlreadyInitializedError(dataDir)) + } + } + + // Set up the local directories + if err := common.SetupDataDir(dataDir, []string{consensusDir, libp2pDir}); err != nil { + return nil, err + } + + localSecretsManager, factoryErr := local.SecretsManagerFactory(&secrets.SecretsManagerParams{ + Logger: nil, + Params: map[string]interface{}{ + secrets.Path: dataDir, + }, + }) + + return localSecretsManager, factoryErr +} + +// setupHashicorpVault is a helper method for boilerplate hashicorp vault secrets manager setup +func setupHashicorpVault( + secretsConfig *secrets.SecretsManagerConfig, +) (secrets.SecretsManager, error) { + params := make(map[string]interface{}) + + // Check if the token is present + if secretsConfig.Token == "" { + return nil, ErrorInvalidSecretsConfig + } + params[secrets.Token] = secretsConfig.Token + + // Check if the server URL is present + if secretsConfig.ServerURL == "" { + return nil, ErrorInvalidSecretsConfig + } + params[secrets.Server] = secretsConfig.ServerURL + + // Check if the node name is present + if secretsConfig.Name == "" { + return nil, ErrorInvalidSecretsConfig + } + params[secrets.Name] = secretsConfig.Name + + vaultSecretsManager, factoryErr := hashicorpvault.SecretsManagerFactory( + &secrets.SecretsManagerParams{ + Logger: 
nil, + Params: params, + }, + ) + + return vaultSecretsManager, factoryErr +} + // Run implements the cli.IbftInit interface func (p *IbftInit) Run(args []string) int { flags := flag.NewFlagSet(p.GetBaseCommand(), flag.ContinueOnError) var dataDir string + var configPath string + flags.StringVar(&dataDir, "data-dir", "", "") + flags.StringVar(&configPath, "secrets-config", "", "") if err := flags.Parse(args); err != nil { p.UI.Error(err.Error()) @@ -86,31 +169,38 @@ func (p *IbftInit) Run(args []string) int { return 1 } - // Check if the sub-directories exist / are already populated - for _, subDirectory := range []string{consensusDir, libp2pDir} { - if helper.DirectoryExists(filepath.Join(dataDir, subDirectory)) { - p.UI.Error(generateAlreadyInitializedError(dataDir)) + var secretsManager secrets.SecretsManager + if configPath == "" { + // No secrets manager config specified, + // use the local secrets manager + localSecretsManager, setupErr := setupLocalSM(dataDir) + if setupErr != nil { + p.UI.Error(constructInitError(setupErr.Error())) return 1 } - } - // TODO branch for the type of secrets manager - - // Local (FS) SecretsManager - if err := server.SetupDataDir(dataDir, []string{consensusDir, libp2pDir}); err != nil { - p.UI.Error(err.Error()) - return 1 - } + secretsManager = localSecretsManager + } else { + // Config file passed in + secretsConfig, readErr := secrets.ReadConfig(configPath) + if readErr != nil { + p.UI.Error(constructInitError(fmt.Sprintf("Unable to read config file, %v", readErr))) + return 1 + } - localSecretsManager, factoryErr := local.SecretsManagerFactory(&secrets.SecretsManagerParams{ - Logger: nil, - Params: map[string]interface{}{ - secrets.Path: dataDir, - }, - }) - if factoryErr != nil { - p.UI.Error(factoryErr.Error()) - return 1 + // Set up the corresponding secrets manager + switch secretsConfig.Type { + case secrets.HashicorpVault: + vaultSecretsManager, setupErr := setupHashicorpVault(secretsConfig) + if setupErr != nil { + p.UI.Error(constructInitError(setupErr.Error())) + return 1 + } + + secretsManager = vaultSecretsManager + default: + p.UI.Error(constructInitError("Unknown secrets manager type")) + } } // Generate the IBFT validator private key @@ -120,8 +210,8 @@ func (p *IbftInit) Run(args []string) int { return 1 } - // Write the validator private key to disk - if setErr := localSecretsManager.SetSecret(secrets.ValidatorKey, validatorKeyEncoded); setErr != nil { + // Write the validator private key to the secrets manager storage + if setErr := secretsManager.SetSecret(secrets.ValidatorKey, validatorKeyEncoded); setErr != nil { p.UI.Error(setErr.Error()) return 1 } @@ -133,8 +223,8 @@ func (p *IbftInit) Run(args []string) int { return 1 } - // Write the networking private key to disk - if setErr := localSecretsManager.SetSecret(secrets.NetworkKey, libp2pKeyEncoded); setErr != nil { + // Write the networking private key to the secrets manager storage + if setErr := secretsManager.SetSecret(secrets.NetworkKey, libp2pKeyEncoded); setErr != nil { p.UI.Error(setErr.Error()) return 1 } diff --git a/command/secretsManager/secrets_manager_generate.go b/command/secretsManager/secrets_manager_generate.go index 876f3be95e..ebf1f6af51 100644 --- a/command/secretsManager/secrets_manager_generate.go +++ b/command/secretsManager/secrets_manager_generate.go @@ -30,7 +30,7 @@ func (i *SecretsManagerGenerate) DefineFlags() { "DIRECTORY", }, ArgumentsOptional: false, - FlagOptional: false, + FlagOptional: true, } i.FlagMap["type"] = helper.FlagDescriptor{ diff 
--git a/consensus/consensus.go b/consensus/consensus.go index ae9f8c1c5e..4d422414eb 100644 --- a/consensus/consensus.go +++ b/consensus/consensus.go @@ -7,6 +7,7 @@ import ( "github.com/0xPolygon/polygon-sdk/blockchain" "github.com/0xPolygon/polygon-sdk/chain" "github.com/0xPolygon/polygon-sdk/network" + "github.com/0xPolygon/polygon-sdk/secrets" "github.com/0xPolygon/polygon-sdk/state" "github.com/0xPolygon/polygon-sdk/txpool" "github.com/0xPolygon/polygon-sdk/types" @@ -45,14 +46,20 @@ type Config struct { Path string } +type ConsensusParams struct { + Context context.Context + Seal bool + Config *Config + Txpool *txpool.TxPool + Network *network.Server + Blockchain *blockchain.Blockchain + Executor *state.Executor + Grpc *grpc.Server + Logger hclog.Logger + SecretsManager secrets.SecretsManager +} + // Factory is the factory function to create a discovery backend type Factory func( - context.Context, - bool, *Config, - *txpool.TxPool, - *network.Server, - *blockchain.Blockchain, - *state.Executor, - *grpc.Server, - hclog.Logger, + *ConsensusParams, ) (Consensus, error) diff --git a/consensus/dev/dev.go b/consensus/dev/dev.go index b662c196b3..b220aba287 100644 --- a/consensus/dev/dev.go +++ b/consensus/dev/dev.go @@ -7,12 +7,10 @@ import ( "github.com/0xPolygon/polygon-sdk/blockchain" "github.com/0xPolygon/polygon-sdk/consensus" - "github.com/0xPolygon/polygon-sdk/network" "github.com/0xPolygon/polygon-sdk/state" "github.com/0xPolygon/polygon-sdk/txpool" "github.com/0xPolygon/polygon-sdk/types" "github.com/hashicorp/go-hclog" - "google.golang.org/grpc" ) // Dev consensus protocol seals any new transaction immediately @@ -31,28 +29,20 @@ type Dev struct { // Factory implements the base factory method func Factory( - ctx context.Context, - sealing bool, - config *consensus.Config, - txpool *txpool.TxPool, - network *network.Server, - blockchain *blockchain.Blockchain, - executor *state.Executor, - srv *grpc.Server, - logger hclog.Logger, + params *consensus.ConsensusParams, ) (consensus.Consensus, error) { - logger = logger.Named("dev") + logger := params.Logger.Named("dev") d := &Dev{ logger: logger, notifyCh: make(chan struct{}), closeCh: make(chan struct{}), - blockchain: blockchain, - executor: executor, - txpool: txpool, + blockchain: params.Blockchain, + executor: params.Executor, + txpool: params.Txpool, } - rawInterval, ok := config.Config["interval"] + rawInterval, ok := params.Config.Config["interval"] if ok { interval, ok := rawInterval.(uint64) if !ok { @@ -62,8 +52,8 @@ func Factory( } // enable dev mode so that we can accept non-signed txns - txpool.EnableDev() - txpool.NotifyCh = d.notifyCh + params.Txpool.EnableDev() + params.Txpool.NotifyCh = d.notifyCh return d, nil } diff --git a/consensus/dummy/dummy.go b/consensus/dummy/dummy.go index 5eaf29cbc5..96bed569b7 100644 --- a/consensus/dummy/dummy.go +++ b/consensus/dummy/dummy.go @@ -1,16 +1,12 @@ package dummy import ( - "context" - "github.com/0xPolygon/polygon-sdk/blockchain" "github.com/0xPolygon/polygon-sdk/consensus" - "github.com/0xPolygon/polygon-sdk/network" "github.com/0xPolygon/polygon-sdk/state" "github.com/0xPolygon/polygon-sdk/txpool" "github.com/0xPolygon/polygon-sdk/types" "github.com/hashicorp/go-hclog" - "google.golang.org/grpc" ) type Dummy struct { @@ -23,20 +19,20 @@ type Dummy struct { executor *state.Executor } -func Factory(ctx context.Context, sealing bool, config *consensus.Config, txpool *txpool.TxPool, network *network.Server, blockchain *blockchain.Blockchain, executor *state.Executor, srv 
*grpc.Server, logger hclog.Logger) (consensus.Consensus, error) { - logger = logger.Named("dummy") +func Factory(params *consensus.ConsensusParams) (consensus.Consensus, error) { + logger := params.Logger.Named("dummy") d := &Dummy{ - sealing: sealing, + sealing: params.Seal, logger: logger, notifyCh: make(chan struct{}), closeCh: make(chan struct{}), - blockchain: blockchain, - executor: executor, - txpool: txpool, + blockchain: params.Blockchain, + executor: params.Executor, + txpool: params.Txpool, } - txpool.NotifyCh = d.notifyCh + params.Txpool.NotifyCh = d.notifyCh return d, nil } diff --git a/consensus/ibft/ibft.go b/consensus/ibft/ibft.go index c83c40eee4..fec69cb879 100644 --- a/consensus/ibft/ibft.go +++ b/consensus/ibft/ibft.go @@ -1,26 +1,23 @@ package ibft import ( - "context" "crypto/ecdsa" "fmt" "math" - "path/filepath" "reflect" "time" - "github.com/0xPolygon/polygon-sdk/blockchain" "github.com/0xPolygon/polygon-sdk/consensus" "github.com/0xPolygon/polygon-sdk/consensus/ibft/proto" "github.com/0xPolygon/polygon-sdk/crypto" "github.com/0xPolygon/polygon-sdk/helper/hex" "github.com/0xPolygon/polygon-sdk/network" "github.com/0xPolygon/polygon-sdk/protocol" + "github.com/0xPolygon/polygon-sdk/secrets" "github.com/0xPolygon/polygon-sdk/state" "github.com/0xPolygon/polygon-sdk/txpool" "github.com/0xPolygon/polygon-sdk/types" "github.com/hashicorp/go-hclog" - "google.golang.org/grpc" any "google.golang.org/protobuf/types/known/anypb" ) @@ -68,43 +65,39 @@ type Ibft struct { // aux test methods forceTimeoutCh bool + + secretsManager secrets.SecretsManager } // Factory implements the base consensus Factory method func Factory( - ctx context.Context, - sealing bool, - config *consensus.Config, - txpool *txpool.TxPool, - network *network.Server, - blockchain *blockchain.Blockchain, - executor *state.Executor, - srv *grpc.Server, - logger hclog.Logger, + params *consensus.ConsensusParams, ) (consensus.Consensus, error) { p := &Ibft{ - logger: logger.Named("ibft"), - config: config, - blockchain: blockchain, - executor: executor, - closeCh: make(chan struct{}), - txpool: txpool, - state: ¤tState{}, - network: network, - epochSize: DefaultEpochSize, - syncNotifyCh: make(chan bool), - sealing: sealing, + logger: params.Logger.Named("ibft"), + config: params.Config, + blockchain: params.Blockchain, + executor: params.Executor, + closeCh: make(chan struct{}), + txpool: params.Txpool, + state: ¤tState{}, + network: params.Network, + epochSize: DefaultEpochSize, + syncNotifyCh: make(chan bool), + sealing: params.Seal, + secretsManager: params.SecretsManager, } // Istanbul requires a different header hash function types.HeaderHash = istanbulHeaderHash - p.syncer = protocol.NewSyncer(logger, network, blockchain) + p.syncer = protocol.NewSyncer(params.Logger, params.Network, params.Blockchain) // register the grpc operator p.operator = &operator{ibft: p} - proto.RegisterIbftOperatorServer(srv, p.operator) + proto.RegisterIbftOperatorServer(params.Grpc, p.operator) + // Set up the node's validator key if err := p.createKey(); err != nil { return nil, err } @@ -194,27 +187,47 @@ func (i *Ibft) setupTransport() error { return nil } -// createKey sets the validator's private key, from the file path +// createKey sets the validator's private key from the secrets manager func (i *Ibft) createKey() error { - // i.msgQueue = msgQueueImpl{} i.msgQueue = newMsgQueue() i.closeCh = make(chan struct{}) i.updateCh = make(chan struct{}) if i.validatorKey == nil { - // generate a validator private key - 
validatorKey, err := crypto.GenerateOrReadPrivateKey(filepath.Join(i.config.Path, IbftKeyName)) - if err != nil { - return err + // Check if the validator key is initialized + var key *ecdsa.PrivateKey + if i.secretsManager.HasSecret(secrets.ValidatorKey) { + // The validator key is present in the secrets manager, load it + validatorKey, readErr := crypto.ReadConsensusKey(i.secretsManager) + if readErr != nil { + return fmt.Errorf("unable to read validator key from SM, %v", readErr) + } + + key = validatorKey + } else { + // The validator key is not present in the secrets manager, generate it + validatorKey, validatorKeyEncoded, genErr := crypto.GenerateAndEncodePrivateKey() + if genErr != nil { + return fmt.Errorf("unable to generate validator key for SM, %v", genErr) + } + + // Save the key to the secrets manager + saveErr := i.secretsManager.SetSecret(secrets.ValidatorKey, validatorKeyEncoded) + if saveErr != nil { + return fmt.Errorf("unable to save validator key to SM, %v", saveErr) + } + + key = validatorKey } - i.validatorKey = validatorKey - i.validatorKeyAddr = crypto.PubKeyToAddress(&validatorKey.PublicKey) + i.validatorKey = key + i.validatorKeyAddr = crypto.PubKeyToAddress(&key.PublicKey) } return nil } +// TODO remove this const IbftKeyName = "validator.key" // start starts the IBFT consensus state machine diff --git a/helper/common/common.go b/helper/common/common.go index d383a4edc2..f8eaef3502 100644 --- a/helper/common/common.go +++ b/helper/common/common.go @@ -1,5 +1,11 @@ package common +import ( + "fmt" + "os" + "path/filepath" +) + // Min returns the strictly lower number func Min(a, b uint64) uint64 { if a < b { @@ -17,3 +23,35 @@ func Max(a, b uint64) uint64 { return b } + +// SetupDataDir sets up the data directory and the corresponding sub-directories +func SetupDataDir(dataDir string, paths []string) error { + if err := createDir(dataDir); err != nil { + return fmt.Errorf("Failed to create data dir: (%s): %v", dataDir, err) + } + + for _, path := range paths { + path := filepath.Join(dataDir, path) + if err := createDir(path); err != nil { + return fmt.Errorf("Failed to create path: (%s): %v", path, err) + } + } + + return nil +} + +// createDir creates a file system directory if it doesn't exist +func createDir(path string) error { + _, err := os.Stat(path) + if err != nil && !os.IsNotExist(err) { + return err + } + + if os.IsNotExist(err) { + if err := os.MkdirAll(path, os.ModePerm); err != nil { + return err + } + } + + return nil +} diff --git a/network/server.go b/network/server.go index 5b16e6eb14..415edc04f2 100644 --- a/network/server.go +++ b/network/server.go @@ -10,8 +10,10 @@ import ( "time" "github.com/0xPolygon/polygon-sdk/chain" + "github.com/0xPolygon/polygon-sdk/secrets" "github.com/hashicorp/go-hclog" "github.com/libp2p/go-libp2p" + "github.com/libp2p/go-libp2p-core/crypto" "github.com/libp2p/go-libp2p-core/event" "github.com/libp2p/go-libp2p-core/host" "github.com/libp2p/go-libp2p-core/network" @@ -28,12 +30,13 @@ const DefaultLibp2pPort int = 1478 const MinimumPeerConnections int64 = 1 type Config struct { - NoDiscover bool - Addr *net.TCPAddr - NatAddr net.IP - DataDir string - MaxPeers uint64 - Chain *chain.Chain + NoDiscover bool + Addr *net.TCPAddr + NatAddr net.IP + DataDir string + MaxPeers uint64 + Chain *chain.Chain + SecretsManager secrets.SecretsManager } func DefaultConfig() *Config { @@ -64,6 +67,9 @@ type Server struct { protocols map[string]Protocol protocolsLock sync.Mutex + // Secrets manager + secretsManager 
secrets.SecretsManager + // pubsub ps *pubsub.PubSub @@ -79,10 +85,44 @@ type Peer struct { Info peer.AddrInfo } +// setupLibp2pKey is a helper method for setting up the networking private key +func setupLibp2pKey(secretsManager secrets.SecretsManager) (crypto.PrivKey, error) { + var key crypto.PrivKey + if secretsManager.HasSecret(secrets.NetworkKey) { + // The key is present in the secrets manager, read it + networkingKeyEncoded, readErr := secretsManager.GetSecret(secrets.NetworkKey) + if readErr != nil { + return nil, fmt.Errorf("unable to read networking private key from SM, %v", readErr) + } + + networkingKey, parseErr := ParseLibp2pKey(networkingKeyEncoded.([]byte)) + if parseErr != nil { + return nil, fmt.Errorf("unable to parse networking private key from SM, %v", parseErr) + } + + key = networkingKey + } else { + // The key is not present in the secrets manager, generate it + libp2pKey, libp2pKeyEncoded, keyErr := GenerateAndEncodeLibp2pKey() + if keyErr != nil { + return nil, fmt.Errorf("unable to generate networking private key for SM, %v", keyErr) + } + + // Write the networking private key to disk + if setErr := secretsManager.SetSecret(secrets.NetworkKey, libp2pKeyEncoded); setErr != nil { + return nil, fmt.Errorf("unable to store networking private key to SM, %v", setErr) + } + + key = libp2pKey + } + + return key, nil +} + func NewServer(logger hclog.Logger, config *Config) (*Server, error) { logger = logger.Named("network") - key, err := ReadLibp2pKey(config.DataDir) + key, err := setupLibp2pKey(config.SecretsManager) if err != nil { return nil, err } @@ -131,6 +171,7 @@ func NewServer(logger hclog.Logger, config *Config) (*Server, error) { closeCh: make(chan struct{}), emitterPeerEvent: emitter, protocols: map[string]Protocol{}, + secretsManager: config.SecretsManager, } // start identity diff --git a/network/testing.go b/network/testing.go index 900fe8d54f..878eba66fc 100644 --- a/network/testing.go +++ b/network/testing.go @@ -24,13 +24,6 @@ func CreateServer(t *testing.T, callback func(c *Config)) *Server { logger := hclog.NewNullLogger() - /* - logger := hclog.New(&hclog.LoggerOptions{ - Name: "polygon", - Level: hclog.LevelFromString("debug"), - }) - */ - if callback != nil { callback(cfg) } diff --git a/secrets/hashicorpVault/hashicorpVault.go b/secrets/hashicorpVault/hashicorpVault.go deleted file mode 100644 index 514b965bc3..0000000000 --- a/secrets/hashicorpVault/hashicorpVault.go +++ /dev/null @@ -1,2 +0,0 @@ -package hashicorpVault - diff --git a/secrets/hashicorpvault/hashicorpVault.go b/secrets/hashicorpvault/hashicorpVault.go new file mode 100644 index 0000000000..5682896457 --- /dev/null +++ b/secrets/hashicorpvault/hashicorpVault.go @@ -0,0 +1,74 @@ +package hashicorpvault + +import ( + "errors" + + "github.com/0xPolygon/polygon-sdk/secrets" +) + +// VaultSecretsManager is a SecretsManager that +// stores secrets on a Hashicorp Vault instance +type VaultSecretsManager struct { + // Token used for Vault instance authentication + token string + + // The Server URL of the Vault instance + serverURL string + + // The name of the current node, used for prefixing names of secrets + name string +} + +// SecretsManagerFactory implements the factory method +func SecretsManagerFactory( + config *secrets.SecretsManagerParams, +) (secrets.SecretsManager, error) { + // Set up the base object + vaultManager := &VaultSecretsManager{} + + // Grab the token from the config + token, ok := config.Params[secrets.Token] + if !ok { + return nil, errors.New("no token specified 
for Vault secrets manager") + } + vaultManager.token = token.(string) + + // Grab the server URL from the config + serverURL, ok := config.Params[secrets.Server] + if !ok { + return nil, errors.New("no server URL specified for Vault secrets manager") + } + vaultManager.serverURL = serverURL.(string) + + // Grab the node name from the config + name, ok := config.Params[secrets.Name] + if !ok { + return nil, errors.New("no node name specified for Vault secrets manager") + } + vaultManager.name = name.(string) + + // Run the initial setup + _ = vaultManager.Setup() + + return vaultManager, nil +} + +func (v *VaultSecretsManager) Setup() error { + panic("implement me") +} + +func (v *VaultSecretsManager) GetSecret(name string) (interface{}, error) { + panic("implement me") +} + +func (v *VaultSecretsManager) SetSecret(name string, value interface{}) error { + panic("implement me") +} + +func (v *VaultSecretsManager) HasSecret(name string) bool { + panic("implement me") +} + +func (v *VaultSecretsManager) RemoveSecret(name string) error { + panic("implement me") +} diff --git a/secrets/local/local.go b/secrets/local/local.go index 28cc4cb2c5..0ec12196ce 100644 --- a/secrets/local/local.go +++ b/secrets/local/local.go @@ -114,6 +114,13 @@ func (l *LocalSecretsManager) SetSecret(name string, value interface{}) error { return nil } +// HasSecret checks if the secret is present on disk +func (l *LocalSecretsManager) HasSecret(name string) bool { + _, err := l.GetSecret(name) + + return err == nil +} + // RemoveSecret removes the local SecretsManager's secret from disk func (l *LocalSecretsManager) RemoveSecret(name string) error { l.secretPathMapLock.Lock() diff --git a/secrets/secrets.go b/secrets/secrets.go index 4f667900d5..be9d5137a2 100644 --- a/secrets/secrets.go +++ b/secrets/secrets.go @@ -16,6 +16,9 @@ const ( // Server is the address of the KMS Server = "server" + + // Name is the name of the current node + Name ) // Define constant names for available secrets @@ -67,6 +70,9 @@ type SecretsManager interface { // SetSecret sets the secret to a provided value SetSecret(name string, value interface{}) error + // HasSecret checks if the secret is present + HasSecret(name string) bool + // RemoveSecret removes the secret from storage RemoveSecret(name string) error } diff --git a/server/builtin.go b/server/builtin.go index 095016c517..f828bb5825 100644 --- a/server/builtin.go +++ b/server/builtin.go @@ -5,13 +5,13 @@ import ( consensusDummy "github.com/0xPolygon/polygon-sdk/consensus/dummy" consensusIBFT "github.com/0xPolygon/polygon-sdk/consensus/ibft" "github.com/0xPolygon/polygon-sdk/secrets" + "github.com/0xPolygon/polygon-sdk/secrets/hashicorpvault" "github.com/0xPolygon/polygon-sdk/secrets/local" "github.com/0xPolygon/polygon-sdk/consensus" ) var consensusBackends = map[string]consensus.Factory{ - // "ethash": consensusEthash.Factory, "dev": consensusDev.Factory, "ibft": consensusIBFT.Factory, "dummy": consensusDummy.Factory, @@ -19,7 +19,7 @@ var consensusBackends = map[string]consensus.Factory{ // secretsManagerBackends defines the SecretManager factories for different // secret management solutions -var secretsManagerBackends = map[string]secrets.SecretsManagerFactory{ - "local": local.SecretsManagerFactory, - //"hashicorp-vault": {}, +var secretsManagerBackends = map[secrets.SecretsManagerType]secrets.SecretsManagerFactory{ + secrets.Local: local.SecretsManagerFactory, + secrets.HashicorpVault: hashicorpvault.SecretsManagerFactory, } diff --git a/server/config.go b/server/config.go 
index 6941cc79e0..7b73cd68f8 100644 --- a/server/config.go +++ b/server/config.go @@ -5,6 +5,7 @@ import ( "github.com/0xPolygon/polygon-sdk/chain" "github.com/0xPolygon/polygon-sdk/network" + "github.com/0xPolygon/polygon-sdk/secrets" "github.com/0xPolygon/polygon-sdk/types" ) @@ -26,6 +27,8 @@ type Config struct { NoLocals bool PriceLimit uint64 MaxSlots uint64 + + SecretsManager *secrets.SecretsManagerConfig } // DefaultConfig returns the default config for JSON-RPC, GRPC (ports) and Networking diff --git a/server/server.go b/server/server.go index 0a31fdf47d..376282535d 100644 --- a/server/server.go +++ b/server/server.go @@ -2,18 +2,20 @@ package server import ( "context" + "errors" "fmt" "math/big" "net" - "os" "path/filepath" "time" "github.com/0xPolygon/polygon-sdk/chain" "github.com/0xPolygon/polygon-sdk/crypto" + "github.com/0xPolygon/polygon-sdk/helper/common" "github.com/0xPolygon/polygon-sdk/helper/keccak" "github.com/0xPolygon/polygon-sdk/jsonrpc" "github.com/0xPolygon/polygon-sdk/network" + "github.com/0xPolygon/polygon-sdk/secrets" "github.com/0xPolygon/polygon-sdk/server/proto" "github.com/0xPolygon/polygon-sdk/state" "github.com/0xPolygon/polygon-sdk/state/runtime" @@ -58,6 +60,9 @@ type Server struct { // transaction pool txpool *txpool.TxPool + + // secrets manager + secretsManager secrets.SecretsManager } var dirPaths = []string{ @@ -80,15 +85,21 @@ func NewServer(logger hclog.Logger, config *Config) (*Server, error) { m.logger.Info("Data dir", "path", config.DataDir) // Generate all the paths in the dataDir - if err := SetupDataDir(config.DataDir, dirPaths); err != nil { + if err := common.SetupDataDir(config.DataDir, dirPaths); err != nil { return nil, fmt.Errorf("failed to create data directories: %v", err) } + // Set up the secrets manager + if err := m.setupSecretsManager(); err != nil { + return nil, fmt.Errorf("failed to set up the secrets manager: %v", err) + } + // start libp2p { netConfig := config.Network netConfig.Chain = m.config.Chain netConfig.DataDir = filepath.Join(m.config.DataDir, "libp2p") + netConfig.SecretsManager = m.secretsManager network, err := network.NewServer(logger, netConfig) if err != nil { @@ -129,7 +140,18 @@ func NewServer(logger hclog.Logger, config *Config) (*Server, error) { Blockchain: m.blockchain, } // start transaction pool - m.txpool, err = txpool.NewTxPool(logger, m.config.Seal, m.config.Locals, m.config.NoLocals, m.config.PriceLimit, m.config.MaxSlots, m.chain.Params.Forks.At(0), hub, m.grpcServer, m.network) + m.txpool, err = txpool.NewTxPool( + logger, + m.config.Seal, + m.config.Locals, + m.config.NoLocals, + m.config.PriceLimit, + m.config.MaxSlots, + m.chain.Params.Forks.At(0), + hub, + m.grpcServer, + m.network, + ) if err != nil { return nil, err } @@ -211,6 +233,63 @@ func (t *txpoolHub) GetBalance(root types.Hash, addr types.Address) (*big.Int, e return account.Balance, nil } +// setupSecretsManager sets up the secrets manager +func (s *Server) setupSecretsManager() error { + secretsManagerConfig := s.config.SecretsManager + secretsManagerType := secretsManagerConfig.Type + + // Grab the factory method + secretsManagerFactory, ok := secretsManagerBackends[secretsManagerType] + if !ok { + return fmt.Errorf("secrets manager type '%s' not found", secretsManagerType) + } + + // Initialize the params for setup + params := make(map[string]interface{}) + + switch secretsManagerType { + case secrets.Local: + // Only the base directory is required for + // the local secrets manager + params[secrets.Path] = s.config.DataDir + 
case secrets.HashicorpVault: + // Check if the token is present + if s.config.SecretsManager.Token == "" { + return errors.New("missing token from secrets config") + } + params[secrets.Token] = s.config.SecretsManager.Token + + // Check if the server URL is present + if s.config.SecretsManager.ServerURL == "" { + return errors.New("missing server URL from secrets config") + } + params[secrets.Server] = s.config.SecretsManager.ServerURL + + // Check if the node name is present + if s.config.SecretsManager.Name == "" { + return errors.New("missing node name from secrets config") + + } + params[secrets.Name] = s.config.SecretsManager.Name + } + + // Instantiate the secrets manager + secretsManager, factoryErr := secretsManagerFactory( + &secrets.SecretsManagerParams{ + Logger: s.logger, + Params: params, + }, + ) + + if factoryErr != nil { + return fmt.Errorf("unable to instantiate secrets manager, %v", factoryErr) + } + + s.secretsManager = secretsManager + + return nil +} + // setupConsensus sets up the consensus mechanism func (s *Server) setupConsensus() error { engineName := s.config.Chain.Params.GetEngine() @@ -228,7 +307,21 @@ func (s *Server) setupConsensus() error { Config: engineConfig, Path: filepath.Join(s.config.DataDir, "consensus"), } - consensus, err := engine(context.Background(), s.config.Seal, config, s.txpool, s.network, s.blockchain, s.executor, s.grpcServer, s.logger.Named("consensus")) + + consensus, err := engine( + &consensus.ConsensusParams{ + Context: context.Background(), + Seal: s.config.Seal, + Config: config, + Txpool: s.txpool, + Network: s.network, + Blockchain: s.blockchain, + Executor: s.executor, + Grpc: s.grpcServer, + Logger: s.logger.Named("consensus"), + SecretsManager: s.secretsManager, + }, + ) if err != nil { return err } @@ -405,35 +498,3 @@ type Entry struct { Enabled bool Config map[string]interface{} } - -// SetupDataDir sets up the polygon-sdk data directory and sub-folders -func SetupDataDir(dataDir string, paths []string) error { - if err := createDir(dataDir); err != nil { - return fmt.Errorf("Failed to create data dir: (%s): %v", dataDir, err) - } - - for _, path := range paths { - path := filepath.Join(dataDir, path) - if err := createDir(path); err != nil { - return fmt.Errorf("Failed to create path: (%s): %v", path, err) - } - } - - return nil -} - -// createDir creates a file system directory if it doesn't exist -func createDir(path string) error { - _, err := os.Stat(path) - if err != nil && !os.IsNotExist(err) { - return err - } - - if os.IsNotExist(err) { - if err := os.MkdirAll(path, os.ModePerm); err != nil { - return err - } - } - - return nil -} From 5ef9cbe26aab9517c08081bc0aa9f84d325c4d38 Mon Sep 17 00:00:00 2001 From: Milos Zivkovic Date: Thu, 11 Nov 2021 17:06:30 +0100 Subject: [PATCH 03/19] Use the secrets manager within the testing framework --- e2e/framework/testserver.go | 42 ++++++++++++++++++++++++++++--------- network/server.go | 7 +------ network/server_test.go | 32 +++++++++++++++++++++++++--- network/testing.go | 14 +++++++++++++ secrets/local/local.go | 5 +++++ server/server.go | 7 +++++++ 6 files changed, 88 insertions(+), 19 deletions(-) diff --git a/e2e/framework/testserver.go b/e2e/framework/testserver.go index 0acc79bb9a..5ce0f8fd73 100644 --- a/e2e/framework/testserver.go +++ b/e2e/framework/testserver.go @@ -17,11 +17,12 @@ import ( "time" "github.com/0xPolygon/polygon-sdk/command/helper" + "github.com/0xPolygon/polygon-sdk/secrets" + "github.com/0xPolygon/polygon-sdk/secrets/local" 
"github.com/0xPolygon/polygon-sdk/command/genesis" ibftCommand "github.com/0xPolygon/polygon-sdk/command/ibft" "github.com/0xPolygon/polygon-sdk/command/server" - "github.com/0xPolygon/polygon-sdk/consensus/ibft" "github.com/0xPolygon/polygon-sdk/crypto" "github.com/0xPolygon/polygon-sdk/helper/tests" "github.com/0xPolygon/polygon-sdk/network" @@ -152,16 +153,37 @@ func (t *TestServer) InitIBFT() (*InitIBFTResult, error) { } res := &InitIBFTResult{} - // Read the private key - key, err := crypto.GenerateOrReadPrivateKey(filepath.Join(cmd.Dir, t.Config.IBFTDir, "consensus", ibft.IbftKeyName)) - if err != nil { - return nil, err + + localSecretsManager, factoryErr := local.SecretsManagerFactory(&secrets.SecretsManagerParams{ + Logger: nil, + Params: map[string]interface{}{ + secrets.Path: filepath.Join(cmd.Dir, t.Config.IBFTDir), + }, + }) + if factoryErr != nil { + return nil, factoryErr } - // Read the Libp2p key - libp2pKey, err := network.ReadLibp2pKey(filepath.Join(cmd.Dir, t.Config.IBFTDir, "libp2p")) - if err != nil { - return nil, err + // Generate the IBFT validator private key + validatorKey, validatorKeyEncoded, keyErr := crypto.GenerateAndEncodePrivateKey() + if keyErr != nil { + return nil, keyErr + } + + // Write the validator private key to the secrets manager storage + if setErr := localSecretsManager.SetSecret(secrets.ValidatorKey, validatorKeyEncoded); setErr != nil { + return nil, setErr + } + + // Generate the libp2p private key + libp2pKey, libp2pKeyEncoded, keyErr := network.GenerateAndEncodeLibp2pKey() + if keyErr != nil { + return nil, keyErr + } + + // Write the networking private key to the secrets manager storage + if setErr := localSecretsManager.SetSecret(secrets.NetworkKey, libp2pKeyEncoded); setErr != nil { + return nil, setErr } // Get the node ID from the private key @@ -170,7 +192,7 @@ func (t *TestServer) InitIBFT() (*InitIBFTResult, error) { return nil, err } - res.Address = crypto.PubKeyToAddress(&key.PublicKey).String() + res.Address = crypto.PubKeyToAddress(&validatorKey.PublicKey).String() res.NodeID = nodeId.String() return res, nil diff --git a/network/server.go b/network/server.go index 415edc04f2..c095bb8244 100644 --- a/network/server.go +++ b/network/server.go @@ -90,16 +90,11 @@ func setupLibp2pKey(secretsManager secrets.SecretsManager) (crypto.PrivKey, erro var key crypto.PrivKey if secretsManager.HasSecret(secrets.NetworkKey) { // The key is present in the secrets manager, read it - networkingKeyEncoded, readErr := secretsManager.GetSecret(secrets.NetworkKey) + networkingKey, readErr := ReadLibp2pKeyNEW(secretsManager) if readErr != nil { return nil, fmt.Errorf("unable to read networking private key from SM, %v", readErr) } - networkingKey, parseErr := ParseLibp2pKey(networkingKeyEncoded.([]byte)) - if parseErr != nil { - return nil, fmt.Errorf("unable to parse networking private key from SM, %v", parseErr) - } - key = networkingKey } else { // The key is not present in the secrets manager, generate it diff --git a/network/server_test.go b/network/server_test.go index a451110601..b96310df23 100644 --- a/network/server_test.go +++ b/network/server_test.go @@ -9,7 +9,11 @@ import ( "testing" "time" + "github.com/0xPolygon/polygon-sdk/helper/common" "github.com/0xPolygon/polygon-sdk/helper/tests" + "github.com/0xPolygon/polygon-sdk/secrets" + "github.com/0xPolygon/polygon-sdk/secrets/local" + "github.com/hashicorp/go-hclog" "github.com/libp2p/go-libp2p-core/crypto" "github.com/libp2p/go-libp2p-core/peer" "github.com/multiformats/go-multiaddr" @@ 
-21,14 +25,36 @@ func GenerateTestLibp2pKey(t *testing.T) (crypto.PrivKey, string) { dir, err := ioutil.TempDir(os.TempDir(), "") assert.NoError(t, err) - key, err := ReadLibp2pKey(dir) - assert.NoError(t, err) + + // Instantiate the correct folder structure + setupErr := common.SetupDataDir(dir, []string{"libp2p"}) + if setupErr != nil { + t.Fatalf("unable to generate libp2p folder structure, %v", setupErr) + } + + localSecretsManager, factoryErr := local.SecretsManagerFactory(&secrets.SecretsManagerParams{ + Logger: hclog.NewNullLogger(), + Params: map[string]interface{}{ + secrets.Path: dir, + }, + }) + assert.NoError(t, factoryErr) + + libp2pKey, libp2pKeyEncoded, keyErr := GenerateAndEncodeLibp2pKey() + if keyErr != nil { + t.Fatalf("unable to generate libp2p key, %v", keyErr) + } + + if setErr := localSecretsManager.SetSecret(secrets.NetworkKey, libp2pKeyEncoded); setErr != nil { + t.Fatalf("unable to save libp2p key, %v", setErr) + } + t.Cleanup(func() { // remove directory after test is done assert.NoError(t, os.RemoveAll(dir)) }) - return key, dir + return libp2pKey, dir } func TestConnLimit_Inbound(t *testing.T) { diff --git a/network/testing.go b/network/testing.go index 878eba66fc..4b9129d12d 100644 --- a/network/testing.go +++ b/network/testing.go @@ -6,6 +6,8 @@ import ( "time" "github.com/0xPolygon/polygon-sdk/chain" + "github.com/0xPolygon/polygon-sdk/secrets" + "github.com/0xPolygon/polygon-sdk/secrets/local" "github.com/hashicorp/go-hclog" "github.com/stretchr/testify/assert" ) @@ -27,6 +29,18 @@ func CreateServer(t *testing.T, callback func(c *Config)) *Server { if callback != nil { callback(cfg) } + + secretsManager, factoryErr := local.SecretsManagerFactory(&secrets.SecretsManagerParams{ + Logger: logger, + Params: map[string]interface{}{ + secrets.Path: cfg.DataDir, + }, + }) + + assert.NoError(t, factoryErr) + + cfg.SecretsManager = secretsManager + srv, err := NewServer(logger, cfg) assert.NoError(t, err) diff --git a/secrets/local/local.go b/secrets/local/local.go index 0ec12196ce..7717985396 100644 --- a/secrets/local/local.go +++ b/secrets/local/local.go @@ -95,6 +95,11 @@ func (l *LocalSecretsManager) GetSecret(name string) (interface{}, error) { // SetSecret saves the local SecretsManager's secret to disk func (l *LocalSecretsManager) SetSecret(name string, value interface{}) error { + // If the data directory is not specified, skip write + if l.path == "" { + return nil + } + l.secretPathMapLock.Lock() secretPath, ok := l.secretPathMap[name] l.secretPathMapLock.Unlock() diff --git a/server/server.go b/server/server.go index 376282535d..c326c48898 100644 --- a/server/server.go +++ b/server/server.go @@ -236,6 +236,13 @@ func (t *txpoolHub) GetBalance(root types.Hash, addr types.Address) (*big.Int, e // setupSecretsManager sets up the secrets manager func (s *Server) setupSecretsManager() error { secretsManagerConfig := s.config.SecretsManager + if secretsManagerConfig == nil { + // No config provided, use default + secretsManagerConfig = &secrets.SecretsManagerConfig{ + Type: secrets.Local, + } + } + secretsManagerType := secretsManagerConfig.Type // Grab the factory method From 68a6235bd8a48af351fbc90987e65f389dfa3181 Mon Sep 17 00:00:00 2001 From: Milos Zivkovic Date: Thu, 11 Nov 2021 18:53:41 +0100 Subject: [PATCH 04/19] Implement Vault specific methods --- command/ibft/ibft_init.go | 3 +- e2e/framework/testserver.go | 3 +- go.mod | 16 +- go.sum | 170 + secrets/hashicorpvault/hashicorpVault.go | 109 +- secrets/hashicorpvault/vault_test.go | 65 + 
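Taken together, the hunks above settle on a single provisioning pattern: construct the secrets manager through its factory, generate and encode the key material, then persist it with SetSecret. A minimal sketch of that flow, using only identifiers that appear in this patch (local.SecretsManagerFactory, secrets.Path, secrets.ValidatorKey, secrets.NetworkKey, crypto.GenerateAndEncodePrivateKey, network.GenerateAndEncodeLibp2pKey); the helper name and its error handling are illustrative and not part of the change:

import (
	"github.com/0xPolygon/polygon-sdk/crypto"
	"github.com/0xPolygon/polygon-sdk/network"
	"github.com/0xPolygon/polygon-sdk/secrets"
	"github.com/0xPolygon/polygon-sdk/secrets/local"
	"github.com/hashicorp/go-hclog"
)

// initNodeKeys is an illustrative helper (not part of this patch) that wires the
// local (FS) secrets manager to freshly generated validator and networking keys
func initNodeKeys(dataDir string) error {
	manager, factoryErr := local.SecretsManagerFactory(&secrets.SecretsManagerParams{
		Logger: hclog.NewNullLogger(),
		Params: map[string]interface{}{
			secrets.Path: dataDir, // root directory for the on-disk secrets
		},
	})
	if factoryErr != nil {
		return factoryErr
	}

	// Validator (IBFT signing) key
	_, validatorKeyEncoded, keyErr := crypto.GenerateAndEncodePrivateKey()
	if keyErr != nil {
		return keyErr
	}
	if setErr := manager.SetSecret(secrets.ValidatorKey, validatorKeyEncoded); setErr != nil {
		return setErr
	}

	// libp2p networking key
	_, libp2pKeyEncoded, keyErr := network.GenerateAndEncodeLibp2pKey()
	if keyErr != nil {
		return keyErr
	}
	return manager.SetSecret(secrets.NetworkKey, libp2pKeyEncoded)
}

The read path mirrors this: setupLibp2pKey above first checks HasSecret(secrets.NetworkKey) and only generates a fresh key when nothing has been stored yet.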
secrets/local/local.go | 5 + vendor/github.com/armon/go-metrics/.gitignore | 26 + .../github.com/armon/go-metrics/.travis.yml | 13 + vendor/github.com/armon/go-metrics/LICENSE | 20 + vendor/github.com/armon/go-metrics/README.md | 91 + .../github.com/armon/go-metrics/const_unix.go | 12 + .../armon/go-metrics/const_windows.go | 13 + vendor/github.com/armon/go-metrics/go.mod | 17 + vendor/github.com/armon/go-metrics/go.sum | 125 + vendor/github.com/armon/go-metrics/inmem.go | 339 + .../armon/go-metrics/inmem_endpoint.go | 162 + .../armon/go-metrics/inmem_signal.go | 117 + vendor/github.com/armon/go-metrics/metrics.go | 293 + vendor/github.com/armon/go-metrics/sink.go | 115 + vendor/github.com/armon/go-metrics/start.go | 146 + vendor/github.com/armon/go-metrics/statsd.go | 184 + .../github.com/armon/go-metrics/statsite.go | 172 + vendor/github.com/armon/go-radix/go.mod | 1 + .../github.com/cenkalti/backoff/v3/.gitignore | 22 + .../cenkalti/backoff/v3/.travis.yml | 10 + vendor/github.com/cenkalti/backoff/v3/LICENSE | 20 + .../github.com/cenkalti/backoff/v3/README.md | 30 + .../github.com/cenkalti/backoff/v3/backoff.go | 66 + .../github.com/cenkalti/backoff/v3/context.go | 63 + .../cenkalti/backoff/v3/exponential.go | 153 + vendor/github.com/cenkalti/backoff/v3/go.mod | 3 + .../github.com/cenkalti/backoff/v3/retry.go | 82 + .../github.com/cenkalti/backoff/v3/ticker.go | 82 + .../github.com/cenkalti/backoff/v3/tries.go | 35 + .../golang/protobuf/proto/registry.go | 10 +- .../protoc-gen-go/descriptor/descriptor.pb.go | 200 + .../github.com/golang/protobuf/ptypes/any.go | 14 + .../github.com/golang/protobuf/ptypes/doc.go | 4 + .../golang/protobuf/ptypes/duration.go | 4 + .../golang/protobuf/ptypes/timestamp.go | 9 + vendor/github.com/golang/snappy/AUTHORS | 3 + vendor/github.com/golang/snappy/CONTRIBUTORS | 4 + vendor/github.com/golang/snappy/decode.go | 87 +- .../github.com/golang/snappy/decode_arm64.s | 494 ++ .../snappy/{decode_amd64.go => decode_asm.go} | 1 + .../github.com/golang/snappy/decode_other.go | 24 +- vendor/github.com/golang/snappy/encode.go | 4 + .../github.com/golang/snappy/encode_arm64.s | 722 ++ .../snappy/{encode_amd64.go => encode_asm.go} | 1 + .../github.com/golang/snappy/encode_other.go | 2 +- .../github.com/hashicorp/errwrap/errwrap.go | 9 + .../github.com/hashicorp/go-cleanhttp/LICENSE | 363 + .../hashicorp/go-cleanhttp/README.md | 30 + .../hashicorp/go-cleanhttp/cleanhttp.go | 57 + .../github.com/hashicorp/go-cleanhttp/doc.go | 20 + .../github.com/hashicorp/go-cleanhttp/go.mod | 1 + .../hashicorp/go-cleanhttp/handlers.go | 48 + .../hashicorp/go-hclog/interceptlogger.go | 105 +- .../hashicorp/go-hclog/intlogger.go | 187 +- .../github.com/hashicorp/go-hclog/logger.go | 30 +- .../github.com/hashicorp/go-hclog/stdlog.go | 2 +- .../hashicorp/go-immutable-radix/CHANGELOG.md | 2 + .../hashicorp/go-immutable-radix/iter.go | 59 +- .../go-immutable-radix/reverse_iter.go | 166 +- .../hashicorp/go-multierror/README.md | 29 +- .../hashicorp/go-multierror/append.go | 2 + .../github.com/hashicorp/go-multierror/go.mod | 2 +- .../hashicorp/go-multierror/multierror.go | 15 +- .../github.com/hashicorp/go-plugin/.gitignore | 2 + vendor/github.com/hashicorp/go-plugin/LICENSE | 353 + .../github.com/hashicorp/go-plugin/README.md | 163 + .../github.com/hashicorp/go-plugin/client.go | 1048 +++ .../hashicorp/go-plugin/discover.go | 28 + .../github.com/hashicorp/go-plugin/error.go | 24 + vendor/github.com/hashicorp/go-plugin/go.mod | 15 + vendor/github.com/hashicorp/go-plugin/go.sum | 87 + 
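Most of this patch's weight is the vendored Vault client (github.com/hashicorp/vault/api) and its transitive dependencies listed here; the project-side change in secrets/hashicorpvault/hashicorpVault.go is roughly a hundred lines per the diffstat, and its body is not shown in this excerpt. Purely as a hedged sketch, a Vault-backed SetSecret/GetSecret pair talking to the KV v2 engine through that client could look something like the following; the struct layout, mount path, and field names are assumptions, not the patch's actual code:

package hashicorpvault

import (
	"errors"
	"fmt"

	vault "github.com/hashicorp/vault/api"
)

// VaultSecretsManager is a hypothetical shape for the Vault-backed store;
// the real struct introduced by this patch may differ
type VaultSecretsManager struct {
	client   *vault.Client
	basePath string // assumed KV v2 data path, e.g. "secret/data/polygon-sdk-node"
}

// SetSecret writes the value under the assumed KV v2 path
func (v *VaultSecretsManager) SetSecret(name string, value interface{}) error {
	_, err := v.client.Logical().Write(
		fmt.Sprintf("%s/%s", v.basePath, name),
		// KV v2 expects the payload nested under a "data" key
		map[string]interface{}{"data": map[string]interface{}{"secret": value}},
	)
	return err
}

// GetSecret reads the value back from the same path
func (v *VaultSecretsManager) GetSecret(name string) (interface{}, error) {
	entry, err := v.client.Logical().Read(fmt.Sprintf("%s/%s", v.basePath, name))
	if err != nil {
		return nil, err
	}
	if entry == nil || entry.Data == nil {
		return nil, errors.New("secret not found")
	}
	// Unwrap the KV v2 "data" envelope
	data, ok := entry.Data["data"].(map[string]interface{})
	if !ok {
		return nil, errors.New("unexpected secret format")
	}
	return data["secret"], nil
}

In practice, raw []byte values stored this way would come back from Vault as base64-encoded JSON strings, so a real implementation likely needs a decoding step that this sketch omits.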
.../hashicorp/go-plugin/grpc_broker.go | 457 + .../hashicorp/go-plugin/grpc_client.go | 126 + .../hashicorp/go-plugin/grpc_controller.go | 23 + .../hashicorp/go-plugin/grpc_server.go | 149 + .../hashicorp/go-plugin/grpc_stdio.go | 207 + .../go-plugin/internal/plugin/gen.go | 3 + .../internal/plugin/grpc_broker.pb.go | 203 + .../internal/plugin/grpc_broker.proto | 13 + .../internal/plugin/grpc_controller.pb.go | 145 + .../internal/plugin/grpc_controller.proto | 11 + .../internal/plugin/grpc_stdio.pb.go | 233 + .../internal/plugin/grpc_stdio.proto | 30 + .../hashicorp/go-plugin/log_entry.go | 73 + vendor/github.com/hashicorp/go-plugin/mtls.go | 73 + .../hashicorp/go-plugin/mux_broker.go | 204 + .../github.com/hashicorp/go-plugin/plugin.go | 58 + .../github.com/hashicorp/go-plugin/process.go | 24 + .../hashicorp/go-plugin/process_posix.go | 19 + .../hashicorp/go-plugin/process_windows.go | 30 + .../hashicorp/go-plugin/protocol.go | 45 + .../hashicorp/go-plugin/rpc_client.go | 170 + .../hashicorp/go-plugin/rpc_server.go | 197 + .../github.com/hashicorp/go-plugin/server.go | 589 ++ .../hashicorp/go-plugin/server_mux.go | 31 + .../github.com/hashicorp/go-plugin/stream.go | 18 + .../github.com/hashicorp/go-plugin/testing.go | 180 + .../hashicorp/go-retryablehttp/.gitignore | 4 + .../.travis.yml | 4 +- .../hashicorp/go-retryablehttp/LICENSE | 363 + .../hashicorp/go-retryablehttp/Makefile | 11 + .../hashicorp/go-retryablehttp/README.md | 61 + .../hashicorp/go-retryablehttp/client.go | 705 ++ .../hashicorp/go-retryablehttp/go.mod | 8 + .../hashicorp/go-retryablehttp/go.sum | 10 + .../go-retryablehttp/roundtripper.go | 43 + .../hashicorp/go-rootcerts/.travis.yml | 12 + .../github.com/hashicorp/go-rootcerts/LICENSE | 363 + .../hashicorp/go-rootcerts/Makefile | 8 + .../hashicorp/go-rootcerts/README.md | 44 + .../github.com/hashicorp/go-rootcerts/doc.go | 9 + .../github.com/hashicorp/go-rootcerts/go.mod | 5 + .../github.com/hashicorp/go-rootcerts/go.sum | 2 + .../hashicorp/go-rootcerts/rootcerts.go | 123 + .../hashicorp/go-rootcerts/rootcerts_base.go | 12 + .../go-rootcerts/rootcerts_darwin.go | 48 + .../hashicorp/go-secure-stdlib/mlock/LICENSE | 363 + .../hashicorp/go-secure-stdlib/mlock/go.mod | 5 + .../hashicorp/go-secure-stdlib/mlock/go.sum | 2 + .../hashicorp/go-secure-stdlib/mlock/mlock.go | 15 + .../go-secure-stdlib/mlock/mlock_unavail.go | 13 + .../go-secure-stdlib/mlock/mlock_unix.go | 18 + .../go-secure-stdlib/parseutil/LICENSE | 363 + .../go-secure-stdlib/parseutil/go.mod | 10 + .../go-secure-stdlib/parseutil/go.sum | 31 + .../go-secure-stdlib/parseutil/parsepath.go | 46 + .../go-secure-stdlib/parseutil/parseutil.go | 308 + .../go-secure-stdlib/strutil/LICENSE | 363 + .../hashicorp/go-secure-stdlib/strutil/go.mod | 8 + .../hashicorp/go-secure-stdlib/strutil/go.sum | 13 + .../go-secure-stdlib/strutil/strutil.go | 508 ++ .../hashicorp/go-sockaddr/.gitignore | 26 + .../hashicorp/go-sockaddr/GNUmakefile | 65 + .../github.com/hashicorp/go-sockaddr/LICENSE | 373 + .../hashicorp/go-sockaddr/README.md | 118 + .../github.com/hashicorp/go-sockaddr/doc.go | 5 + .../github.com/hashicorp/go-sockaddr/go.mod | 8 + .../github.com/hashicorp/go-sockaddr/go.sum | 24 + .../hashicorp/go-sockaddr/ifaddr.go | 254 + .../hashicorp/go-sockaddr/ifaddrs.go | 1304 +++ .../hashicorp/go-sockaddr/ifattr.go | 65 + .../hashicorp/go-sockaddr/ipaddr.go | 169 + .../hashicorp/go-sockaddr/ipaddrs.go | 98 + .../hashicorp/go-sockaddr/ipv4addr.go | 516 ++ .../hashicorp/go-sockaddr/ipv6addr.go | 591 ++ 
.../github.com/hashicorp/go-sockaddr/rfc.go | 948 ++ .../hashicorp/go-sockaddr/route_info.go | 19 + .../go-sockaddr/route_info_android.go | 34 + .../hashicorp/go-sockaddr/route_info_bsd.go | 36 + .../go-sockaddr/route_info_default.go | 10 + .../hashicorp/go-sockaddr/route_info_linux.go | 42 + .../go-sockaddr/route_info_solaris.go | 37 + .../go-sockaddr/route_info_windows.go | 41 + .../hashicorp/go-sockaddr/sockaddr.go | 206 + .../hashicorp/go-sockaddr/sockaddrs.go | 193 + .../hashicorp/go-sockaddr/unixsock.go | 135 + .../github.com/hashicorp/go-uuid/.travis.yml | 12 + vendor/github.com/hashicorp/go-uuid/LICENSE | 363 + vendor/github.com/hashicorp/go-uuid/README.md | 8 + vendor/github.com/hashicorp/go-uuid/go.mod | 1 + vendor/github.com/hashicorp/go-uuid/uuid.go | 83 + .../hashicorp/go-version/.travis.yml | 13 + .../github.com/hashicorp/go-version/LICENSE | 354 + .../github.com/hashicorp/go-version/README.md | 65 + .../hashicorp/go-version/constraint.go | 204 + vendor/github.com/hashicorp/go-version/go.mod | 1 + .../hashicorp/go-version/version.go | 380 + .../go-version/version_collection.go | 17 + vendor/github.com/hashicorp/vault/api/LICENSE | 363 + .../github.com/hashicorp/vault/api/README.md | 8 + vendor/github.com/hashicorp/vault/api/auth.go | 46 + .../hashicorp/vault/api/auth_token.go | 296 + .../github.com/hashicorp/vault/api/client.go | 1393 +++ vendor/github.com/hashicorp/vault/api/go.mod | 24 + vendor/github.com/hashicorp/vault/api/go.sum | 326 + vendor/github.com/hashicorp/vault/api/help.go | 30 + .../hashicorp/vault/api/lifetime_watcher.go | 396 + .../github.com/hashicorp/vault/api/logical.go | 311 + .../hashicorp/vault/api/output_string.go | 73 + .../hashicorp/vault/api/plugin_helpers.go | 189 + .../github.com/hashicorp/vault/api/request.go | 148 + .../hashicorp/vault/api/response.go | 133 + .../github.com/hashicorp/vault/api/secret.go | 322 + vendor/github.com/hashicorp/vault/api/ssh.go | 62 + .../hashicorp/vault/api/ssh_agent.go | 241 + vendor/github.com/hashicorp/vault/api/sys.go | 11 + .../hashicorp/vault/api/sys_audit.go | 134 + .../hashicorp/vault/api/sys_auth.go | 82 + .../hashicorp/vault/api/sys_capabilities.go | 64 + .../hashicorp/vault/api/sys_config_cors.go | 75 + .../hashicorp/vault/api/sys_generate_root.go | 140 + .../hashicorp/vault/api/sys_health.go | 41 + .../hashicorp/vault/api/sys_init.go | 61 + .../hashicorp/vault/api/sys_leader.go | 35 + .../hashicorp/vault/api/sys_leases.go | 132 + .../hashicorp/vault/api/sys_monitor.go | 64 + .../hashicorp/vault/api/sys_mounts.go | 187 + .../hashicorp/vault/api/sys_plugins.go | 337 + .../hashicorp/vault/api/sys_policy.go | 113 + .../hashicorp/vault/api/sys_raft.go | 370 + .../hashicorp/vault/api/sys_rekey.go | 388 + .../hashicorp/vault/api/sys_rotate.go | 91 + .../hashicorp/vault/api/sys_seal.go | 87 + .../hashicorp/vault/api/sys_stepdown.go | 15 + vendor/github.com/hashicorp/vault/sdk/LICENSE | 363 + .../vault/sdk/helper/certutil/helpers.go | 1035 +++ .../vault/sdk/helper/certutil/types.go | 788 ++ .../vault/sdk/helper/compressutil/compress.go | 207 + .../vault/sdk/helper/consts/agent.go | 5 + .../vault/sdk/helper/consts/consts.go | 35 + .../vault/sdk/helper/consts/error.go | 25 + .../vault/sdk/helper/consts/plugin_types.go | 59 + .../vault/sdk/helper/consts/replication.go | 159 + .../vault/sdk/helper/cryptoutil/cryptoutil.go | 11 + .../vault/sdk/helper/errutil/error.go | 20 + .../hashicorp/vault/sdk/helper/hclutil/hcl.go | 36 + .../vault/sdk/helper/jsonutil/json.go | 100 + .../vault/sdk/helper/license/feature.go | 10 + 
.../vault/sdk/helper/locksutil/locks.go | 59 + .../vault/sdk/helper/logging/logging.go | 80 + .../sdk/helper/pathmanager/pathmanager.go | 136 + .../vault/sdk/helper/pluginutil/env.go | 69 + .../vault/sdk/helper/pluginutil/run_config.go | 161 + .../vault/sdk/helper/pluginutil/runner.go | 88 + .../vault/sdk/helper/pluginutil/tls.go | 108 + .../vault/sdk/helper/strutil/strutil.go | 94 + .../vault/sdk/helper/wrapping/wrapinfo.go | 37 + .../hashicorp/vault/sdk/logical/audit.go | 19 + .../hashicorp/vault/sdk/logical/auth.go | 107 + .../hashicorp/vault/sdk/logical/connection.go | 15 + .../vault/sdk/logical/controlgroup.go | 17 + .../hashicorp/vault/sdk/logical/error.go | 117 + .../vault/sdk/logical/identity.pb.go | 477 + .../vault/sdk/logical/identity.proto | 76 + .../hashicorp/vault/sdk/logical/lease.go | 53 + .../hashicorp/vault/sdk/logical/logical.go | 139 + .../vault/sdk/logical/logical_storage.go | 52 + .../hashicorp/vault/sdk/logical/plugin.pb.go | 146 + .../hashicorp/vault/sdk/logical/plugin.proto | 10 + .../hashicorp/vault/sdk/logical/request.go | 379 + .../hashicorp/vault/sdk/logical/response.go | 221 + .../vault/sdk/logical/response_util.go | 180 + .../hashicorp/vault/sdk/logical/secret.go | 30 + .../hashicorp/vault/sdk/logical/storage.go | 158 + .../vault/sdk/logical/storage_inmem.go | 87 + .../vault/sdk/logical/storage_view.go | 110 + .../vault/sdk/logical/system_view.go | 211 + .../hashicorp/vault/sdk/logical/testing.go | 87 + .../hashicorp/vault/sdk/logical/token.go | 245 + .../vault/sdk/logical/translate_response.go | 157 + .../hashicorp/vault/sdk/physical/cache.go | 261 + .../hashicorp/vault/sdk/physical/encoding.go | 108 + .../hashicorp/vault/sdk/physical/entry.go | 11 + .../hashicorp/vault/sdk/physical/error.go | 110 + .../vault/sdk/physical/inmem/inmem.go | 292 + .../vault/sdk/physical/inmem/inmem_ha.go | 167 + .../hashicorp/vault/sdk/physical/latency.go | 113 + .../hashicorp/vault/sdk/physical/physical.go | 133 + .../vault/sdk/physical/physical_access.go | 40 + .../vault/sdk/physical/physical_view.go | 94 + .../hashicorp/vault/sdk/physical/testing.go | 497 ++ .../vault/sdk/physical/transactions.go | 131 + .../hashicorp/vault/sdk/version/cgo.go | 7 + .../hashicorp/vault/sdk/version/version.go | 74 + .../vault/sdk/version/version_base.go | 14 + vendor/github.com/hashicorp/yamux/.gitignore | 23 + vendor/github.com/hashicorp/yamux/LICENSE | 362 + vendor/github.com/hashicorp/yamux/README.md | 86 + vendor/github.com/hashicorp/yamux/addr.go | 60 + vendor/github.com/hashicorp/yamux/const.go | 157 + vendor/github.com/hashicorp/yamux/mux.go | 87 + vendor/github.com/hashicorp/yamux/session.go | 648 ++ vendor/github.com/hashicorp/yamux/spec.md | 140 + vendor/github.com/hashicorp/yamux/stream.go | 470 + vendor/github.com/hashicorp/yamux/util.go | 43 + .../github.com/mattn/go-colorable/.travis.yml | 12 +- .../github.com/mattn/go-colorable/README.md | 4 +- .../mattn/go-colorable/colorable_appengine.go | 8 + .../mattn/go-colorable/colorable_others.go | 8 + .../mattn/go-colorable/colorable_windows.go | 28 + vendor/github.com/mattn/go-colorable/go.mod | 7 +- vendor/github.com/mattn/go-colorable/go.sum | 9 +- .../github.com/mattn/go-colorable/go.test.sh | 12 + vendor/github.com/mattn/go-isatty/.travis.yml | 15 +- vendor/github.com/mattn/go-isatty/README.md | 2 +- vendor/github.com/mattn/go-isatty/go.mod | 4 +- vendor/github.com/mattn/go-isatty/go.sum | 6 +- vendor/github.com/mattn/go-isatty/go.test.sh | 12 + .../mattn/go-isatty/isatty_android.go | 23 - 
.../github.com/mattn/go-isatty/isatty_bsd.go | 12 +- .../mattn/go-isatty/isatty_plan9.go | 2 +- .../mattn/go-isatty/isatty_tcgets.go | 1 - .../github.com/mattn/go-isatty/renovate.json | 8 + .../mitchellh/copystructure/.travis.yml | 12 + .../mitchellh/copystructure/LICENSE | 21 + .../mitchellh/copystructure/README.md | 21 + .../mitchellh/copystructure/copier_time.go | 15 + .../mitchellh/copystructure/copystructure.go | 548 ++ .../github.com/mitchellh/copystructure/go.mod | 3 + .../github.com/mitchellh/copystructure/go.sum | 2 + .../github.com/mitchellh/go-homedir/LICENSE | 21 + .../github.com/mitchellh/go-homedir/README.md | 14 + vendor/github.com/mitchellh/go-homedir/go.mod | 1 + .../mitchellh/go-homedir/homedir.go | 167 + .../go-testing-interface/.travis.yml | 13 + .../mitchellh/go-testing-interface/LICENSE | 21 + .../mitchellh/go-testing-interface/README.md | 52 + .../mitchellh/go-testing-interface/go.mod | 1 + .../mitchellh/go-testing-interface/testing.go | 84 + .../go-testing-interface/testing_go19.go | 108 + .../mitchellh/mapstructure/CHANGELOG.md | 58 + .../mitchellh/mapstructure/decode_hooks.go | 72 +- .../github.com/mitchellh/mapstructure/go.mod | 2 + .../mitchellh/mapstructure/mapstructure.go | 490 +- .../mitchellh/reflectwalk/.travis.yml | 1 + .../github.com/mitchellh/reflectwalk/LICENSE | 21 + .../mitchellh/reflectwalk/README.md | 6 + .../github.com/mitchellh/reflectwalk/go.mod | 1 + .../mitchellh/reflectwalk/location.go | 19 + .../mitchellh/reflectwalk/location_string.go | 16 + .../mitchellh/reflectwalk/reflectwalk.go | 401 + vendor/github.com/oklog/run/.gitignore | 14 + vendor/github.com/oklog/run/.travis.yml | 12 + vendor/github.com/oklog/run/LICENSE | 201 + vendor/github.com/oklog/run/README.md | 73 + vendor/github.com/oklog/run/group.go | 62 + vendor/github.com/pierrec/lz4/.gitignore | 34 + vendor/github.com/pierrec/lz4/.travis.yml | 24 + vendor/github.com/pierrec/lz4/LICENSE | 28 + vendor/github.com/pierrec/lz4/README.md | 90 + vendor/github.com/pierrec/lz4/block.go | 413 + vendor/github.com/pierrec/lz4/debug.go | 23 + vendor/github.com/pierrec/lz4/debug_stub.go | 7 + vendor/github.com/pierrec/lz4/decode_amd64.go | 8 + vendor/github.com/pierrec/lz4/decode_amd64.s | 375 + vendor/github.com/pierrec/lz4/decode_other.go | 98 + vendor/github.com/pierrec/lz4/errors.go | 30 + .../pierrec/lz4/internal/xxh32/xxh32zero.go | 223 + vendor/github.com/pierrec/lz4/lz4.go | 113 + vendor/github.com/pierrec/lz4/lz4_go1.10.go | 29 + .../github.com/pierrec/lz4/lz4_notgo1.10.go | 29 + vendor/github.com/pierrec/lz4/reader.go | 335 + vendor/github.com/pierrec/lz4/writer.go | 408 + .../go-glob}/.travis.yml | 5 +- vendor/github.com/ryanuber/go-glob/LICENSE | 21 + vendor/github.com/ryanuber/go-glob/README.md | 29 + vendor/github.com/ryanuber/go-glob/glob.go | 56 + vendor/github.com/ryanuber/go-glob/go.mod | 1 + vendor/go.uber.org/atomic/.codecov.yml | 4 + vendor/go.uber.org/atomic/.gitignore | 3 + vendor/go.uber.org/atomic/.travis.yml | 27 - vendor/go.uber.org/atomic/CHANGELOG.md | 54 +- vendor/go.uber.org/atomic/Makefile | 52 +- vendor/go.uber.org/atomic/README.md | 4 +- vendor/go.uber.org/atomic/atomic.go | 356 - vendor/go.uber.org/atomic/bool.go | 81 + vendor/go.uber.org/atomic/bool_ext.go | 53 + vendor/go.uber.org/atomic/doc.go | 23 + vendor/go.uber.org/atomic/duration.go | 82 + vendor/go.uber.org/atomic/duration_ext.go | 40 + vendor/go.uber.org/atomic/error.go | 48 +- vendor/go.uber.org/atomic/error_ext.go | 39 + vendor/go.uber.org/atomic/float64.go | 77 + 
vendor/go.uber.org/atomic/float64_ext.go | 69 + vendor/go.uber.org/atomic/gen.go | 27 + vendor/go.uber.org/atomic/go.mod | 2 - vendor/go.uber.org/atomic/go.sum | 14 - vendor/go.uber.org/atomic/int32.go | 102 + vendor/go.uber.org/atomic/int64.go | 102 + vendor/go.uber.org/atomic/nocmp.go | 35 + vendor/go.uber.org/atomic/string.go | 41 +- vendor/go.uber.org/atomic/string_ext.go | 45 + vendor/go.uber.org/atomic/time.go | 55 + vendor/go.uber.org/atomic/time_ext.go | 36 + vendor/go.uber.org/atomic/uint32.go | 102 + vendor/go.uber.org/atomic/uint64.go | 102 + vendor/go.uber.org/atomic/uintptr.go | 102 + vendor/go.uber.org/atomic/unsafe_pointer.go | 58 + vendor/go.uber.org/atomic/value.go | 31 + .../x/crypto/blake2b/blake2bAVX2_amd64.go | 1 + .../x/crypto/blake2b/blake2bAVX2_amd64.s | 95 +- .../x/crypto/blake2b/blake2b_amd64.go | 1 + .../x/crypto/blake2b/blake2b_amd64.s | 56 +- .../x/crypto/blake2b/blake2b_ref.go | 1 + .../golang.org/x/crypto/blake2b/register.go | 1 + .../x/crypto/blake2s/blake2s_386.go | 1 + .../golang.org/x/crypto/blake2s/blake2s_386.s | 95 +- .../x/crypto/blake2s/blake2s_amd64.go | 1 + .../x/crypto/blake2s/blake2s_amd64.s | 67 +- .../x/crypto/blake2s/blake2s_ref.go | 1 + .../golang.org/x/crypto/blake2s/register.go | 1 + .../x/crypto/chacha20/chacha_arm64.go | 1 + .../x/crypto/chacha20/chacha_arm64.s | 1 + .../x/crypto/chacha20/chacha_noasm.go | 1 + .../x/crypto/chacha20/chacha_ppc64le.go | 1 + .../x/crypto/chacha20/chacha_ppc64le.s | 1 + .../x/crypto/chacha20/chacha_s390x.go | 1 + .../x/crypto/chacha20/chacha_s390x.s | 1 + .../chacha20poly1305_amd64.go | 1 + .../chacha20poly1305/chacha20poly1305_amd64.s | 1 + .../chacha20poly1305_noasm.go | 1 + vendor/golang.org/x/crypto/cryptobyte/asn1.go | 804 ++ .../x/crypto/cryptobyte/asn1/asn1.go | 46 + .../golang.org/x/crypto/cryptobyte/builder.go | 337 + .../golang.org/x/crypto/cryptobyte/string.go | 161 + .../x/crypto/curve25519/curve25519.go | 52 +- .../x/crypto/curve25519/curve25519_amd64.go | 240 - .../x/crypto/curve25519/curve25519_amd64.s | 1793 ---- .../x/crypto/curve25519/curve25519_generic.go | 828 -- .../x/crypto/curve25519/curve25519_noasm.go | 11 - .../x/crypto/curve25519/internal/field/README | 7 + .../x/crypto/curve25519/internal/field/fe.go | 416 + .../curve25519/internal/field/fe_amd64.go | 13 + .../curve25519/internal/field/fe_amd64.s | 379 + .../internal/field/fe_amd64_noasm.go | 12 + .../curve25519/internal/field/fe_arm64.go | 16 + .../curve25519/internal/field/fe_arm64.s | 43 + .../internal/field/fe_arm64_noasm.go | 12 + .../curve25519/internal/field/fe_generic.go | 264 + .../curve25519/internal/field/sync.checkpoint | 1 + .../crypto/curve25519/internal/field/sync.sh | 19 + vendor/golang.org/x/crypto/ed25519/ed25519.go | 223 + .../x/crypto/ed25519/ed25519_go113.go | 74 + .../ed25519/internal/edwards25519/const.go | 1422 +++ .../internal/edwards25519/edwards25519.go | 1793 ++++ .../x/crypto/internal/subtle/aliasing.go | 1 + .../crypto/internal/subtle/aliasing_purego.go | 1 + vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go | 77 + .../x/crypto/poly1305/bits_compat.go | 1 + .../x/crypto/poly1305/bits_go1.13.go | 1 + .../golang.org/x/crypto/poly1305/mac_noasm.go | 1 + .../golang.org/x/crypto/poly1305/sum_amd64.go | 1 + .../golang.org/x/crypto/poly1305/sum_amd64.s | 1 + .../x/crypto/poly1305/sum_ppc64le.go | 1 + .../x/crypto/poly1305/sum_ppc64le.s | 19 +- .../golang.org/x/crypto/poly1305/sum_s390x.go | 1 + .../golang.org/x/crypto/poly1305/sum_s390x.s | 1 + .../x/crypto/salsa20/salsa/salsa20_amd64.go | 1 + 
.../x/crypto/salsa20/salsa/salsa20_amd64.s | 236 +- .../x/crypto/salsa20/salsa/salsa20_noasm.go | 1 + .../x/crypto/sha3/hashes_generic.go | 1 + vendor/golang.org/x/crypto/sha3/keccakf.go | 3 +- .../golang.org/x/crypto/sha3/keccakf_amd64.go | 1 + .../golang.org/x/crypto/sha3/keccakf_amd64.s | 1 + vendor/golang.org/x/crypto/sha3/register.go | 1 + vendor/golang.org/x/crypto/sha3/sha3_s390x.go | 1 + vendor/golang.org/x/crypto/sha3/sha3_s390x.s | 1 + .../golang.org/x/crypto/sha3/shake_generic.go | 1 + vendor/golang.org/x/crypto/sha3/xor.go | 1 + .../golang.org/x/crypto/sha3/xor_generic.go | 2 +- .../golang.org/x/crypto/sha3/xor_unaligned.go | 1 + vendor/golang.org/x/net/context/context.go | 56 + vendor/golang.org/x/net/context/go17.go | 73 + vendor/golang.org/x/net/context/go19.go | 21 + vendor/golang.org/x/net/context/pre_go17.go | 301 + vendor/golang.org/x/net/context/pre_go19.go | 110 + vendor/golang.org/x/net/html/const.go | 2 +- vendor/golang.org/x/net/html/foreign.go | 119 +- vendor/golang.org/x/net/html/parse.go | 15 +- vendor/golang.org/x/net/html/render.go | 2 +- .../x/net/http2/client_conn_pool.go | 2 + vendor/golang.org/x/net/http2/flow.go | 2 + vendor/golang.org/x/net/http2/go111.go | 1 + .../golang.org/x/net/http2/hpack/huffman.go | 7 + vendor/golang.org/x/net/http2/http2.go | 7 + vendor/golang.org/x/net/http2/not_go111.go | 1 + vendor/golang.org/x/net/http2/server.go | 50 +- vendor/golang.org/x/net/http2/transport.go | 118 +- vendor/golang.org/x/net/idna/idna10.0.0.go | 1 + vendor/golang.org/x/net/idna/idna9.0.0.go | 1 + vendor/golang.org/x/net/idna/tables10.0.0.go | 1 + vendor/golang.org/x/net/idna/tables11.0.0.go | 1 + .../idna/{tables12.00.go => tables12.0.0.go} | 3 +- vendor/golang.org/x/net/idna/tables13.0.0.go | 4840 +++++++++++ vendor/golang.org/x/net/idna/tables9.0.0.go | 1 + .../x/net/internal/socket/cmsghdr.go | 3 +- .../x/net/internal/socket/cmsghdr_bsd.go | 1 + .../internal/socket/cmsghdr_linux_32bit.go | 1 + .../internal/socket/cmsghdr_linux_64bit.go | 1 + .../internal/socket/cmsghdr_solaris_64bit.go | 4 +- .../x/net/internal/socket/cmsghdr_stub.go | 17 +- .../x/net/internal/socket/cmsghdr_unix.go | 22 + .../net/internal/socket/cmsghdr_zos_s390x.go | 25 + .../x/net/internal/socket/error_unix.go | 3 +- .../x/net/internal/socket/iovec_32bit.go | 1 + .../x/net/internal/socket/iovec_64bit.go | 3 +- .../internal/socket/iovec_solaris_64bit.go | 4 +- .../x/net/internal/socket/iovec_stub.go | 3 +- .../x/net/internal/socket/mmsghdr_stub.go | 1 + .../x/net/internal/socket/mmsghdr_unix.go | 1 + .../x/net/internal/socket/msghdr_bsd.go | 1 + .../x/net/internal/socket/msghdr_bsdvar.go | 1 + .../net/internal/socket/msghdr_linux_32bit.go | 1 + .../net/internal/socket/msghdr_linux_64bit.go | 1 + .../internal/socket/msghdr_solaris_64bit.go | 4 +- .../x/net/internal/socket/msghdr_stub.go | 3 +- .../x/net/internal/socket/msghdr_zos_s390x.go | 36 + .../x/net/internal/socket/norace.go | 1 + .../golang.org/x/net/internal/socket/race.go | 1 + .../x/net/internal/socket/rawconn_mmsg.go | 1 + .../x/net/internal/socket/rawconn_msg.go | 7 +- .../x/net/internal/socket/rawconn_nommsg.go | 1 + .../x/net/internal/socket/rawconn_nomsg.go | 3 +- .../x/net/internal/socket/socket.go | 10 +- .../golang.org/x/net/internal/socket/sys.go | 14 +- .../x/net/internal/socket/sys_bsd.go | 1 + .../x/net/internal/socket/sys_bsdvar.go | 23 - .../x/net/internal/socket/sys_const_unix.go | 1 + .../x/net/internal/socket/sys_const_zos.go | 18 + .../x/net/internal/socket/sys_darwin.go | 7 - 
.../x/net/internal/socket/sys_dragonfly.go | 32 - .../x/net/internal/socket/sys_linkname.go | 1 + .../x/net/internal/socket/sys_linux.go | 6 +- .../x/net/internal/socket/sys_linux_386.go | 2 - .../net/internal/socket/sys_linux_riscv64.go | 1 + .../x/net/internal/socket/sys_linux_s390x.go | 2 - .../x/net/internal/socket/sys_posix.go | 3 +- .../x/net/internal/socket/sys_solaris.go | 11 - .../x/net/internal/socket/sys_stub.go | 19 +- .../x/net/internal/socket/sys_unix.go | 1 + ...{sys_go1_11_darwin.go => sys_zos_s390x.go} | 19 +- .../x/net/internal/socket/sys_zos_s390x.s | 11 + .../x/net/internal/socket/zsys_aix_ppc64.go | 6 +- .../x/net/internal/socket/zsys_darwin_386.go | 5 +- .../net/internal/socket/zsys_darwin_amd64.go | 5 +- .../x/net/internal/socket/zsys_darwin_arm.go | 5 +- .../net/internal/socket/zsys_darwin_arm64.go | 5 +- .../internal/socket/zsys_dragonfly_amd64.go | 5 +- .../x/net/internal/socket/zsys_freebsd_386.go | 5 +- .../net/internal/socket/zsys_freebsd_amd64.go | 5 +- .../x/net/internal/socket/zsys_freebsd_arm.go | 5 +- .../net/internal/socket/zsys_freebsd_arm64.go | 5 +- .../x/net/internal/socket/zsys_linux_386.go | 5 +- .../x/net/internal/socket/zsys_linux_amd64.go | 5 +- .../x/net/internal/socket/zsys_linux_arm.go | 2 - .../x/net/internal/socket/zsys_linux_arm64.go | 2 - .../x/net/internal/socket/zsys_linux_mips.go | 2 - .../net/internal/socket/zsys_linux_mips64.go | 2 - .../internal/socket/zsys_linux_mips64le.go | 2 - .../net/internal/socket/zsys_linux_mipsle.go | 2 - .../x/net/internal/socket/zsys_linux_ppc64.go | 2 - .../net/internal/socket/zsys_linux_ppc64le.go | 2 - .../net/internal/socket/zsys_linux_riscv64.go | 3 +- .../x/net/internal/socket/zsys_linux_s390x.go | 2 - .../x/net/internal/socket/zsys_netbsd_386.go | 2 - .../net/internal/socket/zsys_netbsd_amd64.go | 2 - .../x/net/internal/socket/zsys_netbsd_arm.go | 2 - .../net/internal/socket/zsys_netbsd_arm64.go | 5 +- .../x/net/internal/socket/zsys_openbsd_386.go | 5 +- .../net/internal/socket/zsys_openbsd_amd64.go | 5 +- .../x/net/internal/socket/zsys_openbsd_arm.go | 5 +- .../net/internal/socket/zsys_openbsd_arm64.go | 5 +- .../internal/socket/zsys_openbsd_mips64.go | 50 + .../net/internal/socket/zsys_solaris_amd64.go | 5 +- .../x/net/internal/socket/zsys_zos_s390x.go | 32 + vendor/golang.org/x/net/ipv4/control_bsd.go | 1 + .../golang.org/x/net/ipv4/control_pktinfo.go | 1 + vendor/golang.org/x/net/ipv4/control_stub.go | 3 +- vendor/golang.org/x/net/ipv4/control_unix.go | 1 + vendor/golang.org/x/net/ipv4/control_zos.go | 86 + vendor/golang.org/x/net/ipv4/header.go | 9 +- vendor/golang.org/x/net/ipv4/icmp_stub.go | 1 + vendor/golang.org/x/net/ipv4/payload_cmsg.go | 3 +- .../golang.org/x/net/ipv4/payload_nocmsg.go | 3 +- vendor/golang.org/x/net/ipv4/sockopt_posix.go | 3 +- vendor/golang.org/x/net/ipv4/sockopt_stub.go | 3 +- vendor/golang.org/x/net/ipv4/sys_aix.go | 1 + vendor/golang.org/x/net/ipv4/sys_asmreq.go | 1 + .../golang.org/x/net/ipv4/sys_asmreq_stub.go | 1 + vendor/golang.org/x/net/ipv4/sys_asmreqn.go | 1 + .../golang.org/x/net/ipv4/sys_asmreqn_stub.go | 1 + vendor/golang.org/x/net/ipv4/sys_bpf.go | 1 + vendor/golang.org/x/net/ipv4/sys_bpf_stub.go | 1 + vendor/golang.org/x/net/ipv4/sys_bsd.go | 1 + vendor/golang.org/x/net/ipv4/sys_ssmreq.go | 1 + .../golang.org/x/net/ipv4/sys_ssmreq_stub.go | 1 + vendor/golang.org/x/net/ipv4/sys_stub.go | 3 +- vendor/golang.org/x/net/ipv4/sys_zos.go | 55 + .../golang.org/x/net/ipv4/zsys_aix_ppc64.go | 1 + .../x/net/ipv4/zsys_linux_riscv64.go | 1 + 
.../golang.org/x/net/ipv4/zsys_zos_s390x.go | 80 + vendor/golang.org/x/net/route/address.go | 3 +- vendor/golang.org/x/net/route/binary.go | 1 + vendor/golang.org/x/net/route/interface.go | 1 + .../x/net/route/interface_announce.go | 1 + .../x/net/route/interface_classic.go | 1 + .../x/net/route/interface_multicast.go | 1 + vendor/golang.org/x/net/route/message.go | 1 + vendor/golang.org/x/net/route/route.go | 1 + .../golang.org/x/net/route/route_classic.go | 3 +- vendor/golang.org/x/net/route/sys.go | 1 + vendor/golang.org/x/net/route/syscall.go | 1 + .../x/net/route/syscall_go1_11_darwin.go | 28 - .../x/net/route/syscall_go1_12_darwin.go | 1 + vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s | 3 +- vendor/golang.org/x/sys/cpu/byteorder.go | 11 +- vendor/golang.org/x/sys/cpu/cpu.go | 161 +- .../sys/cpu/{cpu_aix_ppc64.go => cpu_aix.go} | 8 +- vendor/golang.org/x/sys/cpu/cpu_arm.go | 33 + vendor/golang.org/x/sys/cpu/cpu_arm64.go | 64 +- vendor/golang.org/x/sys/cpu/cpu_arm64.s | 3 +- vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go | 3 +- vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go | 3 +- vendor/golang.org/x/sys/cpu/cpu_gc_x86.go | 7 +- .../golang.org/x/sys/cpu/cpu_gccgo_arm64.go | 1 + .../golang.org/x/sys/cpu/cpu_gccgo_s390x.go | 1 + vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go | 7 + vendor/golang.org/x/sys/cpu/cpu_linux.go | 3 +- .../golang.org/x/sys/cpu/cpu_linux_mips64x.go | 2 + .../golang.org/x/sys/cpu/cpu_linux_noinit.go | 1 + .../golang.org/x/sys/cpu/cpu_linux_ppc64x.go | 3 +- .../golang.org/x/sys/cpu/cpu_linux_s390x.go | 123 +- vendor/golang.org/x/sys/cpu/cpu_mips64x.go | 7 + vendor/golang.org/x/sys/cpu/cpu_mipsx.go | 3 + .../golang.org/x/sys/cpu/cpu_netbsd_arm64.go | 173 + vendor/golang.org/x/sys/cpu/cpu_other_arm.go | 10 + .../golang.org/x/sys/cpu/cpu_other_arm64.go | 3 +- .../golang.org/x/sys/cpu/cpu_other_mips64x.go | 13 + vendor/golang.org/x/sys/cpu/cpu_ppc64x.go | 17 + vendor/golang.org/x/sys/cpu/cpu_riscv64.go | 3 + vendor/golang.org/x/sys/cpu/cpu_s390x.go | 172 + vendor/golang.org/x/sys/cpu/cpu_s390x.s | 3 +- vendor/golang.org/x/sys/cpu/cpu_wasm.go | 5 + vendor/golang.org/x/sys/cpu/cpu_x86.go | 89 +- vendor/golang.org/x/sys/cpu/cpu_x86.s | 27 +- vendor/golang.org/x/sys/cpu/cpu_zos.go | 10 + vendor/golang.org/x/sys/cpu/cpu_zos_s390x.go | 25 + .../golang.org/x/sys/cpu/syscall_aix_gccgo.go | 27 + .../x/sys/cpu/syscall_aix_ppc64_gc.go | 4 +- .../sys/internal/unsafeheader/unsafeheader.go | 30 + vendor/golang.org/x/sys/unix/README.md | 10 +- vendor/golang.org/x/sys/unix/aliases.go | 3 +- vendor/golang.org/x/sys/unix/asm_aix_ppc64.s | 3 +- .../unix/{asm_darwin_386.s => asm_bsd_386.s} | 12 +- .../{asm_openbsd_amd64.s => asm_bsd_amd64.s} | 10 +- .../unix/{asm_netbsd_arm.s => asm_bsd_arm.s} | 10 +- .../{asm_netbsd_amd64.s => asm_bsd_arm64.s} | 10 +- .../golang.org/x/sys/unix/asm_darwin_amd64.s | 29 - vendor/golang.org/x/sys/unix/asm_darwin_arm.s | 30 - .../golang.org/x/sys/unix/asm_darwin_arm64.s | 30 - .../x/sys/unix/asm_dragonfly_amd64.s | 29 - .../golang.org/x/sys/unix/asm_freebsd_386.s | 29 - .../golang.org/x/sys/unix/asm_freebsd_amd64.s | 29 - .../golang.org/x/sys/unix/asm_freebsd_arm.s | 29 - .../golang.org/x/sys/unix/asm_freebsd_arm64.s | 29 - vendor/golang.org/x/sys/unix/asm_linux_386.s | 3 +- .../golang.org/x/sys/unix/asm_linux_amd64.s | 3 +- vendor/golang.org/x/sys/unix/asm_linux_arm.s | 3 +- .../golang.org/x/sys/unix/asm_linux_arm64.s | 3 +- .../golang.org/x/sys/unix/asm_linux_mips64x.s | 3 +- .../golang.org/x/sys/unix/asm_linux_mipsx.s | 3 +- 
.../golang.org/x/sys/unix/asm_linux_ppc64x.s | 3 +- .../golang.org/x/sys/unix/asm_linux_riscv64.s | 4 +- .../golang.org/x/sys/unix/asm_linux_s390x.s | 5 +- vendor/golang.org/x/sys/unix/asm_netbsd_386.s | 29 - .../golang.org/x/sys/unix/asm_netbsd_arm64.s | 29 - .../golang.org/x/sys/unix/asm_openbsd_386.s | 29 - .../golang.org/x/sys/unix/asm_openbsd_arm.s | 29 - ...m_openbsd_arm64.s => asm_openbsd_mips64.s} | 5 +- .../golang.org/x/sys/unix/asm_solaris_amd64.s | 3 +- vendor/golang.org/x/sys/unix/asm_zos_s390x.s | 426 + vendor/golang.org/x/sys/unix/cap_freebsd.go | 1 + vendor/golang.org/x/sys/unix/constants.go | 3 +- vendor/golang.org/x/sys/unix/dev_aix_ppc.go | 4 +- vendor/golang.org/x/sys/unix/dev_aix_ppc64.go | 4 +- vendor/golang.org/x/sys/unix/dev_zos.go | 29 + vendor/golang.org/x/sys/unix/dirent.go | 1 + vendor/golang.org/x/sys/unix/endian_big.go | 3 +- vendor/golang.org/x/sys/unix/endian_little.go | 3 +- vendor/golang.org/x/sys/unix/env_unix.go | 3 +- vendor/golang.org/x/sys/unix/epoll_zos.go | 221 + vendor/golang.org/x/sys/unix/fcntl.go | 1 + vendor/golang.org/x/sys/unix/fcntl_darwin.go | 6 + .../x/sys/unix/fcntl_linux_32bit.go | 5 +- vendor/golang.org/x/sys/unix/fdset.go | 3 +- vendor/golang.org/x/sys/unix/fstatfs_zos.go | 164 + vendor/golang.org/x/sys/unix/gccgo.go | 6 +- vendor/golang.org/x/sys/unix/gccgo_c.c | 6 + .../x/sys/unix/gccgo_linux_amd64.go | 1 + vendor/golang.org/x/sys/unix/ioctl.go | 10 + vendor/golang.org/x/sys/unix/ioctl_linux.go | 196 + vendor/golang.org/x/sys/unix/ioctl_zos.go | 74 + vendor/golang.org/x/sys/unix/mkall.sh | 29 +- vendor/golang.org/x/sys/unix/mkerrors.sh | 74 +- vendor/golang.org/x/sys/unix/pagesize_unix.go | 1 + vendor/golang.org/x/sys/unix/ptrace_darwin.go | 12 + vendor/golang.org/x/sys/unix/ptrace_ios.go | 12 + vendor/golang.org/x/sys/unix/race.go | 1 + vendor/golang.org/x/sys/unix/race0.go | 3 +- .../x/sys/unix/readdirent_getdents.go | 1 + .../x/sys/unix/readdirent_getdirentries.go | 1 + vendor/golang.org/x/sys/unix/sockcmsg_unix.go | 3 +- .../x/sys/unix/sockcmsg_unix_other.go | 13 +- vendor/golang.org/x/sys/unix/str.go | 1 + vendor/golang.org/x/sys/unix/syscall.go | 46 +- vendor/golang.org/x/sys/unix/syscall_aix.go | 29 +- .../golang.org/x/sys/unix/syscall_aix_ppc.go | 4 +- .../x/sys/unix/syscall_aix_ppc64.go | 4 +- vendor/golang.org/x/sys/unix/syscall_bsd.go | 45 +- .../x/sys/unix/syscall_darwin.1_12.go | 5 +- .../x/sys/unix/syscall_darwin.1_13.go | 27 +- .../golang.org/x/sys/unix/syscall_darwin.go | 210 +- .../x/sys/unix/syscall_darwin_386.1_11.go | 9 - .../x/sys/unix/syscall_darwin_386.go | 68 - .../x/sys/unix/syscall_darwin_amd64.1_11.go | 9 - .../x/sys/unix/syscall_darwin_amd64.go | 23 +- .../x/sys/unix/syscall_darwin_arm.1_11.go | 11 - .../x/sys/unix/syscall_darwin_arm.go | 68 - .../x/sys/unix/syscall_darwin_arm64.1_11.go | 11 - .../x/sys/unix/syscall_darwin_arm64.go | 25 +- .../x/sys/unix/syscall_darwin_libSystem.go | 10 +- .../x/sys/unix/syscall_dragonfly.go | 40 +- .../x/sys/unix/syscall_dragonfly_amd64.go | 1 + .../golang.org/x/sys/unix/syscall_freebsd.go | 36 +- .../x/sys/unix/syscall_freebsd_386.go | 1 + .../x/sys/unix/syscall_freebsd_amd64.go | 1 + .../x/sys/unix/syscall_freebsd_arm.go | 1 + .../x/sys/unix/syscall_freebsd_arm64.go | 1 + .../golang.org/x/sys/unix/syscall_illumos.go | 133 +- vendor/golang.org/x/sys/unix/syscall_linux.go | 327 +- .../x/sys/unix/syscall_linux_386.go | 16 +- .../x/sys/unix/syscall_linux_amd64.go | 9 +- .../x/sys/unix/syscall_linux_amd64_gc.go | 4 +- .../x/sys/unix/syscall_linux_arm.go | 22 +- 
.../x/sys/unix/syscall_linux_arm64.go | 35 +- .../golang.org/x/sys/unix/syscall_linux_gc.go | 3 +- .../x/sys/unix/syscall_linux_gc_386.go | 3 +- .../x/sys/unix/syscall_linux_gc_arm.go | 14 + .../x/sys/unix/syscall_linux_gccgo_386.go | 1 + .../x/sys/unix/syscall_linux_gccgo_arm.go | 1 + .../x/sys/unix/syscall_linux_mips64x.go | 9 +- .../x/sys/unix/syscall_linux_mipsx.go | 15 +- .../x/sys/unix/syscall_linux_ppc.go | 276 + .../x/sys/unix/syscall_linux_ppc64x.go | 11 +- .../x/sys/unix/syscall_linux_riscv64.go | 15 +- .../x/sys/unix/syscall_linux_s390x.go | 11 +- .../x/sys/unix/syscall_linux_sparc64.go | 11 +- .../golang.org/x/sys/unix/syscall_netbsd.go | 40 +- .../x/sys/unix/syscall_netbsd_386.go | 1 + .../x/sys/unix/syscall_netbsd_amd64.go | 1 + .../x/sys/unix/syscall_netbsd_arm.go | 1 + .../x/sys/unix/syscall_netbsd_arm64.go | 1 + .../golang.org/x/sys/unix/syscall_openbsd.go | 23 +- .../x/sys/unix/syscall_openbsd_386.go | 1 + .../x/sys/unix/syscall_openbsd_amd64.go | 1 + .../x/sys/unix/syscall_openbsd_arm.go | 1 + .../x/sys/unix/syscall_openbsd_arm64.go | 1 + .../x/sys/unix/syscall_openbsd_mips64.go | 35 + .../golang.org/x/sys/unix/syscall_solaris.go | 30 +- .../x/sys/unix/syscall_solaris_amd64.go | 1 + vendor/golang.org/x/sys/unix/syscall_unix.go | 18 +- .../golang.org/x/sys/unix/syscall_unix_gc.go | 5 +- .../x/sys/unix/syscall_unix_gc_ppc64x.go | 3 +- .../x/sys/unix/syscall_zos_s390x.go | 1829 ++++ vendor/golang.org/x/sys/unix/timestruct.go | 29 +- vendor/golang.org/x/sys/unix/xattr_bsd.go | 1 + .../golang.org/x/sys/unix/zerrors_aix_ppc.go | 1 + .../x/sys/unix/zerrors_aix_ppc64.go | 1 + .../x/sys/unix/zerrors_darwin_amd64.go | 97 +- .../x/sys/unix/zerrors_darwin_arm.go | 1784 ---- .../x/sys/unix/zerrors_darwin_arm64.go | 97 +- .../x/sys/unix/zerrors_dragonfly_amd64.go | 139 +- .../x/sys/unix/zerrors_freebsd_386.go | 18 + .../x/sys/unix/zerrors_freebsd_amd64.go | 18 + .../x/sys/unix/zerrors_freebsd_arm.go | 27 + .../x/sys/unix/zerrors_freebsd_arm64.go | 18 + vendor/golang.org/x/sys/unix/zerrors_linux.go | 607 +- .../x/sys/unix/zerrors_linux_386.go | 40 +- .../x/sys/unix/zerrors_linux_amd64.go | 40 +- .../x/sys/unix/zerrors_linux_arm.go | 40 +- .../x/sys/unix/zerrors_linux_arm64.go | 44 +- .../x/sys/unix/zerrors_linux_mips.go | 40 +- .../x/sys/unix/zerrors_linux_mips64.go | 40 +- .../x/sys/unix/zerrors_linux_mips64le.go | 40 +- .../x/sys/unix/zerrors_linux_mipsle.go | 40 +- .../x/sys/unix/zerrors_linux_ppc.go | 879 ++ .../x/sys/unix/zerrors_linux_ppc64.go | 40 +- .../x/sys/unix/zerrors_linux_ppc64le.go | 40 +- .../x/sys/unix/zerrors_linux_riscv64.go | 40 +- .../x/sys/unix/zerrors_linux_s390x.go | 42 +- .../x/sys/unix/zerrors_linux_sparc64.go | 40 +- .../x/sys/unix/zerrors_netbsd_386.go | 7 + .../x/sys/unix/zerrors_netbsd_amd64.go | 7 + .../x/sys/unix/zerrors_netbsd_arm.go | 7 + .../x/sys/unix/zerrors_netbsd_arm64.go | 7 + .../x/sys/unix/zerrors_openbsd_386.go | 8 + .../x/sys/unix/zerrors_openbsd_amd64.go | 8 + .../x/sys/unix/zerrors_openbsd_arm.go | 8 + .../x/sys/unix/zerrors_openbsd_arm64.go | 8 + ...arwin_386.go => zerrors_openbsd_mips64.go} | 1663 ++-- .../x/sys/unix/zerrors_solaris_amd64.go | 26 +- .../x/sys/unix/zerrors_zos_s390x.go | 860 ++ .../x/sys/unix/zptrace_armnn_linux.go | 1 + .../x/sys/unix/zptrace_mipsnn_linux.go | 1 + .../x/sys/unix/zptrace_mipsnnle_linux.go | 1 + .../x/sys/unix/zptrace_x86_linux.go | 1 + .../golang.org/x/sys/unix/zsyscall_aix_ppc.go | 1 + .../x/sys/unix/zsyscall_aix_ppc64.go | 1 + .../x/sys/unix/zsyscall_aix_ppc64_gc.go | 4 +- 
.../x/sys/unix/zsyscall_aix_ppc64_gccgo.go | 4 +- .../x/sys/unix/zsyscall_darwin_386.1_11.go | 1811 ---- .../x/sys/unix/zsyscall_darwin_386.1_13.go | 41 - .../x/sys/unix/zsyscall_darwin_386.1_13.s | 12 - .../x/sys/unix/zsyscall_darwin_386.go | 2499 ------ .../x/sys/unix/zsyscall_darwin_386.s | 284 - .../x/sys/unix/zsyscall_darwin_amd64.1_11.go | 1811 ---- .../x/sys/unix/zsyscall_darwin_amd64.1_13.go | 11 +- .../x/sys/unix/zsyscall_darwin_amd64.1_13.s | 19 +- .../x/sys/unix/zsyscall_darwin_amd64.go | 852 +- .../x/sys/unix/zsyscall_darwin_amd64.s | 859 +- .../x/sys/unix/zsyscall_darwin_arm.1_11.go | 1784 ---- .../x/sys/unix/zsyscall_darwin_arm.1_13.go | 41 - .../x/sys/unix/zsyscall_darwin_arm.1_13.s | 12 - .../x/sys/unix/zsyscall_darwin_arm.go | 2484 ------ .../x/sys/unix/zsyscall_darwin_arm.s | 282 - .../x/sys/unix/zsyscall_darwin_arm64.1_13.go | 11 +- .../x/sys/unix/zsyscall_darwin_arm64.1_13.s | 19 +- .../x/sys/unix/zsyscall_darwin_arm64.go | 837 +- .../x/sys/unix/zsyscall_darwin_arm64.s | 857 +- .../x/sys/unix/zsyscall_dragonfly_amd64.go | 45 +- .../x/sys/unix/zsyscall_freebsd_386.go | 1 + .../x/sys/unix/zsyscall_freebsd_amd64.go | 1 + .../x/sys/unix/zsyscall_freebsd_arm.go | 1 + .../x/sys/unix/zsyscall_freebsd_arm64.go | 1 + .../x/sys/unix/zsyscall_illumos_amd64.go | 43 +- .../golang.org/x/sys/unix/zsyscall_linux.go | 119 + .../x/sys/unix/zsyscall_linux_386.go | 3 +- .../x/sys/unix/zsyscall_linux_amd64.go | 3 +- .../x/sys/unix/zsyscall_linux_arm.go | 3 +- .../x/sys/unix/zsyscall_linux_arm64.go | 5 +- .../x/sys/unix/zsyscall_linux_mips.go | 3 +- .../x/sys/unix/zsyscall_linux_mips64.go | 3 +- .../x/sys/unix/zsyscall_linux_mips64le.go | 3 +- .../x/sys/unix/zsyscall_linux_mipsle.go | 3 +- .../x/sys/unix/zsyscall_linux_ppc.go | 762 ++ .../x/sys/unix/zsyscall_linux_ppc64.go | 3 +- .../x/sys/unix/zsyscall_linux_ppc64le.go | 3 +- .../x/sys/unix/zsyscall_linux_riscv64.go | 1 + .../x/sys/unix/zsyscall_linux_s390x.go | 3 +- .../x/sys/unix/zsyscall_linux_sparc64.go | 3 +- .../x/sys/unix/zsyscall_netbsd_386.go | 11 + .../x/sys/unix/zsyscall_netbsd_amd64.go | 11 + .../x/sys/unix/zsyscall_netbsd_arm.go | 11 + .../x/sys/unix/zsyscall_netbsd_arm64.go | 11 + .../x/sys/unix/zsyscall_openbsd_386.go | 1 + .../x/sys/unix/zsyscall_openbsd_amd64.go | 1 + .../x/sys/unix/zsyscall_openbsd_arm.go | 1 + .../x/sys/unix/zsyscall_openbsd_arm64.go | 1 + ...m64.1_11.go => zsyscall_openbsd_mips64.go} | 515 +- .../x/sys/unix/zsyscall_solaris_amd64.go | 33 +- .../x/sys/unix/zsyscall_zos_s390x.go | 1255 +++ .../x/sys/unix/zsysctl_openbsd_386.go | 4 +- .../x/sys/unix/zsysctl_openbsd_amd64.go | 2 + .../x/sys/unix/zsysctl_openbsd_arm.go | 2 + .../x/sys/unix/zsysctl_openbsd_arm64.go | 1 + .../x/sys/unix/zsysctl_openbsd_mips64.go | 280 + .../x/sys/unix/zsysnum_darwin_386.go | 436 - .../x/sys/unix/zsysnum_darwin_amd64.go | 2 + .../x/sys/unix/zsysnum_darwin_arm.go | 436 - .../x/sys/unix/zsysnum_darwin_arm64.go | 2 + .../x/sys/unix/zsysnum_dragonfly_amd64.go | 256 +- .../x/sys/unix/zsysnum_freebsd_386.go | 1 + .../x/sys/unix/zsysnum_freebsd_amd64.go | 1 + .../x/sys/unix/zsysnum_freebsd_arm.go | 1 + .../x/sys/unix/zsysnum_freebsd_arm64.go | 1 + .../x/sys/unix/zsysnum_linux_386.go | 8 + .../x/sys/unix/zsysnum_linux_amd64.go | 8 + .../x/sys/unix/zsysnum_linux_arm.go | 8 + .../x/sys/unix/zsysnum_linux_arm64.go | 8 + .../x/sys/unix/zsysnum_linux_mips.go | 8 + .../x/sys/unix/zsysnum_linux_mips64.go | 8 + .../x/sys/unix/zsysnum_linux_mips64le.go | 8 + .../x/sys/unix/zsysnum_linux_mipsle.go | 8 + .../x/sys/unix/zsysnum_linux_ppc.go | 434 + 
.../x/sys/unix/zsysnum_linux_ppc64.go | 8 + .../x/sys/unix/zsysnum_linux_ppc64le.go | 8 + .../x/sys/unix/zsysnum_linux_riscv64.go | 8 + .../x/sys/unix/zsysnum_linux_s390x.go | 8 + .../x/sys/unix/zsysnum_linux_sparc64.go | 8 + .../x/sys/unix/zsysnum_netbsd_386.go | 1 + .../x/sys/unix/zsysnum_netbsd_amd64.go | 1 + .../x/sys/unix/zsysnum_netbsd_arm.go | 1 + .../x/sys/unix/zsysnum_netbsd_arm64.go | 1 + .../x/sys/unix/zsysnum_openbsd_386.go | 1 + .../x/sys/unix/zsysnum_openbsd_amd64.go | 1 + .../x/sys/unix/zsysnum_openbsd_arm.go | 1 + .../x/sys/unix/zsysnum_openbsd_arm64.go | 1 + .../x/sys/unix/zsysnum_openbsd_mips64.go | 221 + .../x/sys/unix/zsysnum_zos_s390x.go | 2670 ++++++ .../golang.org/x/sys/unix/ztypes_aix_ppc.go | 2 + .../golang.org/x/sys/unix/ztypes_aix_ppc64.go | 2 + .../x/sys/unix/ztypes_darwin_386.go | 499 -- .../x/sys/unix/ztypes_darwin_amd64.go | 164 +- .../x/sys/unix/ztypes_darwin_arm.go | 500 -- .../x/sys/unix/ztypes_darwin_arm64.go | 164 +- .../x/sys/unix/ztypes_dragonfly_amd64.go | 51 +- .../x/sys/unix/ztypes_freebsd_386.go | 16 +- .../x/sys/unix/ztypes_freebsd_amd64.go | 16 +- .../x/sys/unix/ztypes_freebsd_arm.go | 28 +- .../x/sys/unix/ztypes_freebsd_arm64.go | 16 +- .../x/sys/unix/ztypes_illumos_amd64.go | 40 + vendor/golang.org/x/sys/unix/ztypes_linux.go | 2754 ++++-- .../golang.org/x/sys/unix/ztypes_linux_386.go | 42 +- .../x/sys/unix/ztypes_linux_amd64.go | 45 +- .../golang.org/x/sys/unix/ztypes_linux_arm.go | 45 +- .../x/sys/unix/ztypes_linux_arm64.go | 45 +- .../x/sys/unix/ztypes_linux_mips.go | 45 +- .../x/sys/unix/ztypes_linux_mips64.go | 45 +- .../x/sys/unix/ztypes_linux_mips64le.go | 45 +- .../x/sys/unix/ztypes_linux_mipsle.go | 45 +- .../golang.org/x/sys/unix/ztypes_linux_ppc.go | 639 ++ .../x/sys/unix/ztypes_linux_ppc64.go | 45 +- .../x/sys/unix/ztypes_linux_ppc64le.go | 45 +- .../x/sys/unix/ztypes_linux_riscv64.go | 45 +- .../x/sys/unix/ztypes_linux_s390x.go | 45 +- .../x/sys/unix/ztypes_linux_sparc64.go | 45 +- .../x/sys/unix/ztypes_netbsd_386.go | 6 +- .../x/sys/unix/ztypes_netbsd_amd64.go | 6 +- .../x/sys/unix/ztypes_netbsd_arm.go | 6 +- .../x/sys/unix/ztypes_netbsd_arm64.go | 6 +- .../x/sys/unix/ztypes_openbsd_386.go | 6 +- .../x/sys/unix/ztypes_openbsd_amd64.go | 6 +- .../x/sys/unix/ztypes_openbsd_arm.go | 6 +- .../x/sys/unix/ztypes_openbsd_arm64.go | 6 +- .../x/sys/unix/ztypes_openbsd_mips64.go | 569 ++ .../x/sys/unix/ztypes_solaris_amd64.go | 33 +- .../golang.org/x/sys/unix/ztypes_zos_s390x.go | 406 + .../golang.org/x/sys/windows/dll_windows.go | 32 +- vendor/golang.org/x/sys/windows/empty.s | 1 + .../golang.org/x/sys/windows/env_windows.go | 11 +- .../golang.org/x/sys/windows/exec_windows.go | 98 + .../x/sys/windows/memory_windows.go | 25 +- vendor/golang.org/x/sys/windows/mkerrors.bash | 7 + .../x/sys/windows/security_windows.go | 59 +- vendor/golang.org/x/sys/windows/service.go | 8 + .../x/sys/windows/setupapierrors_windows.go | 100 + vendor/golang.org/x/sys/windows/syscall.go | 46 +- .../x/sys/windows/syscall_windows.go | 300 +- .../golang.org/x/sys/windows/types_windows.go | 1053 ++- .../x/sys/windows/types_windows_386.go | 13 + .../x/sys/windows/types_windows_amd64.go | 12 + .../x/sys/windows/types_windows_arm.go | 13 + .../x/sys/windows/types_windows_arm64.go | 34 + .../x/sys/windows/zerrors_windows.go | 2619 +++++- .../x/sys/windows/zsyscall_windows.go | 4919 +++++------ .../x/text/encoding/unicode/unicode.go | 94 +- .../golang.org/x/text/transform/transform.go | 6 +- vendor/golang.org/x/text/unicode/bidi/core.go | 8 +- 
.../x/text/unicode/bidi/tables11.0.0.go | 2 +- .../x/text/unicode/bidi/tables12.0.0.go | 1923 ++++ .../x/text/unicode/norm/tables11.0.0.go | 2 +- .../x/text/unicode/norm/tables12.0.0.go | 7710 +++++++++++++++++ vendor/golang.org/x/time/AUTHORS | 3 + vendor/golang.org/x/time/CONTRIBUTORS | 3 + vendor/golang.org/x/time/LICENSE | 27 + vendor/golang.org/x/time/PATENTS | 22 + vendor/golang.org/x/time/rate/rate.go | 400 + vendor/google.golang.org/grpc/.travis.yml | 42 - vendor/google.golang.org/grpc/MAINTAINERS.md | 5 +- vendor/google.golang.org/grpc/Makefile | 2 - vendor/google.golang.org/grpc/NOTICE.txt | 13 + vendor/google.golang.org/grpc/README.md | 2 +- .../grpc/balancer/balancer.go | 78 +- .../grpc/balancer/base/balancer.go | 44 +- .../grpc/balancer/roundrobin/roundrobin.go | 4 +- .../grpc/balancer_conn_wrappers.go | 116 +- vendor/google.golang.org/grpc/clientconn.go | 355 +- .../grpc/connectivity/connectivity.go | 35 +- .../grpc/credentials/credentials.go | 24 +- .../google.golang.org/grpc/credentials/tls.go | 3 + vendor/google.golang.org/grpc/dialoptions.go | 17 +- .../grpc/encoding/proto/proto.go | 14 +- vendor/google.golang.org/grpc/go.mod | 13 +- vendor/google.golang.org/grpc/go.sum | 51 +- .../google.golang.org/grpc/health/client.go | 117 + .../grpc/health/grpc_health_v1/health.pb.go | 313 + .../health/grpc_health_v1/health_grpc.pb.go | 201 + .../spiffe_appengine.go => health/logging.go} | 14 +- .../google.golang.org/grpc/health/server.go | 163 + vendor/google.golang.org/grpc/install_gae.sh | 6 - .../grpc/internal/binarylog/sink.go | 41 +- .../grpc/internal/channelz/funcs.go | 2 +- .../grpc/internal/channelz/types_linux.go | 2 - .../grpc/internal/channelz/types_nonlinux.go | 5 +- .../grpc/internal/channelz/util_linux.go | 2 - .../grpc/internal/channelz/util_nonlinux.go | 3 +- .../grpc/internal/credentials/credentials.go | 49 + .../grpc/internal/credentials/spiffe.go | 2 - .../grpc/internal/credentials/syscallconn.go | 2 - .../grpc/internal/credentials/util.go | 4 +- .../grpc/internal/envconfig/envconfig.go | 6 +- .../grpc/internal/grpcrand/grpcrand.go | 29 +- .../grpc/internal/internal.go | 11 +- .../grpc/internal/resolver/config_selector.go | 72 + .../internal/resolver/dns/dns_resolver.go | 52 +- .../grpc/internal/resolver/dns/go113.go | 33 - .../internal/serviceconfig/serviceconfig.go | 20 +- .../grpc/internal/status/status.go | 14 +- .../grpc/internal/syscall/syscall_linux.go | 2 - .../grpc/internal/syscall/syscall_nonlinux.go | 21 +- .../grpc/internal/transport/controlbuf.go | 56 +- .../grpc/internal/transport/handler_server.go | 3 +- .../grpc/internal/transport/http2_client.go | 287 +- .../grpc/internal/transport/http2_server.go | 243 +- .../grpc/internal/transport/http_util.go | 221 +- .../transport/networktype/networktype.go | 2 +- .../grpc/internal/transport/transport.go | 22 +- .../grpc/internal/xds/env/env.go | 95 + .../grpc/internal/xds_handshake_cluster.go | 40 + .../grpc/metadata/metadata.go | 100 +- .../google.golang.org/grpc/picker_wrapper.go | 2 +- vendor/google.golang.org/grpc/pickfirst.go | 23 +- .../grpc/reflection/README.md | 18 + .../grpc_reflection_v1alpha/reflection.pb.go | 953 ++ .../grpc_reflection_v1alpha/reflection.proto | 138 + .../reflection_grpc.pb.go | 139 + .../grpc/reflection/serverreflection.go | 496 ++ vendor/google.golang.org/grpc/regenerate.sh | 15 +- .../grpc/resolver/resolver.go | 2 +- .../grpc/resolver_conn_wrapper.go | 71 +- vendor/google.golang.org/grpc/rpc_util.go | 53 +- vendor/google.golang.org/grpc/server.go | 218 +- 
vendor/google.golang.org/grpc/stats/stats.go | 11 +- .../google.golang.org/grpc/status/status.go | 8 +- vendor/google.golang.org/grpc/stream.go | 220 +- vendor/google.golang.org/grpc/tap/tap.go | 16 +- vendor/google.golang.org/grpc/version.go | 2 +- vendor/google.golang.org/grpc/vet.sh | 75 +- .../protobuf/encoding/prototext/decode.go | 30 +- .../protobuf/encoding/prototext/encode.go | 84 +- .../protobuf/internal/descfmt/stringer.go | 2 + .../protobuf/internal/detrand/rand.go | 8 + .../encoding/messageset/messageset.go | 33 +- .../protobuf/internal/encoding/tag/tag.go | 2 +- .../protobuf/internal/encoding/text/encode.go | 8 +- .../protobuf/internal/fieldsort/fieldsort.go | 40 - .../protobuf/internal/filedesc/build.go | 3 + .../protobuf/internal/filedesc/desc.go | 77 +- .../protobuf/internal/filedesc/desc_lazy.go | 4 +- .../protobuf/internal/filedesc/desc_list.go | 172 +- .../internal/filedesc/desc_list_gen.go | 11 + .../protobuf/internal/impl/api_export.go | 2 +- .../protobuf/internal/impl/codec_field.go | 18 +- .../protobuf/internal/impl/codec_gen.go | 974 +-- .../protobuf/internal/impl/codec_map.go | 19 +- .../protobuf/internal/impl/codec_message.go | 68 +- .../internal/impl/codec_messageset.go | 21 +- .../protobuf/internal/impl/codec_reflect.go | 8 +- .../protobuf/internal/impl/convert.go | 29 + .../protobuf/internal/impl/decode.go | 16 +- .../protobuf/internal/impl/encode.go | 10 +- .../protobuf/internal/impl/legacy_export.go | 2 +- .../internal/impl/legacy_extension.go | 3 +- .../protobuf/internal/impl/legacy_message.go | 122 +- .../protobuf/internal/impl/merge.go | 6 +- .../protobuf/internal/impl/message.go | 69 +- .../protobuf/internal/impl/message_reflect.go | 125 +- .../internal/impl/message_reflect_field.go | 85 +- .../protobuf/internal/impl/pointer_reflect.go | 1 + .../protobuf/internal/impl/pointer_unsafe.go | 1 + .../protobuf/internal/mapsort/mapsort.go | 43 - .../protobuf/internal/order/order.go | 89 + .../protobuf/internal/order/range.go | 115 + .../protobuf/internal/version/version.go | 2 +- .../protobuf/proto/decode.go | 18 +- .../protobuf/proto/decode_gen.go | 128 +- .../protobuf/proto/encode.go | 55 +- .../google.golang.org/protobuf/proto/equal.go | 25 +- .../protobuf/proto/messageset.go | 7 +- .../google.golang.org/protobuf/proto/proto.go | 9 + .../protobuf/reflect/protodesc/desc.go | 276 + .../protobuf/reflect/protodesc/desc_init.go | 248 + .../reflect/protodesc/desc_resolve.go | 286 + .../reflect/protodesc/desc_validate.go | 374 + .../protobuf/reflect/protodesc/proto.go | 252 + .../protobuf/reflect/protoreflect/source.go | 84 +- .../reflect/protoreflect/source_gen.go | 461 + .../protobuf/reflect/protoreflect/type.go | 34 + .../reflect/protoregistry/registry.go | 157 +- .../types/descriptorpb/descriptor.pb.go | 4039 +++++++++ .../protobuf/types/known/anypb/any.pb.go | 22 +- .../types/known/durationpb/duration.pb.go | 20 +- .../protobuf/types/known/emptypb/empty.pb.go | 16 +- .../types/known/timestamppb/timestamp.pb.go | 29 +- .../square/go-jose.v2/.gitcookies.sh.enc | 1 + vendor/gopkg.in/square/go-jose.v2/.gitignore | 8 + vendor/gopkg.in/square/go-jose.v2/.travis.yml | 45 + .../gopkg.in/square/go-jose.v2/BUG-BOUNTY.md | 10 + .../square/go-jose.v2/CONTRIBUTING.md | 14 + vendor/gopkg.in/square/go-jose.v2/LICENSE | 202 + vendor/gopkg.in/square/go-jose.v2/README.md | 118 + .../gopkg.in/square/go-jose.v2/asymmetric.go | 592 ++ .../square/go-jose.v2/cipher/cbc_hmac.go | 196 + .../square/go-jose.v2/cipher/concat_kdf.go | 75 + .../square/go-jose.v2/cipher/ecdh_es.go | 86 + 
.../square/go-jose.v2/cipher/key_wrap.go | 109 + vendor/gopkg.in/square/go-jose.v2/crypter.go | 541 ++ .../square/go-jose.v2/doc.go} | 25 +- vendor/gopkg.in/square/go-jose.v2/encoding.go | 185 + .../gopkg.in/square/go-jose.v2/json/LICENSE | 27 + .../gopkg.in/square/go-jose.v2/json/README.md | 13 + .../gopkg.in/square/go-jose.v2/json/decode.go | 1183 +++ .../gopkg.in/square/go-jose.v2/json/encode.go | 1197 +++ .../gopkg.in/square/go-jose.v2/json/indent.go | 141 + .../square/go-jose.v2/json/scanner.go | 623 ++ .../gopkg.in/square/go-jose.v2/json/stream.go | 480 + .../gopkg.in/square/go-jose.v2/json/tags.go | 44 + vendor/gopkg.in/square/go-jose.v2/jwe.go | 294 + vendor/gopkg.in/square/go-jose.v2/jwk.go | 760 ++ vendor/gopkg.in/square/go-jose.v2/jws.go | 366 + .../gopkg.in/square/go-jose.v2/jwt/builder.go | 334 + .../gopkg.in/square/go-jose.v2/jwt/claims.go | 120 + .../square/go-jose.v2/jwt/doc.go} | 20 +- .../gopkg.in/square/go-jose.v2/jwt/errors.go | 53 + vendor/gopkg.in/square/go-jose.v2/jwt/jwt.go | 163 + .../square/go-jose.v2/jwt/validation.go | 114 + vendor/gopkg.in/square/go-jose.v2/opaque.go | 144 + vendor/gopkg.in/square/go-jose.v2/shared.go | 520 ++ vendor/gopkg.in/square/go-jose.v2/signing.go | 441 + .../gopkg.in/square/go-jose.v2/symmetric.go | 482 ++ vendor/modules.txt | 126 +- 1127 files changed, 119303 insertions(+), 28676 deletions(-) create mode 100644 secrets/hashicorpvault/vault_test.go create mode 100644 vendor/github.com/armon/go-metrics/.gitignore create mode 100644 vendor/github.com/armon/go-metrics/.travis.yml create mode 100644 vendor/github.com/armon/go-metrics/LICENSE create mode 100644 vendor/github.com/armon/go-metrics/README.md create mode 100644 vendor/github.com/armon/go-metrics/const_unix.go create mode 100644 vendor/github.com/armon/go-metrics/const_windows.go create mode 100644 vendor/github.com/armon/go-metrics/go.mod create mode 100644 vendor/github.com/armon/go-metrics/go.sum create mode 100644 vendor/github.com/armon/go-metrics/inmem.go create mode 100644 vendor/github.com/armon/go-metrics/inmem_endpoint.go create mode 100644 vendor/github.com/armon/go-metrics/inmem_signal.go create mode 100644 vendor/github.com/armon/go-metrics/metrics.go create mode 100644 vendor/github.com/armon/go-metrics/sink.go create mode 100644 vendor/github.com/armon/go-metrics/start.go create mode 100644 vendor/github.com/armon/go-metrics/statsd.go create mode 100644 vendor/github.com/armon/go-metrics/statsite.go create mode 100644 vendor/github.com/armon/go-radix/go.mod create mode 100644 vendor/github.com/cenkalti/backoff/v3/.gitignore create mode 100644 vendor/github.com/cenkalti/backoff/v3/.travis.yml create mode 100644 vendor/github.com/cenkalti/backoff/v3/LICENSE create mode 100644 vendor/github.com/cenkalti/backoff/v3/README.md create mode 100644 vendor/github.com/cenkalti/backoff/v3/backoff.go create mode 100644 vendor/github.com/cenkalti/backoff/v3/context.go create mode 100644 vendor/github.com/cenkalti/backoff/v3/exponential.go create mode 100644 vendor/github.com/cenkalti/backoff/v3/go.mod create mode 100644 vendor/github.com/cenkalti/backoff/v3/retry.go create mode 100644 vendor/github.com/cenkalti/backoff/v3/ticker.go create mode 100644 vendor/github.com/cenkalti/backoff/v3/tries.go create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go create mode 100644 vendor/github.com/golang/snappy/decode_arm64.s rename vendor/github.com/golang/snappy/{decode_amd64.go => decode_asm.go} (93%) create mode 100644 
vendor/github.com/golang/snappy/encode_arm64.s rename vendor/github.com/golang/snappy/{encode_amd64.go => encode_asm.go} (97%) create mode 100644 vendor/github.com/hashicorp/go-cleanhttp/LICENSE create mode 100644 vendor/github.com/hashicorp/go-cleanhttp/README.md create mode 100644 vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go create mode 100644 vendor/github.com/hashicorp/go-cleanhttp/doc.go create mode 100644 vendor/github.com/hashicorp/go-cleanhttp/go.mod create mode 100644 vendor/github.com/hashicorp/go-cleanhttp/handlers.go create mode 100644 vendor/github.com/hashicorp/go-plugin/.gitignore create mode 100644 vendor/github.com/hashicorp/go-plugin/LICENSE create mode 100644 vendor/github.com/hashicorp/go-plugin/README.md create mode 100644 vendor/github.com/hashicorp/go-plugin/client.go create mode 100644 vendor/github.com/hashicorp/go-plugin/discover.go create mode 100644 vendor/github.com/hashicorp/go-plugin/error.go create mode 100644 vendor/github.com/hashicorp/go-plugin/go.mod create mode 100644 vendor/github.com/hashicorp/go-plugin/go.sum create mode 100644 vendor/github.com/hashicorp/go-plugin/grpc_broker.go create mode 100644 vendor/github.com/hashicorp/go-plugin/grpc_client.go create mode 100644 vendor/github.com/hashicorp/go-plugin/grpc_controller.go create mode 100644 vendor/github.com/hashicorp/go-plugin/grpc_server.go create mode 100644 vendor/github.com/hashicorp/go-plugin/grpc_stdio.go create mode 100644 vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go create mode 100644 vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go create mode 100644 vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto create mode 100644 vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go create mode 100644 vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto create mode 100644 vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.pb.go create mode 100644 vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.proto create mode 100644 vendor/github.com/hashicorp/go-plugin/log_entry.go create mode 100644 vendor/github.com/hashicorp/go-plugin/mtls.go create mode 100644 vendor/github.com/hashicorp/go-plugin/mux_broker.go create mode 100644 vendor/github.com/hashicorp/go-plugin/plugin.go create mode 100644 vendor/github.com/hashicorp/go-plugin/process.go create mode 100644 vendor/github.com/hashicorp/go-plugin/process_posix.go create mode 100644 vendor/github.com/hashicorp/go-plugin/process_windows.go create mode 100644 vendor/github.com/hashicorp/go-plugin/protocol.go create mode 100644 vendor/github.com/hashicorp/go-plugin/rpc_client.go create mode 100644 vendor/github.com/hashicorp/go-plugin/rpc_server.go create mode 100644 vendor/github.com/hashicorp/go-plugin/server.go create mode 100644 vendor/github.com/hashicorp/go-plugin/server_mux.go create mode 100644 vendor/github.com/hashicorp/go-plugin/stream.go create mode 100644 vendor/github.com/hashicorp/go-plugin/testing.go create mode 100644 vendor/github.com/hashicorp/go-retryablehttp/.gitignore rename vendor/github.com/hashicorp/{go-multierror => go-retryablehttp}/.travis.yml (54%) create mode 100644 vendor/github.com/hashicorp/go-retryablehttp/LICENSE create mode 100644 vendor/github.com/hashicorp/go-retryablehttp/Makefile create mode 100644 vendor/github.com/hashicorp/go-retryablehttp/README.md create mode 100644 vendor/github.com/hashicorp/go-retryablehttp/client.go create mode 100644 
vendor/github.com/hashicorp/go-retryablehttp/go.mod create mode 100644 vendor/github.com/hashicorp/go-retryablehttp/go.sum create mode 100644 vendor/github.com/hashicorp/go-retryablehttp/roundtripper.go create mode 100644 vendor/github.com/hashicorp/go-rootcerts/.travis.yml create mode 100644 vendor/github.com/hashicorp/go-rootcerts/LICENSE create mode 100644 vendor/github.com/hashicorp/go-rootcerts/Makefile create mode 100644 vendor/github.com/hashicorp/go-rootcerts/README.md create mode 100644 vendor/github.com/hashicorp/go-rootcerts/doc.go create mode 100644 vendor/github.com/hashicorp/go-rootcerts/go.mod create mode 100644 vendor/github.com/hashicorp/go-rootcerts/go.sum create mode 100644 vendor/github.com/hashicorp/go-rootcerts/rootcerts.go create mode 100644 vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go create mode 100644 vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go create mode 100644 vendor/github.com/hashicorp/go-secure-stdlib/mlock/LICENSE create mode 100644 vendor/github.com/hashicorp/go-secure-stdlib/mlock/go.mod create mode 100644 vendor/github.com/hashicorp/go-secure-stdlib/mlock/go.sum create mode 100644 vendor/github.com/hashicorp/go-secure-stdlib/mlock/mlock.go create mode 100644 vendor/github.com/hashicorp/go-secure-stdlib/mlock/mlock_unavail.go create mode 100644 vendor/github.com/hashicorp/go-secure-stdlib/mlock/mlock_unix.go create mode 100644 vendor/github.com/hashicorp/go-secure-stdlib/parseutil/LICENSE create mode 100644 vendor/github.com/hashicorp/go-secure-stdlib/parseutil/go.mod create mode 100644 vendor/github.com/hashicorp/go-secure-stdlib/parseutil/go.sum create mode 100644 vendor/github.com/hashicorp/go-secure-stdlib/parseutil/parsepath.go create mode 100644 vendor/github.com/hashicorp/go-secure-stdlib/parseutil/parseutil.go create mode 100644 vendor/github.com/hashicorp/go-secure-stdlib/strutil/LICENSE create mode 100644 vendor/github.com/hashicorp/go-secure-stdlib/strutil/go.mod create mode 100644 vendor/github.com/hashicorp/go-secure-stdlib/strutil/go.sum create mode 100644 vendor/github.com/hashicorp/go-secure-stdlib/strutil/strutil.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/.gitignore create mode 100644 vendor/github.com/hashicorp/go-sockaddr/GNUmakefile create mode 100644 vendor/github.com/hashicorp/go-sockaddr/LICENSE create mode 100644 vendor/github.com/hashicorp/go-sockaddr/README.md create mode 100644 vendor/github.com/hashicorp/go-sockaddr/doc.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/go.mod create mode 100644 vendor/github.com/hashicorp/go-sockaddr/go.sum create mode 100644 vendor/github.com/hashicorp/go-sockaddr/ifaddr.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/ifaddrs.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/ifattr.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/ipaddr.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/ipaddrs.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/ipv4addr.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/ipv6addr.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/rfc.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/route_info.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/route_info_android.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/route_info_bsd.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/route_info_default.go create mode 100644 
vendor/github.com/hashicorp/go-sockaddr/route_info_linux.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/route_info_solaris.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/route_info_windows.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/sockaddr.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/sockaddrs.go create mode 100644 vendor/github.com/hashicorp/go-sockaddr/unixsock.go create mode 100644 vendor/github.com/hashicorp/go-uuid/.travis.yml create mode 100644 vendor/github.com/hashicorp/go-uuid/LICENSE create mode 100644 vendor/github.com/hashicorp/go-uuid/README.md create mode 100644 vendor/github.com/hashicorp/go-uuid/go.mod create mode 100644 vendor/github.com/hashicorp/go-uuid/uuid.go create mode 100644 vendor/github.com/hashicorp/go-version/.travis.yml create mode 100644 vendor/github.com/hashicorp/go-version/LICENSE create mode 100644 vendor/github.com/hashicorp/go-version/README.md create mode 100644 vendor/github.com/hashicorp/go-version/constraint.go create mode 100644 vendor/github.com/hashicorp/go-version/go.mod create mode 100644 vendor/github.com/hashicorp/go-version/version.go create mode 100644 vendor/github.com/hashicorp/go-version/version_collection.go create mode 100644 vendor/github.com/hashicorp/vault/api/LICENSE create mode 100644 vendor/github.com/hashicorp/vault/api/README.md create mode 100644 vendor/github.com/hashicorp/vault/api/auth.go create mode 100644 vendor/github.com/hashicorp/vault/api/auth_token.go create mode 100644 vendor/github.com/hashicorp/vault/api/client.go create mode 100644 vendor/github.com/hashicorp/vault/api/go.mod create mode 100644 vendor/github.com/hashicorp/vault/api/go.sum create mode 100644 vendor/github.com/hashicorp/vault/api/help.go create mode 100644 vendor/github.com/hashicorp/vault/api/lifetime_watcher.go create mode 100644 vendor/github.com/hashicorp/vault/api/logical.go create mode 100644 vendor/github.com/hashicorp/vault/api/output_string.go create mode 100644 vendor/github.com/hashicorp/vault/api/plugin_helpers.go create mode 100644 vendor/github.com/hashicorp/vault/api/request.go create mode 100644 vendor/github.com/hashicorp/vault/api/response.go create mode 100644 vendor/github.com/hashicorp/vault/api/secret.go create mode 100644 vendor/github.com/hashicorp/vault/api/ssh.go create mode 100644 vendor/github.com/hashicorp/vault/api/ssh_agent.go create mode 100644 vendor/github.com/hashicorp/vault/api/sys.go create mode 100644 vendor/github.com/hashicorp/vault/api/sys_audit.go create mode 100644 vendor/github.com/hashicorp/vault/api/sys_auth.go create mode 100644 vendor/github.com/hashicorp/vault/api/sys_capabilities.go create mode 100644 vendor/github.com/hashicorp/vault/api/sys_config_cors.go create mode 100644 vendor/github.com/hashicorp/vault/api/sys_generate_root.go create mode 100644 vendor/github.com/hashicorp/vault/api/sys_health.go create mode 100644 vendor/github.com/hashicorp/vault/api/sys_init.go create mode 100644 vendor/github.com/hashicorp/vault/api/sys_leader.go create mode 100644 vendor/github.com/hashicorp/vault/api/sys_leases.go create mode 100644 vendor/github.com/hashicorp/vault/api/sys_monitor.go create mode 100644 vendor/github.com/hashicorp/vault/api/sys_mounts.go create mode 100644 vendor/github.com/hashicorp/vault/api/sys_plugins.go create mode 100644 vendor/github.com/hashicorp/vault/api/sys_policy.go create mode 100644 vendor/github.com/hashicorp/vault/api/sys_raft.go create mode 100644 vendor/github.com/hashicorp/vault/api/sys_rekey.go create 
mode 100644 vendor/github.com/hashicorp/vault/api/sys_rotate.go create mode 100644 vendor/github.com/hashicorp/vault/api/sys_seal.go create mode 100644 vendor/github.com/hashicorp/vault/api/sys_stepdown.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/LICENSE create mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/certutil/helpers.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/certutil/types.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/compressutil/compress.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/consts/agent.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/consts/consts.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/consts/error.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/consts/plugin_types.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/consts/replication.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/cryptoutil/cryptoutil.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/errutil/error.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/hclutil/hcl.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/jsonutil/json.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/license/feature.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/locksutil/locks.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/logging/logging.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/pathmanager/pathmanager.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/env.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/run_config.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/runner.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/tls.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/strutil/strutil.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/helper/wrapping/wrapinfo.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/audit.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/auth.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/connection.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/controlgroup.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/error.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/identity.pb.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/identity.proto create mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/lease.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/logical.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/logical_storage.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/plugin.pb.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/plugin.proto create mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/request.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/response.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/response_util.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/secret.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/storage.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/storage_inmem.go create mode 100644 
vendor/github.com/hashicorp/vault/sdk/logical/storage_view.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/system_view.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/testing.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/token.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/logical/translate_response.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/physical/cache.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/physical/encoding.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/physical/entry.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/physical/error.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/physical/inmem/inmem.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/physical/inmem/inmem_ha.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/physical/latency.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/physical/physical.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/physical/physical_access.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/physical/physical_view.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/physical/testing.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/physical/transactions.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/version/cgo.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/version/version.go create mode 100644 vendor/github.com/hashicorp/vault/sdk/version/version_base.go create mode 100644 vendor/github.com/hashicorp/yamux/.gitignore create mode 100644 vendor/github.com/hashicorp/yamux/LICENSE create mode 100644 vendor/github.com/hashicorp/yamux/README.md create mode 100644 vendor/github.com/hashicorp/yamux/addr.go create mode 100644 vendor/github.com/hashicorp/yamux/const.go create mode 100644 vendor/github.com/hashicorp/yamux/mux.go create mode 100644 vendor/github.com/hashicorp/yamux/session.go create mode 100644 vendor/github.com/hashicorp/yamux/spec.md create mode 100644 vendor/github.com/hashicorp/yamux/stream.go create mode 100644 vendor/github.com/hashicorp/yamux/util.go create mode 100644 vendor/github.com/mattn/go-colorable/go.test.sh create mode 100644 vendor/github.com/mattn/go-isatty/go.test.sh delete mode 100644 vendor/github.com/mattn/go-isatty/isatty_android.go create mode 100644 vendor/github.com/mattn/go-isatty/renovate.json create mode 100644 vendor/github.com/mitchellh/copystructure/.travis.yml create mode 100644 vendor/github.com/mitchellh/copystructure/LICENSE create mode 100644 vendor/github.com/mitchellh/copystructure/README.md create mode 100644 vendor/github.com/mitchellh/copystructure/copier_time.go create mode 100644 vendor/github.com/mitchellh/copystructure/copystructure.go create mode 100644 vendor/github.com/mitchellh/copystructure/go.mod create mode 100644 vendor/github.com/mitchellh/copystructure/go.sum create mode 100644 vendor/github.com/mitchellh/go-homedir/LICENSE create mode 100644 vendor/github.com/mitchellh/go-homedir/README.md create mode 100644 vendor/github.com/mitchellh/go-homedir/go.mod create mode 100644 vendor/github.com/mitchellh/go-homedir/homedir.go create mode 100644 vendor/github.com/mitchellh/go-testing-interface/.travis.yml create mode 100644 vendor/github.com/mitchellh/go-testing-interface/LICENSE create mode 100644 vendor/github.com/mitchellh/go-testing-interface/README.md create mode 100644 vendor/github.com/mitchellh/go-testing-interface/go.mod create mode 100644 
vendor/github.com/mitchellh/go-testing-interface/testing.go create mode 100644 vendor/github.com/mitchellh/go-testing-interface/testing_go19.go create mode 100644 vendor/github.com/mitchellh/reflectwalk/.travis.yml create mode 100644 vendor/github.com/mitchellh/reflectwalk/LICENSE create mode 100644 vendor/github.com/mitchellh/reflectwalk/README.md create mode 100644 vendor/github.com/mitchellh/reflectwalk/go.mod create mode 100644 vendor/github.com/mitchellh/reflectwalk/location.go create mode 100644 vendor/github.com/mitchellh/reflectwalk/location_string.go create mode 100644 vendor/github.com/mitchellh/reflectwalk/reflectwalk.go create mode 100644 vendor/github.com/oklog/run/.gitignore create mode 100644 vendor/github.com/oklog/run/.travis.yml create mode 100644 vendor/github.com/oklog/run/LICENSE create mode 100644 vendor/github.com/oklog/run/README.md create mode 100644 vendor/github.com/oklog/run/group.go create mode 100644 vendor/github.com/pierrec/lz4/.gitignore create mode 100644 vendor/github.com/pierrec/lz4/.travis.yml create mode 100644 vendor/github.com/pierrec/lz4/LICENSE create mode 100644 vendor/github.com/pierrec/lz4/README.md create mode 100644 vendor/github.com/pierrec/lz4/block.go create mode 100644 vendor/github.com/pierrec/lz4/debug.go create mode 100644 vendor/github.com/pierrec/lz4/debug_stub.go create mode 100644 vendor/github.com/pierrec/lz4/decode_amd64.go create mode 100644 vendor/github.com/pierrec/lz4/decode_amd64.s create mode 100644 vendor/github.com/pierrec/lz4/decode_other.go create mode 100644 vendor/github.com/pierrec/lz4/errors.go create mode 100644 vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go create mode 100644 vendor/github.com/pierrec/lz4/lz4.go create mode 100644 vendor/github.com/pierrec/lz4/lz4_go1.10.go create mode 100644 vendor/github.com/pierrec/lz4/lz4_notgo1.10.go create mode 100644 vendor/github.com/pierrec/lz4/reader.go create mode 100644 vendor/github.com/pierrec/lz4/writer.go rename vendor/github.com/{mitchellh/mapstructure => ryanuber/go-glob}/.travis.yml (55%) create mode 100644 vendor/github.com/ryanuber/go-glob/LICENSE create mode 100644 vendor/github.com/ryanuber/go-glob/README.md create mode 100644 vendor/github.com/ryanuber/go-glob/glob.go create mode 100644 vendor/github.com/ryanuber/go-glob/go.mod delete mode 100644 vendor/go.uber.org/atomic/.travis.yml delete mode 100644 vendor/go.uber.org/atomic/atomic.go create mode 100644 vendor/go.uber.org/atomic/bool.go create mode 100644 vendor/go.uber.org/atomic/bool_ext.go create mode 100644 vendor/go.uber.org/atomic/doc.go create mode 100644 vendor/go.uber.org/atomic/duration.go create mode 100644 vendor/go.uber.org/atomic/duration_ext.go create mode 100644 vendor/go.uber.org/atomic/error_ext.go create mode 100644 vendor/go.uber.org/atomic/float64.go create mode 100644 vendor/go.uber.org/atomic/float64_ext.go create mode 100644 vendor/go.uber.org/atomic/gen.go create mode 100644 vendor/go.uber.org/atomic/int32.go create mode 100644 vendor/go.uber.org/atomic/int64.go create mode 100644 vendor/go.uber.org/atomic/nocmp.go create mode 100644 vendor/go.uber.org/atomic/string_ext.go create mode 100644 vendor/go.uber.org/atomic/time.go create mode 100644 vendor/go.uber.org/atomic/time_ext.go create mode 100644 vendor/go.uber.org/atomic/uint32.go create mode 100644 vendor/go.uber.org/atomic/uint64.go create mode 100644 vendor/go.uber.org/atomic/uintptr.go create mode 100644 vendor/go.uber.org/atomic/unsafe_pointer.go create mode 100644 vendor/go.uber.org/atomic/value.go create mode 
100644 vendor/golang.org/x/crypto/cryptobyte/asn1.go create mode 100644 vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go create mode 100644 vendor/golang.org/x/crypto/cryptobyte/builder.go create mode 100644 vendor/golang.org/x/crypto/cryptobyte/string.go delete mode 100644 vendor/golang.org/x/crypto/curve25519/curve25519_amd64.go delete mode 100644 vendor/golang.org/x/crypto/curve25519/curve25519_amd64.s delete mode 100644 vendor/golang.org/x/crypto/curve25519/curve25519_generic.go delete mode 100644 vendor/golang.org/x/crypto/curve25519/curve25519_noasm.go create mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/README create mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/fe.go create mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go create mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.s create mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64_noasm.go create mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.go create mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.s create mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go create mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/fe_generic.go create mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/sync.checkpoint create mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/sync.sh create mode 100644 vendor/golang.org/x/crypto/ed25519/ed25519.go create mode 100644 vendor/golang.org/x/crypto/ed25519/ed25519_go113.go create mode 100644 vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go create mode 100644 vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go create mode 100644 vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go create mode 100644 vendor/golang.org/x/net/context/context.go create mode 100644 vendor/golang.org/x/net/context/go17.go create mode 100644 vendor/golang.org/x/net/context/go19.go create mode 100644 vendor/golang.org/x/net/context/pre_go17.go create mode 100644 vendor/golang.org/x/net/context/pre_go19.go rename vendor/golang.org/x/net/idna/{tables12.00.go => tables12.0.0.go} (99%) create mode 100644 vendor/golang.org/x/net/idna/tables13.0.0.go create mode 100644 vendor/golang.org/x/net/internal/socket/cmsghdr_unix.go create mode 100644 vendor/golang.org/x/net/internal/socket/cmsghdr_zos_s390x.go create mode 100644 vendor/golang.org/x/net/internal/socket/msghdr_zos_s390x.go delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_bsdvar.go create mode 100644 vendor/golang.org/x/net/internal/socket/sys_const_zos.go delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_darwin.go delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_dragonfly.go rename vendor/golang.org/x/net/internal/socket/{sys_go1_11_darwin.go => sys_zos_s390x.go} (52%) create mode 100644 vendor/golang.org/x/net/internal/socket/sys_zos_s390x.s create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_openbsd_mips64.go create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_zos_s390x.go create mode 100644 vendor/golang.org/x/net/ipv4/control_zos.go create mode 100644 vendor/golang.org/x/net/ipv4/sys_zos.go create mode 100644 vendor/golang.org/x/net/ipv4/zsys_zos_s390x.go delete mode 100644 vendor/golang.org/x/net/route/syscall_go1_11_darwin.go rename vendor/golang.org/x/sys/cpu/{cpu_aix_ppc64.go => cpu_aix.go} (88%) create mode 100644 
vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_arm.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_ppc64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_s390x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_zos.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_zos_s390x.go create mode 100644 vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go create mode 100644 vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go rename vendor/golang.org/x/sys/unix/{asm_darwin_386.s => asm_bsd_386.s} (70%) rename vendor/golang.org/x/sys/unix/{asm_openbsd_amd64.s => asm_bsd_amd64.s} (71%) rename vendor/golang.org/x/sys/unix/{asm_netbsd_arm.s => asm_bsd_arm.s} (74%) rename vendor/golang.org/x/sys/unix/{asm_netbsd_amd64.s => asm_bsd_arm64.s} (73%) delete mode 100644 vendor/golang.org/x/sys/unix/asm_darwin_amd64.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_darwin_arm.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_darwin_arm64.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_freebsd_386.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_freebsd_arm.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_freebsd_arm64.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_netbsd_386.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_netbsd_arm64.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_openbsd_386.s delete mode 100644 vendor/golang.org/x/sys/unix/asm_openbsd_arm.s rename vendor/golang.org/x/sys/unix/{asm_openbsd_arm64.s => asm_openbsd_mips64.s} (89%) create mode 100644 vendor/golang.org/x/sys/unix/asm_zos_s390x.s create mode 100644 vendor/golang.org/x/sys/unix/dev_zos.go create mode 100644 vendor/golang.org/x/sys/unix/epoll_zos.go create mode 100644 vendor/golang.org/x/sys/unix/fstatfs_zos.go create mode 100644 vendor/golang.org/x/sys/unix/ioctl_linux.go create mode 100644 vendor/golang.org/x/sys/unix/ioctl_zos.go create mode 100644 vendor/golang.org/x/sys/unix/ptrace_darwin.go create mode 100644 vendor/golang.org/x/sys/unix/ptrace_ios.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_386.1_11.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_386.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_amd64.1_11.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_arm.1_11.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_arm64.1_11.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_ppc.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_mips64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_zos_s390x.go delete mode 100644 vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go rename vendor/golang.org/x/sys/unix/{zerrors_darwin_386.go => zerrors_openbsd_mips64.go} (52%) create mode 100644 vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.s delete mode 
100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.s delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go rename vendor/golang.org/x/sys/unix/{zsyscall_darwin_arm64.1_11.go => zsyscall_openbsd_mips64.go} (85%) create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go create mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go delete mode 100644 vendor/golang.org/x/sys/unix/zsysnum_darwin_arm.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_darwin_386.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_illumos_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go create mode 100644 vendor/golang.org/x/sys/windows/setupapierrors_windows.go create mode 100644 vendor/golang.org/x/sys/windows/types_windows_arm64.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/norm/tables12.0.0.go create mode 100644 vendor/golang.org/x/time/AUTHORS create mode 100644 vendor/golang.org/x/time/CONTRIBUTORS create mode 100644 vendor/golang.org/x/time/LICENSE create mode 100644 vendor/golang.org/x/time/PATENTS create mode 100644 vendor/golang.org/x/time/rate/rate.go delete mode 100644 vendor/google.golang.org/grpc/.travis.yml create mode 100644 vendor/google.golang.org/grpc/NOTICE.txt create mode 100644 vendor/google.golang.org/grpc/health/client.go create mode 100644 vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go create mode 100644 vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go rename vendor/google.golang.org/grpc/{internal/credentials/spiffe_appengine.go => health/logging.go} (74%) create mode 100644 vendor/google.golang.org/grpc/health/server.go delete mode 100644 vendor/google.golang.org/grpc/install_gae.sh create mode 100644 vendor/google.golang.org/grpc/internal/credentials/credentials.go delete mode 100644 vendor/google.golang.org/grpc/internal/resolver/dns/go113.go create mode 100644 vendor/google.golang.org/grpc/internal/xds/env/env.go create mode 100644 vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go create mode 100644 vendor/google.golang.org/grpc/reflection/README.md create mode 100644 vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go create mode 100644 vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto create mode 100644 vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go create mode 100644 
vendor/google.golang.org/grpc/reflection/serverreflection.go delete mode 100644 vendor/google.golang.org/protobuf/internal/fieldsort/fieldsort.go delete mode 100644 vendor/google.golang.org/protobuf/internal/mapsort/mapsort.go create mode 100644 vendor/google.golang.org/protobuf/internal/order/order.go create mode 100644 vendor/google.golang.org/protobuf/internal/order/range.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/desc.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/proto.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go create mode 100644 vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go create mode 100644 vendor/gopkg.in/square/go-jose.v2/.gitcookies.sh.enc create mode 100644 vendor/gopkg.in/square/go-jose.v2/.gitignore create mode 100644 vendor/gopkg.in/square/go-jose.v2/.travis.yml create mode 100644 vendor/gopkg.in/square/go-jose.v2/BUG-BOUNTY.md create mode 100644 vendor/gopkg.in/square/go-jose.v2/CONTRIBUTING.md create mode 100644 vendor/gopkg.in/square/go-jose.v2/LICENSE create mode 100644 vendor/gopkg.in/square/go-jose.v2/README.md create mode 100644 vendor/gopkg.in/square/go-jose.v2/asymmetric.go create mode 100644 vendor/gopkg.in/square/go-jose.v2/cipher/cbc_hmac.go create mode 100644 vendor/gopkg.in/square/go-jose.v2/cipher/concat_kdf.go create mode 100644 vendor/gopkg.in/square/go-jose.v2/cipher/ecdh_es.go create mode 100644 vendor/gopkg.in/square/go-jose.v2/cipher/key_wrap.go create mode 100644 vendor/gopkg.in/square/go-jose.v2/crypter.go rename vendor/{google.golang.org/grpc/credentials/go12.go => gopkg.in/square/go-jose.v2/doc.go} (55%) create mode 100644 vendor/gopkg.in/square/go-jose.v2/encoding.go create mode 100644 vendor/gopkg.in/square/go-jose.v2/json/LICENSE create mode 100644 vendor/gopkg.in/square/go-jose.v2/json/README.md create mode 100644 vendor/gopkg.in/square/go-jose.v2/json/decode.go create mode 100644 vendor/gopkg.in/square/go-jose.v2/json/encode.go create mode 100644 vendor/gopkg.in/square/go-jose.v2/json/indent.go create mode 100644 vendor/gopkg.in/square/go-jose.v2/json/scanner.go create mode 100644 vendor/gopkg.in/square/go-jose.v2/json/stream.go create mode 100644 vendor/gopkg.in/square/go-jose.v2/json/tags.go create mode 100644 vendor/gopkg.in/square/go-jose.v2/jwe.go create mode 100644 vendor/gopkg.in/square/go-jose.v2/jwk.go create mode 100644 vendor/gopkg.in/square/go-jose.v2/jws.go create mode 100644 vendor/gopkg.in/square/go-jose.v2/jwt/builder.go create mode 100644 vendor/gopkg.in/square/go-jose.v2/jwt/claims.go rename vendor/{google.golang.org/grpc/internal/credentials/syscallconn_appengine.go => gopkg.in/square/go-jose.v2/jwt/doc.go} (71%) create mode 100644 vendor/gopkg.in/square/go-jose.v2/jwt/errors.go create mode 100644 vendor/gopkg.in/square/go-jose.v2/jwt/jwt.go create mode 100644 vendor/gopkg.in/square/go-jose.v2/jwt/validation.go create mode 100644 vendor/gopkg.in/square/go-jose.v2/opaque.go create mode 100644 vendor/gopkg.in/square/go-jose.v2/shared.go create mode 100644 vendor/gopkg.in/square/go-jose.v2/signing.go create mode 100644 vendor/gopkg.in/square/go-jose.v2/symmetric.go diff --git a/command/ibft/ibft_init.go b/command/ibft/ibft_init.go index 
f4333db16e..e077649934 100644
--- a/command/ibft/ibft_init.go
+++ b/command/ibft/ibft_init.go
@@ -11,6 +11,7 @@ import (
 	"github.com/0xPolygon/polygon-sdk/secrets"
 	"github.com/0xPolygon/polygon-sdk/secrets/hashicorpvault"
 	"github.com/0xPolygon/polygon-sdk/secrets/local"
+	"github.com/hashicorp/go-hclog"
 	"github.com/libp2p/go-libp2p-core/peer"
 	"github.com/0xPolygon/polygon-sdk/crypto"
@@ -107,7 +108,7 @@ func setupLocalSM(dataDir string) (secrets.SecretsManager, error) {
 	}
 	localSecretsManager, factoryErr := local.SecretsManagerFactory(&secrets.SecretsManagerParams{
-		Logger: nil,
+		Logger: hclog.NewNullLogger(),
 		Params: map[string]interface{}{
 			secrets.Path: dataDir,
 		},
diff --git a/e2e/framework/testserver.go b/e2e/framework/testserver.go
index 5ce0f8fd73..b3c5ef01d7 100644
--- a/e2e/framework/testserver.go
+++ b/e2e/framework/testserver.go
@@ -19,6 +19,7 @@ import (
 	"github.com/0xPolygon/polygon-sdk/command/helper"
 	"github.com/0xPolygon/polygon-sdk/secrets"
 	"github.com/0xPolygon/polygon-sdk/secrets/local"
+	"github.com/hashicorp/go-hclog"
 	"github.com/0xPolygon/polygon-sdk/command/genesis"
 	ibftCommand "github.com/0xPolygon/polygon-sdk/command/ibft"
@@ -155,7 +156,7 @@ func (t *TestServer) InitIBFT() (*InitIBFTResult, error) {
 	res := &InitIBFTResult{}
 	localSecretsManager, factoryErr := local.SecretsManagerFactory(&secrets.SecretsManagerParams{
-		Logger: nil,
+		Logger: hclog.NewNullLogger(),
 		Params: map[string]interface{}{
 			secrets.Path: filepath.Join(cmd.Dir, t.Config.IBFTDir),
 		},
diff --git a/go.mod b/go.mod
index 05343a7bd5..d08dfa024a 100644
--- a/go.mod
+++ b/go.mod
@@ -5,16 +5,16 @@ go 1.14
 require (
 	github.com/btcsuite/btcd v0.21.0-beta
 	github.com/ethereum/go-ethereum v1.9.15
-	github.com/golang/protobuf v1.4.3
+	github.com/golang/protobuf v1.5.2
 	github.com/google/gopacket v1.1.18 // indirect
 	github.com/google/uuid v1.1.4
 	github.com/gorilla/websocket v1.4.2
-	github.com/hashicorp/go-hclog v0.14.1
-	github.com/hashicorp/go-immutable-radix v1.3.0
-	github.com/hashicorp/go-multierror v1.1.0
-	github.com/hashicorp/go-uuid v1.0.1 // indirect
+	github.com/hashicorp/go-hclog v0.16.2
+	github.com/hashicorp/go-immutable-radix v1.3.1
+	github.com/hashicorp/go-multierror v1.1.1
 	github.com/hashicorp/golang-lru v0.5.4
 	github.com/hashicorp/hcl v1.0.0
+	github.com/hashicorp/vault/api v1.3.0
 	github.com/imdario/mergo v0.3.7
 	github.com/libp2p/go-libp2p v0.12.0
 	github.com/libp2p/go-libp2p-core v0.7.0
@@ -30,8 +30,8 @@ require (
 	github.com/umbracle/go-eth-bn256 v0.0.0-20190607160430-b36caf4e0f6b
 	github.com/umbracle/go-web3 v0.0.0-20210427125755-b411b4019e22
 	go.uber.org/zap v1.16.0 // indirect
-	golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad
-	google.golang.org/grpc v1.35.0
-	google.golang.org/protobuf v1.25.0
+	golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97
+	google.golang.org/grpc v1.41.0
+	google.golang.org/protobuf v1.26.0
 	gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce
 )
diff --git a/go.sum b/go.sum
index be894988d5..7695b61169 100644
--- a/go.sum
+++ b/go.sum
@@ -1,5 +1,6 @@
 bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
 cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 github.com/AndreasBriese/bbloom v0.0.0-20180913140656-343706a395b7/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
 github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod
h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= @@ -18,6 +19,7 @@ github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6L github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y= github.com/Microsoft/go-winio v0.4.13/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= @@ -26,17 +28,26 @@ github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrU github.com/VictoriaMetrics/fastcache v1.5.7/go.mod h1:ptDBkNMQI4RtmVo8VS/XwRY6RoTu1dAWCbrk+6WsEM8= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/aristanetworks/goarista v0.0.0-20170210015632-ea17b1a17847/go.mod h1:D/tb0zPVXnP7fmsLZjtdUhSsumbK/ij54UXjjVgMGxQ= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.3.9 h1:O2sNqxBdvq8Eq5xmzljcYzAORli6RWCvEym4cJf9m18= +github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310 h1:BUAU3CGlLvorLI26FmByPp2eC2qla6E1Tw+scpcg/to= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/aws/aws-sdk-go v1.25.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/benbjohnson/clock v1.0.2/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/benbjohnson/clock v1.0.3 h1:vkLuvpK4fmtSCuo60+yC63p7y0BmQ8gm5ZXGuBCJyXg= github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/boltdb/bolt 
v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= @@ -58,15 +69,21 @@ github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= +github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c= +github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudflare/cloudflare-go v0.10.2-0.20190916151808-a80f83b9add9/go.mod h1:1MxXX1Ux4x6mqPmjkUgTP1CdXIBXKX7T+Jk9Gxrmx+U= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/containerd/continuity v0.0.0-20191214063359-1097c8bae83b/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= @@ -75,6 +92,7 @@ github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmf github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -104,26 +122,37 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod 
h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/ethereum/go-ethereum v1.9.15 h1:wrWl+QrtutRUJ9LZXdUqBoGoo2b1tOCYRDrAOQhCY3A= github.com/ethereum/go-ethereum v1.9.15/go.mod h1:slT8bPPRhXsyNTwHQxrOnjuTZ1sDXRajW11EkJ84QJ0= +github.com/evanphx/json-patch/v5 v5.5.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/fatih/color v1.3.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6 h1:u/UEqS66A5ckRmS4yNpjmVH56sVtS/RfclBAYocb4as= github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6/go.mod h1:1i71OnUq3iUe1ma7Lr6yG6/rjvM3emb6yoL7xLFzcVQ= +github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= +github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-ldap/ldap/v3 v3.1.10/go.mod h1:5Zun81jBTabRaI8lzN7E1JjyEl1g6zI6u9pd8luAK4Q= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= github.com/go-sourcemap/sourcemap v2.1.2+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= @@ -139,6 +168,7 @@ github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y github.com/golang/protobuf v1.3.2-0.20190517061210-b285ee9cfc6c/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -148,15 +178,22 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gopacket v1.1.17/go.mod h1:UdDNZ1OO62aGYVnPhxT1U6aI7ukYtA/kB8vaU0diBUM= github.com/google/gopacket v1.1.18 h1:lum7VRA9kdlvBi7/v2p7/zcbkduHaCH/SVVyurs7OpY= github.com/google/gopacket v1.1.18/go.mod h1:UdDNZ1OO62aGYVnPhxT1U6aI7ukYtA/kB8vaU0diBUM= @@ -171,26 +208,69 @@ github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0U github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= github.com/graph-gophers/graphql-go v0.0.0-20191115155744-f33e81362277/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 
h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v0.14.1 h1:nQcJDQwIAGnmoUWp8ubocEX40cCml/17YkF6csQLReU= github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs= +github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.0 h1:8exGP7ego3OmkfksihtSouGMZ+hQrhxx+FVELeXpVPE= github.com/hashicorp/go-immutable-radix v1.3.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-kms-wrapping/entropy v0.1.0/go.mod h1:d1g9WGtAunDNpek8jUIEJnBlbgKS1N2Q61QkHiZyR1g= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI= github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-plugin v1.4.3 h1:DXmvivbWD5qdiBts9TpBC7BYL1Aia5sxbRgQB+v6UZM= +github.com/hashicorp/go-plugin v1.4.3/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-retryablehttp v0.6.6 h1:HJunrbHTDDbBb/ay4kxa1n+dLmttUlnP3V9oNE4hmsM= +github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-secure-stdlib/base62 v0.1.1/go.mod h1:EdWO6czbmthiwZ3/PUsDV+UD1D5IRU4ActiaWGwt0Yw= +github.com/hashicorp/go-secure-stdlib/mlock v0.1.1 h1:cCRo8gK7oq6A2L6LICkUZ+/a5rLiRXFMf1Qd4xSwxTc= +github.com/hashicorp/go-secure-stdlib/mlock v0.1.1/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1 h1:78ki3QBevHwYrVxnyVeaEz+7WtifHhauYF23es/0KlI= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= +github.com/hashicorp/go-secure-stdlib/password v0.1.1/go.mod h1:9hH302QllNwu1o2TGYtSk8I8kTAN0ca1EHpwhm5Mmzo= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.1 h1:nd0HIW15E6FG1MsnArYaHfuw9C2zgzM8LxkG5Ty/788= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U= +github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.1/go.mod 
h1:l8slYwnJA26yBz+ErHpp2IRCLr0vuOMGBORIz4rRiAs= +github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= +github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/vault/api v1.3.0 h1:uDy39PLSvy6gtKyjOCRPizy2QdFiIYSWBR2pxCEzYL8= +github.com/hashicorp/vault/api v1.3.0/go.mod h1:EabNQLI0VWbWoGlA+oBLC8PXmR9D60aUVgQGvangFWQ= +github.com/hashicorp/vault/sdk v0.3.0 h1:kR3dpxNkhh/wr6ycaJYqp6AFT/i2xaftbfnwZduTKEY= +github.com/hashicorp/vault/sdk v0.3.0/go.mod h1:aZ3fNuL5VNydQk8GcLJ2TV8YCRVvyaakYkhZRoVuhj0= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huin/goupnp v1.0.0 h1:wg75sLpL6DZqwHQN6E1Cfk6mtfzS45z8OV+ic+DtHRo= github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= @@ -251,10 +331,14 @@ github.com/jbenet/goprocess v0.1.4 h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0 github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/julienschmidt/httprouter v1.1.1-0.20170430222011-975b5c4c7c21/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kami-zh/go-capturer v0.0.0-20171211120116-e492ea43421d/go.mod 
h1:P2viExyCEfeWGU259JnaQ34Inuec4R38JCyBx2edgD0= github.com/karalabe/usb v0.0.0-20190919080040-51dc0efba356/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= @@ -274,9 +358,12 @@ github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFB github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= @@ -466,6 +553,8 @@ github.com/mattn/go-colorable v0.1.0/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaO github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= @@ -475,6 +564,8 @@ github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.10 h1:qxFzApOv4WsAL965uUPIsXzAKCZxN2p9UqdhFS4ZW10= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= @@ -493,9 +584,25 @@ github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKU github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/mitchellh/cli v1.0.0 h1:iGBIsUe3+HZ/AD/Vd7DErOt5sU9fa8Uj7A2s1aggv1Y= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= +github.com/mitchellh/copystructure v1.0.0/go.mod 
h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.2 h1:6h7AQ0yhTcIsmFmnAwQls75jp2Gzs4iB8W7pjMO+rqo= +github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= github.com/mr-tron/base58 v1.1.1/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= @@ -552,10 +659,13 @@ github.com/multiformats/go-varint v0.0.2/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXS github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.6 h1:gk85QWKxh3TazbLxED/NlDVv8+q+ReFJk7Y2W/KhfNY= github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.2-0.20190409134802-7e037d187b0c/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= @@ -581,9 +691,12 @@ github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFSt github.com/opentracing/opentracing-go v1.2.0 
h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pborman/uuid v0.0.0-20170112150404-1b00554d8222/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0= +github.com/pierrec/lz4 v2.5.2+incompatible h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUMhxq9m9ZXI= +github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -594,19 +707,31 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/posener/complete v1.1.1 h1:ccV59UEOTzVDnDUEFdT95ZzHVZ+5+158q8+SJb2QV5w= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/tsdb v0.6.2-0.20190402121629-4f204dcbc150/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rs/cors v0.0.0-20160617231935-a62a804a8a00/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/xhandler v0.0.0-20160618193221-ed27b6fd6521/go.mod h1:RvLn4FgxWubrpZHtQLnOf6EwhN2hEMusxZOhcW9H3UQ= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod 
h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.2+incompatible h1:C89EOx/XBWwIXl8wm8OPJBd7kPF25UfsK2X7Ph/zCAk= github.com/ryanuber/columnize v2.1.2+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= +github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/shirou/gopsutil v2.20.5-0.20200531151128-663af789c085+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= @@ -644,6 +769,7 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d h1:gZZadD8H+fF+n9CmNhYL1Y0dJB+kLOmKd7FbPJLeGHs= github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs= github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= @@ -679,9 +805,12 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4 h1:LYy1Hy3MJdrCdMwwzxA/dRok4ejH+RwNGbuoD9fCjto= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.0.0 h1:qsup4IcBdlmsnGfqyLl4Ntn3C2XCCuKAE7DwHpScyUo= go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= @@ -715,6 +844,8 @@ golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad h1:DN0cp81fZ3njFcrLCytUHRSUkqBjfTo4Tx9RJTWs0EY= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 h1:/UOmuWzQfxxo9UtlXMwuQU8CMgg1eZXqTRwkSQJWKOI= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod 
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -723,27 +854,37 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0 h1:Jcxah/M+oLZ/R4/z5RzfPzGbPXnVDPkEDtf2JnuxN+U= golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a h1:WXEvlFVvvGxCJLG6REjsT03iWnKLEWinaScsxF2Vm2o= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -751,6 +892,7 @@ golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -768,15 +910,26 @@ golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod 
h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 h1:NusfzzA6yGQ+ua51ck7E3omNUX/JuqbFSaRGqU8CcLI= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -795,25 +948,34 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.28.1/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod 
h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.35.0 h1:TwIQcH3es+MojMVojxxfQ3l3OF2KzlRxML2xZq0kRo8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.41.0 h1:f+PlOh7QV4iIJkPrx5NQ7qaNGFQ3OTse67yaDHfju4E= +google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -824,6 +986,9 @@ google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -836,6 +1001,8 @@ gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKW gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU= gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c= gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200603215123-a4a8cb9d2cbc/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns= +gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w= +gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/src-d/go-cli.v0 v0.0.0-20181105080154-d492247bbc0d/go.mod h1:z+K8VcOYVYcSwSjGebuDL6176A1XskgbtNl64NSg+n8= gopkg.in/src-d/go-log.v1 v1.0.1/go.mod h1:GN34hKP0g305ysm2/hctJ0Y8nWP3zxXXJ8GFabTyABE= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= @@ -843,8 +1010,11 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 
v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= diff --git a/secrets/hashicorpvault/hashicorpVault.go b/secrets/hashicorpvault/hashicorpVault.go index 5682896457..ffae9ef7c4 100644 --- a/secrets/hashicorpvault/hashicorpVault.go +++ b/secrets/hashicorpvault/hashicorpVault.go @@ -2,13 +2,19 @@ package hashicorpvault import ( "errors" + "fmt" "github.com/0xPolygon/polygon-sdk/secrets" + "github.com/hashicorp/go-hclog" + vault "github.com/hashicorp/vault/api" ) // VaultSecretsManager is a SecretsManager that // stores secrets on a Hashicorp Vault instance type VaultSecretsManager struct { + // Logger object + logger hclog.Logger + // Token used for Vault instance authentication token string @@ -17,6 +23,12 @@ type VaultSecretsManager struct { // The name of the current node, used for prefixing names of secrets name string + + // The base path to store the secrets in the KV-2 Vault storage + basePath string + + // The HTTP client used for interacting with the Vault server + client *vault.Client } // SecretsManagerFactory implements the factory method @@ -24,7 +36,9 @@ func SecretsManagerFactory( config *secrets.SecretsManagerParams, ) (secrets.SecretsManager, error) { // Set up the base object - vaultManager := &VaultSecretsManager{} + vaultManager := &VaultSecretsManager{ + logger: config.Logger.Named(string(secrets.HashicorpVault)), + } // Grab the token from the config token, ok := config.Params[secrets.Token] @@ -47,28 +61,111 @@ func SecretsManagerFactory( } vaultManager.name = name.(string) + // Set the base path to store the secrets in the KV-2 Vault storage + vaultManager.basePath = fmt.Sprintf("secret/data/%s", vaultManager.name) + // Run the initial setup _ = vaultManager.Setup() return vaultManager, nil } +// Setup sets up the Hashicorp Vault secrets manager func (v *VaultSecretsManager) Setup() error { - panic("implement me") + config := vault.DefaultConfig() + + // Set the server URL + config.Address = v.serverURL + client, err := vault.NewClient(config) + if err != nil { + return fmt.Errorf("unable to initialize Vault client: %v", err) + } + + // Set the access token + client.SetToken(v.token) + + v.client = client + + return nil +} + +// constructSecretPath is a helper method for constructing a path to the secret +func (v *VaultSecretsManager) constructSecretPath(name string) string { + return fmt.Sprintf("%s/%s", v.basePath, name) } +// GetSecret fetches a secret from the Hashicorp Vault server func (v *VaultSecretsManager) GetSecret(name string) (interface{}, error) { - panic("implement me") + secret, err := v.client.Logical().Read(v.constructSecretPath(name)) + if err != nil { + return nil, fmt.Errorf("unable to read secret from Vault, %v", err) + } + + // KV-2 (versioned key-value storage) in Vault stores data in the following format: + // { + // "data": { + // key: value + // } + // } + data, ok := secret.Data["data"].(map[string]interface{}) + if !ok { + return nil, fmt.Errorf( + "unable to assert type for secret from Vault, %T %#v", + secret.Data["data"], + secret.Data["data"], + ) + } + + value, ok := data[name] + if !ok { + return nil, secrets.ErrSecretNotFound + } + + return value, nil } +// SetSecret saves a secret to the Hashicorp Vault server func (v *VaultSecretsManager) SetSecret(name string, value interface{}) error { - panic("implement me") + // Check if overwrite is possible + _, err := v.GetSecret(name) + if err == 
secrets.ErrSecretNotFound { + v.logger.Warn(fmt.Sprintf("Overwriting secret: %s", name)) + } + + // Construct the data wrapper + data := make(map[string]interface{}) + data[name] = value + + _, err = v.client.Logical().Write(v.constructSecretPath(name), map[string]interface{}{ + "data": data, + }) + if err != nil { + return fmt.Errorf("unable to store secret (%s), %v", name, err) + } + + return nil } +// HasSecret checks if the secret is present on the Hashicorp Vault server func (v *VaultSecretsManager) HasSecret(name string) bool { - panic("implement me") + _, err := v.GetSecret(name) + + return err == nil } +// RemoveSecret removes a secret from the Hashicorp Vault server func (v *VaultSecretsManager) RemoveSecret(name string) error { - panic("implement me") + // Check if overwrite is possible + _, err := v.GetSecret(name) + if err != nil { + return err + } + + // Delete the secret from Vault storage + _, err = v.client.Logical().Delete(v.constructSecretPath(name)) + if err != nil { + return fmt.Errorf("unable to delete secret (%s), %v", name, err) + } + + return nil } diff --git a/secrets/hashicorpvault/vault_test.go b/secrets/hashicorpvault/vault_test.go new file mode 100644 index 0000000000..1df7aff94e --- /dev/null +++ b/secrets/hashicorpvault/vault_test.go @@ -0,0 +1,65 @@ +package hashicorpvault + +import ( + "fmt" + "testing" + + "github.com/0xPolygon/polygon-sdk/crypto" + vault "github.com/hashicorp/vault/api" +) + +func readSecret(client *vault.Client, path string, key string, t *testing.T) { + secret, err := client.Logical().Read(path) + if err != nil { + t.Fatalf("unable to read secret: %v", err) + } + data, ok := secret.Data["data"].(map[string]interface{}) + if !ok { + t.Fatalf("data type assertion failed: %T %#v", secret.Data["data"], secret.Data["data"]) + } + // data map can contain more than one key-value pair, in this case we're just grabbing one of them + value, ok := data[key].(string) + if !ok { + t.Fatalf("value type assertion failed: %T %#v", data[key], data[key]) + } + + fmt.Printf("Value is %s", value) +} + +//secret/data/validator-1 +func writeSecret(client *vault.Client, path string, key string, value interface{}, t *testing.T) { + data := make(map[string]interface{}) + + data[key] = value + _, err := client.Logical().Write(path, map[string]interface{}{ + "data": data, + }) + if err != nil { + t.Fatalf("unable to store secret: %v", err) + } +} + +func TestVaultConnection(t *testing.T) { + config := vault.DefaultConfig() // modify for more granular configuration + config.Address = "http://127.0.0.1:8200" + client, err := vault.NewClient(config) + if err != nil { + t.Fatalf("unable to initialize Vault client: %v", err) + } + + // WARNING: Storing any long-lived token with secret access in an environment variable poses a security risk. + // Additionally, root tokens should never be used in production or against Vault installations containing real secrets. + // See the files starting in auth-* for examples of how to securely log in to Vault using various auth methods. 
+ + _, privKeyEncoded, _ := crypto.GenerateAndEncodePrivateKey() + + client.SetToken("s.e7Mm84sMpwug5TXoIr2tGIiB") + + readSecret(client, "secret/data/hello", "foo", t) + + keyPath := "secret/data/validator-1" + keyKey := "validator-key" + + writeSecret(client, keyPath, keyKey, privKeyEncoded, t) + readSecret(client, keyPath, keyKey, t) +} diff --git a/secrets/local/local.go b/secrets/local/local.go index 7717985396..445233989a 100644 --- a/secrets/local/local.go +++ b/secrets/local/local.go @@ -9,11 +9,15 @@ import ( "sync" "github.com/0xPolygon/polygon-sdk/secrets" + "github.com/hashicorp/go-hclog" ) // LocalSecretsManager is a SecretsManager that // stores secrets locally on disk type LocalSecretsManager struct { + // Logger object + logger hclog.Logger + // Path to the base working directory path string @@ -30,6 +34,7 @@ func SecretsManagerFactory( ) (secrets.SecretsManager, error) { // Set up the base object localManager := &LocalSecretsManager{ + logger: config.Logger.Named(string(secrets.Local)), secretPathMap: make(map[string]string), } diff --git a/vendor/github.com/armon/go-metrics/.gitignore b/vendor/github.com/armon/go-metrics/.gitignore new file mode 100644 index 0000000000..e5750f5720 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/.gitignore @@ -0,0 +1,26 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +/metrics.out + +.idea diff --git a/vendor/github.com/armon/go-metrics/.travis.yml b/vendor/github.com/armon/go-metrics/.travis.yml new file mode 100644 index 0000000000..87d230c8d7 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/.travis.yml @@ -0,0 +1,13 @@ +language: go + +go: + - "1.x" + +env: + - GO111MODULE=on + +install: + - go get ./... + +script: + - go test ./... diff --git a/vendor/github.com/armon/go-metrics/LICENSE b/vendor/github.com/armon/go-metrics/LICENSE new file mode 100644 index 0000000000..106569e542 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 Armon Dadgar + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
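For reference, the following is a minimal wiring sketch for the Vault secrets manager added in `secrets/hashicorpvault` above; it is illustrative only and not part of this patch. It assumes a dev-mode Vault reachable at `http://127.0.0.1:8200` and a placeholder token, and it assumes that the `secrets` package defines `ServerURL` and `Name` parameter keys alongside `Token` (their lookups are elided from the factory hunk above). A non-nil logger is passed because the factory calls `config.Logger.Named(...)`.

```go
package main

import (
	"log"

	"github.com/0xPolygon/polygon-sdk/secrets"
	"github.com/0xPolygon/polygon-sdk/secrets/hashicorpvault"
	"github.com/hashicorp/go-hclog"
)

func main() {
	// Construct the Vault-backed SecretsManager. All values below are placeholders;
	// secrets.ServerURL and secrets.Name are assumed key names, while secrets.Token
	// is the key shown explicitly in the factory above.
	manager, err := hashicorpvault.SecretsManagerFactory(&secrets.SecretsManagerParams{
		Logger: hclog.NewNullLogger(), // the factory calls Logger.Named, so it must not be nil
		Params: map[string]interface{}{
			secrets.Token:     "<vault-token>",         // placeholder; never a root token in production
			secrets.ServerURL: "http://127.0.0.1:8200", // dev-mode Vault assumed
			secrets.Name:      "validator-1",           // node name, suffixes the KV v2 base path
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// manager now satisfies secrets.SecretsManager; SetSecret and GetSecret operate
	// on secret/data/validator-1/<secret-name>, wrapping values in the "data" object
	// that GetSecret unwraps via the secret.Data["data"] assertion shown above.
	_ = manager
}
```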
diff --git a/vendor/github.com/armon/go-metrics/README.md b/vendor/github.com/armon/go-metrics/README.md new file mode 100644 index 0000000000..aa73348c08 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/README.md @@ -0,0 +1,91 @@ +go-metrics +========== + +This library provides a `metrics` package which can be used to instrument code, +expose application metrics, and profile runtime performance in a flexible manner. + +Current API: [![GoDoc](https://godoc.org/github.com/armon/go-metrics?status.svg)](https://godoc.org/github.com/armon/go-metrics) + +Sinks +----- + +The `metrics` package makes use of a `MetricSink` interface to support delivery +to any type of backend. Currently the following sinks are provided: + +* StatsiteSink : Sinks to a [statsite](https://github.com/armon/statsite/) instance (TCP) +* StatsdSink: Sinks to a [StatsD](https://github.com/etsy/statsd/) / statsite instance (UDP) +* PrometheusSink: Sinks to a [Prometheus](http://prometheus.io/) metrics endpoint (exposed via HTTP for scrapes) +* InmemSink : Provides in-memory aggregation, can be used to export stats +* FanoutSink : Sinks to multiple sinks. Enables writing to multiple statsite instances for example. +* BlackholeSink : Sinks to nowhere + +In addition to the sinks, the `InmemSignal` can be used to catch a signal, +and dump a formatted output of recent metrics. For example, when a process gets +a SIGUSR1, it can dump to stderr recent performance metrics for debugging. + +Labels +------ + +Most metrics do have an equivalent ending with `WithLabels`, such methods +allow to push metrics with labels and use some features of underlying Sinks +(ex: translated into Prometheus labels). + +Since some of these labels may increase greatly cardinality of metrics, the +library allow to filter labels using a blacklist/whitelist filtering system +which is global to all metrics. + +* If `Config.AllowedLabels` is not nil, then only labels specified in this value will be sent to underlying Sink, otherwise, all labels are sent by default. +* If `Config.BlockedLabels` is not nil, any label specified in this value will not be sent to underlying Sinks. + +By default, both `Config.AllowedLabels` and `Config.BlockedLabels` are nil, meaning that +no tags are filetered at all, but it allow to a user to globally block some tags with high +cardinality at application level. + +Examples +-------- + +Here is an example of using the package: + +```go +func SlowMethod() { + // Profiling the runtime of a method + defer metrics.MeasureSince([]string{"SlowMethod"}, time.Now()) +} + +// Configure a statsite sink as the global metrics sink +sink, _ := metrics.NewStatsiteSink("statsite:8125") +metrics.NewGlobal(metrics.DefaultConfig("service-name"), sink) + +// Emit a Key/Value pair +metrics.EmitKey([]string{"questions", "meaning of life"}, 42) +``` + +Here is an example of setting up a signal handler: + +```go +// Setup the inmem sink and signal handler +inm := metrics.NewInmemSink(10*time.Second, time.Minute) +sig := metrics.DefaultInmemSignal(inm) +metrics.NewGlobal(metrics.DefaultConfig("service-name"), inm) + +// Run some code +inm.SetGauge([]string{"foo"}, 42) +inm.EmitKey([]string{"bar"}, 30) + +inm.IncrCounter([]string{"baz"}, 42) +inm.IncrCounter([]string{"baz"}, 1) +inm.IncrCounter([]string{"baz"}, 80) + +inm.AddSample([]string{"method", "wow"}, 42) +inm.AddSample([]string{"method", "wow"}, 100) +inm.AddSample([]string{"method", "wow"}, 22) + +.... 
+``` + +When a signal comes in, output like the following will be dumped to stderr: + + [2014-01-28 14:57:33.04 -0800 PST][G] 'foo': 42.000 + [2014-01-28 14:57:33.04 -0800 PST][P] 'bar': 30.000 + [2014-01-28 14:57:33.04 -0800 PST][C] 'baz': Count: 3 Min: 1.000 Mean: 41.000 Max: 80.000 Stddev: 39.509 + [2014-01-28 14:57:33.04 -0800 PST][S] 'method.wow': Count: 3 Min: 22.000 Mean: 54.667 Max: 100.000 Stddev: 40.513 \ No newline at end of file diff --git a/vendor/github.com/armon/go-metrics/const_unix.go b/vendor/github.com/armon/go-metrics/const_unix.go new file mode 100644 index 0000000000..31098dd57e --- /dev/null +++ b/vendor/github.com/armon/go-metrics/const_unix.go @@ -0,0 +1,12 @@ +// +build !windows + +package metrics + +import ( + "syscall" +) + +const ( + // DefaultSignal is used with DefaultInmemSignal + DefaultSignal = syscall.SIGUSR1 +) diff --git a/vendor/github.com/armon/go-metrics/const_windows.go b/vendor/github.com/armon/go-metrics/const_windows.go new file mode 100644 index 0000000000..38136af3e4 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/const_windows.go @@ -0,0 +1,13 @@ +// +build windows + +package metrics + +import ( + "syscall" +) + +const ( + // DefaultSignal is used with DefaultInmemSignal + // Windows has no SIGUSR1, use SIGBREAK + DefaultSignal = syscall.Signal(21) +) diff --git a/vendor/github.com/armon/go-metrics/go.mod b/vendor/github.com/armon/go-metrics/go.mod new file mode 100644 index 0000000000..e3a656ed78 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/go.mod @@ -0,0 +1,17 @@ +module github.com/armon/go-metrics + +go 1.12 + +require ( + github.com/DataDog/datadog-go v3.2.0+incompatible + github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible + github.com/circonus-labs/circonusllhist v0.1.3 // indirect + github.com/golang/protobuf v1.3.2 + github.com/hashicorp/go-immutable-radix v1.0.0 + github.com/hashicorp/go-retryablehttp v0.5.3 // indirect + github.com/pascaldekloe/goe v0.1.0 + github.com/prometheus/client_golang v1.4.0 + github.com/prometheus/client_model v0.2.0 + github.com/prometheus/common v0.9.1 + github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 // indirect +) diff --git a/vendor/github.com/armon/go-metrics/go.sum b/vendor/github.com/armon/go-metrics/go.sum new file mode 100644 index 0000000000..519481e6b5 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/go.sum @@ -0,0 +1,125 @@ +github.com/DataDog/datadog-go v3.2.0+incompatible h1:qSG2N4FghB1He/r2mFrWKCaL7dXCilEuNEeAn20fdD4= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod 
h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible h1:C29Ae4G5GtYyYMm1aztcyj/J5ckgJm2zwdDajFbx1NY= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3 h1:TJH+oke8D16535+jHExHj4nQvzlZrj7ug5D7I/orNUA= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-retryablehttp v0.5.3 h1:QlWt0KvWT0lq8MFppF9tsJGF+ynG7ztc2KIPhzRGk7s= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= 
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.0 h1:YVIb/fVcOTMSqtqZWSKnHpSLBxu8DKgxq8z6RuBZwqI= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8= 
+github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 h1:G3dpKMzFDjgEh2q1Z7zUUtKa8ViPtH+ocF0bE0g00O8= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82 h1:ywK/j/KkyTHcdyYSZNXGjMwgmDSfjglYZ3vStQ/gSCU= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.1/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/armon/go-metrics/inmem.go b/vendor/github.com/armon/go-metrics/inmem.go new file mode 100644 index 0000000000..7c427aca97 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/inmem.go @@ -0,0 +1,339 @@ +package metrics + +import ( + "bytes" + "fmt" + "math" + "net/url" + "strings" + "sync" + "time" +) + +var spaceReplacer = strings.NewReplacer(" ", "_") + +// InmemSink provides a MetricSink that does in-memory aggregation +// without sending metrics over a network. It can be embedded within +// an application to provide profiling information. +type InmemSink struct { + // How long is each aggregation interval + interval time.Duration + + // Retain controls how many metrics interval we keep + retain time.Duration + + // maxIntervals is the maximum length of intervals. + // It is retain / interval. + maxIntervals int + + // intervals is a slice of the retained intervals + intervals []*IntervalMetrics + intervalLock sync.RWMutex + + rateDenom float64 +} + +// IntervalMetrics stores the aggregated metrics +// for a specific interval +type IntervalMetrics struct { + sync.RWMutex + + // The start time of the interval + Interval time.Time + + // Gauges maps the key to the last set value + Gauges map[string]GaugeValue + + // Points maps the string to the list of emitted values + // from EmitKey + Points map[string][]float32 + + // Counters maps the string key to a sum of the counter + // values + Counters map[string]SampledValue + + // Samples maps the key to an AggregateSample, + // which has the rolled up view of a sample + Samples map[string]SampledValue + + // done is closed when this interval has ended, and a new IntervalMetrics + // has been created to receive any future metrics. 
+ done chan struct{} +} + +// NewIntervalMetrics creates a new IntervalMetrics for a given interval +func NewIntervalMetrics(intv time.Time) *IntervalMetrics { + return &IntervalMetrics{ + Interval: intv, + Gauges: make(map[string]GaugeValue), + Points: make(map[string][]float32), + Counters: make(map[string]SampledValue), + Samples: make(map[string]SampledValue), + done: make(chan struct{}), + } +} + +// AggregateSample is used to hold aggregate metrics +// about a sample +type AggregateSample struct { + Count int // The count of emitted pairs + Rate float64 // The values rate per time unit (usually 1 second) + Sum float64 // The sum of values + SumSq float64 `json:"-"` // The sum of squared values + Min float64 // Minimum value + Max float64 // Maximum value + LastUpdated time.Time `json:"-"` // When value was last updated +} + +// Computes a Stddev of the values +func (a *AggregateSample) Stddev() float64 { + num := (float64(a.Count) * a.SumSq) - math.Pow(a.Sum, 2) + div := float64(a.Count * (a.Count - 1)) + if div == 0 { + return 0 + } + return math.Sqrt(num / div) +} + +// Computes a mean of the values +func (a *AggregateSample) Mean() float64 { + if a.Count == 0 { + return 0 + } + return a.Sum / float64(a.Count) +} + +// Ingest is used to update a sample +func (a *AggregateSample) Ingest(v float64, rateDenom float64) { + a.Count++ + a.Sum += v + a.SumSq += (v * v) + if v < a.Min || a.Count == 1 { + a.Min = v + } + if v > a.Max || a.Count == 1 { + a.Max = v + } + a.Rate = float64(a.Sum) / rateDenom + a.LastUpdated = time.Now() +} + +func (a *AggregateSample) String() string { + if a.Count == 0 { + return "Count: 0" + } else if a.Stddev() == 0 { + return fmt.Sprintf("Count: %d Sum: %0.3f LastUpdated: %s", a.Count, a.Sum, a.LastUpdated) + } else { + return fmt.Sprintf("Count: %d Min: %0.3f Mean: %0.3f Max: %0.3f Stddev: %0.3f Sum: %0.3f LastUpdated: %s", + a.Count, a.Min, a.Mean(), a.Max, a.Stddev(), a.Sum, a.LastUpdated) + } +} + +// NewInmemSinkFromURL creates an InmemSink from a URL. It is used +// (and tested) from NewMetricSinkFromURL. +func NewInmemSinkFromURL(u *url.URL) (MetricSink, error) { + params := u.Query() + + interval, err := time.ParseDuration(params.Get("interval")) + if err != nil { + return nil, fmt.Errorf("Bad 'interval' param: %s", err) + } + + retain, err := time.ParseDuration(params.Get("retain")) + if err != nil { + return nil, fmt.Errorf("Bad 'retain' param: %s", err) + } + + return NewInmemSink(interval, retain), nil +} + +// NewInmemSink is used to construct a new in-memory sink. +// Uses an aggregation interval and maximum retention period. 
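+//
+// Illustrative usage sketch (not from the upstream source): a sink that
+// aggregates into 10-second intervals and retains one minute of data.
+//
+//	sink := metrics.NewInmemSink(10*time.Second, time.Minute)
+//	sink.SetGauge([]string{"queue", "depth"}, 42)
+//	sink.IncrCounter([]string{"requests", "served"}, 1)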
+func NewInmemSink(interval, retain time.Duration) *InmemSink { + rateTimeUnit := time.Second + i := &InmemSink{ + interval: interval, + retain: retain, + maxIntervals: int(retain / interval), + rateDenom: float64(interval.Nanoseconds()) / float64(rateTimeUnit.Nanoseconds()), + } + i.intervals = make([]*IntervalMetrics, 0, i.maxIntervals) + return i +} + +func (i *InmemSink) SetGauge(key []string, val float32) { + i.SetGaugeWithLabels(key, val, nil) +} + +func (i *InmemSink) SetGaugeWithLabels(key []string, val float32, labels []Label) { + k, name := i.flattenKeyLabels(key, labels) + intv := i.getInterval() + + intv.Lock() + defer intv.Unlock() + intv.Gauges[k] = GaugeValue{Name: name, Value: val, Labels: labels} +} + +func (i *InmemSink) EmitKey(key []string, val float32) { + k := i.flattenKey(key) + intv := i.getInterval() + + intv.Lock() + defer intv.Unlock() + vals := intv.Points[k] + intv.Points[k] = append(vals, val) +} + +func (i *InmemSink) IncrCounter(key []string, val float32) { + i.IncrCounterWithLabels(key, val, nil) +} + +func (i *InmemSink) IncrCounterWithLabels(key []string, val float32, labels []Label) { + k, name := i.flattenKeyLabels(key, labels) + intv := i.getInterval() + + intv.Lock() + defer intv.Unlock() + + agg, ok := intv.Counters[k] + if !ok { + agg = SampledValue{ + Name: name, + AggregateSample: &AggregateSample{}, + Labels: labels, + } + intv.Counters[k] = agg + } + agg.Ingest(float64(val), i.rateDenom) +} + +func (i *InmemSink) AddSample(key []string, val float32) { + i.AddSampleWithLabels(key, val, nil) +} + +func (i *InmemSink) AddSampleWithLabels(key []string, val float32, labels []Label) { + k, name := i.flattenKeyLabels(key, labels) + intv := i.getInterval() + + intv.Lock() + defer intv.Unlock() + + agg, ok := intv.Samples[k] + if !ok { + agg = SampledValue{ + Name: name, + AggregateSample: &AggregateSample{}, + Labels: labels, + } + intv.Samples[k] = agg + } + agg.Ingest(float64(val), i.rateDenom) +} + +// Data is used to retrieve all the aggregated metrics +// Intervals may be in use, and a read lock should be acquired +func (i *InmemSink) Data() []*IntervalMetrics { + // Get the current interval, forces creation + i.getInterval() + + i.intervalLock.RLock() + defer i.intervalLock.RUnlock() + + n := len(i.intervals) + intervals := make([]*IntervalMetrics, n) + + copy(intervals[:n-1], i.intervals[:n-1]) + current := i.intervals[n-1] + + // make its own copy for current interval + intervals[n-1] = &IntervalMetrics{} + copyCurrent := intervals[n-1] + current.RLock() + *copyCurrent = *current + // RWMutex is not safe to copy, so create a new instance on the copy + copyCurrent.RWMutex = sync.RWMutex{} + + copyCurrent.Gauges = make(map[string]GaugeValue, len(current.Gauges)) + for k, v := range current.Gauges { + copyCurrent.Gauges[k] = v + } + // saved values will be not change, just copy its link + copyCurrent.Points = make(map[string][]float32, len(current.Points)) + for k, v := range current.Points { + copyCurrent.Points[k] = v + } + copyCurrent.Counters = make(map[string]SampledValue, len(current.Counters)) + for k, v := range current.Counters { + copyCurrent.Counters[k] = v.deepCopy() + } + copyCurrent.Samples = make(map[string]SampledValue, len(current.Samples)) + for k, v := range current.Samples { + copyCurrent.Samples[k] = v.deepCopy() + } + current.RUnlock() + + return intervals +} + +// getInterval returns the current interval. 
A new interval is created if no +// previous interval exists, or if the current time is beyond the window for the +// current interval. +func (i *InmemSink) getInterval() *IntervalMetrics { + intv := time.Now().Truncate(i.interval) + + // Attempt to return the existing interval first, because it only requires + // a read lock. + i.intervalLock.RLock() + n := len(i.intervals) + if n > 0 && i.intervals[n-1].Interval == intv { + defer i.intervalLock.RUnlock() + return i.intervals[n-1] + } + i.intervalLock.RUnlock() + + i.intervalLock.Lock() + defer i.intervalLock.Unlock() + + // Re-check for an existing interval now that the lock is re-acquired. + n = len(i.intervals) + if n > 0 && i.intervals[n-1].Interval == intv { + return i.intervals[n-1] + } + + current := NewIntervalMetrics(intv) + i.intervals = append(i.intervals, current) + if n > 0 { + close(i.intervals[n-1].done) + } + + n++ + // Prune old intervals if the count exceeds the max. + if n >= i.maxIntervals { + copy(i.intervals[0:], i.intervals[n-i.maxIntervals:]) + i.intervals = i.intervals[:i.maxIntervals] + } + return current +} + +// Flattens the key for formatting, removes spaces +func (i *InmemSink) flattenKey(parts []string) string { + buf := &bytes.Buffer{} + + joined := strings.Join(parts, ".") + + spaceReplacer.WriteString(buf, joined) + + return buf.String() +} + +// Flattens the key for formatting along with its labels, removes spaces +func (i *InmemSink) flattenKeyLabels(parts []string, labels []Label) (string, string) { + key := i.flattenKey(parts) + buf := bytes.NewBufferString(key) + + for _, label := range labels { + spaceReplacer.WriteString(buf, fmt.Sprintf(";%s=%s", label.Name, label.Value)) + } + + return buf.String(), key +} diff --git a/vendor/github.com/armon/go-metrics/inmem_endpoint.go b/vendor/github.com/armon/go-metrics/inmem_endpoint.go new file mode 100644 index 0000000000..24eefa9638 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/inmem_endpoint.go @@ -0,0 +1,162 @@ +package metrics + +import ( + "context" + "fmt" + "net/http" + "sort" + "time" +) + +// MetricsSummary holds a roll-up of metrics info for a given interval +type MetricsSummary struct { + Timestamp string + Gauges []GaugeValue + Points []PointValue + Counters []SampledValue + Samples []SampledValue +} + +type GaugeValue struct { + Name string + Hash string `json:"-"` + Value float32 + + Labels []Label `json:"-"` + DisplayLabels map[string]string `json:"Labels"` +} + +type PointValue struct { + Name string + Points []float32 +} + +type SampledValue struct { + Name string + Hash string `json:"-"` + *AggregateSample + Mean float64 + Stddev float64 + + Labels []Label `json:"-"` + DisplayLabels map[string]string `json:"Labels"` +} + +// deepCopy allocates a new instance of AggregateSample +func (source *SampledValue) deepCopy() SampledValue { + dest := *source + if source.AggregateSample != nil { + dest.AggregateSample = &AggregateSample{} + *dest.AggregateSample = *source.AggregateSample + } + return dest +} + +// DisplayMetrics returns a summary of the metrics from the most recent finished interval. 
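+//
+// Illustrative HTTP wiring sketch (not from the upstream source; "sink" is
+// assumed to be an *InmemSink already in scope):
+//
+//	http.HandleFunc("/metrics", func(w http.ResponseWriter, r *http.Request) {
+//		summary, err := sink.DisplayMetrics(w, r)
+//		if err != nil {
+//			http.Error(w, err.Error(), http.StatusInternalServerError)
+//			return
+//		}
+//		_ = json.NewEncoder(w).Encode(summary)
+//	})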
+func (i *InmemSink) DisplayMetrics(resp http.ResponseWriter, req *http.Request) (interface{}, error) { + data := i.Data() + + var interval *IntervalMetrics + n := len(data) + switch { + case n == 0: + return nil, fmt.Errorf("no metric intervals have been initialized yet") + case n == 1: + // Show the current interval if it's all we have + interval = data[0] + default: + // Show the most recent finished interval if we have one + interval = data[n-2] + } + + return newMetricSummaryFromInterval(interval), nil +} + +func newMetricSummaryFromInterval(interval *IntervalMetrics) MetricsSummary { + interval.RLock() + defer interval.RUnlock() + + summary := MetricsSummary{ + Timestamp: interval.Interval.Round(time.Second).UTC().String(), + Gauges: make([]GaugeValue, 0, len(interval.Gauges)), + Points: make([]PointValue, 0, len(interval.Points)), + } + + // Format and sort the output of each metric type, so it gets displayed in a + // deterministic order. + for name, points := range interval.Points { + summary.Points = append(summary.Points, PointValue{name, points}) + } + sort.Slice(summary.Points, func(i, j int) bool { + return summary.Points[i].Name < summary.Points[j].Name + }) + + for hash, value := range interval.Gauges { + value.Hash = hash + value.DisplayLabels = make(map[string]string) + for _, label := range value.Labels { + value.DisplayLabels[label.Name] = label.Value + } + value.Labels = nil + + summary.Gauges = append(summary.Gauges, value) + } + sort.Slice(summary.Gauges, func(i, j int) bool { + return summary.Gauges[i].Hash < summary.Gauges[j].Hash + }) + + summary.Counters = formatSamples(interval.Counters) + summary.Samples = formatSamples(interval.Samples) + + return summary +} + +func formatSamples(source map[string]SampledValue) []SampledValue { + output := make([]SampledValue, 0, len(source)) + for hash, sample := range source { + displayLabels := make(map[string]string) + for _, label := range sample.Labels { + displayLabels[label.Name] = label.Value + } + + output = append(output, SampledValue{ + Name: sample.Name, + Hash: hash, + AggregateSample: sample.AggregateSample, + Mean: sample.AggregateSample.Mean(), + Stddev: sample.AggregateSample.Stddev(), + DisplayLabels: displayLabels, + }) + } + sort.Slice(output, func(i, j int) bool { + return output[i].Hash < output[j].Hash + }) + + return output +} + +type Encoder interface { + Encode(interface{}) error +} + +// Stream writes metrics using encoder.Encode each time an interval ends. Runs +// until the request context is cancelled, or the encoder returns an error. +// The caller is responsible for logging any errors from encoder. 
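+//
+// Illustrative sketch (not from the upstream source): stream interval
+// summaries as JSON lines until the context is cancelled.
+//
+//	ctx, cancel := context.WithCancel(context.Background())
+//	defer cancel()
+//	go sink.Stream(ctx, json.NewEncoder(os.Stdout))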
+func (i *InmemSink) Stream(ctx context.Context, encoder Encoder) { + interval := i.getInterval() + + for { + select { + case <-interval.done: + summary := newMetricSummaryFromInterval(interval) + if err := encoder.Encode(summary); err != nil { + return + } + + // update interval to the next one + interval = i.getInterval() + case <-ctx.Done(): + return + } + } +} diff --git a/vendor/github.com/armon/go-metrics/inmem_signal.go b/vendor/github.com/armon/go-metrics/inmem_signal.go new file mode 100644 index 0000000000..0937f4aedf --- /dev/null +++ b/vendor/github.com/armon/go-metrics/inmem_signal.go @@ -0,0 +1,117 @@ +package metrics + +import ( + "bytes" + "fmt" + "io" + "os" + "os/signal" + "strings" + "sync" + "syscall" +) + +// InmemSignal is used to listen for a given signal, and when received, +// to dump the current metrics from the InmemSink to an io.Writer +type InmemSignal struct { + signal syscall.Signal + inm *InmemSink + w io.Writer + sigCh chan os.Signal + + stop bool + stopCh chan struct{} + stopLock sync.Mutex +} + +// NewInmemSignal creates a new InmemSignal which listens for a given signal, +// and dumps the current metrics out to a writer +func NewInmemSignal(inmem *InmemSink, sig syscall.Signal, w io.Writer) *InmemSignal { + i := &InmemSignal{ + signal: sig, + inm: inmem, + w: w, + sigCh: make(chan os.Signal, 1), + stopCh: make(chan struct{}), + } + signal.Notify(i.sigCh, sig) + go i.run() + return i +} + +// DefaultInmemSignal returns a new InmemSignal that responds to SIGUSR1 +// and writes output to stderr. Windows uses SIGBREAK +func DefaultInmemSignal(inmem *InmemSink) *InmemSignal { + return NewInmemSignal(inmem, DefaultSignal, os.Stderr) +} + +// Stop is used to stop the InmemSignal from listening +func (i *InmemSignal) Stop() { + i.stopLock.Lock() + defer i.stopLock.Unlock() + + if i.stop { + return + } + i.stop = true + close(i.stopCh) + signal.Stop(i.sigCh) +} + +// run is a long running routine that handles signals +func (i *InmemSignal) run() { + for { + select { + case <-i.sigCh: + i.dumpStats() + case <-i.stopCh: + return + } + } +} + +// dumpStats is used to dump the data to output writer +func (i *InmemSignal) dumpStats() { + buf := bytes.NewBuffer(nil) + + data := i.inm.Data() + // Skip the last period which is still being aggregated + for j := 0; j < len(data)-1; j++ { + intv := data[j] + intv.RLock() + for _, val := range intv.Gauges { + name := i.flattenLabels(val.Name, val.Labels) + fmt.Fprintf(buf, "[%v][G] '%s': %0.3f\n", intv.Interval, name, val.Value) + } + for name, vals := range intv.Points { + for _, val := range vals { + fmt.Fprintf(buf, "[%v][P] '%s': %0.3f\n", intv.Interval, name, val) + } + } + for _, agg := range intv.Counters { + name := i.flattenLabels(agg.Name, agg.Labels) + fmt.Fprintf(buf, "[%v][C] '%s': %s\n", intv.Interval, name, agg.AggregateSample) + } + for _, agg := range intv.Samples { + name := i.flattenLabels(agg.Name, agg.Labels) + fmt.Fprintf(buf, "[%v][S] '%s': %s\n", intv.Interval, name, agg.AggregateSample) + } + intv.RUnlock() + } + + // Write out the bytes + i.w.Write(buf.Bytes()) +} + +// Flattens the key for formatting along with its labels, removes spaces +func (i *InmemSignal) flattenLabels(name string, labels []Label) string { + buf := bytes.NewBufferString(name) + replacer := strings.NewReplacer(" ", "_", ":", "_") + + for _, label := range labels { + replacer.WriteString(buf, ".") + replacer.WriteString(buf, label.Value) + } + + return buf.String() +} diff --git a/vendor/github.com/armon/go-metrics/metrics.go 
b/vendor/github.com/armon/go-metrics/metrics.go new file mode 100644 index 0000000000..6753b13bb2 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/metrics.go @@ -0,0 +1,293 @@ +package metrics + +import ( + "runtime" + "strings" + "time" + + "github.com/hashicorp/go-immutable-radix" +) + +type Label struct { + Name string + Value string +} + +func (m *Metrics) SetGauge(key []string, val float32) { + m.SetGaugeWithLabels(key, val, nil) +} + +func (m *Metrics) SetGaugeWithLabels(key []string, val float32, labels []Label) { + if m.HostName != "" { + if m.EnableHostnameLabel { + labels = append(labels, Label{"host", m.HostName}) + } else if m.EnableHostname { + key = insert(0, m.HostName, key) + } + } + if m.EnableTypePrefix { + key = insert(0, "gauge", key) + } + if m.ServiceName != "" { + if m.EnableServiceLabel { + labels = append(labels, Label{"service", m.ServiceName}) + } else { + key = insert(0, m.ServiceName, key) + } + } + allowed, labelsFiltered := m.allowMetric(key, labels) + if !allowed { + return + } + m.sink.SetGaugeWithLabels(key, val, labelsFiltered) +} + +func (m *Metrics) EmitKey(key []string, val float32) { + if m.EnableTypePrefix { + key = insert(0, "kv", key) + } + if m.ServiceName != "" { + key = insert(0, m.ServiceName, key) + } + allowed, _ := m.allowMetric(key, nil) + if !allowed { + return + } + m.sink.EmitKey(key, val) +} + +func (m *Metrics) IncrCounter(key []string, val float32) { + m.IncrCounterWithLabels(key, val, nil) +} + +func (m *Metrics) IncrCounterWithLabels(key []string, val float32, labels []Label) { + if m.HostName != "" && m.EnableHostnameLabel { + labels = append(labels, Label{"host", m.HostName}) + } + if m.EnableTypePrefix { + key = insert(0, "counter", key) + } + if m.ServiceName != "" { + if m.EnableServiceLabel { + labels = append(labels, Label{"service", m.ServiceName}) + } else { + key = insert(0, m.ServiceName, key) + } + } + allowed, labelsFiltered := m.allowMetric(key, labels) + if !allowed { + return + } + m.sink.IncrCounterWithLabels(key, val, labelsFiltered) +} + +func (m *Metrics) AddSample(key []string, val float32) { + m.AddSampleWithLabels(key, val, nil) +} + +func (m *Metrics) AddSampleWithLabels(key []string, val float32, labels []Label) { + if m.HostName != "" && m.EnableHostnameLabel { + labels = append(labels, Label{"host", m.HostName}) + } + if m.EnableTypePrefix { + key = insert(0, "sample", key) + } + if m.ServiceName != "" { + if m.EnableServiceLabel { + labels = append(labels, Label{"service", m.ServiceName}) + } else { + key = insert(0, m.ServiceName, key) + } + } + allowed, labelsFiltered := m.allowMetric(key, labels) + if !allowed { + return + } + m.sink.AddSampleWithLabels(key, val, labelsFiltered) +} + +func (m *Metrics) MeasureSince(key []string, start time.Time) { + m.MeasureSinceWithLabels(key, start, nil) +} + +func (m *Metrics) MeasureSinceWithLabels(key []string, start time.Time, labels []Label) { + if m.HostName != "" && m.EnableHostnameLabel { + labels = append(labels, Label{"host", m.HostName}) + } + if m.EnableTypePrefix { + key = insert(0, "timer", key) + } + if m.ServiceName != "" { + if m.EnableServiceLabel { + labels = append(labels, Label{"service", m.ServiceName}) + } else { + key = insert(0, m.ServiceName, key) + } + } + allowed, labelsFiltered := m.allowMetric(key, labels) + if !allowed { + return + } + now := time.Now() + elapsed := now.Sub(start) + msec := float32(elapsed.Nanoseconds()) / float32(m.TimerGranularity) + m.sink.AddSampleWithLabels(key, msec, labelsFiltered) +} + +// UpdateFilter 
overwrites the existing filter with the given rules. +func (m *Metrics) UpdateFilter(allow, block []string) { + m.UpdateFilterAndLabels(allow, block, m.AllowedLabels, m.BlockedLabels) +} + +// UpdateFilterAndLabels overwrites the existing filter with the given rules. +func (m *Metrics) UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabels []string) { + m.filterLock.Lock() + defer m.filterLock.Unlock() + + m.AllowedPrefixes = allow + m.BlockedPrefixes = block + + if allowedLabels == nil { + // Having a white list means we take only elements from it + m.allowedLabels = nil + } else { + m.allowedLabels = make(map[string]bool) + for _, v := range allowedLabels { + m.allowedLabels[v] = true + } + } + m.blockedLabels = make(map[string]bool) + for _, v := range blockedLabels { + m.blockedLabels[v] = true + } + m.AllowedLabels = allowedLabels + m.BlockedLabels = blockedLabels + + m.filter = iradix.New() + for _, prefix := range m.AllowedPrefixes { + m.filter, _, _ = m.filter.Insert([]byte(prefix), true) + } + for _, prefix := range m.BlockedPrefixes { + m.filter, _, _ = m.filter.Insert([]byte(prefix), false) + } +} + +// labelIsAllowed return true if a should be included in metric +// the caller should lock m.filterLock while calling this method +func (m *Metrics) labelIsAllowed(label *Label) bool { + labelName := (*label).Name + if m.blockedLabels != nil { + _, ok := m.blockedLabels[labelName] + if ok { + // If present, let's remove this label + return false + } + } + if m.allowedLabels != nil { + _, ok := m.allowedLabels[labelName] + return ok + } + // Allow by default + return true +} + +// filterLabels return only allowed labels +// the caller should lock m.filterLock while calling this method +func (m *Metrics) filterLabels(labels []Label) []Label { + if labels == nil { + return nil + } + toReturn := []Label{} + for _, label := range labels { + if m.labelIsAllowed(&label) { + toReturn = append(toReturn, label) + } + } + return toReturn +} + +// Returns whether the metric should be allowed based on configured prefix filters +// Also return the applicable labels +func (m *Metrics) allowMetric(key []string, labels []Label) (bool, []Label) { + m.filterLock.RLock() + defer m.filterLock.RUnlock() + + if m.filter == nil || m.filter.Len() == 0 { + return m.Config.FilterDefault, m.filterLabels(labels) + } + + _, allowed, ok := m.filter.Root().LongestPrefix([]byte(strings.Join(key, "."))) + if !ok { + return m.Config.FilterDefault, m.filterLabels(labels) + } + + return allowed.(bool), m.filterLabels(labels) +} + +// Periodically collects runtime stats to publish +func (m *Metrics) collectStats() { + for { + time.Sleep(m.ProfileInterval) + m.EmitRuntimeStats() + } +} + +// Emits various runtime statsitics +func (m *Metrics) EmitRuntimeStats() { + // Export number of Goroutines + numRoutines := runtime.NumGoroutine() + m.SetGauge([]string{"runtime", "num_goroutines"}, float32(numRoutines)) + + // Export memory stats + var stats runtime.MemStats + runtime.ReadMemStats(&stats) + m.SetGauge([]string{"runtime", "alloc_bytes"}, float32(stats.Alloc)) + m.SetGauge([]string{"runtime", "sys_bytes"}, float32(stats.Sys)) + m.SetGauge([]string{"runtime", "malloc_count"}, float32(stats.Mallocs)) + m.SetGauge([]string{"runtime", "free_count"}, float32(stats.Frees)) + m.SetGauge([]string{"runtime", "heap_objects"}, float32(stats.HeapObjects)) + m.SetGauge([]string{"runtime", "total_gc_pause_ns"}, float32(stats.PauseTotalNs)) + m.SetGauge([]string{"runtime", "total_gc_runs"}, float32(stats.NumGC)) + + // 
Export info about the last few GC runs + num := stats.NumGC + + // Handle wrap around + if num < m.lastNumGC { + m.lastNumGC = 0 + } + + // Ensure we don't scan more than 256 + if num-m.lastNumGC >= 256 { + m.lastNumGC = num - 255 + } + + for i := m.lastNumGC; i < num; i++ { + pause := stats.PauseNs[i%256] + m.AddSample([]string{"runtime", "gc_pause_ns"}, float32(pause)) + } + m.lastNumGC = num +} + +// Creates a new slice with the provided string value as the first element +// and the provided slice values as the remaining values. +// Ordering of the values in the provided input slice is kept in tact in the output slice. +func insert(i int, v string, s []string) []string { + // Allocate new slice to avoid modifying the input slice + newS := make([]string, len(s)+1) + + // Copy s[0, i-1] into newS + for j := 0; j < i; j++ { + newS[j] = s[j] + } + + // Insert provided element at index i + newS[i] = v + + // Copy s[i, len(s)-1] into newS starting at newS[i+1] + for j := i; j < len(s); j++ { + newS[j+1] = s[j] + } + + return newS +} diff --git a/vendor/github.com/armon/go-metrics/sink.go b/vendor/github.com/armon/go-metrics/sink.go new file mode 100644 index 0000000000..0b7d6e4be4 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/sink.go @@ -0,0 +1,115 @@ +package metrics + +import ( + "fmt" + "net/url" +) + +// The MetricSink interface is used to transmit metrics information +// to an external system +type MetricSink interface { + // A Gauge should retain the last value it is set to + SetGauge(key []string, val float32) + SetGaugeWithLabels(key []string, val float32, labels []Label) + + // Should emit a Key/Value pair for each call + EmitKey(key []string, val float32) + + // Counters should accumulate values + IncrCounter(key []string, val float32) + IncrCounterWithLabels(key []string, val float32, labels []Label) + + // Samples are for timing information, where quantiles are used + AddSample(key []string, val float32) + AddSampleWithLabels(key []string, val float32, labels []Label) +} + +// BlackholeSink is used to just blackhole messages +type BlackholeSink struct{} + +func (*BlackholeSink) SetGauge(key []string, val float32) {} +func (*BlackholeSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {} +func (*BlackholeSink) EmitKey(key []string, val float32) {} +func (*BlackholeSink) IncrCounter(key []string, val float32) {} +func (*BlackholeSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {} +func (*BlackholeSink) AddSample(key []string, val float32) {} +func (*BlackholeSink) AddSampleWithLabels(key []string, val float32, labels []Label) {} + +// FanoutSink is used to sink to fanout values to multiple sinks +type FanoutSink []MetricSink + +func (fh FanoutSink) SetGauge(key []string, val float32) { + fh.SetGaugeWithLabels(key, val, nil) +} + +func (fh FanoutSink) SetGaugeWithLabels(key []string, val float32, labels []Label) { + for _, s := range fh { + s.SetGaugeWithLabels(key, val, labels) + } +} + +func (fh FanoutSink) EmitKey(key []string, val float32) { + for _, s := range fh { + s.EmitKey(key, val) + } +} + +func (fh FanoutSink) IncrCounter(key []string, val float32) { + fh.IncrCounterWithLabels(key, val, nil) +} + +func (fh FanoutSink) IncrCounterWithLabels(key []string, val float32, labels []Label) { + for _, s := range fh { + s.IncrCounterWithLabels(key, val, labels) + } +} + +func (fh FanoutSink) AddSample(key []string, val float32) { + fh.AddSampleWithLabels(key, val, nil) +} + +func (fh FanoutSink) AddSampleWithLabels(key []string, val 
float32, labels []Label) { + for _, s := range fh { + s.AddSampleWithLabels(key, val, labels) + } +} + +// sinkURLFactoryFunc is an generic interface around the *SinkFromURL() function provided +// by each sink type +type sinkURLFactoryFunc func(*url.URL) (MetricSink, error) + +// sinkRegistry supports the generic NewMetricSink function by mapping URL +// schemes to metric sink factory functions +var sinkRegistry = map[string]sinkURLFactoryFunc{ + "statsd": NewStatsdSinkFromURL, + "statsite": NewStatsiteSinkFromURL, + "inmem": NewInmemSinkFromURL, +} + +// NewMetricSinkFromURL allows a generic URL input to configure any of the +// supported sinks. The scheme of the URL identifies the type of the sink, the +// and query parameters are used to set options. +// +// "statsd://" - Initializes a StatsdSink. The host and port are passed through +// as the "addr" of the sink +// +// "statsite://" - Initializes a StatsiteSink. The host and port become the +// "addr" of the sink +// +// "inmem://" - Initializes an InmemSink. The host and port are ignored. The +// "interval" and "duration" query parameters must be specified with valid +// durations, see NewInmemSink for details. +func NewMetricSinkFromURL(urlStr string) (MetricSink, error) { + u, err := url.Parse(urlStr) + if err != nil { + return nil, err + } + + sinkURLFactoryFunc := sinkRegistry[u.Scheme] + if sinkURLFactoryFunc == nil { + return nil, fmt.Errorf( + "cannot create metric sink, unrecognized sink name: %q", u.Scheme) + } + + return sinkURLFactoryFunc(u) +} diff --git a/vendor/github.com/armon/go-metrics/start.go b/vendor/github.com/armon/go-metrics/start.go new file mode 100644 index 0000000000..6aa0bd389a --- /dev/null +++ b/vendor/github.com/armon/go-metrics/start.go @@ -0,0 +1,146 @@ +package metrics + +import ( + "os" + "sync" + "sync/atomic" + "time" + + iradix "github.com/hashicorp/go-immutable-radix" +) + +// Config is used to configure metrics settings +type Config struct { + ServiceName string // Prefixed with keys to separate services + HostName string // Hostname to use. If not provided and EnableHostname, it will be os.Hostname + EnableHostname bool // Enable prefixing gauge values with hostname + EnableHostnameLabel bool // Enable adding hostname to labels + EnableServiceLabel bool // Enable adding service to labels + EnableRuntimeMetrics bool // Enables profiling of runtime metrics (GC, Goroutines, Memory) + EnableTypePrefix bool // Prefixes key with a type ("counter", "gauge", "timer") + TimerGranularity time.Duration // Granularity of timers. + ProfileInterval time.Duration // Interval to profile runtime metrics + + AllowedPrefixes []string // A list of metric prefixes to allow, with '.' as the separator + BlockedPrefixes []string // A list of metric prefixes to block, with '.' as the separator + AllowedLabels []string // A list of metric labels to allow, with '.' as the separator + BlockedLabels []string // A list of metric labels to block, with '.' 
as the separator + FilterDefault bool // Whether to allow metrics by default +} + +// Metrics represents an instance of a metrics sink that can +// be used to emit +type Metrics struct { + Config + lastNumGC uint32 + sink MetricSink + filter *iradix.Tree + allowedLabels map[string]bool + blockedLabels map[string]bool + filterLock sync.RWMutex // Lock filters and allowedLabels/blockedLabels access +} + +// Shared global metrics instance +var globalMetrics atomic.Value // *Metrics + +func init() { + // Initialize to a blackhole sink to avoid errors + globalMetrics.Store(&Metrics{sink: &BlackholeSink{}}) +} + +// Default returns the shared global metrics instance. +func Default() *Metrics { + return globalMetrics.Load().(*Metrics) +} + +// DefaultConfig provides a sane default configuration +func DefaultConfig(serviceName string) *Config { + c := &Config{ + ServiceName: serviceName, // Use client provided service + HostName: "", + EnableHostname: true, // Enable hostname prefix + EnableRuntimeMetrics: true, // Enable runtime profiling + EnableTypePrefix: false, // Disable type prefix + TimerGranularity: time.Millisecond, // Timers are in milliseconds + ProfileInterval: time.Second, // Poll runtime every second + FilterDefault: true, // Don't filter metrics by default + } + + // Try to get the hostname + name, _ := os.Hostname() + c.HostName = name + return c +} + +// New is used to create a new instance of Metrics +func New(conf *Config, sink MetricSink) (*Metrics, error) { + met := &Metrics{} + met.Config = *conf + met.sink = sink + met.UpdateFilterAndLabels(conf.AllowedPrefixes, conf.BlockedPrefixes, conf.AllowedLabels, conf.BlockedLabels) + + // Start the runtime collector + if conf.EnableRuntimeMetrics { + go met.collectStats() + } + return met, nil +} + +// NewGlobal is the same as New, but it assigns the metrics object to be +// used globally as well as returning it. 
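+//
+// Illustrative setup sketch (not from the upstream source; the service name
+// is a placeholder):
+//
+//	sink := metrics.NewInmemSink(10*time.Second, time.Minute)
+//	if _, err := metrics.NewGlobal(metrics.DefaultConfig("my-service"), sink); err != nil {
+//		log.Fatal(err)
+//	}
+//	metrics.IncrCounter([]string{"requests", "served"}, 1)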
+func NewGlobal(conf *Config, sink MetricSink) (*Metrics, error) { + metrics, err := New(conf, sink) + if err == nil { + globalMetrics.Store(metrics) + } + return metrics, err +} + +// Proxy all the methods to the globalMetrics instance +func SetGauge(key []string, val float32) { + globalMetrics.Load().(*Metrics).SetGauge(key, val) +} + +func SetGaugeWithLabels(key []string, val float32, labels []Label) { + globalMetrics.Load().(*Metrics).SetGaugeWithLabels(key, val, labels) +} + +func EmitKey(key []string, val float32) { + globalMetrics.Load().(*Metrics).EmitKey(key, val) +} + +func IncrCounter(key []string, val float32) { + globalMetrics.Load().(*Metrics).IncrCounter(key, val) +} + +func IncrCounterWithLabels(key []string, val float32, labels []Label) { + globalMetrics.Load().(*Metrics).IncrCounterWithLabels(key, val, labels) +} + +func AddSample(key []string, val float32) { + globalMetrics.Load().(*Metrics).AddSample(key, val) +} + +func AddSampleWithLabels(key []string, val float32, labels []Label) { + globalMetrics.Load().(*Metrics).AddSampleWithLabels(key, val, labels) +} + +func MeasureSince(key []string, start time.Time) { + globalMetrics.Load().(*Metrics).MeasureSince(key, start) +} + +func MeasureSinceWithLabels(key []string, start time.Time, labels []Label) { + globalMetrics.Load().(*Metrics).MeasureSinceWithLabels(key, start, labels) +} + +func UpdateFilter(allow, block []string) { + globalMetrics.Load().(*Metrics).UpdateFilter(allow, block) +} + +// UpdateFilterAndLabels set allow/block prefixes of metrics while allowedLabels +// and blockedLabels - when not nil - allow filtering of labels in order to +// block/allow globally labels (especially useful when having large number of +// values for a given label). See README.md for more information about usage. +func UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabels []string) { + globalMetrics.Load().(*Metrics).UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabels) +} diff --git a/vendor/github.com/armon/go-metrics/statsd.go b/vendor/github.com/armon/go-metrics/statsd.go new file mode 100644 index 0000000000..1bfffce46e --- /dev/null +++ b/vendor/github.com/armon/go-metrics/statsd.go @@ -0,0 +1,184 @@ +package metrics + +import ( + "bytes" + "fmt" + "log" + "net" + "net/url" + "strings" + "time" +) + +const ( + // statsdMaxLen is the maximum size of a packet + // to send to statsd + statsdMaxLen = 1400 +) + +// StatsdSink provides a MetricSink that can be used +// with a statsite or statsd metrics server. It uses +// only UDP packets, while StatsiteSink uses TCP. +type StatsdSink struct { + addr string + metricQueue chan string +} + +// NewStatsdSinkFromURL creates an StatsdSink from a URL. It is used +// (and tested) from NewMetricSinkFromURL. 
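+//
+// Illustrative sketch (not from the upstream source; the address is a
+// placeholder) of the usual entry point, NewMetricSinkFromURL:
+//
+//	sink, err := metrics.NewMetricSinkFromURL("statsd://127.0.0.1:8125")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	sink.IncrCounter([]string{"requests"}, 1)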
+func NewStatsdSinkFromURL(u *url.URL) (MetricSink, error) { + return NewStatsdSink(u.Host) +} + +// NewStatsdSink is used to create a new StatsdSink +func NewStatsdSink(addr string) (*StatsdSink, error) { + s := &StatsdSink{ + addr: addr, + metricQueue: make(chan string, 4096), + } + go s.flushMetrics() + return s, nil +} + +// Close is used to stop flushing to statsd +func (s *StatsdSink) Shutdown() { + close(s.metricQueue) +} + +func (s *StatsdSink) SetGauge(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val)) +} + +func (s *StatsdSink) SetGaugeWithLabels(key []string, val float32, labels []Label) { + flatKey := s.flattenKeyLabels(key, labels) + s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val)) +} + +func (s *StatsdSink) EmitKey(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val)) +} + +func (s *StatsdSink) IncrCounter(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val)) +} + +func (s *StatsdSink) IncrCounterWithLabels(key []string, val float32, labels []Label) { + flatKey := s.flattenKeyLabels(key, labels) + s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val)) +} + +func (s *StatsdSink) AddSample(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val)) +} + +func (s *StatsdSink) AddSampleWithLabels(key []string, val float32, labels []Label) { + flatKey := s.flattenKeyLabels(key, labels) + s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val)) +} + +// Flattens the key for formatting, removes spaces +func (s *StatsdSink) flattenKey(parts []string) string { + joined := strings.Join(parts, ".") + return strings.Map(func(r rune) rune { + switch r { + case ':': + fallthrough + case ' ': + return '_' + default: + return r + } + }, joined) +} + +// Flattens the key along with labels for formatting, removes spaces +func (s *StatsdSink) flattenKeyLabels(parts []string, labels []Label) string { + for _, label := range labels { + parts = append(parts, label.Value) + } + return s.flattenKey(parts) +} + +// Does a non-blocking push to the metrics queue +func (s *StatsdSink) pushMetric(m string) { + select { + case s.metricQueue <- m: + default: + } +} + +// Flushes metrics +func (s *StatsdSink) flushMetrics() { + var sock net.Conn + var err error + var wait <-chan time.Time + ticker := time.NewTicker(flushInterval) + defer ticker.Stop() + +CONNECT: + // Create a buffer + buf := bytes.NewBuffer(nil) + + // Attempt to connect + sock, err = net.Dial("udp", s.addr) + if err != nil { + log.Printf("[ERR] Error connecting to statsd! Err: %s", err) + goto WAIT + } + + for { + select { + case metric, ok := <-s.metricQueue: + // Get a metric from the queue + if !ok { + goto QUIT + } + + // Check if this would overflow the packet size + if len(metric)+buf.Len() > statsdMaxLen { + _, err := sock.Write(buf.Bytes()) + buf.Reset() + if err != nil { + log.Printf("[ERR] Error writing to statsd! Err: %s", err) + goto WAIT + } + } + + // Append to the buffer + buf.WriteString(metric) + + case <-ticker.C: + if buf.Len() == 0 { + continue + } + + _, err := sock.Write(buf.Bytes()) + buf.Reset() + if err != nil { + log.Printf("[ERR] Error flushing to statsd! 
Err: %s", err) + goto WAIT + } + } + } + +WAIT: + // Wait for a while + wait = time.After(time.Duration(5) * time.Second) + for { + select { + // Dequeue the messages to avoid backlog + case _, ok := <-s.metricQueue: + if !ok { + goto QUIT + } + case <-wait: + goto CONNECT + } + } +QUIT: + s.metricQueue = nil +} diff --git a/vendor/github.com/armon/go-metrics/statsite.go b/vendor/github.com/armon/go-metrics/statsite.go new file mode 100644 index 0000000000..6c0d284d2d --- /dev/null +++ b/vendor/github.com/armon/go-metrics/statsite.go @@ -0,0 +1,172 @@ +package metrics + +import ( + "bufio" + "fmt" + "log" + "net" + "net/url" + "strings" + "time" +) + +const ( + // We force flush the statsite metrics after this period of + // inactivity. Prevents stats from getting stuck in a buffer + // forever. + flushInterval = 100 * time.Millisecond +) + +// NewStatsiteSinkFromURL creates an StatsiteSink from a URL. It is used +// (and tested) from NewMetricSinkFromURL. +func NewStatsiteSinkFromURL(u *url.URL) (MetricSink, error) { + return NewStatsiteSink(u.Host) +} + +// StatsiteSink provides a MetricSink that can be used with a +// statsite metrics server +type StatsiteSink struct { + addr string + metricQueue chan string +} + +// NewStatsiteSink is used to create a new StatsiteSink +func NewStatsiteSink(addr string) (*StatsiteSink, error) { + s := &StatsiteSink{ + addr: addr, + metricQueue: make(chan string, 4096), + } + go s.flushMetrics() + return s, nil +} + +// Close is used to stop flushing to statsite +func (s *StatsiteSink) Shutdown() { + close(s.metricQueue) +} + +func (s *StatsiteSink) SetGauge(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val)) +} + +func (s *StatsiteSink) SetGaugeWithLabels(key []string, val float32, labels []Label) { + flatKey := s.flattenKeyLabels(key, labels) + s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val)) +} + +func (s *StatsiteSink) EmitKey(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val)) +} + +func (s *StatsiteSink) IncrCounter(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val)) +} + +func (s *StatsiteSink) IncrCounterWithLabels(key []string, val float32, labels []Label) { + flatKey := s.flattenKeyLabels(key, labels) + s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val)) +} + +func (s *StatsiteSink) AddSample(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val)) +} + +func (s *StatsiteSink) AddSampleWithLabels(key []string, val float32, labels []Label) { + flatKey := s.flattenKeyLabels(key, labels) + s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val)) +} + +// Flattens the key for formatting, removes spaces +func (s *StatsiteSink) flattenKey(parts []string) string { + joined := strings.Join(parts, ".") + return strings.Map(func(r rune) rune { + switch r { + case ':': + fallthrough + case ' ': + return '_' + default: + return r + } + }, joined) +} + +// Flattens the key along with labels for formatting, removes spaces +func (s *StatsiteSink) flattenKeyLabels(parts []string, labels []Label) string { + for _, label := range labels { + parts = append(parts, label.Value) + } + return s.flattenKey(parts) +} + +// Does a non-blocking push to the metrics queue +func (s *StatsiteSink) pushMetric(m string) { + select { + case s.metricQueue <- m: + default: + } +} + +// Flushes metrics +func (s *StatsiteSink) 
flushMetrics() { + var sock net.Conn + var err error + var wait <-chan time.Time + var buffered *bufio.Writer + ticker := time.NewTicker(flushInterval) + defer ticker.Stop() + +CONNECT: + // Attempt to connect + sock, err = net.Dial("tcp", s.addr) + if err != nil { + log.Printf("[ERR] Error connecting to statsite! Err: %s", err) + goto WAIT + } + + // Create a buffered writer + buffered = bufio.NewWriter(sock) + + for { + select { + case metric, ok := <-s.metricQueue: + // Get a metric from the queue + if !ok { + goto QUIT + } + + // Try to send to statsite + _, err := buffered.Write([]byte(metric)) + if err != nil { + log.Printf("[ERR] Error writing to statsite! Err: %s", err) + goto WAIT + } + case <-ticker.C: + if err := buffered.Flush(); err != nil { + log.Printf("[ERR] Error flushing to statsite! Err: %s", err) + goto WAIT + } + } + } + +WAIT: + // Wait for a while + wait = time.After(time.Duration(5) * time.Second) + for { + select { + // Dequeue the messages to avoid backlog + case _, ok := <-s.metricQueue: + if !ok { + goto QUIT + } + case <-wait: + goto CONNECT + } + } +QUIT: + s.metricQueue = nil +} diff --git a/vendor/github.com/armon/go-radix/go.mod b/vendor/github.com/armon/go-radix/go.mod new file mode 100644 index 0000000000..4336aa29ea --- /dev/null +++ b/vendor/github.com/armon/go-radix/go.mod @@ -0,0 +1 @@ +module github.com/armon/go-radix diff --git a/vendor/github.com/cenkalti/backoff/v3/.gitignore b/vendor/github.com/cenkalti/backoff/v3/.gitignore new file mode 100644 index 0000000000..00268614f0 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v3/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/vendor/github.com/cenkalti/backoff/v3/.travis.yml b/vendor/github.com/cenkalti/backoff/v3/.travis.yml new file mode 100644 index 0000000000..47a6a46ec2 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v3/.travis.yml @@ -0,0 +1,10 @@ +language: go +go: + - 1.7 + - 1.x + - tip +before_install: + - go get github.com/mattn/goveralls + - go get golang.org/x/tools/cmd/cover +script: + - $HOME/gopath/bin/goveralls -service=travis-ci diff --git a/vendor/github.com/cenkalti/backoff/v3/LICENSE b/vendor/github.com/cenkalti/backoff/v3/LICENSE new file mode 100644 index 0000000000..89b8179965 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v3/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2014 Cenk Altı + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/cenkalti/backoff/v3/README.md b/vendor/github.com/cenkalti/backoff/v3/README.md new file mode 100644 index 0000000000..55ebc98fc2 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v3/README.md @@ -0,0 +1,30 @@ +# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Build Status][travis image]][travis] [![Coverage Status][coveralls image]][coveralls] + +This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client]. + +[Exponential backoff][exponential backoff wiki] +is an algorithm that uses feedback to multiplicatively decrease the rate of some process, +in order to gradually find an acceptable rate. +The retries exponentially increase and stop increasing when a certain threshold is met. + +## Usage + +See https://godoc.org/github.com/cenkalti/backoff#pkg-examples + +## Contributing + +* I would like to keep this library as small as possible. +* Please don't send a PR without opening an issue and discussing it first. +* If proposed change is not a common use case, I will probably not accept it. + +[godoc]: https://godoc.org/github.com/cenkalti/backoff +[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png +[travis]: https://travis-ci.org/cenkalti/backoff +[travis image]: https://travis-ci.org/cenkalti/backoff.png?branch=master +[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master +[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master + +[google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java +[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff + +[advanced example]: https://godoc.org/github.com/cenkalti/backoff#example_ diff --git a/vendor/github.com/cenkalti/backoff/v3/backoff.go b/vendor/github.com/cenkalti/backoff/v3/backoff.go new file mode 100644 index 0000000000..3676ee405d --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v3/backoff.go @@ -0,0 +1,66 @@ +// Package backoff implements backoff algorithms for retrying operations. +// +// Use Retry function for retrying operations that may fail. +// If Retry does not meet your needs, +// copy/paste the function into your project and modify as you wish. +// +// There is also Ticker type similar to time.Ticker. +// You can use it if you need to work with channels. +// +// See Examples section below for usage examples. +package backoff + +import "time" + +// BackOff is a backoff policy for retrying an operation. +type BackOff interface { + // NextBackOff returns the duration to wait before retrying the operation, + // or backoff. Stop to indicate that no more retries should be made. + // + // Example usage: + // + // duration := backoff.NextBackOff(); + // if (duration == backoff.Stop) { + // // Do not retry operation. + // } else { + // // Sleep for duration and retry operation. + // } + // + NextBackOff() time.Duration + + // Reset to initial state. + Reset() +} + +// Stop indicates that no more retries should be made for use in NextBackOff(). 
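+//
+// Illustrative retry loop sketch (not from the upstream source; "operation"
+// and "policy" are placeholders for a func() error and a BackOff):
+//
+//	for {
+//		if err := operation(); err == nil {
+//			break
+//		}
+//		next := policy.NextBackOff()
+//		if next == backoff.Stop {
+//			break
+//		}
+//		time.Sleep(next)
+//	}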
+const Stop time.Duration = -1 + +// ZeroBackOff is a fixed backoff policy whose backoff time is always zero, +// meaning that the operation is retried immediately without waiting, indefinitely. +type ZeroBackOff struct{} + +func (b *ZeroBackOff) Reset() {} + +func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 } + +// StopBackOff is a fixed backoff policy that always returns backoff.Stop for +// NextBackOff(), meaning that the operation should never be retried. +type StopBackOff struct{} + +func (b *StopBackOff) Reset() {} + +func (b *StopBackOff) NextBackOff() time.Duration { return Stop } + +// ConstantBackOff is a backoff policy that always returns the same backoff delay. +// This is in contrast to an exponential backoff policy, +// which returns a delay that grows longer as you call NextBackOff() over and over again. +type ConstantBackOff struct { + Interval time.Duration +} + +func (b *ConstantBackOff) Reset() {} +func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval } + +func NewConstantBackOff(d time.Duration) *ConstantBackOff { + return &ConstantBackOff{Interval: d} +} diff --git a/vendor/github.com/cenkalti/backoff/v3/context.go b/vendor/github.com/cenkalti/backoff/v3/context.go new file mode 100644 index 0000000000..7706faa2b6 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v3/context.go @@ -0,0 +1,63 @@ +package backoff + +import ( + "context" + "time" +) + +// BackOffContext is a backoff policy that stops retrying after the context +// is canceled. +type BackOffContext interface { + BackOff + Context() context.Context +} + +type backOffContext struct { + BackOff + ctx context.Context +} + +// WithContext returns a BackOffContext with context ctx +// +// ctx must not be nil +func WithContext(b BackOff, ctx context.Context) BackOffContext { + if ctx == nil { + panic("nil context") + } + + if b, ok := b.(*backOffContext); ok { + return &backOffContext{ + BackOff: b.BackOff, + ctx: ctx, + } + } + + return &backOffContext{ + BackOff: b, + ctx: ctx, + } +} + +func ensureContext(b BackOff) BackOffContext { + if cb, ok := b.(BackOffContext); ok { + return cb + } + return WithContext(b, context.Background()) +} + +func (b *backOffContext) Context() context.Context { + return b.ctx +} + +func (b *backOffContext) NextBackOff() time.Duration { + select { + case <-b.ctx.Done(): + return Stop + default: + } + next := b.BackOff.NextBackOff() + if deadline, ok := b.ctx.Deadline(); ok && deadline.Sub(time.Now()) < next { + return Stop + } + return next +} diff --git a/vendor/github.com/cenkalti/backoff/v3/exponential.go b/vendor/github.com/cenkalti/backoff/v3/exponential.go new file mode 100644 index 0000000000..a031a65979 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v3/exponential.go @@ -0,0 +1,153 @@ +package backoff + +import ( + "math/rand" + "time" +) + +/* +ExponentialBackOff is a backoff implementation that increases the backoff +period for each retry attempt using a randomization function that grows exponentially. + +NextBackOff() is calculated using the following formula: + + randomized interval = + RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor]) + +In other words NextBackOff() will range between the randomization factor +percentage below and above the retry interval. 
+ +For example, given the following parameters: + + RetryInterval = 2 + RandomizationFactor = 0.5 + Multiplier = 2 + +the actual backoff period used in the next retry attempt will range between 1 and 3 seconds, +multiplied by the exponential, that is, between 2 and 6 seconds. + +Note: MaxInterval caps the RetryInterval and not the randomized interval. + +If the time elapsed since an ExponentialBackOff instance is created goes past the +MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop. + +The elapsed time can be reset by calling Reset(). + +Example: Given the following default arguments, for 10 tries the sequence will be, +and assuming we go over the MaxElapsedTime on the 10th try: + + Request # RetryInterval (seconds) Randomized Interval (seconds) + + 1 0.5 [0.25, 0.75] + 2 0.75 [0.375, 1.125] + 3 1.125 [0.562, 1.687] + 4 1.687 [0.8435, 2.53] + 5 2.53 [1.265, 3.795] + 6 3.795 [1.897, 5.692] + 7 5.692 [2.846, 8.538] + 8 8.538 [4.269, 12.807] + 9 12.807 [6.403, 19.210] + 10 19.210 backoff.Stop + +Note: Implementation is not thread-safe. +*/ +type ExponentialBackOff struct { + InitialInterval time.Duration + RandomizationFactor float64 + Multiplier float64 + MaxInterval time.Duration + // After MaxElapsedTime the ExponentialBackOff stops. + // It never stops if MaxElapsedTime == 0. + MaxElapsedTime time.Duration + Clock Clock + + currentInterval time.Duration + startTime time.Time +} + +// Clock is an interface that returns current time for BackOff. +type Clock interface { + Now() time.Time +} + +// Default values for ExponentialBackOff. +const ( + DefaultInitialInterval = 500 * time.Millisecond + DefaultRandomizationFactor = 0.5 + DefaultMultiplier = 1.5 + DefaultMaxInterval = 60 * time.Second + DefaultMaxElapsedTime = 15 * time.Minute +) + +// NewExponentialBackOff creates an instance of ExponentialBackOff using default values. +func NewExponentialBackOff() *ExponentialBackOff { + b := &ExponentialBackOff{ + InitialInterval: DefaultInitialInterval, + RandomizationFactor: DefaultRandomizationFactor, + Multiplier: DefaultMultiplier, + MaxInterval: DefaultMaxInterval, + MaxElapsedTime: DefaultMaxElapsedTime, + Clock: SystemClock, + } + b.Reset() + return b +} + +type systemClock struct{} + +func (t systemClock) Now() time.Time { + return time.Now() +} + +// SystemClock implements Clock interface that uses time.Now(). +var SystemClock = systemClock{} + +// Reset the interval back to the initial retry interval and restarts the timer. +func (b *ExponentialBackOff) Reset() { + b.currentInterval = b.InitialInterval + b.startTime = b.Clock.Now() +} + +// NextBackOff calculates the next backoff interval using the formula: +// Randomized interval = RetryInterval +/- (RandomizationFactor * RetryInterval) +func (b *ExponentialBackOff) NextBackOff() time.Duration { + // Make sure we have not gone over the maximum elapsed time. + if b.MaxElapsedTime != 0 && b.GetElapsedTime() > b.MaxElapsedTime { + return Stop + } + defer b.incrementCurrentInterval() + return getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval) +} + +// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance +// is created and is reset when Reset() is called. +// +// The elapsed time is computed using time.Now().UnixNano(). It is +// safe to call even while the backoff policy is used by a running +// ticker. 
+func (b *ExponentialBackOff) GetElapsedTime() time.Duration { + return b.Clock.Now().Sub(b.startTime) +} + +// Increments the current interval by multiplying it with the multiplier. +func (b *ExponentialBackOff) incrementCurrentInterval() { + // Check for overflow, if overflow is detected set the current interval to the max interval. + if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier { + b.currentInterval = b.MaxInterval + } else { + b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier) + } +} + +// Returns a random value from the following interval: +// [randomizationFactor * currentInterval, randomizationFactor * currentInterval]. +func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration { + var delta = randomizationFactor * float64(currentInterval) + var minInterval = float64(currentInterval) - delta + var maxInterval = float64(currentInterval) + delta + + // Get a random value from the range [minInterval, maxInterval]. + // The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then + // we want a 33% chance for selecting either 1, 2 or 3. + return time.Duration(minInterval + (random * (maxInterval - minInterval + 1))) +} diff --git a/vendor/github.com/cenkalti/backoff/v3/go.mod b/vendor/github.com/cenkalti/backoff/v3/go.mod new file mode 100644 index 0000000000..479e62adaf --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v3/go.mod @@ -0,0 +1,3 @@ +module github.com/cenkalti/backoff/v3 + +go 1.12 diff --git a/vendor/github.com/cenkalti/backoff/v3/retry.go b/vendor/github.com/cenkalti/backoff/v3/retry.go new file mode 100644 index 0000000000..e936a506f8 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v3/retry.go @@ -0,0 +1,82 @@ +package backoff + +import "time" + +// An Operation is executing by Retry() or RetryNotify(). +// The operation will be retried using a backoff policy if it returns an error. +type Operation func() error + +// Notify is a notify-on-error function. It receives an operation error and +// backoff delay if the operation failed (with an error). +// +// NOTE that if the backoff policy stated to stop retrying, +// the notify function isn't called. +type Notify func(error, time.Duration) + +// Retry the operation o until it does not return error or BackOff stops. +// o is guaranteed to be run at least once. +// +// If o returns a *PermanentError, the operation is not retried, and the +// wrapped error is returned. +// +// Retry sleeps the goroutine for the duration returned by BackOff after a +// failed operation returns. +func Retry(o Operation, b BackOff) error { return RetryNotify(o, b, nil) } + +// RetryNotify calls notify function with the error and wait duration +// for each failed attempt before sleep. +func RetryNotify(operation Operation, b BackOff, notify Notify) error { + var err error + var next time.Duration + var t *time.Timer + + cb := ensureContext(b) + + b.Reset() + for { + if err = operation(); err == nil { + return nil + } + + if permanent, ok := err.(*PermanentError); ok { + return permanent.Err + } + + if next = cb.NextBackOff(); next == Stop { + return err + } + + if notify != nil { + notify(err, next) + } + + if t == nil { + t = time.NewTimer(next) + defer t.Stop() + } else { + t.Reset(next) + } + + select { + case <-cb.Context().Done(): + return err + case <-t.C: + } + } +} + +// PermanentError signals that the operation should not be retried. 
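+//
+// Illustrative sketch (not from the upstream source; the URL is a
+// placeholder): a permanent error stops Retry immediately, while other
+// errors are retried with backoff.
+//
+//	op := func() error {
+//		resp, err := http.Get("https://example.invalid/health")
+//		if err != nil {
+//			return err // transient: retried
+//		}
+//		defer resp.Body.Close()
+//		if resp.StatusCode == http.StatusUnauthorized {
+//			return backoff.Permanent(errors.New("unauthorized")) // not retried
+//		}
+//		return nil
+//	}
+//	err := backoff.Retry(op, backoff.NewExponentialBackOff())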
+type PermanentError struct { + Err error +} + +func (e *PermanentError) Error() string { + return e.Err.Error() +} + +// Permanent wraps the given err in a *PermanentError. +func Permanent(err error) *PermanentError { + return &PermanentError{ + Err: err, + } +} diff --git a/vendor/github.com/cenkalti/backoff/v3/ticker.go b/vendor/github.com/cenkalti/backoff/v3/ticker.go new file mode 100644 index 0000000000..e41084b0ef --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v3/ticker.go @@ -0,0 +1,82 @@ +package backoff + +import ( + "sync" + "time" +) + +// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff. +// +// Ticks will continue to arrive when the previous operation is still running, +// so operations that take a while to fail could run in quick succession. +type Ticker struct { + C <-chan time.Time + c chan time.Time + b BackOffContext + stop chan struct{} + stopOnce sync.Once +} + +// NewTicker returns a new Ticker containing a channel that will send +// the time at times specified by the BackOff argument. Ticker is +// guaranteed to tick at least once. The channel is closed when Stop +// method is called or BackOff stops. It is not safe to manipulate the +// provided backoff policy (notably calling NextBackOff or Reset) +// while the ticker is running. +func NewTicker(b BackOff) *Ticker { + c := make(chan time.Time) + t := &Ticker{ + C: c, + c: c, + b: ensureContext(b), + stop: make(chan struct{}), + } + t.b.Reset() + go t.run() + return t +} + +// Stop turns off a ticker. After Stop, no more ticks will be sent. +func (t *Ticker) Stop() { + t.stopOnce.Do(func() { close(t.stop) }) +} + +func (t *Ticker) run() { + c := t.c + defer close(c) + + // Ticker is guaranteed to tick at least once. + afterC := t.send(time.Now()) + + for { + if afterC == nil { + return + } + + select { + case tick := <-afterC: + afterC = t.send(tick) + case <-t.stop: + t.c = nil // Prevent future ticks from being sent to the channel. + return + case <-t.b.Context().Done(): + return + } + } +} + +func (t *Ticker) send(tick time.Time) <-chan time.Time { + select { + case t.c <- tick: + case <-t.stop: + return nil + } + + next := t.b.NextBackOff() + if next == Stop { + t.Stop() + return nil + } + + return time.After(next) +} diff --git a/vendor/github.com/cenkalti/backoff/v3/tries.go b/vendor/github.com/cenkalti/backoff/v3/tries.go new file mode 100644 index 0000000000..cfeefd9b76 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v3/tries.go @@ -0,0 +1,35 @@ +package backoff + +import "time" + +/* +WithMaxRetries creates a wrapper around another BackOff, which will +return Stop if NextBackOff() has been called too many times since +the last time Reset() was called + +Note: Implementation is not thread-safe. 
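+
+A minimal usage sketch (op stands in for the caller's own Operation); this
+caps the number of retries at 5 on top of the exponential policy:
+
+	b := WithMaxRetries(NewExponentialBackOff(), 5)
+	err := Retry(op, b)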
+*/ +func WithMaxRetries(b BackOff, max uint64) BackOff { + return &backOffTries{delegate: b, maxTries: max} +} + +type backOffTries struct { + delegate BackOff + maxTries uint64 + numTries uint64 +} + +func (b *backOffTries) NextBackOff() time.Duration { + if b.maxTries > 0 { + if b.maxTries <= b.numTries { + return Stop + } + b.numTries++ + } + return b.delegate.NextBackOff() +} + +func (b *backOffTries) Reset() { + b.numTries = 0 + b.delegate.Reset() +} diff --git a/vendor/github.com/golang/protobuf/proto/registry.go b/vendor/github.com/golang/protobuf/proto/registry.go index 1e7ff64205..066b4323b4 100644 --- a/vendor/github.com/golang/protobuf/proto/registry.go +++ b/vendor/github.com/golang/protobuf/proto/registry.go @@ -13,6 +13,7 @@ import ( "strings" "sync" + "google.golang.org/protobuf/reflect/protodesc" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" "google.golang.org/protobuf/runtime/protoimpl" @@ -62,14 +63,7 @@ func FileDescriptor(s filePath) fileDescGZIP { // Find the descriptor in the v2 registry. var b []byte if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil { - if fd, ok := fd.(interface{ ProtoLegacyRawDesc() []byte }); ok { - b = fd.ProtoLegacyRawDesc() - } else { - // TODO: Use protodesc.ToFileDescriptorProto to construct - // a descriptorpb.FileDescriptorProto and marshal it. - // However, doing so causes the proto package to have a dependency - // on descriptorpb, leading to cyclic dependency issues. - } + b, _ = Marshal(protodesc.ToFileDescriptorProto(fd)) } // Locally cache the raw descriptor form for the file. diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go new file mode 100644 index 0000000000..63dc057851 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go @@ -0,0 +1,200 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto + +package descriptor + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" +) + +// Symbols defined in public import of google/protobuf/descriptor.proto. 
+ +type FieldDescriptorProto_Type = descriptorpb.FieldDescriptorProto_Type + +const FieldDescriptorProto_TYPE_DOUBLE = descriptorpb.FieldDescriptorProto_TYPE_DOUBLE +const FieldDescriptorProto_TYPE_FLOAT = descriptorpb.FieldDescriptorProto_TYPE_FLOAT +const FieldDescriptorProto_TYPE_INT64 = descriptorpb.FieldDescriptorProto_TYPE_INT64 +const FieldDescriptorProto_TYPE_UINT64 = descriptorpb.FieldDescriptorProto_TYPE_UINT64 +const FieldDescriptorProto_TYPE_INT32 = descriptorpb.FieldDescriptorProto_TYPE_INT32 +const FieldDescriptorProto_TYPE_FIXED64 = descriptorpb.FieldDescriptorProto_TYPE_FIXED64 +const FieldDescriptorProto_TYPE_FIXED32 = descriptorpb.FieldDescriptorProto_TYPE_FIXED32 +const FieldDescriptorProto_TYPE_BOOL = descriptorpb.FieldDescriptorProto_TYPE_BOOL +const FieldDescriptorProto_TYPE_STRING = descriptorpb.FieldDescriptorProto_TYPE_STRING +const FieldDescriptorProto_TYPE_GROUP = descriptorpb.FieldDescriptorProto_TYPE_GROUP +const FieldDescriptorProto_TYPE_MESSAGE = descriptorpb.FieldDescriptorProto_TYPE_MESSAGE +const FieldDescriptorProto_TYPE_BYTES = descriptorpb.FieldDescriptorProto_TYPE_BYTES +const FieldDescriptorProto_TYPE_UINT32 = descriptorpb.FieldDescriptorProto_TYPE_UINT32 +const FieldDescriptorProto_TYPE_ENUM = descriptorpb.FieldDescriptorProto_TYPE_ENUM +const FieldDescriptorProto_TYPE_SFIXED32 = descriptorpb.FieldDescriptorProto_TYPE_SFIXED32 +const FieldDescriptorProto_TYPE_SFIXED64 = descriptorpb.FieldDescriptorProto_TYPE_SFIXED64 +const FieldDescriptorProto_TYPE_SINT32 = descriptorpb.FieldDescriptorProto_TYPE_SINT32 +const FieldDescriptorProto_TYPE_SINT64 = descriptorpb.FieldDescriptorProto_TYPE_SINT64 + +var FieldDescriptorProto_Type_name = descriptorpb.FieldDescriptorProto_Type_name +var FieldDescriptorProto_Type_value = descriptorpb.FieldDescriptorProto_Type_value + +type FieldDescriptorProto_Label = descriptorpb.FieldDescriptorProto_Label + +const FieldDescriptorProto_LABEL_OPTIONAL = descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL +const FieldDescriptorProto_LABEL_REQUIRED = descriptorpb.FieldDescriptorProto_LABEL_REQUIRED +const FieldDescriptorProto_LABEL_REPEATED = descriptorpb.FieldDescriptorProto_LABEL_REPEATED + +var FieldDescriptorProto_Label_name = descriptorpb.FieldDescriptorProto_Label_name +var FieldDescriptorProto_Label_value = descriptorpb.FieldDescriptorProto_Label_value + +type FileOptions_OptimizeMode = descriptorpb.FileOptions_OptimizeMode + +const FileOptions_SPEED = descriptorpb.FileOptions_SPEED +const FileOptions_CODE_SIZE = descriptorpb.FileOptions_CODE_SIZE +const FileOptions_LITE_RUNTIME = descriptorpb.FileOptions_LITE_RUNTIME + +var FileOptions_OptimizeMode_name = descriptorpb.FileOptions_OptimizeMode_name +var FileOptions_OptimizeMode_value = descriptorpb.FileOptions_OptimizeMode_value + +type FieldOptions_CType = descriptorpb.FieldOptions_CType + +const FieldOptions_STRING = descriptorpb.FieldOptions_STRING +const FieldOptions_CORD = descriptorpb.FieldOptions_CORD +const FieldOptions_STRING_PIECE = descriptorpb.FieldOptions_STRING_PIECE + +var FieldOptions_CType_name = descriptorpb.FieldOptions_CType_name +var FieldOptions_CType_value = descriptorpb.FieldOptions_CType_value + +type FieldOptions_JSType = descriptorpb.FieldOptions_JSType + +const FieldOptions_JS_NORMAL = descriptorpb.FieldOptions_JS_NORMAL +const FieldOptions_JS_STRING = descriptorpb.FieldOptions_JS_STRING +const FieldOptions_JS_NUMBER = descriptorpb.FieldOptions_JS_NUMBER + +var FieldOptions_JSType_name = descriptorpb.FieldOptions_JSType_name +var 
FieldOptions_JSType_value = descriptorpb.FieldOptions_JSType_value + +type MethodOptions_IdempotencyLevel = descriptorpb.MethodOptions_IdempotencyLevel + +const MethodOptions_IDEMPOTENCY_UNKNOWN = descriptorpb.MethodOptions_IDEMPOTENCY_UNKNOWN +const MethodOptions_NO_SIDE_EFFECTS = descriptorpb.MethodOptions_NO_SIDE_EFFECTS +const MethodOptions_IDEMPOTENT = descriptorpb.MethodOptions_IDEMPOTENT + +var MethodOptions_IdempotencyLevel_name = descriptorpb.MethodOptions_IdempotencyLevel_name +var MethodOptions_IdempotencyLevel_value = descriptorpb.MethodOptions_IdempotencyLevel_value + +type FileDescriptorSet = descriptorpb.FileDescriptorSet +type FileDescriptorProto = descriptorpb.FileDescriptorProto +type DescriptorProto = descriptorpb.DescriptorProto +type ExtensionRangeOptions = descriptorpb.ExtensionRangeOptions +type FieldDescriptorProto = descriptorpb.FieldDescriptorProto +type OneofDescriptorProto = descriptorpb.OneofDescriptorProto +type EnumDescriptorProto = descriptorpb.EnumDescriptorProto +type EnumValueDescriptorProto = descriptorpb.EnumValueDescriptorProto +type ServiceDescriptorProto = descriptorpb.ServiceDescriptorProto +type MethodDescriptorProto = descriptorpb.MethodDescriptorProto + +const Default_MethodDescriptorProto_ClientStreaming = descriptorpb.Default_MethodDescriptorProto_ClientStreaming +const Default_MethodDescriptorProto_ServerStreaming = descriptorpb.Default_MethodDescriptorProto_ServerStreaming + +type FileOptions = descriptorpb.FileOptions + +const Default_FileOptions_JavaMultipleFiles = descriptorpb.Default_FileOptions_JavaMultipleFiles +const Default_FileOptions_JavaStringCheckUtf8 = descriptorpb.Default_FileOptions_JavaStringCheckUtf8 +const Default_FileOptions_OptimizeFor = descriptorpb.Default_FileOptions_OptimizeFor +const Default_FileOptions_CcGenericServices = descriptorpb.Default_FileOptions_CcGenericServices +const Default_FileOptions_JavaGenericServices = descriptorpb.Default_FileOptions_JavaGenericServices +const Default_FileOptions_PyGenericServices = descriptorpb.Default_FileOptions_PyGenericServices +const Default_FileOptions_PhpGenericServices = descriptorpb.Default_FileOptions_PhpGenericServices +const Default_FileOptions_Deprecated = descriptorpb.Default_FileOptions_Deprecated +const Default_FileOptions_CcEnableArenas = descriptorpb.Default_FileOptions_CcEnableArenas + +type MessageOptions = descriptorpb.MessageOptions + +const Default_MessageOptions_MessageSetWireFormat = descriptorpb.Default_MessageOptions_MessageSetWireFormat +const Default_MessageOptions_NoStandardDescriptorAccessor = descriptorpb.Default_MessageOptions_NoStandardDescriptorAccessor +const Default_MessageOptions_Deprecated = descriptorpb.Default_MessageOptions_Deprecated + +type FieldOptions = descriptorpb.FieldOptions + +const Default_FieldOptions_Ctype = descriptorpb.Default_FieldOptions_Ctype +const Default_FieldOptions_Jstype = descriptorpb.Default_FieldOptions_Jstype +const Default_FieldOptions_Lazy = descriptorpb.Default_FieldOptions_Lazy +const Default_FieldOptions_Deprecated = descriptorpb.Default_FieldOptions_Deprecated +const Default_FieldOptions_Weak = descriptorpb.Default_FieldOptions_Weak + +type OneofOptions = descriptorpb.OneofOptions +type EnumOptions = descriptorpb.EnumOptions + +const Default_EnumOptions_Deprecated = descriptorpb.Default_EnumOptions_Deprecated + +type EnumValueOptions = descriptorpb.EnumValueOptions + +const Default_EnumValueOptions_Deprecated = descriptorpb.Default_EnumValueOptions_Deprecated + +type ServiceOptions = 
descriptorpb.ServiceOptions + +const Default_ServiceOptions_Deprecated = descriptorpb.Default_ServiceOptions_Deprecated + +type MethodOptions = descriptorpb.MethodOptions + +const Default_MethodOptions_Deprecated = descriptorpb.Default_MethodOptions_Deprecated +const Default_MethodOptions_IdempotencyLevel = descriptorpb.Default_MethodOptions_IdempotencyLevel + +type UninterpretedOption = descriptorpb.UninterpretedOption +type SourceCodeInfo = descriptorpb.SourceCodeInfo +type GeneratedCodeInfo = descriptorpb.GeneratedCodeInfo +type DescriptorProto_ExtensionRange = descriptorpb.DescriptorProto_ExtensionRange +type DescriptorProto_ReservedRange = descriptorpb.DescriptorProto_ReservedRange +type EnumDescriptorProto_EnumReservedRange = descriptorpb.EnumDescriptorProto_EnumReservedRange +type UninterpretedOption_NamePart = descriptorpb.UninterpretedOption_NamePart +type SourceCodeInfo_Location = descriptorpb.SourceCodeInfo_Location +type GeneratedCodeInfo_Annotation = descriptorpb.GeneratedCodeInfo_Annotation + +var File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto protoreflect.FileDescriptor + +var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc = []byte{ + 0x0a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x40, 0x5a, 0x3e, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, + 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x3b, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x32, +} + +var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes = []interface{}{} +var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_init() } +func file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_init() { + if File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes, + DependencyIndexes: 
file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs, + }.Build() + File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto = out.File + file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc = nil + file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes = nil + file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs = nil +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go index e729dcff13..85f9f57365 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any.go +++ b/vendor/github.com/golang/protobuf/ptypes/any.go @@ -19,6 +19,8 @@ const urlPrefix = "type.googleapis.com/" // AnyMessageName returns the message name contained in an anypb.Any message. // Most type assertions should use the Is function instead. +// +// Deprecated: Call the any.MessageName method instead. func AnyMessageName(any *anypb.Any) (string, error) { name, err := anyMessageName(any) return string(name), err @@ -38,6 +40,8 @@ func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) { } // MarshalAny marshals the given message m into an anypb.Any message. +// +// Deprecated: Call the anypb.New function instead. func MarshalAny(m proto.Message) (*anypb.Any, error) { switch dm := m.(type) { case DynamicAny: @@ -58,6 +62,9 @@ func MarshalAny(m proto.Message) (*anypb.Any, error) { // Empty returns a new message of the type specified in an anypb.Any message. // It returns protoregistry.NotFound if the corresponding message type could not // be resolved in the global registry. +// +// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead +// to resolve the message name and create a new instance of it. func Empty(any *anypb.Any) (proto.Message, error) { name, err := anyMessageName(any) if err != nil { @@ -76,6 +83,8 @@ func Empty(any *anypb.Any) (proto.Message, error) { // // The target message m may be a *DynamicAny message. If the underlying message // type could not be resolved, then this returns protoregistry.NotFound. +// +// Deprecated: Call the any.UnmarshalTo method instead. func UnmarshalAny(any *anypb.Any, m proto.Message) error { if dm, ok := m.(*DynamicAny); ok { if dm.Message == nil { @@ -100,6 +109,8 @@ func UnmarshalAny(any *anypb.Any, m proto.Message) error { } // Is reports whether the Any message contains a message of the specified type. +// +// Deprecated: Call the any.MessageIs method instead. func Is(any *anypb.Any, m proto.Message) bool { if any == nil || m == nil { return false @@ -119,6 +130,9 @@ func Is(any *anypb.Any, m proto.Message) bool { // var x ptypes.DynamicAny // if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } // fmt.Printf("unmarshaled message: %v", x.Message) +// +// Deprecated: Use the any.UnmarshalNew method instead to unmarshal +// the any message contents into a new instance of the underlying message. type DynamicAny struct{ proto.Message } func (m DynamicAny) String() string { diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go index fb9edd5c62..d3c33259d2 100644 --- a/vendor/github.com/golang/protobuf/ptypes/doc.go +++ b/vendor/github.com/golang/protobuf/ptypes/doc.go @@ -3,4 +3,8 @@ // license that can be found in the LICENSE file. // Package ptypes provides functionality for interacting with well-known types. 
+// +// Deprecated: Well-known types have specialized functionality directly +// injected into the generated packages for each message type. +// See the deprecation notice for each function for the suggested alternative. package ptypes diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go index 6110ae8a41..b2b55dd851 100644 --- a/vendor/github.com/golang/protobuf/ptypes/duration.go +++ b/vendor/github.com/golang/protobuf/ptypes/duration.go @@ -21,6 +21,8 @@ const ( // Duration converts a durationpb.Duration to a time.Duration. // Duration returns an error if dur is invalid or overflows a time.Duration. +// +// Deprecated: Call the dur.AsDuration and dur.CheckValid methods instead. func Duration(dur *durationpb.Duration) (time.Duration, error) { if err := validateDuration(dur); err != nil { return 0, err @@ -39,6 +41,8 @@ func Duration(dur *durationpb.Duration) (time.Duration, error) { } // DurationProto converts a time.Duration to a durationpb.Duration. +// +// Deprecated: Call the durationpb.New function instead. func DurationProto(d time.Duration) *durationpb.Duration { nanos := d.Nanoseconds() secs := nanos / 1e9 diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go index 026d0d4915..8368a3f70d 100644 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp.go +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go @@ -33,6 +33,8 @@ const ( // // A nil Timestamp returns an error. The first return value in that case is // undefined. +// +// Deprecated: Call the ts.AsTime and ts.CheckValid methods instead. func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) { // Don't return the zero value on error, because corresponds to a valid // timestamp. Instead return whatever time.Unix gives us. @@ -46,6 +48,8 @@ func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) { } // TimestampNow returns a google.protobuf.Timestamp for the current time. +// +// Deprecated: Call the timestamppb.Now function instead. func TimestampNow() *timestamppb.Timestamp { ts, err := TimestampProto(time.Now()) if err != nil { @@ -56,6 +60,8 @@ func TimestampNow() *timestamppb.Timestamp { // TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. // It returns an error if the resulting Timestamp is invalid. +// +// Deprecated: Call the timestamppb.New function instead. func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) { ts := ×tamppb.Timestamp{ Seconds: t.Unix(), @@ -69,6 +75,9 @@ func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) { // TimestampString returns the RFC 3339 string for valid Timestamps. // For invalid Timestamps, it returns an error message in parentheses. +// +// Deprecated: Call the ts.AsTime method instead, +// followed by a call to the Format method on the time.Time value. func TimestampString(ts *timestamppb.Timestamp) string { t, err := Timestamp(ts) if err != nil { diff --git a/vendor/github.com/golang/snappy/AUTHORS b/vendor/github.com/golang/snappy/AUTHORS index bcfa19520a..52ccb5a934 100644 --- a/vendor/github.com/golang/snappy/AUTHORS +++ b/vendor/github.com/golang/snappy/AUTHORS @@ -8,8 +8,11 @@ # Please keep the list sorted. +Amazon.com, Inc Damian Gryski +Eric Buth Google Inc. 
Jan Mercl <0xjnml@gmail.com> +Klaus Post Rodolfo Carvalho Sebastien Binet diff --git a/vendor/github.com/golang/snappy/CONTRIBUTORS b/vendor/github.com/golang/snappy/CONTRIBUTORS index 931ae31606..ea6524ddd0 100644 --- a/vendor/github.com/golang/snappy/CONTRIBUTORS +++ b/vendor/github.com/golang/snappy/CONTRIBUTORS @@ -26,9 +26,13 @@ # Please keep the list sorted. +Alex Legg Damian Gryski +Eric Buth Jan Mercl <0xjnml@gmail.com> +Jonathan Swinney Kai Backman +Klaus Post Marc-Antoine Ruel Nigel Tao Rob Pike diff --git a/vendor/github.com/golang/snappy/decode.go b/vendor/github.com/golang/snappy/decode.go index 72efb0353d..23c6e26c6b 100644 --- a/vendor/github.com/golang/snappy/decode.go +++ b/vendor/github.com/golang/snappy/decode.go @@ -52,6 +52,8 @@ const ( // Otherwise, a newly allocated slice will be returned. // // The dst and src must not overlap. It is valid to pass a nil dst. +// +// Decode handles the Snappy block format, not the Snappy stream format. func Decode(dst, src []byte) ([]byte, error) { dLen, s, err := decodedLen(src) if err != nil { @@ -83,6 +85,8 @@ func NewReader(r io.Reader) *Reader { } // Reader is an io.Reader that can read Snappy-compressed bytes. +// +// Reader handles the Snappy stream format, not the Snappy block format. type Reader struct { r io.Reader err error @@ -114,32 +118,23 @@ func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { return true } -// Read satisfies the io.Reader interface. -func (r *Reader) Read(p []byte) (int, error) { - if r.err != nil { - return 0, r.err - } - for { - if r.i < r.j { - n := copy(p, r.decoded[r.i:r.j]) - r.i += n - return n, nil - } +func (r *Reader) fill() error { + for r.i >= r.j { if !r.readFull(r.buf[:4], true) { - return 0, r.err + return r.err } chunkType := r.buf[0] if !r.readHeader { if chunkType != chunkTypeStreamIdentifier { r.err = ErrCorrupt - return 0, r.err + return r.err } r.readHeader = true } chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 if chunkLen > len(r.buf) { r.err = ErrUnsupported - return 0, r.err + return r.err } // The chunk types are specified at @@ -149,11 +144,11 @@ func (r *Reader) Read(p []byte) (int, error) { // Section 4.2. Compressed data (chunk type 0x00). if chunkLen < checksumSize { r.err = ErrCorrupt - return 0, r.err + return r.err } buf := r.buf[:chunkLen] if !r.readFull(buf, false) { - return 0, r.err + return r.err } checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 buf = buf[checksumSize:] @@ -161,19 +156,19 @@ func (r *Reader) Read(p []byte) (int, error) { n, err := DecodedLen(buf) if err != nil { r.err = err - return 0, r.err + return r.err } if n > len(r.decoded) { r.err = ErrCorrupt - return 0, r.err + return r.err } if _, err := Decode(r.decoded, buf); err != nil { r.err = err - return 0, r.err + return r.err } if crc(r.decoded[:n]) != checksum { r.err = ErrCorrupt - return 0, r.err + return r.err } r.i, r.j = 0, n continue @@ -182,25 +177,25 @@ func (r *Reader) Read(p []byte) (int, error) { // Section 4.3. Uncompressed data (chunk type 0x01). if chunkLen < checksumSize { r.err = ErrCorrupt - return 0, r.err + return r.err } buf := r.buf[:checksumSize] if !r.readFull(buf, false) { - return 0, r.err + return r.err } checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 // Read directly into r.decoded instead of via r.buf. 
n := chunkLen - checksumSize if n > len(r.decoded) { r.err = ErrCorrupt - return 0, r.err + return r.err } if !r.readFull(r.decoded[:n], false) { - return 0, r.err + return r.err } if crc(r.decoded[:n]) != checksum { r.err = ErrCorrupt - return 0, r.err + return r.err } r.i, r.j = 0, n continue @@ -209,15 +204,15 @@ func (r *Reader) Read(p []byte) (int, error) { // Section 4.1. Stream identifier (chunk type 0xff). if chunkLen != len(magicBody) { r.err = ErrCorrupt - return 0, r.err + return r.err } if !r.readFull(r.buf[:len(magicBody)], false) { - return 0, r.err + return r.err } for i := 0; i < len(magicBody); i++ { if r.buf[i] != magicBody[i] { r.err = ErrCorrupt - return 0, r.err + return r.err } } continue @@ -226,12 +221,44 @@ func (r *Reader) Read(p []byte) (int, error) { if chunkType <= 0x7f { // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). r.err = ErrUnsupported - return 0, r.err + return r.err } // Section 4.4 Padding (chunk type 0xfe). // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). if !r.readFull(r.buf[:chunkLen], false) { - return 0, r.err + return r.err } } + + return nil +} + +// Read satisfies the io.Reader interface. +func (r *Reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + + if err := r.fill(); err != nil { + return 0, err + } + + n := copy(p, r.decoded[r.i:r.j]) + r.i += n + return n, nil +} + +// ReadByte satisfies the io.ByteReader interface. +func (r *Reader) ReadByte() (byte, error) { + if r.err != nil { + return 0, r.err + } + + if err := r.fill(); err != nil { + return 0, err + } + + c := r.decoded[r.i] + r.i++ + return c, nil } diff --git a/vendor/github.com/golang/snappy/decode_arm64.s b/vendor/github.com/golang/snappy/decode_arm64.s new file mode 100644 index 0000000000..7a3ead17ea --- /dev/null +++ b/vendor/github.com/golang/snappy/decode_arm64.s @@ -0,0 +1,494 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The asm code generally follows the pure Go code in decode_other.go, except +// where marked with a "!!!". + +// func decode(dst, src []byte) int +// +// All local variables fit into registers. The non-zero stack size is only to +// spill registers and push args when issuing a CALL. The register allocation: +// - R2 scratch +// - R3 scratch +// - R4 length or x +// - R5 offset +// - R6 &src[s] +// - R7 &dst[d] +// + R8 dst_base +// + R9 dst_len +// + R10 dst_base + dst_len +// + R11 src_base +// + R12 src_len +// + R13 src_base + src_len +// - R14 used by doCopy +// - R15 used by doCopy +// +// The registers R8-R13 (marked with a "+") are set at the start of the +// function, and after a CALL returns, and are not otherwise modified. +// +// The d variable is implicitly R7 - R8, and len(dst)-d is R10 - R7. +// The s variable is implicitly R6 - R11, and len(src)-s is R13 - R6. +TEXT ·decode(SB), NOSPLIT, $56-56 + // Initialize R6, R7 and R8-R13. 
+ MOVD dst_base+0(FP), R8 + MOVD dst_len+8(FP), R9 + MOVD R8, R7 + MOVD R8, R10 + ADD R9, R10, R10 + MOVD src_base+24(FP), R11 + MOVD src_len+32(FP), R12 + MOVD R11, R6 + MOVD R11, R13 + ADD R12, R13, R13 + +loop: + // for s < len(src) + CMP R13, R6 + BEQ end + + // R4 = uint32(src[s]) + // + // switch src[s] & 0x03 + MOVBU (R6), R4 + MOVW R4, R3 + ANDW $3, R3 + MOVW $1, R1 + CMPW R1, R3 + BGE tagCopy + + // ---------------------------------------- + // The code below handles literal tags. + + // case tagLiteral: + // x := uint32(src[s] >> 2) + // switch + MOVW $60, R1 + LSRW $2, R4, R4 + CMPW R4, R1 + BLS tagLit60Plus + + // case x < 60: + // s++ + ADD $1, R6, R6 + +doLit: + // This is the end of the inner "switch", when we have a literal tag. + // + // We assume that R4 == x and x fits in a uint32, where x is the variable + // used in the pure Go decode_other.go code. + + // length = int(x) + 1 + // + // Unlike the pure Go code, we don't need to check if length <= 0 because + // R4 can hold 64 bits, so the increment cannot overflow. + ADD $1, R4, R4 + + // Prepare to check if copying length bytes will run past the end of dst or + // src. + // + // R2 = len(dst) - d + // R3 = len(src) - s + MOVD R10, R2 + SUB R7, R2, R2 + MOVD R13, R3 + SUB R6, R3, R3 + + // !!! Try a faster technique for short (16 or fewer bytes) copies. + // + // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { + // goto callMemmove // Fall back on calling runtime·memmove. + // } + // + // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s + // against 21 instead of 16, because it cannot assume that all of its input + // is contiguous in memory and so it needs to leave enough source bytes to + // read the next tag without refilling buffers, but Go's Decode assumes + // contiguousness (the src argument is a []byte). + CMP $16, R4 + BGT callMemmove + CMP $16, R2 + BLT callMemmove + CMP $16, R3 + BLT callMemmove + + // !!! Implement the copy from src to dst as a 16-byte load and store. + // (Decode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only length bytes, but that's + // OK. If the input is a valid Snappy encoding then subsequent iterations + // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a + // non-nil error), so the overrun will be ignored. + // + // Note that on arm64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + LDP 0(R6), (R14, R15) + STP (R14, R15), 0(R7) + + // d += length + // s += length + ADD R4, R7, R7 + ADD R4, R6, R6 + B loop + +callMemmove: + // if length > len(dst)-d || length > len(src)-s { etc } + CMP R2, R4 + BGT errCorrupt + CMP R3, R4 + BGT errCorrupt + + // copy(dst[d:], src[s:s+length]) + // + // This means calling runtime·memmove(&dst[d], &src[s], length), so we push + // R7, R6 and R4 as arguments. Coincidentally, we also need to spill those + // three registers to the stack, to save local variables across the CALL. + MOVD R7, 8(RSP) + MOVD R6, 16(RSP) + MOVD R4, 24(RSP) + MOVD R7, 32(RSP) + MOVD R6, 40(RSP) + MOVD R4, 48(RSP) + CALL runtime·memmove(SB) + + // Restore local variables: unspill registers from the stack and + // re-calculate R8-R13. 
+ MOVD 32(RSP), R7 + MOVD 40(RSP), R6 + MOVD 48(RSP), R4 + MOVD dst_base+0(FP), R8 + MOVD dst_len+8(FP), R9 + MOVD R8, R10 + ADD R9, R10, R10 + MOVD src_base+24(FP), R11 + MOVD src_len+32(FP), R12 + MOVD R11, R13 + ADD R12, R13, R13 + + // d += length + // s += length + ADD R4, R7, R7 + ADD R4, R6, R6 + B loop + +tagLit60Plus: + // !!! This fragment does the + // + // s += x - 58; if uint(s) > uint(len(src)) { etc } + // + // checks. In the asm version, we code it once instead of once per switch case. + ADD R4, R6, R6 + SUB $58, R6, R6 + MOVD R6, R3 + SUB R11, R3, R3 + CMP R12, R3 + BGT errCorrupt + + // case x == 60: + MOVW $61, R1 + CMPW R1, R4 + BEQ tagLit61 + BGT tagLit62Plus + + // x = uint32(src[s-1]) + MOVBU -1(R6), R4 + B doLit + +tagLit61: + // case x == 61: + // x = uint32(src[s-2]) | uint32(src[s-1])<<8 + MOVHU -2(R6), R4 + B doLit + +tagLit62Plus: + CMPW $62, R4 + BHI tagLit63 + + // case x == 62: + // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + MOVHU -3(R6), R4 + MOVBU -1(R6), R3 + ORR R3<<16, R4 + B doLit + +tagLit63: + // case x == 63: + // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + MOVWU -4(R6), R4 + B doLit + + // The code above handles literal tags. + // ---------------------------------------- + // The code below handles copy tags. + +tagCopy4: + // case tagCopy4: + // s += 5 + ADD $5, R6, R6 + + // if uint(s) > uint(len(src)) { etc } + MOVD R6, R3 + SUB R11, R3, R3 + CMP R12, R3 + BGT errCorrupt + + // length = 1 + int(src[s-5])>>2 + MOVD $1, R1 + ADD R4>>2, R1, R4 + + // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + MOVWU -4(R6), R5 + B doCopy + +tagCopy2: + // case tagCopy2: + // s += 3 + ADD $3, R6, R6 + + // if uint(s) > uint(len(src)) { etc } + MOVD R6, R3 + SUB R11, R3, R3 + CMP R12, R3 + BGT errCorrupt + + // length = 1 + int(src[s-3])>>2 + MOVD $1, R1 + ADD R4>>2, R1, R4 + + // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + MOVHU -2(R6), R5 + B doCopy + +tagCopy: + // We have a copy tag. We assume that: + // - R3 == src[s] & 0x03 + // - R4 == src[s] + CMP $2, R3 + BEQ tagCopy2 + BGT tagCopy4 + + // case tagCopy1: + // s += 2 + ADD $2, R6, R6 + + // if uint(s) > uint(len(src)) { etc } + MOVD R6, R3 + SUB R11, R3, R3 + CMP R12, R3 + BGT errCorrupt + + // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + MOVD R4, R5 + AND $0xe0, R5 + MOVBU -1(R6), R3 + ORR R5<<3, R3, R5 + + // length = 4 + int(src[s-2])>>2&0x7 + MOVD $7, R1 + AND R4>>2, R1, R4 + ADD $4, R4, R4 + +doCopy: + // This is the end of the outer "switch", when we have a copy tag. + // + // We assume that: + // - R4 == length && R4 > 0 + // - R5 == offset + + // if offset <= 0 { etc } + MOVD $0, R1 + CMP R1, R5 + BLE errCorrupt + + // if d < offset { etc } + MOVD R7, R3 + SUB R8, R3, R3 + CMP R5, R3 + BLT errCorrupt + + // if length > len(dst)-d { etc } + MOVD R10, R3 + SUB R7, R3, R3 + CMP R3, R4 + BGT errCorrupt + + // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length + // + // Set: + // - R14 = len(dst)-d + // - R15 = &dst[d-offset] + MOVD R10, R14 + SUB R7, R14, R14 + MOVD R7, R15 + SUB R5, R15, R15 + + // !!! Try a faster technique for short (16 or fewer bytes) forward copies. + // + // First, try using two 8-byte load/stores, similar to the doLit technique + // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is + // still OK if offset >= 8. 
Note that this has to be two 8-byte load/stores + // and not one 16-byte load/store, and the first store has to be before the + // second load, due to the overlap if offset is in the range [8, 16). + // + // if length > 16 || offset < 8 || len(dst)-d < 16 { + // goto slowForwardCopy + // } + // copy 16 bytes + // d += length + CMP $16, R4 + BGT slowForwardCopy + CMP $8, R5 + BLT slowForwardCopy + CMP $16, R14 + BLT slowForwardCopy + MOVD 0(R15), R2 + MOVD R2, 0(R7) + MOVD 8(R15), R3 + MOVD R3, 8(R7) + ADD R4, R7, R7 + B loop + +slowForwardCopy: + // !!! If the forward copy is longer than 16 bytes, or if offset < 8, we + // can still try 8-byte load stores, provided we can overrun up to 10 extra + // bytes. As above, the overrun will be fixed up by subsequent iterations + // of the outermost loop. + // + // The C++ snappy code calls this technique IncrementalCopyFastPath. Its + // commentary says: + // + // ---- + // + // The main part of this loop is a simple copy of eight bytes at a time + // until we've copied (at least) the requested amount of bytes. However, + // if d and d-offset are less than eight bytes apart (indicating a + // repeating pattern of length < 8), we first need to expand the pattern in + // order to get the correct results. For instance, if the buffer looks like + // this, with the eight-byte and patterns marked as + // intervals: + // + // abxxxxxxxxxxxx + // [------] d-offset + // [------] d + // + // a single eight-byte copy from to will repeat the pattern + // once, after which we can move two bytes without moving : + // + // ababxxxxxxxxxx + // [------] d-offset + // [------] d + // + // and repeat the exercise until the two no longer overlap. + // + // This allows us to do very well in the special case of one single byte + // repeated many times, without taking a big hit for more general cases. + // + // The worst case of extra writing past the end of the match occurs when + // offset == 1 and length == 1; the last copy will read from byte positions + // [0..7] and write to [4..11], whereas it was only supposed to write to + // position 1. Thus, ten excess bytes. + // + // ---- + // + // That "10 byte overrun" worst case is confirmed by Go's + // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy + // and finishSlowForwardCopy algorithm. + // + // if length > len(dst)-d-10 { + // goto verySlowForwardCopy + // } + SUB $10, R14, R14 + CMP R14, R4 + BGT verySlowForwardCopy + +makeOffsetAtLeast8: + // !!! As above, expand the pattern so that offset >= 8 and we can use + // 8-byte load/stores. + // + // for offset < 8 { + // copy 8 bytes from dst[d-offset:] to dst[d:] + // length -= offset + // d += offset + // offset += offset + // // The two previous lines together means that d-offset, and therefore + // // R15, is unchanged. + // } + CMP $8, R5 + BGE fixUpSlowForwardCopy + MOVD (R15), R3 + MOVD R3, (R7) + SUB R5, R4, R4 + ADD R5, R7, R7 + ADD R5, R5, R5 + B makeOffsetAtLeast8 + +fixUpSlowForwardCopy: + // !!! Add length (which might be negative now) to d (implied by R7 being + // &dst[d]) so that d ends up at the right place when we jump back to the + // top of the loop. Before we do that, though, we save R7 to R2 so that, if + // length is positive, copying the remaining length bytes will write to the + // right place. + MOVD R7, R2 + ADD R4, R7, R7 + +finishSlowForwardCopy: + // !!! Repeat 8-byte load/stores until length <= 0. 
Ending with a negative + // length means that we overrun, but as above, that will be fixed up by + // subsequent iterations of the outermost loop. + MOVD $0, R1 + CMP R1, R4 + BLE loop + MOVD (R15), R3 + MOVD R3, (R2) + ADD $8, R15, R15 + ADD $8, R2, R2 + SUB $8, R4, R4 + B finishSlowForwardCopy + +verySlowForwardCopy: + // verySlowForwardCopy is a simple implementation of forward copy. In C + // parlance, this is a do/while loop instead of a while loop, since we know + // that length > 0. In Go syntax: + // + // for { + // dst[d] = dst[d - offset] + // d++ + // length-- + // if length == 0 { + // break + // } + // } + MOVB (R15), R3 + MOVB R3, (R7) + ADD $1, R15, R15 + ADD $1, R7, R7 + SUB $1, R4, R4 + CBNZ R4, verySlowForwardCopy + B loop + + // The code above handles copy tags. + // ---------------------------------------- + +end: + // This is the end of the "for s < len(src)". + // + // if d != len(dst) { etc } + CMP R10, R7 + BNE errCorrupt + + // return 0 + MOVD $0, ret+48(FP) + RET + +errCorrupt: + // return decodeErrCodeCorrupt + MOVD $1, R2 + MOVD R2, ret+48(FP) + RET diff --git a/vendor/github.com/golang/snappy/decode_amd64.go b/vendor/github.com/golang/snappy/decode_asm.go similarity index 93% rename from vendor/github.com/golang/snappy/decode_amd64.go rename to vendor/github.com/golang/snappy/decode_asm.go index fcd192b849..7082b34919 100644 --- a/vendor/github.com/golang/snappy/decode_amd64.go +++ b/vendor/github.com/golang/snappy/decode_asm.go @@ -5,6 +5,7 @@ // +build !appengine // +build gc // +build !noasm +// +build amd64 arm64 package snappy diff --git a/vendor/github.com/golang/snappy/decode_other.go b/vendor/github.com/golang/snappy/decode_other.go index 8c9f2049bc..2f672be557 100644 --- a/vendor/github.com/golang/snappy/decode_other.go +++ b/vendor/github.com/golang/snappy/decode_other.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !amd64 appengine !gc noasm +// +build !amd64,!arm64 appengine !gc noasm package snappy @@ -85,14 +85,28 @@ func decode(dst, src []byte) int { if offset <= 0 || d < offset || length > len(dst)-d { return decodeErrCodeCorrupt } - // Copy from an earlier sub-slice of dst to a later sub-slice. Unlike - // the built-in copy function, this byte-by-byte copy always runs + // Copy from an earlier sub-slice of dst to a later sub-slice. + // If no overlap, use the built-in copy: + if offset >= length { + copy(dst[d:d+length], dst[d-offset:]) + d += length + continue + } + + // Unlike the built-in copy function, this byte-by-byte copy always runs // forwards, even if the slices overlap. Conceptually, this is: // // d += forwardCopy(dst[d:d+length], dst[d-offset:]) - for end := d + length; d != end; d++ { - dst[d] = dst[d-offset] + // + // We align the slices into a and b and show the compiler they are the same size. + // This allows the loop to run without bounds checks. + a := dst[d : d+length] + b := dst[d-offset:] + b = b[:len(a)] + for i := range a { + a[i] = b[i] } + d += length } if d != len(dst) { return decodeErrCodeCorrupt diff --git a/vendor/github.com/golang/snappy/encode.go b/vendor/github.com/golang/snappy/encode.go index 8d393e904b..7f23657076 100644 --- a/vendor/github.com/golang/snappy/encode.go +++ b/vendor/github.com/golang/snappy/encode.go @@ -15,6 +15,8 @@ import ( // Otherwise, a newly allocated slice will be returned. // // The dst and src must not overlap. It is valid to pass a nil dst. 
+// +// Encode handles the Snappy block format, not the Snappy stream format. func Encode(dst, src []byte) []byte { if n := MaxEncodedLen(len(src)); n < 0 { panic(ErrTooLarge) @@ -139,6 +141,8 @@ func NewBufferedWriter(w io.Writer) *Writer { } // Writer is an io.Writer that can write Snappy-compressed bytes. +// +// Writer handles the Snappy stream format, not the Snappy block format. type Writer struct { w io.Writer err error diff --git a/vendor/github.com/golang/snappy/encode_arm64.s b/vendor/github.com/golang/snappy/encode_arm64.s new file mode 100644 index 0000000000..f8d54adfc5 --- /dev/null +++ b/vendor/github.com/golang/snappy/encode_arm64.s @@ -0,0 +1,722 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The asm code generally follows the pure Go code in encode_other.go, except +// where marked with a "!!!". + +// ---------------------------------------------------------------------------- + +// func emitLiteral(dst, lit []byte) int +// +// All local variables fit into registers. The register allocation: +// - R3 len(lit) +// - R4 n +// - R6 return value +// - R8 &dst[i] +// - R10 &lit[0] +// +// The 32 bytes of stack space is to call runtime·memmove. +// +// The unusual register allocation of local variables, such as R10 for the +// source pointer, matches the allocation used at the call site in encodeBlock, +// which makes it easier to manually inline this function. +TEXT ·emitLiteral(SB), NOSPLIT, $32-56 + MOVD dst_base+0(FP), R8 + MOVD lit_base+24(FP), R10 + MOVD lit_len+32(FP), R3 + MOVD R3, R6 + MOVW R3, R4 + SUBW $1, R4, R4 + + CMPW $60, R4 + BLT oneByte + CMPW $256, R4 + BLT twoBytes + +threeBytes: + MOVD $0xf4, R2 + MOVB R2, 0(R8) + MOVW R4, 1(R8) + ADD $3, R8, R8 + ADD $3, R6, R6 + B memmove + +twoBytes: + MOVD $0xf0, R2 + MOVB R2, 0(R8) + MOVB R4, 1(R8) + ADD $2, R8, R8 + ADD $2, R6, R6 + B memmove + +oneByte: + LSLW $2, R4, R4 + MOVB R4, 0(R8) + ADD $1, R8, R8 + ADD $1, R6, R6 + +memmove: + MOVD R6, ret+48(FP) + + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // R8, R10 and R3 as arguments. + MOVD R8, 8(RSP) + MOVD R10, 16(RSP) + MOVD R3, 24(RSP) + CALL runtime·memmove(SB) + RET + +// ---------------------------------------------------------------------------- + +// func emitCopy(dst []byte, offset, length int) int +// +// All local variables fit into registers. The register allocation: +// - R3 length +// - R7 &dst[0] +// - R8 &dst[i] +// - R11 offset +// +// The unusual register allocation of local variables, such as R11 for the +// offset, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·emitCopy(SB), NOSPLIT, $0-48 + MOVD dst_base+0(FP), R8 + MOVD R8, R7 + MOVD offset+24(FP), R11 + MOVD length+32(FP), R3 + +loop0: + // for length >= 68 { etc } + CMPW $68, R3 + BLT step1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVD $0xfe, R2 + MOVB R2, 0(R8) + MOVW R11, 1(R8) + ADD $3, R8, R8 + SUB $64, R3, R3 + B loop0 + +step1: + // if length > 64 { etc } + CMP $64, R3 + BLE step2 + + // Emit a length 60 copy, encoded as 3 bytes. 
+ MOVD $0xee, R2 + MOVB R2, 0(R8) + MOVW R11, 1(R8) + ADD $3, R8, R8 + SUB $60, R3, R3 + +step2: + // if length >= 12 || offset >= 2048 { goto step3 } + CMP $12, R3 + BGE step3 + CMPW $2048, R11 + BGE step3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(R8) + LSRW $3, R11, R11 + AND $0xe0, R11, R11 + SUB $4, R3, R3 + LSLW $2, R3 + AND $0xff, R3, R3 + ORRW R3, R11, R11 + ORRW $1, R11, R11 + MOVB R11, 0(R8) + ADD $2, R8, R8 + + // Return the number of bytes written. + SUB R7, R8, R8 + MOVD R8, ret+40(FP) + RET + +step3: + // Emit the remaining copy, encoded as 3 bytes. + SUB $1, R3, R3 + AND $0xff, R3, R3 + LSLW $2, R3, R3 + ORRW $2, R3, R3 + MOVB R3, 0(R8) + MOVW R11, 1(R8) + ADD $3, R8, R8 + + // Return the number of bytes written. + SUB R7, R8, R8 + MOVD R8, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func extendMatch(src []byte, i, j int) int +// +// All local variables fit into registers. The register allocation: +// - R6 &src[0] +// - R7 &src[j] +// - R13 &src[len(src) - 8] +// - R14 &src[len(src)] +// - R15 &src[i] +// +// The unusual register allocation of local variables, such as R15 for a source +// pointer, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·extendMatch(SB), NOSPLIT, $0-48 + MOVD src_base+0(FP), R6 + MOVD src_len+8(FP), R14 + MOVD i+24(FP), R15 + MOVD j+32(FP), R7 + ADD R6, R14, R14 + ADD R6, R15, R15 + ADD R6, R7, R7 + MOVD R14, R13 + SUB $8, R13, R13 + +cmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMP R13, R7 + BHI cmp1 + MOVD (R15), R3 + MOVD (R7), R4 + CMP R4, R3 + BNE bsf + ADD $8, R15, R15 + ADD $8, R7, R7 + B cmp8 + +bsf: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. + // RBIT reverses the bit order, then CLZ counts the leading zeros, the + // combination of which finds the least significant bit which is set. + // The arm64 architecture is little-endian, and the shift by 3 converts + // a bit index to a byte index. + EOR R3, R4, R4 + RBIT R4, R4 + CLZ R4, R4 + ADD R4>>3, R7, R7 + + // Convert from &src[ret] to ret. + SUB R6, R7, R7 + MOVD R7, ret+40(FP) + RET + +cmp1: + // In src's tail, compare 1 byte at a time. + CMP R7, R14 + BLS extendMatchEnd + MOVB (R15), R3 + MOVB (R7), R4 + CMP R4, R3 + BNE extendMatchEnd + ADD $1, R15, R15 + ADD $1, R7, R7 + B cmp1 + +extendMatchEnd: + // Convert from &src[ret] to ret. + SUB R6, R7, R7 + MOVD R7, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func encodeBlock(dst, src []byte) (d int) +// +// All local variables fit into registers, other than "var table". The register +// allocation: +// - R3 . . +// - R4 . . +// - R5 64 shift +// - R6 72 &src[0], tableSize +// - R7 80 &src[s] +// - R8 88 &dst[d] +// - R9 96 sLimit +// - R10 . &src[nextEmit] +// - R11 104 prevHash, currHash, nextHash, offset +// - R12 112 &src[base], skip +// - R13 . &src[nextS], &src[len(src) - 8] +// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x +// - R15 120 candidate +// - R16 . hash constant, 0x1e35a7bd +// - R17 . &table +// - . 128 table +// +// The second column (64, 72, etc) is the stack offset to spill the registers +// when calling other functions. 
We could pack this slightly tighter, but it's +// simpler to have a dedicated spill map independent of the function called. +// +// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An +// extra 64 bytes, to call other functions, and an extra 64 bytes, to spill +// local variables (registers) during calls gives 32768 + 64 + 64 = 32896. +TEXT ·encodeBlock(SB), 0, $32896-56 + MOVD dst_base+0(FP), R8 + MOVD src_base+24(FP), R7 + MOVD src_len+32(FP), R14 + + // shift, tableSize := uint32(32-8), 1<<8 + MOVD $24, R5 + MOVD $256, R6 + MOVW $0xa7bd, R16 + MOVKW $(0x1e35<<16), R16 + +calcShift: + // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + // shift-- + // } + MOVD $16384, R2 + CMP R2, R6 + BGE varTable + CMP R14, R6 + BGE varTable + SUB $1, R5, R5 + LSL $1, R6, R6 + B calcShift + +varTable: + // var table [maxTableSize]uint16 + // + // In the asm code, unlike the Go code, we can zero-initialize only the + // first tableSize elements. Each uint16 element is 2 bytes and each + // iterations writes 64 bytes, so we can do only tableSize/32 writes + // instead of the 2048 writes that would zero-initialize all of table's + // 32768 bytes. This clear could overrun the first tableSize elements, but + // it won't overrun the allocated stack size. + ADD $128, RSP, R17 + MOVD R17, R4 + + // !!! R6 = &src[tableSize] + ADD R6<<1, R17, R6 + +memclr: + STP.P (ZR, ZR), 64(R4) + STP (ZR, ZR), -48(R4) + STP (ZR, ZR), -32(R4) + STP (ZR, ZR), -16(R4) + CMP R4, R6 + BHI memclr + + // !!! R6 = &src[0] + MOVD R7, R6 + + // sLimit := len(src) - inputMargin + MOVD R14, R9 + SUB $15, R9, R9 + + // !!! Pre-emptively spill R5, R6 and R9 to the stack. Their values don't + // change for the rest of the function. + MOVD R5, 64(RSP) + MOVD R6, 72(RSP) + MOVD R9, 96(RSP) + + // nextEmit := 0 + MOVD R6, R10 + + // s := 1 + ADD $1, R7, R7 + + // nextHash := hash(load32(src, s), shift) + MOVW 0(R7), R11 + MULW R16, R11, R11 + LSRW R5, R11, R11 + +outer: + // for { etc } + + // skip := 32 + MOVD $32, R12 + + // nextS := s + MOVD R7, R13 + + // candidate := 0 + MOVD $0, R15 + +inner0: + // for { etc } + + // s := nextS + MOVD R13, R7 + + // bytesBetweenHashLookups := skip >> 5 + MOVD R12, R14 + LSR $5, R14, R14 + + // nextS = s + bytesBetweenHashLookups + ADD R14, R13, R13 + + // skip += bytesBetweenHashLookups + ADD R14, R12, R12 + + // if nextS > sLimit { goto emitRemainder } + MOVD R13, R3 + SUB R6, R3, R3 + CMP R9, R3 + BHI emitRemainder + + // candidate = int(table[nextHash]) + MOVHU 0(R17)(R11<<1), R15 + + // table[nextHash] = uint16(s) + MOVD R7, R3 + SUB R6, R3, R3 + + MOVH R3, 0(R17)(R11<<1) + + // nextHash = hash(load32(src, nextS), shift) + MOVW 0(R13), R11 + MULW R16, R11 + LSRW R5, R11, R11 + + // if load32(src, s) != load32(src, candidate) { continue } break + MOVW 0(R7), R3 + MOVW (R6)(R15), R4 + CMPW R4, R3 + BNE inner0 + +fourByteMatch: + // As per the encode_other.go code: + // + // A 4-byte match has been found. We'll later see etc. + + // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment + // on inputMargin in encode.go. + MOVD R7, R3 + SUB R10, R3, R3 + CMP $16, R3 + BLE emitLiteralFastPath + + // ---------------------------------------- + // Begin inline of the emitLiteral call. 
+ // + // d += emitLiteral(dst[d:], src[nextEmit:s]) + + MOVW R3, R4 + SUBW $1, R4, R4 + + MOVW $60, R2 + CMPW R2, R4 + BLT inlineEmitLiteralOneByte + MOVW $256, R2 + CMPW R2, R4 + BLT inlineEmitLiteralTwoBytes + +inlineEmitLiteralThreeBytes: + MOVD $0xf4, R1 + MOVB R1, 0(R8) + MOVW R4, 1(R8) + ADD $3, R8, R8 + B inlineEmitLiteralMemmove + +inlineEmitLiteralTwoBytes: + MOVD $0xf0, R1 + MOVB R1, 0(R8) + MOVB R4, 1(R8) + ADD $2, R8, R8 + B inlineEmitLiteralMemmove + +inlineEmitLiteralOneByte: + LSLW $2, R4, R4 + MOVB R4, 0(R8) + ADD $1, R8, R8 + +inlineEmitLiteralMemmove: + // Spill local variables (registers) onto the stack; call; unspill. + // + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // R8, R10 and R3 as arguments. + MOVD R8, 8(RSP) + MOVD R10, 16(RSP) + MOVD R3, 24(RSP) + + // Finish the "d +=" part of "d += emitLiteral(etc)". + ADD R3, R8, R8 + MOVD R7, 80(RSP) + MOVD R8, 88(RSP) + MOVD R15, 120(RSP) + CALL runtime·memmove(SB) + MOVD 64(RSP), R5 + MOVD 72(RSP), R6 + MOVD 80(RSP), R7 + MOVD 88(RSP), R8 + MOVD 96(RSP), R9 + MOVD 120(RSP), R15 + ADD $128, RSP, R17 + MOVW $0xa7bd, R16 + MOVKW $(0x1e35<<16), R16 + B inner1 + +inlineEmitLiteralEnd: + // End inline of the emitLiteral call. + // ---------------------------------------- + +emitLiteralFastPath: + // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". + MOVB R3, R4 + SUBW $1, R4, R4 + AND $0xff, R4, R4 + LSLW $2, R4, R4 + MOVB R4, (R8) + ADD $1, R8, R8 + + // !!! Implement the copy from lit to dst as a 16-byte load and store. + // (Encode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only len(lit) bytes, but that's + // OK. Subsequent iterations will fix up the overrun. + // + // Note that on arm64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + LDP 0(R10), (R0, R1) + STP (R0, R1), 0(R8) + ADD R3, R8, R8 + +inner1: + // for { etc } + + // base := s + MOVD R7, R12 + + // !!! offset := base - candidate + MOVD R12, R11 + SUB R15, R11, R11 + SUB R6, R11, R11 + + // ---------------------------------------- + // Begin inline of the extendMatch call. + // + // s = extendMatch(src, candidate+4, s+4) + + // !!! R14 = &src[len(src)] + MOVD src_len+32(FP), R14 + ADD R6, R14, R14 + + // !!! R13 = &src[len(src) - 8] + MOVD R14, R13 + SUB $8, R13, R13 + + // !!! R15 = &src[candidate + 4] + ADD $4, R15, R15 + ADD R6, R15, R15 + + // !!! s += 4 + ADD $4, R7, R7 + +inlineExtendMatchCmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMP R13, R7 + BHI inlineExtendMatchCmp1 + MOVD (R15), R3 + MOVD (R7), R4 + CMP R4, R3 + BNE inlineExtendMatchBSF + ADD $8, R15, R15 + ADD $8, R7, R7 + B inlineExtendMatchCmp8 + +inlineExtendMatchBSF: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. + // RBIT reverses the bit order, then CLZ counts the leading zeros, the + // combination of which finds the least significant bit which is set. + // The arm64 architecture is little-endian, and the shift by 3 converts + // a bit index to a byte index. + EOR R3, R4, R4 + RBIT R4, R4 + CLZ R4, R4 + ADD R4>>3, R7, R7 + B inlineExtendMatchEnd + +inlineExtendMatchCmp1: + // In src's tail, compare 1 byte at a time. 
+ CMP R7, R14 + BLS inlineExtendMatchEnd + MOVB (R15), R3 + MOVB (R7), R4 + CMP R4, R3 + BNE inlineExtendMatchEnd + ADD $1, R15, R15 + ADD $1, R7, R7 + B inlineExtendMatchCmp1 + +inlineExtendMatchEnd: + // End inline of the extendMatch call. + // ---------------------------------------- + + // ---------------------------------------- + // Begin inline of the emitCopy call. + // + // d += emitCopy(dst[d:], base-candidate, s-base) + + // !!! length := s - base + MOVD R7, R3 + SUB R12, R3, R3 + +inlineEmitCopyLoop0: + // for length >= 68 { etc } + MOVW $68, R2 + CMPW R2, R3 + BLT inlineEmitCopyStep1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVD $0xfe, R1 + MOVB R1, 0(R8) + MOVW R11, 1(R8) + ADD $3, R8, R8 + SUBW $64, R3, R3 + B inlineEmitCopyLoop0 + +inlineEmitCopyStep1: + // if length > 64 { etc } + MOVW $64, R2 + CMPW R2, R3 + BLE inlineEmitCopyStep2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVD $0xee, R1 + MOVB R1, 0(R8) + MOVW R11, 1(R8) + ADD $3, R8, R8 + SUBW $60, R3, R3 + +inlineEmitCopyStep2: + // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } + MOVW $12, R2 + CMPW R2, R3 + BGE inlineEmitCopyStep3 + MOVW $2048, R2 + CMPW R2, R11 + BGE inlineEmitCopyStep3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(R8) + LSRW $8, R11, R11 + LSLW $5, R11, R11 + SUBW $4, R3, R3 + AND $0xff, R3, R3 + LSLW $2, R3, R3 + ORRW R3, R11, R11 + ORRW $1, R11, R11 + MOVB R11, 0(R8) + ADD $2, R8, R8 + B inlineEmitCopyEnd + +inlineEmitCopyStep3: + // Emit the remaining copy, encoded as 3 bytes. + SUBW $1, R3, R3 + LSLW $2, R3, R3 + ORRW $2, R3, R3 + MOVB R3, 0(R8) + MOVW R11, 1(R8) + ADD $3, R8, R8 + +inlineEmitCopyEnd: + // End inline of the emitCopy call. + // ---------------------------------------- + + // nextEmit = s + MOVD R7, R10 + + // if s >= sLimit { goto emitRemainder } + MOVD R7, R3 + SUB R6, R3, R3 + CMP R3, R9 + BLS emitRemainder + + // As per the encode_other.go code: + // + // We could immediately etc. + + // x := load64(src, s-1) + MOVD -1(R7), R14 + + // prevHash := hash(uint32(x>>0), shift) + MOVW R14, R11 + MULW R16, R11, R11 + LSRW R5, R11, R11 + + // table[prevHash] = uint16(s-1) + MOVD R7, R3 + SUB R6, R3, R3 + SUB $1, R3, R3 + + MOVHU R3, 0(R17)(R11<<1) + + // currHash := hash(uint32(x>>8), shift) + LSR $8, R14, R14 + MOVW R14, R11 + MULW R16, R11, R11 + LSRW R5, R11, R11 + + // candidate = int(table[currHash]) + MOVHU 0(R17)(R11<<1), R15 + + // table[currHash] = uint16(s) + ADD $1, R3, R3 + MOVHU R3, 0(R17)(R11<<1) + + // if uint32(x>>8) == load32(src, candidate) { continue } + MOVW (R6)(R15), R4 + CMPW R4, R14 + BEQ inner1 + + // nextHash = hash(uint32(x>>16), shift) + LSR $8, R14, R14 + MOVW R14, R11 + MULW R16, R11, R11 + LSRW R5, R11, R11 + + // s++ + ADD $1, R7, R7 + + // break out of the inner1 for loop, i.e. continue the outer loop. + B outer + +emitRemainder: + // if nextEmit < len(src) { etc } + MOVD src_len+32(FP), R3 + ADD R6, R3, R3 + CMP R3, R10 + BEQ encodeBlockEnd + + // d += emitLiteral(dst[d:], src[nextEmit:]) + // + // Push args. + MOVD R8, 8(RSP) + MOVD $0, 16(RSP) // Unnecessary, as the callee ignores it, but conservative. + MOVD $0, 24(RSP) // Unnecessary, as the callee ignores it, but conservative. + MOVD R10, 32(RSP) + SUB R10, R3, R3 + MOVD R3, 40(RSP) + MOVD R3, 48(RSP) // Unnecessary, as the callee ignores it, but conservative. + + // Spill local variables (registers) onto the stack; call; unspill. 
+ MOVD R8, 88(RSP) + CALL ·emitLiteral(SB) + MOVD 88(RSP), R8 + + // Finish the "d +=" part of "d += emitLiteral(etc)". + MOVD 56(RSP), R1 + ADD R1, R8, R8 + +encodeBlockEnd: + MOVD dst_base+0(FP), R3 + SUB R3, R8, R8 + MOVD R8, d+48(FP) + RET diff --git a/vendor/github.com/golang/snappy/encode_amd64.go b/vendor/github.com/golang/snappy/encode_asm.go similarity index 97% rename from vendor/github.com/golang/snappy/encode_amd64.go rename to vendor/github.com/golang/snappy/encode_asm.go index 150d91bc8b..107c1e7141 100644 --- a/vendor/github.com/golang/snappy/encode_amd64.go +++ b/vendor/github.com/golang/snappy/encode_asm.go @@ -5,6 +5,7 @@ // +build !appengine // +build gc // +build !noasm +// +build amd64 arm64 package snappy diff --git a/vendor/github.com/golang/snappy/encode_other.go b/vendor/github.com/golang/snappy/encode_other.go index dbcae905e6..296d7f0beb 100644 --- a/vendor/github.com/golang/snappy/encode_other.go +++ b/vendor/github.com/golang/snappy/encode_other.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !amd64 appengine !gc noasm +// +build !amd64,!arm64 appengine !gc noasm package snappy diff --git a/vendor/github.com/hashicorp/errwrap/errwrap.go b/vendor/github.com/hashicorp/errwrap/errwrap.go index a733bef18c..44e368e569 100644 --- a/vendor/github.com/hashicorp/errwrap/errwrap.go +++ b/vendor/github.com/hashicorp/errwrap/errwrap.go @@ -44,6 +44,8 @@ func Wrap(outer, inner error) error { // // format is the format of the error message. The string '{{err}}' will // be replaced with the original error message. +// +// Deprecated: Use fmt.Errorf() func Wrapf(format string, err error) error { outerMsg := "" if err != nil { @@ -148,6 +150,9 @@ func Walk(err error, cb WalkFunc) { for _, err := range e.WrappedErrors() { Walk(err, cb) } + case interface{ Unwrap() error }: + cb(err) + Walk(e.Unwrap(), cb) default: cb(err) } @@ -167,3 +172,7 @@ func (w *wrappedError) Error() string { func (w *wrappedError) WrappedErrors() []error { return []error{w.Outer, w.Inner} } + +func (w *wrappedError) Unwrap() error { + return w.Inner +} diff --git a/vendor/github.com/hashicorp/go-cleanhttp/LICENSE b/vendor/github.com/hashicorp/go-cleanhttp/LICENSE new file mode 100644 index 0000000000..e87a115e46 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. 
"Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. 
+ + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. 
However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-cleanhttp/README.md b/vendor/github.com/hashicorp/go-cleanhttp/README.md new file mode 100644 index 0000000000..036e5313fc --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/README.md @@ -0,0 +1,30 @@ +# cleanhttp + +Functions for accessing "clean" Go http.Client values + +------------- + +The Go standard library contains a default `http.Client` called +`http.DefaultClient`. It is a common idiom in Go code to start with +`http.DefaultClient` and tweak it as necessary, and in fact, this is +encouraged; from the `http` package documentation: + +> The Client's Transport typically has internal state (cached TCP connections), +so Clients should be reused instead of created as needed. Clients are safe for +concurrent use by multiple goroutines. + +Unfortunately, this is a shared value, and it is not uncommon for libraries to +assume that they are free to modify it at will. With enough dependencies, it +can be very easy to encounter strange problems and race conditions due to +manipulation of this shared value across libraries and goroutines (clients are +safe for concurrent use, but writing values to the client struct itself is not +protected). + +Making things worse is the fact that a bare `http.Client` will use a default +`http.Transport` called `http.DefaultTransport`, which is another global value +that behaves the same way. So it is not simply enough to replace +`http.DefaultClient` with `&http.Client{}`. + +This repository provides some simple functions to get a "clean" `http.Client` +-- one that uses the same default values as the Go standard library, but +returns a client that does not share any state with other clients. diff --git a/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go new file mode 100644 index 0000000000..8d306bf513 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go @@ -0,0 +1,57 @@ +package cleanhttp + +import ( + "net" + "net/http" + "runtime" + "time" +) + +// DefaultTransport returns a new http.Transport with similar default values to +// http.DefaultTransport, but with idle connections and keepalives disabled. +func DefaultTransport() *http.Transport { + transport := DefaultPooledTransport() + transport.DisableKeepAlives = true + transport.MaxIdleConnsPerHost = -1 + return transport +} + +// DefaultPooledTransport returns a new http.Transport with similar default +// values to http.DefaultTransport. Do not use this for transient transports as +// it can leak file descriptors over time. Only use this for transports that +// will be re-used for the same host(s). 
+func DefaultPooledTransport() *http.Transport { + transport := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + MaxIdleConnsPerHost: runtime.GOMAXPROCS(0) + 1, + } + return transport +} + +// DefaultClient returns a new http.Client with similar default values to +// http.Client, but with a non-shared Transport, idle connections disabled, and +// keepalives disabled. +func DefaultClient() *http.Client { + return &http.Client{ + Transport: DefaultTransport(), + } +} + +// DefaultPooledClient returns a new http.Client with similar default values to +// http.Client, but with a shared Transport. Do not use this function for +// transient clients as it can leak file descriptors over time. Only use this +// for clients that will be re-used for the same host(s). +func DefaultPooledClient() *http.Client { + return &http.Client{ + Transport: DefaultPooledTransport(), + } +} diff --git a/vendor/github.com/hashicorp/go-cleanhttp/doc.go b/vendor/github.com/hashicorp/go-cleanhttp/doc.go new file mode 100644 index 0000000000..05841092a7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/doc.go @@ -0,0 +1,20 @@ +// Package cleanhttp offers convenience utilities for acquiring "clean" +// http.Transport and http.Client structs. +// +// Values set on http.DefaultClient and http.DefaultTransport affect all +// callers. This can have detrimental effects, esepcially in TLS contexts, +// where client or root certificates set to talk to multiple endpoints can end +// up displacing each other, leading to hard-to-debug issues. This package +// provides non-shared http.Client and http.Transport structs to ensure that +// the configuration will not be overwritten by other parts of the application +// or dependencies. +// +// The DefaultClient and DefaultTransport functions disable idle connections +// and keepalives. Without ensuring that idle connections are closed before +// garbage collection, short-term clients/transports can leak file descriptors, +// eventually leading to "too many open files" errors. If you will be +// connecting to the same hosts repeatedly from the same client, you can use +// DefaultPooledClient to receive a client that has connection pooling +// semantics similar to http.DefaultClient. +// +package cleanhttp diff --git a/vendor/github.com/hashicorp/go-cleanhttp/go.mod b/vendor/github.com/hashicorp/go-cleanhttp/go.mod new file mode 100644 index 0000000000..310f07569f --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/go.mod @@ -0,0 +1 @@ +module github.com/hashicorp/go-cleanhttp diff --git a/vendor/github.com/hashicorp/go-cleanhttp/handlers.go b/vendor/github.com/hashicorp/go-cleanhttp/handlers.go new file mode 100644 index 0000000000..3c845dc0dc --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/handlers.go @@ -0,0 +1,48 @@ +package cleanhttp + +import ( + "net/http" + "strings" + "unicode" +) + +// HandlerInput provides input options to cleanhttp's handlers +type HandlerInput struct { + ErrStatus int +} + +// PrintablePathCheckHandler is a middleware that ensures the request path +// contains only printable runes. 
+func PrintablePathCheckHandler(next http.Handler, input *HandlerInput) http.Handler { + // Nil-check on input to make it optional + if input == nil { + input = &HandlerInput{ + ErrStatus: http.StatusBadRequest, + } + } + + // Default to http.StatusBadRequest on error + if input.ErrStatus == 0 { + input.ErrStatus = http.StatusBadRequest + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r != nil { + // Check URL path for non-printable characters + idx := strings.IndexFunc(r.URL.Path, func(c rune) bool { + return !unicode.IsPrint(c) + }) + + if idx != -1 { + w.WriteHeader(input.ErrStatus) + return + } + + if next != nil { + next.ServeHTTP(w, r) + } + } + + return + }) +} diff --git a/vendor/github.com/hashicorp/go-hclog/interceptlogger.go b/vendor/github.com/hashicorp/go-hclog/interceptlogger.go index 08a6677eb7..631baf2f0c 100644 --- a/vendor/github.com/hashicorp/go-hclog/interceptlogger.go +++ b/vendor/github.com/hashicorp/go-hclog/interceptlogger.go @@ -18,8 +18,13 @@ type interceptLogger struct { } func NewInterceptLogger(opts *LoggerOptions) InterceptLogger { + l := newLogger(opts) + if l.callerOffset > 0 { + // extra frames for interceptLogger.{Warn,Info,Log,etc...}, and interceptLogger.log + l.callerOffset += 2 + } intercept := &interceptLogger{ - Logger: New(opts), + Logger: l, mu: new(sync.Mutex), sinkCount: new(int32), Sinks: make(map[SinkAdapter]struct{}), @@ -31,6 +36,14 @@ func NewInterceptLogger(opts *LoggerOptions) InterceptLogger { } func (i *interceptLogger) Log(level Level, msg string, args ...interface{}) { + i.log(level, msg, args...) +} + +// log is used to make the caller stack frame lookup consistent. If Warn,Info,etc +// all called Log then direct calls to Log would have a different stack frame +// depth. By having all the methods call the same helper we ensure the stack +// frame depth is the same. +func (i *interceptLogger) log(level Level, msg string, args ...interface{}) { i.Logger.Log(level, msg, args...) if atomic.LoadInt32(i.sinkCount) == 0 { return @@ -45,72 +58,27 @@ func (i *interceptLogger) Log(level Level, msg string, args ...interface{}) { // Emit the message and args at TRACE level to log and sinks func (i *interceptLogger) Trace(msg string, args ...interface{}) { - i.Logger.Trace(msg, args...) - if atomic.LoadInt32(i.sinkCount) == 0 { - return - } - - i.mu.Lock() - defer i.mu.Unlock() - for s := range i.Sinks { - s.Accept(i.Name(), Trace, msg, i.retrieveImplied(args...)...) - } + i.log(Trace, msg, args...) } // Emit the message and args at DEBUG level to log and sinks func (i *interceptLogger) Debug(msg string, args ...interface{}) { - i.Logger.Debug(msg, args...) - if atomic.LoadInt32(i.sinkCount) == 0 { - return - } - - i.mu.Lock() - defer i.mu.Unlock() - for s := range i.Sinks { - s.Accept(i.Name(), Debug, msg, i.retrieveImplied(args...)...) - } + i.log(Debug, msg, args...) } // Emit the message and args at INFO level to log and sinks func (i *interceptLogger) Info(msg string, args ...interface{}) { - i.Logger.Info(msg, args...) - if atomic.LoadInt32(i.sinkCount) == 0 { - return - } - - i.mu.Lock() - defer i.mu.Unlock() - for s := range i.Sinks { - s.Accept(i.Name(), Info, msg, i.retrieveImplied(args...)...) - } + i.log(Info, msg, args...) } // Emit the message and args at WARN level to log and sinks func (i *interceptLogger) Warn(msg string, args ...interface{}) { - i.Logger.Warn(msg, args...) 
- if atomic.LoadInt32(i.sinkCount) == 0 { - return - } - - i.mu.Lock() - defer i.mu.Unlock() - for s := range i.Sinks { - s.Accept(i.Name(), Warn, msg, i.retrieveImplied(args...)...) - } + i.log(Warn, msg, args...) } // Emit the message and args at ERROR level to log and sinks func (i *interceptLogger) Error(msg string, args ...interface{}) { - i.Logger.Error(msg, args...) - if atomic.LoadInt32(i.sinkCount) == 0 { - return - } - - i.mu.Lock() - defer i.mu.Unlock() - for s := range i.Sinks { - s.Accept(i.Name(), Error, msg, i.retrieveImplied(args...)...) - } + i.log(Error, msg, args...) } func (i *interceptLogger) retrieveImplied(args ...interface{}) []interface{} { @@ -123,17 +91,11 @@ func (i *interceptLogger) retrieveImplied(args ...interface{}) []interface{} { return cp } -// Create a new sub-Logger that a name decending from the current name. +// Create a new sub-Logger that a name descending from the current name. // This is used to create a subsystem specific Logger. // Registered sinks will subscribe to these messages as well. func (i *interceptLogger) Named(name string) Logger { - var sub interceptLogger - - sub = *i - - sub.Logger = i.Logger.Named(name) - - return &sub + return i.NamedIntercept(name) } // Create a new sub-Logger with an explicit name. This ignores the current @@ -141,13 +103,7 @@ func (i *interceptLogger) Named(name string) Logger { // within the normal hierarchy. Registered sinks will subscribe // to these messages as well. func (i *interceptLogger) ResetNamed(name string) Logger { - var sub interceptLogger - - sub = *i - - sub.Logger = i.Logger.ResetNamed(name) - - return &sub + return i.ResetNamedIntercept(name) } // Create a new sub-Logger that a name decending from the current name. @@ -157,9 +113,7 @@ func (i *interceptLogger) NamedIntercept(name string) InterceptLogger { var sub interceptLogger sub = *i - sub.Logger = i.Logger.Named(name) - return &sub } @@ -171,9 +125,7 @@ func (i *interceptLogger) ResetNamedIntercept(name string) InterceptLogger { var sub interceptLogger sub = *i - sub.Logger = i.Logger.ResetNamed(name) - return &sub } @@ -210,18 +162,23 @@ func (i *interceptLogger) DeregisterSink(sink SinkAdapter) { atomic.AddInt32(i.sinkCount, -1) } -// Create a *log.Logger that will send it's data through this Logger. This -// allows packages that expect to be using the standard library to log to -// actually use this logger, which will also send to any registered sinks. func (i *interceptLogger) StandardLoggerIntercept(opts *StandardLoggerOptions) *log.Logger { + return i.StandardLogger(opts) +} + +func (i *interceptLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger { if opts == nil { opts = &StandardLoggerOptions{} } - return log.New(i.StandardWriterIntercept(opts), "", 0) + return log.New(i.StandardWriter(opts), "", 0) } func (i *interceptLogger) StandardWriterIntercept(opts *StandardLoggerOptions) io.Writer { + return i.StandardWriter(opts) +} + +func (i *interceptLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer { return &stdlogAdapter{ log: i, inferLevels: opts.InferLevels, diff --git a/vendor/github.com/hashicorp/go-hclog/intlogger.go b/vendor/github.com/hashicorp/go-hclog/intlogger.go index 7158125de2..d491ae8f97 100644 --- a/vendor/github.com/hashicorp/go-hclog/intlogger.go +++ b/vendor/github.com/hashicorp/go-hclog/intlogger.go @@ -10,7 +10,6 @@ import ( "log" "os" "reflect" - "regexp" "runtime" "sort" "strconv" @@ -22,10 +21,14 @@ import ( "github.com/fatih/color" ) -// TimeFormat to use for logging. 
This is a version of RFC3339 that contains -// contains millisecond precision +// TimeFormat is the time format to use for plain (non-JSON) output. +// This is a version of RFC3339 that contains millisecond precision. const TimeFormat = "2006-01-02T15:04:05.000Z0700" +// TimeFormatJSON is the time format to use for JSON output. +// This is a version of RFC3339 that contains microsecond precision. +const TimeFormatJSON = "2006-01-02T15:04:05.000000Z07:00" + // errJsonUnsupportedTypeMsg is included in log json entries, if an arg cannot be serialized to json const errJsonUnsupportedTypeMsg = "logging contained values that don't serialize to json" @@ -53,10 +56,11 @@ var _ Logger = &intLogger{} // intLogger is an internal logger implementation. Internal in that it is // defined entirely by this package. type intLogger struct { - json bool - caller bool - name string - timeFormat string + json bool + callerOffset int + name string + timeFormat string + disableTime bool // This is an interface so that it's shared by any derived loggers, since // those derived loggers share the bufio.Writer as well. @@ -67,6 +71,9 @@ type intLogger struct { implied []interface{} exclude func(level Level, msg string, args ...interface{}) bool + + // create subloggers with their own level setting + independentLevels bool } // New returns a configured logger. @@ -77,7 +84,12 @@ func New(opts *LoggerOptions) Logger { // NewSinkAdapter returns a SinkAdapter with configured settings // defined by LoggerOptions func NewSinkAdapter(opts *LoggerOptions) SinkAdapter { - return newLogger(opts) + l := newLogger(opts) + if l.callerOffset > 0 { + // extra frames for interceptLogger.{Warn,Info,Log,etc...}, and SinkAdapter.Accept + l.callerOffset += 2 + } + return l } func newLogger(opts *LoggerOptions) *intLogger { @@ -101,29 +113,38 @@ func newLogger(opts *LoggerOptions) *intLogger { } l := &intLogger{ - json: opts.JSONFormat, - caller: opts.IncludeLocation, - name: opts.Name, - timeFormat: TimeFormat, - mutex: mutex, - writer: newWriter(output, opts.Color), - level: new(int32), - exclude: opts.Exclude, + json: opts.JSONFormat, + name: opts.Name, + timeFormat: TimeFormat, + disableTime: opts.DisableTime, + mutex: mutex, + writer: newWriter(output, opts.Color), + level: new(int32), + exclude: opts.Exclude, + independentLevels: opts.IndependentLevels, + } + if opts.IncludeLocation { + l.callerOffset = offsetIntLogger + opts.AdditionalLocationOffset } - l.setColorization(opts) - - if opts.DisableTime { - l.timeFormat = "" - } else if opts.TimeFormat != "" { + if l.json { + l.timeFormat = TimeFormatJSON + } + if opts.TimeFormat != "" { l.timeFormat = opts.TimeFormat } + l.setColorization(opts) + atomic.StoreInt32(l.level, int32(level)) return l } +// offsetIntLogger is the stack frame offset in the call stack for the caller to +// one of the Warn,Info,Log,etc methods. +const offsetIntLogger = 3 + // Log a message and a set of key/value pairs if the given level is at // or more severe that the threshold configured in the Logger. 
func (l *intLogger) log(name string, level Level, msg string, args ...interface{}) { @@ -178,11 +199,10 @@ func trimCallerPath(path string) string { return path[idx+1:] } -var logImplFile = regexp.MustCompile(`.+intlogger.go|.+interceptlogger.go$`) - // Non-JSON logging format function func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string, args ...interface{}) { - if len(l.timeFormat) > 0 { + + if !l.disableTime { l.writer.WriteString(t.Format(l.timeFormat)) l.writer.WriteByte(' ') } @@ -194,18 +214,8 @@ func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string, l.writer.WriteString("[?????]") } - offset := 3 - if l.caller { - // Check if the caller is inside our package and inside - // a logger implementation file - if _, file, _, ok := runtime.Caller(3); ok { - match := logImplFile.MatchString(file) - if match { - offset = 4 - } - } - - if _, file, line, ok := runtime.Caller(offset); ok { + if l.callerOffset > 0 { + if _, file, line, ok := runtime.Caller(l.callerOffset); ok { l.writer.WriteByte(' ') l.writer.WriteString(trimCallerPath(file)) l.writer.WriteByte(':') @@ -251,6 +261,9 @@ func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string, switch st := args[i+1].(type) { case string: val = st + if st == "" { + val = `""` + } case int: val = strconv.FormatInt(int64(st), 10) case int64: @@ -282,6 +295,9 @@ func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string, continue FOR case Format: val = fmt.Sprintf(st[0].(string), st[1:]...) + case Quote: + raw = true + val = strconv.Quote(string(st)) default: v := reflect.ValueOf(st) if v.Kind() == reflect.Slice { @@ -292,20 +308,32 @@ func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string, } } - l.writer.WriteByte(' ') + var key string + switch st := args[i].(type) { case string: - l.writer.WriteString(st) + key = st default: - l.writer.WriteString(fmt.Sprintf("%s", st)) + key = fmt.Sprintf("%s", st) } - l.writer.WriteByte('=') - if !raw && strings.ContainsAny(val, " \t\n\r") { + if strings.Contains(val, "\n") { + l.writer.WriteString("\n ") + l.writer.WriteString(key) + l.writer.WriteString("=\n") + writeIndent(l.writer, val, " | ") + l.writer.WriteString(" ") + } else if !raw && strings.ContainsAny(val, " \t") { + l.writer.WriteByte(' ') + l.writer.WriteString(key) + l.writer.WriteByte('=') l.writer.WriteByte('"') l.writer.WriteString(val) l.writer.WriteByte('"') } else { + l.writer.WriteByte(' ') + l.writer.WriteString(key) + l.writer.WriteByte('=') l.writer.WriteString(val) } } @@ -315,6 +343,26 @@ func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string, if stacktrace != "" { l.writer.WriteString(string(stacktrace)) + l.writer.WriteString("\n") + } +} + +func writeIndent(w *writer, str string, indent string) { + for { + nl := strings.IndexByte(str, "\n"[0]) + if nl == -1 { + if str != "" { + w.WriteString(indent) + w.WriteString(str) + w.WriteString("\n") + } + return + } + + w.WriteString(indent) + w.WriteString(str[:nl]) + w.WriteString("\n") + str = str[nl+1:] } } @@ -334,22 +382,19 @@ func (l *intLogger) renderSlice(v reflect.Value) string { switch sv.Kind() { case reflect.String: - val = sv.String() + val = strconv.Quote(sv.String()) case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64: val = strconv.FormatInt(sv.Int(), 10) case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: val = strconv.FormatUint(sv.Uint(), 10) default: val = fmt.Sprintf("%v", sv.Interface()) + if 
strings.ContainsAny(val, " \t\n\r") { + val = strconv.Quote(val) + } } - if strings.ContainsAny(val, " \t\n\r") { - buf.WriteByte('"') - buf.WriteString(val) - buf.WriteByte('"') - } else { - buf.WriteString(val) - } + buf.WriteString(val) } buf.WriteRune(']') @@ -415,8 +460,10 @@ func (l *intLogger) logJSON(t time.Time, name string, level Level, msg string, a func (l intLogger) jsonMapEntry(t time.Time, name string, level Level, msg string) map[string]interface{} { vals := map[string]interface{}{ - "@message": msg, - "@timestamp": t.Format("2006-01-02T15:04:05.000000Z07:00"), + "@message": msg, + } + if !l.disableTime { + vals["@timestamp"] = t.Format(l.timeFormat) } var levelStr string @@ -441,8 +488,8 @@ func (l intLogger) jsonMapEntry(t time.Time, name string, level Level, msg strin vals["@module"] = name } - if l.caller { - if _, file, line, ok := runtime.Caller(4); ok { + if l.callerOffset > 0 { + if _, file, line, ok := runtime.Caller(l.callerOffset + 1); ok { vals["@caller"] = fmt.Sprintf("%s:%d", file, line) } } @@ -517,7 +564,7 @@ func (l *intLogger) With(args ...interface{}) Logger { args = args[:len(args)-1] } - sl := *l + sl := l.copy() result := make(map[string]interface{}, len(l.implied)+len(args)) keys := make([]string, 0, len(l.implied)+len(args)) @@ -551,13 +598,13 @@ func (l *intLogger) With(args ...interface{}) Logger { sl.implied = append(sl.implied, MissingKey, extra) } - return &sl + return sl } // Create a new sub-Logger that a name decending from the current name. // This is used to create a subsystem specific Logger. func (l *intLogger) Named(name string) Logger { - sl := *l + sl := l.copy() if sl.name != "" { sl.name = sl.name + "." + name @@ -565,18 +612,18 @@ func (l *intLogger) Named(name string) Logger { sl.name = name } - return &sl + return sl } // Create a new sub-Logger with an explicit name. This ignores the current // name. This is used to create a standalone logger that doesn't fall // within the normal hierarchy. func (l *intLogger) ResetNamed(name string) Logger { - sl := *l + sl := l.copy() sl.name = name - return &sl + return sl } func (l *intLogger) ResetOutput(opts *LoggerOptions) error { @@ -632,8 +679,15 @@ func (l *intLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger { } func (l *intLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer { + newLog := *l + if l.callerOffset > 0 { + // the stack is + // logger.printf() -> l.Output() ->l.out.writer(hclog:stdlogAdaptor.write) -> hclog:stdlogAdaptor.dispatch() + // So plus 4. + newLog.callerOffset = l.callerOffset + 4 + } return &stdlogAdapter{ - log: l, + log: &newLog, inferLevels: opts.InferLevels, forceLevel: opts.ForceLevel, } @@ -663,3 +717,16 @@ func (i *intLogger) ImpliedArgs() []interface{} { func (i *intLogger) Name() string { return i.name } + +// copy returns a shallow copy of the intLogger, replacing the level pointer +// when necessary +func (l *intLogger) copy() *intLogger { + sl := *l + + if l.independentLevels { + sl.level = new(int32) + *sl.level = *l.level + } + + return &sl +} diff --git a/vendor/github.com/hashicorp/go-hclog/logger.go b/vendor/github.com/hashicorp/go-hclog/logger.go index 8d5eed76e5..6a4665ba9f 100644 --- a/vendor/github.com/hashicorp/go-hclog/logger.go +++ b/vendor/github.com/hashicorp/go-hclog/logger.go @@ -38,6 +38,9 @@ const ( // Error information about unrecoverable events. Error Level = 5 + + // Off disables all logging output. + Off Level = 6 ) // Format is a simple convience type for when formatting is required. 
When @@ -64,6 +67,12 @@ type Octal int // text output. For example: L.Info("bits", Binary(17)) type Binary int +// A simple shortcut to format strings with Go quoting. Control and +// non-printable characters will be escaped with their backslash equivalents in +// output. Intended for untrusted or multiline strings which should be logged +// as concisely as possible. +type Quote string + // ColorOption expresses how the output should be colored, if at all. type ColorOption uint8 @@ -96,6 +105,8 @@ func LevelFromString(levelStr string) Level { return Warn case "error": return Error + case "off": + return Off default: return NoLevel } @@ -115,6 +126,8 @@ func (l Level) String() string { return "error" case NoLevel: return "none" + case Off: + return "off" default: return "unknown" } @@ -179,7 +192,8 @@ type Logger interface { // the current name as well. ResetNamed(name string) Logger - // Updates the level. This should affect all sub-loggers as well. If an + // Updates the level. This should affect all related loggers as well, + // unless they were created with IndependentLevels. If an // implementation cannot update the level on the fly, it should no-op. SetLevel(level Level) @@ -227,6 +241,10 @@ type LoggerOptions struct { // Include file and line information in each log line IncludeLocation bool + // AdditionalLocationOffset is the number of additional stack levels to skip + // when finding the file and line information for the log line + AdditionalLocationOffset int + // The time format to use instead of the default TimeFormat string @@ -243,6 +261,12 @@ type LoggerOptions struct { // This is useful when interacting with a system that you wish to suppress the log // message for (because it's too noisy, etc) Exclude func(level Level, msg string, args ...interface{}) bool + + // IndependentLevels causes subloggers to be created with an independent + // copy of this logger's level. This means that using SetLevel on this + // logger will not effect any subloggers, and SetLevel on any subloggers + // will not effect the parent or sibling loggers. + IndependentLevels bool } // InterceptLogger describes the interface for using a logger @@ -271,10 +295,10 @@ type InterceptLogger interface { // the current name as well. 
ResetNamedIntercept(name string) InterceptLogger - // Return a value that conforms to the stdlib log.Logger interface + // Deprecated: use StandardLogger StandardLoggerIntercept(opts *StandardLoggerOptions) *log.Logger - // Return a value that conforms to io.Writer, which can be passed into log.SetOutput() + // Deprecated: use StandardWriter StandardWriterIntercept(opts *StandardLoggerOptions) io.Writer } diff --git a/vendor/github.com/hashicorp/go-hclog/stdlog.go b/vendor/github.com/hashicorp/go-hclog/stdlog.go index f35d875d32..271d546d5c 100644 --- a/vendor/github.com/hashicorp/go-hclog/stdlog.go +++ b/vendor/github.com/hashicorp/go-hclog/stdlog.go @@ -64,7 +64,7 @@ func (s *stdlogAdapter) pickLevel(str string) (Level, string) { case strings.HasPrefix(str, "[INFO]"): return Info, strings.TrimSpace(str[6:]) case strings.HasPrefix(str, "[WARN]"): - return Warn, strings.TrimSpace(str[7:]) + return Warn, strings.TrimSpace(str[6:]) case strings.HasPrefix(str, "[ERROR]"): return Error, strings.TrimSpace(str[7:]) case strings.HasPrefix(str, "[ERR]"): diff --git a/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md b/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md index 6331af921b..86c6d03fba 100644 --- a/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md +++ b/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md @@ -1,3 +1,5 @@ +# UNRELEASED + # 1.3.0 (September 17th, 2020) FEATURES diff --git a/vendor/github.com/hashicorp/go-immutable-radix/iter.go b/vendor/github.com/hashicorp/go-immutable-radix/iter.go index cd16d3beae..f17d0a644f 100644 --- a/vendor/github.com/hashicorp/go-immutable-radix/iter.go +++ b/vendor/github.com/hashicorp/go-immutable-radix/iter.go @@ -20,7 +20,7 @@ func (i *Iterator) SeekPrefixWatch(prefix []byte) (watch <-chan struct{}) { watch = n.mutateCh search := prefix for { - // Check for key exhaution + // Check for key exhaustion if len(search) == 0 { i.node = n return @@ -60,10 +60,13 @@ func (i *Iterator) recurseMin(n *Node) *Node { if n.leaf != nil { return n } - if len(n.edges) > 0 { + nEdges := len(n.edges) + if nEdges > 1 { // Add all the other edges to the stack (the min node will be added as // we recurse) i.stack = append(i.stack, n.edges[1:]) + } + if nEdges > 0 { return i.recurseMin(n.edges[0].node) } // Shouldn't be possible @@ -77,16 +80,32 @@ func (i *Iterator) recurseMin(n *Node) *Node { func (i *Iterator) SeekLowerBound(key []byte) { // Wipe the stack. Unlike Prefix iteration, we need to build the stack as we // go because we need only a subset of edges of many nodes in the path to the - // leaf with the lower bound. + // leaf with the lower bound. Note that the iterator will still recurse into + // children that we don't traverse on the way to the reverse lower bound as it + // walks the stack. i.stack = []edges{} + // i.node starts off in the common case as pointing to the root node of the + // tree. By the time we return we have either found a lower bound and setup + // the stack to traverse all larger keys, or we have not and the stack and + // node should both be nil to prevent the iterator from assuming it is just + // iterating the whole tree from the root node. Either way this needs to end + // up as nil so just set it here. 
n := i.node + i.node = nil search := key found := func(n *Node) { - i.node = n i.stack = append(i.stack, edges{edge{node: n}}) } + findMin := func(n *Node) { + n = i.recurseMin(n) + if n != nil { + found(n) + return + } + } + for { // Compare current prefix with the search key's same-length prefix. var prefixCmp int @@ -100,10 +119,7 @@ func (i *Iterator) SeekLowerBound(key []byte) { // Prefix is larger, that means the lower bound is greater than the search // and from now on we need to follow the minimum path to the smallest // leaf under this subtree. - n = i.recurseMin(n) - if n != nil { - found(n) - } + findMin(n) return } @@ -115,27 +131,29 @@ func (i *Iterator) SeekLowerBound(key []byte) { } // Prefix is equal, we are still heading for an exact match. If this is a - // leaf we're done. - if n.leaf != nil { - if bytes.Compare(n.leaf.key, key) < 0 { - i.node = nil - return - } + // leaf and an exact match we're done. + if n.leaf != nil && bytes.Equal(n.leaf.key, key) { found(n) return } - // Consume the search prefix - if len(n.prefix) > len(search) { - search = []byte{} - } else { - search = search[len(n.prefix):] + // Consume the search prefix if the current node has one. Note that this is + // safe because if n.prefix is longer than the search slice prefixCmp would + // have been > 0 above and the method would have already returned. + search = search[len(n.prefix):] + + if len(search) == 0 { + // We've exhausted the search key, but the current node is not an exact + // match or not a leaf. That means that the leaf value if it exists, and + // all child nodes must be strictly greater, the smallest key in this + // subtree must be the lower bound. + findMin(n) + return } // Otherwise, take the lower bound next edge. idx, lbNode := n.getLowerBoundEdge(search[0]) if lbNode == nil { - i.node = nil return } @@ -144,7 +162,6 @@ func (i *Iterator) SeekLowerBound(key []byte) { i.stack = append(i.stack, n.edges[idx+1:]) } - i.node = lbNode // Recurse n = lbNode } diff --git a/vendor/github.com/hashicorp/go-immutable-radix/reverse_iter.go b/vendor/github.com/hashicorp/go-immutable-radix/reverse_iter.go index 762471bc36..554fa7129c 100644 --- a/vendor/github.com/hashicorp/go-immutable-radix/reverse_iter.go +++ b/vendor/github.com/hashicorp/go-immutable-radix/reverse_iter.go @@ -8,6 +8,16 @@ import ( // in reverse in-order type ReverseIterator struct { i *Iterator + + // expandedParents stores the set of parent nodes whose relevant children have + // already been pushed into the stack. This can happen during seek or during + // iteration. + // + // Unlike forward iteration we need to recurse into children before we can + // output the value stored in an internal leaf since all children are greater. + // We use this to track whether we have already ensured all the children are + // in the stack. 
+ expandedParents map[*Node]struct{} } // NewReverseIterator returns a new ReverseIterator at a node @@ -28,22 +38,6 @@ func (ri *ReverseIterator) SeekPrefix(prefix []byte) { ri.i.SeekPrefixWatch(prefix) } -func (ri *ReverseIterator) recurseMax(n *Node) *Node { - // Traverse to the maximum child - if n.leaf != nil { - return n - } - if len(n.edges) > 0 { - // Add all the other edges to the stack (the max node will be added as - // we recurse) - m := len(n.edges) - ri.i.stack = append(ri.i.stack, n.edges[:m-1]) - return ri.recurseMax(n.edges[m-1].node) - } - // Shouldn't be possible - return nil -} - // SeekReverseLowerBound is used to seek the iterator to the largest key that is // lower or equal to the given key. There is no watch variant as it's hard to // predict based on the radix structure which node(s) changes might affect the @@ -51,14 +45,32 @@ func (ri *ReverseIterator) recurseMax(n *Node) *Node { func (ri *ReverseIterator) SeekReverseLowerBound(key []byte) { // Wipe the stack. Unlike Prefix iteration, we need to build the stack as we // go because we need only a subset of edges of many nodes in the path to the - // leaf with the lower bound. + // leaf with the lower bound. Note that the iterator will still recurse into + // children that we don't traverse on the way to the reverse lower bound as it + // walks the stack. ri.i.stack = []edges{} + // ri.i.node starts off in the common case as pointing to the root node of the + // tree. By the time we return we have either found a lower bound and setup + // the stack to traverse all larger keys, or we have not and the stack and + // node should both be nil to prevent the iterator from assuming it is just + // iterating the whole tree from the root node. Either way this needs to end + // up as nil so just set it here. n := ri.i.node + ri.i.node = nil search := key + if ri.expandedParents == nil { + ri.expandedParents = make(map[*Node]struct{}) + } + found := func(n *Node) { - ri.i.node = n ri.i.stack = append(ri.i.stack, edges{edge{node: n}}) + // We need to mark this node as expanded in advance too otherwise the + // iterator will attempt to walk all of its children even though they are + // greater than the lower bound we have found. We've expanded it in the + // sense that all of its children that we want to walk are already in the + // stack (i.e. none of them). + ri.expandedParents[n] = struct{}{} } for { @@ -71,41 +83,73 @@ func (ri *ReverseIterator) SeekReverseLowerBound(key []byte) { } if prefixCmp < 0 { - // Prefix is smaller than search prefix, that means there is no lower bound. - // But we are looking in reverse, so the reverse lower bound will be the - // largest leaf under this subtree, since it is the value that would come - // right before the current search prefix if it were in the tree. So we need - // to follow the maximum path in this subtree to find it. - n = ri.recurseMax(n) - if n != nil { - found(n) - } + // Prefix is smaller than search prefix, that means there is no exact + // match for the search key. But we are looking in reverse, so the reverse + // lower bound will be the largest leaf under this subtree, since it is + // the value that would come right before the current search key if it + // were in the tree. So we need to follow the maximum path in this subtree + // to find it. 
Note that this is exactly what the iterator will already do + // if it finds a node in the stack that has _not_ been marked as expanded + // so in this one case we don't call `found` and instead let the iterator + // do the expansion and recursion through all the children. + ri.i.stack = append(ri.i.stack, edges{edge{node: n}}) return } if prefixCmp > 0 { - // Prefix is larger than search prefix, that means there is no reverse lower - // bound since nothing comes before our current search prefix. - ri.i.node = nil + // Prefix is larger than search prefix, or there is no prefix but we've + // also exhausted the search key. Either way, that means there is no + // reverse lower bound since nothing comes before our current search + // prefix. return } - // Prefix is equal, we are still heading for an exact match. If this is a - // leaf we're done. - if n.leaf != nil { - if bytes.Compare(n.leaf.key, key) < 0 { - ri.i.node = nil + // If this is a leaf, something needs to happen! Note that if it's a leaf + // and prefixCmp was zero (which it must be to get here) then the leaf value + // is either an exact match for the search, or it's lower. It can't be + // greater. + if n.isLeaf() { + + // Firstly, if it's an exact match, we're done! + if bytes.Equal(n.leaf.key, key) { + found(n) return } - found(n) - return + + // It's not so this node's leaf value must be lower and could still be a + // valid contender for reverse lower bound. + + // If it has no children then we are also done. + if len(n.edges) == 0 { + // This leaf is the lower bound. + found(n) + return + } + + // Finally, this leaf is internal (has children) so we'll keep searching, + // but we need to add it to the iterator's stack since it has a leaf value + // that needs to be iterated over. It needs to be added to the stack + // before its children below as it comes first. + ri.i.stack = append(ri.i.stack, edges{edge{node: n}}) + // We also need to mark it as expanded since we'll be adding any of its + // relevant children below and so don't want the iterator to re-add them + // on its way back up the stack. + ri.expandedParents[n] = struct{}{} } - // Consume the search prefix - if len(n.prefix) > len(search) { - search = []byte{} - } else { - search = search[len(n.prefix):] + // Consume the search prefix. Note that this is safe because if n.prefix is + // longer than the search slice prefixCmp would have been > 0 above and the + // method would have already returned. + search = search[len(n.prefix):] + + if len(search) == 0 { + // We've exhausted the search key but we are not at a leaf. That means all + // children are greater than the search key so a reverse lower bound + // doesn't exist in this subtree. Note that there might still be one in + // the whole radix tree by following a different path somewhere further + // up. If that's the case then the iterator's stack will contain all the + // smaller nodes already and Previous will walk through them correctly. + return } // Otherwise, take the lower bound next edge. @@ -125,14 +169,12 @@ func (ri *ReverseIterator) SeekReverseLowerBound(key []byte) { ri.i.stack = append(ri.i.stack, n.edges[:idx]) } - // Exit if there's not lower bound edge. The stack will have the - // previous nodes already. + // Exit if there's no lower bound edge. The stack will have the previous + // nodes already. 
if lbNode == nil { - ri.i.node = nil return } - ri.i.node = lbNode // Recurse n = lbNode } @@ -149,6 +191,10 @@ func (ri *ReverseIterator) Previous() ([]byte, interface{}, bool) { } } + if ri.expandedParents == nil { + ri.expandedParents = make(map[*Node]struct{}) + } + for len(ri.i.stack) > 0 { // Inspect the last element of the stack n := len(ri.i.stack) @@ -156,22 +202,38 @@ func (ri *ReverseIterator) Previous() ([]byte, interface{}, bool) { m := len(last) elem := last[m-1].node - // Update the stack + _, alreadyExpanded := ri.expandedParents[elem] + + // If this is an internal node and we've not seen it already, we need to + // leave it in the stack so we can return its possible leaf value _after_ + // we've recursed through all its children. + if len(elem.edges) > 0 && !alreadyExpanded { + // record that we've seen this node! + ri.expandedParents[elem] = struct{}{} + // push child edges onto stack and skip the rest of the loop to recurse + // into the largest one. + ri.i.stack = append(ri.i.stack, elem.edges) + continue + } + + // Remove the node from the stack if m > 1 { ri.i.stack[n-1] = last[:m-1] } else { ri.i.stack = ri.i.stack[:n-1] } - - // Push the edges onto the frontier - if len(elem.edges) > 0 { - ri.i.stack = append(ri.i.stack, elem.edges) + // We don't need this state any more as it's no longer in the stack so we + // won't visit it again + if alreadyExpanded { + delete(ri.expandedParents, elem) } - // Return the leaf values if any + // If this is a leaf, return it if elem.leaf != nil { return elem.leaf.key, elem.leaf.val, true } + + // it's not a leaf so keep walking the stack to find the previous leaf } return nil, nil, false } diff --git a/vendor/github.com/hashicorp/go-multierror/README.md b/vendor/github.com/hashicorp/go-multierror/README.md index e92fa614cd..71dd308ed8 100644 --- a/vendor/github.com/hashicorp/go-multierror/README.md +++ b/vendor/github.com/hashicorp/go-multierror/README.md @@ -1,10 +1,11 @@ # go-multierror -[![Build Status](http://img.shields.io/travis/hashicorp/go-multierror.svg?style=flat-square)][travis] -[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs] +[![CircleCI](https://img.shields.io/circleci/build/github/hashicorp/go-multierror/master)](https://circleci.com/gh/hashicorp/go-multierror) +[![Go Reference](https://pkg.go.dev/badge/github.com/hashicorp/go-multierror.svg)](https://pkg.go.dev/github.com/hashicorp/go-multierror) +![GitHub go.mod Go version](https://img.shields.io/github/go-mod/go-version/hashicorp/go-multierror) -[travis]: https://travis-ci.org/hashicorp/go-multierror -[godocs]: https://godoc.org/github.com/hashicorp/go-multierror +[circleci]: https://app.circleci.com/pipelines/github/hashicorp/go-multierror +[godocs]: https://pkg.go.dev/github.com/hashicorp/go-multierror `go-multierror` is a package for Go that provides a mechanism for representing a list of `error` values as a single `error`. @@ -24,7 +25,25 @@ for introspecting on error values. Install using `go get github.com/hashicorp/go-multierror`. Full documentation is available at -http://godoc.org/github.com/hashicorp/go-multierror +https://pkg.go.dev/github.com/hashicorp/go-multierror + +### Requires go version 1.13 or newer + +`go-multierror` requires go version 1.13 or newer. Go 1.13 introduced +[error wrapping](https://golang.org/doc/go1.13#error_wrapping), which +this library takes advantage of. 
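As a rough sketch of what that error-wrapping support enables (assuming the vendored copy is go-multierror v1.1.0 or later, where `*multierror.Error` implements `Unwrap()`), the standard `errors.Is` helper can match a sentinel error that was collected into a multierror:

```go
package main

import (
	"errors"
	"fmt"
	"os"

	multierror "github.com/hashicorp/go-multierror"
)

func main() {
	var result error

	// Collect several failures into a single *multierror.Error.
	result = multierror.Append(result, os.ErrNotExist)
	result = multierror.Append(result, errors.New("second failure"))

	// With Go 1.13 error wrapping, errors.Is walks the wrapped errors,
	// so the sentinel inside the multierror is still discoverable.
	if errors.Is(result, os.ErrNotExist) {
		fmt.Println("os.ErrNotExist is among the collected errors")
	}
}
```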
+ +If you need to use an earlier version of go, you can use the +[v1.0.0](https://github.com/hashicorp/go-multierror/tree/v1.0.0) +tag, which doesn't rely on features in go 1.13. + +If you see compile errors that look like the below, it's likely that +you're on an older version of go: + +``` +/go/src/github.com/hashicorp/go-multierror/multierror.go:112:9: undefined: errors.As +/go/src/github.com/hashicorp/go-multierror/multierror.go:117:9: undefined: errors.Is +``` ## Usage diff --git a/vendor/github.com/hashicorp/go-multierror/append.go b/vendor/github.com/hashicorp/go-multierror/append.go index 775b6e753e..3e2589bfde 100644 --- a/vendor/github.com/hashicorp/go-multierror/append.go +++ b/vendor/github.com/hashicorp/go-multierror/append.go @@ -6,6 +6,8 @@ package multierror // If err is not a multierror.Error, then it will be turned into // one. If any of the errs are multierr.Error, they will be flattened // one level into err. +// Any nil errors within errs will be ignored. If err is nil, a new +// *Error will be returned. func Append(err error, errs ...error) *Error { switch err := err.(type) { case *Error: diff --git a/vendor/github.com/hashicorp/go-multierror/go.mod b/vendor/github.com/hashicorp/go-multierror/go.mod index 0afe8e6f9d..141cc4ccb2 100644 --- a/vendor/github.com/hashicorp/go-multierror/go.mod +++ b/vendor/github.com/hashicorp/go-multierror/go.mod @@ -1,5 +1,5 @@ module github.com/hashicorp/go-multierror -go 1.14 +go 1.13 require github.com/hashicorp/errwrap v1.0.0 diff --git a/vendor/github.com/hashicorp/go-multierror/multierror.go b/vendor/github.com/hashicorp/go-multierror/multierror.go index d05dd92698..f545743264 100644 --- a/vendor/github.com/hashicorp/go-multierror/multierror.go +++ b/vendor/github.com/hashicorp/go-multierror/multierror.go @@ -40,14 +40,17 @@ func (e *Error) GoString() string { return fmt.Sprintf("*%#v", *e) } -// WrappedErrors returns the list of errors that this Error is wrapping. -// It is an implementation of the errwrap.Wrapper interface so that -// multierror.Error can be used with that library. +// WrappedErrors returns the list of errors that this Error is wrapping. It is +// an implementation of the errwrap.Wrapper interface so that multierror.Error +// can be used with that library. // -// This method is not safe to be called concurrently and is no different -// than accessing the Errors field directly. It is implemented only to -// satisfy the errwrap.Wrapper interface. +// This method is not safe to be called concurrently. Unlike accessing the +// Errors field directly, this function also checks if the multierror is nil to +// prevent a null-pointer panic. It satisfies the errwrap.Wrapper interface. func (e *Error) WrappedErrors() []error { + if e == nil { + return nil + } return e.Errors } diff --git a/vendor/github.com/hashicorp/go-plugin/.gitignore b/vendor/github.com/hashicorp/go-plugin/.gitignore new file mode 100644 index 0000000000..4befed30a1 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/.gitignore @@ -0,0 +1,2 @@ +.DS_Store +.idea diff --git a/vendor/github.com/hashicorp/go-plugin/LICENSE b/vendor/github.com/hashicorp/go-plugin/LICENSE new file mode 100644 index 0000000000..82b4de97c7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/LICENSE @@ -0,0 +1,353 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor†+ + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. 
“Contributor Version†+ + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution†+ + means Covered Software of a particular Contributor. + +1.4. “Covered Software†+ + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses†+ means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form†+ + means any form of the work other than Source Code Form. + +1.7. “Larger Work†+ + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License†+ + means this document. + +1.9. “Licensable†+ + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications†+ + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims†of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License†+ + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form†+ + means the form of the work preferred for making modifications. + +1.14. “You†(or “Yourâ€) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You†includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control†means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. 
Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. 
If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. 
+ +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is†basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. 
Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses†Notice + + This Source Code Form is “Incompatible + With Secondary Licensesâ€, as defined by + the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/go-plugin/README.md b/vendor/github.com/hashicorp/go-plugin/README.md new file mode 100644 index 0000000000..46ee09fc0c --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/README.md @@ -0,0 +1,163 @@ +# Go Plugin System over RPC + +`go-plugin` is a Go (golang) plugin system over RPC. It is the plugin system +that has been in use by HashiCorp tooling for over 4 years. While initially +created for [Packer](https://www.packer.io), it is additionally in use by +[Terraform](https://www.terraform.io), [Nomad](https://www.nomadproject.io), and +[Vault](https://www.vaultproject.io). + +While the plugin system is over RPC, it is currently only designed to work +over a local [reliable] network. Plugins over a real network are not supported +and will lead to unexpected behavior. + +This plugin system has been used on millions of machines across many different +projects and has proven to be battle hardened and ready for production use. + +## Features + +The HashiCorp plugin system supports a number of features: + +**Plugins are Go interface implementations.** This makes writing and consuming +plugins feel very natural. To a plugin author: you just implement an +interface as if it were going to run in the same process. For a plugin user: +you just use and call functions on an interface as if it were in the same +process. This plugin system handles the communication in between. + +**Cross-language support.** Plugins can be written (and consumed) by +almost every major language. This library supports serving plugins via +[gRPC](http://www.grpc.io). gRPC-based plugins enable plugins to be written +in any language. + +**Complex arguments and return values are supported.** This library +provides APIs for handling complex arguments and return values such +as interfaces, `io.Reader/Writer`, etc. We do this by giving you a library +(`MuxBroker`) for creating new connections between the client/server to +serve additional interfaces or transfer raw data. 
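
To make the first feature above concrete — plugins as ordinary Go interface implementations — here is a minimal, hedged sketch of the shared wiring a host and a plugin binary would both compile against. The `Greeter` names are illustrative and not part of this patch; the sketch assumes go-plugin's net/rpc `Plugin` interface (`Server`/`Client` methods taking a `*MuxBroker`) and the `"Plugin"` service name its RPC layer registers.

```go
// Package shared would be imported by both the host and the plugin binary.
package shared

import (
	"net/rpc"

	"github.com/hashicorp/go-plugin"
)

// Greeter is the interface the host wants plugins to implement.
// The name is illustrative only.
type Greeter interface {
	Greet(name string) (string, error)
}

// GreeterRPC is what the host receives from Dispense: it forwards
// calls over the net/rpc connection managed by go-plugin.
type GreeterRPC struct{ client *rpc.Client }

func (g *GreeterRPC) Greet(name string) (string, error) {
	var resp string
	err := g.client.Call("Plugin.Greet", name, &resp)
	return resp, err
}

// GreeterRPCServer runs inside the plugin process and delegates to the
// real implementation.
type GreeterRPCServer struct{ Impl Greeter }

func (s *GreeterRPCServer) Greet(name string, resp *string) error {
	reply, err := s.Impl.Greet(name)
	*resp = reply
	return err
}

// GreeterPlugin is the plugin.Plugin glue that tells go-plugin how to
// build the client and server halves for this interface.
type GreeterPlugin struct{ Impl Greeter }

func (p *GreeterPlugin) Server(*plugin.MuxBroker) (interface{}, error) {
	return &GreeterRPCServer{Impl: p.Impl}, nil
}

func (p *GreeterPlugin) Client(b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) {
	return &GreeterRPC{client: c}, nil
}
```

A plugin binary would then expose a `GreeterPlugin` via `plugin.Serve` with a matching handshake configuration, and the host would dispense it through `plugin.NewClient`, which is exactly the flow the Usage steps below walk through.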
+ +**Bidirectional communication.** Because the plugin system supports +complex arguments, the host process can send it interface implementations +and the plugin can call back into the host process. + +**Built-in Logging.** Any plugins that use the `log` standard library +will have log data automatically sent to the host process. The host +process will mirror this output prefixed with the path to the plugin +binary. This makes debugging with plugins simple. If the host system +uses [hclog](https://github.com/hashicorp/go-hclog) then the log data +will be structured. If the plugin also uses hclog, logs from the plugin +will be sent to the host hclog and be structured. + +**Protocol Versioning.** A very basic "protocol version" is supported that +can be incremented to invalidate any previous plugins. This is useful when +interface signatures are changing, protocol level changes are necessary, +etc. When a protocol version is incompatible, a human friendly error +message is shown to the end user. + +**Stdout/Stderr Syncing.** While plugins are subprocesses, they can continue +to use stdout/stderr as usual and the output will get mirrored back to +the host process. The host process can control what `io.Writer` these +streams go to to prevent this from happening. + +**TTY Preservation.** Plugin subprocesses are connected to the identical +stdin file descriptor as the host process, allowing software that requires +a TTY to work. For example, a plugin can execute `ssh` and even though there +are multiple subprocesses and RPC happening, it will look and act perfectly +to the end user. + +**Host upgrade while a plugin is running.** Plugins can be "reattached" +so that the host process can be upgraded while the plugin is still running. +This requires the host/plugin to know this is possible and daemonize +properly. `NewClient` takes a `ReattachConfig` to determine if and how to +reattach. + +**Cryptographically Secure Plugins.** Plugins can be verified with an expected +checksum and RPC communications can be configured to use TLS. The host process +must be properly secured to protect this configuration. + +## Architecture + +The HashiCorp plugin system works by launching subprocesses and communicating +over RPC (using standard `net/rpc` or [gRPC](http://www.grpc.io)). A single +connection is made between any plugin and the host process. For net/rpc-based +plugins, we use a [connection multiplexing](https://github.com/hashicorp/yamux) +library to multiplex any other connections on top. For gRPC-based plugins, +the HTTP2 protocol handles multiplexing. + +This architecture has a number of benefits: + + * Plugins can't crash your host process: A panic in a plugin doesn't + panic the plugin user. + + * Plugins are very easy to write: just write a Go application and `go build`. + Or use any other language to write a gRPC server with a tiny amount of + boilerplate to support go-plugin. + + * Plugins are very easy to install: just put the binary in a location where + the host will find it (depends on the host but this library also provides + helpers), and the plugin host handles the rest. + + * Plugins can be relatively secure: The plugin only has access to the + interfaces and args given to it, not to the entire memory space of the + process. Additionally, go-plugin can communicate with the plugin over + TLS. + +## Usage + +To use the plugin system, you must take the following steps. These are +high-level steps that must be done. Examples are available in the +`examples/` directory. + + 1. 
Choose the interface(s) you want to expose for plugins. + + 2. For each interface, implement an implementation of that interface + that communicates over a `net/rpc` connection or over a + [gRPC](http://www.grpc.io) connection or both. You'll have to implement + both a client and server implementation. + + 3. Create a `Plugin` implementation that knows how to create the RPC + client/server for a given plugin type. + + 4. Plugin authors call `plugin.Serve` to serve a plugin from the + `main` function. + + 5. Plugin users use `plugin.Client` to launch a subprocess and request + an interface implementation over RPC. + +That's it! In practice, step 2 is the most tedious and time consuming step. +Even so, it isn't very difficult and you can see examples in the `examples/` +directory as well as throughout our various open source projects. + +For complete API documentation, see [GoDoc](https://godoc.org/github.com/hashicorp/go-plugin). + +## Roadmap + +Our plugin system is constantly evolving. As we use the plugin system for +new projects or for new features in existing projects, we constantly find +improvements we can make. + +At this point in time, the roadmap for the plugin system is: + +**Semantic Versioning.** Plugins will be able to implement a semantic version. +This plugin system will give host processes a system for constraining +versions. This is in addition to the protocol versioning already present +which is more for larger underlying changes. + +## What About Shared Libraries? + +When we started using plugins (late 2012, early 2013), plugins over RPC +were the only option since Go didn't support dynamic library loading. Today, +Go supports the [plugin](https://golang.org/pkg/plugin/) standard library with +a number of limitations. Since 2012, our plugin system has stabilized +from tens of millions of users using it, and has many benefits we've come to +value greatly. + +For example, we use this plugin system in +[Vault](https://www.vaultproject.io) where dynamic library loading is +not acceptable for security reasons. That is an extreme +example, but we believe our library system has more upsides than downsides +over dynamic library loading and since we've had it built and tested for years, +we'll continue to use it. + +Shared libraries have one major advantage over our system which is much +higher performance. In real world scenarios across our various tools, +we've never required any more performance out of our plugin system and it +has seen very high throughput, so this isn't a concern for us at the moment. diff --git a/vendor/github.com/hashicorp/go-plugin/client.go b/vendor/github.com/hashicorp/go-plugin/client.go new file mode 100644 index 0000000000..67dca88357 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/client.go @@ -0,0 +1,1048 @@ +package plugin + +import ( + "bufio" + "context" + "crypto/subtle" + "crypto/tls" + "crypto/x509" + "encoding/base64" + "errors" + "fmt" + "hash" + "io" + "io/ioutil" + "net" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/hashicorp/go-hclog" + "google.golang.org/grpc" +) + +// If this is 1, then we've called CleanupClients. This can be used +// by plugin RPC implementations to change error behavior since you +// can expected network connection errors at this point. This should be +// read by using sync/atomic. 
+var Killed uint32 = 0 + +// This is a slice of the "managed" clients which are cleaned up when +// calling Cleanup +var managedClients = make([]*Client, 0, 5) +var managedClientsLock sync.Mutex + +// Error types +var ( + // ErrProcessNotFound is returned when a client is instantiated to + // reattach to an existing process and it isn't found. + ErrProcessNotFound = errors.New("Reattachment process not found") + + // ErrChecksumsDoNotMatch is returned when binary's checksum doesn't match + // the one provided in the SecureConfig. + ErrChecksumsDoNotMatch = errors.New("checksums did not match") + + // ErrSecureNoChecksum is returned when an empty checksum is provided to the + // SecureConfig. + ErrSecureConfigNoChecksum = errors.New("no checksum provided") + + // ErrSecureNoHash is returned when a nil Hash object is provided to the + // SecureConfig. + ErrSecureConfigNoHash = errors.New("no hash implementation provided") + + // ErrSecureConfigAndReattach is returned when both Reattach and + // SecureConfig are set. + ErrSecureConfigAndReattach = errors.New("only one of Reattach or SecureConfig can be set") +) + +// Client handles the lifecycle of a plugin application. It launches +// plugins, connects to them, dispenses interface implementations, and handles +// killing the process. +// +// Plugin hosts should use one Client for each plugin executable. To +// dispense a plugin type, use the `Client.Client` function, and then +// cal `Dispense`. This awkward API is mostly historical but is used to split +// the client that deals with subprocess management and the client that +// does RPC management. +// +// See NewClient and ClientConfig for using a Client. +type Client struct { + config *ClientConfig + exited bool + l sync.Mutex + address net.Addr + process *os.Process + client ClientProtocol + protocol Protocol + logger hclog.Logger + doneCtx context.Context + ctxCancel context.CancelFunc + negotiatedVersion int + + // clientWaitGroup is used to manage the lifecycle of the plugin management + // goroutines. + clientWaitGroup sync.WaitGroup + + // stderrWaitGroup is used to prevent the command's Wait() function from + // being called before we've finished reading from the stderr pipe. + stderrWaitGroup sync.WaitGroup + + // processKilled is used for testing only, to flag when the process was + // forcefully killed. + processKilled bool +} + +// NegotiatedVersion returns the protocol version negotiated with the server. +// This is only valid after Start() is called. +func (c *Client) NegotiatedVersion() int { + return c.negotiatedVersion +} + +// ClientConfig is the configuration used to initialize a new +// plugin client. After being used to initialize a plugin client, +// that configuration must not be modified again. +type ClientConfig struct { + // HandshakeConfig is the configuration that must match servers. + HandshakeConfig + + // Plugins are the plugins that can be consumed. + // The implied version of this PluginSet is the Handshake.ProtocolVersion. + Plugins PluginSet + + // VersionedPlugins is a map of PluginSets for specific protocol versions. + // These can be used to negotiate a compatible version between client and + // server. If this is set, Handshake.ProtocolVersion is not required. + VersionedPlugins map[int]PluginSet + + // One of the following must be set, but not both. + // + // Cmd is the unstarted subprocess for starting the plugin. If this is + // set, then the Client starts the plugin process on its own and connects + // to it. 
+ // + // Reattach is configuration for reattaching to an existing plugin process + // that is already running. This isn't common. + Cmd *exec.Cmd + Reattach *ReattachConfig + + // SecureConfig is configuration for verifying the integrity of the + // executable. It can not be used with Reattach. + SecureConfig *SecureConfig + + // TLSConfig is used to enable TLS on the RPC client. + TLSConfig *tls.Config + + // Managed represents if the client should be managed by the + // plugin package or not. If true, then by calling CleanupClients, + // it will automatically be cleaned up. Otherwise, the client + // user is fully responsible for making sure to Kill all plugin + // clients. By default the client is _not_ managed. + Managed bool + + // The minimum and maximum port to use for communicating with + // the subprocess. If not set, this defaults to 10,000 and 25,000 + // respectively. + MinPort, MaxPort uint + + // StartTimeout is the timeout to wait for the plugin to say it + // has started successfully. + StartTimeout time.Duration + + // If non-nil, then the stderr of the client will be written to here + // (as well as the log). This is the original os.Stderr of the subprocess. + // This isn't the output of synced stderr. + Stderr io.Writer + + // SyncStdout, SyncStderr can be set to override the + // respective os.Std* values in the plugin. Care should be taken to + // avoid races here. If these are nil, then this will be set to + // ioutil.Discard. + SyncStdout io.Writer + SyncStderr io.Writer + + // AllowedProtocols is a list of allowed protocols. If this isn't set, + // then only netrpc is allowed. This is so that older go-plugin systems + // can show friendly errors if they see a plugin with an unknown + // protocol. + // + // By setting this, you can cause an error immediately on plugin start + // if an unsupported protocol is used with a good error message. + // + // If this isn't set at all (nil value), then only net/rpc is accepted. + // This is done for legacy reasons. You must explicitly opt-in to + // new protocols. + AllowedProtocols []Protocol + + // Logger is the logger that the client will used. If none is provided, + // it will default to hclog's default logger. + Logger hclog.Logger + + // AutoMTLS has the client and server automatically negotiate mTLS for + // transport authentication. This ensures that only the original client will + // be allowed to connect to the server, and all other connections will be + // rejected. The client will also refuse to connect to any server that isn't + // the original instance started by the client. + // + // In this mode of operation, the client generates a one-time use tls + // certificate, sends the public x.509 certificate to the new server, and + // the server generates a one-time use tls certificate, and sends the public + // x.509 certificate back to the client. These are used to authenticate all + // rpc connections between the client and server. + // + // Setting AutoMTLS to true implies that the server must support the + // protocol, and correctly negotiate the tls certificates, or a connection + // failure will result. + // + // The client should not set TLSConfig, nor should the server set a + // TLSProvider, because AutoMTLS implies that a new certificate and tls + // configuration will be generated at startup. + // + // You cannot Reattach to a server with this option enabled. + AutoMTLS bool + + // GRPCDialOptions allows plugin users to pass custom grpc.DialOption + // to create gRPC connections. 
This only affects plugins using the gRPC + // protocol. + GRPCDialOptions []grpc.DialOption +} + +// ReattachConfig is used to configure a client to reattach to an +// already-running plugin process. You can retrieve this information by +// calling ReattachConfig on Client. +type ReattachConfig struct { + Protocol Protocol + ProtocolVersion int + Addr net.Addr + Pid int + + // Test is set to true if this is reattaching to to a plugin in "test mode" + // (see ServeConfig.Test). In this mode, client.Kill will NOT kill the + // process and instead will rely on the plugin to terminate itself. This + // should not be used in non-test environments. + Test bool +} + +// SecureConfig is used to configure a client to verify the integrity of an +// executable before running. It does this by verifying the checksum is +// expected. Hash is used to specify the hashing method to use when checksumming +// the file. The configuration is verified by the client by calling the +// SecureConfig.Check() function. +// +// The host process should ensure the checksum was provided by a trusted and +// authoritative source. The binary should be installed in such a way that it +// can not be modified by an unauthorized user between the time of this check +// and the time of execution. +type SecureConfig struct { + Checksum []byte + Hash hash.Hash +} + +// Check takes the filepath to an executable and returns true if the checksum of +// the file matches the checksum provided in the SecureConfig. +func (s *SecureConfig) Check(filePath string) (bool, error) { + if len(s.Checksum) == 0 { + return false, ErrSecureConfigNoChecksum + } + + if s.Hash == nil { + return false, ErrSecureConfigNoHash + } + + file, err := os.Open(filePath) + if err != nil { + return false, err + } + defer file.Close() + + _, err = io.Copy(s.Hash, file) + if err != nil { + return false, err + } + + sum := s.Hash.Sum(nil) + + return subtle.ConstantTimeCompare(sum, s.Checksum) == 1, nil +} + +// This makes sure all the managed subprocesses are killed and properly +// logged. This should be called before the parent process running the +// plugins exits. +// +// This must only be called _once_. +func CleanupClients() { + // Set the killed to true so that we don't get unexpected panics + atomic.StoreUint32(&Killed, 1) + + // Kill all the managed clients in parallel and use a WaitGroup + // to wait for them all to finish up. + var wg sync.WaitGroup + managedClientsLock.Lock() + for _, client := range managedClients { + wg.Add(1) + + go func(client *Client) { + client.Kill() + wg.Done() + }(client) + } + managedClientsLock.Unlock() + + wg.Wait() +} + +// Creates a new plugin client which manages the lifecycle of an external +// plugin and gets the address for the RPC connection. +// +// The client must be cleaned up at some point by calling Kill(). If +// the client is a managed client (created with NewManagedClient) you +// can just call CleanupClients at the end of your program and they will +// be properly cleaned. 
+func NewClient(config *ClientConfig) (c *Client) { + if config.MinPort == 0 && config.MaxPort == 0 { + config.MinPort = 10000 + config.MaxPort = 25000 + } + + if config.StartTimeout == 0 { + config.StartTimeout = 1 * time.Minute + } + + if config.Stderr == nil { + config.Stderr = ioutil.Discard + } + + if config.SyncStdout == nil { + config.SyncStdout = ioutil.Discard + } + if config.SyncStderr == nil { + config.SyncStderr = ioutil.Discard + } + + if config.AllowedProtocols == nil { + config.AllowedProtocols = []Protocol{ProtocolNetRPC} + } + + if config.Logger == nil { + config.Logger = hclog.New(&hclog.LoggerOptions{ + Output: hclog.DefaultOutput, + Level: hclog.Trace, + Name: "plugin", + }) + } + + c = &Client{ + config: config, + logger: config.Logger, + } + if config.Managed { + managedClientsLock.Lock() + managedClients = append(managedClients, c) + managedClientsLock.Unlock() + } + + return +} + +// Client returns the protocol client for this connection. +// +// Subsequent calls to this will return the same client. +func (c *Client) Client() (ClientProtocol, error) { + _, err := c.Start() + if err != nil { + return nil, err + } + + c.l.Lock() + defer c.l.Unlock() + + if c.client != nil { + return c.client, nil + } + + switch c.protocol { + case ProtocolNetRPC: + c.client, err = newRPCClient(c) + + case ProtocolGRPC: + c.client, err = newGRPCClient(c.doneCtx, c) + + default: + return nil, fmt.Errorf("unknown server protocol: %s", c.protocol) + } + + if err != nil { + c.client = nil + return nil, err + } + + return c.client, nil +} + +// Tells whether or not the underlying process has exited. +func (c *Client) Exited() bool { + c.l.Lock() + defer c.l.Unlock() + return c.exited +} + +// killed is used in tests to check if a process failed to exit gracefully, and +// needed to be killed. +func (c *Client) killed() bool { + c.l.Lock() + defer c.l.Unlock() + return c.processKilled +} + +// End the executing subprocess (if it is running) and perform any cleanup +// tasks necessary such as capturing any remaining logs and so on. +// +// This method blocks until the process successfully exits. +// +// This method can safely be called multiple times. +func (c *Client) Kill() { + // Grab a lock to read some private fields. + c.l.Lock() + process := c.process + addr := c.address + c.l.Unlock() + + // If there is no process, there is nothing to kill. + if process == nil { + return + } + + defer func() { + // Wait for the all client goroutines to finish. + c.clientWaitGroup.Wait() + + // Make sure there is no reference to the old process after it has been + // killed. + c.l.Lock() + c.process = nil + c.l.Unlock() + }() + + // We need to check for address here. It is possible that the plugin + // started (process != nil) but has no address (addr == nil) if the + // plugin failed at startup. If we do have an address, we need to close + // the plugin net connections. + graceful := false + if addr != nil { + // Close the client to cleanly exit the process. + client, err := c.Client() + if err == nil { + err = client.Close() + + // If there is no error, then we attempt to wait for a graceful + // exit. If there was an error, we assume that graceful cleanup + // won't happen and just force kill. + graceful = err == nil + if err != nil { + // If there was an error just log it. We're going to force + // kill in a moment anyways. 
+ c.logger.Warn("error closing client during Kill", "err", err) + } + } else { + c.logger.Error("client", "error", err) + } + } + + // If we're attempting a graceful exit, then we wait for a short period + // of time to allow that to happen. To wait for this we just wait on the + // doneCh which would be closed if the process exits. + if graceful { + select { + case <-c.doneCtx.Done(): + c.logger.Debug("plugin exited") + return + case <-time.After(2 * time.Second): + } + } + + // If graceful exiting failed, just kill it + c.logger.Warn("plugin failed to exit gracefully") + process.Kill() + + c.l.Lock() + c.processKilled = true + c.l.Unlock() +} + +// Starts the underlying subprocess, communicating with it to negotiate +// a port for RPC connections, and returning the address to connect via RPC. +// +// This method is safe to call multiple times. Subsequent calls have no effect. +// Once a client has been started once, it cannot be started again, even if +// it was killed. +func (c *Client) Start() (addr net.Addr, err error) { + c.l.Lock() + defer c.l.Unlock() + + if c.address != nil { + return c.address, nil + } + + // If one of cmd or reattach isn't set, then it is an error. We wrap + // this in a {} for scoping reasons, and hopeful that the escape + // analysis will pop the stack here. + { + cmdSet := c.config.Cmd != nil + attachSet := c.config.Reattach != nil + secureSet := c.config.SecureConfig != nil + if cmdSet == attachSet { + return nil, fmt.Errorf("Only one of Cmd or Reattach must be set") + } + + if secureSet && attachSet { + return nil, ErrSecureConfigAndReattach + } + } + + if c.config.Reattach != nil { + return c.reattach() + } + + if c.config.VersionedPlugins == nil { + c.config.VersionedPlugins = make(map[int]PluginSet) + } + + // handle all plugins as versioned, using the handshake config as the default. + version := int(c.config.ProtocolVersion) + + // Make sure we're not overwriting a real version 0. If ProtocolVersion was + // non-zero, then we have to just assume the user made sure that + // VersionedPlugins doesn't conflict. + if _, ok := c.config.VersionedPlugins[version]; !ok && c.config.Plugins != nil { + c.config.VersionedPlugins[version] = c.config.Plugins + } + + var versionStrings []string + for v := range c.config.VersionedPlugins { + versionStrings = append(versionStrings, strconv.Itoa(v)) + } + + env := []string{ + fmt.Sprintf("%s=%s", c.config.MagicCookieKey, c.config.MagicCookieValue), + fmt.Sprintf("PLUGIN_MIN_PORT=%d", c.config.MinPort), + fmt.Sprintf("PLUGIN_MAX_PORT=%d", c.config.MaxPort), + fmt.Sprintf("PLUGIN_PROTOCOL_VERSIONS=%s", strings.Join(versionStrings, ",")), + } + + cmd := c.config.Cmd + cmd.Env = append(cmd.Env, os.Environ()...) + cmd.Env = append(cmd.Env, env...) + cmd.Stdin = os.Stdin + + cmdStdout, err := cmd.StdoutPipe() + if err != nil { + return nil, err + } + cmdStderr, err := cmd.StderrPipe() + if err != nil { + return nil, err + } + + if c.config.SecureConfig != nil { + if ok, err := c.config.SecureConfig.Check(cmd.Path); err != nil { + return nil, fmt.Errorf("error verifying checksum: %s", err) + } else if !ok { + return nil, ErrChecksumsDoNotMatch + } + } + + // Setup a temporary certificate for client/server mtls, and send the public + // certificate to the plugin. 
+ if c.config.AutoMTLS { + c.logger.Info("configuring client automatic mTLS") + certPEM, keyPEM, err := generateCert() + if err != nil { + c.logger.Error("failed to generate client certificate", "error", err) + return nil, err + } + cert, err := tls.X509KeyPair(certPEM, keyPEM) + if err != nil { + c.logger.Error("failed to parse client certificate", "error", err) + return nil, err + } + + cmd.Env = append(cmd.Env, fmt.Sprintf("PLUGIN_CLIENT_CERT=%s", certPEM)) + + c.config.TLSConfig = &tls.Config{ + Certificates: []tls.Certificate{cert}, + ServerName: "localhost", + } + } + + c.logger.Debug("starting plugin", "path", cmd.Path, "args", cmd.Args) + err = cmd.Start() + if err != nil { + return + } + + // Set the process + c.process = cmd.Process + c.logger.Debug("plugin started", "path", cmd.Path, "pid", c.process.Pid) + + // Make sure the command is properly cleaned up if there is an error + defer func() { + r := recover() + + if err != nil || r != nil { + cmd.Process.Kill() + } + + if r != nil { + panic(r) + } + }() + + // Create a context for when we kill + c.doneCtx, c.ctxCancel = context.WithCancel(context.Background()) + + // Start goroutine that logs the stderr + c.clientWaitGroup.Add(1) + c.stderrWaitGroup.Add(1) + // logStderr calls Done() + go c.logStderr(cmdStderr) + + c.clientWaitGroup.Add(1) + go func() { + // ensure the context is cancelled when we're done + defer c.ctxCancel() + + defer c.clientWaitGroup.Done() + + // get the cmd info early, since the process information will be removed + // in Kill. + pid := c.process.Pid + path := cmd.Path + + // wait to finish reading from stderr since the stderr pipe reader + // will be closed by the subsequent call to cmd.Wait(). + c.stderrWaitGroup.Wait() + + // Wait for the command to end. + err := cmd.Wait() + + debugMsgArgs := []interface{}{ + "path", path, + "pid", pid, + } + if err != nil { + debugMsgArgs = append(debugMsgArgs, + []interface{}{"error", err.Error()}...) + } + + // Log and make sure to flush the logs write away + c.logger.Debug("plugin process exited", debugMsgArgs...) + os.Stderr.Sync() + + // Set that we exited, which takes a lock + c.l.Lock() + defer c.l.Unlock() + c.exited = true + }() + + // Start a goroutine that is going to be reading the lines + // out of stdout + linesCh := make(chan string) + c.clientWaitGroup.Add(1) + go func() { + defer c.clientWaitGroup.Done() + defer close(linesCh) + + scanner := bufio.NewScanner(cmdStdout) + for scanner.Scan() { + linesCh <- scanner.Text() + } + }() + + // Make sure after we exit we read the lines from stdout forever + // so they don't block since it is a pipe. + // The scanner goroutine above will close this, but track it with a wait + // group for completeness. + c.clientWaitGroup.Add(1) + defer func() { + go func() { + defer c.clientWaitGroup.Done() + for range linesCh { + } + }() + }() + + // Some channels for the next step + timeout := time.After(c.config.StartTimeout) + + // Start looking for the address + c.logger.Debug("waiting for RPC address", "path", cmd.Path) + select { + case <-timeout: + err = errors.New("timeout while waiting for plugin to start") + case <-c.doneCtx.Done(): + err = errors.New("plugin exited before we could connect") + case line := <-linesCh: + // Trim the line and split by "|" in order to get the parts of + // the output. 
+ line = strings.TrimSpace(line) + parts := strings.SplitN(line, "|", 6) + if len(parts) < 4 { + err = fmt.Errorf( + "Unrecognized remote plugin message: %s\n\n"+ + "This usually means that the plugin is either invalid or simply\n"+ + "needs to be recompiled to support the latest protocol.", line) + return + } + + // Check the core protocol. Wrapped in a {} for scoping. + { + var coreProtocol int + coreProtocol, err = strconv.Atoi(parts[0]) + if err != nil { + err = fmt.Errorf("Error parsing core protocol version: %s", err) + return + } + + if coreProtocol != CoreProtocolVersion { + err = fmt.Errorf("Incompatible core API version with plugin. "+ + "Plugin version: %s, Core version: %d\n\n"+ + "To fix this, the plugin usually only needs to be recompiled.\n"+ + "Please report this to the plugin author.", parts[0], CoreProtocolVersion) + return + } + } + + // Test the API version + version, pluginSet, err := c.checkProtoVersion(parts[1]) + if err != nil { + return addr, err + } + + // set the Plugins value to the compatible set, so the version + // doesn't need to be passed through to the ClientProtocol + // implementation. + c.config.Plugins = pluginSet + c.negotiatedVersion = version + c.logger.Debug("using plugin", "version", version) + + switch parts[2] { + case "tcp": + addr, err = net.ResolveTCPAddr("tcp", parts[3]) + case "unix": + addr, err = net.ResolveUnixAddr("unix", parts[3]) + default: + err = fmt.Errorf("Unknown address type: %s", parts[3]) + } + + // If we have a server type, then record that. We default to net/rpc + // for backwards compatibility. + c.protocol = ProtocolNetRPC + if len(parts) >= 5 { + c.protocol = Protocol(parts[4]) + } + + found := false + for _, p := range c.config.AllowedProtocols { + if p == c.protocol { + found = true + break + } + } + if !found { + err = fmt.Errorf("Unsupported plugin protocol %q. Supported: %v", + c.protocol, c.config.AllowedProtocols) + return addr, err + } + + // See if we have a TLS certificate from the server. + // Checking if the length is > 50 rules out catching the unused "extra" + // data returned from some older implementations. + if len(parts) >= 6 && len(parts[5]) > 50 { + err := c.loadServerCert(parts[5]) + if err != nil { + return nil, fmt.Errorf("error parsing server cert: %s", err) + } + } + } + + c.address = addr + return +} + +// loadServerCert is used by AutoMTLS to read an x.509 cert returned by the +// server, and load it as the RootCA for the client TLSConfig. +func (c *Client) loadServerCert(cert string) error { + certPool := x509.NewCertPool() + + asn1, err := base64.RawStdEncoding.DecodeString(cert) + if err != nil { + return err + } + + x509Cert, err := x509.ParseCertificate([]byte(asn1)) + if err != nil { + return err + } + + certPool.AddCert(x509Cert) + + c.config.TLSConfig.RootCAs = certPool + return nil +} + +func (c *Client) reattach() (net.Addr, error) { + // Verify the process still exists. If not, then it is an error + p, err := os.FindProcess(c.config.Reattach.Pid) + if err != nil { + // On Unix systems, FindProcess never returns an error. + // On Windows, for non-existent pids it returns: + // os.SyscallError - 'OpenProcess: the paremter is incorrect' + return nil, ErrProcessNotFound + } + + // Attempt to connect to the addr since on Unix systems FindProcess + // doesn't actually return an error if it can't find the process. 
+ conn, err := net.Dial( + c.config.Reattach.Addr.Network(), + c.config.Reattach.Addr.String()) + if err != nil { + p.Kill() + return nil, ErrProcessNotFound + } + conn.Close() + + // Create a context for when we kill + c.doneCtx, c.ctxCancel = context.WithCancel(context.Background()) + + c.clientWaitGroup.Add(1) + // Goroutine to mark exit status + go func(pid int) { + defer c.clientWaitGroup.Done() + + // ensure the context is cancelled when we're done + defer c.ctxCancel() + + // Wait for the process to die + pidWait(pid) + + // Log so we can see it + c.logger.Debug("reattached plugin process exited") + + // Mark it + c.l.Lock() + defer c.l.Unlock() + c.exited = true + }(p.Pid) + + // Set the address and protocol + c.address = c.config.Reattach.Addr + c.protocol = c.config.Reattach.Protocol + if c.protocol == "" { + // Default the protocol to net/rpc for backwards compatibility + c.protocol = ProtocolNetRPC + } + + if c.config.Reattach.Test { + c.negotiatedVersion = c.config.Reattach.ProtocolVersion + } + + // If we're in test mode, we do NOT set the process. This avoids the + // process being killed (the only purpose we have for c.process), since + // in test mode the process is responsible for exiting on its own. + if !c.config.Reattach.Test { + c.process = p + } + + return c.address, nil +} + +// checkProtoVersion returns the negotiated version and PluginSet. +// This returns an error if the server returned an incompatible protocol +// version, or an invalid handshake response. +func (c *Client) checkProtoVersion(protoVersion string) (int, PluginSet, error) { + serverVersion, err := strconv.Atoi(protoVersion) + if err != nil { + return 0, nil, fmt.Errorf("Error parsing protocol version %q: %s", protoVersion, err) + } + + // record these for the error message + var clientVersions []int + + // all versions, including the legacy ProtocolVersion have been added to + // the versions set + for version, plugins := range c.config.VersionedPlugins { + clientVersions = append(clientVersions, version) + + if serverVersion != version { + continue + } + return version, plugins, nil + } + + return 0, nil, fmt.Errorf("Incompatible API version with plugin. "+ + "Plugin version: %d, Client versions: %d", serverVersion, clientVersions) +} + +// ReattachConfig returns the information that must be provided to NewClient +// to reattach to the plugin process that this client started. This is +// useful for plugins that detach from their parent process. +// +// If this returns nil then the process hasn't been started yet. Please +// call Start or Client before calling this. +func (c *Client) ReattachConfig() *ReattachConfig { + c.l.Lock() + defer c.l.Unlock() + + if c.address == nil { + return nil + } + + if c.config.Cmd != nil && c.config.Cmd.Process == nil { + return nil + } + + // If we connected via reattach, just return the information as-is + if c.config.Reattach != nil { + return c.config.Reattach + } + + return &ReattachConfig{ + Protocol: c.protocol, + Addr: c.address, + Pid: c.config.Cmd.Process.Pid, + } +} + +// Protocol returns the protocol of server on the remote end. This will +// start the plugin process if it isn't already started. Errors from +// starting the plugin are surpressed and ProtocolInvalid is returned. It +// is recommended you call Start explicitly before calling Protocol to ensure +// no errors occur. 
+func (c *Client) Protocol() Protocol { + _, err := c.Start() + if err != nil { + return ProtocolInvalid + } + + return c.protocol +} + +func netAddrDialer(addr net.Addr) func(string, time.Duration) (net.Conn, error) { + return func(_ string, _ time.Duration) (net.Conn, error) { + // Connect to the client + conn, err := net.Dial(addr.Network(), addr.String()) + if err != nil { + return nil, err + } + if tcpConn, ok := conn.(*net.TCPConn); ok { + // Make sure to set keep alive so that the connection doesn't die + tcpConn.SetKeepAlive(true) + } + + return conn, nil + } +} + +// dialer is compatible with grpc.WithDialer and creates the connection +// to the plugin. +func (c *Client) dialer(_ string, timeout time.Duration) (net.Conn, error) { + conn, err := netAddrDialer(c.address)("", timeout) + if err != nil { + return nil, err + } + + // If we have a TLS config we wrap our connection. We only do this + // for net/rpc since gRPC uses its own mechanism for TLS. + if c.protocol == ProtocolNetRPC && c.config.TLSConfig != nil { + conn = tls.Client(conn, c.config.TLSConfig) + } + + return conn, nil +} + +var stdErrBufferSize = 64 * 1024 + +func (c *Client) logStderr(r io.Reader) { + defer c.clientWaitGroup.Done() + defer c.stderrWaitGroup.Done() + l := c.logger.Named(filepath.Base(c.config.Cmd.Path)) + + reader := bufio.NewReaderSize(r, stdErrBufferSize) + // continuation indicates the previous line was a prefix + continuation := false + + for { + line, isPrefix, err := reader.ReadLine() + switch { + case err == io.EOF: + return + case err != nil: + l.Error("reading plugin stderr", "error", err) + return + } + + c.config.Stderr.Write(line) + + // The line was longer than our max token size, so it's likely + // incomplete and won't unmarshal. + if isPrefix || continuation { + l.Debug(string(line)) + + // if we're finishing a continued line, add the newline back in + if !isPrefix { + c.config.Stderr.Write([]byte{'\n'}) + } + + continuation = isPrefix + continue + } + + c.config.Stderr.Write([]byte{'\n'}) + + entry, err := parseJSON(line) + // If output is not JSON format, print directly to Debug + if err != nil { + // Attempt to infer the desired log level from the commonly used + // string prefixes + switch line := string(line); { + case strings.HasPrefix(line, "[TRACE]"): + l.Trace(line) + case strings.HasPrefix(line, "[DEBUG]"): + l.Debug(line) + case strings.HasPrefix(line, "[INFO]"): + l.Info(line) + case strings.HasPrefix(line, "[WARN]"): + l.Warn(line) + case strings.HasPrefix(line, "[ERROR]"): + l.Error(line) + default: + l.Debug(line) + } + } else { + out := flattenKVPairs(entry.KVPairs) + + out = append(out, "timestamp", entry.Timestamp.Format(hclog.TimeFormat)) + switch hclog.LevelFromString(entry.Level) { + case hclog.Trace: + l.Trace(entry.Message, out...) + case hclog.Debug: + l.Debug(entry.Message, out...) + case hclog.Info: + l.Info(entry.Message, out...) + case hclog.Warn: + l.Warn(entry.Message, out...) + case hclog.Error: + l.Error(entry.Message, out...) + default: + // if there was no log level, it's likely this is unexpected + // json from something other than hclog, and we should output + // it verbatim. 
+ l.Debug(string(line)) + } + } + } +} diff --git a/vendor/github.com/hashicorp/go-plugin/discover.go b/vendor/github.com/hashicorp/go-plugin/discover.go new file mode 100644 index 0000000000..d22c566ed5 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/discover.go @@ -0,0 +1,28 @@ +package plugin + +import ( + "path/filepath" +) + +// Discover discovers plugins that are in a given directory. +// +// The directory doesn't need to be absolute. For example, "." will work fine. +// +// This currently assumes any file matching the glob is a plugin. +// In the future this may be smarter about checking that a file is +// executable and so on. +// +// TODO: test +func Discover(glob, dir string) ([]string, error) { + var err error + + // Make the directory absolute if it isn't already + if !filepath.IsAbs(dir) { + dir, err = filepath.Abs(dir) + if err != nil { + return nil, err + } + } + + return filepath.Glob(filepath.Join(dir, glob)) +} diff --git a/vendor/github.com/hashicorp/go-plugin/error.go b/vendor/github.com/hashicorp/go-plugin/error.go new file mode 100644 index 0000000000..22a7baa6a0 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/error.go @@ -0,0 +1,24 @@ +package plugin + +// This is a type that wraps error types so that they can be messaged +// across RPC channels. Since "error" is an interface, we can't always +// gob-encode the underlying structure. This is a valid error interface +// implementer that we will push across. +type BasicError struct { + Message string +} + +// NewBasicError is used to create a BasicError. +// +// err is allowed to be nil. +func NewBasicError(err error) *BasicError { + if err == nil { + return nil + } + + return &BasicError{err.Error()} +} + +func (e *BasicError) Error() string { + return e.Message +} diff --git a/vendor/github.com/hashicorp/go-plugin/go.mod b/vendor/github.com/hashicorp/go-plugin/go.mod new file mode 100644 index 0000000000..4e182e6258 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/go.mod @@ -0,0 +1,15 @@ +module github.com/hashicorp/go-plugin + +go 1.13 + +require ( + github.com/golang/protobuf v1.3.4 + github.com/hashicorp/go-hclog v0.14.1 + github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb + github.com/jhump/protoreflect v1.6.0 + github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77 + github.com/oklog/run v1.0.0 + github.com/stretchr/testify v1.3.0 // indirect + golang.org/x/net v0.0.0-20190311183353-d8887717615a + google.golang.org/grpc v1.27.1 +) diff --git a/vendor/github.com/hashicorp/go-plugin/go.sum b/vendor/github.com/hashicorp/go-plugin/go.sum new file mode 100644 index 0000000000..56062044ee --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/go.sum @@ -0,0 +1,87 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane 
v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.4 h1:87PNWwrRvUSnqS4dlcBU/ftvOIBep4sYuBLlh6rX2wk= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/hashicorp/go-hclog v0.14.1 h1:nQcJDQwIAGnmoUWp8ubocEX40cCml/17YkF6csQLReU= +github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= +github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= +github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10 h1:qxFzApOv4WsAL965uUPIsXzAKCZxN2p9UqdhFS4ZW10= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77 h1:7GoSOOW2jpsfkntVKaS2rAr1TJqfcxotyaUcuxoZSzg= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod 
h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d h1:g9qWBGx4puODJTMVyoPrpoxPFgVGd+z1DZwjfRu4d0I= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20191008105621-543471e840be h1:QAcqgptGM8IQBC9K/RC4o+O9YmqEm0diQn9QmZw/0mU= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= 
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_broker.go b/vendor/github.com/hashicorp/go-plugin/grpc_broker.go new file mode 100644 index 0000000000..daf142d170 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/grpc_broker.go @@ -0,0 +1,457 @@ +package plugin + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "log" + "net" + "sync" + "sync/atomic" + "time" + + "github.com/hashicorp/go-plugin/internal/plugin" + + "github.com/oklog/run" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" +) + +// streamer interface is used in the broker to send/receive connection +// information. +type streamer interface { + Send(*plugin.ConnInfo) error + Recv() (*plugin.ConnInfo, error) + Close() +} + +// sendErr is used to pass errors back during a send. +type sendErr struct { + i *plugin.ConnInfo + ch chan error +} + +// gRPCBrokerServer is used by the plugin to start a stream and to send +// connection information to/from the plugin. Implements GRPCBrokerServer and +// streamer interfaces. +type gRPCBrokerServer struct { + // send is used to send connection info to the gRPC stream. + send chan *sendErr + + // recv is used to receive connection info from the gRPC stream. + recv chan *plugin.ConnInfo + + // quit closes down the stream. + quit chan struct{} + + // o is used to ensure we close the quit channel only once. + o sync.Once +} + +func newGRPCBrokerServer() *gRPCBrokerServer { + return &gRPCBrokerServer{ + send: make(chan *sendErr), + recv: make(chan *plugin.ConnInfo), + quit: make(chan struct{}), + } +} + +// StartStream implements the GRPCBrokerServer interface and will block until +// the quit channel is closed or the context reports Done. The stream will pass +// connection information to/from the client. +func (s *gRPCBrokerServer) StartStream(stream plugin.GRPCBroker_StartStreamServer) error { + doneCh := stream.Context().Done() + defer s.Close() + + // Proccess send stream + go func() { + for { + select { + case <-doneCh: + return + case <-s.quit: + return + case se := <-s.send: + err := stream.Send(se.i) + se.ch <- err + } + } + }() + + // Process receive stream + for { + i, err := stream.Recv() + if err != nil { + return err + } + select { + case <-doneCh: + return nil + case <-s.quit: + return nil + case s.recv <- i: + } + } + + return nil +} + +// Send is used by the GRPCBroker to pass connection information into the stream +// to the client. 
+func (s *gRPCBrokerServer) Send(i *plugin.ConnInfo) error { + ch := make(chan error) + defer close(ch) + + select { + case <-s.quit: + return errors.New("broker closed") + case s.send <- &sendErr{ + i: i, + ch: ch, + }: + } + + return <-ch +} + +// Recv is used by the GRPCBroker to pass connection information that has been +// sent from the client from the stream to the broker. +func (s *gRPCBrokerServer) Recv() (*plugin.ConnInfo, error) { + select { + case <-s.quit: + return nil, errors.New("broker closed") + case i := <-s.recv: + return i, nil + } +} + +// Close closes the quit channel, shutting down the stream. +func (s *gRPCBrokerServer) Close() { + s.o.Do(func() { + close(s.quit) + }) +} + +// gRPCBrokerClientImpl is used by the client to start a stream and to send +// connection information to/from the client. Implements GRPCBrokerClient and +// streamer interfaces. +type gRPCBrokerClientImpl struct { + // client is the underlying GRPC client used to make calls to the server. + client plugin.GRPCBrokerClient + + // send is used to send connection info to the gRPC stream. + send chan *sendErr + + // recv is used to receive connection info from the gRPC stream. + recv chan *plugin.ConnInfo + + // quit closes down the stream. + quit chan struct{} + + // o is used to ensure we close the quit channel only once. + o sync.Once +} + +func newGRPCBrokerClient(conn *grpc.ClientConn) *gRPCBrokerClientImpl { + return &gRPCBrokerClientImpl{ + client: plugin.NewGRPCBrokerClient(conn), + send: make(chan *sendErr), + recv: make(chan *plugin.ConnInfo), + quit: make(chan struct{}), + } +} + +// StartStream implements the GRPCBrokerClient interface and will block until +// the quit channel is closed or the context reports Done. The stream will pass +// connection information to/from the plugin. +func (s *gRPCBrokerClientImpl) StartStream() error { + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + defer s.Close() + + stream, err := s.client.StartStream(ctx) + if err != nil { + return err + } + doneCh := stream.Context().Done() + + go func() { + for { + select { + case <-doneCh: + return + case <-s.quit: + return + case se := <-s.send: + err := stream.Send(se.i) + se.ch <- err + } + } + }() + + for { + i, err := stream.Recv() + if err != nil { + return err + } + select { + case <-doneCh: + return nil + case <-s.quit: + return nil + case s.recv <- i: + } + } + + return nil +} + +// Send is used by the GRPCBroker to pass connection information into the stream +// to the plugin. +func (s *gRPCBrokerClientImpl) Send(i *plugin.ConnInfo) error { + ch := make(chan error) + defer close(ch) + + select { + case <-s.quit: + return errors.New("broker closed") + case s.send <- &sendErr{ + i: i, + ch: ch, + }: + } + + return <-ch +} + +// Recv is used by the GRPCBroker to pass connection information that has been +// sent from the plugin to the broker. +func (s *gRPCBrokerClientImpl) Recv() (*plugin.ConnInfo, error) { + select { + case <-s.quit: + return nil, errors.New("broker closed") + case i := <-s.recv: + return i, nil + } +} + +// Close closes the quit channel, shutting down the stream. +func (s *gRPCBrokerClientImpl) Close() { + s.o.Do(func() { + close(s.quit) + }) +} + +// GRPCBroker is responsible for brokering connections by unique ID. +// +// It is used by plugins to create multiple gRPC connections and data +// streams between the plugin process and the host process. 
+// +// This allows a plugin to request a channel with a specific ID to connect to +// or accept a connection from, and the broker handles the details of +// holding these channels open while they're being negotiated. +// +// The Plugin interface has access to these for both Server and Client. +// The broker can be used by either (optionally) to reserve and connect to +// new streams. This is useful for complex args and return values, +// or anything else you might need a data stream for. +type GRPCBroker struct { + nextId uint32 + streamer streamer + streams map[uint32]*gRPCBrokerPending + tls *tls.Config + doneCh chan struct{} + o sync.Once + + sync.Mutex +} + +type gRPCBrokerPending struct { + ch chan *plugin.ConnInfo + doneCh chan struct{} +} + +func newGRPCBroker(s streamer, tls *tls.Config) *GRPCBroker { + return &GRPCBroker{ + streamer: s, + streams: make(map[uint32]*gRPCBrokerPending), + tls: tls, + doneCh: make(chan struct{}), + } +} + +// Accept accepts a connection by ID. +// +// This should not be called multiple times with the same ID at one time. +func (b *GRPCBroker) Accept(id uint32) (net.Listener, error) { + listener, err := serverListener() + if err != nil { + return nil, err + } + + err = b.streamer.Send(&plugin.ConnInfo{ + ServiceId: id, + Network: listener.Addr().Network(), + Address: listener.Addr().String(), + }) + if err != nil { + return nil, err + } + + return listener, nil +} + +// AcceptAndServe is used to accept a specific stream ID and immediately +// serve a gRPC server on that stream ID. This is used to easily serve +// complex arguments. Each AcceptAndServe call opens a new listener socket and +// sends the connection info down the stream to the dialer. Since a new +// connection is opened every call, these calls should be used sparingly. +// Multiple gRPC server implementations can be registered to a single +// AcceptAndServe call. +func (b *GRPCBroker) AcceptAndServe(id uint32, s func([]grpc.ServerOption) *grpc.Server) { + listener, err := b.Accept(id) + if err != nil { + log.Printf("[ERR] plugin: plugin acceptAndServe error: %s", err) + return + } + defer listener.Close() + + var opts []grpc.ServerOption + if b.tls != nil { + opts = []grpc.ServerOption{grpc.Creds(credentials.NewTLS(b.tls))} + } + + server := s(opts) + + // Here we use a run group to close this goroutine if the server is shutdown + // or the broker is shutdown. + var g run.Group + { + // Serve on the listener, if shutting down call GracefulStop. + g.Add(func() error { + return server.Serve(listener) + }, func(err error) { + server.GracefulStop() + }) + } + { + // block on the closeCh or the doneCh. If we are shutting down close the + // closeCh. + closeCh := make(chan struct{}) + g.Add(func() error { + select { + case <-b.doneCh: + case <-closeCh: + } + return nil + }, func(err error) { + close(closeCh) + }) + } + + // Block until we are done + g.Run() +} + +// Close closes the stream and all servers. +func (b *GRPCBroker) Close() error { + b.streamer.Close() + b.o.Do(func() { + close(b.doneCh) + }) + return nil +} + +// Dial opens a connection by ID. 
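+//
+// A rough usage sketch (names here are illustrative, not part of this API):
+// one side reserves an ID with broker.NextId() and serves on it via
+// AcceptAndServe(id, func(opts []grpc.ServerOption) *grpc.Server { ... }),
+// then passes the ID to the peer over its existing RPC connection; the peer
+// calls broker.Dial(id) to obtain a *grpc.ClientConn for that dedicated stream.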
+func (b *GRPCBroker) Dial(id uint32) (conn *grpc.ClientConn, err error) { + var c *plugin.ConnInfo + + // Open the stream + p := b.getStream(id) + select { + case c = <-p.ch: + close(p.doneCh) + case <-time.After(5 * time.Second): + return nil, fmt.Errorf("timeout waiting for connection info") + } + + var addr net.Addr + switch c.Network { + case "tcp": + addr, err = net.ResolveTCPAddr("tcp", c.Address) + case "unix": + addr, err = net.ResolveUnixAddr("unix", c.Address) + default: + err = fmt.Errorf("Unknown address type: %s", c.Address) + } + if err != nil { + return nil, err + } + + return dialGRPCConn(b.tls, netAddrDialer(addr)) +} + +// NextId returns a unique ID to use next. +// +// It is possible for very long-running plugin hosts to wrap this value, +// though it would require a very large amount of calls. In practice +// we've never seen it happen. +func (m *GRPCBroker) NextId() uint32 { + return atomic.AddUint32(&m.nextId, 1) +} + +// Run starts the brokering and should be executed in a goroutine, since it +// blocks forever, or until the session closes. +// +// Uses of GRPCBroker never need to call this. It is called internally by +// the plugin host/client. +func (m *GRPCBroker) Run() { + for { + stream, err := m.streamer.Recv() + if err != nil { + // Once we receive an error, just exit + break + } + + // Initialize the waiter + p := m.getStream(stream.ServiceId) + select { + case p.ch <- stream: + default: + } + + go m.timeoutWait(stream.ServiceId, p) + } +} + +func (m *GRPCBroker) getStream(id uint32) *gRPCBrokerPending { + m.Lock() + defer m.Unlock() + + p, ok := m.streams[id] + if ok { + return p + } + + m.streams[id] = &gRPCBrokerPending{ + ch: make(chan *plugin.ConnInfo, 1), + doneCh: make(chan struct{}), + } + return m.streams[id] +} + +func (m *GRPCBroker) timeoutWait(id uint32, p *gRPCBrokerPending) { + // Wait for the stream to either be picked up and connected, or + // for a timeout. + select { + case <-p.doneCh: + case <-time.After(5 * time.Second): + } + + m.Lock() + defer m.Unlock() + + // Delete the stream so no one else can grab it + delete(m.streams, id) +} diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_client.go b/vendor/github.com/hashicorp/go-plugin/grpc_client.go new file mode 100644 index 0000000000..842903c922 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/grpc_client.go @@ -0,0 +1,126 @@ +package plugin + +import ( + "crypto/tls" + "fmt" + "math" + "net" + "time" + + "github.com/hashicorp/go-plugin/internal/plugin" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/health/grpc_health_v1" +) + +func dialGRPCConn(tls *tls.Config, dialer func(string, time.Duration) (net.Conn, error), dialOpts ...grpc.DialOption) (*grpc.ClientConn, error) { + // Build dialing options. + opts := make([]grpc.DialOption, 0) + + // We use a custom dialer so that we can connect over unix domain sockets. + opts = append(opts, grpc.WithDialer(dialer)) + + // Fail right away + opts = append(opts, grpc.FailOnNonTempDialError(true)) + + // If we have no TLS configuration set, we need to explicitly tell grpc + // that we're connecting with an insecure connection. 
+ if tls == nil { + opts = append(opts, grpc.WithInsecure()) + } else { + opts = append(opts, grpc.WithTransportCredentials( + credentials.NewTLS(tls))) + } + + opts = append(opts, + grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(math.MaxInt32)), + grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(math.MaxInt32))) + + // Add our custom options if we have any + opts = append(opts, dialOpts...) + + // Connect. Note the first parameter is unused because we use a custom + // dialer that has the state to see the address. + conn, err := grpc.Dial("unused", opts...) + if err != nil { + return nil, err + } + + return conn, nil +} + +// newGRPCClient creates a new GRPCClient. The Client argument is expected +// to be successfully started already with a lock held. +func newGRPCClient(doneCtx context.Context, c *Client) (*GRPCClient, error) { + conn, err := dialGRPCConn(c.config.TLSConfig, c.dialer, c.config.GRPCDialOptions...) + if err != nil { + return nil, err + } + + // Start the broker. + brokerGRPCClient := newGRPCBrokerClient(conn) + broker := newGRPCBroker(brokerGRPCClient, c.config.TLSConfig) + go broker.Run() + go brokerGRPCClient.StartStream() + + // Start the stdio client + stdioClient, err := newGRPCStdioClient(doneCtx, c.logger.Named("stdio"), conn) + if err != nil { + return nil, err + } + go stdioClient.Run(c.config.SyncStdout, c.config.SyncStderr) + + cl := &GRPCClient{ + Conn: conn, + Plugins: c.config.Plugins, + doneCtx: doneCtx, + broker: broker, + controller: plugin.NewGRPCControllerClient(conn), + } + + return cl, nil +} + +// GRPCClient connects to a GRPCServer over gRPC to dispense plugin types. +type GRPCClient struct { + Conn *grpc.ClientConn + Plugins map[string]Plugin + + doneCtx context.Context + broker *GRPCBroker + + controller plugin.GRPCControllerClient +} + +// ClientProtocol impl. +func (c *GRPCClient) Close() error { + c.broker.Close() + c.controller.Shutdown(c.doneCtx, &plugin.Empty{}) + return c.Conn.Close() +} + +// ClientProtocol impl. +func (c *GRPCClient) Dispense(name string) (interface{}, error) { + raw, ok := c.Plugins[name] + if !ok { + return nil, fmt.Errorf("unknown plugin type: %s", name) + } + + p, ok := raw.(GRPCPlugin) + if !ok { + return nil, fmt.Errorf("plugin %q doesn't support gRPC", name) + } + + return p.GRPCClient(c.doneCtx, c.broker, c.Conn) +} + +// ClientProtocol impl. +func (c *GRPCClient) Ping() error { + client := grpc_health_v1.NewHealthClient(c.Conn) + _, err := client.Check(context.Background(), &grpc_health_v1.HealthCheckRequest{ + Service: GRPCServiceName, + }) + + return err +} diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_controller.go b/vendor/github.com/hashicorp/go-plugin/grpc_controller.go new file mode 100644 index 0000000000..1a8a8e70ea --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/grpc_controller.go @@ -0,0 +1,23 @@ +package plugin + +import ( + "context" + + "github.com/hashicorp/go-plugin/internal/plugin" +) + +// GRPCControllerServer handles shutdown calls to terminate the server when the +// plugin client is closed. +type grpcControllerServer struct { + server *GRPCServer +} + +// Shutdown stops the grpc server. It first will attempt a graceful stop, then a +// full stop on the server. +func (s *grpcControllerServer) Shutdown(ctx context.Context, _ *plugin.Empty) (*plugin.Empty, error) { + resp := &plugin.Empty{} + + // TODO: figure out why GracefullStop doesn't work. 
+ s.server.Stop() + return resp, nil +} diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_server.go b/vendor/github.com/hashicorp/go-plugin/grpc_server.go new file mode 100644 index 0000000000..387628bf48 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/grpc_server.go @@ -0,0 +1,149 @@ +package plugin + +import ( + "bytes" + "crypto/tls" + "encoding/json" + "fmt" + "io" + "net" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-plugin/internal/plugin" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/health" + "google.golang.org/grpc/health/grpc_health_v1" + "google.golang.org/grpc/reflection" +) + +// GRPCServiceName is the name of the service that the health check should +// return as passing. +const GRPCServiceName = "plugin" + +// DefaultGRPCServer can be used with the "GRPCServer" field for Server +// as a default factory method to create a gRPC server with no extra options. +func DefaultGRPCServer(opts []grpc.ServerOption) *grpc.Server { + return grpc.NewServer(opts...) +} + +// GRPCServer is a ServerType implementation that serves plugins over +// gRPC. This allows plugins to easily be written for other languages. +// +// The GRPCServer outputs a custom configuration as a base64-encoded +// JSON structure represented by the GRPCServerConfig config structure. +type GRPCServer struct { + // Plugins are the list of plugins to serve. + Plugins map[string]Plugin + + // Server is the actual server that will accept connections. This + // will be used for plugin registration as well. + Server func([]grpc.ServerOption) *grpc.Server + + // TLS should be the TLS configuration if available. If this is nil, + // the connection will not have transport security. + TLS *tls.Config + + // DoneCh is the channel that is closed when this server has exited. + DoneCh chan struct{} + + // Stdout/StderrLis are the readers for stdout/stderr that will be copied + // to the stdout/stderr connection that is output. + Stdout io.Reader + Stderr io.Reader + + config GRPCServerConfig + server *grpc.Server + broker *GRPCBroker + stdioServer *grpcStdioServer + + logger hclog.Logger +} + +// ServerProtocol impl. +func (s *GRPCServer) Init() error { + // Create our server + var opts []grpc.ServerOption + if s.TLS != nil { + opts = append(opts, grpc.Creds(credentials.NewTLS(s.TLS))) + } + s.server = s.Server(opts) + + // Register the health service + healthCheck := health.NewServer() + healthCheck.SetServingStatus( + GRPCServiceName, grpc_health_v1.HealthCheckResponse_SERVING) + grpc_health_v1.RegisterHealthServer(s.server, healthCheck) + + // Register the reflection service + reflection.Register(s.server) + + // Register the broker service + brokerServer := newGRPCBrokerServer() + plugin.RegisterGRPCBrokerServer(s.server, brokerServer) + s.broker = newGRPCBroker(brokerServer, s.TLS) + go s.broker.Run() + + // Register the controller + controllerServer := &grpcControllerServer{server: s} + plugin.RegisterGRPCControllerServer(s.server, controllerServer) + + // Register the stdio service + s.stdioServer = newGRPCStdioServer(s.logger, s.Stdout, s.Stderr) + plugin.RegisterGRPCStdioServer(s.server, s.stdioServer) + + // Register all our plugins onto the gRPC server. 
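+	// For example, a host might have been configured with something like
+	// Plugins: map[string]Plugin{"kv": &KVGRPCPlugin{}} (a hypothetical plugin
+	// name and type); every entry must implement GRPCPlugin, otherwise the
+	// loop below returns an error.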
+ for k, raw := range s.Plugins { + p, ok := raw.(GRPCPlugin) + if !ok { + return fmt.Errorf("%q is not a GRPC-compatible plugin", k) + } + + if err := p.GRPCServer(s.broker, s.server); err != nil { + return fmt.Errorf("error registering %q: %s", k, err) + } + } + + return nil +} + +// Stop calls Stop on the underlying grpc.Server +func (s *GRPCServer) Stop() { + s.server.Stop() +} + +// GracefulStop calls GracefulStop on the underlying grpc.Server +func (s *GRPCServer) GracefulStop() { + s.server.GracefulStop() +} + +// Config is the GRPCServerConfig encoded as JSON then base64. +func (s *GRPCServer) Config() string { + // Create a buffer that will contain our final contents + var buf bytes.Buffer + + // Wrap the base64 encoding with JSON encoding. + if err := json.NewEncoder(&buf).Encode(s.config); err != nil { + // We panic since ths shouldn't happen under any scenario. We + // carefully control the structure being encoded here and it should + // always be successful. + panic(err) + } + + return buf.String() +} + +func (s *GRPCServer) Serve(lis net.Listener) { + defer close(s.DoneCh) + err := s.server.Serve(lis) + if err != nil { + s.logger.Error("grpc server", "error", err) + } +} + +// GRPCServerConfig is the extra configuration passed along for consumers +// to facilitate using GRPC plugins. +type GRPCServerConfig struct { + StdoutAddr string `json:"stdout_addr"` + StderrAddr string `json:"stderr_addr"` +} diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_stdio.go b/vendor/github.com/hashicorp/go-plugin/grpc_stdio.go new file mode 100644 index 0000000000..a582181505 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/grpc_stdio.go @@ -0,0 +1,207 @@ +package plugin + +import ( + "bufio" + "bytes" + "context" + "io" + + empty "github.com/golang/protobuf/ptypes/empty" + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-plugin/internal/plugin" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// grpcStdioBuffer is the buffer size we try to fill when sending a chunk of +// stdio data. This is currently 1 KB for no reason other than that seems like +// enough (stdio data isn't that common) and is fairly low. +const grpcStdioBuffer = 1 * 1024 + +// grpcStdioServer implements the Stdio service and streams stdiout/stderr. +type grpcStdioServer struct { + stdoutCh <-chan []byte + stderrCh <-chan []byte +} + +// newGRPCStdioServer creates a new grpcStdioServer and starts the stream +// copying for the given out and err readers. +// +// This must only be called ONCE per srcOut, srcErr. +func newGRPCStdioServer(log hclog.Logger, srcOut, srcErr io.Reader) *grpcStdioServer { + stdoutCh := make(chan []byte) + stderrCh := make(chan []byte) + + // Begin copying the streams + go copyChan(log, stdoutCh, srcOut) + go copyChan(log, stderrCh, srcErr) + + // Construct our server + return &grpcStdioServer{ + stdoutCh: stdoutCh, + stderrCh: stderrCh, + } +} + +// StreamStdio streams our stdout/err as the response. +func (s *grpcStdioServer) StreamStdio( + _ *empty.Empty, + srv plugin.GRPCStdio_StreamStdioServer, +) error { + // Share the same data value between runs. Sending this over the wire + // marshals it so we can reuse this. 
+ var data plugin.StdioData + + for { + // Read our data + select { + case data.Data = <-s.stdoutCh: + data.Channel = plugin.StdioData_STDOUT + + case data.Data = <-s.stderrCh: + data.Channel = plugin.StdioData_STDERR + + case <-srv.Context().Done(): + return nil + } + + // Not sure if this is possible, but if we somehow got here and + // we didn't populate any data at all, then just continue. + if len(data.Data) == 0 { + continue + } + + // Send our data to the client. + if err := srv.Send(&data); err != nil { + return err + } + } +} + +// grpcStdioClient wraps the stdio service as a client to copy +// the stdio data to output writers. +type grpcStdioClient struct { + log hclog.Logger + stdioClient plugin.GRPCStdio_StreamStdioClient +} + +// newGRPCStdioClient creates a grpcStdioClient. This will perform the +// initial connection to the stdio service. If the stdio service is unavailable +// then this will be a no-op. This allows this to work without error for +// plugins that don't support this. +func newGRPCStdioClient( + ctx context.Context, + log hclog.Logger, + conn *grpc.ClientConn, +) (*grpcStdioClient, error) { + client := plugin.NewGRPCStdioClient(conn) + + // Connect immediately to the endpoint + stdioClient, err := client.StreamStdio(ctx, &empty.Empty{}) + + // If we get an Unavailable or Unimplemented error, this means that the plugin isn't + // updated and linking to the latest version of go-plugin that supports + // this. We fall back to the previous behavior of just not syncing anything. + if status.Code(err) == codes.Unavailable || status.Code(err) == codes.Unimplemented { + log.Warn("stdio service not available, stdout/stderr syncing unavailable") + stdioClient = nil + err = nil + } + if err != nil { + return nil, err + } + + return &grpcStdioClient{ + log: log, + stdioClient: stdioClient, + }, nil +} + +// Run starts the loop that receives stdio data and writes it to the given +// writers. This blocks and should be run in a goroutine. +func (c *grpcStdioClient) Run(stdout, stderr io.Writer) { + // This will be nil if stdio is not supported by the plugin + if c.stdioClient == nil { + c.log.Warn("stdio service unavailable, run will do nothing") + return + } + + for { + c.log.Trace("waiting for stdio data") + data, err := c.stdioClient.Recv() + if err != nil { + if err == io.EOF || + status.Code(err) == codes.Unavailable || + status.Code(err) == codes.Canceled || + status.Code(err) == codes.Unimplemented || + err == context.Canceled { + c.log.Debug("received EOF, stopping recv loop", "err", err) + return + } + + c.log.Error("error receiving data", "err", err) + return + } + + // Determine our output writer based on channel + var w io.Writer + switch data.Channel { + case plugin.StdioData_STDOUT: + w = stdout + + case plugin.StdioData_STDERR: + w = stderr + + default: + c.log.Warn("unknown channel, dropping", "channel", data.Channel) + continue + } + + // Write! In the event of an error we just continue. + if c.log.IsTrace() { + c.log.Trace("received data", "channel", data.Channel.String(), "len", len(data.Data)) + } + if _, err := io.Copy(w, bytes.NewReader(data.Data)); err != nil { + c.log.Error("failed to copy all bytes", "err", err) + } + } +} + +// copyChan copies an io.Reader into a channel. +func copyChan(log hclog.Logger, dst chan<- []byte, src io.Reader) { + bufsrc := bufio.NewReader(src) + + for { + // Make our data buffer. We allocate a new one per loop iteration + // so that we can send it over the channel. 
+ var data [1024]byte + + // Read the data, this will block until data is available + n, err := bufsrc.Read(data[:]) + + // We have to check if we have data BEFORE err != nil. The bufio + // docs guarantee n == 0 on EOF but its better to be safe here. + if n > 0 { + // We have data! Send it on the channel. This will block if there + // is no reader on the other side. We expect that go-plugin will + // connect immediately to the stdio server to drain this so we want + // this block to happen for backpressure. + dst <- data[:n] + } + + // If we hit EOF we're done copying + if err == io.EOF { + log.Debug("stdio EOF, exiting copy loop") + return + } + + // Any other error we just exit the loop. We don't expect there to + // be errors since our use case for this is reading/writing from + // a in-process pipe (os.Pipe). + if err != nil { + log.Warn("error copying stdio data, stopping copy", "err", err) + return + } + } +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go new file mode 100644 index 0000000000..fb9d415254 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go @@ -0,0 +1,3 @@ +//go:generate protoc -I ./ ./grpc_broker.proto ./grpc_controller.proto ./grpc_stdio.proto --go_out=plugins=grpc:. + +package plugin diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go new file mode 100644 index 0000000000..6bf103859f --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go @@ -0,0 +1,203 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc_broker.proto + +package plugin + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type ConnInfo struct { + ServiceId uint32 `protobuf:"varint,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` + Network string `protobuf:"bytes,2,opt,name=network,proto3" json:"network,omitempty"` + Address string `protobuf:"bytes,3,opt,name=address,proto3" json:"address,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConnInfo) Reset() { *m = ConnInfo{} } +func (m *ConnInfo) String() string { return proto.CompactTextString(m) } +func (*ConnInfo) ProtoMessage() {} +func (*ConnInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_grpc_broker_3322b07398605250, []int{0} +} +func (m *ConnInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ConnInfo.Unmarshal(m, b) +} +func (m *ConnInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ConnInfo.Marshal(b, m, deterministic) +} +func (dst *ConnInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConnInfo.Merge(dst, src) +} +func (m *ConnInfo) XXX_Size() int { + return xxx_messageInfo_ConnInfo.Size(m) +} +func (m *ConnInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ConnInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_ConnInfo proto.InternalMessageInfo + +func (m *ConnInfo) GetServiceId() uint32 { + if m != nil { + return m.ServiceId + } + return 0 +} + +func (m *ConnInfo) GetNetwork() string { + if m != nil { + return m.Network + } + return "" +} + +func (m *ConnInfo) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +func init() { + proto.RegisterType((*ConnInfo)(nil), "plugin.ConnInfo") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// GRPCBrokerClient is the client API for GRPCBroker service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type GRPCBrokerClient interface { + StartStream(ctx context.Context, opts ...grpc.CallOption) (GRPCBroker_StartStreamClient, error) +} + +type gRPCBrokerClient struct { + cc *grpc.ClientConn +} + +func NewGRPCBrokerClient(cc *grpc.ClientConn) GRPCBrokerClient { + return &gRPCBrokerClient{cc} +} + +func (c *gRPCBrokerClient) StartStream(ctx context.Context, opts ...grpc.CallOption) (GRPCBroker_StartStreamClient, error) { + stream, err := c.cc.NewStream(ctx, &_GRPCBroker_serviceDesc.Streams[0], "/plugin.GRPCBroker/StartStream", opts...) + if err != nil { + return nil, err + } + x := &gRPCBrokerStartStreamClient{stream} + return x, nil +} + +type GRPCBroker_StartStreamClient interface { + Send(*ConnInfo) error + Recv() (*ConnInfo, error) + grpc.ClientStream +} + +type gRPCBrokerStartStreamClient struct { + grpc.ClientStream +} + +func (x *gRPCBrokerStartStreamClient) Send(m *ConnInfo) error { + return x.ClientStream.SendMsg(m) +} + +func (x *gRPCBrokerStartStreamClient) Recv() (*ConnInfo, error) { + m := new(ConnInfo) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// GRPCBrokerServer is the server API for GRPCBroker service. 
+type GRPCBrokerServer interface { + StartStream(GRPCBroker_StartStreamServer) error +} + +func RegisterGRPCBrokerServer(s *grpc.Server, srv GRPCBrokerServer) { + s.RegisterService(&_GRPCBroker_serviceDesc, srv) +} + +func _GRPCBroker_StartStream_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(GRPCBrokerServer).StartStream(&gRPCBrokerStartStreamServer{stream}) +} + +type GRPCBroker_StartStreamServer interface { + Send(*ConnInfo) error + Recv() (*ConnInfo, error) + grpc.ServerStream +} + +type gRPCBrokerStartStreamServer struct { + grpc.ServerStream +} + +func (x *gRPCBrokerStartStreamServer) Send(m *ConnInfo) error { + return x.ServerStream.SendMsg(m) +} + +func (x *gRPCBrokerStartStreamServer) Recv() (*ConnInfo, error) { + m := new(ConnInfo) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _GRPCBroker_serviceDesc = grpc.ServiceDesc{ + ServiceName: "plugin.GRPCBroker", + HandlerType: (*GRPCBrokerServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "StartStream", + Handler: _GRPCBroker_StartStream_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "grpc_broker.proto", +} + +func init() { proto.RegisterFile("grpc_broker.proto", fileDescriptor_grpc_broker_3322b07398605250) } + +var fileDescriptor_grpc_broker_3322b07398605250 = []byte{ + // 175 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4c, 0x2f, 0x2a, 0x48, + 0x8e, 0x4f, 0x2a, 0xca, 0xcf, 0x4e, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2b, + 0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0x53, 0x8a, 0xe5, 0xe2, 0x70, 0xce, 0xcf, 0xcb, 0xf3, 0xcc, 0x4b, + 0xcb, 0x17, 0x92, 0xe5, 0xe2, 0x2a, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0x8d, 0xcf, 0x4c, 0x91, + 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0d, 0xe2, 0x84, 0x8a, 0x78, 0xa6, 0x08, 0x49, 0x70, 0xb1, 0xe7, + 0xa5, 0x96, 0x94, 0xe7, 0x17, 0x65, 0x4b, 0x30, 0x29, 0x30, 0x6a, 0x70, 0x06, 0xc1, 0xb8, 0x20, + 0x99, 0xc4, 0x94, 0x94, 0xa2, 0xd4, 0xe2, 0x62, 0x09, 0x66, 0x88, 0x0c, 0x94, 0x6b, 0xe4, 0xcc, + 0xc5, 0xe5, 0x1e, 0x14, 0xe0, 0xec, 0x04, 0xb6, 0x5a, 0xc8, 0x94, 0x8b, 0x3b, 0xb8, 0x24, 0xb1, + 0xa8, 0x24, 0xb8, 0xa4, 0x28, 0x35, 0x31, 0x57, 0x48, 0x40, 0x0f, 0xe2, 0x08, 0x3d, 0x98, 0x0b, + 0xa4, 0x30, 0x44, 0x34, 0x18, 0x0d, 0x18, 0x9d, 0x38, 0xa2, 0xa0, 0xae, 0x4d, 0x62, 0x03, 0x3b, + 0xde, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x10, 0x15, 0x39, 0x47, 0xd1, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto new file mode 100644 index 0000000000..aa3df4630a --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto @@ -0,0 +1,13 @@ +syntax = "proto3"; +package plugin; +option go_package = "plugin"; + +message ConnInfo { + uint32 service_id = 1; + string network = 2; + string address = 3; +} + +service GRPCBroker { + rpc StartStream(stream ConnInfo) returns (stream ConnInfo); +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go new file mode 100644 index 0000000000..3e39da95a8 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go @@ -0,0 +1,145 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: grpc_controller.proto + +package plugin + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Empty struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Empty) Reset() { *m = Empty{} } +func (m *Empty) String() string { return proto.CompactTextString(m) } +func (*Empty) ProtoMessage() {} +func (*Empty) Descriptor() ([]byte, []int) { + return fileDescriptor_grpc_controller_08f8296ef6d80436, []int{0} +} +func (m *Empty) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Empty.Unmarshal(m, b) +} +func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Empty.Marshal(b, m, deterministic) +} +func (dst *Empty) XXX_Merge(src proto.Message) { + xxx_messageInfo_Empty.Merge(dst, src) +} +func (m *Empty) XXX_Size() int { + return xxx_messageInfo_Empty.Size(m) +} +func (m *Empty) XXX_DiscardUnknown() { + xxx_messageInfo_Empty.DiscardUnknown(m) +} + +var xxx_messageInfo_Empty proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Empty)(nil), "plugin.Empty") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// GRPCControllerClient is the client API for GRPCController service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type GRPCControllerClient interface { + Shutdown(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) +} + +type gRPCControllerClient struct { + cc *grpc.ClientConn +} + +func NewGRPCControllerClient(cc *grpc.ClientConn) GRPCControllerClient { + return &gRPCControllerClient{cc} +} + +func (c *gRPCControllerClient) Shutdown(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { + out := new(Empty) + err := c.cc.Invoke(ctx, "/plugin.GRPCController/Shutdown", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// GRPCControllerServer is the server API for GRPCController service. 
+type GRPCControllerServer interface { + Shutdown(context.Context, *Empty) (*Empty, error) +} + +func RegisterGRPCControllerServer(s *grpc.Server, srv GRPCControllerServer) { + s.RegisterService(&_GRPCController_serviceDesc, srv) +} + +func _GRPCController_Shutdown_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GRPCControllerServer).Shutdown(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/plugin.GRPCController/Shutdown", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GRPCControllerServer).Shutdown(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +var _GRPCController_serviceDesc = grpc.ServiceDesc{ + ServiceName: "plugin.GRPCController", + HandlerType: (*GRPCControllerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Shutdown", + Handler: _GRPCController_Shutdown_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "grpc_controller.proto", +} + +func init() { + proto.RegisterFile("grpc_controller.proto", fileDescriptor_grpc_controller_08f8296ef6d80436) +} + +var fileDescriptor_grpc_controller_08f8296ef6d80436 = []byte{ + // 108 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4d, 0x2f, 0x2a, 0x48, + 0x8e, 0x4f, 0xce, 0xcf, 0x2b, 0x29, 0xca, 0xcf, 0xc9, 0x49, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, + 0xc9, 0x17, 0x62, 0x2b, 0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0x53, 0x62, 0xe7, 0x62, 0x75, 0xcd, 0x2d, + 0x28, 0xa9, 0x34, 0xb2, 0xe2, 0xe2, 0x73, 0x0f, 0x0a, 0x70, 0x76, 0x86, 0x2b, 0x14, 0xd2, 0xe0, + 0xe2, 0x08, 0xce, 0x28, 0x2d, 0x49, 0xc9, 0x2f, 0xcf, 0x13, 0xe2, 0xd5, 0x83, 0xa8, 0xd7, 0x03, + 0x2b, 0x96, 0x42, 0xe5, 0x3a, 0x71, 0x44, 0x41, 0x8d, 0x4b, 0x62, 0x03, 0x9b, 0x6e, 0x0c, 0x08, + 0x00, 0x00, 0xff, 0xff, 0xab, 0x7c, 0x27, 0xe5, 0x76, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto new file mode 100644 index 0000000000..345d0a1c1f --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; +package plugin; +option go_package = "plugin"; + +message Empty { +} + +// The GRPCController is responsible for telling the plugin server to shutdown. +service GRPCController { + rpc Shutdown(Empty) returns (Empty); +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.pb.go new file mode 100644 index 0000000000..c8f94921b4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.pb.go @@ -0,0 +1,233 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc_stdio.proto + +package plugin + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import empty "github.com/golang/protobuf/ptypes/empty" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type StdioData_Channel int32 + +const ( + StdioData_INVALID StdioData_Channel = 0 + StdioData_STDOUT StdioData_Channel = 1 + StdioData_STDERR StdioData_Channel = 2 +) + +var StdioData_Channel_name = map[int32]string{ + 0: "INVALID", + 1: "STDOUT", + 2: "STDERR", +} +var StdioData_Channel_value = map[string]int32{ + "INVALID": 0, + "STDOUT": 1, + "STDERR": 2, +} + +func (x StdioData_Channel) String() string { + return proto.EnumName(StdioData_Channel_name, int32(x)) +} +func (StdioData_Channel) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_grpc_stdio_db2934322ca63bd5, []int{0, 0} +} + +// StdioData is a single chunk of stdout or stderr data that is streamed +// from GRPCStdio. +type StdioData struct { + Channel StdioData_Channel `protobuf:"varint,1,opt,name=channel,proto3,enum=plugin.StdioData_Channel" json:"channel,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StdioData) Reset() { *m = StdioData{} } +func (m *StdioData) String() string { return proto.CompactTextString(m) } +func (*StdioData) ProtoMessage() {} +func (*StdioData) Descriptor() ([]byte, []int) { + return fileDescriptor_grpc_stdio_db2934322ca63bd5, []int{0} +} +func (m *StdioData) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StdioData.Unmarshal(m, b) +} +func (m *StdioData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StdioData.Marshal(b, m, deterministic) +} +func (dst *StdioData) XXX_Merge(src proto.Message) { + xxx_messageInfo_StdioData.Merge(dst, src) +} +func (m *StdioData) XXX_Size() int { + return xxx_messageInfo_StdioData.Size(m) +} +func (m *StdioData) XXX_DiscardUnknown() { + xxx_messageInfo_StdioData.DiscardUnknown(m) +} + +var xxx_messageInfo_StdioData proto.InternalMessageInfo + +func (m *StdioData) GetChannel() StdioData_Channel { + if m != nil { + return m.Channel + } + return StdioData_INVALID +} + +func (m *StdioData) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func init() { + proto.RegisterType((*StdioData)(nil), "plugin.StdioData") + proto.RegisterEnum("plugin.StdioData_Channel", StdioData_Channel_name, StdioData_Channel_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// GRPCStdioClient is the client API for GRPCStdio service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type GRPCStdioClient interface { + // StreamStdio returns a stream that contains all the stdout/stderr. + // This RPC endpoint must only be called ONCE. Once stdio data is consumed + // it is not sent again. + // + // Callers should connect early to prevent blocking on the plugin process. 
+ StreamStdio(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (GRPCStdio_StreamStdioClient, error) +} + +type gRPCStdioClient struct { + cc *grpc.ClientConn +} + +func NewGRPCStdioClient(cc *grpc.ClientConn) GRPCStdioClient { + return &gRPCStdioClient{cc} +} + +func (c *gRPCStdioClient) StreamStdio(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (GRPCStdio_StreamStdioClient, error) { + stream, err := c.cc.NewStream(ctx, &_GRPCStdio_serviceDesc.Streams[0], "/plugin.GRPCStdio/StreamStdio", opts...) + if err != nil { + return nil, err + } + x := &gRPCStdioStreamStdioClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type GRPCStdio_StreamStdioClient interface { + Recv() (*StdioData, error) + grpc.ClientStream +} + +type gRPCStdioStreamStdioClient struct { + grpc.ClientStream +} + +func (x *gRPCStdioStreamStdioClient) Recv() (*StdioData, error) { + m := new(StdioData) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// GRPCStdioServer is the server API for GRPCStdio service. +type GRPCStdioServer interface { + // StreamStdio returns a stream that contains all the stdout/stderr. + // This RPC endpoint must only be called ONCE. Once stdio data is consumed + // it is not sent again. + // + // Callers should connect early to prevent blocking on the plugin process. + StreamStdio(*empty.Empty, GRPCStdio_StreamStdioServer) error +} + +func RegisterGRPCStdioServer(s *grpc.Server, srv GRPCStdioServer) { + s.RegisterService(&_GRPCStdio_serviceDesc, srv) +} + +func _GRPCStdio_StreamStdio_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(empty.Empty) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(GRPCStdioServer).StreamStdio(m, &gRPCStdioStreamStdioServer{stream}) +} + +type GRPCStdio_StreamStdioServer interface { + Send(*StdioData) error + grpc.ServerStream +} + +type gRPCStdioStreamStdioServer struct { + grpc.ServerStream +} + +func (x *gRPCStdioStreamStdioServer) Send(m *StdioData) error { + return x.ServerStream.SendMsg(m) +} + +var _GRPCStdio_serviceDesc = grpc.ServiceDesc{ + ServiceName: "plugin.GRPCStdio", + HandlerType: (*GRPCStdioServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamStdio", + Handler: _GRPCStdio_StreamStdio_Handler, + ServerStreams: true, + }, + }, + Metadata: "grpc_stdio.proto", +} + +func init() { proto.RegisterFile("grpc_stdio.proto", fileDescriptor_grpc_stdio_db2934322ca63bd5) } + +var fileDescriptor_grpc_stdio_db2934322ca63bd5 = []byte{ + // 221 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x48, 0x2f, 0x2a, 0x48, + 0x8e, 0x2f, 0x2e, 0x49, 0xc9, 0xcc, 0xd7, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2b, 0xc8, + 0x29, 0x4d, 0xcf, 0xcc, 0x93, 0x92, 0x4e, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x07, 0x8b, 0x26, + 0x95, 0xa6, 0xe9, 0xa7, 0xe6, 0x16, 0x94, 0x54, 0x42, 0x14, 0x29, 0xb5, 0x30, 0x72, 0x71, 0x06, + 0x83, 0x34, 0xb9, 0x24, 0x96, 0x24, 0x0a, 0x19, 0x73, 0xb1, 0x27, 0x67, 0x24, 0xe6, 0xe5, 0xa5, + 0xe6, 0x48, 0x30, 0x2a, 0x30, 0x6a, 0xf0, 0x19, 0x49, 0xea, 0x41, 0x0c, 0xd1, 0x83, 0xab, 0xd1, + 0x73, 0x86, 0x28, 0x08, 0x82, 0xa9, 0x14, 0x12, 0xe2, 0x62, 0x49, 0x49, 0x2c, 0x49, 0x94, 0x60, + 0x52, 0x60, 0xd4, 0xe0, 0x09, 0x02, 0xb3, 0x95, 0xf4, 0xb8, 0xd8, 0xa1, 0xea, 0x84, 0xb8, 0xb9, + 0xd8, 0x3d, 0xfd, 
0xc2, 0x1c, 0x7d, 0x3c, 0x5d, 0x04, 0x18, 0x84, 0xb8, 0xb8, 0xd8, 0x82, 0x43, + 0x5c, 0xfc, 0x43, 0x43, 0x04, 0x18, 0xa1, 0x6c, 0xd7, 0xa0, 0x20, 0x01, 0x26, 0x23, 0x77, 0x2e, + 0x4e, 0xf7, 0xa0, 0x00, 0x67, 0xb0, 0x2d, 0x42, 0x56, 0x5c, 0xdc, 0xc1, 0x25, 0x45, 0xa9, 0x89, + 0xb9, 0x10, 0xae, 0x98, 0x1e, 0xc4, 0x03, 0x7a, 0x30, 0x0f, 0xe8, 0xb9, 0x82, 0x3c, 0x20, 0x25, + 0x88, 0xe1, 0x36, 0x03, 0x46, 0x27, 0x8e, 0x28, 0xa8, 0xb7, 0x93, 0xd8, 0xc0, 0xca, 0x8d, 0x01, + 0x01, 0x00, 0x00, 0xff, 0xff, 0x5d, 0xbb, 0xe0, 0x69, 0x19, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.proto b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.proto new file mode 100644 index 0000000000..ce1a122303 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.proto @@ -0,0 +1,30 @@ +syntax = "proto3"; +package plugin; +option go_package = "plugin"; + +import "google/protobuf/empty.proto"; + +// GRPCStdio is a service that is automatically run by the plugin process +// to stream any stdout/err data so that it can be mirrored on the plugin +// host side. +service GRPCStdio { + // StreamStdio returns a stream that contains all the stdout/stderr. + // This RPC endpoint must only be called ONCE. Once stdio data is consumed + // it is not sent again. + // + // Callers should connect early to prevent blocking on the plugin process. + rpc StreamStdio(google.protobuf.Empty) returns (stream StdioData); +} + +// StdioData is a single chunk of stdout or stderr data that is streamed +// from GRPCStdio. +message StdioData { + enum Channel { + INVALID = 0; + STDOUT = 1; + STDERR = 2; + } + + Channel channel = 1; + bytes data = 2; +} diff --git a/vendor/github.com/hashicorp/go-plugin/log_entry.go b/vendor/github.com/hashicorp/go-plugin/log_entry.go new file mode 100644 index 0000000000..fb2ef930ca --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/log_entry.go @@ -0,0 +1,73 @@ +package plugin + +import ( + "encoding/json" + "time" +) + +// logEntry is the JSON payload that gets sent to Stderr from the plugin to the host +type logEntry struct { + Message string `json:"@message"` + Level string `json:"@level"` + Timestamp time.Time `json:"timestamp"` + KVPairs []*logEntryKV `json:"kv_pairs"` +} + +// logEntryKV is a key value pair within the Output payload +type logEntryKV struct { + Key string `json:"key"` + Value interface{} `json:"value"` +} + +// flattenKVPairs is used to flatten KVPair slice into []interface{} +// for hclog consumption. +func flattenKVPairs(kvs []*logEntryKV) []interface{} { + var result []interface{} + for _, kv := range kvs { + result = append(result, kv.Key) + result = append(result, kv.Value) + } + + return result +} + +// parseJSON handles parsing JSON output +func parseJSON(input []byte) (*logEntry, error) { + var raw map[string]interface{} + entry := &logEntry{} + + err := json.Unmarshal(input, &raw) + if err != nil { + return nil, err + } + + // Parse hclog-specific objects + if v, ok := raw["@message"]; ok { + entry.Message = v.(string) + delete(raw, "@message") + } + + if v, ok := raw["@level"]; ok { + entry.Level = v.(string) + delete(raw, "@level") + } + + if v, ok := raw["@timestamp"]; ok { + t, err := time.Parse("2006-01-02T15:04:05.000000Z07:00", v.(string)) + if err != nil { + return nil, err + } + entry.Timestamp = t + delete(raw, "@timestamp") + } + + // Parse dynamic KV args from the hclog payload. 
+ for k, v := range raw { + entry.KVPairs = append(entry.KVPairs, &logEntryKV{ + Key: k, + Value: v, + }) + } + + return entry, nil +} diff --git a/vendor/github.com/hashicorp/go-plugin/mtls.go b/vendor/github.com/hashicorp/go-plugin/mtls.go new file mode 100644 index 0000000000..8895524587 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/mtls.go @@ -0,0 +1,73 @@ +package plugin + +import ( + "bytes" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "math/big" + "time" +) + +// generateCert generates a temporary certificate for plugin authentication. The +// certificate and private key are returns in PEM format. +func generateCert() (cert []byte, privateKey []byte, err error) { + key, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader) + if err != nil { + return nil, nil, err + } + + serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) + sn, err := rand.Int(rand.Reader, serialNumberLimit) + if err != nil { + return nil, nil, err + } + + host := "localhost" + + template := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: host, + Organization: []string{"HashiCorp"}, + }, + DNSNames: []string{host}, + ExtKeyUsage: []x509.ExtKeyUsage{ + x509.ExtKeyUsageClientAuth, + x509.ExtKeyUsageServerAuth, + }, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement | x509.KeyUsageCertSign, + BasicConstraintsValid: true, + SerialNumber: sn, + NotBefore: time.Now().Add(-30 * time.Second), + NotAfter: time.Now().Add(262980 * time.Hour), + IsCA: true, + } + + der, err := x509.CreateCertificate(rand.Reader, template, template, key.Public(), key) + if err != nil { + return nil, nil, err + } + + var certOut bytes.Buffer + if err := pem.Encode(&certOut, &pem.Block{Type: "CERTIFICATE", Bytes: der}); err != nil { + return nil, nil, err + } + + keyBytes, err := x509.MarshalECPrivateKey(key) + if err != nil { + return nil, nil, err + } + + var keyOut bytes.Buffer + if err := pem.Encode(&keyOut, &pem.Block{Type: "EC PRIVATE KEY", Bytes: keyBytes}); err != nil { + return nil, nil, err + } + + cert = certOut.Bytes() + privateKey = keyOut.Bytes() + + return cert, privateKey, nil +} diff --git a/vendor/github.com/hashicorp/go-plugin/mux_broker.go b/vendor/github.com/hashicorp/go-plugin/mux_broker.go new file mode 100644 index 0000000000..01c45ad7c6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/mux_broker.go @@ -0,0 +1,204 @@ +package plugin + +import ( + "encoding/binary" + "fmt" + "log" + "net" + "sync" + "sync/atomic" + "time" + + "github.com/hashicorp/yamux" +) + +// MuxBroker is responsible for brokering multiplexed connections by unique ID. +// +// It is used by plugins to multiplex multiple RPC connections and data +// streams on top of a single connection between the plugin process and the +// host process. +// +// This allows a plugin to request a channel with a specific ID to connect to +// or accept a connection from, and the broker handles the details of +// holding these channels open while they're being negotiated. +// +// The Plugin interface has access to these for both Server and Client. +// The broker can be used by either (optionally) to reserve and connect to +// new multiplexed streams. This is useful for complex args and return values, +// or anything else you might need a data stream for. 
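A minimal usage sketch, illustrative only and not from the vendored sources: one way a plugin implementation might use the broker described above to pass an extra data stream alongside an ordinary RPC call. The receiveBlob/sendBlob helpers and the askPlugin callback are hypothetical; only NextId, Accept and Dial come from the MuxBroker below.

package example

import (
	"io"

	plugin "github.com/hashicorp/go-plugin"
)

// Host side: reserve a stream ID, hand it to the plugin through an ordinary
// RPC argument (askPlugin), then accept the plugin's connection on that ID.
func receiveBlob(broker *plugin.MuxBroker, askPlugin func(streamID uint32) error) ([]byte, error) {
	id := broker.NextId()
	if err := askPlugin(id); err != nil {
		return nil, err
	}

	conn, err := broker.Accept(id) // times out after a few seconds if the plugin never dials
	if err != nil {
		return nil, err
	}
	defer conn.Close()

	return io.ReadAll(conn)
}

// Plugin side: dial the ID received from the host and stream the payload back.
func sendBlob(broker *plugin.MuxBroker, streamID uint32, payload []byte) error {
	conn, err := broker.Dial(streamID)
	if err != nil {
		return err
	}
	defer conn.Close()

	_, err = conn.Write(payload)
	return err
}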
+type MuxBroker struct { + nextId uint32 + session *yamux.Session + streams map[uint32]*muxBrokerPending + + sync.Mutex +} + +type muxBrokerPending struct { + ch chan net.Conn + doneCh chan struct{} +} + +func newMuxBroker(s *yamux.Session) *MuxBroker { + return &MuxBroker{ + session: s, + streams: make(map[uint32]*muxBrokerPending), + } +} + +// Accept accepts a connection by ID. +// +// This should not be called multiple times with the same ID at one time. +func (m *MuxBroker) Accept(id uint32) (net.Conn, error) { + var c net.Conn + p := m.getStream(id) + select { + case c = <-p.ch: + close(p.doneCh) + case <-time.After(5 * time.Second): + m.Lock() + defer m.Unlock() + delete(m.streams, id) + + return nil, fmt.Errorf("timeout waiting for accept") + } + + // Ack our connection + if err := binary.Write(c, binary.LittleEndian, id); err != nil { + c.Close() + return nil, err + } + + return c, nil +} + +// AcceptAndServe is used to accept a specific stream ID and immediately +// serve an RPC server on that stream ID. This is used to easily serve +// complex arguments. +// +// The served interface is always registered to the "Plugin" name. +func (m *MuxBroker) AcceptAndServe(id uint32, v interface{}) { + conn, err := m.Accept(id) + if err != nil { + log.Printf("[ERR] plugin: plugin acceptAndServe error: %s", err) + return + } + + serve(conn, "Plugin", v) +} + +// Close closes the connection and all sub-connections. +func (m *MuxBroker) Close() error { + return m.session.Close() +} + +// Dial opens a connection by ID. +func (m *MuxBroker) Dial(id uint32) (net.Conn, error) { + // Open the stream + stream, err := m.session.OpenStream() + if err != nil { + return nil, err + } + + // Write the stream ID onto the wire. + if err := binary.Write(stream, binary.LittleEndian, id); err != nil { + stream.Close() + return nil, err + } + + // Read the ack that we connected. Then we're off! + var ack uint32 + if err := binary.Read(stream, binary.LittleEndian, &ack); err != nil { + stream.Close() + return nil, err + } + if ack != id { + stream.Close() + return nil, fmt.Errorf("bad ack: %d (expected %d)", ack, id) + } + + return stream, nil +} + +// NextId returns a unique ID to use next. +// +// It is possible for very long-running plugin hosts to wrap this value, +// though it would require a very large amount of RPC calls. In practice +// we've never seen it happen. +func (m *MuxBroker) NextId() uint32 { + return atomic.AddUint32(&m.nextId, 1) +} + +// Run starts the brokering and should be executed in a goroutine, since it +// blocks forever, or until the session closes. +// +// Uses of MuxBroker never need to call this. It is called internally by +// the plugin host/client. 
+func (m *MuxBroker) Run() { + for { + stream, err := m.session.AcceptStream() + if err != nil { + // Once we receive an error, just exit + break + } + + // Read the stream ID from the stream + var id uint32 + if err := binary.Read(stream, binary.LittleEndian, &id); err != nil { + stream.Close() + continue + } + + // Initialize the waiter + p := m.getStream(id) + select { + case p.ch <- stream: + default: + } + + // Wait for a timeout + go m.timeoutWait(id, p) + } +} + +func (m *MuxBroker) getStream(id uint32) *muxBrokerPending { + m.Lock() + defer m.Unlock() + + p, ok := m.streams[id] + if ok { + return p + } + + m.streams[id] = &muxBrokerPending{ + ch: make(chan net.Conn, 1), + doneCh: make(chan struct{}), + } + return m.streams[id] +} + +func (m *MuxBroker) timeoutWait(id uint32, p *muxBrokerPending) { + // Wait for the stream to either be picked up and connected, or + // for a timeout. + timeout := false + select { + case <-p.doneCh: + case <-time.After(5 * time.Second): + timeout = true + } + + m.Lock() + defer m.Unlock() + + // Delete the stream so no one else can grab it + delete(m.streams, id) + + // If we timed out, then check if we have a channel in the buffer, + // and if so, close it. + if timeout { + select { + case s := <-p.ch: + s.Close() + } + } +} diff --git a/vendor/github.com/hashicorp/go-plugin/plugin.go b/vendor/github.com/hashicorp/go-plugin/plugin.go new file mode 100644 index 0000000000..79d9674633 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/plugin.go @@ -0,0 +1,58 @@ +// The plugin package exposes functions and helpers for communicating to +// plugins which are implemented as standalone binary applications. +// +// plugin.Client fully manages the lifecycle of executing the application, +// connecting to it, and returning the RPC client for dispensing plugins. +// +// plugin.Serve fully manages listeners to expose an RPC server from a binary +// that plugin.Client can connect to. +package plugin + +import ( + "context" + "errors" + "net/rpc" + + "google.golang.org/grpc" +) + +// Plugin is the interface that is implemented to serve/connect to an +// inteface implementation. +type Plugin interface { + // Server should return the RPC server compatible struct to serve + // the methods that the Client calls over net/rpc. + Server(*MuxBroker) (interface{}, error) + + // Client returns an interface implementation for the plugin you're + // serving that communicates to the server end of the plugin. + Client(*MuxBroker, *rpc.Client) (interface{}, error) +} + +// GRPCPlugin is the interface that is implemented to serve/connect to +// a plugin over gRPC. +type GRPCPlugin interface { + // GRPCServer should register this plugin for serving with the + // given GRPCServer. Unlike Plugin.Server, this is only called once + // since gRPC plugins serve singletons. + GRPCServer(*GRPCBroker, *grpc.Server) error + + // GRPCClient should return the interface implementation for the plugin + // you're serving via gRPC. The provided context will be canceled by + // go-plugin in the event of the plugin process exiting. + GRPCClient(context.Context, *GRPCBroker, *grpc.ClientConn) (interface{}, error) +} + +// NetRPCUnsupportedPlugin implements Plugin but returns errors for the +// Server and Client functions. This will effectively disable support for +// net/rpc based plugins. +// +// This struct can be embedded in your struct. 
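A minimal sketch of implementing the Plugin interface above for a net/rpc plugin; illustrative only, not from the vendored sources. The Greeter interface and the GreeterPlugin, greeterRPCServer and greeterRPCClient types are hypothetical; the "Plugin.Greet" service name relies on go-plugin registering the served value under the name "Plugin".

package example

import (
	"net/rpc"

	plugin "github.com/hashicorp/go-plugin"
)

// Greeter is the hypothetical interface the host and plugin agree on.
type Greeter interface {
	Greet(name string) (string, error)
}

// GreeterPlugin wires a concrete Greeter into go-plugin's net/rpc protocol.
type GreeterPlugin struct {
	Impl Greeter
}

func (p *GreeterPlugin) Server(*plugin.MuxBroker) (interface{}, error) {
	// Served on the plugin process side; methods become "Plugin.<Method>" RPCs.
	return &greeterRPCServer{impl: p.Impl}, nil
}

func (p *GreeterPlugin) Client(b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) {
	// Returned to callers of Dispense on the host side.
	return &greeterRPCClient{client: c}, nil
}

type greeterRPCServer struct{ impl Greeter }

func (s *greeterRPCServer) Greet(name string, resp *string) error {
	out, err := s.impl.Greet(name)
	*resp = out
	return err
}

type greeterRPCClient struct{ client *rpc.Client }

func (c *greeterRPCClient) Greet(name string) (string, error) {
	var resp string
	err := c.client.Call("Plugin.Greet", name, &resp)
	return resp, err
}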
+type NetRPCUnsupportedPlugin struct{} + +func (p NetRPCUnsupportedPlugin) Server(*MuxBroker) (interface{}, error) { + return nil, errors.New("net/rpc plugin protocol not supported") +} + +func (p NetRPCUnsupportedPlugin) Client(*MuxBroker, *rpc.Client) (interface{}, error) { + return nil, errors.New("net/rpc plugin protocol not supported") +} diff --git a/vendor/github.com/hashicorp/go-plugin/process.go b/vendor/github.com/hashicorp/go-plugin/process.go new file mode 100644 index 0000000000..88c999a580 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/process.go @@ -0,0 +1,24 @@ +package plugin + +import ( + "time" +) + +// pidAlive checks whether a pid is alive. +func pidAlive(pid int) bool { + return _pidAlive(pid) +} + +// pidWait blocks for a process to exit. +func pidWait(pid int) error { + ticker := time.NewTicker(1 * time.Second) + defer ticker.Stop() + + for range ticker.C { + if !pidAlive(pid) { + break + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/go-plugin/process_posix.go b/vendor/github.com/hashicorp/go-plugin/process_posix.go new file mode 100644 index 0000000000..70ba546bf6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/process_posix.go @@ -0,0 +1,19 @@ +// +build !windows + +package plugin + +import ( + "os" + "syscall" +) + +// _pidAlive tests whether a process is alive or not by sending it Signal 0, +// since Go otherwise has no way to test this. +func _pidAlive(pid int) bool { + proc, err := os.FindProcess(pid) + if err == nil { + err = proc.Signal(syscall.Signal(0)) + } + + return err == nil +} diff --git a/vendor/github.com/hashicorp/go-plugin/process_windows.go b/vendor/github.com/hashicorp/go-plugin/process_windows.go new file mode 100644 index 0000000000..0eaa7705d2 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/process_windows.go @@ -0,0 +1,30 @@ +package plugin + +import ( + "syscall" +) + +const ( + // Weird name but matches the MSDN docs + exit_STILL_ACTIVE = 259 + + processDesiredAccess = syscall.STANDARD_RIGHTS_READ | + syscall.PROCESS_QUERY_INFORMATION | + syscall.SYNCHRONIZE +) + +// _pidAlive tests whether a process is alive or not +func _pidAlive(pid int) bool { + h, err := syscall.OpenProcess(processDesiredAccess, false, uint32(pid)) + if err != nil { + return false + } + defer syscall.CloseHandle(h) + + var ec uint32 + if e := syscall.GetExitCodeProcess(h, &ec); e != nil { + return false + } + + return ec == exit_STILL_ACTIVE +} diff --git a/vendor/github.com/hashicorp/go-plugin/protocol.go b/vendor/github.com/hashicorp/go-plugin/protocol.go new file mode 100644 index 0000000000..0cfc19e52d --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/protocol.go @@ -0,0 +1,45 @@ +package plugin + +import ( + "io" + "net" +) + +// Protocol is an enum representing the types of protocols. +type Protocol string + +const ( + ProtocolInvalid Protocol = "" + ProtocolNetRPC Protocol = "netrpc" + ProtocolGRPC Protocol = "grpc" +) + +// ServerProtocol is an interface that must be implemented for new plugin +// protocols to be servers. +type ServerProtocol interface { + // Init is called once to configure and initialize the protocol, but + // not start listening. This is the point at which all validation should + // be done and errors returned. + Init() error + + // Config is extra configuration to be outputted to stdout. This will + // be automatically base64 encoded to ensure it can be parsed properly. + // This can be an empty string if additional configuration is not needed. 
+ Config() string + + // Serve is called to serve connections on the given listener. This should + // continue until the listener is closed. + Serve(net.Listener) +} + +// ClientProtocol is an interface that must be implemented for new plugin +// protocols to be clients. +type ClientProtocol interface { + io.Closer + + // Dispense dispenses a new instance of the plugin with the given name. + Dispense(string) (interface{}, error) + + // Ping checks that the client connection is still healthy. + Ping() error +} diff --git a/vendor/github.com/hashicorp/go-plugin/rpc_client.go b/vendor/github.com/hashicorp/go-plugin/rpc_client.go new file mode 100644 index 0000000000..f30a4b1d38 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/rpc_client.go @@ -0,0 +1,170 @@ +package plugin + +import ( + "crypto/tls" + "fmt" + "io" + "net" + "net/rpc" + + "github.com/hashicorp/yamux" +) + +// RPCClient connects to an RPCServer over net/rpc to dispense plugin types. +type RPCClient struct { + broker *MuxBroker + control *rpc.Client + plugins map[string]Plugin + + // These are the streams used for the various stdout/err overrides + stdout, stderr net.Conn +} + +// newRPCClient creates a new RPCClient. The Client argument is expected +// to be successfully started already with a lock held. +func newRPCClient(c *Client) (*RPCClient, error) { + // Connect to the client + conn, err := net.Dial(c.address.Network(), c.address.String()) + if err != nil { + return nil, err + } + if tcpConn, ok := conn.(*net.TCPConn); ok { + // Make sure to set keep alive so that the connection doesn't die + tcpConn.SetKeepAlive(true) + } + + if c.config.TLSConfig != nil { + conn = tls.Client(conn, c.config.TLSConfig) + } + + // Create the actual RPC client + result, err := NewRPCClient(conn, c.config.Plugins) + if err != nil { + conn.Close() + return nil, err + } + + // Begin the stream syncing so that stdin, out, err work properly + err = result.SyncStreams( + c.config.SyncStdout, + c.config.SyncStderr) + if err != nil { + result.Close() + return nil, err + } + + return result, nil +} + +// NewRPCClient creates a client from an already-open connection-like value. +// Dial is typically used instead. +func NewRPCClient(conn io.ReadWriteCloser, plugins map[string]Plugin) (*RPCClient, error) { + // Create the yamux client so we can multiplex + mux, err := yamux.Client(conn, nil) + if err != nil { + conn.Close() + return nil, err + } + + // Connect to the control stream. + control, err := mux.Open() + if err != nil { + mux.Close() + return nil, err + } + + // Connect stdout, stderr streams + stdstream := make([]net.Conn, 2) + for i, _ := range stdstream { + stdstream[i], err = mux.Open() + if err != nil { + mux.Close() + return nil, err + } + } + + // Create the broker and start it up + broker := newMuxBroker(mux) + go broker.Run() + + // Build the client using our broker and control channel. + return &RPCClient{ + broker: broker, + control: rpc.NewClient(control), + plugins: plugins, + stdout: stdstream[0], + stderr: stdstream[1], + }, nil +} + +// SyncStreams should be called to enable syncing of stdout, +// stderr with the plugin. +// +// This will return immediately and the syncing will continue to happen +// in the background. You do not need to launch this in a goroutine itself. +// +// This should never be called multiple times. 
+func (c *RPCClient) SyncStreams(stdout io.Writer, stderr io.Writer) error { + go copyStream("stdout", stdout, c.stdout) + go copyStream("stderr", stderr, c.stderr) + return nil +} + +// Close closes the connection. The client is no longer usable after this +// is called. +func (c *RPCClient) Close() error { + // Call the control channel and ask it to gracefully exit. If this + // errors, then we save it so that we always return an error but we + // want to try to close the other channels anyways. + var empty struct{} + returnErr := c.control.Call("Control.Quit", true, &empty) + + // Close the other streams we have + if err := c.control.Close(); err != nil { + return err + } + if err := c.stdout.Close(); err != nil { + return err + } + if err := c.stderr.Close(); err != nil { + return err + } + if err := c.broker.Close(); err != nil { + return err + } + + // Return back the error we got from Control.Quit. This is very important + // since we MUST return non-nil error if this fails so that Client.Kill + // will properly try a process.Kill. + return returnErr +} + +func (c *RPCClient) Dispense(name string) (interface{}, error) { + p, ok := c.plugins[name] + if !ok { + return nil, fmt.Errorf("unknown plugin type: %s", name) + } + + var id uint32 + if err := c.control.Call( + "Dispenser.Dispense", name, &id); err != nil { + return nil, err + } + + conn, err := c.broker.Dial(id) + if err != nil { + return nil, err + } + + return p.Client(c.broker, rpc.NewClient(conn)) +} + +// Ping pings the connection to ensure it is still alive. +// +// The error from the RPC call is returned exactly if you want to inspect +// it for further error analysis. Any error returned from here would indicate +// that the connection to the plugin is not healthy. +func (c *RPCClient) Ping() error { + var empty struct{} + return c.control.Call("Control.Ping", true, &empty) +} diff --git a/vendor/github.com/hashicorp/go-plugin/rpc_server.go b/vendor/github.com/hashicorp/go-plugin/rpc_server.go new file mode 100644 index 0000000000..5bb18dd5db --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/rpc_server.go @@ -0,0 +1,197 @@ +package plugin + +import ( + "errors" + "fmt" + "io" + "log" + "net" + "net/rpc" + "sync" + + "github.com/hashicorp/yamux" +) + +// RPCServer listens for network connections and then dispenses interface +// implementations over net/rpc. +// +// After setting the fields below, they shouldn't be read again directly +// from the structure which may be reading/writing them concurrently. +type RPCServer struct { + Plugins map[string]Plugin + + // Stdout, Stderr are what this server will use instead of the + // normal stdin/out/err. This is because due to the multi-process nature + // of our plugin system, we can't use the normal process values so we + // make our own custom one we pipe across. + Stdout io.Reader + Stderr io.Reader + + // DoneCh should be set to a non-nil channel that will be closed + // when the control requests the RPC server to end. + DoneCh chan<- struct{} + + lock sync.Mutex +} + +// ServerProtocol impl. +func (s *RPCServer) Init() error { return nil } + +// ServerProtocol impl. +func (s *RPCServer) Config() string { return "" } + +// ServerProtocol impl. +func (s *RPCServer) Serve(lis net.Listener) { + for { + conn, err := lis.Accept() + if err != nil { + log.Printf("[ERR] plugin: plugin server: %s", err) + return + } + + go s.ServeConn(conn) + } +} + +// ServeConn runs a single connection. +// +// ServeConn blocks, serving the connection until the client hangs up. 
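A sketch of the host-side flow built on the RPCClient above: wrap an already open connection, sync the plugin's stdio, and dispense a named plugin. Illustrative only; the connection, the plugin map contents and the "greeter" name are assumptions.

package example

import (
	"io"
	"os"

	plugin "github.com/hashicorp/go-plugin"
)

// dispenseGreeter turns an established plugin connection into a dispensed
// plugin value, mirroring what NewRPCClient, SyncStreams and Dispense above do.
func dispenseGreeter(conn io.ReadWriteCloser, plugins map[string]plugin.Plugin) (interface{}, error) {
	client, err := plugin.NewRPCClient(conn, plugins)
	if err != nil {
		return nil, err
	}

	// Mirror the plugin's stdout/stderr into this process's streams.
	if err := client.SyncStreams(os.Stdout, os.Stderr); err != nil {
		client.Close()
		return nil, err
	}

	// Returns whatever the "greeter" Plugin's Client method built around the connection.
	return client.Dispense("greeter")
}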
+func (s *RPCServer) ServeConn(conn io.ReadWriteCloser) { + // First create the yamux server to wrap this connection + mux, err := yamux.Server(conn, nil) + if err != nil { + conn.Close() + log.Printf("[ERR] plugin: error creating yamux server: %s", err) + return + } + + // Accept the control connection + control, err := mux.Accept() + if err != nil { + mux.Close() + if err != io.EOF { + log.Printf("[ERR] plugin: error accepting control connection: %s", err) + } + + return + } + + // Connect the stdstreams (in, out, err) + stdstream := make([]net.Conn, 2) + for i, _ := range stdstream { + stdstream[i], err = mux.Accept() + if err != nil { + mux.Close() + log.Printf("[ERR] plugin: accepting stream %d: %s", i, err) + return + } + } + + // Copy std streams out to the proper place + go copyStream("stdout", stdstream[0], s.Stdout) + go copyStream("stderr", stdstream[1], s.Stderr) + + // Create the broker and start it up + broker := newMuxBroker(mux) + go broker.Run() + + // Use the control connection to build the dispenser and serve the + // connection. + server := rpc.NewServer() + server.RegisterName("Control", &controlServer{ + server: s, + }) + server.RegisterName("Dispenser", &dispenseServer{ + broker: broker, + plugins: s.Plugins, + }) + server.ServeConn(control) +} + +// done is called internally by the control server to trigger the +// doneCh to close which is listened to by the main process to cleanly +// exit. +func (s *RPCServer) done() { + s.lock.Lock() + defer s.lock.Unlock() + + if s.DoneCh != nil { + close(s.DoneCh) + s.DoneCh = nil + } +} + +// dispenseServer dispenses variousinterface implementations for Terraform. +type controlServer struct { + server *RPCServer +} + +// Ping can be called to verify the connection (and likely the binary) +// is still alive to a plugin. +func (c *controlServer) Ping( + null bool, response *struct{}) error { + *response = struct{}{} + return nil +} + +func (c *controlServer) Quit( + null bool, response *struct{}) error { + // End the server + c.server.done() + + // Always return true + *response = struct{}{} + + return nil +} + +// dispenseServer dispenses variousinterface implementations for Terraform. +type dispenseServer struct { + broker *MuxBroker + plugins map[string]Plugin +} + +func (d *dispenseServer) Dispense( + name string, response *uint32) error { + // Find the function to create this implementation + p, ok := d.plugins[name] + if !ok { + return fmt.Errorf("unknown plugin type: %s", name) + } + + // Create the implementation first so we know if there is an error. + impl, err := p.Server(d.broker) + if err != nil { + // We turn the error into an errors error so that it works across RPC + return errors.New(err.Error()) + } + + // Reserve an ID for our implementation + id := d.broker.NextId() + *response = id + + // Run the rest in a goroutine since it can only happen once this RPC + // call returns. We wait for a connection for the plugin implementation + // and serve it. 
+ go func() { + conn, err := d.broker.Accept(id) + if err != nil { + log.Printf("[ERR] go-plugin: plugin dispense error: %s: %s", name, err) + return + } + + serve(conn, "Plugin", impl) + }() + + return nil +} + +func serve(conn io.ReadWriteCloser, name string, v interface{}) { + server := rpc.NewServer() + if err := server.RegisterName(name, v); err != nil { + log.Printf("[ERR] go-plugin: plugin dispense error: %s", err) + return + } + + server.ServeConn(conn) +} diff --git a/vendor/github.com/hashicorp/go-plugin/server.go b/vendor/github.com/hashicorp/go-plugin/server.go new file mode 100644 index 0000000000..7a58cc3919 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/server.go @@ -0,0 +1,589 @@ +package plugin + +import ( + "context" + "crypto/tls" + "crypto/x509" + "encoding/base64" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "os/signal" + "runtime" + "sort" + "strconv" + "strings" + + hclog "github.com/hashicorp/go-hclog" + "google.golang.org/grpc" +) + +// CoreProtocolVersion is the ProtocolVersion of the plugin system itself. +// We will increment this whenever we change any protocol behavior. This +// will invalidate any prior plugins but will at least allow us to iterate +// on the core in a safe way. We will do our best to do this very +// infrequently. +const CoreProtocolVersion = 1 + +// HandshakeConfig is the configuration used by client and servers to +// handshake before starting a plugin connection. This is embedded by +// both ServeConfig and ClientConfig. +// +// In practice, the plugin host creates a HandshakeConfig that is exported +// and plugins then can easily consume it. +type HandshakeConfig struct { + // ProtocolVersion is the version that clients must match on to + // agree they can communicate. This should match the ProtocolVersion + // set on ClientConfig when using a plugin. + // This field is not required if VersionedPlugins are being used in the + // Client or Server configurations. + ProtocolVersion uint + + // MagicCookieKey and value are used as a very basic verification + // that a plugin is intended to be launched. This is not a security + // measure, just a UX feature. If the magic cookie doesn't match, + // we show human-friendly output. + MagicCookieKey string + MagicCookieValue string +} + +// PluginSet is a set of plugins provided to be registered in the plugin +// server. +type PluginSet map[string]Plugin + +// ServeConfig configures what sorts of plugins are served. +type ServeConfig struct { + // HandshakeConfig is the configuration that must match clients. + HandshakeConfig + + // TLSProvider is a function that returns a configured tls.Config. + TLSProvider func() (*tls.Config, error) + + // Plugins are the plugins that are served. + // The implied version of this PluginSet is the Handshake.ProtocolVersion. + Plugins PluginSet + + // VersionedPlugins is a map of PluginSets for specific protocol versions. + // These can be used to negotiate a compatible version between client and + // server. If this is set, Handshake.ProtocolVersion is not required. + VersionedPlugins map[int]PluginSet + + // GRPCServer should be non-nil to enable serving the plugins over + // gRPC. This is a function to create the server when needed with the + // given server options. The server options populated by go-plugin will + // be for TLS if set. You may modify the input slice. + // + // Note that the grpc.Server will automatically be registered with + // the gRPC health checking service. 
This is not optional since go-plugin + // relies on this to implement Ping(). + GRPCServer func([]grpc.ServerOption) *grpc.Server + + // Logger is used to pass a logger into the server. If none is provided the + // server will create a default logger. + Logger hclog.Logger + + // Test, if non-nil, will put plugin serving into "test mode". This is + // meant to be used as part of `go test` within a plugin's codebase to + // launch the plugin in-process and output a ReattachConfig. + // + // This changes the behavior of the server in a number of ways to + // accomodate the expectation of running in-process: + // + // * The handshake cookie is not validated. + // * Stdout/stderr will receive plugin reads and writes + // * Connection information will not be sent to stdout + // + Test *ServeTestConfig +} + +// ServeTestConfig configures plugin serving for test mode. See ServeConfig.Test. +type ServeTestConfig struct { + // Context, if set, will force the plugin serving to end when cancelled. + // This is only a test configuration because the non-test configuration + // expects to take over the process and therefore end on an interrupt or + // kill signal. For tests, we need to kill the plugin serving routinely + // and this provides a way to do so. + // + // If you want to wait for the plugin process to close before moving on, + // you can wait on CloseCh. + Context context.Context + + // If this channel is non-nil, we will send the ReattachConfig via + // this channel. This can be encoded (via JSON recommended) to the + // plugin client to attach to this plugin. + ReattachConfigCh chan<- *ReattachConfig + + // CloseCh, if non-nil, will be closed when serving exits. This can be + // used along with Context to determine when the server is fully shut down. + // If this is not set, you can still use Context on its own, but note there + // may be a period of time between canceling the context and the plugin + // server being shut down. + CloseCh chan<- struct{} + + // SyncStdio, if true, will enable the client side "SyncStdout/Stderr" + // functionality to work. This defaults to false because the implementation + // of making this work within test environments is particularly messy + // and SyncStdio functionality is fairly rare, so we default to the simple + // scenario. + SyncStdio bool +} + +// protocolVersion determines the protocol version and plugin set to be used by +// the server. In the event that there is no suitable version, the last version +// in the config is returned leaving the client to report the incompatibility. +func protocolVersion(opts *ServeConfig) (int, Protocol, PluginSet) { + protoVersion := int(opts.ProtocolVersion) + pluginSet := opts.Plugins + protoType := ProtocolNetRPC + // Check if the client sent a list of acceptable versions + var clientVersions []int + if vs := os.Getenv("PLUGIN_PROTOCOL_VERSIONS"); vs != "" { + for _, s := range strings.Split(vs, ",") { + v, err := strconv.Atoi(s) + if err != nil { + fmt.Fprintf(os.Stderr, "server sent invalid plugin version %q", s) + continue + } + clientVersions = append(clientVersions, v) + } + } + + // We want to iterate in reverse order, to ensure we match the newest + // compatible plugin version. 
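A sketch of how a plugin binary's main function might fill in the HandshakeConfig and ServeConfig described above and hand them to Serve (defined further below). Illustrative only; the cookie values are made up, and GreeterPlugin is the hypothetical type from the earlier sketch.

package main

import (
	plugin "github.com/hashicorp/go-plugin"
)

// handshake must match the values the host passes in its client configuration;
// per the comment above it is only a basic UX check, not a security measure.
var handshake = plugin.HandshakeConfig{
	ProtocolVersion:  1,
	MagicCookieKey:   "EXAMPLE_PLUGIN_COOKIE", // hypothetical
	MagicCookieValue: "example-cookie-value",  // hypothetical
}

// hostGreeter is a hypothetical concrete implementation served by this plugin.
type hostGreeter struct{}

func (hostGreeter) Greet(name string) (string, error) { return "hello " + name, nil }

func main() {
	plugin.Serve(&plugin.ServeConfig{
		HandshakeConfig: handshake,
		Plugins: plugin.PluginSet{
			// GreeterPlugin is the hypothetical Plugin implementation from the
			// earlier sketch; any map of name -> Plugin works here.
			"greeter": &GreeterPlugin{Impl: hostGreeter{}},
		},
		// GRPCServer is left nil, so Serve selects the net/rpc protocol.
	})
}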
+ sort.Sort(sort.Reverse(sort.IntSlice(clientVersions))) + + // set the old un-versioned fields as if they were versioned plugins + if opts.VersionedPlugins == nil { + opts.VersionedPlugins = make(map[int]PluginSet) + } + + if pluginSet != nil { + opts.VersionedPlugins[protoVersion] = pluginSet + } + + // Sort the version to make sure we match the latest first + var versions []int + for v := range opts.VersionedPlugins { + versions = append(versions, v) + } + + sort.Sort(sort.Reverse(sort.IntSlice(versions))) + + // See if we have multiple versions of Plugins to choose from + for _, version := range versions { + // Record each version, since we guarantee that this returns valid + // values even if they are not a protocol match. + protoVersion = version + pluginSet = opts.VersionedPlugins[version] + + // If we have a configured gRPC server we should select a protocol + if opts.GRPCServer != nil { + // All plugins in a set must use the same transport, so check the first + // for the protocol type + for _, p := range pluginSet { + switch p.(type) { + case GRPCPlugin: + protoType = ProtocolGRPC + default: + protoType = ProtocolNetRPC + } + break + } + } + + for _, clientVersion := range clientVersions { + if clientVersion == protoVersion { + return protoVersion, protoType, pluginSet + } + } + } + + // Return the lowest version as the fallback. + // Since we iterated over all the versions in reverse order above, these + // values are from the lowest version number plugins (which may be from + // a combination of the Handshake.ProtocolVersion and ServeConfig.Plugins + // fields). This allows serving the oldest version of our plugins to a + // legacy client that did not send a PLUGIN_PROTOCOL_VERSIONS list. + return protoVersion, protoType, pluginSet +} + +// Serve serves the plugins given by ServeConfig. +// +// Serve doesn't return until the plugin is done being executed. Any +// fixable errors will be output to os.Stderr and the process will +// exit with a status code of 1. Serve will panic for unexpected +// conditions where a user's fix is unknown. +// +// This is the method that plugins should call in their main() functions. +func Serve(opts *ServeConfig) { + exitCode := -1 + // We use this to trigger an `os.Exit` so that we can execute our other + // deferred functions. In test mode, we just output the err to stderr + // and return. + defer func() { + if opts.Test == nil && exitCode >= 0 { + os.Exit(exitCode) + } + + if opts.Test != nil && opts.Test.CloseCh != nil { + close(opts.Test.CloseCh) + } + }() + + if opts.Test == nil { + // Validate the handshake config + if opts.MagicCookieKey == "" || opts.MagicCookieValue == "" { + fmt.Fprintf(os.Stderr, + "Misconfigured ServeConfig given to serve this plugin: no magic cookie\n"+ + "key or value was set. Please notify the plugin author and report\n"+ + "this as a bug.\n") + exitCode = 1 + return + } + + // First check the cookie + if os.Getenv(opts.MagicCookieKey) != opts.MagicCookieValue { + fmt.Fprintf(os.Stderr, + "This binary is a plugin. 
These are not meant to be executed directly.\n"+ + "Please execute the program that consumes these plugins, which will\n"+ + "load any plugins automatically\n") + exitCode = 1 + return + } + } + + // negotiate the version and plugins + // start with default version in the handshake config + protoVersion, protoType, pluginSet := protocolVersion(opts) + + logger := opts.Logger + if logger == nil { + // internal logger to os.Stderr + logger = hclog.New(&hclog.LoggerOptions{ + Level: hclog.Trace, + Output: os.Stderr, + JSONFormat: true, + }) + } + + // Register a listener so we can accept a connection + listener, err := serverListener() + if err != nil { + logger.Error("plugin init error", "error", err) + return + } + + // Close the listener on return. We wrap this in a func() on purpose + // because the "listener" reference may change to TLS. + defer func() { + listener.Close() + }() + + var tlsConfig *tls.Config + if opts.TLSProvider != nil { + tlsConfig, err = opts.TLSProvider() + if err != nil { + logger.Error("plugin tls init", "error", err) + return + } + } + + var serverCert string + clientCert := os.Getenv("PLUGIN_CLIENT_CERT") + // If the client is configured using AutoMTLS, the certificate will be here, + // and we need to generate our own in response. + if tlsConfig == nil && clientCert != "" { + logger.Info("configuring server automatic mTLS") + clientCertPool := x509.NewCertPool() + if !clientCertPool.AppendCertsFromPEM([]byte(clientCert)) { + logger.Error("client cert provided but failed to parse", "cert", clientCert) + } + + certPEM, keyPEM, err := generateCert() + if err != nil { + logger.Error("failed to generate client certificate", "error", err) + panic(err) + } + + cert, err := tls.X509KeyPair(certPEM, keyPEM) + if err != nil { + logger.Error("failed to parse client certificate", "error", err) + panic(err) + } + + tlsConfig = &tls.Config{ + Certificates: []tls.Certificate{cert}, + ClientAuth: tls.RequireAndVerifyClientCert, + ClientCAs: clientCertPool, + MinVersion: tls.VersionTLS12, + } + + // We send back the raw leaf cert data for the client rather than the + // PEM, since the protocol can't handle newlines. + serverCert = base64.RawStdEncoding.EncodeToString(cert.Certificate[0]) + } + + // Create the channel to tell us when we're done + doneCh := make(chan struct{}) + + // Create our new stdout, stderr files. These will override our built-in + // stdout/stderr so that it works across the stream boundary. + var stdout_r, stderr_r io.Reader + stdout_r, stdout_w, err := os.Pipe() + if err != nil { + fmt.Fprintf(os.Stderr, "Error preparing plugin: %s\n", err) + os.Exit(1) + } + stderr_r, stderr_w, err := os.Pipe() + if err != nil { + fmt.Fprintf(os.Stderr, "Error preparing plugin: %s\n", err) + os.Exit(1) + } + + // If we're in test mode, we tee off the reader and write the data + // as-is to our normal Stdout and Stderr so that they continue working + // while stdio works. This is because in test mode, we assume we're running + // in `go test` or some equivalent and we want output to go to standard + // locations. + if opts.Test != nil { + // TODO(mitchellh): This isn't super ideal because a TeeReader + // only works if the reader side is actively read. If we never + // connect via a plugin client, the output still gets swallowed. 
+ stdout_r = io.TeeReader(stdout_r, os.Stdout) + stderr_r = io.TeeReader(stderr_r, os.Stderr) + } + + // Build the server type + var server ServerProtocol + switch protoType { + case ProtocolNetRPC: + // If we have a TLS configuration then we wrap the listener + // ourselves and do it at that level. + if tlsConfig != nil { + listener = tls.NewListener(listener, tlsConfig) + } + + // Create the RPC server to dispense + server = &RPCServer{ + Plugins: pluginSet, + Stdout: stdout_r, + Stderr: stderr_r, + DoneCh: doneCh, + } + + case ProtocolGRPC: + // Create the gRPC server + server = &GRPCServer{ + Plugins: pluginSet, + Server: opts.GRPCServer, + TLS: tlsConfig, + Stdout: stdout_r, + Stderr: stderr_r, + DoneCh: doneCh, + logger: logger, + } + + default: + panic("unknown server protocol: " + protoType) + } + + // Initialize the servers + if err := server.Init(); err != nil { + logger.Error("protocol init", "error", err) + return + } + + logger.Debug("plugin address", "network", listener.Addr().Network(), "address", listener.Addr().String()) + + // Output the address and service name to stdout so that the client can + // bring it up. In test mode, we don't do this because clients will + // attach via a reattach config. + if opts.Test == nil { + fmt.Printf("%d|%d|%s|%s|%s|%s\n", + CoreProtocolVersion, + protoVersion, + listener.Addr().Network(), + listener.Addr().String(), + protoType, + serverCert) + os.Stdout.Sync() + } else if ch := opts.Test.ReattachConfigCh; ch != nil { + // Send back the reattach config that can be used. This isn't + // quite ready if they connect immediately but the client should + // retry a few times. + ch <- &ReattachConfig{ + Protocol: protoType, + ProtocolVersion: protoVersion, + Addr: listener.Addr(), + Pid: os.Getpid(), + Test: true, + } + } + + // Eat the interrupts. In test mode we disable this so that go test + // can be cancelled properly. + if opts.Test == nil { + ch := make(chan os.Signal, 1) + signal.Notify(ch, os.Interrupt) + go func() { + count := 0 + for { + <-ch + count++ + logger.Trace("plugin received interrupt signal, ignoring", "count", count) + } + }() + } + + // Set our stdout, stderr to the stdio stream that clients can retrieve + // using ClientConfig.SyncStdout/err. We only do this for non-test mode + // or if the test mode explicitly requests it. + // + // In test mode, we use a multiwriter so that the data continues going + // to the normal stdout/stderr so output can show up in test logs. We + // also send to the stdio stream so that clients can continue working + // if they depend on that. + if opts.Test == nil || opts.Test.SyncStdio { + if opts.Test != nil { + // In test mode we need to maintain the original values so we can + // reset it. + defer func(out, err *os.File) { + os.Stdout = out + os.Stderr = err + }(os.Stdout, os.Stderr) + } + os.Stdout = stdout_w + os.Stderr = stderr_w + } + + // Accept connections and wait for completion + go server.Serve(listener) + + ctx := context.Background() + if opts.Test != nil && opts.Test.Context != nil { + ctx = opts.Test.Context + } + select { + case <-ctx.Done(): + // Cancellation. We can stop the server by closing the listener. + // This isn't graceful at all but this is currently only used by + // tests and its our only way to stop. + listener.Close() + + // If this is a grpc server, then we also ask the server itself to + // end which will kill all connections. There isn't an easy way to do + // this for net/rpc currently but net/rpc is more and more unused. 
+ if s, ok := server.(*GRPCServer); ok { + s.Stop() + } + + // Wait for the server itself to shut down + <-doneCh + + case <-doneCh: + // Note that given the documentation of Serve we should probably be + // setting exitCode = 0 and using os.Exit here. That's how it used to + // work before extracting this library. However, for years we've done + // this so we'll keep this functionality. + } +} + +func serverListener() (net.Listener, error) { + if runtime.GOOS == "windows" { + return serverListener_tcp() + } + + return serverListener_unix() +} + +func serverListener_tcp() (net.Listener, error) { + envMinPort := os.Getenv("PLUGIN_MIN_PORT") + envMaxPort := os.Getenv("PLUGIN_MAX_PORT") + + var minPort, maxPort int64 + var err error + + switch { + case len(envMinPort) == 0: + minPort = 0 + default: + minPort, err = strconv.ParseInt(envMinPort, 10, 32) + if err != nil { + return nil, fmt.Errorf("Couldn't get value from PLUGIN_MIN_PORT: %v", err) + } + } + + switch { + case len(envMaxPort) == 0: + maxPort = 0 + default: + maxPort, err = strconv.ParseInt(envMaxPort, 10, 32) + if err != nil { + return nil, fmt.Errorf("Couldn't get value from PLUGIN_MAX_PORT: %v", err) + } + } + + if minPort > maxPort { + return nil, fmt.Errorf("PLUGIN_MIN_PORT value of %d is greater than PLUGIN_MAX_PORT value of %d", minPort, maxPort) + } + + for port := minPort; port <= maxPort; port++ { + address := fmt.Sprintf("127.0.0.1:%d", port) + listener, err := net.Listen("tcp", address) + if err == nil { + return listener, nil + } + } + + return nil, errors.New("Couldn't bind plugin TCP listener") +} + +func serverListener_unix() (net.Listener, error) { + tf, err := ioutil.TempFile("", "plugin") + if err != nil { + return nil, err + } + path := tf.Name() + + // Close the file and remove it because it has to not exist for + // the domain socket. + if err := tf.Close(); err != nil { + return nil, err + } + if err := os.Remove(path); err != nil { + return nil, err + } + + l, err := net.Listen("unix", path) + if err != nil { + return nil, err + } + + // Wrap the listener in rmListener so that the Unix domain socket file + // is removed on close. + return &rmListener{ + Listener: l, + Path: path, + }, nil +} + +// rmListener is an implementation of net.Listener that forwards most +// calls to the listener but also removes a file as part of the close. We +// use this to cleanup the unix domain socket on close. +type rmListener struct { + net.Listener + Path string +} + +func (l *rmListener) Close() error { + // Close the listener itself + if err := l.Listener.Close(); err != nil { + return err + } + + // Remove the file + return os.Remove(l.Path) +} diff --git a/vendor/github.com/hashicorp/go-plugin/server_mux.go b/vendor/github.com/hashicorp/go-plugin/server_mux.go new file mode 100644 index 0000000000..033079ea0f --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/server_mux.go @@ -0,0 +1,31 @@ +package plugin + +import ( + "fmt" + "os" +) + +// ServeMuxMap is the type that is used to configure ServeMux +type ServeMuxMap map[string]*ServeConfig + +// ServeMux is like Serve, but serves multiple types of plugins determined +// by the argument given on the command-line. +// +// This command doesn't return until the plugin is done being executed. Any +// errors are logged or output to stderr. +func ServeMux(m ServeMuxMap) { + if len(os.Args) != 2 { + fmt.Fprintf(os.Stderr, + "Invoked improperly. 
This is an internal command that shouldn't\n"+ + "be manually invoked.\n") + os.Exit(1) + } + + opts, ok := m[os.Args[1]] + if !ok { + fmt.Fprintf(os.Stderr, "Unknown plugin: %s\n", os.Args[1]) + os.Exit(1) + } + + Serve(opts) +} diff --git a/vendor/github.com/hashicorp/go-plugin/stream.go b/vendor/github.com/hashicorp/go-plugin/stream.go new file mode 100644 index 0000000000..1d547aaaab --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/stream.go @@ -0,0 +1,18 @@ +package plugin + +import ( + "io" + "log" +) + +func copyStream(name string, dst io.Writer, src io.Reader) { + if src == nil { + panic(name + ": src is nil") + } + if dst == nil { + panic(name + ": dst is nil") + } + if _, err := io.Copy(dst, src); err != nil && err != io.EOF { + log.Printf("[ERR] plugin: stream copy '%s' error: %s", name, err) + } +} diff --git a/vendor/github.com/hashicorp/go-plugin/testing.go b/vendor/github.com/hashicorp/go-plugin/testing.go new file mode 100644 index 0000000000..e36f2eb2b7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/testing.go @@ -0,0 +1,180 @@ +package plugin + +import ( + "bytes" + "context" + "io" + "net" + "net/rpc" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-plugin/internal/plugin" + "github.com/mitchellh/go-testing-interface" + "google.golang.org/grpc" +) + +// TestOptions allows specifying options that can affect the behavior of the +// test functions +type TestOptions struct { + //ServerStdout causes the given value to be used in place of a blank buffer + //for RPCServer's Stdout + ServerStdout io.ReadCloser + + //ServerStderr causes the given value to be used in place of a blank buffer + //for RPCServer's Stderr + ServerStderr io.ReadCloser +} + +// The testing file contains test helpers that you can use outside of +// this package for making it easier to test plugins themselves. + +// TestConn is a helper function for returning a client and server +// net.Conn connected to each other. +func TestConn(t testing.T) (net.Conn, net.Conn) { + // Listen to any local port. This listener will be closed + // after a single connection is established. + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("err: %s", err) + } + + // Start a goroutine to accept our client connection + var serverConn net.Conn + doneCh := make(chan struct{}) + go func() { + defer close(doneCh) + defer l.Close() + var err error + serverConn, err = l.Accept() + if err != nil { + t.Fatalf("err: %s", err) + } + }() + + // Connect to the server + clientConn, err := net.Dial("tcp", l.Addr().String()) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Wait for the server side to acknowledge it has connected + <-doneCh + + return clientConn, serverConn +} + +// TestRPCConn returns a rpc client and server connected to each other. +func TestRPCConn(t testing.T) (*rpc.Client, *rpc.Server) { + clientConn, serverConn := TestConn(t) + + server := rpc.NewServer() + go server.ServeConn(serverConn) + + client := rpc.NewClient(clientConn) + return client, server +} + +// TestPluginRPCConn returns a plugin RPC client and server that are connected +// together and configured. 
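A sketch of an in-process plugin test built on the helpers above, using TestPluginRPCConn; illustrative only. GreeterPlugin, hostGreeter and Greeter are the hypothetical types from the earlier sketches; the standard *testing.T satisfies the go-testing-interface type used by the helpers.

package example

import (
	"testing"

	plugin "github.com/hashicorp/go-plugin"
)

func TestGreeterPlugin(t *testing.T) {
	plugins := map[string]plugin.Plugin{
		// GreeterPlugin / hostGreeter are the hypothetical types from the
		// earlier sketches.
		"greeter": &GreeterPlugin{Impl: hostGreeter{}},
	}

	client, _ := plugin.TestPluginRPCConn(t, plugins, nil)
	defer client.Close()

	raw, err := client.Dispense("greeter")
	if err != nil {
		t.Fatal(err)
	}

	got, err := raw.(Greeter).Greet("world")
	if err != nil || got != "hello world" {
		t.Fatalf("unexpected result: %q, %v", got, err)
	}
}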
+func TestPluginRPCConn(t testing.T, ps map[string]Plugin, opts *TestOptions) (*RPCClient, *RPCServer) { + // Create two net.Conns we can use to shuttle our control connection + clientConn, serverConn := TestConn(t) + + // Start up the server + server := &RPCServer{Plugins: ps, Stdout: new(bytes.Buffer), Stderr: new(bytes.Buffer)} + if opts != nil { + if opts.ServerStdout != nil { + server.Stdout = opts.ServerStdout + } + if opts.ServerStderr != nil { + server.Stderr = opts.ServerStderr + } + } + go server.ServeConn(serverConn) + + // Connect the client to the server + client, err := NewRPCClient(clientConn, ps) + if err != nil { + t.Fatalf("err: %s", err) + } + + return client, server +} + +// TestGRPCConn returns a gRPC client conn and grpc server that are connected +// together and configured. The register function is used to register services +// prior to the Serve call. This is used to test gRPC connections. +func TestGRPCConn(t testing.T, register func(*grpc.Server)) (*grpc.ClientConn, *grpc.Server) { + // Create a listener + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("err: %s", err) + } + + server := grpc.NewServer() + register(server) + go server.Serve(l) + + // Connect to the server + conn, err := grpc.Dial( + l.Addr().String(), + grpc.WithBlock(), + grpc.WithInsecure()) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Connection successful, close the listener + l.Close() + + return conn, server +} + +// TestPluginGRPCConn returns a plugin gRPC client and server that are connected +// together and configured. This is used to test gRPC connections. +func TestPluginGRPCConn(t testing.T, ps map[string]Plugin) (*GRPCClient, *GRPCServer) { + // Create a listener + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("err: %s", err) + } + + // Start up the server + server := &GRPCServer{ + Plugins: ps, + DoneCh: make(chan struct{}), + Server: DefaultGRPCServer, + Stdout: new(bytes.Buffer), + Stderr: new(bytes.Buffer), + logger: hclog.Default(), + } + if err := server.Init(); err != nil { + t.Fatalf("err: %s", err) + } + go server.Serve(l) + + // Connect to the server + conn, err := grpc.Dial( + l.Addr().String(), + grpc.WithBlock(), + grpc.WithInsecure()) + if err != nil { + t.Fatalf("err: %s", err) + } + + brokerGRPCClient := newGRPCBrokerClient(conn) + broker := newGRPCBroker(brokerGRPCClient, nil) + go broker.Run() + go brokerGRPCClient.StartStream() + + // Create the client + client := &GRPCClient{ + Conn: conn, + Plugins: ps, + broker: broker, + doneCtx: context.Background(), + controller: plugin.NewGRPCControllerClient(conn), + } + + return client, server +} diff --git a/vendor/github.com/hashicorp/go-retryablehttp/.gitignore b/vendor/github.com/hashicorp/go-retryablehttp/.gitignore new file mode 100644 index 0000000000..4e309e0b32 --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/.gitignore @@ -0,0 +1,4 @@ +.idea/ +*.iml +*.test +.vscode/ \ No newline at end of file diff --git a/vendor/github.com/hashicorp/go-multierror/.travis.yml b/vendor/github.com/hashicorp/go-retryablehttp/.travis.yml similarity index 54% rename from vendor/github.com/hashicorp/go-multierror/.travis.yml rename to vendor/github.com/hashicorp/go-retryablehttp/.travis.yml index 24b80388f7..c4fb6d6c8b 100644 --- a/vendor/github.com/hashicorp/go-multierror/.travis.yml +++ b/vendor/github.com/hashicorp/go-retryablehttp/.travis.yml @@ -3,10 +3,10 @@ sudo: false language: go go: - - 1.x + - 1.12.4 branches: only: - master -script: env 
GO111MODULE=on make test testrace +script: make updatedeps test diff --git a/vendor/github.com/hashicorp/go-retryablehttp/LICENSE b/vendor/github.com/hashicorp/go-retryablehttp/LICENSE new file mode 100644 index 0000000000..e87a115e46 --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. 
under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. 
You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. 
Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. 
Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-retryablehttp/Makefile b/vendor/github.com/hashicorp/go-retryablehttp/Makefile new file mode 100644 index 0000000000..da17640e64 --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/Makefile @@ -0,0 +1,11 @@ +default: test + +test: + go vet ./... + go test -race ./... + +updatedeps: + go get -f -t -u ./... + go get -f -u ./... + +.PHONY: default test updatedeps diff --git a/vendor/github.com/hashicorp/go-retryablehttp/README.md b/vendor/github.com/hashicorp/go-retryablehttp/README.md new file mode 100644 index 0000000000..30357c7566 --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/README.md @@ -0,0 +1,61 @@ +go-retryablehttp +================ + +[![Build Status](http://img.shields.io/travis/hashicorp/go-retryablehttp.svg?style=flat-square)][travis] +[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs] + +[travis]: http://travis-ci.org/hashicorp/go-retryablehttp +[godocs]: http://godoc.org/github.com/hashicorp/go-retryablehttp + +The `retryablehttp` package provides a familiar HTTP client interface with +automatic retries and exponential backoff. It is a thin wrapper over the +standard `net/http` client library and exposes nearly the same public API. This +makes `retryablehttp` very easy to drop into existing programs. 
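
As a minimal sketch of the drop-in usage described above (the endpoint URL and JSON payload here are placeholders, not part of the vendored package), a client can be tuned and then used much like `net/http`:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"time"

	"github.com/hashicorp/go-retryablehttp"
)

func main() {
	// Configure explicit retry bounds; the package defaults are
	// 4 retries with a 1s–30s exponential backoff window.
	client := retryablehttp.NewClient()
	client.RetryMax = 3
	client.RetryWaitMin = 500 * time.Millisecond
	client.RetryWaitMax = 5 * time.Second

	// POST with a []byte body: the body is replayable, so it can be
	// re-sent on every retry attempt.
	req, err := retryablehttp.NewRequest("POST", "https://example.com/api", []byte(`{"ping":true}`))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := client.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, len(body))
}
```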
+ +`retryablehttp` performs automatic retries under certain conditions. Mainly, if +an error is returned by the client (connection errors, etc.), or if a 500-range +response code is received (except 501), then a retry is invoked after a wait +period. Otherwise, the response is returned and left to the caller to +interpret. + +The main difference from `net/http` is that requests which take a request body +(POST/PUT et. al) can have the body provided in a number of ways (some more or +less efficient) that allow "rewinding" the request body if the initial request +fails so that the full request can be attempted again. See the +[godoc](http://godoc.org/github.com/hashicorp/go-retryablehttp) for more +details. + +Version 0.6.0 and before are compatible with Go prior to 1.12. From 0.6.1 onward, Go 1.12+ is required. + +Example Use +=========== + +Using this library should look almost identical to what you would do with +`net/http`. The most simple example of a GET request is shown below: + +```go +resp, err := retryablehttp.Get("/foo") +if err != nil { + panic(err) +} +``` + +The returned response object is an `*http.Response`, the same thing you would +usually get from `net/http`. Had the request failed one or more times, the above +call would block and retry with exponential backoff. + +## Getting a stdlib `*http.Client` with retries + +It's possible to convert a `*retryablehttp.Client` directly to a `*http.Client`. +This makes use of retryablehttp broadly applicable with minimal effort. Simply +configure a `*retryablehttp.Client` as you wish, and then call `StandardClient()`: + +```go +retryClient := retryablehttp.NewClient() +retryClient.RetryMax = 10 + +standardClient := retryClient.StandardClient() // *http.Client +``` + +For more usage and examples see the +[godoc](http://godoc.org/github.com/hashicorp/go-retryablehttp). diff --git a/vendor/github.com/hashicorp/go-retryablehttp/client.go b/vendor/github.com/hashicorp/go-retryablehttp/client.go new file mode 100644 index 0000000000..f1ccd3df35 --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/client.go @@ -0,0 +1,705 @@ +// Package retryablehttp provides a familiar HTTP client interface with +// automatic retries and exponential backoff. It is a thin wrapper over the +// standard net/http client library and exposes nearly the same public API. +// This makes retryablehttp very easy to drop into existing programs. +// +// retryablehttp performs automatic retries under certain conditions. Mainly, if +// an error is returned by the client (connection errors etc), or if a 500-range +// response is received, then a retry is invoked. Otherwise, the response is +// returned and left to the caller to interpret. +// +// Requests which take a request body should provide a non-nil function +// parameter. The best choice is to provide either a function satisfying +// ReaderFunc which provides multiple io.Readers in an efficient manner, a +// *bytes.Buffer (the underlying raw byte slice will be used) or a raw byte +// slice. As it is a reference type, and we will wrap it as needed by readers, +// we can efficiently re-use the request body without needing to copy it. If an +// io.Reader (such as a *bytes.Reader) is provided, the full body will be read +// prior to the first request, and will be efficiently re-used for any retries. 
+// ReadSeeker can be used, but some users have observed occasional data races +// between the net/http library and the Seek functionality of some +// implementations of ReadSeeker, so should be avoided if possible. +package retryablehttp + +import ( + "bytes" + "context" + "crypto/x509" + "fmt" + "io" + "io/ioutil" + "log" + "math" + "math/rand" + "net/http" + "net/url" + "os" + "regexp" + "strings" + "sync" + "time" + + "github.com/hashicorp/go-cleanhttp" +) + +var ( + // Default retry configuration + defaultRetryWaitMin = 1 * time.Second + defaultRetryWaitMax = 30 * time.Second + defaultRetryMax = 4 + + // defaultLogger is the logger provided with defaultClient + defaultLogger = log.New(os.Stderr, "", log.LstdFlags) + + // defaultClient is used for performing requests without explicitly making + // a new client. It is purposely private to avoid modifications. + defaultClient = NewClient() + + // We need to consume response bodies to maintain http connections, but + // limit the size we consume to respReadLimit. + respReadLimit = int64(4096) + + // A regular expression to match the error returned by net/http when the + // configured number of redirects is exhausted. This error isn't typed + // specifically so we resort to matching on the error string. + redirectsErrorRe = regexp.MustCompile(`stopped after \d+ redirects\z`) + + // A regular expression to match the error returned by net/http when the + // scheme specified in the URL is invalid. This error isn't typed + // specifically so we resort to matching on the error string. + schemeErrorRe = regexp.MustCompile(`unsupported protocol scheme`) +) + +// ReaderFunc is the type of function that can be given natively to NewRequest +type ReaderFunc func() (io.Reader, error) + +// LenReader is an interface implemented by many in-memory io.Reader's. Used +// for automatically sending the right Content-Length header when possible. +type LenReader interface { + Len() int +} + +// Request wraps the metadata needed to create HTTP requests. +type Request struct { + // body is a seekable reader over the request body payload. This is + // used to rewind the request data in between retries. + body ReaderFunc + + // Embed an HTTP request directly. This makes a *Request act exactly + // like an *http.Request so that all meta methods are supported. + *http.Request +} + +// WithContext returns wrapped Request with a shallow copy of underlying *http.Request +// with its context changed to ctx. The provided ctx must be non-nil. +func (r *Request) WithContext(ctx context.Context) *Request { + r.Request = r.Request.WithContext(ctx) + return r +} + +// BodyBytes allows accessing the request body. It is an analogue to +// http.Request's Body variable, but it returns a copy of the underlying data +// rather than consuming it. +// +// This function is not thread-safe; do not call it at the same time as another +// call, or at the same time this request is being used with Client.Do. +func (r *Request) BodyBytes() ([]byte, error) { + if r.body == nil { + return nil, nil + } + body, err := r.body() + if err != nil { + return nil, err + } + buf := new(bytes.Buffer) + _, err = buf.ReadFrom(body) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// SetBody allows setting the request body. +// +// It is useful if a new body needs to be set without constructing a new Request. 
+func (r *Request) SetBody(rawBody interface{}) error { + bodyReader, contentLength, err := getBodyReaderAndContentLength(rawBody) + if err != nil { + return err + } + r.body = bodyReader + r.ContentLength = contentLength + return nil +} + +// WriteTo allows copying the request body into a writer. +// +// It writes data to w until there's no more data to write or +// when an error occurs. The return int64 value is the number of bytes +// written. Any error encountered during the write is also returned. +// The signature matches io.WriterTo interface. +func (r *Request) WriteTo(w io.Writer) (int64, error) { + body, err := r.body() + if err != nil { + return 0, err + } + if c, ok := body.(io.Closer); ok { + defer c.Close() + } + return io.Copy(w, body) +} + +func getBodyReaderAndContentLength(rawBody interface{}) (ReaderFunc, int64, error) { + var bodyReader ReaderFunc + var contentLength int64 + + switch body := rawBody.(type) { + // If they gave us a function already, great! Use it. + case ReaderFunc: + bodyReader = body + tmp, err := body() + if err != nil { + return nil, 0, err + } + if lr, ok := tmp.(LenReader); ok { + contentLength = int64(lr.Len()) + } + if c, ok := tmp.(io.Closer); ok { + c.Close() + } + + case func() (io.Reader, error): + bodyReader = body + tmp, err := body() + if err != nil { + return nil, 0, err + } + if lr, ok := tmp.(LenReader); ok { + contentLength = int64(lr.Len()) + } + if c, ok := tmp.(io.Closer); ok { + c.Close() + } + + // If a regular byte slice, we can read it over and over via new + // readers + case []byte: + buf := body + bodyReader = func() (io.Reader, error) { + return bytes.NewReader(buf), nil + } + contentLength = int64(len(buf)) + + // If a bytes.Buffer we can read the underlying byte slice over and + // over + case *bytes.Buffer: + buf := body + bodyReader = func() (io.Reader, error) { + return bytes.NewReader(buf.Bytes()), nil + } + contentLength = int64(buf.Len()) + + // We prioritize *bytes.Reader here because we don't really want to + // deal with it seeking so want it to match here instead of the + // io.ReadSeeker case. + case *bytes.Reader: + buf, err := ioutil.ReadAll(body) + if err != nil { + return nil, 0, err + } + bodyReader = func() (io.Reader, error) { + return bytes.NewReader(buf), nil + } + contentLength = int64(len(buf)) + + // Compat case + case io.ReadSeeker: + raw := body + bodyReader = func() (io.Reader, error) { + _, err := raw.Seek(0, 0) + return ioutil.NopCloser(raw), err + } + if lr, ok := raw.(LenReader); ok { + contentLength = int64(lr.Len()) + } + + // Read all in so we can reset + case io.Reader: + buf, err := ioutil.ReadAll(body) + if err != nil { + return nil, 0, err + } + bodyReader = func() (io.Reader, error) { + return bytes.NewReader(buf), nil + } + contentLength = int64(len(buf)) + + // No body provided, nothing to do + case nil: + + // Unrecognized type + default: + return nil, 0, fmt.Errorf("cannot handle type %T", rawBody) + } + return bodyReader, contentLength, nil +} + +// FromRequest wraps an http.Request in a retryablehttp.Request +func FromRequest(r *http.Request) (*Request, error) { + bodyReader, _, err := getBodyReaderAndContentLength(r.Body) + if err != nil { + return nil, err + } + // Could assert contentLength == r.ContentLength + return &Request{bodyReader, r}, nil +} + +// NewRequest creates a new wrapped request. 
+func NewRequest(method, url string, rawBody interface{}) (*Request, error) { + bodyReader, contentLength, err := getBodyReaderAndContentLength(rawBody) + if err != nil { + return nil, err + } + + httpReq, err := http.NewRequest(method, url, nil) + if err != nil { + return nil, err + } + httpReq.ContentLength = contentLength + + return &Request{bodyReader, httpReq}, nil +} + +// Logger interface allows to use other loggers than +// standard log.Logger. +type Logger interface { + Printf(string, ...interface{}) +} + +// LeveledLogger interface implements the basic methods that a logger library needs +type LeveledLogger interface { + Error(string, ...interface{}) + Info(string, ...interface{}) + Debug(string, ...interface{}) + Warn(string, ...interface{}) +} + +// hookLogger adapts an LeveledLogger to Logger for use by the existing hook functions +// without changing the API. +type hookLogger struct { + LeveledLogger +} + +func (h hookLogger) Printf(s string, args ...interface{}) { + h.Info(fmt.Sprintf(s, args...)) +} + +// RequestLogHook allows a function to run before each retry. The HTTP +// request which will be made, and the retry number (0 for the initial +// request) are available to users. The internal logger is exposed to +// consumers. +type RequestLogHook func(Logger, *http.Request, int) + +// ResponseLogHook is like RequestLogHook, but allows running a function +// on each HTTP response. This function will be invoked at the end of +// every HTTP request executed, regardless of whether a subsequent retry +// needs to be performed or not. If the response body is read or closed +// from this method, this will affect the response returned from Do(). +type ResponseLogHook func(Logger, *http.Response) + +// CheckRetry specifies a policy for handling retries. It is called +// following each request with the response and error values returned by +// the http.Client. If CheckRetry returns false, the Client stops retrying +// and returns the response to the caller. If CheckRetry returns an error, +// that error value is returned in lieu of the error from the request. The +// Client will close any response body when retrying, but if the retry is +// aborted it is up to the CheckRetry callback to properly close any +// response body before returning. +type CheckRetry func(ctx context.Context, resp *http.Response, err error) (bool, error) + +// Backoff specifies a policy for how long to wait between retries. +// It is called after a failing request to determine the amount of time +// that should pass before trying again. +type Backoff func(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration + +// ErrorHandler is called if retries are expired, containing the last status +// from the http library. If not specified, default behavior for the library is +// to close the body and return an error indicating how many tries were +// attempted. If overriding this, be sure to close the body if needed. +type ErrorHandler func(resp *http.Response, err error, numTries int) (*http.Response, error) + +// Client is used to make HTTP requests. It adds additional functionality +// like automatic retries to tolerate minor outages. +type Client struct { + HTTPClient *http.Client // Internal HTTP client. + Logger interface{} // Customer logger instance. 
Can be either Logger or LeveledLogger + + RetryWaitMin time.Duration // Minimum time to wait + RetryWaitMax time.Duration // Maximum time to wait + RetryMax int // Maximum number of retries + + // RequestLogHook allows a user-supplied function to be called + // before each retry. + RequestLogHook RequestLogHook + + // ResponseLogHook allows a user-supplied function to be called + // with the response from each HTTP request executed. + ResponseLogHook ResponseLogHook + + // CheckRetry specifies the policy for handling retries, and is called + // after each request. The default policy is DefaultRetryPolicy. + CheckRetry CheckRetry + + // Backoff specifies the policy for how long to wait between retries + Backoff Backoff + + // ErrorHandler specifies the custom error handler to use, if any + ErrorHandler ErrorHandler + + loggerInit sync.Once +} + +// NewClient creates a new Client with default settings. +func NewClient() *Client { + return &Client{ + HTTPClient: cleanhttp.DefaultPooledClient(), + Logger: defaultLogger, + RetryWaitMin: defaultRetryWaitMin, + RetryWaitMax: defaultRetryWaitMax, + RetryMax: defaultRetryMax, + CheckRetry: DefaultRetryPolicy, + Backoff: DefaultBackoff, + } +} + +func (c *Client) logger() interface{} { + c.loggerInit.Do(func() { + if c.Logger == nil { + return + } + + switch c.Logger.(type) { + case Logger, LeveledLogger: + // ok + default: + // This should happen in dev when they are setting Logger and work on code, not in prod. + panic(fmt.Sprintf("invalid logger type passed, must be Logger or LeveledLogger, was %T", c.Logger)) + } + }) + + return c.Logger +} + +// DefaultRetryPolicy provides a default callback for Client.CheckRetry, which +// will retry on connection errors and server errors. +func DefaultRetryPolicy(ctx context.Context, resp *http.Response, err error) (bool, error) { + // do not retry on context.Canceled or context.DeadlineExceeded + if ctx.Err() != nil { + return false, ctx.Err() + } + + if err != nil { + if v, ok := err.(*url.Error); ok { + // Don't retry if the error was due to too many redirects. + if redirectsErrorRe.MatchString(v.Error()) { + return false, nil + } + + // Don't retry if the error was due to an invalid protocol scheme. + if schemeErrorRe.MatchString(v.Error()) { + return false, nil + } + + // Don't retry if the error was due to TLS cert verification failure. + if _, ok := v.Err.(x509.UnknownAuthorityError); ok { + return false, nil + } + } + + // The error is likely recoverable so retry. + return true, nil + } + + // Check the response code. We retry on 500-range responses to allow + // the server time to recover, as 500's are typically not permanent + // errors and may relate to outages on the server side. This will catch + // invalid response codes as well, like 0 and 999. + if resp.StatusCode == 0 || (resp.StatusCode >= 500 && resp.StatusCode != 501) { + return true, nil + } + + return false, nil +} + +// DefaultBackoff provides a default callback for Client.Backoff which +// will perform exponential backoff based on the attempt number and limited +// by the provided minimum and maximum durations. 
+func DefaultBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration { + mult := math.Pow(2, float64(attemptNum)) * float64(min) + sleep := time.Duration(mult) + if float64(sleep) != mult || sleep > max { + sleep = max + } + return sleep +} + +// LinearJitterBackoff provides a callback for Client.Backoff which will +// perform linear backoff based on the attempt number and with jitter to +// prevent a thundering herd. +// +// min and max here are *not* absolute values. The number to be multiplied by +// the attempt number will be chosen at random from between them, thus they are +// bounding the jitter. +// +// For instance: +// * To get strictly linear backoff of one second increasing each retry, set +// both to one second (1s, 2s, 3s, 4s, ...) +// * To get a small amount of jitter centered around one second increasing each +// retry, set to around one second, such as a min of 800ms and max of 1200ms +// (892ms, 2102ms, 2945ms, 4312ms, ...) +// * To get extreme jitter, set to a very wide spread, such as a min of 100ms +// and a max of 20s (15382ms, 292ms, 51321ms, 35234ms, ...) +func LinearJitterBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration { + // attemptNum always starts at zero but we want to start at 1 for multiplication + attemptNum++ + + if max <= min { + // Unclear what to do here, or they are the same, so return min * + // attemptNum + return min * time.Duration(attemptNum) + } + + // Seed rand; doing this every time is fine + rand := rand.New(rand.NewSource(int64(time.Now().Nanosecond()))) + + // Pick a random number that lies somewhere between the min and max and + // multiply by the attemptNum. attemptNum starts at zero so we always + // increment here. We first get a random percentage, then apply that to the + // difference between min and max, and add to min. + jitter := rand.Float64() * float64(max-min) + jitterMin := int64(jitter) + int64(min) + return time.Duration(jitterMin * int64(attemptNum)) +} + +// PassthroughErrorHandler is an ErrorHandler that directly passes through the +// values from the net/http library for the final request. The body is not +// closed. +func PassthroughErrorHandler(resp *http.Response, err error, _ int) (*http.Response, error) { + return resp, err +} + +// Do wraps calling an HTTP method with retries. +func (c *Client) Do(req *Request) (*http.Response, error) { + if c.HTTPClient == nil { + c.HTTPClient = cleanhttp.DefaultPooledClient() + } + + logger := c.logger() + + if logger != nil { + switch v := logger.(type) { + case Logger: + v.Printf("[DEBUG] %s %s", req.Method, req.URL) + case LeveledLogger: + v.Debug("performing request", "method", req.Method, "url", req.URL) + } + } + + var resp *http.Response + var err error + + for i := 0; ; i++ { + var code int // HTTP response code + + // Always rewind the request body when non-nil. + if req.body != nil { + body, err := req.body() + if err != nil { + c.HTTPClient.CloseIdleConnections() + return resp, err + } + if c, ok := body.(io.ReadCloser); ok { + req.Body = c + } else { + req.Body = ioutil.NopCloser(body) + } + } + + if c.RequestLogHook != nil { + switch v := logger.(type) { + case Logger: + c.RequestLogHook(v, req.Request, i) + case LeveledLogger: + c.RequestLogHook(hookLogger{v}, req.Request, i) + default: + c.RequestLogHook(nil, req.Request, i) + } + } + + // Attempt the request + resp, err = c.HTTPClient.Do(req.Request) + if resp != nil { + code = resp.StatusCode + } + + // Check if we should continue with retries. 
+ checkOK, checkErr := c.CheckRetry(req.Context(), resp, err) + + if err != nil { + switch v := logger.(type) { + case Logger: + v.Printf("[ERR] %s %s request failed: %v", req.Method, req.URL, err) + case LeveledLogger: + v.Error("request failed", "error", err, "method", req.Method, "url", req.URL) + } + } else { + // Call this here to maintain the behavior of logging all requests, + // even if CheckRetry signals to stop. + if c.ResponseLogHook != nil { + // Call the response logger function if provided. + switch v := logger.(type) { + case Logger: + c.ResponseLogHook(v, resp) + case LeveledLogger: + c.ResponseLogHook(hookLogger{v}, resp) + default: + c.ResponseLogHook(nil, resp) + } + } + } + + // Now decide if we should continue. + if !checkOK { + if checkErr != nil { + err = checkErr + } + c.HTTPClient.CloseIdleConnections() + return resp, err + } + + // We do this before drainBody because there's no need for the I/O if + // we're breaking out + remain := c.RetryMax - i + if remain <= 0 { + break + } + + // We're going to retry, consume any response to reuse the connection. + if err == nil && resp != nil { + c.drainBody(resp.Body) + } + + wait := c.Backoff(c.RetryWaitMin, c.RetryWaitMax, i, resp) + desc := fmt.Sprintf("%s %s", req.Method, req.URL) + if code > 0 { + desc = fmt.Sprintf("%s (status: %d)", desc, code) + } + if logger != nil { + switch v := logger.(type) { + case Logger: + v.Printf("[DEBUG] %s: retrying in %s (%d left)", desc, wait, remain) + case LeveledLogger: + v.Debug("retrying request", "request", desc, "timeout", wait, "remaining", remain) + } + } + select { + case <-req.Context().Done(): + c.HTTPClient.CloseIdleConnections() + return nil, req.Context().Err() + case <-time.After(wait): + } + } + + if c.ErrorHandler != nil { + c.HTTPClient.CloseIdleConnections() + return c.ErrorHandler(resp, err, c.RetryMax+1) + } + + // By default, we close the response body and return an error without + // returning the response + if resp != nil { + resp.Body.Close() + } + c.HTTPClient.CloseIdleConnections() + return nil, fmt.Errorf("%s %s giving up after %d attempts", + req.Method, req.URL, c.RetryMax+1) +} + +// Try to read the response body so we can reuse this connection. +func (c *Client) drainBody(body io.ReadCloser) { + defer body.Close() + _, err := io.Copy(ioutil.Discard, io.LimitReader(body, respReadLimit)) + if err != nil { + if c.logger() != nil { + switch v := c.logger().(type) { + case Logger: + v.Printf("[ERR] error reading response body: %v", err) + case LeveledLogger: + v.Error("error reading response body", "error", err) + } + } + } +} + +// Get is a shortcut for doing a GET request without making a new client. +func Get(url string) (*http.Response, error) { + return defaultClient.Get(url) +} + +// Get is a convenience helper for doing simple GET requests. +func (c *Client) Get(url string) (*http.Response, error) { + req, err := NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + return c.Do(req) +} + +// Head is a shortcut for doing a HEAD request without making a new client. +func Head(url string) (*http.Response, error) { + return defaultClient.Head(url) +} + +// Head is a convenience method for doing simple HEAD requests. +func (c *Client) Head(url string) (*http.Response, error) { + req, err := NewRequest("HEAD", url, nil) + if err != nil { + return nil, err + } + return c.Do(req) +} + +// Post is a shortcut for doing a POST request without making a new client. 
+func Post(url, bodyType string, body interface{}) (*http.Response, error) { + return defaultClient.Post(url, bodyType, body) +} + +// Post is a convenience method for doing simple POST requests. +func (c *Client) Post(url, bodyType string, body interface{}) (*http.Response, error) { + req, err := NewRequest("POST", url, body) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", bodyType) + return c.Do(req) +} + +// PostForm is a shortcut to perform a POST with form data without creating +// a new client. +func PostForm(url string, data url.Values) (*http.Response, error) { + return defaultClient.PostForm(url, data) +} + +// PostForm is a convenience method for doing simple POST operations using +// pre-filled url.Values form data. +func (c *Client) PostForm(url string, data url.Values) (*http.Response, error) { + return c.Post(url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) +} + +// StandardClient returns a stdlib *http.Client with a custom Transport, which +// shims in a *retryablehttp.Client for added retries. +func (c *Client) StandardClient() *http.Client { + return &http.Client{ + Transport: &RoundTripper{Client: c}, + } +} diff --git a/vendor/github.com/hashicorp/go-retryablehttp/go.mod b/vendor/github.com/hashicorp/go-retryablehttp/go.mod new file mode 100644 index 0000000000..7cc02b76fa --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/go.mod @@ -0,0 +1,8 @@ +module github.com/hashicorp/go-retryablehttp + +require ( + github.com/hashicorp/go-cleanhttp v0.5.1 + github.com/hashicorp/go-hclog v0.9.2 +) + +go 1.13 diff --git a/vendor/github.com/hashicorp/go-retryablehttp/go.sum b/vendor/github.com/hashicorp/go-retryablehttp/go.sum new file mode 100644 index 0000000000..71afe56822 --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/go.sum @@ -0,0 +1,10 @@ +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= diff --git a/vendor/github.com/hashicorp/go-retryablehttp/roundtripper.go b/vendor/github.com/hashicorp/go-retryablehttp/roundtripper.go new file mode 100644 index 0000000000..b841b4cfe5 --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/roundtripper.go @@ -0,0 +1,43 @@ +package retryablehttp + +import ( + "net/http" + "sync" +) + +// RoundTripper implements the http.RoundTripper interface, using a retrying +// HTTP client to execute requests. +// +// It is important to note that retryablehttp doesn't always act exactly as a +// RoundTripper should. This is highly dependent on the retryable client's +// configuration. +type RoundTripper struct { + // The client to use during requests. If nil, the default retryablehttp + // client and settings will be used. 
+ Client *Client + + // once ensures that the logic to initialize the default client runs at + // most once, in a single thread. + once sync.Once +} + +// init initializes the underlying retryable client. +func (rt *RoundTripper) init() { + if rt.Client == nil { + rt.Client = NewClient() + } +} + +// RoundTrip satisfies the http.RoundTripper interface. +func (rt *RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + rt.once.Do(rt.init) + + // Convert the request to be retryable. + retryableReq, err := FromRequest(req) + if err != nil { + return nil, err + } + + // Execute the request. + return rt.Client.Do(retryableReq) +} diff --git a/vendor/github.com/hashicorp/go-rootcerts/.travis.yml b/vendor/github.com/hashicorp/go-rootcerts/.travis.yml new file mode 100644 index 0000000000..80e1de44e9 --- /dev/null +++ b/vendor/github.com/hashicorp/go-rootcerts/.travis.yml @@ -0,0 +1,12 @@ +sudo: false + +language: go + +go: + - 1.6 + +branches: + only: + - master + +script: make test diff --git a/vendor/github.com/hashicorp/go-rootcerts/LICENSE b/vendor/github.com/hashicorp/go-rootcerts/LICENSE new file mode 100644 index 0000000000..e87a115e46 --- /dev/null +++ b/vendor/github.com/hashicorp/go-rootcerts/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. 
"Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. 
Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. 
Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. 
Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-rootcerts/Makefile b/vendor/github.com/hashicorp/go-rootcerts/Makefile new file mode 100644 index 0000000000..c3989e789f --- /dev/null +++ b/vendor/github.com/hashicorp/go-rootcerts/Makefile @@ -0,0 +1,8 @@ +TEST?=./... + +test: + go test $(TEST) $(TESTARGS) -timeout=3s -parallel=4 + go vet $(TEST) + go test $(TEST) -race + +.PHONY: test diff --git a/vendor/github.com/hashicorp/go-rootcerts/README.md b/vendor/github.com/hashicorp/go-rootcerts/README.md new file mode 100644 index 0000000000..6a128e1e14 --- /dev/null +++ b/vendor/github.com/hashicorp/go-rootcerts/README.md @@ -0,0 +1,44 @@ +# rootcerts + +Functions for loading root certificates for TLS connections. 
+ +----- + +Go's standard library `crypto/tls` provides a common mechanism for configuring +TLS connections in `tls.Config`. The `RootCAs` field on this struct is a pool +of certificates for the client to use as a trust store when verifying server +certificates. + +This library contains utility functions for loading certificates destined for +that field, as well as one other important thing: + +When the `RootCAs` field is `nil`, the standard library attempts to load the +host's root CA set. This behavior is OS-specific, and the Darwin +implementation contains [a bug that prevents trusted certificates from the +System and Login keychains from being loaded][1]. This library contains +Darwin-specific behavior that works around that bug. + +[1]: https://github.com/golang/go/issues/14514 + +## Example Usage + +Here's a snippet demonstrating how this library is meant to be used: + +```go +func httpClient() (*http.Client, error) + tlsConfig := &tls.Config{} + err := rootcerts.ConfigureTLS(tlsConfig, &rootcerts.Config{ + CAFile: os.Getenv("MYAPP_CAFILE"), + CAPath: os.Getenv("MYAPP_CAPATH"), + Certificate: os.Getenv("MYAPP_CERTIFICATE"), + }) + if err != nil { + return nil, err + } + c := cleanhttp.DefaultClient() + t := cleanhttp.DefaultTransport() + t.TLSClientConfig = tlsConfig + c.Transport = t + return c, nil +} +``` diff --git a/vendor/github.com/hashicorp/go-rootcerts/doc.go b/vendor/github.com/hashicorp/go-rootcerts/doc.go new file mode 100644 index 0000000000..b55cc62848 --- /dev/null +++ b/vendor/github.com/hashicorp/go-rootcerts/doc.go @@ -0,0 +1,9 @@ +// Package rootcerts contains functions to aid in loading CA certificates for +// TLS connections. +// +// In addition, its default behavior on Darwin works around an open issue [1] +// in Go's crypto/x509 that prevents certicates from being loaded from the +// System or Login keychains. +// +// [1] https://github.com/golang/go/issues/14514 +package rootcerts diff --git a/vendor/github.com/hashicorp/go-rootcerts/go.mod b/vendor/github.com/hashicorp/go-rootcerts/go.mod new file mode 100644 index 0000000000..e2dd024702 --- /dev/null +++ b/vendor/github.com/hashicorp/go-rootcerts/go.mod @@ -0,0 +1,5 @@ +module github.com/hashicorp/go-rootcerts + +go 1.12 + +require github.com/mitchellh/go-homedir v1.1.0 diff --git a/vendor/github.com/hashicorp/go-rootcerts/go.sum b/vendor/github.com/hashicorp/go-rootcerts/go.sum new file mode 100644 index 0000000000..ae38d147b4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-rootcerts/go.sum @@ -0,0 +1,2 @@ +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts.go new file mode 100644 index 0000000000..69aabd6bc7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-rootcerts/rootcerts.go @@ -0,0 +1,123 @@ +package rootcerts + +import ( + "crypto/tls" + "crypto/x509" + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" +) + +// Config determines where LoadCACerts will load certificates from. When CAFile, +// CACertificate and CAPath are blank, this library's functions will either load +// system roots explicitly and return them, or set the CertPool to nil to allow +// Go's standard library to load system certs. +type Config struct { + // CAFile is a path to a PEM-encoded certificate file or bundle. Takes + // precedence over CACertificate and CAPath. 
+ CAFile string + + // CACertificate is a PEM-encoded certificate or bundle. Takes precedence + // over CAPath. + CACertificate []byte + + // CAPath is a path to a directory populated with PEM-encoded certificates. + CAPath string +} + +// ConfigureTLS sets up the RootCAs on the provided tls.Config based on the +// Config specified. +func ConfigureTLS(t *tls.Config, c *Config) error { + if t == nil { + return nil + } + pool, err := LoadCACerts(c) + if err != nil { + return err + } + t.RootCAs = pool + return nil +} + +// LoadCACerts loads a CertPool based on the Config specified. +func LoadCACerts(c *Config) (*x509.CertPool, error) { + if c == nil { + c = &Config{} + } + if c.CAFile != "" { + return LoadCAFile(c.CAFile) + } + if len(c.CACertificate) != 0 { + return AppendCertificate(c.CACertificate) + } + if c.CAPath != "" { + return LoadCAPath(c.CAPath) + } + + return LoadSystemCAs() +} + +// LoadCAFile loads a single PEM-encoded file from the path specified. +func LoadCAFile(caFile string) (*x509.CertPool, error) { + pool := x509.NewCertPool() + + pem, err := ioutil.ReadFile(caFile) + if err != nil { + return nil, fmt.Errorf("Error loading CA File: %s", err) + } + + ok := pool.AppendCertsFromPEM(pem) + if !ok { + return nil, fmt.Errorf("Error loading CA File: Couldn't parse PEM in: %s", caFile) + } + + return pool, nil +} + +// AppendCertificate appends an in-memory PEM-encoded certificate or bundle and returns a pool. +func AppendCertificate(ca []byte) (*x509.CertPool, error) { + pool := x509.NewCertPool() + + ok := pool.AppendCertsFromPEM(ca) + if !ok { + return nil, errors.New("Error appending CA: Couldn't parse PEM") + } + + return pool, nil +} + +// LoadCAPath walks the provided path and loads all certificates encounted into +// a pool. +func LoadCAPath(caPath string) (*x509.CertPool, error) { + pool := x509.NewCertPool() + walkFn := func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + if info.IsDir() { + return nil + } + + pem, err := ioutil.ReadFile(path) + if err != nil { + return fmt.Errorf("Error loading file from CAPath: %s", err) + } + + ok := pool.AppendCertsFromPEM(pem) + if !ok { + return fmt.Errorf("Error loading CA Path: Couldn't parse PEM in: %s", path) + } + + return nil + } + + err := filepath.Walk(caPath, walkFn) + if err != nil { + return nil, err + } + + return pool, nil +} diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go new file mode 100644 index 0000000000..66b1472c4a --- /dev/null +++ b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go @@ -0,0 +1,12 @@ +// +build !darwin + +package rootcerts + +import "crypto/x509" + +// LoadSystemCAs does nothing on non-Darwin systems. We return nil so that +// default behavior of standard TLS config libraries is triggered, which is to +// load system certs. 
+func LoadSystemCAs() (*x509.CertPool, error) { + return nil, nil +} diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go new file mode 100644 index 0000000000..a9a040657f --- /dev/null +++ b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go @@ -0,0 +1,48 @@ +package rootcerts + +import ( + "crypto/x509" + "os/exec" + "path" + + "github.com/mitchellh/go-homedir" +) + +// LoadSystemCAs has special behavior on Darwin systems to work around +func LoadSystemCAs() (*x509.CertPool, error) { + pool := x509.NewCertPool() + + for _, keychain := range certKeychains() { + err := addCertsFromKeychain(pool, keychain) + if err != nil { + return nil, err + } + } + + return pool, nil +} + +func addCertsFromKeychain(pool *x509.CertPool, keychain string) error { + cmd := exec.Command("/usr/bin/security", "find-certificate", "-a", "-p", keychain) + data, err := cmd.Output() + if err != nil { + return err + } + + pool.AppendCertsFromPEM(data) + + return nil +} + +func certKeychains() []string { + keychains := []string{ + "/System/Library/Keychains/SystemRootCertificates.keychain", + "/Library/Keychains/System.keychain", + } + home, err := homedir.Dir() + if err == nil { + loginKeychain := path.Join(home, "Library", "Keychains", "login.keychain") + keychains = append(keychains, loginKeychain) + } + return keychains +} diff --git a/vendor/github.com/hashicorp/go-secure-stdlib/mlock/LICENSE b/vendor/github.com/hashicorp/go-secure-stdlib/mlock/LICENSE new file mode 100644 index 0000000000..e87a115e46 --- /dev/null +++ b/vendor/github.com/hashicorp/go-secure-stdlib/mlock/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. 
"Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. 
Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-secure-stdlib/mlock/go.mod b/vendor/github.com/hashicorp/go-secure-stdlib/mlock/go.mod new file mode 100644 index 0000000000..208a25d784 --- /dev/null +++ b/vendor/github.com/hashicorp/go-secure-stdlib/mlock/go.mod @@ -0,0 +1,5 @@ +module github.com/hashicorp/go-secure-stdlib/mlock + +go 1.16 + +require golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c diff --git a/vendor/github.com/hashicorp/go-secure-stdlib/mlock/go.sum b/vendor/github.com/hashicorp/go-secure-stdlib/mlock/go.sum new file mode 100644 index 0000000000..0f478630ca --- /dev/null +++ b/vendor/github.com/hashicorp/go-secure-stdlib/mlock/go.sum @@ -0,0 +1,2 @@ +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= diff --git a/vendor/github.com/hashicorp/go-secure-stdlib/mlock/mlock.go b/vendor/github.com/hashicorp/go-secure-stdlib/mlock/mlock.go new file mode 100644 index 0000000000..1675633d34 --- /dev/null +++ b/vendor/github.com/hashicorp/go-secure-stdlib/mlock/mlock.go @@ -0,0 +1,15 @@ +package mlock + +// This should be set by the OS-specific packages to tell whether LockMemory +// is supported or not. +var supported bool + +// Supported returns true if LockMemory is functional on this system. +func Supported() bool { + return supported +} + +// LockMemory prevents any memory from being swapped to disk. +func LockMemory() error { + return lockMemory() +} diff --git a/vendor/github.com/hashicorp/go-secure-stdlib/mlock/mlock_unavail.go b/vendor/github.com/hashicorp/go-secure-stdlib/mlock/mlock_unavail.go new file mode 100644 index 0000000000..8084963f72 --- /dev/null +++ b/vendor/github.com/hashicorp/go-secure-stdlib/mlock/mlock_unavail.go @@ -0,0 +1,13 @@ +// +build android darwin nacl netbsd plan9 windows + +package mlock + +func init() { + supported = false +} + +func lockMemory() error { + // XXX: No good way to do this on Windows. There is the VirtualLock + // method, but it requires a specific address and offset. + return nil +} diff --git a/vendor/github.com/hashicorp/go-secure-stdlib/mlock/mlock_unix.go b/vendor/github.com/hashicorp/go-secure-stdlib/mlock/mlock_unix.go new file mode 100644 index 0000000000..af0a69d48a --- /dev/null +++ b/vendor/github.com/hashicorp/go-secure-stdlib/mlock/mlock_unix.go @@ -0,0 +1,18 @@ +// +build dragonfly freebsd linux openbsd solaris + +package mlock + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +func init() { + supported = true +} + +func lockMemory() error { + // Mlockall prevents all current and future pages from being swapped out. + return unix.Mlockall(syscall.MCL_CURRENT | syscall.MCL_FUTURE) +} diff --git a/vendor/github.com/hashicorp/go-secure-stdlib/parseutil/LICENSE b/vendor/github.com/hashicorp/go-secure-stdlib/parseutil/LICENSE new file mode 100644 index 0000000000..e87a115e46 --- /dev/null +++ b/vendor/github.com/hashicorp/go-secure-stdlib/parseutil/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. 
"Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. 
Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. 
If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. 
+ +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. 
Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-secure-stdlib/parseutil/go.mod b/vendor/github.com/hashicorp/go-secure-stdlib/parseutil/go.mod new file mode 100644 index 0000000000..b58f28c02e --- /dev/null +++ b/vendor/github.com/hashicorp/go-secure-stdlib/parseutil/go.mod @@ -0,0 +1,10 @@ +module github.com/hashicorp/go-secure-stdlib/parseutil + +go 1.16 + +require ( + github.com/hashicorp/go-secure-stdlib/strutil v0.1.1 + github.com/hashicorp/go-sockaddr v1.0.2 + github.com/mitchellh/mapstructure v1.4.1 + github.com/stretchr/testify v1.7.0 +) diff --git a/vendor/github.com/hashicorp/go-secure-stdlib/parseutil/go.sum b/vendor/github.com/hashicorp/go-secure-stdlib/parseutil/go.sum new file mode 100644 index 0000000000..7b4e868c4a --- /dev/null +++ b/vendor/github.com/hashicorp/go-secure-stdlib/parseutil/go.sum @@ -0,0 +1,31 @@ +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.1 h1:nd0HIW15E6FG1MsnArYaHfuw9C2zgzM8LxkG5Ty/788= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U= +github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= +github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mitchellh/cli v1.0.0/go.mod 
h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= +github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/github.com/hashicorp/go-secure-stdlib/parseutil/parsepath.go b/vendor/github.com/hashicorp/go-secure-stdlib/parseutil/parsepath.go new file mode 100644 index 0000000000..45e1497ca7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-secure-stdlib/parseutil/parsepath.go @@ -0,0 +1,46 @@ +package parseutil + +import ( + "errors" + "fmt" + "io/ioutil" + "net/url" + "os" + "strings" +) + +var ErrNotAUrl = errors.New("not a url") + +// ParsePath parses a URL with schemes file://, env://, or any other. Depending +// on the scheme it will return specific types of data: +// +// * file:// will return a string with the file's contents +// +// * env:// will return a string with the env var's contents +// +// * Anything else will return the string as it was +// +// On error, we return the original string along with the error. The caller can +// switch on errors.Is(err, ErrNotAUrl) to understand whether it was the parsing +// step that errored or something else (such as a file not found). This is +// useful to attempt to read a non-URL string from some resource, but where the +// original input may simply be a valid string of that type. 
+func ParsePath(path string) (string, error) { + path = strings.TrimSpace(path) + parsed, err := url.Parse(path) + if err != nil { + return path, fmt.Errorf("error parsing url (%q): %w", err.Error(), ErrNotAUrl) + } + switch parsed.Scheme { + case "file": + contents, err := ioutil.ReadFile(strings.TrimPrefix(path, "file://")) + if err != nil { + return path, fmt.Errorf("error reading file at %s: %w", path, err) + } + return strings.TrimSpace(string(contents)), nil + case "env": + return strings.TrimSpace(os.Getenv(strings.TrimPrefix(path, "env://"))), nil + } + + return path, nil +} diff --git a/vendor/github.com/hashicorp/go-secure-stdlib/parseutil/parseutil.go b/vendor/github.com/hashicorp/go-secure-stdlib/parseutil/parseutil.go new file mode 100644 index 0000000000..745836add7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-secure-stdlib/parseutil/parseutil.go @@ -0,0 +1,308 @@ +package parseutil + +import ( + "encoding/json" + "errors" + "fmt" + "regexp" + "strconv" + "strings" + "time" + + "github.com/hashicorp/go-secure-stdlib/strutil" + sockaddr "github.com/hashicorp/go-sockaddr" + "github.com/mitchellh/mapstructure" +) + +var validCapacityString = regexp.MustCompile("^[\t ]*([0-9]+)[\t ]?([kmgtKMGT][iI]?[bB])?[\t ]*$") + +// ParseCapacityString parses a capacity string and returns the number of bytes it represents. +// Capacity strings are things like 5gib or 10MB. Supported prefixes are kb, kib, mb, mib, gb, +// gib, tb, tib, which are not case sensitive. If no prefix is present, the number is assumed +// to be in bytes already. +func ParseCapacityString(in interface{}) (uint64, error) { + var cap uint64 + + jsonIn, ok := in.(json.Number) + if ok { + in = jsonIn.String() + } + + switch inp := in.(type) { + case nil: + // return default of zero + case string: + if inp == "" { + return cap, nil + } + + matches := validCapacityString.FindStringSubmatch(inp) + + // no sub-groups means we couldn't parse it + if len(matches) <= 1 { + return cap, errors.New("could not parse capacity from input") + } + + var multiplier uint64 = 1 + switch strings.ToLower(matches[2]) { + case "kb": + multiplier = 1000 + case "kib": + multiplier = 1024 + case "mb": + multiplier = 1000 * 1000 + case "mib": + multiplier = 1024 * 1024 + case "gb": + multiplier = 1000 * 1000 * 1000 + case "gib": + multiplier = 1024 * 1024 * 1024 + case "tb": + multiplier = 1000 * 1000 * 1000 * 1000 + case "tib": + multiplier = 1024 * 1024 * 1024 * 1024 + } + + size, err := strconv.ParseUint(matches[1], 10, 64) + if err != nil { + return cap, err + } + + cap = size * multiplier + case int: + cap = uint64(inp) + case int32: + cap = uint64(inp) + case int64: + cap = uint64(inp) + case uint: + cap = uint64(inp) + case uint32: + cap = uint64(inp) + case uint64: + cap = uint64(inp) + case float32: + cap = uint64(inp) + case float64: + cap = uint64(inp) + default: + return cap, errors.New("could not parse capacity from input") + } + + return cap, nil +} + +func ParseDurationSecond(in interface{}) (time.Duration, error) { + var dur time.Duration + jsonIn, ok := in.(json.Number) + if ok { + in = jsonIn.String() + } + switch inp := in.(type) { + case nil: + // return default of zero + case string: + if inp == "" { + return dur, nil + } + var err error + // Look for a suffix otherwise its a plain second value + if strings.HasSuffix(inp, "s") || strings.HasSuffix(inp, "m") || strings.HasSuffix(inp, "h") || strings.HasSuffix(inp, "ms") { + dur, err = time.ParseDuration(inp) + if err != nil { + return dur, err + } + } else { + // Plain 
integer + secs, err := strconv.ParseInt(inp, 10, 64) + if err != nil { + return dur, err + } + dur = time.Duration(secs) * time.Second + } + case int: + dur = time.Duration(inp) * time.Second + case int32: + dur = time.Duration(inp) * time.Second + case int64: + dur = time.Duration(inp) * time.Second + case uint: + dur = time.Duration(inp) * time.Second + case uint32: + dur = time.Duration(inp) * time.Second + case uint64: + dur = time.Duration(inp) * time.Second + case float32: + dur = time.Duration(inp) * time.Second + case float64: + dur = time.Duration(inp) * time.Second + case time.Duration: + dur = inp + default: + return 0, errors.New("could not parse duration from input") + } + + return dur, nil +} + +func ParseAbsoluteTime(in interface{}) (time.Time, error) { + var t time.Time + switch inp := in.(type) { + case nil: + // return default of zero + return t, nil + case string: + // Allow RFC3339 with nanoseconds, or without, + // or an epoch time as an integer. + var err error + t, err = time.Parse(time.RFC3339Nano, inp) + if err == nil { + break + } + t, err = time.Parse(time.RFC3339, inp) + if err == nil { + break + } + epochTime, err := strconv.ParseInt(inp, 10, 64) + if err == nil { + t = time.Unix(epochTime, 0) + break + } + return t, errors.New("could not parse string as date and time") + case json.Number: + epochTime, err := inp.Int64() + if err != nil { + return t, err + } + t = time.Unix(epochTime, 0) + case int: + t = time.Unix(int64(inp), 0) + case int32: + t = time.Unix(int64(inp), 0) + case int64: + t = time.Unix(inp, 0) + case uint: + t = time.Unix(int64(inp), 0) + case uint32: + t = time.Unix(int64(inp), 0) + case uint64: + t = time.Unix(int64(inp), 0) + default: + return t, errors.New("could not parse time from input type") + } + return t, nil +} + +func ParseInt(in interface{}) (int64, error) { + var ret int64 + jsonIn, ok := in.(json.Number) + if ok { + in = jsonIn.String() + } + switch in.(type) { + case string: + inp := in.(string) + if inp == "" { + return 0, nil + } + var err error + left, err := strconv.ParseInt(inp, 10, 64) + if err != nil { + return ret, err + } + ret = left + case int: + ret = int64(in.(int)) + case int32: + ret = int64(in.(int32)) + case int64: + ret = in.(int64) + case uint: + ret = int64(in.(uint)) + case uint32: + ret = int64(in.(uint32)) + case uint64: + ret = int64(in.(uint64)) + default: + return 0, errors.New("could not parse value from input") + } + + return ret, nil +} + +func ParseBool(in interface{}) (bool, error) { + var result bool + if err := mapstructure.WeakDecode(in, &result); err != nil { + return false, err + } + return result, nil +} + +func ParseString(in interface{}) (string, error) { + var result string + if err := mapstructure.WeakDecode(in, &result); err != nil { + return "", err + } + return result, nil +} + +func ParseCommaStringSlice(in interface{}) ([]string, error) { + rawString, ok := in.(string) + if ok && rawString == "" { + return []string{}, nil + } + var result []string + config := &mapstructure.DecoderConfig{ + Result: &result, + WeaklyTypedInput: true, + DecodeHook: mapstructure.StringToSliceHookFunc(","), + } + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return nil, err + } + if err := decoder.Decode(in); err != nil { + return nil, err + } + return strutil.TrimStrings(result), nil +} + +func ParseAddrs(addrs interface{}) ([]*sockaddr.SockAddrMarshaler, error) { + out := make([]*sockaddr.SockAddrMarshaler, 0) + stringAddrs := make([]string, 0) + + switch addrs.(type) { + case 
string: + stringAddrs = strutil.ParseArbitraryStringSlice(addrs.(string), ",") + if len(stringAddrs) == 0 { + return nil, fmt.Errorf("unable to parse addresses from %v", addrs) + } + + case []string: + stringAddrs = addrs.([]string) + + case []interface{}: + for _, v := range addrs.([]interface{}) { + stringAddr, ok := v.(string) + if !ok { + return nil, fmt.Errorf("error parsing %v as string", v) + } + stringAddrs = append(stringAddrs, stringAddr) + } + + default: + return nil, fmt.Errorf("unknown address input type %T", addrs) + } + + for _, addr := range stringAddrs { + sa, err := sockaddr.NewSockAddr(addr) + if err != nil { + return nil, fmt.Errorf("error parsing address %q: %w", addr, err) + } + out = append(out, &sockaddr.SockAddrMarshaler{ + SockAddr: sa, + }) + } + + return out, nil +} diff --git a/vendor/github.com/hashicorp/go-secure-stdlib/strutil/LICENSE b/vendor/github.com/hashicorp/go-secure-stdlib/strutil/LICENSE new file mode 100644 index 0000000000..e87a115e46 --- /dev/null +++ b/vendor/github.com/hashicorp/go-secure-stdlib/strutil/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. 
"Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. 
You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. 
The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. 
Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. 
+ diff --git a/vendor/github.com/hashicorp/go-secure-stdlib/strutil/go.mod b/vendor/github.com/hashicorp/go-secure-stdlib/strutil/go.mod new file mode 100644 index 0000000000..18285479c2 --- /dev/null +++ b/vendor/github.com/hashicorp/go-secure-stdlib/strutil/go.mod @@ -0,0 +1,8 @@ +module github.com/hashicorp/go-secure-stdlib/strutil + +go 1.16 + +require ( + github.com/ryanuber/go-glob v1.0.0 + github.com/stretchr/testify v1.7.0 +) diff --git a/vendor/github.com/hashicorp/go-secure-stdlib/strutil/go.sum b/vendor/github.com/hashicorp/go-secure-stdlib/strutil/go.sum new file mode 100644 index 0000000000..3e8032d647 --- /dev/null +++ b/vendor/github.com/hashicorp/go-secure-stdlib/strutil/go.sum @@ -0,0 +1,13 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= +github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/github.com/hashicorp/go-secure-stdlib/strutil/strutil.go b/vendor/github.com/hashicorp/go-secure-stdlib/strutil/strutil.go new file mode 100644 index 0000000000..55d3cdda7b --- /dev/null +++ b/vendor/github.com/hashicorp/go-secure-stdlib/strutil/strutil.go @@ -0,0 +1,508 @@ +package strutil + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "sort" + "strings" + "unicode" + + glob "github.com/ryanuber/go-glob" +) + +// StrListContainsGlob looks for a string in a list of strings and allows +// globs. +func StrListContainsGlob(haystack []string, needle string) bool { + for _, item := range haystack { + if glob.Glob(item, needle) { + return true + } + } + return false +} + +// StrListContains looks for a string in a list of strings. +func StrListContains(haystack []string, needle string) bool { + for _, item := range haystack { + if item == needle { + return true + } + } + return false +} + +// StrListContainsCaseInsensitive looks for a string in a list of strings. +func StrListContainsCaseInsensitive(haystack []string, needle string) bool { + for _, item := range haystack { + if strings.EqualFold(item, needle) { + return true + } + } + return false +} + +// StrListSubset checks if a given list is a subset +// of another set +func StrListSubset(super, sub []string) bool { + for _, item := range sub { + if !StrListContains(super, item) { + return false + } + } + return true +} + +// ParseDedupAndSortStrings parses a comma separated list of strings +// into a slice of strings. The return slice will be sorted and will +// not contain duplicate or empty items. 
+func ParseDedupAndSortStrings(input string, sep string) []string { + input = strings.TrimSpace(input) + parsed := []string{} + if input == "" { + // Don't return nil + return parsed + } + return RemoveDuplicates(strings.Split(input, sep), false) +} + +// ParseDedupLowercaseAndSortStrings parses a comma separated list of +// strings into a slice of strings. The return slice will be sorted and +// will not contain duplicate or empty items. The values will be converted +// to lower case. +func ParseDedupLowercaseAndSortStrings(input string, sep string) []string { + input = strings.TrimSpace(input) + parsed := []string{} + if input == "" { + // Don't return nil + return parsed + } + return RemoveDuplicates(strings.Split(input, sep), true) +} + +// ParseKeyValues parses a comma separated list of `=` tuples +// into a map[string]string. +func ParseKeyValues(input string, out map[string]string, sep string) error { + if out == nil { + return fmt.Errorf("'out is nil") + } + + keyValues := ParseDedupLowercaseAndSortStrings(input, sep) + if len(keyValues) == 0 { + return nil + } + + for _, keyValue := range keyValues { + shards := strings.Split(keyValue, "=") + if len(shards) != 2 { + return fmt.Errorf("invalid format") + } + + key := strings.TrimSpace(shards[0]) + value := strings.TrimSpace(shards[1]) + if key == "" || value == "" { + return fmt.Errorf("invalid pair: key: %q value: %q", key, value) + } + out[key] = value + } + return nil +} + +// ParseArbitraryKeyValues parses arbitrary tuples. The input +// can be one of the following: +// * JSON string +// * Base64 encoded JSON string +// * Comma separated list of `=` pairs +// * Base64 encoded string containing comma separated list of +// `=` pairs +// +// Input will be parsed into the output parameter, which should +// be a non-nil map[string]string. +func ParseArbitraryKeyValues(input string, out map[string]string, sep string) error { + input = strings.TrimSpace(input) + if input == "" { + return nil + } + if out == nil { + return fmt.Errorf("'out' is nil") + } + + // Try to base64 decode the input. If successful, consider the decoded + // value as input. + inputBytes, err := base64.StdEncoding.DecodeString(input) + if err == nil { + input = string(inputBytes) + } + + // Try to JSON unmarshal the input. If successful, consider that the + // metadata was supplied as JSON input. + err = json.Unmarshal([]byte(input), &out) + if err != nil { + // If JSON unmarshaling fails, consider that the input was + // supplied as a comma separated string of 'key=value' pairs. + if err = ParseKeyValues(input, out, sep); err != nil { + return fmt.Errorf("failed to parse the input: %w", err) + } + } + + // Validate the parsed input + for key, value := range out { + if key != "" && value == "" { + return fmt.Errorf("invalid value for key %q", key) + } + } + + return nil +} + +// ParseStringSlice parses a `sep`-separated list of strings into a +// []string with surrounding whitespace removed. +// +// The output will always be a valid slice but may be of length zero. +func ParseStringSlice(input string, sep string) []string { + input = strings.TrimSpace(input) + if input == "" { + return []string{} + } + + splitStr := strings.Split(input, sep) + ret := make([]string, len(splitStr)) + for i, val := range splitStr { + ret[i] = strings.TrimSpace(val) + } + + return ret +} + +// ParseArbitraryStringSlice parses arbitrary string slice. 
The input +// can be one of the following: +// * JSON string +// * Base64 encoded JSON string +// * `sep` separated list of values +// * Base64-encoded string containing a `sep` separated list of values +// +// Note that the separator is ignored if the input is found to already be in a +// structured format (e.g., JSON) +// +// The output will always be a valid slice but may be of length zero. +func ParseArbitraryStringSlice(input string, sep string) []string { + input = strings.TrimSpace(input) + if input == "" { + return []string{} + } + + // Try to base64 decode the input. If successful, consider the decoded + // value as input. + inputBytes, err := base64.StdEncoding.DecodeString(input) + if err == nil { + input = string(inputBytes) + } + + ret := []string{} + + // Try to JSON unmarshal the input. If successful, consider that the + // metadata was supplied as JSON input. + err = json.Unmarshal([]byte(input), &ret) + if err != nil { + // If JSON unmarshaling fails, consider that the input was + // supplied as a separated string of values. + return ParseStringSlice(input, sep) + } + + if ret == nil { + return []string{} + } + + return ret +} + +// TrimStrings takes a slice of strings and returns a slice of strings +// with trimmed spaces +func TrimStrings(items []string) []string { + ret := make([]string, len(items)) + for i, item := range items { + ret[i] = strings.TrimSpace(item) + } + return ret +} + +// RemoveDuplicates removes duplicate and empty elements from a slice of +// strings. This also may convert the items in the slice to lower case and +// returns a sorted slice. +func RemoveDuplicates(items []string, lowercase bool) []string { + itemsMap := map[string]bool{} + for _, item := range items { + item = strings.TrimSpace(item) + if lowercase { + item = strings.ToLower(item) + } + if item == "" { + continue + } + itemsMap[item] = true + } + items = make([]string, 0, len(itemsMap)) + for item := range itemsMap { + items = append(items, item) + } + sort.Strings(items) + return items +} + +// RemoveDuplicatesStable removes duplicate and empty elements from a slice of +// strings, preserving order (and case) of the original slice. +// In all cases, strings are compared after trimming whitespace +// If caseInsensitive, strings will be compared after ToLower() +func RemoveDuplicatesStable(items []string, caseInsensitive bool) []string { + itemsMap := make(map[string]bool, len(items)) + deduplicated := make([]string, 0, len(items)) + + for _, item := range items { + key := strings.TrimSpace(item) + if caseInsensitive { + key = strings.ToLower(key) + } + if key == "" || itemsMap[key] { + continue + } + itemsMap[key] = true + deduplicated = append(deduplicated, item) + } + return deduplicated +} + +// RemoveEmpty removes empty elements from a slice of +// strings +func RemoveEmpty(items []string) []string { + if len(items) == 0 { + return items + } + itemsSlice := make([]string, 0, len(items)) + for _, item := range items { + if item == "" { + continue + } + itemsSlice = append(itemsSlice, item) + } + return itemsSlice +} + +// EquivalentSlices checks whether the given string sets are equivalent, as in, +// they contain the same values. 
+func EquivalentSlices(a, b []string) bool { + if a == nil && b == nil { + return true + } + + if a == nil || b == nil { + return false + } + + // First we'll build maps to ensure unique values + mapA := map[string]bool{} + mapB := map[string]bool{} + for _, keyA := range a { + mapA[keyA] = true + } + for _, keyB := range b { + mapB[keyB] = true + } + + // Now we'll build our checking slices + var sortedA, sortedB []string + for keyA := range mapA { + sortedA = append(sortedA, keyA) + } + for keyB := range mapB { + sortedB = append(sortedB, keyB) + } + sort.Strings(sortedA) + sort.Strings(sortedB) + + // Finally, compare + if len(sortedA) != len(sortedB) { + return false + } + + for i := range sortedA { + if sortedA[i] != sortedB[i] { + return false + } + } + + return true +} + +// EqualStringMaps tests whether two map[string]string objects are equal. +// Equal means both maps have the same sets of keys and values. This function +// is 6-10x faster than a call to reflect.DeepEqual(). +func EqualStringMaps(a, b map[string]string) bool { + if len(a) != len(b) { + return false + } + + for k := range a { + v, ok := b[k] + if !ok || a[k] != v { + return false + } + } + + return true +} + +// StrListDelete removes the first occurrence of the given item from the slice +// of strings if the item exists. +func StrListDelete(s []string, d string) []string { + if s == nil { + return s + } + + for index, element := range s { + if element == d { + return append(s[:index], s[index+1:]...) + } + } + + return s +} + +// GlobbedStringsMatch compares item to val with support for a leading and/or +// trailing wildcard '*' in item. +func GlobbedStringsMatch(item, val string) bool { + if len(item) < 2 { + return val == item + } + + hasPrefix := strings.HasPrefix(item, "*") + hasSuffix := strings.HasSuffix(item, "*") + + if hasPrefix && hasSuffix { + return strings.Contains(val, item[1:len(item)-1]) + } else if hasPrefix { + return strings.HasSuffix(val, item[1:]) + } else if hasSuffix { + return strings.HasPrefix(val, item[:len(item)-1]) + } + + return val == item +} + +// AppendIfMissing adds a string to a slice if the given string is not present +func AppendIfMissing(slice []string, i string) []string { + if StrListContains(slice, i) { + return slice + } + return append(slice, i) +} + +// MergeSlices adds an arbitrary number of slices together, uniquely +func MergeSlices(args ...[]string) []string { + all := map[string]struct{}{} + for _, slice := range args { + for _, v := range slice { + all[v] = struct{}{} + } + } + + result := make([]string, 0, len(all)) + for k := range all { + result = append(result, k) + } + sort.Strings(result) + return result +} + +// Difference returns the set difference (A - B) of the two given slices. The +// result will also remove any duplicated values in set A regardless of whether +// that matches any values in set B. 
+func Difference(a, b []string, lowercase bool) []string { + if len(a) == 0 { + return a + } + if len(b) == 0 { + if !lowercase { + return a + } + newA := make([]string, len(a)) + for i, v := range a { + newA[i] = strings.ToLower(v) + } + return newA + } + + a = RemoveDuplicates(a, lowercase) + b = RemoveDuplicates(b, lowercase) + + itemsMap := map[string]bool{} + for _, aVal := range a { + itemsMap[aVal] = true + } + + // Perform difference calculation + for _, bVal := range b { + if _, ok := itemsMap[bVal]; ok { + itemsMap[bVal] = false + } + } + + items := []string{} + for item, exists := range itemsMap { + if exists { + items = append(items, item) + } + } + sort.Strings(items) + return items +} + +// GetString attempts to retrieve a value from the provided map and assert that it is a string. If the key does not +// exist in the map, this will return an empty string. If the key exists, but the value is not a string type, this will +// return an error. If no map or key is provied, this will return an error +func GetString(m map[string]interface{}, key string) (string, error) { + if m == nil { + return "", fmt.Errorf("missing map") + } + if key == "" { + return "", fmt.Errorf("missing key") + } + + rawVal, ok := m[key] + if !ok { + return "", nil + } + + str, ok := rawVal.(string) + if !ok { + return "", fmt.Errorf("invalid value at %s: is a %T", key, rawVal) + } + return str, nil +} + +// Printable returns true if all characters in the string are printable +// according to Unicode +func Printable(s string) bool { + return strings.IndexFunc(s, func(c rune) bool { + return !unicode.IsPrint(c) + }) == -1 +} + +// StringListToInterfaceList simply takes a []string and turns it into a +// []interface{} to satisfy the input requirements for other library functions +func StringListToInterfaceList(in []string) []interface{} { + ret := make([]interface{}, len(in)) + for i, v := range in { + ret[i] = v + } + return ret +} + +// Reverse reverses the input string +func Reverse(in string) string { + l := len(in) + out := make([]byte, l) + for i := 0; i <= l/2; i++ { + out[i], out[l-1-i] = in[l-1-i], in[i] + } + return string(out) +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/.gitignore b/vendor/github.com/hashicorp/go-sockaddr/.gitignore new file mode 100644 index 0000000000..41720b86e3 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/.gitignore @@ -0,0 +1,26 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof +.cover.out* +coverage.html diff --git a/vendor/github.com/hashicorp/go-sockaddr/GNUmakefile b/vendor/github.com/hashicorp/go-sockaddr/GNUmakefile new file mode 100644 index 0000000000..0f3ae1661e --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/GNUmakefile @@ -0,0 +1,65 @@ +TOOLS= golang.org/x/tools/cover +GOCOVER_TMPFILE?= $(GOCOVER_FILE).tmp +GOCOVER_FILE?= .cover.out +GOCOVERHTML?= coverage.html +FIND=`/usr/bin/which 2> /dev/null gfind find | /usr/bin/grep -v ^no | /usr/bin/head -n 1` +XARGS=`/usr/bin/which 2> /dev/null gxargs xargs | /usr/bin/grep -v ^no | /usr/bin/head -n 1` + +test:: $(GOCOVER_FILE) + @$(MAKE) -C cmd/sockaddr test + +cover:: coverage_report + +$(GOCOVER_FILE):: + @${FIND} . -type d ! -path '*cmd*' ! 
-path '*.git*' -print0 | ${XARGS} -0 -I % sh -ec "cd % && rm -f $(GOCOVER_TMPFILE) && go test -coverprofile=$(GOCOVER_TMPFILE)" + + @echo 'mode: set' > $(GOCOVER_FILE) + @${FIND} . -type f ! -path '*cmd*' ! -path '*.git*' -name "$(GOCOVER_TMPFILE)" -print0 | ${XARGS} -0 -n1 cat $(GOCOVER_TMPFILE) | grep -v '^mode: ' >> ${PWD}/$(GOCOVER_FILE) + +$(GOCOVERHTML): $(GOCOVER_FILE) + go tool cover -html=$(GOCOVER_FILE) -o $(GOCOVERHTML) + +coverage_report:: $(GOCOVER_FILE) + go tool cover -html=$(GOCOVER_FILE) + +audit_tools:: + @go get -u github.com/golang/lint/golint && echo "Installed golint:" + @go get -u github.com/fzipp/gocyclo && echo "Installed gocyclo:" + @go get -u github.com/remyoudompheng/go-misc/deadcode && echo "Installed deadcode:" + @go get -u github.com/client9/misspell/cmd/misspell && echo "Installed misspell:" + @go get -u github.com/gordonklaus/ineffassign && echo "Installed ineffassign:" + +audit:: + deadcode + go tool vet -all *.go + go tool vet -shadow=true *.go + golint *.go + ineffassign . + gocyclo -over 65 *.go + misspell *.go + +clean:: + rm -f $(GOCOVER_FILE) $(GOCOVERHTML) + +dev:: + @go build + @$(MAKE) -B -C cmd/sockaddr sockaddr + +install:: + @go install + @$(MAKE) -C cmd/sockaddr install + +doc:: + @echo Visit: http://127.0.0.1:6161/pkg/github.com/hashicorp/go-sockaddr/ + godoc -http=:6161 -goroot $GOROOT + +world:: + @set -e; \ + for os in solaris darwin freebsd linux windows android; do \ + for arch in amd64; do \ + printf "Building on %s-%s\n" "$${os}" "$${arch}" ; \ + env GOOS="$${os}" GOARCH="$${arch}" go build -o /dev/null; \ + done; \ + done + + $(MAKE) -C cmd/sockaddr world diff --git a/vendor/github.com/hashicorp/go-sockaddr/LICENSE b/vendor/github.com/hashicorp/go-sockaddr/LICENSE new file mode 100644 index 0000000000..a612ad9813 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. 
"Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. 
Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. 
You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. 
No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. 
+ +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/go-sockaddr/README.md b/vendor/github.com/hashicorp/go-sockaddr/README.md new file mode 100644 index 0000000000..a2e170ae09 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/README.md @@ -0,0 +1,118 @@ +# go-sockaddr + +## `sockaddr` Library + +Socket address convenience functions for Go. `go-sockaddr` is a convenience +library that makes doing the right thing with IP addresses easy. `go-sockaddr` +is loosely modeled after the UNIX `sockaddr_t` and creates a union of the family +of `sockaddr_t` types (see below for an ascii diagram). Library documentation +is available +at +[https://godoc.org/github.com/hashicorp/go-sockaddr](https://godoc.org/github.com/hashicorp/go-sockaddr). +The primary intent of the library was to make it possible to define heuristics +for selecting the correct IP addresses when a configuration is evaluated at +runtime. See +the +[docs](https://godoc.org/github.com/hashicorp/go-sockaddr), +[`template` package](https://godoc.org/github.com/hashicorp/go-sockaddr/template), +tests, +and +[CLI utility](https://github.com/hashicorp/go-sockaddr/tree/master/cmd/sockaddr) +for details and hints as to how to use this library. 
+ +For example, with this library it is possible to find an IP address that: + +* is attached to a default route + ([`GetDefaultInterfaces()`](https://godoc.org/github.com/hashicorp/go-sockaddr#GetDefaultInterfaces)) +* is contained within a CIDR block ([`IfByNetwork()`](https://godoc.org/github.com/hashicorp/go-sockaddr#IfByNetwork)) +* is an RFC1918 address + ([`IfByRFC("1918")`](https://godoc.org/github.com/hashicorp/go-sockaddr#IfByRFC)) +* is ordered + ([`OrderedIfAddrBy(args)`](https://godoc.org/github.com/hashicorp/go-sockaddr#OrderedIfAddrBy) where + `args` includes, but is not limited + to, + [`AscIfType`](https://godoc.org/github.com/hashicorp/go-sockaddr#AscIfType), + [`AscNetworkSize`](https://godoc.org/github.com/hashicorp/go-sockaddr#AscNetworkSize)) +* excludes all IPv6 addresses + ([`IfByType("^(IPv4)$")`](https://godoc.org/github.com/hashicorp/go-sockaddr#IfByType)) +* is larger than a `/32` + ([`IfByMaskSize(32)`](https://godoc.org/github.com/hashicorp/go-sockaddr#IfByMaskSize)) +* is not on a `down` interface + ([`ExcludeIfs("flags", "down")`](https://godoc.org/github.com/hashicorp/go-sockaddr#ExcludeIfs)) +* preferences an IPv6 address over an IPv4 address + ([`SortIfByType()`](https://godoc.org/github.com/hashicorp/go-sockaddr#SortIfByType) + + [`ReverseIfAddrs()`](https://godoc.org/github.com/hashicorp/go-sockaddr#ReverseIfAddrs)); and +* excludes any IP in RFC6890 address + ([`IfByRFC("6890")`](https://godoc.org/github.com/hashicorp/go-sockaddr#IfByRFC)) + +Or any combination or variation therein. + +There are also a few simple helper functions such as `GetPublicIP` and +`GetPrivateIP` which both return strings and select the first public or private +IP address on the default interface, respectively. Similarly, there is also a +helper function called `GetInterfaceIP` which returns the first usable IP +address on the named interface. + +## `sockaddr` CLI + +Given the possible complexity of the `sockaddr` library, there is a CLI utility +that accompanies the library, also +called +[`sockaddr`](https://github.com/hashicorp/go-sockaddr/tree/master/cmd/sockaddr). +The +[`sockaddr`](https://github.com/hashicorp/go-sockaddr/tree/master/cmd/sockaddr) +utility exposes nearly all of the functionality of the library and can be used +either as an administrative tool or testing tool. 
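As a rough illustration of the helper functions described above, here is a minimal sketch (not part of the vendored sources) that assumes only the `GetPrivateIP`, `GetPublicIP`, and `GetInterfaceIP` signatures shown in the vendored `ifaddr.go` later in this patch; the interface name `eth0` is purely a placeholder:

```go
package main

import (
	"fmt"
	"log"

	sockaddr "github.com/hashicorp/go-sockaddr"
)

func main() {
	// First RFC 6890 (private) address on an interface with a default route;
	// an empty string is returned when no such address exists.
	privateIP, err := sockaddr.GetPrivateIP()
	if err != nil {
		log.Fatal(err)
	}

	// First non-RFC 6890 (public) address on an interface with a default route.
	publicIP, err := sockaddr.GetPublicIP()
	if err != nil {
		log.Fatal(err)
	}

	// First forwardable address on interfaces whose name matches the given
	// regular expression; "eth0" is only an illustrative placeholder.
	ifaceIP, err := sockaddr.GetInterfaceIP("eth0")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(privateIP, publicIP, ifaceIP)
}
```

Note that `GetInterfaceIP` takes a regular expression over interface names rather than an exact name.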
To install +the +[`sockaddr`](https://github.com/hashicorp/go-sockaddr/tree/master/cmd/sockaddr), +run: + +```text +$ go get -u github.com/hashicorp/go-sockaddr/cmd/sockaddr +``` + +If you're familiar with UNIX's `sockaddr` struct's, the following diagram +mapping the C `sockaddr` (top) to `go-sockaddr` structs (bottom) and +interfaces will be helpful: + +``` ++-------------------------------------------------------+ +| | +| sockaddr | +| SockAddr | +| | +| +--------------+ +----------------------------------+ | +| | sockaddr_un | | | | +| | SockAddrUnix | | sockaddr_in{,6} | | +| +--------------+ | IPAddr | | +| | | | +| | +-------------+ +--------------+ | | +| | | sockaddr_in | | sockaddr_in6 | | | +| | | IPv4Addr | | IPv6Addr | | | +| | +-------------+ +--------------+ | | +| | | | +| +----------------------------------+ | +| | ++-------------------------------------------------------+ +``` + +## Inspiration and Design + +There were many subtle inspirations that led to this design, but the most direct +inspiration for the filtering syntax was +OpenBSD's +[`pf.conf(5)`](https://www.freebsd.org/cgi/man.cgi?query=pf.conf&apropos=0&sektion=0&arch=default&format=html#PARAMETERS) firewall +syntax that lets you select the first IP address on a given named interface. +The original problem stemmed from: + +* needing to create immutable images using [Packer](https://www.packer.io) that + ran the [Consul](https://www.consul.io) process (Consul can only use one IP + address at a time); +* images that may or may not have multiple interfaces or IP addresses at + runtime; and +* we didn't want to rely on configuration management to render out the correct + IP address if the VM image was being used in an auto-scaling group. + +Instead we needed some way to codify a heuristic that would correctly select the +right IP address but the input parameters were not known when the image was +created. diff --git a/vendor/github.com/hashicorp/go-sockaddr/doc.go b/vendor/github.com/hashicorp/go-sockaddr/doc.go new file mode 100644 index 0000000000..90671deb51 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/doc.go @@ -0,0 +1,5 @@ +/* +Package sockaddr is a Go implementation of the UNIX socket family data types and +related helper functions. 
+*/ +package sockaddr diff --git a/vendor/github.com/hashicorp/go-sockaddr/go.mod b/vendor/github.com/hashicorp/go-sockaddr/go.mod new file mode 100644 index 0000000000..21f8d8e8e7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/go.mod @@ -0,0 +1,8 @@ +module github.com/hashicorp/go-sockaddr + +require ( + github.com/hashicorp/errwrap v1.0.0 + github.com/mitchellh/cli v1.0.0 + github.com/mitchellh/go-wordwrap v1.0.0 + github.com/ryanuber/columnize v2.1.0+incompatible +) diff --git a/vendor/github.com/hashicorp/go-sockaddr/go.sum b/vendor/github.com/hashicorp/go-sockaddr/go.sum new file mode 100644 index 0000000000..1b2bdd4828 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/go.sum @@ -0,0 +1,24 @@ +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310 h1:BUAU3CGlLvorLI26FmByPp2eC2qla6E1Tw+scpcg/to= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.3 h1:ns/ykhmWi7G9O+8a448SecJU3nSMBXJfqQkl0upE1jI= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mitchellh/cli v1.0.0 h1:iGBIsUe3+HZ/AD/Vd7DErOt5sU9fa8Uj7A2s1aggv1Y= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/posener/complete v1.1.1 h1:ccV59UEOTzVDnDUEFdT95ZzHVZ+5+158q8+SJb2QV5w= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/ryanuber/columnize v2.1.0+incompatible h1:j1Wcmh8OrK4Q7GXY+V7SVSY8nUWQxHW5TkBe7YUl+2s= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc h1:MeuS1UDyZyFH++6vVy44PuufTeFF0d0nfI6XB87YGSk= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/hashicorp/go-sockaddr/ifaddr.go b/vendor/github.com/hashicorp/go-sockaddr/ifaddr.go new file mode 100644 index 0000000000..0811b27599 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/ifaddr.go @@ -0,0 +1,254 @@ +package sockaddr + +import "strings" + +// ifAddrAttrMap is a map of the IfAddr type-specific attributes. +var ifAddrAttrMap map[AttrName]func(IfAddr) string +var ifAddrAttrs []AttrName + +func init() { + ifAddrAttrInit() +} + +// GetPrivateIP returns a string with a single IP address that is part of RFC +// 6890 and has a default route. 
If the system can't determine its IP address +// or find an RFC 6890 IP address, an empty string will be returned instead. +// This function is the `eval` equivalent of: +// +// ``` +// $ sockaddr eval -r '{{GetPrivateInterfaces | attr "address"}}' +/// ``` +func GetPrivateIP() (string, error) { + privateIfs, err := GetPrivateInterfaces() + if err != nil { + return "", err + } + if len(privateIfs) < 1 { + return "", nil + } + + ifAddr := privateIfs[0] + ip := *ToIPAddr(ifAddr.SockAddr) + return ip.NetIP().String(), nil +} + +// GetPrivateIPs returns a string with all IP addresses that are part of RFC +// 6890 (regardless of whether or not there is a default route, unlike +// GetPublicIP). If the system can't find any RFC 6890 IP addresses, an empty +// string will be returned instead. This function is the `eval` equivalent of: +// +// ``` +// $ sockaddr eval -r '{{GetAllInterfaces | include "RFC" "6890" | join "address" " "}}' +/// ``` +func GetPrivateIPs() (string, error) { + ifAddrs, err := GetAllInterfaces() + if err != nil { + return "", err + } else if len(ifAddrs) < 1 { + return "", nil + } + + ifAddrs, _ = FilterIfByType(ifAddrs, TypeIP) + if len(ifAddrs) == 0 { + return "", nil + } + + OrderedIfAddrBy(AscIfType, AscIfNetworkSize).Sort(ifAddrs) + + ifAddrs, _, err = IfByRFC("6890", ifAddrs) + if err != nil { + return "", err + } else if len(ifAddrs) == 0 { + return "", nil + } + + _, ifAddrs, err = IfByRFC(ForwardingBlacklistRFC, ifAddrs) + if err != nil { + return "", err + } else if len(ifAddrs) == 0 { + return "", nil + } + + ips := make([]string, 0, len(ifAddrs)) + for _, ifAddr := range ifAddrs { + ip := *ToIPAddr(ifAddr.SockAddr) + s := ip.NetIP().String() + ips = append(ips, s) + } + + return strings.Join(ips, " "), nil +} + +// GetPublicIP returns a string with a single IP address that is NOT part of RFC +// 6890 and has a default route. If the system can't determine its IP address +// or find a non RFC 6890 IP address, an empty string will be returned instead. +// This function is the `eval` equivalent of: +// +// ``` +// $ sockaddr eval -r '{{GetPublicInterfaces | attr "address"}}' +/// ``` +func GetPublicIP() (string, error) { + publicIfs, err := GetPublicInterfaces() + if err != nil { + return "", err + } else if len(publicIfs) < 1 { + return "", nil + } + + ifAddr := publicIfs[0] + ip := *ToIPAddr(ifAddr.SockAddr) + return ip.NetIP().String(), nil +} + +// GetPublicIPs returns a string with all IP addresses that are NOT part of RFC +// 6890 (regardless of whether or not there is a default route, unlike +// GetPublicIP). If the system can't find any non RFC 6890 IP addresses, an +// empty string will be returned instead. 
This function is the `eval` +// equivalent of: +// +// ``` +// $ sockaddr eval -r '{{GetAllInterfaces | exclude "RFC" "6890" | join "address" " "}}' +/// ``` +func GetPublicIPs() (string, error) { + ifAddrs, err := GetAllInterfaces() + if err != nil { + return "", err + } else if len(ifAddrs) < 1 { + return "", nil + } + + ifAddrs, _ = FilterIfByType(ifAddrs, TypeIP) + if len(ifAddrs) == 0 { + return "", nil + } + + OrderedIfAddrBy(AscIfType, AscIfNetworkSize).Sort(ifAddrs) + + _, ifAddrs, err = IfByRFC("6890", ifAddrs) + if err != nil { + return "", err + } else if len(ifAddrs) == 0 { + return "", nil + } + + ips := make([]string, 0, len(ifAddrs)) + for _, ifAddr := range ifAddrs { + ip := *ToIPAddr(ifAddr.SockAddr) + s := ip.NetIP().String() + ips = append(ips, s) + } + + return strings.Join(ips, " "), nil +} + +// GetInterfaceIP returns a string with a single IP address sorted by the size +// of the network (i.e. IP addresses with a smaller netmask, larger network +// size, are sorted first). This function is the `eval` equivalent of: +// +// ``` +// $ sockaddr eval -r '{{GetAllInterfaces | include "name" <> | sort "type,size" | include "flag" "forwardable" | attr "address" }}' +/// ``` +func GetInterfaceIP(namedIfRE string) (string, error) { + ifAddrs, err := GetAllInterfaces() + if err != nil { + return "", err + } + + ifAddrs, _, err = IfByName(namedIfRE, ifAddrs) + if err != nil { + return "", err + } + + ifAddrs, _, err = IfByFlag("forwardable", ifAddrs) + if err != nil { + return "", err + } + + ifAddrs, err = SortIfBy("+type,+size", ifAddrs) + if err != nil { + return "", err + } + + if len(ifAddrs) == 0 { + return "", err + } + + ip := ToIPAddr(ifAddrs[0].SockAddr) + if ip == nil { + return "", err + } + + return IPAddrAttr(*ip, "address"), nil +} + +// GetInterfaceIPs returns a string with all IPs, sorted by the size of the +// network (i.e. IP addresses with a smaller netmask, larger network size, are +// sorted first), on a named interface. This function is the `eval` equivalent +// of: +// +// ``` +// $ sockaddr eval -r '{{GetAllInterfaces | include "name" <> | sort "type,size" | join "address" " "}}' +/// ``` +func GetInterfaceIPs(namedIfRE string) (string, error) { + ifAddrs, err := GetAllInterfaces() + if err != nil { + return "", err + } + + ifAddrs, _, err = IfByName(namedIfRE, ifAddrs) + if err != nil { + return "", err + } + + ifAddrs, err = SortIfBy("+type,+size", ifAddrs) + if err != nil { + return "", err + } + + if len(ifAddrs) == 0 { + return "", err + } + + ips := make([]string, 0, len(ifAddrs)) + for _, ifAddr := range ifAddrs { + ip := *ToIPAddr(ifAddr.SockAddr) + s := ip.NetIP().String() + ips = append(ips, s) + } + + return strings.Join(ips, " "), nil +} + +// IfAddrAttrs returns a list of attributes supported by the IfAddr type +func IfAddrAttrs() []AttrName { + return ifAddrAttrs +} + +// IfAddrAttr returns a string representation of an attribute for the given +// IfAddr. 
+func IfAddrAttr(ifAddr IfAddr, attrName AttrName) string { + fn, found := ifAddrAttrMap[attrName] + if !found { + return "" + } + + return fn(ifAddr) +} + +// ifAddrAttrInit is called once at init() +func ifAddrAttrInit() { + // Sorted for human readability + ifAddrAttrs = []AttrName{ + "flags", + "name", + } + + ifAddrAttrMap = map[AttrName]func(ifAddr IfAddr) string{ + "flags": func(ifAddr IfAddr) string { + return ifAddr.Interface.Flags.String() + }, + "name": func(ifAddr IfAddr) string { + return ifAddr.Interface.Name + }, + } +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/ifaddrs.go b/vendor/github.com/hashicorp/go-sockaddr/ifaddrs.go new file mode 100644 index 0000000000..80f61bef68 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/ifaddrs.go @@ -0,0 +1,1304 @@ +package sockaddr + +import ( + "encoding/binary" + "errors" + "fmt" + "math/big" + "net" + "regexp" + "sort" + "strconv" + "strings" +) + +var ( + // Centralize all regexps and regexp.Copy() where necessary. + signRE *regexp.Regexp = regexp.MustCompile(`^[\s]*[+-]`) + whitespaceRE *regexp.Regexp = regexp.MustCompile(`[\s]+`) + ifNameRE *regexp.Regexp = regexp.MustCompile(`^(?:Ethernet|Wireless LAN) adapter ([^:]+):`) + ipAddrRE *regexp.Regexp = regexp.MustCompile(`^ IPv[46] Address\. \. \. \. \. \. \. \. \. \. \. : ([^\s]+)`) +) + +// IfAddrs is a slice of IfAddr +type IfAddrs []IfAddr + +func (ifs IfAddrs) Len() int { return len(ifs) } + +// CmpIfFunc is the function signature that must be met to be used in the +// OrderedIfAddrBy multiIfAddrSorter +type CmpIfAddrFunc func(p1, p2 *IfAddr) int + +// multiIfAddrSorter implements the Sort interface, sorting the IfAddrs within. +type multiIfAddrSorter struct { + ifAddrs IfAddrs + cmp []CmpIfAddrFunc +} + +// Sort sorts the argument slice according to the Cmp functions passed to +// OrderedIfAddrBy. +func (ms *multiIfAddrSorter) Sort(ifAddrs IfAddrs) { + ms.ifAddrs = ifAddrs + sort.Sort(ms) +} + +// OrderedIfAddrBy sorts SockAddr by the list of sort function pointers. +func OrderedIfAddrBy(cmpFuncs ...CmpIfAddrFunc) *multiIfAddrSorter { + return &multiIfAddrSorter{ + cmp: cmpFuncs, + } +} + +// Len is part of sort.Interface. +func (ms *multiIfAddrSorter) Len() int { + return len(ms.ifAddrs) +} + +// Less is part of sort.Interface. It is implemented by looping along the Cmp() +// functions until it finds a comparison that is either less than or greater +// than. A return value of 0 defers sorting to the next function in the +// multisorter (which means the results of sorting may leave the resutls in a +// non-deterministic order). +func (ms *multiIfAddrSorter) Less(i, j int) bool { + p, q := &ms.ifAddrs[i], &ms.ifAddrs[j] + // Try all but the last comparison. + var k int + for k = 0; k < len(ms.cmp)-1; k++ { + cmp := ms.cmp[k] + x := cmp(p, q) + switch x { + case -1: + // p < q, so we have a decision. + return true + case 1: + // p > q, so we have a decision. + return false + } + // p == q; try the next comparison. + } + // All comparisons to here said "equal", so just return whatever the + // final comparison reports. + switch ms.cmp[k](p, q) { + case -1: + return true + case 1: + return false + default: + // Still a tie! Now what? + return false + panic("undefined sort order for remaining items in the list") + } +} + +// Swap is part of sort.Interface. +func (ms *multiIfAddrSorter) Swap(i, j int) { + ms.ifAddrs[i], ms.ifAddrs[j] = ms.ifAddrs[j], ms.ifAddrs[i] +} + +// AscIfAddress is a sorting function to sort IfAddrs by their respective +// address type. 
Non-equal types are deferred in the sort. +func AscIfAddress(p1Ptr, p2Ptr *IfAddr) int { + return AscAddress(&p1Ptr.SockAddr, &p2Ptr.SockAddr) +} + +// AscIfDefault is a sorting function to sort IfAddrs by whether or not they +// have a default route or not. Non-equal types are deferred in the sort. +// +// FIXME: This is a particularly expensive sorting operation because of the +// non-memoized calls to NewRouteInfo(). In an ideal world the routeInfo data +// once at the start of the sort and pass it along as a context or by wrapping +// the IfAddr type with this information (this would also solve the inability to +// return errors and the possibility of failing silently). Fortunately, +// N*log(N) where N = 3 is only ~6.2 invocations. Not ideal, but not worth +// optimizing today. The common case is this gets called once or twice. +// Patches welcome. +func AscIfDefault(p1Ptr, p2Ptr *IfAddr) int { + ri, err := NewRouteInfo() + if err != nil { + return sortDeferDecision + } + + defaultIfName, err := ri.GetDefaultInterfaceName() + if err != nil { + return sortDeferDecision + } + + switch { + case p1Ptr.Interface.Name == defaultIfName && p2Ptr.Interface.Name == defaultIfName: + return sortDeferDecision + case p1Ptr.Interface.Name == defaultIfName: + return sortReceiverBeforeArg + case p2Ptr.Interface.Name == defaultIfName: + return sortArgBeforeReceiver + default: + return sortDeferDecision + } +} + +// AscIfName is a sorting function to sort IfAddrs by their interface names. +func AscIfName(p1Ptr, p2Ptr *IfAddr) int { + return strings.Compare(p1Ptr.Name, p2Ptr.Name) +} + +// AscIfNetworkSize is a sorting function to sort IfAddrs by their respective +// network mask size. +func AscIfNetworkSize(p1Ptr, p2Ptr *IfAddr) int { + return AscNetworkSize(&p1Ptr.SockAddr, &p2Ptr.SockAddr) +} + +// AscIfPort is a sorting function to sort IfAddrs by their respective +// port type. Non-equal types are deferred in the sort. +func AscIfPort(p1Ptr, p2Ptr *IfAddr) int { + return AscPort(&p1Ptr.SockAddr, &p2Ptr.SockAddr) +} + +// AscIfPrivate is a sorting function to sort IfAddrs by "private" values before +// "public" values. Both IPv4 and IPv6 are compared against RFC6890 (RFC6890 +// includes, and is not limited to, RFC1918 and RFC6598 for IPv4, and IPv6 +// includes RFC4193). +func AscIfPrivate(p1Ptr, p2Ptr *IfAddr) int { + return AscPrivate(&p1Ptr.SockAddr, &p2Ptr.SockAddr) +} + +// AscIfType is a sorting function to sort IfAddrs by their respective address +// type. Non-equal types are deferred in the sort. +func AscIfType(p1Ptr, p2Ptr *IfAddr) int { + return AscType(&p1Ptr.SockAddr, &p2Ptr.SockAddr) +} + +// DescIfAddress is identical to AscIfAddress but reverse ordered. +func DescIfAddress(p1Ptr, p2Ptr *IfAddr) int { + return -1 * AscAddress(&p1Ptr.SockAddr, &p2Ptr.SockAddr) +} + +// DescIfDefault is identical to AscIfDefault but reverse ordered. +func DescIfDefault(p1Ptr, p2Ptr *IfAddr) int { + return -1 * AscIfDefault(p1Ptr, p2Ptr) +} + +// DescIfName is identical to AscIfName but reverse ordered. +func DescIfName(p1Ptr, p2Ptr *IfAddr) int { + return -1 * strings.Compare(p1Ptr.Name, p2Ptr.Name) +} + +// DescIfNetworkSize is identical to AscIfNetworkSize but reverse ordered. +func DescIfNetworkSize(p1Ptr, p2Ptr *IfAddr) int { + return -1 * AscNetworkSize(&p1Ptr.SockAddr, &p2Ptr.SockAddr) +} + +// DescIfPort is identical to AscIfPort but reverse ordered. 
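+//
+// The Asc*/Desc* comparators above compose with OrderedIfAddrBy; a minimal
+// sketch (assumes the interfaces were gathered with GetAllInterfaces):
+//
+// ```
+// ifAddrs, _ := GetAllInterfaces()
+// OrderedIfAddrBy(AscIfType, DescIfNetworkSize).Sort(ifAddrs)
+// ```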
+func DescIfPort(p1Ptr, p2Ptr *IfAddr) int { + return -1 * AscPort(&p1Ptr.SockAddr, &p2Ptr.SockAddr) +} + +// DescIfPrivate is identical to AscIfPrivate but reverse ordered. +func DescIfPrivate(p1Ptr, p2Ptr *IfAddr) int { + return -1 * AscPrivate(&p1Ptr.SockAddr, &p2Ptr.SockAddr) +} + +// DescIfType is identical to AscIfType but reverse ordered. +func DescIfType(p1Ptr, p2Ptr *IfAddr) int { + return -1 * AscType(&p1Ptr.SockAddr, &p2Ptr.SockAddr) +} + +// FilterIfByType filters IfAddrs and returns a list of the matching type +func FilterIfByType(ifAddrs IfAddrs, type_ SockAddrType) (matchedIfs, excludedIfs IfAddrs) { + excludedIfs = make(IfAddrs, 0, len(ifAddrs)) + matchedIfs = make(IfAddrs, 0, len(ifAddrs)) + + for _, ifAddr := range ifAddrs { + if ifAddr.SockAddr.Type()&type_ != 0 { + matchedIfs = append(matchedIfs, ifAddr) + } else { + excludedIfs = append(excludedIfs, ifAddr) + } + } + return matchedIfs, excludedIfs +} + +// IfAttr forwards the selector to IfAttr.Attr() for resolution. If there is +// more than one IfAddr, only the first IfAddr is used. +func IfAttr(selectorName string, ifAddr IfAddr) (string, error) { + attrName := AttrName(strings.ToLower(selectorName)) + attrVal, err := ifAddr.Attr(attrName) + return attrVal, err +} + +// IfAttrs forwards the selector to IfAttrs.Attr() for resolution. If there is +// more than one IfAddr, only the first IfAddr is used. +func IfAttrs(selectorName string, ifAddrs IfAddrs) (string, error) { + if len(ifAddrs) == 0 { + return "", nil + } + + attrName := AttrName(strings.ToLower(selectorName)) + attrVal, err := ifAddrs[0].Attr(attrName) + return attrVal, err +} + +// GetAllInterfaces iterates over all available network interfaces and finds all +// available IP addresses on each interface and converts them to +// sockaddr.IPAddrs, and returning the result as an array of IfAddr. +func GetAllInterfaces() (IfAddrs, error) { + ifs, err := net.Interfaces() + if err != nil { + return nil, err + } + + ifAddrs := make(IfAddrs, 0, len(ifs)) + for _, intf := range ifs { + addrs, err := intf.Addrs() + if err != nil { + return nil, err + } + + for _, addr := range addrs { + var ipAddr IPAddr + ipAddr, err = NewIPAddr(addr.String()) + if err != nil { + return IfAddrs{}, fmt.Errorf("unable to create an IP address from %q", addr.String()) + } + + ifAddr := IfAddr{ + SockAddr: ipAddr, + Interface: intf, + } + ifAddrs = append(ifAddrs, ifAddr) + } + } + + return ifAddrs, nil +} + +// GetDefaultInterfaces returns IfAddrs of the addresses attached to the default +// route. +func GetDefaultInterfaces() (IfAddrs, error) { + ri, err := NewRouteInfo() + if err != nil { + return nil, err + } + + defaultIfName, err := ri.GetDefaultInterfaceName() + if err != nil { + return nil, err + } + + var defaultIfs, ifAddrs IfAddrs + ifAddrs, err = GetAllInterfaces() + for _, ifAddr := range ifAddrs { + if ifAddr.Name == defaultIfName { + defaultIfs = append(defaultIfs, ifAddr) + } + } + + return defaultIfs, nil +} + +// GetPrivateInterfaces returns an IfAddrs that are part of RFC 6890 and have a +// default route. If the system can't determine its IP address or find an RFC +// 6890 IP address, an empty IfAddrs will be returned instead. 
This function is +// the `eval` equivalent of: +// +// ``` +// $ sockaddr eval -r '{{GetAllInterfaces | include "type" "ip" | include "flags" "forwardable" | include "flags" "up" | sort "default,type,size" | include "RFC" "6890" }}' +/// ``` +func GetPrivateInterfaces() (IfAddrs, error) { + privateIfs, err := GetAllInterfaces() + if err != nil { + return IfAddrs{}, err + } + if len(privateIfs) == 0 { + return IfAddrs{}, nil + } + + privateIfs, _ = FilterIfByType(privateIfs, TypeIP) + if len(privateIfs) == 0 { + return IfAddrs{}, nil + } + + privateIfs, _, err = IfByFlag("forwardable", privateIfs) + if err != nil { + return IfAddrs{}, err + } + + privateIfs, _, err = IfByFlag("up", privateIfs) + if err != nil { + return IfAddrs{}, err + } + + if len(privateIfs) == 0 { + return IfAddrs{}, nil + } + + OrderedIfAddrBy(AscIfDefault, AscIfType, AscIfNetworkSize).Sort(privateIfs) + + privateIfs, _, err = IfByRFC("6890", privateIfs) + if err != nil { + return IfAddrs{}, err + } else if len(privateIfs) == 0 { + return IfAddrs{}, nil + } + + return privateIfs, nil +} + +// GetPublicInterfaces returns an IfAddrs that are NOT part of RFC 6890 and has a +// default route. If the system can't determine its IP address or find a non +// RFC 6890 IP address, an empty IfAddrs will be returned instead. This +// function is the `eval` equivalent of: +// +// ``` +// $ sockaddr eval -r '{{GetAllInterfaces | include "type" "ip" | include "flags" "forwardable" | include "flags" "up" | sort "default,type,size" | exclude "RFC" "6890" }}' +/// ``` +func GetPublicInterfaces() (IfAddrs, error) { + publicIfs, err := GetAllInterfaces() + if err != nil { + return IfAddrs{}, err + } + if len(publicIfs) == 0 { + return IfAddrs{}, nil + } + + publicIfs, _ = FilterIfByType(publicIfs, TypeIP) + if len(publicIfs) == 0 { + return IfAddrs{}, nil + } + + publicIfs, _, err = IfByFlag("forwardable", publicIfs) + if err != nil { + return IfAddrs{}, err + } + + publicIfs, _, err = IfByFlag("up", publicIfs) + if err != nil { + return IfAddrs{}, err + } + + if len(publicIfs) == 0 { + return IfAddrs{}, nil + } + + OrderedIfAddrBy(AscIfDefault, AscIfType, AscIfNetworkSize).Sort(publicIfs) + + _, publicIfs, err = IfByRFC("6890", publicIfs) + if err != nil { + return IfAddrs{}, err + } else if len(publicIfs) == 0 { + return IfAddrs{}, nil + } + + return publicIfs, nil +} + +// IfByAddress returns a list of matched and non-matched IfAddrs, or an error if +// the regexp fails to compile. +func IfByAddress(inputRe string, ifAddrs IfAddrs) (matched, remainder IfAddrs, err error) { + re, err := regexp.Compile(inputRe) + if err != nil { + return nil, nil, fmt.Errorf("Unable to compile address regexp %+q: %v", inputRe, err) + } + + matchedAddrs := make(IfAddrs, 0, len(ifAddrs)) + excludedAddrs := make(IfAddrs, 0, len(ifAddrs)) + for _, addr := range ifAddrs { + if re.MatchString(addr.SockAddr.String()) { + matchedAddrs = append(matchedAddrs, addr) + } else { + excludedAddrs = append(excludedAddrs, addr) + } + } + + return matchedAddrs, excludedAddrs, nil +} + +// IfByName returns a list of matched and non-matched IfAddrs, or an error if +// the regexp fails to compile. 
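+//
+// A minimal sketch (the interface-name pattern is an assumption about the
+// host's naming scheme):
+//
+// ```
+// ifAddrs, _ := GetAllInterfaces()
+// ethIfs, rest, err := IfByName(`^eth\d+$`, ifAddrs)
+// ```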
+func IfByName(inputRe string, ifAddrs IfAddrs) (matched, remainder IfAddrs, err error) { + re, err := regexp.Compile(inputRe) + if err != nil { + return nil, nil, fmt.Errorf("Unable to compile name regexp %+q: %v", inputRe, err) + } + + matchedAddrs := make(IfAddrs, 0, len(ifAddrs)) + excludedAddrs := make(IfAddrs, 0, len(ifAddrs)) + for _, addr := range ifAddrs { + if re.MatchString(addr.Name) { + matchedAddrs = append(matchedAddrs, addr) + } else { + excludedAddrs = append(excludedAddrs, addr) + } + } + + return matchedAddrs, excludedAddrs, nil +} + +// IfByPort returns a list of matched and non-matched IfAddrs, or an error if +// the regexp fails to compile. +func IfByPort(inputRe string, ifAddrs IfAddrs) (matchedIfs, excludedIfs IfAddrs, err error) { + re, err := regexp.Compile(inputRe) + if err != nil { + return nil, nil, fmt.Errorf("Unable to compile port regexp %+q: %v", inputRe, err) + } + + ipIfs, nonIfs := FilterIfByType(ifAddrs, TypeIP) + matchedIfs = make(IfAddrs, 0, len(ipIfs)) + excludedIfs = append(IfAddrs(nil), nonIfs...) + for _, addr := range ipIfs { + ipAddr := ToIPAddr(addr.SockAddr) + if ipAddr == nil { + continue + } + + port := strconv.FormatInt(int64((*ipAddr).IPPort()), 10) + if re.MatchString(port) { + matchedIfs = append(matchedIfs, addr) + } else { + excludedIfs = append(excludedIfs, addr) + } + } + + return matchedIfs, excludedIfs, nil +} + +// IfByRFC returns a list of matched and non-matched IfAddrs that contain the +// relevant RFC-specified traits. +func IfByRFC(selectorParam string, ifAddrs IfAddrs) (matched, remainder IfAddrs, err error) { + inputRFC, err := strconv.ParseUint(selectorParam, 10, 64) + if err != nil { + return IfAddrs{}, IfAddrs{}, fmt.Errorf("unable to parse RFC number %q: %v", selectorParam, err) + } + + matchedIfAddrs := make(IfAddrs, 0, len(ifAddrs)) + remainingIfAddrs := make(IfAddrs, 0, len(ifAddrs)) + + rfcNetMap := KnownRFCs() + rfcNets, ok := rfcNetMap[uint(inputRFC)] + if !ok { + return nil, nil, fmt.Errorf("unsupported RFC %d", inputRFC) + } + + for _, ifAddr := range ifAddrs { + var contained bool + for _, rfcNet := range rfcNets { + if rfcNet.Contains(ifAddr.SockAddr) { + matchedIfAddrs = append(matchedIfAddrs, ifAddr) + contained = true + break + } + } + if !contained { + remainingIfAddrs = append(remainingIfAddrs, ifAddr) + } + } + + return matchedIfAddrs, remainingIfAddrs, nil +} + +// IfByRFCs returns a list of matched and non-matched IfAddrs that contain the +// relevant RFC-specified traits. Multiple RFCs can be specified and separated +// by the `|` symbol. No protection is taken to ensure an IfAddr does not end +// up in both the included and excluded list. +func IfByRFCs(selectorParam string, ifAddrs IfAddrs) (matched, remainder IfAddrs, err error) { + var includedIfs, excludedIfs IfAddrs + for _, rfcStr := range strings.Split(selectorParam, "|") { + includedRFCIfs, excludedRFCIfs, err := IfByRFC(rfcStr, ifAddrs) + if err != nil { + return IfAddrs{}, IfAddrs{}, fmt.Errorf("unable to lookup RFC number %q: %v", rfcStr, err) + } + includedIfs = append(includedIfs, includedRFCIfs...) + excludedIfs = append(excludedIfs, excludedRFCIfs...) + } + + return includedIfs, excludedIfs, nil +} + +// IfByMaskSize returns a list of matched and non-matched IfAddrs that have the +// matching mask size. 
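+//
+// For example, to keep only addresses configured with a /24 mask (sketch,
+// assuming ifAddrs came from GetAllInterfaces):
+//
+// ```
+// slash24s, others, err := IfByMaskSize("24", ifAddrs)
+// ```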
+func IfByMaskSize(selectorParam string, ifAddrs IfAddrs) (matchedIfs, excludedIfs IfAddrs, err error) { + maskSize, err := strconv.ParseUint(selectorParam, 10, 64) + if err != nil { + return IfAddrs{}, IfAddrs{}, fmt.Errorf("invalid exclude size argument (%q): %v", selectorParam, err) + } + + ipIfs, nonIfs := FilterIfByType(ifAddrs, TypeIP) + matchedIfs = make(IfAddrs, 0, len(ipIfs)) + excludedIfs = append(IfAddrs(nil), nonIfs...) + for _, addr := range ipIfs { + ipAddr := ToIPAddr(addr.SockAddr) + if ipAddr == nil { + return IfAddrs{}, IfAddrs{}, fmt.Errorf("unable to filter mask sizes on non-IP type %s: %v", addr.SockAddr.Type().String(), addr.SockAddr.String()) + } + + switch { + case (*ipAddr).Type()&TypeIPv4 != 0 && maskSize > 32: + return IfAddrs{}, IfAddrs{}, fmt.Errorf("mask size out of bounds for IPv4 address: %d", maskSize) + case (*ipAddr).Type()&TypeIPv6 != 0 && maskSize > 128: + return IfAddrs{}, IfAddrs{}, fmt.Errorf("mask size out of bounds for IPv6 address: %d", maskSize) + } + + if (*ipAddr).Maskbits() == int(maskSize) { + matchedIfs = append(matchedIfs, addr) + } else { + excludedIfs = append(excludedIfs, addr) + } + } + + return matchedIfs, excludedIfs, nil +} + +// IfByType returns a list of matching and non-matching IfAddr that match the +// specified type. For instance: +// +// include "type" "IPv4,IPv6" +// +// will include any IfAddrs that is either an IPv4 or IPv6 address. Any +// addresses on those interfaces that don't match will be included in the +// remainder results. +func IfByType(inputTypes string, ifAddrs IfAddrs) (matched, remainder IfAddrs, err error) { + matchingIfAddrs := make(IfAddrs, 0, len(ifAddrs)) + remainingIfAddrs := make(IfAddrs, 0, len(ifAddrs)) + + ifTypes := strings.Split(strings.ToLower(inputTypes), "|") + for _, ifType := range ifTypes { + switch ifType { + case "ip", "ipv4", "ipv6", "unix": + // Valid types + default: + return nil, nil, fmt.Errorf("unsupported type %q %q", ifType, inputTypes) + } + } + + for _, ifAddr := range ifAddrs { + for _, ifType := range ifTypes { + var matched bool + switch { + case ifType == "ip" && ifAddr.SockAddr.Type()&TypeIP != 0: + matched = true + case ifType == "ipv4" && ifAddr.SockAddr.Type()&TypeIPv4 != 0: + matched = true + case ifType == "ipv6" && ifAddr.SockAddr.Type()&TypeIPv6 != 0: + matched = true + case ifType == "unix" && ifAddr.SockAddr.Type()&TypeUnix != 0: + matched = true + } + + if matched { + matchingIfAddrs = append(matchingIfAddrs, ifAddr) + } else { + remainingIfAddrs = append(remainingIfAddrs, ifAddr) + } + } + } + + return matchingIfAddrs, remainingIfAddrs, nil +} + +// IfByFlag returns a list of matching and non-matching IfAddrs that match the +// specified type. For instance: +// +// include "flag" "up,broadcast" +// +// will include any IfAddrs that have both the "up" and "broadcast" flags set. +// Any addresses on those interfaces that don't match will be omitted from the +// results. 
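+//
+// Note that the implementation below splits the flag list on "|". A minimal
+// sketch (assuming ifAddrs came from GetAllInterfaces):
+//
+// ```
+// upForwardable, rest, err := IfByFlag("up|forwardable", ifAddrs)
+// ```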
+func IfByFlag(inputFlags string, ifAddrs IfAddrs) (matched, remainder IfAddrs, err error) { + matchedAddrs := make(IfAddrs, 0, len(ifAddrs)) + excludedAddrs := make(IfAddrs, 0, len(ifAddrs)) + + var wantForwardable, + wantGlobalUnicast, + wantInterfaceLocalMulticast, + wantLinkLocalMulticast, + wantLinkLocalUnicast, + wantLoopback, + wantMulticast, + wantUnspecified bool + var ifFlags net.Flags + var checkFlags, checkAttrs bool + for _, flagName := range strings.Split(strings.ToLower(inputFlags), "|") { + switch flagName { + case "broadcast": + checkFlags = true + ifFlags = ifFlags | net.FlagBroadcast + case "down": + checkFlags = true + ifFlags = (ifFlags &^ net.FlagUp) + case "forwardable": + checkAttrs = true + wantForwardable = true + case "global unicast": + checkAttrs = true + wantGlobalUnicast = true + case "interface-local multicast": + checkAttrs = true + wantInterfaceLocalMulticast = true + case "link-local multicast": + checkAttrs = true + wantLinkLocalMulticast = true + case "link-local unicast": + checkAttrs = true + wantLinkLocalUnicast = true + case "loopback": + checkAttrs = true + checkFlags = true + ifFlags = ifFlags | net.FlagLoopback + wantLoopback = true + case "multicast": + checkAttrs = true + checkFlags = true + ifFlags = ifFlags | net.FlagMulticast + wantMulticast = true + case "point-to-point": + checkFlags = true + ifFlags = ifFlags | net.FlagPointToPoint + case "unspecified": + checkAttrs = true + wantUnspecified = true + case "up": + checkFlags = true + ifFlags = ifFlags | net.FlagUp + default: + return nil, nil, fmt.Errorf("Unknown interface flag: %+q", flagName) + } + } + + for _, ifAddr := range ifAddrs { + var matched bool + if checkFlags && ifAddr.Interface.Flags&ifFlags == ifFlags { + matched = true + } + if checkAttrs { + if ip := ToIPAddr(ifAddr.SockAddr); ip != nil { + netIP := (*ip).NetIP() + switch { + case wantGlobalUnicast && netIP.IsGlobalUnicast(): + matched = true + case wantInterfaceLocalMulticast && netIP.IsInterfaceLocalMulticast(): + matched = true + case wantLinkLocalMulticast && netIP.IsLinkLocalMulticast(): + matched = true + case wantLinkLocalUnicast && netIP.IsLinkLocalUnicast(): + matched = true + case wantLoopback && netIP.IsLoopback(): + matched = true + case wantMulticast && netIP.IsMulticast(): + matched = true + case wantUnspecified && netIP.IsUnspecified(): + matched = true + case wantForwardable && !IsRFC(ForwardingBlacklist, ifAddr.SockAddr): + matched = true + } + } + } + if matched { + matchedAddrs = append(matchedAddrs, ifAddr) + } else { + excludedAddrs = append(excludedAddrs, ifAddr) + } + } + return matchedAddrs, excludedAddrs, nil +} + +// IfByNetwork returns an IfAddrs that are equal to or included within the +// network passed in by selector. +func IfByNetwork(selectorParam string, inputIfAddrs IfAddrs) (IfAddrs, IfAddrs, error) { + var includedIfs, excludedIfs IfAddrs + for _, netStr := range strings.Split(selectorParam, "|") { + netAddr, err := NewIPAddr(netStr) + if err != nil { + return nil, nil, fmt.Errorf("unable to create an IP address from %+q: %v", netStr, err) + } + + for _, ifAddr := range inputIfAddrs { + if netAddr.Contains(ifAddr.SockAddr) { + includedIfs = append(includedIfs, ifAddr) + } else { + excludedIfs = append(excludedIfs, ifAddr) + } + } + } + + return includedIfs, excludedIfs, nil +} + +// IfAddrMath will return a new IfAddr struct with a mutated value. 
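+//
+// Supported operations are "address" and "network" (both require a signed
+// value) and "mask". A minimal sketch (assumes the IfAddr holds an IP
+// address):
+//
+// ```
+// next, err := IfAddrMath("address", "+1", ifAddrs[0])
+// wider, err := IfAddrMath("mask", "16", ifAddrs[0])
+// ```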
+func IfAddrMath(operation, value string, inputIfAddr IfAddr) (IfAddr, error) { + // Regexp used to enforce the sign being a required part of the grammar for + // some values. + signRe := signRE.Copy() + + switch strings.ToLower(operation) { + case "address": + // "address" operates on the IP address and is allowed to overflow or + // underflow networks, however it will wrap along the underlying address's + // underlying type. + + if !signRe.MatchString(value) { + return IfAddr{}, fmt.Errorf("sign (+/-) is required for operation %q", operation) + } + + switch sockType := inputIfAddr.SockAddr.Type(); sockType { + case TypeIPv4: + // 33 == Accept any uint32 value + // TODO(seanc@): Add the ability to parse hex + i, err := strconv.ParseInt(value, 10, 33) + if err != nil { + return IfAddr{}, fmt.Errorf("unable to convert %q to int for operation %q: %v", value, operation, err) + } + + ipv4 := *ToIPv4Addr(inputIfAddr.SockAddr) + ipv4Uint32 := uint32(ipv4.Address) + ipv4Uint32 += uint32(i) + return IfAddr{ + SockAddr: IPv4Addr{ + Address: IPv4Address(ipv4Uint32), + Mask: ipv4.Mask, + }, + Interface: inputIfAddr.Interface, + }, nil + case TypeIPv6: + // 64 == Accept any int32 value + // TODO(seanc@): Add the ability to parse hex. Also parse a bignum int. + i, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return IfAddr{}, fmt.Errorf("unable to convert %q to int for operation %q: %v", value, operation, err) + } + + ipv6 := *ToIPv6Addr(inputIfAddr.SockAddr) + ipv6BigIntA := new(big.Int) + ipv6BigIntA.Set(ipv6.Address) + ipv6BigIntB := big.NewInt(i) + + ipv6Addr := ipv6BigIntA.Add(ipv6BigIntA, ipv6BigIntB) + ipv6Addr.And(ipv6Addr, ipv6HostMask) + + return IfAddr{ + SockAddr: IPv6Addr{ + Address: IPv6Address(ipv6Addr), + Mask: ipv6.Mask, + }, + Interface: inputIfAddr.Interface, + }, nil + default: + return IfAddr{}, fmt.Errorf("unsupported type for operation %q: %T", operation, sockType) + } + case "network": + // "network" operates on the network address. Positive values start at the + // network address and negative values wrap at the network address, which + // means a "-1" value on a network will be the broadcast address after + // wrapping is applied. + + if !signRe.MatchString(value) { + return IfAddr{}, fmt.Errorf("sign (+/-) is required for operation %q", operation) + } + + switch sockType := inputIfAddr.SockAddr.Type(); sockType { + case TypeIPv4: + // 33 == Accept any uint32 value + // TODO(seanc@): Add the ability to parse hex + i, err := strconv.ParseInt(value, 10, 33) + if err != nil { + return IfAddr{}, fmt.Errorf("unable to convert %q to int for operation %q: %v", value, operation, err) + } + + ipv4 := *ToIPv4Addr(inputIfAddr.SockAddr) + ipv4Uint32 := uint32(ipv4.NetworkAddress()) + + // Wrap along network mask boundaries. EZ-mode wrapping made possible by + // use of int64 vs a uint. + var wrappedMask int64 + if i >= 0 { + wrappedMask = i + } else { + wrappedMask = 1 + i + int64(^uint32(ipv4.Mask)) + } + + ipv4Uint32 = ipv4Uint32 + (uint32(wrappedMask) &^ uint32(ipv4.Mask)) + + return IfAddr{ + SockAddr: IPv4Addr{ + Address: IPv4Address(ipv4Uint32), + Mask: ipv4.Mask, + }, + Interface: inputIfAddr.Interface, + }, nil + case TypeIPv6: + // 64 == Accept any int32 value + // TODO(seanc@): Add the ability to parse hex. Also parse a bignum int. 
+ i, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return IfAddr{}, fmt.Errorf("unable to convert %q to int for operation %q: %v", value, operation, err) + } + + ipv6 := *ToIPv6Addr(inputIfAddr.SockAddr) + ipv6BigInt := new(big.Int) + ipv6BigInt.Set(ipv6.NetworkAddress()) + + mask := new(big.Int) + mask.Set(ipv6.Mask) + if i > 0 { + wrappedMask := new(big.Int) + wrappedMask.SetInt64(i) + + wrappedMask.AndNot(wrappedMask, mask) + ipv6BigInt.Add(ipv6BigInt, wrappedMask) + } else { + // Mask off any bits that exceed the network size. Subtract the + // wrappedMask from the last usable - 1 + wrappedMask := new(big.Int) + wrappedMask.SetInt64(-1 * i) + wrappedMask.Sub(wrappedMask, big.NewInt(1)) + + wrappedMask.AndNot(wrappedMask, mask) + + lastUsable := new(big.Int) + lastUsable.Set(ipv6.LastUsable().(IPv6Addr).Address) + + ipv6BigInt = lastUsable.Sub(lastUsable, wrappedMask) + } + + return IfAddr{ + SockAddr: IPv6Addr{ + Address: IPv6Address(ipv6BigInt), + Mask: ipv6.Mask, + }, + Interface: inputIfAddr.Interface, + }, nil + default: + return IfAddr{}, fmt.Errorf("unsupported type for operation %q: %T", operation, sockType) + } + case "mask": + // "mask" operates on the IP address and returns the IP address on + // which the given integer mask has been applied. If the applied mask + // corresponds to a larger network than the mask of the IP address, + // the latter will be replaced by the former. + switch sockType := inputIfAddr.SockAddr.Type(); sockType { + case TypeIPv4: + i, err := strconv.ParseUint(value, 10, 32) + if err != nil { + return IfAddr{}, fmt.Errorf("unable to convert %q to int for operation %q: %v", value, operation, err) + } + + if i > 32 { + return IfAddr{}, fmt.Errorf("parameter for operation %q on ipv4 addresses must be between 0 and 32", operation) + } + + ipv4 := *ToIPv4Addr(inputIfAddr.SockAddr) + + ipv4Mask := net.CIDRMask(int(i), 32) + ipv4MaskUint32 := binary.BigEndian.Uint32(ipv4Mask) + + maskedIpv4 := ipv4.NetIP().Mask(ipv4Mask) + maskedIpv4Uint32 := binary.BigEndian.Uint32(maskedIpv4) + + maskedIpv4MaskUint32 := uint32(ipv4.Mask) + + if ipv4MaskUint32 < maskedIpv4MaskUint32 { + maskedIpv4MaskUint32 = ipv4MaskUint32 + } + + return IfAddr{ + SockAddr: IPv4Addr{ + Address: IPv4Address(maskedIpv4Uint32), + Mask: IPv4Mask(maskedIpv4MaskUint32), + }, + Interface: inputIfAddr.Interface, + }, nil + case TypeIPv6: + i, err := strconv.ParseUint(value, 10, 32) + if err != nil { + return IfAddr{}, fmt.Errorf("unable to convert %q to int for operation %q: %v", value, operation, err) + } + + if i > 128 { + return IfAddr{}, fmt.Errorf("parameter for operation %q on ipv6 addresses must be between 0 and 64", operation) + } + + ipv6 := *ToIPv6Addr(inputIfAddr.SockAddr) + + ipv6Mask := net.CIDRMask(int(i), 128) + ipv6MaskBigInt := new(big.Int) + ipv6MaskBigInt.SetBytes(ipv6Mask) + + maskedIpv6 := ipv6.NetIP().Mask(ipv6Mask) + maskedIpv6BigInt := new(big.Int) + maskedIpv6BigInt.SetBytes(maskedIpv6) + + maskedIpv6MaskBigInt := new(big.Int) + maskedIpv6MaskBigInt.Set(ipv6.Mask) + + if ipv6MaskBigInt.Cmp(maskedIpv6MaskBigInt) == -1 { + maskedIpv6MaskBigInt = ipv6MaskBigInt + } + + return IfAddr{ + SockAddr: IPv6Addr{ + Address: IPv6Address(maskedIpv6BigInt), + Mask: IPv6Mask(maskedIpv6MaskBigInt), + }, + Interface: inputIfAddr.Interface, + }, nil + default: + return IfAddr{}, fmt.Errorf("unsupported type for operation %q: %T", operation, sockType) + } + default: + return IfAddr{}, fmt.Errorf("unsupported math operation: %q", operation) + } +} + +// IfAddrsMath will apply an 
IfAddrMath operation each IfAddr struct. Any +// failure will result in zero results. +func IfAddrsMath(operation, value string, inputIfAddrs IfAddrs) (IfAddrs, error) { + outputAddrs := make(IfAddrs, 0, len(inputIfAddrs)) + for _, ifAddr := range inputIfAddrs { + result, err := IfAddrMath(operation, value, ifAddr) + if err != nil { + return IfAddrs{}, fmt.Errorf("unable to perform an IPMath operation on %s: %v", ifAddr, err) + } + outputAddrs = append(outputAddrs, result) + } + return outputAddrs, nil +} + +// IncludeIfs returns an IfAddrs based on the passed in selector. +func IncludeIfs(selectorName, selectorParam string, inputIfAddrs IfAddrs) (IfAddrs, error) { + var includedIfs IfAddrs + var err error + + switch strings.ToLower(selectorName) { + case "address": + includedIfs, _, err = IfByAddress(selectorParam, inputIfAddrs) + case "flag", "flags": + includedIfs, _, err = IfByFlag(selectorParam, inputIfAddrs) + case "name": + includedIfs, _, err = IfByName(selectorParam, inputIfAddrs) + case "network": + includedIfs, _, err = IfByNetwork(selectorParam, inputIfAddrs) + case "port": + includedIfs, _, err = IfByPort(selectorParam, inputIfAddrs) + case "rfc", "rfcs": + includedIfs, _, err = IfByRFCs(selectorParam, inputIfAddrs) + case "size": + includedIfs, _, err = IfByMaskSize(selectorParam, inputIfAddrs) + case "type": + includedIfs, _, err = IfByType(selectorParam, inputIfAddrs) + default: + return IfAddrs{}, fmt.Errorf("invalid include selector %q", selectorName) + } + + if err != nil { + return IfAddrs{}, err + } + + return includedIfs, nil +} + +// ExcludeIfs returns an IfAddrs based on the passed in selector. +func ExcludeIfs(selectorName, selectorParam string, inputIfAddrs IfAddrs) (IfAddrs, error) { + var excludedIfs IfAddrs + var err error + + switch strings.ToLower(selectorName) { + case "address": + _, excludedIfs, err = IfByAddress(selectorParam, inputIfAddrs) + case "flag", "flags": + _, excludedIfs, err = IfByFlag(selectorParam, inputIfAddrs) + case "name": + _, excludedIfs, err = IfByName(selectorParam, inputIfAddrs) + case "network": + _, excludedIfs, err = IfByNetwork(selectorParam, inputIfAddrs) + case "port": + _, excludedIfs, err = IfByPort(selectorParam, inputIfAddrs) + case "rfc", "rfcs": + _, excludedIfs, err = IfByRFCs(selectorParam, inputIfAddrs) + case "size": + _, excludedIfs, err = IfByMaskSize(selectorParam, inputIfAddrs) + case "type": + _, excludedIfs, err = IfByType(selectorParam, inputIfAddrs) + default: + return IfAddrs{}, fmt.Errorf("invalid exclude selector %q", selectorName) + } + + if err != nil { + return IfAddrs{}, err + } + + return excludedIfs, nil +} + +// SortIfBy returns an IfAddrs sorted based on the passed in selector. Multiple +// sort clauses can be passed in as a comma delimited list without whitespace. +func SortIfBy(selectorParam string, inputIfAddrs IfAddrs) (IfAddrs, error) { + sortedIfs := append(IfAddrs(nil), inputIfAddrs...) + + clauses := strings.Split(selectorParam, ",") + sortFuncs := make([]CmpIfAddrFunc, len(clauses)) + + for i, clause := range clauses { + switch strings.TrimSpace(strings.ToLower(clause)) { + case "+address", "address": + // The "address" selector returns an array of IfAddrs + // ordered by the network address. IfAddrs that are not + // comparable will be at the end of the list and in a + // non-deterministic order. 
+ sortFuncs[i] = AscIfAddress + case "-address": + sortFuncs[i] = DescIfAddress + case "+default", "default": + sortFuncs[i] = AscIfDefault + case "-default": + sortFuncs[i] = DescIfDefault + case "+name", "name": + // The "name" selector returns an array of IfAddrs + // ordered by the interface name. + sortFuncs[i] = AscIfName + case "-name": + sortFuncs[i] = DescIfName + case "+port", "port": + // The "port" selector returns an array of IfAddrs + // ordered by the port, if included in the IfAddr. + // IfAddrs that are not comparable will be at the end of + // the list and in a non-deterministic order. + sortFuncs[i] = AscIfPort + case "-port": + sortFuncs[i] = DescIfPort + case "+private", "private": + // The "private" selector returns an array of IfAddrs + // ordered by private addresses first. IfAddrs that are + // not comparable will be at the end of the list and in + // a non-deterministic order. + sortFuncs[i] = AscIfPrivate + case "-private": + sortFuncs[i] = DescIfPrivate + case "+size", "size": + // The "size" selector returns an array of IfAddrs + // ordered by the size of the network mask, smaller mask + // (larger number of hosts per network) to largest + // (e.g. a /24 sorts before a /32). + sortFuncs[i] = AscIfNetworkSize + case "-size": + sortFuncs[i] = DescIfNetworkSize + case "+type", "type": + // The "type" selector returns an array of IfAddrs + // ordered by the type of the IfAddr. The sort order is + // Unix, IPv4, then IPv6. + sortFuncs[i] = AscIfType + case "-type": + sortFuncs[i] = DescIfType + default: + // Return an empty list for invalid sort types. + return IfAddrs{}, fmt.Errorf("unknown sort type: %q", clause) + } + } + + OrderedIfAddrBy(sortFuncs...).Sort(sortedIfs) + + return sortedIfs, nil +} + +// UniqueIfAddrsBy creates a unique set of IfAddrs based on the matching +// selector. UniqueIfAddrsBy assumes the input has already been sorted. +func UniqueIfAddrsBy(selectorName string, inputIfAddrs IfAddrs) (IfAddrs, error) { + attrName := strings.ToLower(selectorName) + + ifs := make(IfAddrs, 0, len(inputIfAddrs)) + var lastMatch string + for _, ifAddr := range inputIfAddrs { + var out string + switch attrName { + case "address": + out = ifAddr.SockAddr.String() + case "name": + out = ifAddr.Name + default: + return nil, fmt.Errorf("unsupported unique constraint %+q", selectorName) + } + + switch { + case lastMatch == "", lastMatch != out: + lastMatch = out + ifs = append(ifs, ifAddr) + case lastMatch == out: + continue + } + } + + return ifs, nil +} + +// JoinIfAddrs joins an IfAddrs and returns a string +func JoinIfAddrs(selectorName string, joinStr string, inputIfAddrs IfAddrs) (string, error) { + outputs := make([]string, 0, len(inputIfAddrs)) + attrName := AttrName(strings.ToLower(selectorName)) + + for _, ifAddr := range inputIfAddrs { + var attrVal string + var err error + attrVal, err = ifAddr.Attr(attrName) + if err != nil { + return "", err + } + outputs = append(outputs, attrVal) + } + return strings.Join(outputs, joinStr), nil +} + +// LimitIfAddrs returns a slice of IfAddrs based on the specified limit. +func LimitIfAddrs(lim uint, in IfAddrs) (IfAddrs, error) { + // Clamp the limit to the length of the array + if int(lim) > len(in) { + lim = uint(len(in)) + } + + return in[0:lim], nil +} + +// OffsetIfAddrs returns a slice of IfAddrs based on the specified offset. 
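+//
+// A negative offset counts back from the end of the slice. Sketch (assumes
+// ifAddrs holds at least two entries):
+//
+// ```
+// tail, err := OffsetIfAddrs(-2, ifAddrs) // the last two entries
+// rest, err := OffsetIfAddrs(1, ifAddrs)  // everything after the first entry
+// ```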
+func OffsetIfAddrs(off int, in IfAddrs) (IfAddrs, error) { + var end bool + if off < 0 { + end = true + off = off * -1 + } + + if off > len(in) { + return IfAddrs{}, fmt.Errorf("unable to seek past the end of the interface array: offset (%d) exceeds the number of interfaces (%d)", off, len(in)) + } + + if end { + return in[len(in)-off:], nil + } + return in[off:], nil +} + +func (ifAddr IfAddr) String() string { + return fmt.Sprintf("%s %v", ifAddr.SockAddr, ifAddr.Interface) +} + +// parseDefaultIfNameFromRoute parses standard route(8)'s output for the *BSDs +// and Solaris. +func parseDefaultIfNameFromRoute(routeOut string) (string, error) { + lines := strings.Split(routeOut, "\n") + for _, line := range lines { + kvs := strings.SplitN(line, ":", 2) + if len(kvs) != 2 { + continue + } + + if strings.TrimSpace(kvs[0]) == "interface" { + ifName := strings.TrimSpace(kvs[1]) + return ifName, nil + } + } + + return "", errors.New("No default interface found") +} + +// parseDefaultIfNameFromIPCmd parses the default interface from ip(8) for +// Linux. +func parseDefaultIfNameFromIPCmd(routeOut string) (string, error) { + parsedLines := parseIfNameFromIPCmd(routeOut) + for _, parsedLine := range parsedLines { + if parsedLine[0] == "default" && + parsedLine[1] == "via" && + parsedLine[3] == "dev" { + ifName := strings.TrimSpace(parsedLine[4]) + return ifName, nil + } + } + + return "", errors.New("No default interface found") +} + +// parseDefaultIfNameFromIPCmdAndroid parses the default interface from ip(8) for +// Android. +func parseDefaultIfNameFromIPCmdAndroid(routeOut string) (string, error) { + parsedLines := parseIfNameFromIPCmd(routeOut) + if (len(parsedLines) > 0) { + ifName := strings.TrimSpace(parsedLines[0][4]) + return ifName, nil + } + + return "", errors.New("No default interface found") +} + + +// parseIfNameFromIPCmd parses interfaces from ip(8) for +// Linux. +func parseIfNameFromIPCmd(routeOut string) [][]string { + lines := strings.Split(routeOut, "\n") + re := whitespaceRE.Copy() + parsedLines := make([][]string, 0, len(lines)) + for _, line := range lines { + kvs := re.Split(line, -1) + if len(kvs) < 5 { + continue + } + parsedLines = append(parsedLines, kvs) + } + return parsedLines +} + +// parseDefaultIfNameWindows parses the default interface from `netstat -rn` and +// `ipconfig` on Windows. +func parseDefaultIfNameWindows(routeOut, ipconfigOut string) (string, error) { + defaultIPAddr, err := parseDefaultIPAddrWindowsRoute(routeOut) + if err != nil { + return "", err + } + + ifName, err := parseDefaultIfNameWindowsIPConfig(defaultIPAddr, ipconfigOut) + if err != nil { + return "", err + } + + return ifName, nil +} + +// parseDefaultIPAddrWindowsRoute parses the IP address on the default interface +// `netstat -rn`. +// +// NOTES(sean): Only IPv4 addresses are parsed at this time. If you have an +// IPv6 connected host, submit an issue on github.com/hashicorp/go-sockaddr with +// the output from `netstat -rn`, `ipconfig`, and version of Windows to see IPv6 +// support added. 
+func parseDefaultIPAddrWindowsRoute(routeOut string) (string, error) { + lines := strings.Split(routeOut, "\n") + re := whitespaceRE.Copy() + for _, line := range lines { + kvs := re.Split(strings.TrimSpace(line), -1) + if len(kvs) < 3 { + continue + } + + if kvs[0] == "0.0.0.0" && kvs[1] == "0.0.0.0" { + defaultIPAddr := strings.TrimSpace(kvs[3]) + return defaultIPAddr, nil + } + } + + return "", errors.New("No IP on default interface found") +} + +// parseDefaultIfNameWindowsIPConfig parses the output of `ipconfig` to find the +// interface name forwarding traffic to the default gateway. +func parseDefaultIfNameWindowsIPConfig(defaultIPAddr, routeOut string) (string, error) { + lines := strings.Split(routeOut, "\n") + ifNameRe := ifNameRE.Copy() + ipAddrRe := ipAddrRE.Copy() + var ifName string + for _, line := range lines { + switch ifNameMatches := ifNameRe.FindStringSubmatch(line); { + case len(ifNameMatches) > 1: + ifName = ifNameMatches[1] + continue + } + + switch ipAddrMatches := ipAddrRe.FindStringSubmatch(line); { + case len(ipAddrMatches) > 1 && ipAddrMatches[1] == defaultIPAddr: + return ifName, nil + } + } + + return "", errors.New("No default interface found with matching IP") +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/ifattr.go b/vendor/github.com/hashicorp/go-sockaddr/ifattr.go new file mode 100644 index 0000000000..6984cb4a35 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/ifattr.go @@ -0,0 +1,65 @@ +package sockaddr + +import ( + "fmt" + "net" +) + +// IfAddr is a union of a SockAddr and a net.Interface. +type IfAddr struct { + SockAddr + net.Interface +} + +// Attr returns the named attribute as a string +func (ifAddr IfAddr) Attr(attrName AttrName) (string, error) { + val := IfAddrAttr(ifAddr, attrName) + if val != "" { + return val, nil + } + + return Attr(ifAddr.SockAddr, attrName) +} + +// Attr returns the named attribute as a string +func Attr(sa SockAddr, attrName AttrName) (string, error) { + switch sockType := sa.Type(); { + case sockType&TypeIP != 0: + ip := *ToIPAddr(sa) + attrVal := IPAddrAttr(ip, attrName) + if attrVal != "" { + return attrVal, nil + } + + if sockType == TypeIPv4 { + ipv4 := *ToIPv4Addr(sa) + attrVal := IPv4AddrAttr(ipv4, attrName) + if attrVal != "" { + return attrVal, nil + } + } else if sockType == TypeIPv6 { + ipv6 := *ToIPv6Addr(sa) + attrVal := IPv6AddrAttr(ipv6, attrName) + if attrVal != "" { + return attrVal, nil + } + } + + case sockType == TypeUnix: + us := *ToUnixSock(sa) + attrVal := UnixSockAttr(us, attrName) + if attrVal != "" { + return attrVal, nil + } + } + + // Non type-specific attributes + switch attrName { + case "string": + return sa.String(), nil + case "type": + return sa.Type().String(), nil + } + + return "", fmt.Errorf("unsupported attribute name %q", attrName) +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/ipaddr.go b/vendor/github.com/hashicorp/go-sockaddr/ipaddr.go new file mode 100644 index 0000000000..b47d15c201 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/ipaddr.go @@ -0,0 +1,169 @@ +package sockaddr + +import ( + "fmt" + "math/big" + "net" + "strings" +) + +// Constants for the sizes of IPv3, IPv4, and IPv6 address types. +const ( + IPv3len = 6 + IPv4len = 4 + IPv6len = 16 +) + +// IPAddr is a generic IP address interface for IPv4 and IPv6 addresses, +// networks, and socket endpoints. 
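+//
+// A minimal usage sketch (the literal address is an arbitrary example):
+//
+// ```
+// addr, err := NewIPAddr("10.0.0.1/8")
+// if err == nil {
+//     _ = addr.Maskbits()       // 8
+//     _ = addr.NetIP().String() // "10.0.0.1"
+// }
+// ```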
+type IPAddr interface { + SockAddr + AddressBinString() string + AddressHexString() string + Cmp(SockAddr) int + CmpAddress(SockAddr) int + CmpPort(SockAddr) int + FirstUsable() IPAddr + Host() IPAddr + IPPort() IPPort + LastUsable() IPAddr + Maskbits() int + NetIP() *net.IP + NetIPMask() *net.IPMask + NetIPNet() *net.IPNet + Network() IPAddr + Octets() []int +} + +// IPPort is the type for an IP port number for the TCP and UDP IP transports. +type IPPort uint16 + +// IPPrefixLen is a typed integer representing the prefix length for a given +// IPAddr. +type IPPrefixLen byte + +// ipAddrAttrMap is a map of the IPAddr type-specific attributes. +var ipAddrAttrMap map[AttrName]func(IPAddr) string +var ipAddrAttrs []AttrName + +func init() { + ipAddrInit() +} + +// NewIPAddr creates a new IPAddr from a string. Returns nil if the string is +// not an IPv4 or an IPv6 address. +func NewIPAddr(addr string) (IPAddr, error) { + ipv4Addr, err := NewIPv4Addr(addr) + if err == nil { + return ipv4Addr, nil + } + + ipv6Addr, err := NewIPv6Addr(addr) + if err == nil { + return ipv6Addr, nil + } + + return nil, fmt.Errorf("invalid IPAddr %v", addr) +} + +// IPAddrAttr returns a string representation of an attribute for the given +// IPAddr. +func IPAddrAttr(ip IPAddr, selector AttrName) string { + fn, found := ipAddrAttrMap[selector] + if !found { + return "" + } + + return fn(ip) +} + +// IPAttrs returns a list of attributes supported by the IPAddr type +func IPAttrs() []AttrName { + return ipAddrAttrs +} + +// MustIPAddr is a helper method that must return an IPAddr or panic on invalid +// input. +func MustIPAddr(addr string) IPAddr { + ip, err := NewIPAddr(addr) + if err != nil { + panic(fmt.Sprintf("Unable to create an IPAddr from %+q: %v", addr, err)) + } + return ip +} + +// ipAddrInit is called once at init() +func ipAddrInit() { + // Sorted for human readability + ipAddrAttrs = []AttrName{ + "host", + "address", + "port", + "netmask", + "network", + "mask_bits", + "binary", + "hex", + "first_usable", + "last_usable", + "octets", + } + + ipAddrAttrMap = map[AttrName]func(ip IPAddr) string{ + "address": func(ip IPAddr) string { + return ip.NetIP().String() + }, + "binary": func(ip IPAddr) string { + return ip.AddressBinString() + }, + "first_usable": func(ip IPAddr) string { + return ip.FirstUsable().String() + }, + "hex": func(ip IPAddr) string { + return ip.AddressHexString() + }, + "host": func(ip IPAddr) string { + return ip.Host().String() + }, + "last_usable": func(ip IPAddr) string { + return ip.LastUsable().String() + }, + "mask_bits": func(ip IPAddr) string { + return fmt.Sprintf("%d", ip.Maskbits()) + }, + "netmask": func(ip IPAddr) string { + switch v := ip.(type) { + case IPv4Addr: + ipv4Mask := IPv4Addr{ + Address: IPv4Address(v.Mask), + Mask: IPv4HostMask, + } + return ipv4Mask.String() + case IPv6Addr: + ipv6Mask := new(big.Int) + ipv6Mask.Set(v.Mask) + ipv6MaskAddr := IPv6Addr{ + Address: IPv6Address(ipv6Mask), + Mask: ipv6HostMask, + } + return ipv6MaskAddr.String() + default: + return fmt.Sprintf("", ip) + } + }, + "network": func(ip IPAddr) string { + return ip.Network().NetIP().String() + }, + "octets": func(ip IPAddr) string { + octets := ip.Octets() + octetStrs := make([]string, 0, len(octets)) + for _, octet := range octets { + octetStrs = append(octetStrs, fmt.Sprintf("%d", octet)) + } + return strings.Join(octetStrs, " ") + }, + "port": func(ip IPAddr) string { + return fmt.Sprintf("%d", ip.IPPort()) + }, + } +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/ipaddrs.go 
b/vendor/github.com/hashicorp/go-sockaddr/ipaddrs.go new file mode 100644 index 0000000000..6eeb7ddd2f --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/ipaddrs.go @@ -0,0 +1,98 @@ +package sockaddr + +import "bytes" + +type IPAddrs []IPAddr + +func (s IPAddrs) Len() int { return len(s) } +func (s IPAddrs) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// // SortIPAddrsByCmp is a type that satisfies sort.Interface and can be used +// // by the routines in this package. The SortIPAddrsByCmp type is used to +// // sort IPAddrs by Cmp() +// type SortIPAddrsByCmp struct{ IPAddrs } + +// // Less reports whether the element with index i should sort before the +// // element with index j. +// func (s SortIPAddrsByCmp) Less(i, j int) bool { +// // Sort by Type, then address, then port number. +// return Less(s.IPAddrs[i], s.IPAddrs[j]) +// } + +// SortIPAddrsBySpecificMaskLen is a type that satisfies sort.Interface and +// can be used by the routines in this package. The +// SortIPAddrsBySpecificMaskLen type is used to sort IPAddrs by smallest +// network (most specific to largest network). +type SortIPAddrsByNetworkSize struct{ IPAddrs } + +// Less reports whether the element with index i should sort before the +// element with index j. +func (s SortIPAddrsByNetworkSize) Less(i, j int) bool { + // Sort masks with a larger binary value (i.e. fewer hosts per network + // prefix) after masks with a smaller value (larger number of hosts per + // prefix). + switch bytes.Compare([]byte(*s.IPAddrs[i].NetIPMask()), []byte(*s.IPAddrs[j].NetIPMask())) { + case 0: + // Fall through to the second test if the net.IPMasks are the + // same. + break + case 1: + return true + case -1: + return false + default: + panic("bad, m'kay?") + } + + // Sort IPs based on the length (i.e. prefer IPv4 over IPv6). + iLen := len(*s.IPAddrs[i].NetIP()) + jLen := len(*s.IPAddrs[j].NetIP()) + if iLen != jLen { + return iLen > jLen + } + + // Sort IPs based on their network address from lowest to highest. + switch bytes.Compare(s.IPAddrs[i].NetIPNet().IP, s.IPAddrs[j].NetIPNet().IP) { + case 0: + break + case 1: + return false + case -1: + return true + default: + panic("lol wut?") + } + + // If a host does not have a port set, it always sorts after hosts + // that have a port (e.g. a host with a /32 and port number is more + // specific and should sort first over a host with a /32 but no port + // set). + if s.IPAddrs[i].IPPort() == 0 || s.IPAddrs[j].IPPort() == 0 { + return false + } + return s.IPAddrs[i].IPPort() < s.IPAddrs[j].IPPort() +} + +// SortIPAddrsBySpecificMaskLen is a type that satisfies sort.Interface and +// can be used by the routines in this package. The +// SortIPAddrsBySpecificMaskLen type is used to sort IPAddrs by smallest +// network (most specific to largest network). +type SortIPAddrsBySpecificMaskLen struct{ IPAddrs } + +// Less reports whether the element with index i should sort before the +// element with index j. +func (s SortIPAddrsBySpecificMaskLen) Less(i, j int) bool { + return s.IPAddrs[i].Maskbits() > s.IPAddrs[j].Maskbits() +} + +// SortIPAddrsByBroadMaskLen is a type that satisfies sort.Interface and can +// be used by the routines in this package. The SortIPAddrsByBroadMaskLen +// type is used to sort IPAddrs by largest network (i.e. largest subnets +// first). +type SortIPAddrsByBroadMaskLen struct{ IPAddrs } + +// Less reports whether the element with index i should sort before the +// element with index j. 
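+//
+// These wrapper types satisfy sort.Interface; a sketch (assumes a populated
+// IPAddrs slice named addrs and the standard library "sort" package):
+//
+// ```
+// sort.Sort(SortIPAddrsBySpecificMaskLen{addrs}) // /32 before /24 before /8
+// sort.Sort(SortIPAddrsByBroadMaskLen{addrs})    // /8 before /24 before /32
+// ```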
+func (s SortIPAddrsByBroadMaskLen) Less(i, j int) bool { + return s.IPAddrs[i].Maskbits() < s.IPAddrs[j].Maskbits() +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/ipv4addr.go b/vendor/github.com/hashicorp/go-sockaddr/ipv4addr.go new file mode 100644 index 0000000000..4d395dc954 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/ipv4addr.go @@ -0,0 +1,516 @@ +package sockaddr + +import ( + "encoding/binary" + "fmt" + "net" + "regexp" + "strconv" + "strings" +) + +type ( + // IPv4Address is a named type representing an IPv4 address. + IPv4Address uint32 + + // IPv4Network is a named type representing an IPv4 network. + IPv4Network uint32 + + // IPv4Mask is a named type representing an IPv4 network mask. + IPv4Mask uint32 +) + +// IPv4HostMask is a constant represents a /32 IPv4 Address +// (i.e. 255.255.255.255). +const IPv4HostMask = IPv4Mask(0xffffffff) + +// ipv4AddrAttrMap is a map of the IPv4Addr type-specific attributes. +var ipv4AddrAttrMap map[AttrName]func(IPv4Addr) string +var ipv4AddrAttrs []AttrName +var trailingHexNetmaskRE *regexp.Regexp + +// IPv4Addr implements a convenience wrapper around the union of Go's +// built-in net.IP and net.IPNet types. In UNIX-speak, IPv4Addr implements +// `sockaddr` when the the address family is set to AF_INET +// (i.e. `sockaddr_in`). +type IPv4Addr struct { + IPAddr + Address IPv4Address + Mask IPv4Mask + Port IPPort +} + +func init() { + ipv4AddrInit() + trailingHexNetmaskRE = regexp.MustCompile(`/([0f]{8})$`) +} + +// NewIPv4Addr creates an IPv4Addr from a string. String can be in the form +// of either an IPv4:port (e.g. `1.2.3.4:80`, in which case the mask is +// assumed to be a `/32`), an IPv4 address (e.g. `1.2.3.4`, also with a `/32` +// mask), or an IPv4 CIDR (e.g. `1.2.3.4/24`, which has its IP port +// initialized to zero). ipv4Str can not be a hostname. +// +// NOTE: Many net.*() routines will initialize and return an IPv6 address. +// To create uint32 values from net.IP, always test to make sure the address +// returned can be converted to a 4 byte array using To4(). +func NewIPv4Addr(ipv4Str string) (IPv4Addr, error) { + // Strip off any bogus hex-encoded netmasks that will be mis-parsed by Go. In + // particular, clients with the Barracuda VPN client will see something like: + // `192.168.3.51/00ffffff` as their IP address. + trailingHexNetmaskRe := trailingHexNetmaskRE.Copy() + if match := trailingHexNetmaskRe.FindStringIndex(ipv4Str); match != nil { + ipv4Str = ipv4Str[:match[0]] + } + + // Parse as an IPv4 CIDR + ipAddr, network, err := net.ParseCIDR(ipv4Str) + if err == nil { + ipv4 := ipAddr.To4() + if ipv4 == nil { + return IPv4Addr{}, fmt.Errorf("Unable to convert %s to an IPv4 address", ipv4Str) + } + + // If we see an IPv6 netmask, convert it to an IPv4 mask. 
+ netmaskSepPos := strings.LastIndexByte(ipv4Str, '/') + if netmaskSepPos != -1 && netmaskSepPos+1 < len(ipv4Str) { + netMask, err := strconv.ParseUint(ipv4Str[netmaskSepPos+1:], 10, 8) + if err != nil { + return IPv4Addr{}, fmt.Errorf("Unable to convert %s to an IPv4 address: unable to parse CIDR netmask: %v", ipv4Str, err) + } else if netMask > 128 { + return IPv4Addr{}, fmt.Errorf("Unable to convert %s to an IPv4 address: invalid CIDR netmask", ipv4Str) + } + + if netMask >= 96 { + // Convert the IPv6 netmask to an IPv4 netmask + network.Mask = net.CIDRMask(int(netMask-96), IPv4len*8) + } + } + ipv4Addr := IPv4Addr{ + Address: IPv4Address(binary.BigEndian.Uint32(ipv4)), + Mask: IPv4Mask(binary.BigEndian.Uint32(network.Mask)), + } + return ipv4Addr, nil + } + + // Attempt to parse ipv4Str as a /32 host with a port number. + tcpAddr, err := net.ResolveTCPAddr("tcp4", ipv4Str) + if err == nil { + ipv4 := tcpAddr.IP.To4() + if ipv4 == nil { + return IPv4Addr{}, fmt.Errorf("Unable to resolve %+q as an IPv4 address", ipv4Str) + } + + ipv4Uint32 := binary.BigEndian.Uint32(ipv4) + ipv4Addr := IPv4Addr{ + Address: IPv4Address(ipv4Uint32), + Mask: IPv4HostMask, + Port: IPPort(tcpAddr.Port), + } + + return ipv4Addr, nil + } + + // Parse as a naked IPv4 address + ip := net.ParseIP(ipv4Str) + if ip != nil { + ipv4 := ip.To4() + if ipv4 == nil { + return IPv4Addr{}, fmt.Errorf("Unable to string convert %+q to an IPv4 address", ipv4Str) + } + + ipv4Uint32 := binary.BigEndian.Uint32(ipv4) + ipv4Addr := IPv4Addr{ + Address: IPv4Address(ipv4Uint32), + Mask: IPv4HostMask, + } + return ipv4Addr, nil + } + + return IPv4Addr{}, fmt.Errorf("Unable to parse %+q to an IPv4 address: %v", ipv4Str, err) +} + +// AddressBinString returns a string with the IPv4Addr's Address represented +// as a sequence of '0' and '1' characters. This method is useful for +// debugging or by operators who want to inspect an address. +func (ipv4 IPv4Addr) AddressBinString() string { + return fmt.Sprintf("%032s", strconv.FormatUint(uint64(ipv4.Address), 2)) +} + +// AddressHexString returns a string with the IPv4Addr address represented as +// a sequence of hex characters. This method is useful for debugging or by +// operators who want to inspect an address. +func (ipv4 IPv4Addr) AddressHexString() string { + return fmt.Sprintf("%08s", strconv.FormatUint(uint64(ipv4.Address), 16)) +} + +// Broadcast is an IPv4Addr-only method that returns the broadcast address of +// the network. +// +// NOTE: IPv6 only supports multicast, so this method only exists for +// IPv4Addr. +func (ipv4 IPv4Addr) Broadcast() IPAddr { + // Nothing should listen on a broadcast address. + return IPv4Addr{ + Address: IPv4Address(ipv4.BroadcastAddress()), + Mask: IPv4HostMask, + } +} + +// BroadcastAddress returns a IPv4Network of the IPv4Addr's broadcast +// address. +func (ipv4 IPv4Addr) BroadcastAddress() IPv4Network { + return IPv4Network(uint32(ipv4.Address)&uint32(ipv4.Mask) | ^uint32(ipv4.Mask)) +} + +// CmpAddress follows the Cmp() standard protocol and returns: +// +// - -1 If the receiver should sort first because its address is lower than arg +// - 0 if the SockAddr arg is equal to the receiving IPv4Addr or the argument is +// of a different type. +// - 1 If the argument should sort first. 
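+//
+// For example (the addresses are arbitrary):
+//
+// ```
+// a := MustIPv4Addr("192.168.1.10/24")
+// b := MustIPv4Addr("192.168.1.20/24")
+// _ = a.CmpAddress(b) // -1: the receiver's address is numerically lower
+// ```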
+func (ipv4 IPv4Addr) CmpAddress(sa SockAddr) int { + ipv4b, ok := sa.(IPv4Addr) + if !ok { + return sortDeferDecision + } + + switch { + case ipv4.Address == ipv4b.Address: + return sortDeferDecision + case ipv4.Address < ipv4b.Address: + return sortReceiverBeforeArg + default: + return sortArgBeforeReceiver + } +} + +// CmpPort follows the Cmp() standard protocol and returns: +// +// - -1 If the receiver should sort first because its port is lower than arg +// - 0 if the SockAddr arg's port number is equal to the receiving IPv4Addr, +// regardless of type. +// - 1 If the argument should sort first. +func (ipv4 IPv4Addr) CmpPort(sa SockAddr) int { + var saPort IPPort + switch v := sa.(type) { + case IPv4Addr: + saPort = v.Port + case IPv6Addr: + saPort = v.Port + default: + return sortDeferDecision + } + + switch { + case ipv4.Port == saPort: + return sortDeferDecision + case ipv4.Port < saPort: + return sortReceiverBeforeArg + default: + return sortArgBeforeReceiver + } +} + +// CmpRFC follows the Cmp() standard protocol and returns: +// +// - -1 If the receiver should sort first because it belongs to the RFC and its +// arg does not +// - 0 if the receiver and arg both belong to the same RFC or neither do. +// - 1 If the arg belongs to the RFC but receiver does not. +func (ipv4 IPv4Addr) CmpRFC(rfcNum uint, sa SockAddr) int { + recvInRFC := IsRFC(rfcNum, ipv4) + ipv4b, ok := sa.(IPv4Addr) + if !ok { + // If the receiver is part of the desired RFC and the SockAddr + // argument is not, return -1 so that the receiver sorts before + // the non-IPv4 SockAddr. Conversely, if the receiver is not + // part of the RFC, punt on sorting and leave it for the next + // sorter. + if recvInRFC { + return sortReceiverBeforeArg + } else { + return sortDeferDecision + } + } + + argInRFC := IsRFC(rfcNum, ipv4b) + switch { + case (recvInRFC && argInRFC), (!recvInRFC && !argInRFC): + // If a and b both belong to the RFC, or neither belong to + // rfcNum, defer sorting to the next sorter. + return sortDeferDecision + case recvInRFC && !argInRFC: + return sortReceiverBeforeArg + default: + return sortArgBeforeReceiver + } +} + +// Contains returns true if the SockAddr is contained within the receiver. +func (ipv4 IPv4Addr) Contains(sa SockAddr) bool { + ipv4b, ok := sa.(IPv4Addr) + if !ok { + return false + } + + return ipv4.ContainsNetwork(ipv4b) +} + +// ContainsAddress returns true if the IPv4Address is contained within the +// receiver. +func (ipv4 IPv4Addr) ContainsAddress(x IPv4Address) bool { + return IPv4Address(ipv4.NetworkAddress()) <= x && + IPv4Address(ipv4.BroadcastAddress()) >= x +} + +// ContainsNetwork returns true if the network from IPv4Addr is contained +// within the receiver. +func (ipv4 IPv4Addr) ContainsNetwork(x IPv4Addr) bool { + return ipv4.NetworkAddress() <= x.NetworkAddress() && + ipv4.BroadcastAddress() >= x.BroadcastAddress() +} + +// DialPacketArgs returns the arguments required to be passed to +// net.DialUDP(). If the Mask of ipv4 is not a /32 or the Port is 0, +// DialPacketArgs() will fail. See Host() to create an IPv4Addr with its +// mask set to /32. +func (ipv4 IPv4Addr) DialPacketArgs() (network, dialArgs string) { + if ipv4.Mask != IPv4HostMask || ipv4.Port == 0 { + return "udp4", "" + } + return "udp4", fmt.Sprintf("%s:%d", ipv4.NetIP().String(), ipv4.Port) +} + +// DialStreamArgs returns the arguments required to be passed to +// net.DialTCP(). If the Mask of ipv4 is not a /32 or the Port is 0, +// DialStreamArgs() will fail. 
See Host() to create an IPv4Addr with its +// mask set to /32. +func (ipv4 IPv4Addr) DialStreamArgs() (network, dialArgs string) { + if ipv4.Mask != IPv4HostMask || ipv4.Port == 0 { + return "tcp4", "" + } + return "tcp4", fmt.Sprintf("%s:%d", ipv4.NetIP().String(), ipv4.Port) +} + +// Equal returns true if a SockAddr is equal to the receiving IPv4Addr. +func (ipv4 IPv4Addr) Equal(sa SockAddr) bool { + ipv4b, ok := sa.(IPv4Addr) + if !ok { + return false + } + + if ipv4.Port != ipv4b.Port { + return false + } + + if ipv4.Address != ipv4b.Address { + return false + } + + if ipv4.NetIPNet().String() != ipv4b.NetIPNet().String() { + return false + } + + return true +} + +// FirstUsable returns an IPv4Addr set to the first address following the +// network prefix. The first usable address in a network is normally the +// gateway and should not be used except by devices forwarding packets +// between two administratively distinct networks (i.e. a router). This +// function does not discriminate against first usable vs "first address that +// should be used." For example, FirstUsable() on "192.168.1.10/24" would +// return the address "192.168.1.1/24". +func (ipv4 IPv4Addr) FirstUsable() IPAddr { + addr := ipv4.NetworkAddress() + + // If /32, return the address itself. If /31 assume a point-to-point + // link and return the lower address. + if ipv4.Maskbits() < 31 { + addr++ + } + + return IPv4Addr{ + Address: IPv4Address(addr), + Mask: IPv4HostMask, + } +} + +// Host returns a copy of ipv4 with its mask set to /32 so that it can be +// used by DialPacketArgs(), DialStreamArgs(), ListenPacketArgs(), or +// ListenStreamArgs(). +func (ipv4 IPv4Addr) Host() IPAddr { + // Nothing should listen on a broadcast address. + return IPv4Addr{ + Address: ipv4.Address, + Mask: IPv4HostMask, + Port: ipv4.Port, + } +} + +// IPPort returns the Port number attached to the IPv4Addr +func (ipv4 IPv4Addr) IPPort() IPPort { + return ipv4.Port +} + +// LastUsable returns the last address before the broadcast address in a +// given network. +func (ipv4 IPv4Addr) LastUsable() IPAddr { + addr := ipv4.BroadcastAddress() + + // If /32, return the address itself. If /31 assume a point-to-point + // link and return the upper address. + if ipv4.Maskbits() < 31 { + addr-- + } + + return IPv4Addr{ + Address: IPv4Address(addr), + Mask: IPv4HostMask, + } +} + +// ListenPacketArgs returns the arguments required to be passed to +// net.ListenUDP(). If the Mask of ipv4 is not a /32, ListenPacketArgs() +// will fail. See Host() to create an IPv4Addr with its mask set to /32. +func (ipv4 IPv4Addr) ListenPacketArgs() (network, listenArgs string) { + if ipv4.Mask != IPv4HostMask { + return "udp4", "" + } + return "udp4", fmt.Sprintf("%s:%d", ipv4.NetIP().String(), ipv4.Port) +} + +// ListenStreamArgs returns the arguments required to be passed to +// net.ListenTCP(). If the Mask of ipv4 is not a /32, ListenStreamArgs() +// will fail. See Host() to create an IPv4Addr with its mask set to /32. +func (ipv4 IPv4Addr) ListenStreamArgs() (network, listenArgs string) { + if ipv4.Mask != IPv4HostMask { + return "tcp4", "" + } + return "tcp4", fmt.Sprintf("%s:%d", ipv4.NetIP().String(), ipv4.Port) +} + +// Maskbits returns the number of network mask bits in a given IPv4Addr. For +// example, the Maskbits() of "192.168.1.1/24" would return 24. 
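The dial/listen helpers only produce usable arguments for /32 hosts, which a small sketch makes easy to see (address and port chosen for illustration):

    package main

    import (
        "fmt"

        sockaddr "github.com/hashicorp/go-sockaddr"
    )

    func main() {
        addr := sockaddr.MustIPv4Addr("192.168.1.10:8080")

        // A /32 with a port yields arguments suitable for net.Dial.
        network, dialArgs := addr.DialStreamArgs()
        fmt.Println(network, dialArgs) // tcp4 192.168.1.10:8080

        // A network mask does not; Host() is the documented way to
        // obtain a /32 copy first.
        subnet := sockaddr.MustIPv4Addr("192.168.1.0/24")
        _, empty := subnet.DialStreamArgs()
        fmt.Println(empty == "") // true

        // FirstUsable/LastUsable skip the network and broadcast addresses.
        fmt.Println(subnet.FirstUsable(), subnet.LastUsable()) // 192.168.1.1 192.168.1.254
    }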
+func (ipv4 IPv4Addr) Maskbits() int { + mask := make(net.IPMask, IPv4len) + binary.BigEndian.PutUint32(mask, uint32(ipv4.Mask)) + maskOnes, _ := mask.Size() + return maskOnes +} + +// MustIPv4Addr is a helper method that must return an IPv4Addr or panic on +// invalid input. +func MustIPv4Addr(addr string) IPv4Addr { + ipv4, err := NewIPv4Addr(addr) + if err != nil { + panic(fmt.Sprintf("Unable to create an IPv4Addr from %+q: %v", addr, err)) + } + return ipv4 +} + +// NetIP returns the address as a net.IP (address is always presized to +// IPv4). +func (ipv4 IPv4Addr) NetIP() *net.IP { + x := make(net.IP, IPv4len) + binary.BigEndian.PutUint32(x, uint32(ipv4.Address)) + return &x +} + +// NetIPMask create a new net.IPMask from the IPv4Addr. +func (ipv4 IPv4Addr) NetIPMask() *net.IPMask { + ipv4Mask := net.IPMask{} + ipv4Mask = make(net.IPMask, IPv4len) + binary.BigEndian.PutUint32(ipv4Mask, uint32(ipv4.Mask)) + return &ipv4Mask +} + +// NetIPNet create a new net.IPNet from the IPv4Addr. +func (ipv4 IPv4Addr) NetIPNet() *net.IPNet { + ipv4net := &net.IPNet{} + ipv4net.IP = make(net.IP, IPv4len) + binary.BigEndian.PutUint32(ipv4net.IP, uint32(ipv4.NetworkAddress())) + ipv4net.Mask = *ipv4.NetIPMask() + return ipv4net +} + +// Network returns the network prefix or network address for a given network. +func (ipv4 IPv4Addr) Network() IPAddr { + return IPv4Addr{ + Address: IPv4Address(ipv4.NetworkAddress()), + Mask: ipv4.Mask, + } +} + +// NetworkAddress returns an IPv4Network of the IPv4Addr's network address. +func (ipv4 IPv4Addr) NetworkAddress() IPv4Network { + return IPv4Network(uint32(ipv4.Address) & uint32(ipv4.Mask)) +} + +// Octets returns a slice of the four octets in an IPv4Addr's Address. The +// order of the bytes is big endian. +func (ipv4 IPv4Addr) Octets() []int { + return []int{ + int(ipv4.Address >> 24), + int((ipv4.Address >> 16) & 0xff), + int((ipv4.Address >> 8) & 0xff), + int(ipv4.Address & 0xff), + } +} + +// String returns a string representation of the IPv4Addr +func (ipv4 IPv4Addr) String() string { + if ipv4.Port != 0 { + return fmt.Sprintf("%s:%d", ipv4.NetIP().String(), ipv4.Port) + } + + if ipv4.Maskbits() == 32 { + return ipv4.NetIP().String() + } + + return fmt.Sprintf("%s/%d", ipv4.NetIP().String(), ipv4.Maskbits()) +} + +// Type is used as a type switch and returns TypeIPv4 +func (IPv4Addr) Type() SockAddrType { + return TypeIPv4 +} + +// IPv4AddrAttr returns a string representation of an attribute for the given +// IPv4Addr. 
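The mask, network, and attribute helpers above compose like this (illustrative address; the attribute names come from ipv4AddrAttrs below):

    package main

    import (
        "fmt"

        sockaddr "github.com/hashicorp/go-sockaddr"
    )

    func main() {
        addr := sockaddr.MustIPv4Addr("192.168.1.1/24")

        fmt.Println(addr.Maskbits())          // 24
        fmt.Println(addr.Network())           // 192.168.1.0/24
        fmt.Println(addr.NetIPNet().String()) // 192.168.1.0/24
        fmt.Println(addr.Octets())            // [192 168 1 1]

        // Attribute lookup by name.
        fmt.Println(sockaddr.IPv4AddrAttr(addr, "broadcast")) // 192.168.1.255
        fmt.Println(sockaddr.IPv4Attrs())                     // [size broadcast uint32]
    }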
+func IPv4AddrAttr(ipv4 IPv4Addr, selector AttrName) string { + fn, found := ipv4AddrAttrMap[selector] + if !found { + return "" + } + + return fn(ipv4) +} + +// IPv4Attrs returns a list of attributes supported by the IPv4Addr type +func IPv4Attrs() []AttrName { + return ipv4AddrAttrs +} + +// ipv4AddrInit is called once at init() +func ipv4AddrInit() { + // Sorted for human readability + ipv4AddrAttrs = []AttrName{ + "size", // Same position as in IPv6 for output consistency + "broadcast", + "uint32", + } + + ipv4AddrAttrMap = map[AttrName]func(ipv4 IPv4Addr) string{ + "broadcast": func(ipv4 IPv4Addr) string { + return ipv4.Broadcast().String() + }, + "size": func(ipv4 IPv4Addr) string { + return fmt.Sprintf("%d", 1< 2 && ipv6Str[0] == '[' && ipv6Str[len(ipv6Str)-1] == ']' { + ipv6Str = ipv6Str[1 : len(ipv6Str)-1] + } + ip := net.ParseIP(ipv6Str) + if ip != nil { + ipv6 := ip.To16() + if ipv6 == nil { + return IPv6Addr{}, fmt.Errorf("Unable to string convert %+q to a 16byte IPv6 address", ipv6Str) + } + + ipv6BigIntAddr := new(big.Int) + ipv6BigIntAddr.SetBytes(ipv6) + + ipv6BigIntMask := new(big.Int) + ipv6BigIntMask.Set(ipv6HostMask) + + return IPv6Addr{ + Address: IPv6Address(ipv6BigIntAddr), + Mask: IPv6Mask(ipv6BigIntMask), + }, nil + } + + // Parse as an IPv6 CIDR + ipAddr, network, err := net.ParseCIDR(ipv6Str) + if err == nil { + ipv6 := ipAddr.To16() + if ipv6 == nil { + return IPv6Addr{}, fmt.Errorf("Unable to convert %+q to a 16byte IPv6 address", ipv6Str) + } + + ipv6BigIntAddr := new(big.Int) + ipv6BigIntAddr.SetBytes(ipv6) + + ipv6BigIntMask := new(big.Int) + ipv6BigIntMask.SetBytes(network.Mask) + + ipv6Addr := IPv6Addr{ + Address: IPv6Address(ipv6BigIntAddr), + Mask: IPv6Mask(ipv6BigIntMask), + } + return ipv6Addr, nil + } + + return IPv6Addr{}, fmt.Errorf("Unable to parse %+q to an IPv6 address: %v", ipv6Str, err) +} + +// AddressBinString returns a string with the IPv6Addr's Address represented +// as a sequence of '0' and '1' characters. This method is useful for +// debugging or by operators who want to inspect an address. +func (ipv6 IPv6Addr) AddressBinString() string { + bi := big.Int(*ipv6.Address) + return fmt.Sprintf("%0128s", bi.Text(2)) +} + +// AddressHexString returns a string with the IPv6Addr address represented as +// a sequence of hex characters. This method is useful for debugging or by +// operators who want to inspect an address. +func (ipv6 IPv6Addr) AddressHexString() string { + bi := big.Int(*ipv6.Address) + return fmt.Sprintf("%032s", bi.Text(16)) +} + +// CmpAddress follows the Cmp() standard protocol and returns: +// +// - -1 If the receiver should sort first because its address is lower than arg +// - 0 if the SockAddr arg equal to the receiving IPv6Addr or the argument is of a +// different type. +// - 1 If the argument should sort first. +func (ipv6 IPv6Addr) CmpAddress(sa SockAddr) int { + ipv6b, ok := sa.(IPv6Addr) + if !ok { + return sortDeferDecision + } + + ipv6aBigInt := new(big.Int) + ipv6aBigInt.Set(ipv6.Address) + ipv6bBigInt := new(big.Int) + ipv6bBigInt.Set(ipv6b.Address) + + return ipv6aBigInt.Cmp(ipv6bBigInt) +} + +// CmpPort follows the Cmp() standard protocol and returns: +// +// - -1 If the receiver should sort first because its port is lower than arg +// - 0 if the SockAddr arg's port number is equal to the receiving IPv6Addr, +// regardless of type. +// - 1 If the argument should sort first. 
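A corresponding sketch for the IPv6 constructor and comparators; the address is held as a *big.Int, so ordering is big-integer comparison (addresses illustrative):

    package main

    import (
        "fmt"

        sockaddr "github.com/hashicorp/go-sockaddr"
    )

    func main() {
        a, err := sockaddr.NewIPv6Addr("2001:db8::1/64")
        if err != nil {
            panic(err)
        }
        b := sockaddr.MustIPv6Addr("2001:db8::2")

        fmt.Println(a.AddressHexString()) // 32 zero-padded hex characters
        fmt.Println(a.CmpAddress(b))      // -1: a's address is numerically lower
        fmt.Println(a.CmpPort(b))         // 0: neither carries a port
    }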
+func (ipv6 IPv6Addr) CmpPort(sa SockAddr) int { + var saPort IPPort + switch v := sa.(type) { + case IPv4Addr: + saPort = v.Port + case IPv6Addr: + saPort = v.Port + default: + return sortDeferDecision + } + + switch { + case ipv6.Port == saPort: + return sortDeferDecision + case ipv6.Port < saPort: + return sortReceiverBeforeArg + default: + return sortArgBeforeReceiver + } +} + +// CmpRFC follows the Cmp() standard protocol and returns: +// +// - -1 If the receiver should sort first because it belongs to the RFC and its +// arg does not +// - 0 if the receiver and arg both belong to the same RFC or neither do. +// - 1 If the arg belongs to the RFC but receiver does not. +func (ipv6 IPv6Addr) CmpRFC(rfcNum uint, sa SockAddr) int { + recvInRFC := IsRFC(rfcNum, ipv6) + ipv6b, ok := sa.(IPv6Addr) + if !ok { + // If the receiver is part of the desired RFC and the SockAddr + // argument is not, sort receiver before the non-IPv6 SockAddr. + // Conversely, if the receiver is not part of the RFC, punt on + // sorting and leave it for the next sorter. + if recvInRFC { + return sortReceiverBeforeArg + } else { + return sortDeferDecision + } + } + + argInRFC := IsRFC(rfcNum, ipv6b) + switch { + case (recvInRFC && argInRFC), (!recvInRFC && !argInRFC): + // If a and b both belong to the RFC, or neither belong to + // rfcNum, defer sorting to the next sorter. + return sortDeferDecision + case recvInRFC && !argInRFC: + return sortReceiverBeforeArg + default: + return sortArgBeforeReceiver + } +} + +// Contains returns true if the SockAddr is contained within the receiver. +func (ipv6 IPv6Addr) Contains(sa SockAddr) bool { + ipv6b, ok := sa.(IPv6Addr) + if !ok { + return false + } + + return ipv6.ContainsNetwork(ipv6b) +} + +// ContainsAddress returns true if the IPv6Address is contained within the +// receiver. +func (ipv6 IPv6Addr) ContainsAddress(x IPv6Address) bool { + xAddr := IPv6Addr{ + Address: x, + Mask: ipv6HostMask, + } + + { + xIPv6 := xAddr.FirstUsable().(IPv6Addr) + yIPv6 := ipv6.FirstUsable().(IPv6Addr) + if xIPv6.CmpAddress(yIPv6) >= 1 { + return false + } + } + + { + xIPv6 := xAddr.LastUsable().(IPv6Addr) + yIPv6 := ipv6.LastUsable().(IPv6Addr) + if xIPv6.CmpAddress(yIPv6) <= -1 { + return false + } + } + return true +} + +// ContainsNetwork returns true if the network from IPv6Addr is contained within +// the receiver. +func (x IPv6Addr) ContainsNetwork(y IPv6Addr) bool { + { + xIPv6 := x.FirstUsable().(IPv6Addr) + yIPv6 := y.FirstUsable().(IPv6Addr) + if ret := xIPv6.CmpAddress(yIPv6); ret >= 1 { + return false + } + } + + { + xIPv6 := x.LastUsable().(IPv6Addr) + yIPv6 := y.LastUsable().(IPv6Addr) + if ret := xIPv6.CmpAddress(yIPv6); ret <= -1 { + return false + } + } + return true +} + +// DialPacketArgs returns the arguments required to be passed to +// net.DialUDP(). If the Mask of ipv6 is not a /128 or the Port is 0, +// DialPacketArgs() will fail. See Host() to create an IPv6Addr with its +// mask set to /128. +func (ipv6 IPv6Addr) DialPacketArgs() (network, dialArgs string) { + ipv6Mask := big.Int(*ipv6.Mask) + if ipv6Mask.Cmp(ipv6HostMask) != 0 || ipv6.Port == 0 { + return "udp6", "" + } + return "udp6", fmt.Sprintf("[%s]:%d", ipv6.NetIP().String(), ipv6.Port) +} + +// DialStreamArgs returns the arguments required to be passed to +// net.DialTCP(). If the Mask of ipv6 is not a /128 or the Port is 0, +// DialStreamArgs() will fail. See Host() to create an IPv6Addr with its +// mask set to /128. 
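IPv6 containment and RFC membership follow the same pattern as IPv4; a small sketch using documentation-prefix addresses:

    package main

    import (
        "fmt"

        sockaddr "github.com/hashicorp/go-sockaddr"
    )

    func main() {
        docNet := sockaddr.MustIPv6Addr("2001:db8::/32")
        host := sockaddr.MustIPv6Addr("2001:db8::1/128")

        // Containment is computed from the first and last usable addresses.
        fmt.Println(docNet.Contains(host)) // true

        // RFC 3849 reserves 2001:db8::/32 for documentation.
        fmt.Println(sockaddr.IsRFC(3849, host)) // true

        // Dialing a whole network is rejected: non-/128 masks give empty args.
        _, dialArgs := docNet.DialStreamArgs()
        fmt.Println(dialArgs == "") // true
    }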
+func (ipv6 IPv6Addr) DialStreamArgs() (network, dialArgs string) { + ipv6Mask := big.Int(*ipv6.Mask) + if ipv6Mask.Cmp(ipv6HostMask) != 0 || ipv6.Port == 0 { + return "tcp6", "" + } + return "tcp6", fmt.Sprintf("[%s]:%d", ipv6.NetIP().String(), ipv6.Port) +} + +// Equal returns true if a SockAddr is equal to the receiving IPv4Addr. +func (ipv6a IPv6Addr) Equal(sa SockAddr) bool { + ipv6b, ok := sa.(IPv6Addr) + if !ok { + return false + } + + if ipv6a.NetIP().String() != ipv6b.NetIP().String() { + return false + } + + if ipv6a.NetIPNet().String() != ipv6b.NetIPNet().String() { + return false + } + + if ipv6a.Port != ipv6b.Port { + return false + } + + return true +} + +// FirstUsable returns an IPv6Addr set to the first address following the +// network prefix. The first usable address in a network is normally the +// gateway and should not be used except by devices forwarding packets +// between two administratively distinct networks (i.e. a router). This +// function does not discriminate against first usable vs "first address that +// should be used." For example, FirstUsable() on "2001:0db8::0003/64" would +// return "2001:0db8::00011". +func (ipv6 IPv6Addr) FirstUsable() IPAddr { + return IPv6Addr{ + Address: IPv6Address(ipv6.NetworkAddress()), + Mask: ipv6HostMask, + } +} + +// Host returns a copy of ipv6 with its mask set to /128 so that it can be +// used by DialPacketArgs(), DialStreamArgs(), ListenPacketArgs(), or +// ListenStreamArgs(). +func (ipv6 IPv6Addr) Host() IPAddr { + // Nothing should listen on a broadcast address. + return IPv6Addr{ + Address: ipv6.Address, + Mask: ipv6HostMask, + Port: ipv6.Port, + } +} + +// IPPort returns the Port number attached to the IPv6Addr +func (ipv6 IPv6Addr) IPPort() IPPort { + return ipv6.Port +} + +// LastUsable returns the last address in a given network. +func (ipv6 IPv6Addr) LastUsable() IPAddr { + addr := new(big.Int) + addr.Set(ipv6.Address) + + mask := new(big.Int) + mask.Set(ipv6.Mask) + + negMask := new(big.Int) + negMask.Xor(ipv6HostMask, mask) + + lastAddr := new(big.Int) + lastAddr.And(addr, mask) + lastAddr.Or(lastAddr, negMask) + + return IPv6Addr{ + Address: IPv6Address(lastAddr), + Mask: ipv6HostMask, + } +} + +// ListenPacketArgs returns the arguments required to be passed to +// net.ListenUDP(). If the Mask of ipv6 is not a /128, ListenPacketArgs() +// will fail. See Host() to create an IPv6Addr with its mask set to /128. +func (ipv6 IPv6Addr) ListenPacketArgs() (network, listenArgs string) { + ipv6Mask := big.Int(*ipv6.Mask) + if ipv6Mask.Cmp(ipv6HostMask) != 0 { + return "udp6", "" + } + return "udp6", fmt.Sprintf("[%s]:%d", ipv6.NetIP().String(), ipv6.Port) +} + +// ListenStreamArgs returns the arguments required to be passed to +// net.ListenTCP(). If the Mask of ipv6 is not a /128, ListenStreamArgs() +// will fail. See Host() to create an IPv6Addr with its mask set to /128. +func (ipv6 IPv6Addr) ListenStreamArgs() (network, listenArgs string) { + ipv6Mask := big.Int(*ipv6.Mask) + if ipv6Mask.Cmp(ipv6HostMask) != 0 { + return "tcp6", "" + } + return "tcp6", fmt.Sprintf("[%s]:%d", ipv6.NetIP().String(), ipv6.Port) +} + +// Maskbits returns the number of network mask bits in a given IPv6Addr. For +// example, the Maskbits() of "2001:0db8::0003/64" would return 64. +func (ipv6 IPv6Addr) Maskbits() int { + maskOnes, _ := ipv6.NetIPNet().Mask.Size() + + return maskOnes +} + +// MustIPv6Addr is a helper method that must return an IPv6Addr or panic on +// invalid input. 
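Since IPv6 has no broadcast address, LastUsable simply fills the host bits with ones, and the listen helpers again require a /128 mask; a quick sketch (addresses illustrative):

    package main

    import (
        "fmt"

        sockaddr "github.com/hashicorp/go-sockaddr"
    )

    func main() {
        subnet := sockaddr.MustIPv6Addr("2001:db8::/64")

        fmt.Println(subnet.FirstUsable()) // 2001:db8::
        fmt.Println(subnet.LastUsable())  // 2001:db8::ffff:ffff:ffff:ffff
        fmt.Println(subnet.Maskbits())    // 64

        // Listen arguments need a /128 mask; port 0 lets the OS pick one.
        host := sockaddr.MustIPv6Addr("2001:db8::1")
        network, listenArgs := host.ListenStreamArgs()
        fmt.Println(network, listenArgs) // tcp6 [2001:db8::1]:0
    }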
+func MustIPv6Addr(addr string) IPv6Addr { + ipv6, err := NewIPv6Addr(addr) + if err != nil { + panic(fmt.Sprintf("Unable to create an IPv6Addr from %+q: %v", addr, err)) + } + return ipv6 +} + +// NetIP returns the address as a net.IP. +func (ipv6 IPv6Addr) NetIP() *net.IP { + return bigIntToNetIPv6(ipv6.Address) +} + +// NetIPMask create a new net.IPMask from the IPv6Addr. +func (ipv6 IPv6Addr) NetIPMask() *net.IPMask { + ipv6Mask := make(net.IPMask, IPv6len) + m := big.Int(*ipv6.Mask) + copy(ipv6Mask, m.Bytes()) + return &ipv6Mask +} + +// Network returns a pointer to the net.IPNet within IPv4Addr receiver. +func (ipv6 IPv6Addr) NetIPNet() *net.IPNet { + ipv6net := &net.IPNet{} + ipv6net.IP = make(net.IP, IPv6len) + copy(ipv6net.IP, *ipv6.NetIP()) + ipv6net.Mask = *ipv6.NetIPMask() + return ipv6net +} + +// Network returns the network prefix or network address for a given network. +func (ipv6 IPv6Addr) Network() IPAddr { + return IPv6Addr{ + Address: IPv6Address(ipv6.NetworkAddress()), + Mask: ipv6.Mask, + } +} + +// NetworkAddress returns an IPv6Network of the IPv6Addr's network address. +func (ipv6 IPv6Addr) NetworkAddress() IPv6Network { + addr := new(big.Int) + addr.SetBytes((*ipv6.Address).Bytes()) + + mask := new(big.Int) + mask.SetBytes(*ipv6.NetIPMask()) + + netAddr := new(big.Int) + netAddr.And(addr, mask) + + return IPv6Network(netAddr) +} + +// Octets returns a slice of the 16 octets in an IPv6Addr's Address. The +// order of the bytes is big endian. +func (ipv6 IPv6Addr) Octets() []int { + x := make([]int, IPv6len) + for i, b := range *bigIntToNetIPv6(ipv6.Address) { + x[i] = int(b) + } + + return x +} + +// String returns a string representation of the IPv6Addr +func (ipv6 IPv6Addr) String() string { + if ipv6.Port != 0 { + return fmt.Sprintf("[%s]:%d", ipv6.NetIP().String(), ipv6.Port) + } + + if ipv6.Maskbits() == 128 { + return ipv6.NetIP().String() + } + + return fmt.Sprintf("%s/%d", ipv6.NetIP().String(), ipv6.Maskbits()) +} + +// Type is used as a type switch and returns TypeIPv6 +func (IPv6Addr) Type() SockAddrType { + return TypeIPv6 +} + +// IPv6Attrs returns a list of attributes supported by the IPv6Addr type +func IPv6Attrs() []AttrName { + return ipv6AddrAttrs +} + +// IPv6AddrAttr returns a string representation of an attribute for the given +// IPv6Addr. +func IPv6AddrAttr(ipv6 IPv6Addr, selector AttrName) string { + fn, found := ipv6AddrAttrMap[selector] + if !found { + return "" + } + + return fn(ipv6) +} + +// ipv6AddrInit is called once at init() +func ipv6AddrInit() { + // Sorted for human readability + ipv6AddrAttrs = []AttrName{ + "size", // Same position as in IPv6 for output consistency + "uint128", + } + + ipv6AddrAttrMap = map[AttrName]func(ipv6 IPv6Addr) string{ + "size": func(ipv6 IPv6Addr) string { + netSize := big.NewInt(1) + netSize = netSize.Lsh(netSize, uint(IPv6len*8-ipv6.Maskbits())) + return netSize.Text(10) + }, + "uint128": func(ipv6 IPv6Addr) string { + b := big.Int(*ipv6.Address) + return b.Text(10) + }, + } +} + +// bigIntToNetIPv6 is a helper function that correctly returns a net.IP with the +// correctly padded values. +func bigIntToNetIPv6(bi *big.Int) *net.IP { + x := make(net.IP, IPv6len) + ipv6Bytes := bi.Bytes() + + // It's possibe for ipv6Bytes to be less than IPv6len bytes in size. If + // they are different sizes we to pad the size of response. 
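String rendering and the attribute map round out the IPv6 type; for example (illustrative address):

    package main

    import (
        "fmt"

        sockaddr "github.com/hashicorp/go-sockaddr"
    )

    func main() {
        addr := sockaddr.MustIPv6Addr("2001:db8::1/64")

        fmt.Println(addr.String())                    // 2001:db8::1/64
        fmt.Println(addr.NetIPNet().String())         // 2001:db8::/64
        fmt.Println(addr.Type() == sockaddr.TypeIPv6) // true

        // "size" reports the number of addresses in the network (2^64 here).
        fmt.Println(sockaddr.IPv6AddrAttr(addr, "size")) // 18446744073709551616
    }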
+ if len(ipv6Bytes) < IPv6len { + buf := new(bytes.Buffer) + buf.Grow(IPv6len) + + for i := len(ipv6Bytes); i < IPv6len; i++ { + if err := binary.Write(buf, binary.BigEndian, byte(0)); err != nil { + panic(fmt.Sprintf("Unable to pad byte %d of input %v: %v", i, bi, err)) + } + } + + for _, b := range ipv6Bytes { + if err := binary.Write(buf, binary.BigEndian, b); err != nil { + panic(fmt.Sprintf("Unable to preserve endianness of input %v: %v", bi, err)) + } + } + + ipv6Bytes = buf.Bytes() + } + i := copy(x, ipv6Bytes) + if i != IPv6len { + panic("IPv6 wrong size") + } + return &x +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/rfc.go b/vendor/github.com/hashicorp/go-sockaddr/rfc.go new file mode 100644 index 0000000000..02e188f6fe --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/rfc.go @@ -0,0 +1,948 @@ +package sockaddr + +// ForwardingBlacklist is a faux RFC that includes a list of non-forwardable IP +// blocks. +const ForwardingBlacklist = 4294967295 +const ForwardingBlacklistRFC = "4294967295" + +// IsRFC tests to see if an SockAddr matches the specified RFC +func IsRFC(rfcNum uint, sa SockAddr) bool { + rfcNetMap := KnownRFCs() + rfcNets, ok := rfcNetMap[rfcNum] + if !ok { + return false + } + + var contained bool + for _, rfcNet := range rfcNets { + if rfcNet.Contains(sa) { + contained = true + break + } + } + return contained +} + +// KnownRFCs returns an initial set of known RFCs. +// +// NOTE (sean@): As this list evolves over time, please submit patches to keep +// this list current. If something isn't right, inquire, as it may just be a +// bug on my part. Some of the inclusions were based on my judgement as to what +// would be a useful value (e.g. RFC3330). +// +// Useful resources: +// +// * https://www.iana.org/assignments/ipv6-address-space/ipv6-address-space.xhtml +// * https://www.iana.org/assignments/ipv6-unicast-address-assignments/ipv6-unicast-address-assignments.xhtml +// * https://www.iana.org/assignments/ipv6-address-space/ipv6-address-space.xhtml +func KnownRFCs() map[uint]SockAddrs { + // NOTE(sean@): Multiple SockAddrs per RFC lend themselves well to a + // RADIX tree, but `ENOTIME`. Patches welcome. + return map[uint]SockAddrs{ + 919: { + // [RFC919] Broadcasting Internet Datagrams + MustIPv4Addr("255.255.255.255/32"), // [RFC1122], §7 Broadcast IP Addressing - Proposed Standards + }, + 1122: { + // [RFC1122] Requirements for Internet Hosts -- Communication Layers + MustIPv4Addr("0.0.0.0/8"), // [RFC1122], §3.2.1.3 + MustIPv4Addr("127.0.0.0/8"), // [RFC1122], §3.2.1.3 + }, + 1112: { + // [RFC1112] Host Extensions for IP Multicasting + MustIPv4Addr("224.0.0.0/4"), // [RFC1112], §4 Host Group Addresses + }, + 1918: { + // [RFC1918] Address Allocation for Private Internets + MustIPv4Addr("10.0.0.0/8"), + MustIPv4Addr("172.16.0.0/12"), + MustIPv4Addr("192.168.0.0/16"), + }, + 2544: { + // [RFC2544] Benchmarking Methodology for Network + // Interconnect Devices + MustIPv4Addr("198.18.0.0/15"), + }, + 2765: { + // [RFC2765] Stateless IP/ICMP Translation Algorithm + // (SIIT) (obsoleted by RFCs 6145, which itself was + // later obsoleted by 7915). 
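IsRFC and KnownRFCs are the entry points into the registry that follows; a minimal sketch of checking RFC 1918 membership (addresses illustrative):

    package main

    import (
        "fmt"

        sockaddr "github.com/hashicorp/go-sockaddr"
    )

    func main() {
        private := sockaddr.MustIPv4Addr("10.1.2.3/32")
        public := sockaddr.MustIPv4Addr("8.8.8.8/32")

        // IsRFC walks every block registered for the RFC number and
        // reports whether the address falls inside one of them.
        fmt.Println(sockaddr.IsRFC(1918, private)) // true
        fmt.Println(sockaddr.IsRFC(1918, public))  // false

        // KnownRFCs exposes the raw blocks for callers that need them.
        fmt.Println(len(sockaddr.KnownRFCs()[1918])) // 3
    }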
+ + // [RFC2765], §2.1 Addresses + MustIPv6Addr("0:0:0:0:0:ffff:0:0/96"), + }, + 2928: { + // [RFC2928] Initial IPv6 Sub-TLA ID Assignments + MustIPv6Addr("2001::/16"), // Superblock + //MustIPv6Addr("2001:0000::/23"), // IANA + //MustIPv6Addr("2001:0200::/23"), // APNIC + //MustIPv6Addr("2001:0400::/23"), // ARIN + //MustIPv6Addr("2001:0600::/23"), // RIPE NCC + //MustIPv6Addr("2001:0800::/23"), // (future assignment) + // ... + //MustIPv6Addr("2001:FE00::/23"), // (future assignment) + }, + 3056: { // 6to4 address + // [RFC3056] Connection of IPv6 Domains via IPv4 Clouds + + // [RFC3056], §2 IPv6 Prefix Allocation + MustIPv6Addr("2002::/16"), + }, + 3068: { + // [RFC3068] An Anycast Prefix for 6to4 Relay Routers + // (obsolete by RFC7526) + + // [RFC3068], § 6to4 Relay anycast address + MustIPv4Addr("192.88.99.0/24"), + + // [RFC3068], §2.5 6to4 IPv6 relay anycast address + // + // NOTE: /120 == 128-(32-24) + MustIPv6Addr("2002:c058:6301::/120"), + }, + 3171: { + // [RFC3171] IANA Guidelines for IPv4 Multicast Address Assignments + MustIPv4Addr("224.0.0.0/4"), + }, + 3330: { + // [RFC3330] Special-Use IPv4 Addresses + + // Addresses in this block refer to source hosts on + // "this" network. Address 0.0.0.0/32 may be used as a + // source address for this host on this network; other + // addresses within 0.0.0.0/8 may be used to refer to + // specified hosts on this network [RFC1700, page 4]. + MustIPv4Addr("0.0.0.0/8"), + + // 10.0.0.0/8 - This block is set aside for use in + // private networks. Its intended use is documented in + // [RFC1918]. Addresses within this block should not + // appear on the public Internet. + MustIPv4Addr("10.0.0.0/8"), + + // 14.0.0.0/8 - This block is set aside for assignments + // to the international system of Public Data Networks + // [RFC1700, page 181]. The registry of assignments + // within this block can be accessed from the "Public + // Data Network Numbers" link on the web page at + // http://www.iana.org/numbers.html. Addresses within + // this block are assigned to users and should be + // treated as such. + + // 24.0.0.0/8 - This block was allocated in early 1996 + // for use in provisioning IP service over cable + // television systems. Although the IANA initially was + // involved in making assignments to cable operators, + // this responsibility was transferred to American + // Registry for Internet Numbers (ARIN) in May 2001. + // Addresses within this block are assigned in the + // normal manner and should be treated as such. + + // 39.0.0.0/8 - This block was used in the "Class A + // Subnet Experiment" that commenced in May 1995, as + // documented in [RFC1797]. The experiment has been + // completed and this block has been returned to the + // pool of addresses reserved for future allocation or + // assignment. This block therefore no longer has a + // special use and is subject to allocation to a + // Regional Internet Registry for assignment in the + // normal manner. + + // 127.0.0.0/8 - This block is assigned for use as the Internet host + // loopback address. A datagram sent by a higher level protocol to an + // address anywhere within this block should loop back inside the host. + // This is ordinarily implemented using only 127.0.0.1/32 for loopback, + // but no addresses within this block should ever appear on any network + // anywhere [RFC1700, page 5]. 
+ MustIPv4Addr("127.0.0.0/8"), + + // 128.0.0.0/16 - This block, corresponding to the + // numerically lowest of the former Class B addresses, + // was initially and is still reserved by the IANA. + // Given the present classless nature of the IP address + // space, the basis for the reservation no longer + // applies and addresses in this block are subject to + // future allocation to a Regional Internet Registry for + // assignment in the normal manner. + + // 169.254.0.0/16 - This is the "link local" block. It + // is allocated for communication between hosts on a + // single link. Hosts obtain these addresses by + // auto-configuration, such as when a DHCP server may + // not be found. + MustIPv4Addr("169.254.0.0/16"), + + // 172.16.0.0/12 - This block is set aside for use in + // private networks. Its intended use is documented in + // [RFC1918]. Addresses within this block should not + // appear on the public Internet. + MustIPv4Addr("172.16.0.0/12"), + + // 191.255.0.0/16 - This block, corresponding to the numerically highest + // to the former Class B addresses, was initially and is still reserved + // by the IANA. Given the present classless nature of the IP address + // space, the basis for the reservation no longer applies and addresses + // in this block are subject to future allocation to a Regional Internet + // Registry for assignment in the normal manner. + + // 192.0.0.0/24 - This block, corresponding to the + // numerically lowest of the former Class C addresses, + // was initially and is still reserved by the IANA. + // Given the present classless nature of the IP address + // space, the basis for the reservation no longer + // applies and addresses in this block are subject to + // future allocation to a Regional Internet Registry for + // assignment in the normal manner. + + // 192.0.2.0/24 - This block is assigned as "TEST-NET" for use in + // documentation and example code. It is often used in conjunction with + // domain names example.com or example.net in vendor and protocol + // documentation. Addresses within this block should not appear on the + // public Internet. + MustIPv4Addr("192.0.2.0/24"), + + // 192.88.99.0/24 - This block is allocated for use as 6to4 relay + // anycast addresses, according to [RFC3068]. + MustIPv4Addr("192.88.99.0/24"), + + // 192.168.0.0/16 - This block is set aside for use in private networks. + // Its intended use is documented in [RFC1918]. Addresses within this + // block should not appear on the public Internet. + MustIPv4Addr("192.168.0.0/16"), + + // 198.18.0.0/15 - This block has been allocated for use + // in benchmark tests of network interconnect devices. + // Its use is documented in [RFC2544]. + MustIPv4Addr("198.18.0.0/15"), + + // 223.255.255.0/24 - This block, corresponding to the + // numerically highest of the former Class C addresses, + // was initially and is still reserved by the IANA. + // Given the present classless nature of the IP address + // space, the basis for the reservation no longer + // applies and addresses in this block are subject to + // future allocation to a Regional Internet Registry for + // assignment in the normal manner. + + // 224.0.0.0/4 - This block, formerly known as the Class + // D address space, is allocated for use in IPv4 + // multicast address assignments. The IANA guidelines + // for assignments from this space are described in + // [RFC3171]. + MustIPv4Addr("224.0.0.0/4"), + + // 240.0.0.0/4 - This block, formerly known as the Class E address + // space, is reserved. 
The "limited broadcast" destination address + // 255.255.255.255 should never be forwarded outside the (sub-)net of + // the source. The remainder of this space is reserved + // for future use. [RFC1700, page 4] + MustIPv4Addr("240.0.0.0/4"), + }, + 3849: { + // [RFC3849] IPv6 Address Prefix Reserved for Documentation + MustIPv6Addr("2001:db8::/32"), // [RFC3849], §4 IANA Considerations + }, + 3927: { + // [RFC3927] Dynamic Configuration of IPv4 Link-Local Addresses + MustIPv4Addr("169.254.0.0/16"), // [RFC3927], §2.1 Link-Local Address Selection + }, + 4038: { + // [RFC4038] Application Aspects of IPv6 Transition + + // [RFC4038], §4.2. IPv6 Applications in a Dual-Stack Node + MustIPv6Addr("0:0:0:0:0:ffff::/96"), + }, + 4193: { + // [RFC4193] Unique Local IPv6 Unicast Addresses + MustIPv6Addr("fc00::/7"), + }, + 4291: { + // [RFC4291] IP Version 6 Addressing Architecture + + // [RFC4291], §2.5.2 The Unspecified Address + MustIPv6Addr("::/128"), + + // [RFC4291], §2.5.3 The Loopback Address + MustIPv6Addr("::1/128"), + + // [RFC4291], §2.5.5.1. IPv4-Compatible IPv6 Address + MustIPv6Addr("::/96"), + + // [RFC4291], §2.5.5.2. IPv4-Mapped IPv6 Address + MustIPv6Addr("::ffff:0:0/96"), + + // [RFC4291], §2.5.6 Link-Local IPv6 Unicast Addresses + MustIPv6Addr("fe80::/10"), + + // [RFC4291], §2.5.7 Site-Local IPv6 Unicast Addresses + // (depreciated) + MustIPv6Addr("fec0::/10"), + + // [RFC4291], §2.7 Multicast Addresses + MustIPv6Addr("ff00::/8"), + + // IPv6 Multicast Information. + // + // In the following "table" below, `ff0x` is replaced + // with the following values depending on the scope of + // the query: + // + // IPv6 Multicast Scopes: + // * ff00/9 // reserved + // * ff01/9 // interface-local + // * ff02/9 // link-local + // * ff03/9 // realm-local + // * ff04/9 // admin-local + // * ff05/9 // site-local + // * ff08/9 // organization-local + // * ff0e/9 // global + // * ff0f/9 // reserved + // + // IPv6 Multicast Addresses: + // * ff0x::2 // All routers + // * ff02::5 // OSPFIGP + // * ff02::6 // OSPFIGP Designated Routers + // * ff02::9 // RIP Routers + // * ff02::a // EIGRP Routers + // * ff02::d // All PIM Routers + // * ff02::1a // All RPL Routers + // * ff0x::fb // mDNSv6 + // * ff0x::101 // All Network Time Protocol (NTP) servers + // * ff02::1:1 // Link Name + // * ff02::1:2 // All-dhcp-agents + // * ff02::1:3 // Link-local Multicast Name Resolution + // * ff05::1:3 // All-dhcp-servers + // * ff02::1:ff00:0/104 // Solicited-node multicast address. 
+ // * ff02::2:ff00:0/104 // Node Information Queries + }, + 4380: { + // [RFC4380] Teredo: Tunneling IPv6 over UDP through + // Network Address Translations (NATs) + + // [RFC4380], §2.6 Global Teredo IPv6 Service Prefix + MustIPv6Addr("2001:0000::/32"), + }, + 4773: { + // [RFC4773] Administration of the IANA Special Purpose IPv6 Address Block + MustIPv6Addr("2001:0000::/23"), // IANA + }, + 4843: { + // [RFC4843] An IPv6 Prefix for Overlay Routable Cryptographic Hash Identifiers (ORCHID) + MustIPv6Addr("2001:10::/28"), // [RFC4843], §7 IANA Considerations + }, + 5180: { + // [RFC5180] IPv6 Benchmarking Methodology for Network Interconnect Devices + MustIPv6Addr("2001:0200::/48"), // [RFC5180], §8 IANA Considerations + }, + 5735: { + // [RFC5735] Special Use IPv4 Addresses + MustIPv4Addr("192.0.2.0/24"), // TEST-NET-1 + MustIPv4Addr("198.51.100.0/24"), // TEST-NET-2 + MustIPv4Addr("203.0.113.0/24"), // TEST-NET-3 + MustIPv4Addr("198.18.0.0/15"), // Benchmarks + }, + 5737: { + // [RFC5737] IPv4 Address Blocks Reserved for Documentation + MustIPv4Addr("192.0.2.0/24"), // TEST-NET-1 + MustIPv4Addr("198.51.100.0/24"), // TEST-NET-2 + MustIPv4Addr("203.0.113.0/24"), // TEST-NET-3 + }, + 6052: { + // [RFC6052] IPv6 Addressing of IPv4/IPv6 Translators + MustIPv6Addr("64:ff9b::/96"), // [RFC6052], §2.1. Well-Known Prefix + }, + 6333: { + // [RFC6333] Dual-Stack Lite Broadband Deployments Following IPv4 Exhaustion + MustIPv4Addr("192.0.0.0/29"), // [RFC6333], §5.7 Well-Known IPv4 Address + }, + 6598: { + // [RFC6598] IANA-Reserved IPv4 Prefix for Shared Address Space + MustIPv4Addr("100.64.0.0/10"), + }, + 6666: { + // [RFC6666] A Discard Prefix for IPv6 + MustIPv6Addr("0100::/64"), + }, + 6890: { + // [RFC6890] Special-Purpose IP Address Registries + + // From "RFC6890 §2.2.1 Information Requirements": + /* + The IPv4 and IPv6 Special-Purpose Address Registries maintain the + following information regarding each entry: + + o Address Block - A block of IPv4 or IPv6 addresses that has been + registered for a special purpose. + + o Name - A descriptive name for the special-purpose address block. + + o RFC - The RFC through which the special-purpose address block was + requested. + + o Allocation Date - The date upon which the special-purpose address + block was allocated. + + o Termination Date - The date upon which the allocation is to be + terminated. This field is applicable for limited-use allocations + only. + + o Source - A boolean value indicating whether an address from the + allocated special-purpose address block is valid when used as the + source address of an IP datagram that transits two devices. + + o Destination - A boolean value indicating whether an address from + the allocated special-purpose address block is valid when used as + the destination address of an IP datagram that transits two + devices. + + o Forwardable - A boolean value indicating whether a router may + forward an IP datagram whose destination address is drawn from the + allocated special-purpose address block between external + interfaces. + + o Global - A boolean value indicating whether an IP datagram whose + destination address is drawn from the allocated special-purpose + address block is forwardable beyond a specified administrative + domain. + + o Reserved-by-Protocol - A boolean value indicating whether the + special-purpose address block is reserved by IP, itself. 
This + value is "TRUE" if the RFC that created the special-purpose + address block requires all compliant IP implementations to behave + in a special way when processing packets either to or from + addresses contained by the address block. + + If the value of "Destination" is FALSE, the values of "Forwardable" + and "Global" must also be false. + */ + + /*+----------------------+----------------------------+ + * | Attribute | Value | + * +----------------------+----------------------------+ + * | Address Block | 0.0.0.0/8 | + * | Name | "This host on this network"| + * | RFC | [RFC1122], Section 3.2.1.3 | + * | Allocation Date | September 1981 | + * | Termination Date | N/A | + * | Source | True | + * | Destination | False | + * | Forwardable | False | + * | Global | False | + * | Reserved-by-Protocol | True | + * +----------------------+----------------------------+*/ + MustIPv4Addr("0.0.0.0/8"), + + /*+----------------------+---------------+ + * | Attribute | Value | + * +----------------------+---------------+ + * | Address Block | 10.0.0.0/8 | + * | Name | Private-Use | + * | RFC | [RFC1918] | + * | Allocation Date | February 1996 | + * | Termination Date | N/A | + * | Source | True | + * | Destination | True | + * | Forwardable | True | + * | Global | False | + * | Reserved-by-Protocol | False | + * +----------------------+---------------+ */ + MustIPv4Addr("10.0.0.0/8"), + + /*+----------------------+----------------------+ + | Attribute | Value | + +----------------------+----------------------+ + | Address Block | 100.64.0.0/10 | + | Name | Shared Address Space | + | RFC | [RFC6598] | + | Allocation Date | April 2012 | + | Termination Date | N/A | + | Source | True | + | Destination | True | + | Forwardable | True | + | Global | False | + | Reserved-by-Protocol | False | + +----------------------+----------------------+*/ + MustIPv4Addr("100.64.0.0/10"), + + /*+----------------------+----------------------------+ + | Attribute | Value | + +----------------------+----------------------------+ + | Address Block | 127.0.0.0/8 | + | Name | Loopback | + | RFC | [RFC1122], Section 3.2.1.3 | + | Allocation Date | September 1981 | + | Termination Date | N/A | + | Source | False [1] | + | Destination | False [1] | + | Forwardable | False [1] | + | Global | False [1] | + | Reserved-by-Protocol | True | + +----------------------+----------------------------+*/ + // [1] Several protocols have been granted exceptions to + // this rule. For examples, see [RFC4379] and + // [RFC5884]. 
+ MustIPv4Addr("127.0.0.0/8"), + + /*+----------------------+----------------+ + | Attribute | Value | + +----------------------+----------------+ + | Address Block | 169.254.0.0/16 | + | Name | Link Local | + | RFC | [RFC3927] | + | Allocation Date | May 2005 | + | Termination Date | N/A | + | Source | True | + | Destination | True | + | Forwardable | False | + | Global | False | + | Reserved-by-Protocol | True | + +----------------------+----------------+*/ + MustIPv4Addr("169.254.0.0/16"), + + /*+----------------------+---------------+ + | Attribute | Value | + +----------------------+---------------+ + | Address Block | 172.16.0.0/12 | + | Name | Private-Use | + | RFC | [RFC1918] | + | Allocation Date | February 1996 | + | Termination Date | N/A | + | Source | True | + | Destination | True | + | Forwardable | True | + | Global | False | + | Reserved-by-Protocol | False | + +----------------------+---------------+*/ + MustIPv4Addr("172.16.0.0/12"), + + /*+----------------------+---------------------------------+ + | Attribute | Value | + +----------------------+---------------------------------+ + | Address Block | 192.0.0.0/24 [2] | + | Name | IETF Protocol Assignments | + | RFC | Section 2.1 of this document | + | Allocation Date | January 2010 | + | Termination Date | N/A | + | Source | False | + | Destination | False | + | Forwardable | False | + | Global | False | + | Reserved-by-Protocol | False | + +----------------------+---------------------------------+*/ + // [2] Not usable unless by virtue of a more specific + // reservation. + MustIPv4Addr("192.0.0.0/24"), + + /*+----------------------+--------------------------------+ + | Attribute | Value | + +----------------------+--------------------------------+ + | Address Block | 192.0.0.0/29 | + | Name | IPv4 Service Continuity Prefix | + | RFC | [RFC6333], [RFC7335] | + | Allocation Date | June 2011 | + | Termination Date | N/A | + | Source | True | + | Destination | True | + | Forwardable | True | + | Global | False | + | Reserved-by-Protocol | False | + +----------------------+--------------------------------+*/ + MustIPv4Addr("192.0.0.0/29"), + + /*+----------------------+----------------------------+ + | Attribute | Value | + +----------------------+----------------------------+ + | Address Block | 192.0.2.0/24 | + | Name | Documentation (TEST-NET-1) | + | RFC | [RFC5737] | + | Allocation Date | January 2010 | + | Termination Date | N/A | + | Source | False | + | Destination | False | + | Forwardable | False | + | Global | False | + | Reserved-by-Protocol | False | + +----------------------+----------------------------+*/ + MustIPv4Addr("192.0.2.0/24"), + + /*+----------------------+--------------------+ + | Attribute | Value | + +----------------------+--------------------+ + | Address Block | 192.88.99.0/24 | + | Name | 6to4 Relay Anycast | + | RFC | [RFC3068] | + | Allocation Date | June 2001 | + | Termination Date | N/A | + | Source | True | + | Destination | True | + | Forwardable | True | + | Global | True | + | Reserved-by-Protocol | False | + +----------------------+--------------------+*/ + MustIPv4Addr("192.88.99.0/24"), + + /*+----------------------+----------------+ + | Attribute | Value | + +----------------------+----------------+ + | Address Block | 192.168.0.0/16 | + | Name | Private-Use | + | RFC | [RFC1918] | + | Allocation Date | February 1996 | + | Termination Date | N/A | + | Source | True | + | Destination | True | + | Forwardable | True | + | Global | False | + | Reserved-by-Protocol | False | + 
+----------------------+----------------+*/ + MustIPv4Addr("192.168.0.0/16"), + + /*+----------------------+---------------+ + | Attribute | Value | + +----------------------+---------------+ + | Address Block | 198.18.0.0/15 | + | Name | Benchmarking | + | RFC | [RFC2544] | + | Allocation Date | March 1999 | + | Termination Date | N/A | + | Source | True | + | Destination | True | + | Forwardable | True | + | Global | False | + | Reserved-by-Protocol | False | + +----------------------+---------------+*/ + MustIPv4Addr("198.18.0.0/15"), + + /*+----------------------+----------------------------+ + | Attribute | Value | + +----------------------+----------------------------+ + | Address Block | 198.51.100.0/24 | + | Name | Documentation (TEST-NET-2) | + | RFC | [RFC5737] | + | Allocation Date | January 2010 | + | Termination Date | N/A | + | Source | False | + | Destination | False | + | Forwardable | False | + | Global | False | + | Reserved-by-Protocol | False | + +----------------------+----------------------------+*/ + MustIPv4Addr("198.51.100.0/24"), + + /*+----------------------+----------------------------+ + | Attribute | Value | + +----------------------+----------------------------+ + | Address Block | 203.0.113.0/24 | + | Name | Documentation (TEST-NET-3) | + | RFC | [RFC5737] | + | Allocation Date | January 2010 | + | Termination Date | N/A | + | Source | False | + | Destination | False | + | Forwardable | False | + | Global | False | + | Reserved-by-Protocol | False | + +----------------------+----------------------------+*/ + MustIPv4Addr("203.0.113.0/24"), + + /*+----------------------+----------------------+ + | Attribute | Value | + +----------------------+----------------------+ + | Address Block | 240.0.0.0/4 | + | Name | Reserved | + | RFC | [RFC1112], Section 4 | + | Allocation Date | August 1989 | + | Termination Date | N/A | + | Source | False | + | Destination | False | + | Forwardable | False | + | Global | False | + | Reserved-by-Protocol | True | + +----------------------+----------------------+*/ + MustIPv4Addr("240.0.0.0/4"), + + /*+----------------------+----------------------+ + | Attribute | Value | + +----------------------+----------------------+ + | Address Block | 255.255.255.255/32 | + | Name | Limited Broadcast | + | RFC | [RFC0919], Section 7 | + | Allocation Date | October 1984 | + | Termination Date | N/A | + | Source | False | + | Destination | True | + | Forwardable | False | + | Global | False | + | Reserved-by-Protocol | False | + +----------------------+----------------------+*/ + MustIPv4Addr("255.255.255.255/32"), + + /*+----------------------+------------------+ + | Attribute | Value | + +----------------------+------------------+ + | Address Block | ::1/128 | + | Name | Loopback Address | + | RFC | [RFC4291] | + | Allocation Date | February 2006 | + | Termination Date | N/A | + | Source | False | + | Destination | False | + | Forwardable | False | + | Global | False | + | Reserved-by-Protocol | True | + +----------------------+------------------+*/ + MustIPv6Addr("::1/128"), + + /*+----------------------+---------------------+ + | Attribute | Value | + +----------------------+---------------------+ + | Address Block | ::/128 | + | Name | Unspecified Address | + | RFC | [RFC4291] | + | Allocation Date | February 2006 | + | Termination Date | N/A | + | Source | True | + | Destination | False | + | Forwardable | False | + | Global | False | + | Reserved-by-Protocol | True | + +----------------------+---------------------+*/ + 
MustIPv6Addr("::/128"), + + /*+----------------------+---------------------+ + | Attribute | Value | + +----------------------+---------------------+ + | Address Block | 64:ff9b::/96 | + | Name | IPv4-IPv6 Translat. | + | RFC | [RFC6052] | + | Allocation Date | October 2010 | + | Termination Date | N/A | + | Source | True | + | Destination | True | + | Forwardable | True | + | Global | True | + | Reserved-by-Protocol | False | + +----------------------+---------------------+*/ + MustIPv6Addr("64:ff9b::/96"), + + /*+----------------------+---------------------+ + | Attribute | Value | + +----------------------+---------------------+ + | Address Block | ::ffff:0:0/96 | + | Name | IPv4-mapped Address | + | RFC | [RFC4291] | + | Allocation Date | February 2006 | + | Termination Date | N/A | + | Source | False | + | Destination | False | + | Forwardable | False | + | Global | False | + | Reserved-by-Protocol | True | + +----------------------+---------------------+*/ + MustIPv6Addr("::ffff:0:0/96"), + + /*+----------------------+----------------------------+ + | Attribute | Value | + +----------------------+----------------------------+ + | Address Block | 100::/64 | + | Name | Discard-Only Address Block | + | RFC | [RFC6666] | + | Allocation Date | June 2012 | + | Termination Date | N/A | + | Source | True | + | Destination | True | + | Forwardable | True | + | Global | False | + | Reserved-by-Protocol | False | + +----------------------+----------------------------+*/ + MustIPv6Addr("100::/64"), + + /*+----------------------+---------------------------+ + | Attribute | Value | + +----------------------+---------------------------+ + | Address Block | 2001::/23 | + | Name | IETF Protocol Assignments | + | RFC | [RFC2928] | + | Allocation Date | September 2000 | + | Termination Date | N/A | + | Source | False[1] | + | Destination | False[1] | + | Forwardable | False[1] | + | Global | False[1] | + | Reserved-by-Protocol | False | + +----------------------+---------------------------+*/ + // [1] Unless allowed by a more specific allocation. + MustIPv6Addr("2001::/16"), + + /*+----------------------+----------------+ + | Attribute | Value | + +----------------------+----------------+ + | Address Block | 2001::/32 | + | Name | TEREDO | + | RFC | [RFC4380] | + | Allocation Date | January 2006 | + | Termination Date | N/A | + | Source | True | + | Destination | True | + | Forwardable | True | + | Global | False | + | Reserved-by-Protocol | False | + +----------------------+----------------+*/ + // Covered by previous entry, included for completeness. + // + // MustIPv6Addr("2001::/16"), + + /*+----------------------+----------------+ + | Attribute | Value | + +----------------------+----------------+ + | Address Block | 2001:2::/48 | + | Name | Benchmarking | + | RFC | [RFC5180] | + | Allocation Date | April 2008 | + | Termination Date | N/A | + | Source | True | + | Destination | True | + | Forwardable | True | + | Global | False | + | Reserved-by-Protocol | False | + +----------------------+----------------+*/ + // Covered by previous entry, included for completeness. 
+ // + // MustIPv6Addr("2001:2::/48"), + + /*+----------------------+---------------+ + | Attribute | Value | + +----------------------+---------------+ + | Address Block | 2001:db8::/32 | + | Name | Documentation | + | RFC | [RFC3849] | + | Allocation Date | July 2004 | + | Termination Date | N/A | + | Source | False | + | Destination | False | + | Forwardable | False | + | Global | False | + | Reserved-by-Protocol | False | + +----------------------+---------------+*/ + // Covered by previous entry, included for completeness. + // + // MustIPv6Addr("2001:db8::/32"), + + /*+----------------------+--------------+ + | Attribute | Value | + +----------------------+--------------+ + | Address Block | 2001:10::/28 | + | Name | ORCHID | + | RFC | [RFC4843] | + | Allocation Date | March 2007 | + | Termination Date | March 2014 | + | Source | False | + | Destination | False | + | Forwardable | False | + | Global | False | + | Reserved-by-Protocol | False | + +----------------------+--------------+*/ + // Covered by previous entry, included for completeness. + // + // MustIPv6Addr("2001:10::/28"), + + /*+----------------------+---------------+ + | Attribute | Value | + +----------------------+---------------+ + | Address Block | 2002::/16 [2] | + | Name | 6to4 | + | RFC | [RFC3056] | + | Allocation Date | February 2001 | + | Termination Date | N/A | + | Source | True | + | Destination | True | + | Forwardable | True | + | Global | N/A [2] | + | Reserved-by-Protocol | False | + +----------------------+---------------+*/ + // [2] See [RFC3056] for details. + MustIPv6Addr("2002::/16"), + + /*+----------------------+--------------+ + | Attribute | Value | + +----------------------+--------------+ + | Address Block | fc00::/7 | + | Name | Unique-Local | + | RFC | [RFC4193] | + | Allocation Date | October 2005 | + | Termination Date | N/A | + | Source | True | + | Destination | True | + | Forwardable | True | + | Global | False | + | Reserved-by-Protocol | False | + +----------------------+--------------+*/ + MustIPv6Addr("fc00::/7"), + + /*+----------------------+-----------------------+ + | Attribute | Value | + +----------------------+-----------------------+ + | Address Block | fe80::/10 | + | Name | Linked-Scoped Unicast | + | RFC | [RFC4291] | + | Allocation Date | February 2006 | + | Termination Date | N/A | + | Source | True | + | Destination | True | + | Forwardable | False | + | Global | False | + | Reserved-by-Protocol | True | + +----------------------+-----------------------+*/ + MustIPv6Addr("fe80::/10"), + }, + 7335: { + // [RFC7335] IPv4 Service Continuity Prefix + MustIPv4Addr("192.0.0.0/29"), // [RFC7335], §6 IANA Considerations + }, + ForwardingBlacklist: { // Pseudo-RFC + // Blacklist of non-forwardable IP blocks taken from RFC6890 + // + // TODO: the attributes for forwardable should be + // searcahble and embedded in the main list of RFCs + // above. + MustIPv4Addr("0.0.0.0/8"), + MustIPv4Addr("127.0.0.0/8"), + MustIPv4Addr("169.254.0.0/16"), + MustIPv4Addr("192.0.0.0/24"), + MustIPv4Addr("192.0.2.0/24"), + MustIPv4Addr("198.51.100.0/24"), + MustIPv4Addr("203.0.113.0/24"), + MustIPv4Addr("240.0.0.0/4"), + MustIPv4Addr("255.255.255.255/32"), + MustIPv6Addr("::1/128"), + MustIPv6Addr("::/128"), + MustIPv6Addr("::ffff:0:0/96"), + + // There is no way of expressing a whitelist per RFC2928 + // atm without creating a negative mask, which I don't + // want to do atm. 
+ //MustIPv6Addr("2001::/23"), + + MustIPv6Addr("2001:db8::/32"), + MustIPv6Addr("2001:10::/28"), + MustIPv6Addr("fe80::/10"), + }, + } +} + +// VisitAllRFCs iterates over all known RFCs and calls the visitor +func VisitAllRFCs(fn func(rfcNum uint, sockaddrs SockAddrs)) { + rfcNetMap := KnownRFCs() + + // Blacklist of faux-RFCs. Don't show the world that we're abusing the + // RFC system in this library. + rfcBlacklist := map[uint]struct{}{ + ForwardingBlacklist: {}, + } + + for rfcNum, sas := range rfcNetMap { + if _, found := rfcBlacklist[rfcNum]; !found { + fn(rfcNum, sas) + } + } +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/route_info.go b/vendor/github.com/hashicorp/go-sockaddr/route_info.go new file mode 100644 index 0000000000..2a3ee1db9e --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/route_info.go @@ -0,0 +1,19 @@ +package sockaddr + +// RouteInterface specifies an interface for obtaining memoized route table and +// network information from a given OS. +type RouteInterface interface { + // GetDefaultInterfaceName returns the name of the interface that has a + // default route or an error and an empty string if a problem was + // encountered. + GetDefaultInterfaceName() (string, error) +} + +// VisitCommands visits each command used by the platform-specific RouteInfo +// implementation. +func (ri routeInfo) VisitCommands(fn func(name string, cmd []string)) { + for k, v := range ri.cmds { + cmds := append([]string(nil), v...) + fn(k, cmds) + } +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/route_info_android.go b/vendor/github.com/hashicorp/go-sockaddr/route_info_android.go new file mode 100644 index 0000000000..9885915a6b --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/route_info_android.go @@ -0,0 +1,34 @@ +package sockaddr + +import ( + "errors" + "os/exec" +) + +type routeInfo struct { + cmds map[string][]string +} + +// NewRouteInfo returns a Android-specific implementation of the RouteInfo +// interface. +func NewRouteInfo() (routeInfo, error) { + return routeInfo{ + cmds: map[string][]string{"ip": {"/system/bin/ip", "route", "get", "8.8.8.8"}}, + }, nil +} + +// GetDefaultInterfaceName returns the interface name attached to the default +// route on the default interface. +func (ri routeInfo) GetDefaultInterfaceName() (string, error) { + out, err := exec.Command(ri.cmds["ip"][0], ri.cmds["ip"][1:]...).Output() + if err != nil { + return "", err + } + + + var ifName string + if ifName, err = parseDefaultIfNameFromIPCmdAndroid(string(out)); err != nil { + return "", errors.New("No default interface found") + } + return ifName, nil +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/route_info_bsd.go b/vendor/github.com/hashicorp/go-sockaddr/route_info_bsd.go new file mode 100644 index 0000000000..705757abc7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/route_info_bsd.go @@ -0,0 +1,36 @@ +// +build darwin dragonfly freebsd netbsd openbsd + +package sockaddr + +import "os/exec" + +var cmds map[string][]string = map[string][]string{ + "route": {"/sbin/route", "-n", "get", "default"}, +} + +type routeInfo struct { + cmds map[string][]string +} + +// NewRouteInfo returns a BSD-specific implementation of the RouteInfo +// interface. +func NewRouteInfo() (routeInfo, error) { + return routeInfo{ + cmds: cmds, + }, nil +} + +// GetDefaultInterfaceName returns the interface name attached to the default +// route on the default interface. 
+func (ri routeInfo) GetDefaultInterfaceName() (string, error) { + out, err := exec.Command(cmds["route"][0], cmds["route"][1:]...).Output() + if err != nil { + return "", err + } + + var ifName string + if ifName, err = parseDefaultIfNameFromRoute(string(out)); err != nil { + return "", err + } + return ifName, nil +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/route_info_default.go b/vendor/github.com/hashicorp/go-sockaddr/route_info_default.go new file mode 100644 index 0000000000..d1b009f653 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/route_info_default.go @@ -0,0 +1,10 @@ +// +build android nacl plan9 + +package sockaddr + +import "errors" + +// getDefaultIfName is the default interface function for unsupported platforms. +func getDefaultIfName() (string, error) { + return "", errors.New("No default interface found (unsupported platform)") +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/route_info_linux.go b/vendor/github.com/hashicorp/go-sockaddr/route_info_linux.go new file mode 100644 index 0000000000..b62ce6ecb2 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/route_info_linux.go @@ -0,0 +1,42 @@ +// +build !android + +package sockaddr + +import ( + "errors" + "os/exec" +) + +type routeInfo struct { + cmds map[string][]string +} + +// NewRouteInfo returns a Linux-specific implementation of the RouteInfo +// interface. +func NewRouteInfo() (routeInfo, error) { + // CoreOS Container Linux moved ip to /usr/bin/ip, so look it up on + // $PATH and fallback to /sbin/ip on error. + path, _ := exec.LookPath("ip") + if path == "" { + path = "/sbin/ip" + } + + return routeInfo{ + cmds: map[string][]string{"ip": {path, "route"}}, + }, nil +} + +// GetDefaultInterfaceName returns the interface name attached to the default +// route on the default interface. +func (ri routeInfo) GetDefaultInterfaceName() (string, error) { + out, err := exec.Command(ri.cmds["ip"][0], ri.cmds["ip"][1:]...).Output() + if err != nil { + return "", err + } + + var ifName string + if ifName, err = parseDefaultIfNameFromIPCmd(string(out)); err != nil { + return "", errors.New("No default interface found") + } + return ifName, nil +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/route_info_solaris.go b/vendor/github.com/hashicorp/go-sockaddr/route_info_solaris.go new file mode 100644 index 0000000000..ee8e7984d7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/route_info_solaris.go @@ -0,0 +1,37 @@ +package sockaddr + +import ( + "errors" + "os/exec" +) + +var cmds map[string][]string = map[string][]string{ + "route": {"/usr/sbin/route", "-n", "get", "default"}, +} + +type routeInfo struct { + cmds map[string][]string +} + +// NewRouteInfo returns a BSD-specific implementation of the RouteInfo +// interface. +func NewRouteInfo() (routeInfo, error) { + return routeInfo{ + cmds: cmds, + }, nil +} + +// GetDefaultInterfaceName returns the interface name attached to the default +// route on the default interface. 
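The Linux constructor's $PATH lookup with an /sbin fallback is a small, reusable idiom on its own; sketched in isolation:

    package main

    import (
        "fmt"
        "os/exec"
    )

    func main() {
        // Prefer whatever ip binary is on $PATH (CoreOS ships it under
        // /usr/bin) and fall back to the traditional /sbin location.
        path, _ := exec.LookPath("ip")
        if path == "" {
            path = "/sbin/ip"
        }
        fmt.Println("using:", path)
    }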
+func (ri routeInfo) GetDefaultInterfaceName() (string, error) {
+	out, err := exec.Command(cmds["route"][0], cmds["route"][1:]...).Output()
+	if err != nil {
+		return "", err
+	}
+
+	var ifName string
+	if ifName, err = parseDefaultIfNameFromRoute(string(out)); err != nil {
+		return "", errors.New("No default interface found")
+	}
+	return ifName, nil
+}
diff --git a/vendor/github.com/hashicorp/go-sockaddr/route_info_windows.go b/vendor/github.com/hashicorp/go-sockaddr/route_info_windows.go
new file mode 100644
index 0000000000..3da972883e
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/route_info_windows.go
@@ -0,0 +1,41 @@
+package sockaddr
+
+import "os/exec"
+
+var cmds map[string][]string = map[string][]string{
+	"netstat":  {"netstat", "-rn"},
+	"ipconfig": {"ipconfig"},
+}
+
+type routeInfo struct {
+	cmds map[string][]string
+}
+
+// NewRouteInfo returns a Windows-specific implementation of the RouteInfo
+// interface.
+func NewRouteInfo() (routeInfo, error) {
+	return routeInfo{
+		cmds: cmds,
+	}, nil
+}
+
+// GetDefaultInterfaceName returns the interface name attached to the default
+// route on the default interface.
+func (ri routeInfo) GetDefaultInterfaceName() (string, error) {
+	ifNameOut, err := exec.Command(cmds["netstat"][0], cmds["netstat"][1:]...).Output()
+	if err != nil {
+		return "", err
+	}
+
+	ipconfigOut, err := exec.Command(cmds["ipconfig"][0], cmds["ipconfig"][1:]...).Output()
+	if err != nil {
+		return "", err
+	}
+
+	ifName, err := parseDefaultIfNameWindows(string(ifNameOut), string(ipconfigOut))
+	if err != nil {
+		return "", err
+	}
+
+	return ifName, nil
+}
diff --git a/vendor/github.com/hashicorp/go-sockaddr/sockaddr.go b/vendor/github.com/hashicorp/go-sockaddr/sockaddr.go
new file mode 100644
index 0000000000..826c91c2e3
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/sockaddr.go
@@ -0,0 +1,206 @@
+package sockaddr
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+)
+
+type SockAddrType int
+type AttrName string
+
+const (
+	TypeUnknown SockAddrType = 0x0
+	TypeUnix                 = 0x1
+	TypeIPv4                 = 0x2
+	TypeIPv6                 = 0x4
+
+	// TypeIP is the union of TypeIPv4 and TypeIPv6
+	TypeIP = 0x6
+)
+
+type SockAddr interface {
+	// CmpRFC returns 0 if SockAddr exactly matches one of the matched RFC
+	// networks, -1 if the receiver is contained within the RFC network, or
+	// 1 if the address is not contained within the RFC.
+	CmpRFC(rfcNum uint, sa SockAddr) int
+
+	// Contains returns true if the SockAddr arg is contained within the
+	// receiver
+	Contains(SockAddr) bool
+
+	// Equal allows for the comparison of two SockAddrs
+	Equal(SockAddr) bool
+
+	DialPacketArgs() (string, string)
+	DialStreamArgs() (string, string)
+	ListenPacketArgs() (string, string)
+	ListenStreamArgs() (string, string)
+
+	// String returns the string representation of SockAddr
+	String() string
+
+	// Type returns the SockAddrType
+	Type() SockAddrType
+}
+
+// sockAddrAttrMap is a map of the SockAddr type-specific attributes.
+var sockAddrAttrMap map[AttrName]func(SockAddr) string
+var sockAddrAttrs []AttrName
+
+func init() {
+	sockAddrInit()
+}
+
+// NewSockAddr creates a new SockAddr from the string. The order in which
+// NewSockAddr() attempts to construct a SockAddr is: IPv4Addr, IPv6Addr,
+// UnixSock.
+//
+// NOTE: NewSockAddr() only attempts to create a UnixSock if the path begins
+// with either a '.' or '/' character. For UNIX sockets that are absolute
+// paths or are nested within a sub-directory, this works as expected; however,
+// if the UNIX socket is contained in the current working directory, this will
+// fail unless the path begins with "./" (e.g. "./my-local-socket"). Calls
+// directly to NewUnixSock() do not suffer this limitation. Invalid IP
+// addresses such as "256.0.0.0/-1" will run afoul of this heuristic and be
+// assumed to be a valid UNIX socket path (which they are, but it is probably
+// not what you want and you won't realize it until you stat(2) the file
+// system to discover it doesn't exist).
+func NewSockAddr(s string) (SockAddr, error) {
+	ipv4Addr, err := NewIPv4Addr(s)
+	if err == nil {
+		return ipv4Addr, nil
+	}
+
+	ipv6Addr, err := NewIPv6Addr(s)
+	if err == nil {
+		return ipv6Addr, nil
+	}
+
+	// Check to make sure the string begins with either a '.' or '/', or
+	// contains a '/'.
+	if len(s) > 1 && (strings.IndexAny(s[0:1], "./") != -1 || strings.IndexByte(s, '/') != -1) {
+		unixSock, err := NewUnixSock(s)
+		if err == nil {
+			return unixSock, nil
+		}
+	}
+
+	return nil, fmt.Errorf("Unable to convert %q to an IPv4 or IPv6 address, or a UNIX Socket", s)
+}
+
+// ToIPAddr returns an IPAddr type or nil if the type conversion fails.
+func ToIPAddr(sa SockAddr) *IPAddr {
+	ipa, ok := sa.(IPAddr)
+	if !ok {
+		return nil
+	}
+	return &ipa
+}
+
+// ToIPv4Addr returns an IPv4Addr type or nil if the type conversion fails.
+func ToIPv4Addr(sa SockAddr) *IPv4Addr {
+	switch v := sa.(type) {
+	case IPv4Addr:
+		return &v
+	default:
+		return nil
+	}
+}
+
+// ToIPv6Addr returns an IPv6Addr type or nil if the type conversion fails.
+func ToIPv6Addr(sa SockAddr) *IPv6Addr {
+	switch v := sa.(type) {
+	case IPv6Addr:
+		return &v
+	default:
+		return nil
+	}
+}
+
+// ToUnixSock returns a UnixSock type or nil if the type conversion fails.
+func ToUnixSock(sa SockAddr) *UnixSock {
+	switch v := sa.(type) {
+	case UnixSock:
+		return &v
+	default:
+		return nil
+	}
+}
+
+// SockAddrAttr returns a string representation of an attribute for the given
+// SockAddr.
+func SockAddrAttr(sa SockAddr, selector AttrName) string {
+	fn, found := sockAddrAttrMap[selector]
+	if !found {
+		return ""
+	}
+
+	return fn(sa)
+}
+
+// String() for SockAddrType returns a string representation of the
+// SockAddrType (e.g. "IPv4", "IPv6", "UNIX", "IP", or "unknown").
+func (sat SockAddrType) String() string {
+	switch sat {
+	case TypeIPv4:
+		return "IPv4"
+	case TypeIPv6:
+		return "IPv6"
+	// There is no concrete "IP" type. Leaving here as a reminder.
+	// case TypeIP:
+	//	return "IP"
+	case TypeUnix:
+		return "UNIX"
+	default:
+		panic("unsupported type")
+	}
+}
+
+// sockAddrInit is called once at init()
+func sockAddrInit() {
+	sockAddrAttrs = []AttrName{
+		"type", // type should be first
+		"string",
+	}
+
+	sockAddrAttrMap = map[AttrName]func(sa SockAddr) string{
+		"string": func(sa SockAddr) string {
+			return sa.String()
+		},
+		"type": func(sa SockAddr) string {
+			return sa.Type().String()
+		},
+	}
+}
+
+// SockAddrAttrs returns a list of attributes supported by the SockAddr type
+func SockAddrAttrs() []AttrName {
+	return sockAddrAttrs
+}
+
+// SockAddrMarshaler wraps a SockAddr for JSON marshaling. Although this is
+// pretty trivial to do in a program, having the logic here is useful all
+// around. Note that this marshals into a *string* -- the underlying string
+// representation of the sockaddr. If you then unmarshal into this type in Go,
+// all will work as expected, but externally you can take what comes out and
+// use the string value directly.
+type SockAddrMarshaler struct { + SockAddr +} + +func (s *SockAddrMarshaler) MarshalJSON() ([]byte, error) { + return json.Marshal(s.SockAddr.String()) +} + +func (s *SockAddrMarshaler) UnmarshalJSON(in []byte) error { + var str string + err := json.Unmarshal(in, &str) + if err != nil { + return err + } + sa, err := NewSockAddr(str) + if err != nil { + return err + } + s.SockAddr = sa + return nil +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/sockaddrs.go b/vendor/github.com/hashicorp/go-sockaddr/sockaddrs.go new file mode 100644 index 0000000000..75fbffb1ea --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/sockaddrs.go @@ -0,0 +1,193 @@ +package sockaddr + +import ( + "bytes" + "sort" +) + +// SockAddrs is a slice of SockAddrs +type SockAddrs []SockAddr + +func (s SockAddrs) Len() int { return len(s) } +func (s SockAddrs) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// CmpAddrFunc is the function signature that must be met to be used in the +// OrderedAddrBy multiAddrSorter +type CmpAddrFunc func(p1, p2 *SockAddr) int + +// multiAddrSorter implements the Sort interface, sorting the SockAddrs within. +type multiAddrSorter struct { + addrs SockAddrs + cmp []CmpAddrFunc +} + +// Sort sorts the argument slice according to the Cmp functions passed to +// OrderedAddrBy. +func (ms *multiAddrSorter) Sort(sockAddrs SockAddrs) { + ms.addrs = sockAddrs + sort.Sort(ms) +} + +// OrderedAddrBy sorts SockAddr by the list of sort function pointers. +func OrderedAddrBy(cmpFuncs ...CmpAddrFunc) *multiAddrSorter { + return &multiAddrSorter{ + cmp: cmpFuncs, + } +} + +// Len is part of sort.Interface. +func (ms *multiAddrSorter) Len() int { + return len(ms.addrs) +} + +// Less is part of sort.Interface. It is implemented by looping along the +// Cmp() functions until it finds a comparison that is either less than, +// equal to, or greater than. +func (ms *multiAddrSorter) Less(i, j int) bool { + p, q := &ms.addrs[i], &ms.addrs[j] + // Try all but the last comparison. + var k int + for k = 0; k < len(ms.cmp)-1; k++ { + cmp := ms.cmp[k] + x := cmp(p, q) + switch x { + case -1: + // p < q, so we have a decision. + return true + case 1: + // p > q, so we have a decision. + return false + } + // p == q; try the next comparison. + } + // All comparisons to here said "equal", so just return whatever the + // final comparison reports. + switch ms.cmp[k](p, q) { + case -1: + return true + case 1: + return false + default: + // Still a tie! Now what? + return false + } +} + +// Swap is part of sort.Interface. +func (ms *multiAddrSorter) Swap(i, j int) { + ms.addrs[i], ms.addrs[j] = ms.addrs[j], ms.addrs[i] +} + +const ( + // NOTE (sean@): These constants are here for code readability only and + // are sprucing up the code for readability purposes. Some of the + // Cmp*() variants have confusing logic (especially when dealing with + // mixed-type comparisons) and this, I think, has made it easier to grok + // the code faster. + sortReceiverBeforeArg = -1 + sortDeferDecision = 0 + sortArgBeforeReceiver = 1 +) + +// AscAddress is a sorting function to sort SockAddrs by their respective +// address type. Non-equal types are deferred in the sort. 
+func AscAddress(p1Ptr, p2Ptr *SockAddr) int { + p1 := *p1Ptr + p2 := *p2Ptr + + switch v := p1.(type) { + case IPv4Addr: + return v.CmpAddress(p2) + case IPv6Addr: + return v.CmpAddress(p2) + case UnixSock: + return v.CmpAddress(p2) + default: + return sortDeferDecision + } +} + +// AscPort is a sorting function to sort SockAddrs by their respective address +// type. Non-equal types are deferred in the sort. +func AscPort(p1Ptr, p2Ptr *SockAddr) int { + p1 := *p1Ptr + p2 := *p2Ptr + + switch v := p1.(type) { + case IPv4Addr: + return v.CmpPort(p2) + case IPv6Addr: + return v.CmpPort(p2) + default: + return sortDeferDecision + } +} + +// AscPrivate is a sorting function to sort "more secure" private values before +// "more public" values. Both IPv4 and IPv6 are compared against RFC6890 +// (RFC6890 includes, and is not limited to, RFC1918 and RFC6598 for IPv4, and +// IPv6 includes RFC4193). +func AscPrivate(p1Ptr, p2Ptr *SockAddr) int { + p1 := *p1Ptr + p2 := *p2Ptr + + switch v := p1.(type) { + case IPv4Addr, IPv6Addr: + return v.CmpRFC(6890, p2) + default: + return sortDeferDecision + } +} + +// AscNetworkSize is a sorting function to sort SockAddrs based on their network +// size. Non-equal types are deferred in the sort. +func AscNetworkSize(p1Ptr, p2Ptr *SockAddr) int { + p1 := *p1Ptr + p2 := *p2Ptr + p1Type := p1.Type() + p2Type := p2.Type() + + // Network size operations on non-IP types make no sense + if p1Type != p2Type && p1Type != TypeIP { + return sortDeferDecision + } + + ipA := p1.(IPAddr) + ipB := p2.(IPAddr) + + return bytes.Compare([]byte(*ipA.NetIPMask()), []byte(*ipB.NetIPMask())) +} + +// AscType is a sorting function to sort "more secure" types before +// "less-secure" types. +func AscType(p1Ptr, p2Ptr *SockAddr) int { + p1 := *p1Ptr + p2 := *p2Ptr + p1Type := p1.Type() + p2Type := p2.Type() + switch { + case p1Type < p2Type: + return sortReceiverBeforeArg + case p1Type == p2Type: + return sortDeferDecision + case p1Type > p2Type: + return sortArgBeforeReceiver + default: + return sortDeferDecision + } +} + +// FilterByType returns two lists: a list of matched and unmatched SockAddrs +func (sas SockAddrs) FilterByType(type_ SockAddrType) (matched, excluded SockAddrs) { + matched = make(SockAddrs, 0, len(sas)) + excluded = make(SockAddrs, 0, len(sas)) + + for _, sa := range sas { + if sa.Type()&type_ != 0 { + matched = append(matched, sa) + } else { + excluded = append(excluded, sa) + } + } + return matched, excluded +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/unixsock.go b/vendor/github.com/hashicorp/go-sockaddr/unixsock.go new file mode 100644 index 0000000000..f3be3f67e7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/unixsock.go @@ -0,0 +1,135 @@ +package sockaddr + +import ( + "fmt" + "strings" +) + +type UnixSock struct { + SockAddr + path string +} +type UnixSocks []*UnixSock + +// unixAttrMap is a map of the UnixSockAddr type-specific attributes. +var unixAttrMap map[AttrName]func(UnixSock) string +var unixAttrs []AttrName + +func init() { + unixAttrInit() +} + +// NewUnixSock creates an UnixSock from a string path. String can be in the +// form of either URI-based string (e.g. `file:///etc/passwd`), an absolute +// path (e.g. `/etc/passwd`), or a relative path (e.g. `./foo`). 
+func NewUnixSock(s string) (ret UnixSock, err error) { + ret.path = s + return ret, nil +} + +// CmpAddress follows the Cmp() standard protocol and returns: +// +// - -1 If the receiver should sort first because its name lexically sorts before arg +// - 0 if the SockAddr arg is not a UnixSock, or is a UnixSock with the same path. +// - 1 If the argument should sort first. +func (us UnixSock) CmpAddress(sa SockAddr) int { + usb, ok := sa.(UnixSock) + if !ok { + return sortDeferDecision + } + + return strings.Compare(us.Path(), usb.Path()) +} + +// DialPacketArgs returns the arguments required to be passed to net.DialUnix() +// with the `unixgram` network type. +func (us UnixSock) DialPacketArgs() (network, dialArgs string) { + return "unixgram", us.path +} + +// DialStreamArgs returns the arguments required to be passed to net.DialUnix() +// with the `unix` network type. +func (us UnixSock) DialStreamArgs() (network, dialArgs string) { + return "unix", us.path +} + +// Equal returns true if a SockAddr is equal to the receiving UnixSock. +func (us UnixSock) Equal(sa SockAddr) bool { + usb, ok := sa.(UnixSock) + if !ok { + return false + } + + if us.Path() != usb.Path() { + return false + } + + return true +} + +// ListenPacketArgs returns the arguments required to be passed to +// net.ListenUnixgram() with the `unixgram` network type. +func (us UnixSock) ListenPacketArgs() (network, dialArgs string) { + return "unixgram", us.path +} + +// ListenStreamArgs returns the arguments required to be passed to +// net.ListenUnix() with the `unix` network type. +func (us UnixSock) ListenStreamArgs() (network, dialArgs string) { + return "unix", us.path +} + +// MustUnixSock is a helper method that must return an UnixSock or panic on +// invalid input. +func MustUnixSock(addr string) UnixSock { + us, err := NewUnixSock(addr) + if err != nil { + panic(fmt.Sprintf("Unable to create a UnixSock from %+q: %v", addr, err)) + } + return us +} + +// Path returns the given path of the UnixSock +func (us UnixSock) Path() string { + return us.path +} + +// String returns the path of the UnixSock +func (us UnixSock) String() string { + return fmt.Sprintf("%+q", us.path) +} + +// Type is used as a type switch and returns TypeUnix +func (UnixSock) Type() SockAddrType { + return TypeUnix +} + +// UnixSockAttrs returns a list of attributes supported by the UnixSockAddr type +func UnixSockAttrs() []AttrName { + return unixAttrs +} + +// UnixSockAttr returns a string representation of an attribute for the given +// UnixSock. +func UnixSockAttr(us UnixSock, attrName AttrName) string { + fn, found := unixAttrMap[attrName] + if !found { + return "" + } + + return fn(us) +} + +// unixAttrInit is called once at init() +func unixAttrInit() { + // Sorted for human readability + unixAttrs = []AttrName{ + "path", + } + + unixAttrMap = map[AttrName]func(us UnixSock) string{ + "path": func(us UnixSock) string { + return us.Path() + }, + } +} diff --git a/vendor/github.com/hashicorp/go-uuid/.travis.yml b/vendor/github.com/hashicorp/go-uuid/.travis.yml new file mode 100644 index 0000000000..769849071e --- /dev/null +++ b/vendor/github.com/hashicorp/go-uuid/.travis.yml @@ -0,0 +1,12 @@ +language: go + +sudo: false + +go: + - 1.4 + - 1.5 + - 1.6 + - tip + +script: + - go test -bench . -benchmem -v ./... 
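Editor's note: the following is an illustrative sketch only, not part of the vendored files or this patch. It shows how the go-sockaddr helpers added above fit together: parsing with NewSockAddr, sorting with OrderedAddrBy, and filtering with FilterByType. The sample addresses are assumptions made for the example.

```go
package main

import (
	"fmt"

	sockaddr "github.com/hashicorp/go-sockaddr"
)

func main() {
	// NewSockAddr tries IPv4, then IPv6, then falls back to a UNIX socket path.
	raw := []string{"10.0.0.5", "::1", "/var/run/app.sock", "192.168.1.10/24"}

	addrs := make(sockaddr.SockAddrs, 0, len(raw))
	for _, s := range raw {
		sa, err := sockaddr.NewSockAddr(s)
		if err != nil {
			fmt.Println("skipping", s, ":", err)
			continue
		}
		addrs = append(addrs, sa)
	}

	// Sort by type first, then by address.
	sockaddr.OrderedAddrBy(sockaddr.AscType, sockaddr.AscAddress).Sort(addrs)

	// Keep only the IPv4 entries.
	ipv4Only, rest := addrs.FilterByType(sockaddr.TypeIPv4)
	fmt.Println("IPv4:", ipv4Only, "other:", rest)
}
```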
diff --git a/vendor/github.com/hashicorp/go-uuid/LICENSE b/vendor/github.com/hashicorp/go-uuid/LICENSE new file mode 100644 index 0000000000..e87a115e46 --- /dev/null +++ b/vendor/github.com/hashicorp/go-uuid/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. 
under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. 
You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. 
Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. 
Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-uuid/README.md b/vendor/github.com/hashicorp/go-uuid/README.md new file mode 100644 index 0000000000..fbde8b9aef --- /dev/null +++ b/vendor/github.com/hashicorp/go-uuid/README.md @@ -0,0 +1,8 @@ +# uuid [![Build Status](https://travis-ci.org/hashicorp/go-uuid.svg?branch=master)](https://travis-ci.org/hashicorp/go-uuid) + +Generates UUID-format strings using high quality, _purely random_ bytes. It is **not** intended to be RFC compliant, merely to use a well-understood string representation of a 128-bit value. It can also parse UUID-format strings into their component bytes. + +Documentation +============= + +The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/go-uuid). 
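Editor's note: the README above summarizes the go-uuid API; below is a minimal usage sketch (illustrative only, not part of the vendored files) of GenerateUUID, ParseUUID, and FormatUUID from the package that follows.

```go
package main

import (
	"fmt"
	"log"

	uuid "github.com/hashicorp/go-uuid"
)

func main() {
	// Generate 16 random bytes and format them as a UUID-style string.
	id, err := uuid.GenerateUUID()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("generated:", id)

	// Round-trip: parse the string back into its 16 raw bytes and
	// re-format it; the output should match the original string.
	raw, err := uuid.ParseUUID(id)
	if err != nil {
		log.Fatal(err)
	}
	again, err := uuid.FormatUUID(raw)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("round-trip:", again)
}
```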
diff --git a/vendor/github.com/hashicorp/go-uuid/go.mod b/vendor/github.com/hashicorp/go-uuid/go.mod new file mode 100644 index 0000000000..dd57f9d21a --- /dev/null +++ b/vendor/github.com/hashicorp/go-uuid/go.mod @@ -0,0 +1 @@ +module github.com/hashicorp/go-uuid diff --git a/vendor/github.com/hashicorp/go-uuid/uuid.go b/vendor/github.com/hashicorp/go-uuid/uuid.go new file mode 100644 index 0000000000..0c10c4e9f5 --- /dev/null +++ b/vendor/github.com/hashicorp/go-uuid/uuid.go @@ -0,0 +1,83 @@ +package uuid + +import ( + "crypto/rand" + "encoding/hex" + "fmt" + "io" +) + +// GenerateRandomBytes is used to generate random bytes of given size. +func GenerateRandomBytes(size int) ([]byte, error) { + return GenerateRandomBytesWithReader(size, rand.Reader) +} + +// GenerateRandomBytesWithReader is used to generate random bytes of given size read from a given reader. +func GenerateRandomBytesWithReader(size int, reader io.Reader) ([]byte, error) { + if reader == nil { + return nil, fmt.Errorf("provided reader is nil") + } + buf := make([]byte, size) + if _, err := io.ReadFull(reader, buf); err != nil { + return nil, fmt.Errorf("failed to read random bytes: %v", err) + } + return buf, nil +} + + +const uuidLen = 16 + +// GenerateUUID is used to generate a random UUID +func GenerateUUID() (string, error) { + return GenerateUUIDWithReader(rand.Reader) +} + +// GenerateUUIDWithReader is used to generate a random UUID with a given Reader +func GenerateUUIDWithReader(reader io.Reader) (string, error) { + if reader == nil { + return "", fmt.Errorf("provided reader is nil") + } + buf, err := GenerateRandomBytesWithReader(uuidLen, reader) + if err != nil { + return "", err + } + return FormatUUID(buf) +} + +func FormatUUID(buf []byte) (string, error) { + if buflen := len(buf); buflen != uuidLen { + return "", fmt.Errorf("wrong length byte slice (%d)", buflen) + } + + return fmt.Sprintf("%x-%x-%x-%x-%x", + buf[0:4], + buf[4:6], + buf[6:8], + buf[8:10], + buf[10:16]), nil +} + +func ParseUUID(uuid string) ([]byte, error) { + if len(uuid) != 2 * uuidLen + 4 { + return nil, fmt.Errorf("uuid string is wrong length") + } + + if uuid[8] != '-' || + uuid[13] != '-' || + uuid[18] != '-' || + uuid[23] != '-' { + return nil, fmt.Errorf("uuid is improperly formatted") + } + + hexStr := uuid[0:8] + uuid[9:13] + uuid[14:18] + uuid[19:23] + uuid[24:36] + + ret, err := hex.DecodeString(hexStr) + if err != nil { + return nil, err + } + if len(ret) != uuidLen { + return nil, fmt.Errorf("decoded hex is the wrong length") + } + + return ret, nil +} diff --git a/vendor/github.com/hashicorp/go-version/.travis.yml b/vendor/github.com/hashicorp/go-version/.travis.yml new file mode 100644 index 0000000000..01c5dc219a --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/.travis.yml @@ -0,0 +1,13 @@ +language: go + +go: + - 1.2 + - 1.3 + - 1.4 + - 1.9 + - "1.10" + - 1.11 + - 1.12 + +script: + - go test diff --git a/vendor/github.com/hashicorp/go-version/LICENSE b/vendor/github.com/hashicorp/go-version/LICENSE new file mode 100644 index 0000000000..c33dcc7c92 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor†+ + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version†+ + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. 
“Contribution†+ + means Covered Software of a particular Contributor. + +1.4. “Covered Software†+ + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses†+ means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form†+ + means any form of the work other than Source Code Form. + +1.7. “Larger Work†+ + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License†+ + means this document. + +1.9. “Licensable†+ + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications†+ + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims†of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License†+ + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form†+ + means the form of the work preferred for making modifications. + +1.14. “You†(or “Yourâ€) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You†includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control†means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. 
Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. 
If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. 
+ +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is†basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. 
Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses†Notice + + This Source Code Form is “Incompatible + With Secondary Licensesâ€, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-version/README.md b/vendor/github.com/hashicorp/go-version/README.md new file mode 100644 index 0000000000..6f3a15ce77 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/README.md @@ -0,0 +1,65 @@ +# Versioning Library for Go +[![Build Status](https://travis-ci.org/hashicorp/go-version.svg?branch=master)](https://travis-ci.org/hashicorp/go-version) + +go-version is a library for parsing versions and version constraints, +and verifying versions against a set of constraints. go-version +can sort a collection of versions properly, handles prerelease/beta +versions, can increment versions, etc. + +Versions used with go-version must follow [SemVer](http://semver.org/). + +## Installation and Usage + +Package documentation can be found on +[GoDoc](http://godoc.org/github.com/hashicorp/go-version). + +Installation can be done with a normal `go get`: + +``` +$ go get github.com/hashicorp/go-version +``` + +#### Version Parsing and Comparison + +```go +v1, err := version.NewVersion("1.2") +v2, err := version.NewVersion("1.5+metadata") + +// Comparison example. There is also GreaterThan, Equal, and just +// a simple Compare that returns an int allowing easy >=, <=, etc. +if v1.LessThan(v2) { + fmt.Printf("%s is less than %s", v1, v2) +} +``` + +#### Version Constraints + +```go +v1, err := version.NewVersion("1.2") + +// Constraints example. +constraints, err := version.NewConstraint(">= 1.0, < 1.4") +if constraints.Check(v1) { + fmt.Printf("%s satisfies constraints %s", v1, constraints) +} +``` + +#### Version Sorting + +```go +versionsRaw := []string{"1.1", "0.7.1", "1.4-beta", "1.4", "2"} +versions := make([]*version.Version, len(versionsRaw)) +for i, raw := range versionsRaw { + v, _ := version.NewVersion(raw) + versions[i] = v +} + +// After this, the versions are properly sorted +sort.Sort(version.Collection(versions)) +``` + +## Issues and Contributing + +If you find an issue with this library, please report an issue. If you'd +like, we welcome any contributions. Fork this library and submit a pull +request. 
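Editor's note: the README examples above do not show the pessimistic operator (`~>`) handled by constraint.go below; here is a minimal, illustrative sketch (not part of the vendored files), assuming the standard import path.

```go
package main

import (
	"fmt"
	"log"

	version "github.com/hashicorp/go-version"
)

func main() {
	// "~> 1.2.3" is the pessimistic operator: it accepts 1.2.x for x >= 3,
	// but rejects 1.3.0 and above, since the last constraint segment is the
	// only one allowed to grow.
	constraints, err := version.NewConstraint("~> 1.2.3")
	if err != nil {
		log.Fatal(err)
	}

	for _, raw := range []string{"1.2.3", "1.2.9", "1.3.0", "2.0.0"} {
		v, err := version.NewVersion(raw)
		if err != nil {
			log.Fatal(err)
		}
		// Prints true for 1.2.3 and 1.2.9, false for 1.3.0 and 2.0.0.
		fmt.Printf("%s satisfies %s: %t\n", v, constraints, constraints.Check(v))
	}
}
```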
diff --git a/vendor/github.com/hashicorp/go-version/constraint.go b/vendor/github.com/hashicorp/go-version/constraint.go new file mode 100644 index 0000000000..d055759611 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/constraint.go @@ -0,0 +1,204 @@ +package version + +import ( + "fmt" + "reflect" + "regexp" + "strings" +) + +// Constraint represents a single constraint for a version, such as +// ">= 1.0". +type Constraint struct { + f constraintFunc + check *Version + original string +} + +// Constraints is a slice of constraints. We make a custom type so that +// we can add methods to it. +type Constraints []*Constraint + +type constraintFunc func(v, c *Version) bool + +var constraintOperators map[string]constraintFunc + +var constraintRegexp *regexp.Regexp + +func init() { + constraintOperators = map[string]constraintFunc{ + "": constraintEqual, + "=": constraintEqual, + "!=": constraintNotEqual, + ">": constraintGreaterThan, + "<": constraintLessThan, + ">=": constraintGreaterThanEqual, + "<=": constraintLessThanEqual, + "~>": constraintPessimistic, + } + + ops := make([]string, 0, len(constraintOperators)) + for k := range constraintOperators { + ops = append(ops, regexp.QuoteMeta(k)) + } + + constraintRegexp = regexp.MustCompile(fmt.Sprintf( + `^\s*(%s)\s*(%s)\s*$`, + strings.Join(ops, "|"), + VersionRegexpRaw)) +} + +// NewConstraint will parse one or more constraints from the given +// constraint string. The string must be a comma-separated list of +// constraints. +func NewConstraint(v string) (Constraints, error) { + vs := strings.Split(v, ",") + result := make([]*Constraint, len(vs)) + for i, single := range vs { + c, err := parseSingle(single) + if err != nil { + return nil, err + } + + result[i] = c + } + + return Constraints(result), nil +} + +// Check tests if a version satisfies all the constraints. +func (cs Constraints) Check(v *Version) bool { + for _, c := range cs { + if !c.Check(v) { + return false + } + } + + return true +} + +// Returns the string format of the constraints +func (cs Constraints) String() string { + csStr := make([]string, len(cs)) + for i, c := range cs { + csStr[i] = c.String() + } + + return strings.Join(csStr, ",") +} + +// Check tests if a constraint is validated by the given version. +func (c *Constraint) Check(v *Version) bool { + return c.f(v, c.check) +} + +func (c *Constraint) String() string { + return c.original +} + +func parseSingle(v string) (*Constraint, error) { + matches := constraintRegexp.FindStringSubmatch(v) + if matches == nil { + return nil, fmt.Errorf("Malformed constraint: %s", v) + } + + check, err := NewVersion(matches[2]) + if err != nil { + return nil, err + } + + return &Constraint{ + f: constraintOperators[matches[1]], + check: check, + original: v, + }, nil +} + +func prereleaseCheck(v, c *Version) bool { + switch vPre, cPre := v.Prerelease() != "", c.Prerelease() != ""; { + case cPre && vPre: + // A constraint with a pre-release can only match a pre-release version + // with the same base segments. + return reflect.DeepEqual(c.Segments64(), v.Segments64()) + + case !cPre && vPre: + // A constraint without a pre-release can only match a version without a + // pre-release. 
+ return false + + case cPre && !vPre: + // OK, except with the pessimistic operator + case !cPre && !vPre: + // OK + } + return true +} + +//------------------------------------------------------------------- +// Constraint functions +//------------------------------------------------------------------- + +func constraintEqual(v, c *Version) bool { + return v.Equal(c) +} + +func constraintNotEqual(v, c *Version) bool { + return !v.Equal(c) +} + +func constraintGreaterThan(v, c *Version) bool { + return prereleaseCheck(v, c) && v.Compare(c) == 1 +} + +func constraintLessThan(v, c *Version) bool { + return prereleaseCheck(v, c) && v.Compare(c) == -1 +} + +func constraintGreaterThanEqual(v, c *Version) bool { + return prereleaseCheck(v, c) && v.Compare(c) >= 0 +} + +func constraintLessThanEqual(v, c *Version) bool { + return prereleaseCheck(v, c) && v.Compare(c) <= 0 +} + +func constraintPessimistic(v, c *Version) bool { + // Using a pessimistic constraint with a pre-release, restricts versions to pre-releases + if !prereleaseCheck(v, c) || (c.Prerelease() != "" && v.Prerelease() == "") { + return false + } + + // If the version being checked is naturally less than the constraint, then there + // is no way for the version to be valid against the constraint + if v.LessThan(c) { + return false + } + // We'll use this more than once, so grab the length now so it's a little cleaner + // to write the later checks + cs := len(c.segments) + + // If the version being checked has less specificity than the constraint, then there + // is no way for the version to be valid against the constraint + if cs > len(v.segments) { + return false + } + + // Check the segments in the constraint against those in the version. If the version + // being checked, at any point, does not have the same values in each index of the + // constraints segments, then it cannot be valid against the constraint. + for i := 0; i < c.si-1; i++ { + if v.segments[i] != c.segments[i] { + return false + } + } + + // Check the last part of the segment in the constraint. If the version segment at + // this index is less than the constraints segment at this index, then it cannot + // be valid against the constraint + if c.segments[cs-1] > v.segments[cs-1] { + return false + } + + // If nothing has rejected the version by now, it's valid + return true +} diff --git a/vendor/github.com/hashicorp/go-version/go.mod b/vendor/github.com/hashicorp/go-version/go.mod new file mode 100644 index 0000000000..f5285555fa --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/go.mod @@ -0,0 +1 @@ +module github.com/hashicorp/go-version diff --git a/vendor/github.com/hashicorp/go-version/version.go b/vendor/github.com/hashicorp/go-version/version.go new file mode 100644 index 0000000000..1032c5606c --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/version.go @@ -0,0 +1,380 @@ +package version + +import ( + "bytes" + "fmt" + "reflect" + "regexp" + "strconv" + "strings" +) + +// The compiled regular expression used to test the validity of a version. +var ( + versionRegexp *regexp.Regexp + semverRegexp *regexp.Regexp +) + +// The raw regular expression string used for testing the validity +// of a version. 
+const ( + VersionRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` + + `(-([0-9]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)|(-?([A-Za-z\-~]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)))?` + + `(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` + + `?` + + // SemverRegexpRaw requires a separator between version and prerelease + SemverRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` + + `(-([0-9]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)|(-([A-Za-z\-~]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)))?` + + `(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` + + `?` +) + +// Version represents a single version. +type Version struct { + metadata string + pre string + segments []int64 + si int + original string +} + +func init() { + versionRegexp = regexp.MustCompile("^" + VersionRegexpRaw + "$") + semverRegexp = regexp.MustCompile("^" + SemverRegexpRaw + "$") +} + +// NewVersion parses the given version and returns a new +// Version. +func NewVersion(v string) (*Version, error) { + return newVersion(v, versionRegexp) +} + +// NewSemver parses the given version and returns a new +// Version that adheres strictly to SemVer specs +// https://semver.org/ +func NewSemver(v string) (*Version, error) { + return newVersion(v, semverRegexp) +} + +func newVersion(v string, pattern *regexp.Regexp) (*Version, error) { + matches := pattern.FindStringSubmatch(v) + if matches == nil { + return nil, fmt.Errorf("Malformed version: %s", v) + } + segmentsStr := strings.Split(matches[1], ".") + segments := make([]int64, len(segmentsStr)) + si := 0 + for i, str := range segmentsStr { + val, err := strconv.ParseInt(str, 10, 64) + if err != nil { + return nil, fmt.Errorf( + "Error parsing version: %s", err) + } + + segments[i] = int64(val) + si++ + } + + // Even though we could support more than three segments, if we + // got less than three, pad it with 0s. This is to cover the basic + // default usecase of semver, which is MAJOR.MINOR.PATCH at the minimum + for i := len(segments); i < 3; i++ { + segments = append(segments, 0) + } + + pre := matches[7] + if pre == "" { + pre = matches[4] + } + + return &Version{ + metadata: matches[10], + pre: pre, + segments: segments, + si: si, + original: v, + }, nil +} + +// Must is a helper that wraps a call to a function returning (*Version, error) +// and panics if error is non-nil. +func Must(v *Version, err error) *Version { + if err != nil { + panic(err) + } + + return v +} + +// Compare compares this version to another version. This +// returns -1, 0, or 1 if this version is smaller, equal, +// or larger than the other version, respectively. +// +// If you want boolean results, use the LessThan, Equal, +// GreaterThan, GreaterThanOrEqual or LessThanOrEqual methods. 
+func (v *Version) Compare(other *Version) int { + // A quick, efficient equality check + if v.String() == other.String() { + return 0 + } + + segmentsSelf := v.Segments64() + segmentsOther := other.Segments64() + + // If the segments are the same, we must compare on prerelease info + if reflect.DeepEqual(segmentsSelf, segmentsOther) { + preSelf := v.Prerelease() + preOther := other.Prerelease() + if preSelf == "" && preOther == "" { + return 0 + } + if preSelf == "" { + return 1 + } + if preOther == "" { + return -1 + } + + return comparePrereleases(preSelf, preOther) + } + + // Get the highest specificity (hS), or if they're equal, just use segmentSelf length + lenSelf := len(segmentsSelf) + lenOther := len(segmentsOther) + hS := lenSelf + if lenSelf < lenOther { + hS = lenOther + } + // Compare the segments + // Because a constraint could have more/less specificity than the version it's + // checking, we need to account for a lopsided or jagged comparison + for i := 0; i < hS; i++ { + if i > lenSelf-1 { + // This means Self had the lower specificity + // Check to see if the remaining segments in Other are all zeros + if !allZero(segmentsOther[i:]) { + // if not, it means that Other has to be greater than Self + return -1 + } + break + } else if i > lenOther-1 { + // this means Other had the lower specificity + // Check to see if the remaining segments in Self are all zeros - + if !allZero(segmentsSelf[i:]) { + //if not, it means that Self has to be greater than Other + return 1 + } + break + } + lhs := segmentsSelf[i] + rhs := segmentsOther[i] + if lhs == rhs { + continue + } else if lhs < rhs { + return -1 + } + // Otherwis, rhs was > lhs, they're not equal + return 1 + } + + // if we got this far, they're equal + return 0 +} + +func allZero(segs []int64) bool { + for _, s := range segs { + if s != 0 { + return false + } + } + return true +} + +func comparePart(preSelf string, preOther string) int { + if preSelf == preOther { + return 0 + } + + var selfInt int64 + selfNumeric := true + selfInt, err := strconv.ParseInt(preSelf, 10, 64) + if err != nil { + selfNumeric = false + } + + var otherInt int64 + otherNumeric := true + otherInt, err = strconv.ParseInt(preOther, 10, 64) + if err != nil { + otherNumeric = false + } + + // if a part is empty, we use the other to decide + if preSelf == "" { + if otherNumeric { + return -1 + } + return 1 + } + + if preOther == "" { + if selfNumeric { + return 1 + } + return -1 + } + + if selfNumeric && !otherNumeric { + return -1 + } else if !selfNumeric && otherNumeric { + return 1 + } else if !selfNumeric && !otherNumeric && preSelf > preOther { + return 1 + } else if selfInt > otherInt { + return 1 + } + + return -1 +} + +func comparePrereleases(v string, other string) int { + // the same pre release! 
+ if v == other { + return 0 + } + + // split both pre releases for analyse their parts + selfPreReleaseMeta := strings.Split(v, ".") + otherPreReleaseMeta := strings.Split(other, ".") + + selfPreReleaseLen := len(selfPreReleaseMeta) + otherPreReleaseLen := len(otherPreReleaseMeta) + + biggestLen := otherPreReleaseLen + if selfPreReleaseLen > otherPreReleaseLen { + biggestLen = selfPreReleaseLen + } + + // loop for parts to find the first difference + for i := 0; i < biggestLen; i = i + 1 { + partSelfPre := "" + if i < selfPreReleaseLen { + partSelfPre = selfPreReleaseMeta[i] + } + + partOtherPre := "" + if i < otherPreReleaseLen { + partOtherPre = otherPreReleaseMeta[i] + } + + compare := comparePart(partSelfPre, partOtherPre) + // if parts are equals, continue the loop + if compare != 0 { + return compare + } + } + + return 0 +} + +// Equal tests if two versions are equal. +func (v *Version) Equal(o *Version) bool { + return v.Compare(o) == 0 +} + +// GreaterThan tests if this version is greater than another version. +func (v *Version) GreaterThan(o *Version) bool { + return v.Compare(o) > 0 +} + +// GreaterThanOrEqualTo tests if this version is greater than or equal to another version. +func (v *Version) GreaterThanOrEqual(o *Version) bool { + return v.Compare(o) >= 0 +} + +// LessThan tests if this version is less than another version. +func (v *Version) LessThan(o *Version) bool { + return v.Compare(o) < 0 +} + +// LessThanOrEqualTo tests if this version is less than or equal to another version. +func (v *Version) LessThanOrEqual(o *Version) bool { + return v.Compare(o) <= 0 +} + +// Metadata returns any metadata that was part of the version +// string. +// +// Metadata is anything that comes after the "+" in the version. +// For example, with "1.2.3+beta", the metadata is "beta". +func (v *Version) Metadata() string { + return v.metadata +} + +// Prerelease returns any prerelease data that is part of the version, +// or blank if there is no prerelease data. +// +// Prerelease information is anything that comes after the "-" in the +// version (but before any metadata). For example, with "1.2.3-beta", +// the prerelease information is "beta". +func (v *Version) Prerelease() string { + return v.pre +} + +// Segments returns the numeric segments of the version as a slice of ints. +// +// This excludes any metadata or pre-release information. For example, +// for a version "1.2.3-beta", segments will return a slice of +// 1, 2, 3. +func (v *Version) Segments() []int { + segmentSlice := make([]int, len(v.segments)) + for i, v := range v.segments { + segmentSlice[i] = int(v) + } + return segmentSlice +} + +// Segments64 returns the numeric segments of the version as a slice of int64s. +// +// This excludes any metadata or pre-release information. For example, +// for a version "1.2.3-beta", segments will return a slice of +// 1, 2, 3. +func (v *Version) Segments64() []int64 { + result := make([]int64, len(v.segments)) + copy(result, v.segments) + return result +} + +// String returns the full version string included pre-release +// and metadata information. +// +// This value is rebuilt according to the parsed segments and other +// information. Therefore, ambiguities in the version string such as +// prefixed zeroes (1.04.0 => 1.4.0), `v` prefix (v1.0.0 => 1.0.0), and +// missing parts (1.0 => 1.0.0) will be made into a canonicalized form +// as shown in the parenthesized examples. 
+func (v *Version) String() string { + var buf bytes.Buffer + fmtParts := make([]string, len(v.segments)) + for i, s := range v.segments { + // We can ignore err here since we've pre-parsed the values in segments + str := strconv.FormatInt(s, 10) + fmtParts[i] = str + } + fmt.Fprintf(&buf, strings.Join(fmtParts, ".")) + if v.pre != "" { + fmt.Fprintf(&buf, "-%s", v.pre) + } + if v.metadata != "" { + fmt.Fprintf(&buf, "+%s", v.metadata) + } + + return buf.String() +} + +// Original returns the original parsed version as-is, including any +// potential whitespace, `v` prefix, etc. +func (v *Version) Original() string { + return v.original +} diff --git a/vendor/github.com/hashicorp/go-version/version_collection.go b/vendor/github.com/hashicorp/go-version/version_collection.go new file mode 100644 index 0000000000..cc888d43e6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/version_collection.go @@ -0,0 +1,17 @@ +package version + +// Collection is a type that implements the sort.Interface interface +// so that versions can be sorted. +type Collection []*Version + +func (v Collection) Len() int { + return len(v) +} + +func (v Collection) Less(i, j int) bool { + return v[i].LessThan(v[j]) +} + +func (v Collection) Swap(i, j int) { + v[i], v[j] = v[j], v[i] +} diff --git a/vendor/github.com/hashicorp/vault/api/LICENSE b/vendor/github.com/hashicorp/vault/api/LICENSE new file mode 100644 index 0000000000..e87a115e46 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. 
"Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. 
Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/vault/api/README.md b/vendor/github.com/hashicorp/vault/api/README.md new file mode 100644 index 0000000000..bc525e9bb4 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/README.md @@ -0,0 +1,8 @@ +Vault API +================= + +This provides the `github.com/hashicorp/vault/api` package which contains code useful for interacting with a Vault server. + +For examples of how to use this module, see the [vault-examples](https://github.com/hashicorp/vault-examples/tree/main/go) repo. + +[![GoDoc](https://godoc.org/github.com/hashicorp/vault/api?status.png)](https://godoc.org/github.com/hashicorp/vault/api) \ No newline at end of file diff --git a/vendor/github.com/hashicorp/vault/api/auth.go b/vendor/github.com/hashicorp/vault/api/auth.go new file mode 100644 index 0000000000..10af56bb99 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/auth.go @@ -0,0 +1,46 @@ +package api + +import ( + "context" + "fmt" +) + +// Auth is used to perform credential backend related operations. +type Auth struct { + c *Client +} + +type AuthMethod interface { + Login(ctx context.Context, client *Client) (*Secret, error) +} + +// Auth is used to return the client for credential-backend API calls. +func (c *Client) Auth() *Auth { + return &Auth{c: c} +} + +// Login sets up the required request body for login requests to the given auth +// method's /login API endpoint, and then performs a write to it. After a +// successful login, this method will automatically set the client's token to +// the login response's ClientToken as well. +// +// The Secret returned is the authentication secret, which if desired can be +// passed as input to the NewLifetimeWatcher method in order to start +// automatically renewing the token. 
+func (a *Auth) Login(ctx context.Context, authMethod AuthMethod) (*Secret, error) { + if authMethod == nil { + return nil, fmt.Errorf("no auth method provided for login") + } + + authSecret, err := authMethod.Login(ctx, a.c) + if err != nil { + return nil, fmt.Errorf("unable to log in to auth method: %w", err) + } + if authSecret == nil || authSecret.Auth == nil || authSecret.Auth.ClientToken == "" { + return nil, fmt.Errorf("login response from auth method did not return client token") + } + + a.c.SetToken(authSecret.Auth.ClientToken) + + return authSecret, nil +} diff --git a/vendor/github.com/hashicorp/vault/api/auth_token.go b/vendor/github.com/hashicorp/vault/api/auth_token.go new file mode 100644 index 0000000000..32c77bc620 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/auth_token.go @@ -0,0 +1,296 @@ +package api + +import "context" + +// TokenAuth is used to perform token backend operations on Vault +type TokenAuth struct { + c *Client +} + +// Token is used to return the client for token-backend API calls +func (a *Auth) Token() *TokenAuth { + return &TokenAuth{c: a.c} +} + +func (c *TokenAuth) Create(opts *TokenCreateRequest) (*Secret, error) { + r := c.c.NewRequest("POST", "/v1/auth/token/create") + if err := r.SetJSONBody(opts); err != nil { + return nil, err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +func (c *TokenAuth) CreateOrphan(opts *TokenCreateRequest) (*Secret, error) { + r := c.c.NewRequest("POST", "/v1/auth/token/create-orphan") + if err := r.SetJSONBody(opts); err != nil { + return nil, err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +func (c *TokenAuth) CreateWithRole(opts *TokenCreateRequest, roleName string) (*Secret, error) { + r := c.c.NewRequest("POST", "/v1/auth/token/create/"+roleName) + if err := r.SetJSONBody(opts); err != nil { + return nil, err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +func (c *TokenAuth) Lookup(token string) (*Secret, error) { + r := c.c.NewRequest("POST", "/v1/auth/token/lookup") + if err := r.SetJSONBody(map[string]interface{}{ + "token": token, + }); err != nil { + return nil, err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +func (c *TokenAuth) LookupAccessor(accessor string) (*Secret, error) { + r := c.c.NewRequest("POST", "/v1/auth/token/lookup-accessor") + if err := r.SetJSONBody(map[string]interface{}{ + "accessor": accessor, + }); err != nil { + return nil, err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +func (c *TokenAuth) LookupSelf() (*Secret, error) { + r := c.c.NewRequest("GET", "/v1/auth/token/lookup-self") + + ctx, cancelFunc := 
context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +func (c *TokenAuth) RenewAccessor(accessor string, increment int) (*Secret, error) { + r := c.c.NewRequest("POST", "/v1/auth/token/renew-accessor") + if err := r.SetJSONBody(map[string]interface{}{ + "accessor": accessor, + "increment": increment, + }); err != nil { + return nil, err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +func (c *TokenAuth) Renew(token string, increment int) (*Secret, error) { + r := c.c.NewRequest("PUT", "/v1/auth/token/renew") + if err := r.SetJSONBody(map[string]interface{}{ + "token": token, + "increment": increment, + }); err != nil { + return nil, err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +func (c *TokenAuth) RenewSelf(increment int) (*Secret, error) { + r := c.c.NewRequest("PUT", "/v1/auth/token/renew-self") + + body := map[string]interface{}{"increment": increment} + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +// RenewTokenAsSelf behaves like renew-self, but authenticates using a provided +// token instead of the token attached to the client. +func (c *TokenAuth) RenewTokenAsSelf(token string, increment int) (*Secret, error) { + r := c.c.NewRequest("PUT", "/v1/auth/token/renew-self") + r.ClientToken = token + + body := map[string]interface{}{"increment": increment} + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +// RevokeAccessor revokes a token associated with the given accessor +// along with all the child tokens. 
+func (c *TokenAuth) RevokeAccessor(accessor string) error { + r := c.c.NewRequest("POST", "/v1/auth/token/revoke-accessor") + if err := r.SetJSONBody(map[string]interface{}{ + "accessor": accessor, + }); err != nil { + return err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +// RevokeOrphan revokes a token without revoking the tree underneath it (so +// child tokens are orphaned rather than revoked) +func (c *TokenAuth) RevokeOrphan(token string) error { + r := c.c.NewRequest("PUT", "/v1/auth/token/revoke-orphan") + if err := r.SetJSONBody(map[string]interface{}{ + "token": token, + }); err != nil { + return err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +// RevokeSelf revokes the token making the call. The `token` parameter is kept +// for backwards compatibility but is ignored; only the client's set token has +// an effect. +func (c *TokenAuth) RevokeSelf(token string) error { + r := c.c.NewRequest("PUT", "/v1/auth/token/revoke-self") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +// RevokeTree is the "normal" revoke operation that revokes the given token and +// the entire tree underneath -- all of its child tokens, their child tokens, +// etc. +func (c *TokenAuth) RevokeTree(token string) error { + r := c.c.NewRequest("PUT", "/v1/auth/token/revoke") + if err := r.SetJSONBody(map[string]interface{}{ + "token": token, + }); err != nil { + return err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +// TokenCreateRequest is the options structure for creating a token. 
+type TokenCreateRequest struct { + ID string `json:"id,omitempty"` + Policies []string `json:"policies,omitempty"` + Metadata map[string]string `json:"meta,omitempty"` + Lease string `json:"lease,omitempty"` + TTL string `json:"ttl,omitempty"` + ExplicitMaxTTL string `json:"explicit_max_ttl,omitempty"` + Period string `json:"period,omitempty"` + NoParent bool `json:"no_parent,omitempty"` + NoDefaultPolicy bool `json:"no_default_policy,omitempty"` + DisplayName string `json:"display_name"` + NumUses int `json:"num_uses"` + Renewable *bool `json:"renewable,omitempty"` + Type string `json:"type"` + EntityAlias string `json:"entity_alias"` +} diff --git a/vendor/github.com/hashicorp/vault/api/client.go b/vendor/github.com/hashicorp/vault/api/client.go new file mode 100644 index 0000000000..2d1c3b683f --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/client.go @@ -0,0 +1,1393 @@ +package api + +import ( + "context" + "crypto/hmac" + "crypto/sha256" + "crypto/tls" + "encoding/base64" + "encoding/hex" + "fmt" + "net" + "net/http" + "net/url" + "os" + "path" + "strconv" + "strings" + "sync" + "time" + "unicode" + + "github.com/hashicorp/errwrap" + cleanhttp "github.com/hashicorp/go-cleanhttp" + retryablehttp "github.com/hashicorp/go-retryablehttp" + rootcerts "github.com/hashicorp/go-rootcerts" + "github.com/hashicorp/go-secure-stdlib/parseutil" + "golang.org/x/net/http2" + "golang.org/x/time/rate" + + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/strutil" + "github.com/hashicorp/vault/sdk/logical" +) + +const ( + EnvVaultAddress = "VAULT_ADDR" + EnvVaultAgentAddr = "VAULT_AGENT_ADDR" + EnvVaultCACert = "VAULT_CACERT" + EnvVaultCAPath = "VAULT_CAPATH" + EnvVaultClientCert = "VAULT_CLIENT_CERT" + EnvVaultClientKey = "VAULT_CLIENT_KEY" + EnvVaultClientTimeout = "VAULT_CLIENT_TIMEOUT" + EnvVaultSRVLookup = "VAULT_SRV_LOOKUP" + EnvVaultSkipVerify = "VAULT_SKIP_VERIFY" + EnvVaultNamespace = "VAULT_NAMESPACE" + EnvVaultTLSServerName = "VAULT_TLS_SERVER_NAME" + EnvVaultWrapTTL = "VAULT_WRAP_TTL" + EnvVaultMaxRetries = "VAULT_MAX_RETRIES" + EnvVaultToken = "VAULT_TOKEN" + EnvVaultMFA = "VAULT_MFA" + EnvRateLimit = "VAULT_RATE_LIMIT" + EnvHTTPProxy = "VAULT_HTTP_PROXY" + HeaderIndex = "X-Vault-Index" +) + +// Deprecated values +const ( + EnvVaultAgentAddress = "VAULT_AGENT_ADDR" + EnvVaultInsecure = "VAULT_SKIP_VERIFY" +) + +// WrappingLookupFunc is a function that, given an HTTP verb and a path, +// returns an optional string duration to be used for response wrapping (e.g. +// "15s", or simply "15"). The path will not begin with "/v1/" or "v1/" or "/", +// however, end-of-path forward slashes are not trimmed, so must match your +// called path precisely. Response wrapping will only be used when the return +// value is not the empty string. +type WrappingLookupFunc func(operation, path string) string + +// Config is used to configure the creation of the client. +type Config struct { + modifyLock sync.RWMutex + + // Address is the address of the Vault server. This should be a complete + // URL such as "http://vault.example.com". If you need a custom SSL + // cert or want to enable insecure mode, you need to specify a custom + // HttpClient. + Address string + + // AgentAddress is the address of the local Vault agent. This should be a + // complete URL such as "http://vault.example.com". + AgentAddress string + + // HttpClient is the HTTP client to use. 
Vault sets sane defaults for the + // http.Client and its associated http.Transport created in DefaultConfig. + // If you must modify Vault's defaults, it is suggested that you start with + // that client and modify as needed rather than start with an empty client + // (or http.DefaultClient). + HttpClient *http.Client + + // MinRetryWait controls the minimum time to wait before retrying when a 5xx + // error occurs. Defaults to 1000 milliseconds. + MinRetryWait time.Duration + + // MaxRetryWait controls the maximum time to wait before retrying when a 5xx + // error occurs. Defaults to 1500 milliseconds. + MaxRetryWait time.Duration + + // MaxRetries controls the maximum number of times to retry when a 5xx + // error occurs. Set to 0 to disable retrying. Defaults to 2 (for a total + // of three tries). + MaxRetries int + + // Timeout is for setting custom timeout parameter in the HttpClient + Timeout time.Duration + + // If there is an error when creating the configuration, this will be the + // error + Error error + + // The Backoff function to use; a default is used if not provided + Backoff retryablehttp.Backoff + + // The CheckRetry function to use; a default is used if not provided + CheckRetry retryablehttp.CheckRetry + + // Logger is the leveled logger to provide to the retryable HTTP client. + Logger retryablehttp.LeveledLogger + + // Limiter is the rate limiter used by the client. + // If this pointer is nil, then there will be no limit set. + // In contrast, if this pointer is set, even to an empty struct, + // then that limiter will be used. Note that an empty Limiter + // is equivalent blocking all events. + Limiter *rate.Limiter + + // OutputCurlString causes the actual request to return an error of type + // *OutputStringError. Type asserting the error message will allow + // fetching a cURL-compatible string for the operation. + // + // Note: It is not thread-safe to set this and make concurrent requests + // with the same client. Cloning a client will not clone this value. + OutputCurlString bool + + // SRVLookup enables the client to lookup the host through DNS SRV lookup + SRVLookup bool + + // CloneHeaders ensures that the source client's headers are copied to + // its clone. + CloneHeaders bool + + // ReadYourWrites ensures isolated read-after-write semantics by + // providing discovered cluster replication states in each request. + // The shared state is automatically propagated to all Client clones. + // + // Note: Careful consideration should be made prior to enabling this setting + // since there will be a performance penalty paid upon each request. + // This feature requires Enterprise server-side. + ReadYourWrites bool +} + +// TLSConfig contains the parameters needed to configure TLS on the HTTP client +// used to communicate with Vault. +type TLSConfig struct { + // CACert is the path to a PEM-encoded CA cert file to use to verify the + // Vault server SSL certificate. + CACert string + + // CAPath is the path to a directory of PEM-encoded CA cert files to verify + // the Vault server SSL certificate. + CAPath string + + // ClientCert is the path to the certificate for Vault communication + ClientCert string + + // ClientKey is the path to the private key for Vault communication + ClientKey string + + // TLSServerName, if set, is used to set the SNI host when connecting via + // TLS. + TLSServerName string + + // Insecure enables or disables SSL verification + Insecure bool +} + +// DefaultConfig returns a default configuration for the client. 
It is +// safe to modify the return value of this function. +// +// The default Address is https://127.0.0.1:8200, but this can be overridden by +// setting the `VAULT_ADDR` environment variable. +// +// If an error is encountered, this will return nil. +func DefaultConfig() *Config { + config := &Config{ + Address: "https://127.0.0.1:8200", + HttpClient: cleanhttp.DefaultPooledClient(), + Timeout: time.Second * 60, + MinRetryWait: time.Millisecond * 1000, + MaxRetryWait: time.Millisecond * 1500, + MaxRetries: 2, + Backoff: retryablehttp.LinearJitterBackoff, + } + + transport := config.HttpClient.Transport.(*http.Transport) + transport.TLSHandshakeTimeout = 10 * time.Second + transport.TLSClientConfig = &tls.Config{ + MinVersion: tls.VersionTLS12, + } + if err := http2.ConfigureTransport(transport); err != nil { + config.Error = err + return config + } + + if err := config.ReadEnvironment(); err != nil { + config.Error = err + return config + } + + // Ensure redirects are not automatically followed + // Note that this is sane for the API client as it has its own + // redirect handling logic (and thus also for command/meta), + // but in e.g. http_test actual redirect handling is necessary + config.HttpClient.CheckRedirect = func(req *http.Request, via []*http.Request) error { + // Returning this value causes the Go net library to not close the + // response body and to nil out the error. Otherwise retry clients may + // try three times on every redirect because it sees an error from this + // function (to prevent redirects) passing through to it. + return http.ErrUseLastResponse + } + + return config +} + +// ConfigureTLS takes a set of TLS configurations and applies those to the +// HTTP client. +func (c *Config) ConfigureTLS(t *TLSConfig) error { + if c.HttpClient == nil { + c.HttpClient = DefaultConfig().HttpClient + } + clientTLSConfig := c.HttpClient.Transport.(*http.Transport).TLSClientConfig + + var clientCert tls.Certificate + foundClientCert := false + + switch { + case t.ClientCert != "" && t.ClientKey != "": + var err error + clientCert, err = tls.LoadX509KeyPair(t.ClientCert, t.ClientKey) + if err != nil { + return err + } + foundClientCert = true + case t.ClientCert != "" || t.ClientKey != "": + return fmt.Errorf("both client cert and client key must be provided") + } + + if t.CACert != "" || t.CAPath != "" { + rootConfig := &rootcerts.Config{ + CAFile: t.CACert, + CAPath: t.CAPath, + } + if err := rootcerts.ConfigureTLS(clientTLSConfig, rootConfig); err != nil { + return err + } + } + + if t.Insecure { + clientTLSConfig.InsecureSkipVerify = true + } + + if foundClientCert { + // We use this function to ignore the server's preferential list of + // CAs, otherwise any CA used for the cert auth backend must be in the + // server's CA pool + clientTLSConfig.GetClientCertificate = func(*tls.CertificateRequestInfo) (*tls.Certificate, error) { + return &clientCert, nil + } + } + + if t.TLSServerName != "" { + clientTLSConfig.ServerName = t.TLSServerName + } + + return nil +} + +// ReadEnvironment reads configuration information from the environment. If +// there is an error, no configuration value is updated. 
+func (c *Config) ReadEnvironment() error { + var envAddress string + var envAgentAddress string + var envCACert string + var envCAPath string + var envClientCert string + var envClientKey string + var envClientTimeout time.Duration + var envInsecure bool + var envTLSServerName string + var envMaxRetries *uint64 + var envSRVLookup bool + var limit *rate.Limiter + var envHTTPProxy string + + // Parse the environment variables + if v := os.Getenv(EnvVaultAddress); v != "" { + envAddress = v + } + if v := os.Getenv(EnvVaultAgentAddr); v != "" { + envAgentAddress = v + } else if v := os.Getenv(EnvVaultAgentAddress); v != "" { + envAgentAddress = v + } + if v := os.Getenv(EnvVaultMaxRetries); v != "" { + maxRetries, err := strconv.ParseUint(v, 10, 32) + if err != nil { + return err + } + envMaxRetries = &maxRetries + } + if v := os.Getenv(EnvVaultCACert); v != "" { + envCACert = v + } + if v := os.Getenv(EnvVaultCAPath); v != "" { + envCAPath = v + } + if v := os.Getenv(EnvVaultClientCert); v != "" { + envClientCert = v + } + if v := os.Getenv(EnvVaultClientKey); v != "" { + envClientKey = v + } + if v := os.Getenv(EnvRateLimit); v != "" { + rateLimit, burstLimit, err := parseRateLimit(v) + if err != nil { + return err + } + limit = rate.NewLimiter(rate.Limit(rateLimit), burstLimit) + } + if t := os.Getenv(EnvVaultClientTimeout); t != "" { + clientTimeout, err := parseutil.ParseDurationSecond(t) + if err != nil { + return fmt.Errorf("could not parse %q", EnvVaultClientTimeout) + } + envClientTimeout = clientTimeout + } + if v := os.Getenv(EnvVaultSkipVerify); v != "" { + var err error + envInsecure, err = strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("could not parse VAULT_SKIP_VERIFY") + } + } else if v := os.Getenv(EnvVaultInsecure); v != "" { + var err error + envInsecure, err = strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("could not parse VAULT_INSECURE") + } + } + if v := os.Getenv(EnvVaultSRVLookup); v != "" { + var err error + envSRVLookup, err = strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("could not parse %s", EnvVaultSRVLookup) + } + } + + if v := os.Getenv(EnvVaultTLSServerName); v != "" { + envTLSServerName = v + } + + if v := os.Getenv(EnvHTTPProxy); v != "" { + envHTTPProxy = v + } + + // Configure the HTTP clients TLS configuration. + t := &TLSConfig{ + CACert: envCACert, + CAPath: envCAPath, + ClientCert: envClientCert, + ClientKey: envClientKey, + TLSServerName: envTLSServerName, + Insecure: envInsecure, + } + + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + + c.SRVLookup = envSRVLookup + c.Limiter = limit + + if err := c.ConfigureTLS(t); err != nil { + return err + } + + if envAddress != "" { + c.Address = envAddress + } + + if envAgentAddress != "" { + c.AgentAddress = envAgentAddress + } + + if envMaxRetries != nil { + c.MaxRetries = int(*envMaxRetries) + } + + if envClientTimeout != 0 { + c.Timeout = envClientTimeout + } + + if envHTTPProxy != "" { + url, err := url.Parse(envHTTPProxy) + if err != nil { + return err + } + + transport := c.HttpClient.Transport.(*http.Transport) + transport.Proxy = http.ProxyURL(url) + } + + return nil +} + +func parseRateLimit(val string) (rate float64, burst int, err error) { + _, err = fmt.Sscanf(val, "%f:%d", &rate, &burst) + if err != nil { + rate, err = strconv.ParseFloat(val, 64) + if err != nil { + err = fmt.Errorf("%v was provided but incorrectly formatted", EnvRateLimit) + } + burst = int(rate) + } + + return rate, burst, err +} + +// Client is the client to the Vault API. 
Create a client with NewClient. +type Client struct { + modifyLock sync.RWMutex + addr *url.URL + config *Config + token string + headers http.Header + wrappingLookupFunc WrappingLookupFunc + mfaCreds []string + policyOverride bool + requestCallbacks []RequestCallback + responseCallbacks []ResponseCallback + replicationStateStore *replicationStateStore +} + +// NewClient returns a new client for the given configuration. +// +// If the configuration is nil, Vault will use configuration from +// DefaultConfig(), which is the recommended starting configuration. +// +// If the environment variable `VAULT_TOKEN` is present, the token will be +// automatically added to the client. Otherwise, you must manually call +// `SetToken()`. +func NewClient(c *Config) (*Client, error) { + def := DefaultConfig() + if def == nil { + return nil, fmt.Errorf("could not create/read default configuration") + } + if def.Error != nil { + return nil, errwrap.Wrapf("error encountered setting up default configuration: {{err}}", def.Error) + } + + if c == nil { + c = def + } + + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + + if c.MinRetryWait == 0 { + c.MinRetryWait = def.MinRetryWait + } + + if c.MaxRetryWait == 0 { + c.MaxRetryWait = def.MaxRetryWait + } + + if c.HttpClient == nil { + c.HttpClient = def.HttpClient + } + if c.HttpClient.Transport == nil { + c.HttpClient.Transport = def.HttpClient.Transport + } + + address := c.Address + if c.AgentAddress != "" { + address = c.AgentAddress + } + + u, err := url.Parse(address) + if err != nil { + return nil, err + } + + if strings.HasPrefix(address, "unix://") { + socket := strings.TrimPrefix(address, "unix://") + transport := c.HttpClient.Transport.(*http.Transport) + transport.DialContext = func(context.Context, string, string) (net.Conn, error) { + return net.Dial("unix", socket) + } + + // Since the address points to a unix domain socket, the scheme in the + // *URL would be set to `unix`. The *URL in the client is expected to + // be pointing to the protocol used in the application layer and not to + // the transport layer. Hence, setting the fields accordingly. 
+ u.Scheme = "http" + u.Host = socket + u.Path = "" + } + + client := &Client{ + addr: u, + config: c, + headers: make(http.Header), + } + + if c.ReadYourWrites { + client.replicationStateStore = &replicationStateStore{} + } + + // Add the VaultRequest SSRF protection header + client.headers[consts.RequestHeaderName] = []string{"true"} + + if token := os.Getenv(EnvVaultToken); token != "" { + client.token = token + } + + if namespace := os.Getenv(EnvVaultNamespace); namespace != "" { + client.setNamespace(namespace) + } + + return client, nil +} + +func (c *Client) CloneConfig() *Config { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + + newConfig := DefaultConfig() + newConfig.Address = c.config.Address + newConfig.AgentAddress = c.config.AgentAddress + newConfig.MinRetryWait = c.config.MinRetryWait + newConfig.MaxRetryWait = c.config.MaxRetryWait + newConfig.MaxRetries = c.config.MaxRetries + newConfig.Timeout = c.config.Timeout + newConfig.Backoff = c.config.Backoff + newConfig.CheckRetry = c.config.CheckRetry + newConfig.Logger = c.config.Logger + newConfig.Limiter = c.config.Limiter + newConfig.OutputCurlString = c.config.OutputCurlString + newConfig.SRVLookup = c.config.SRVLookup + newConfig.CloneHeaders = c.config.CloneHeaders + newConfig.ReadYourWrites = c.config.ReadYourWrites + + // we specifically want a _copy_ of the client here, not a pointer to the original one + newClient := *c.config.HttpClient + newConfig.HttpClient = &newClient + + return newConfig +} + +// Sets the address of Vault in the client. The format of address should be +// "://:". Setting this on a client will override the +// value of VAULT_ADDR environment variable. +func (c *Client) SetAddress(addr string) error { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + + parsedAddr, err := url.Parse(addr) + if err != nil { + return errwrap.Wrapf("failed to set address: {{err}}", err) + } + + c.config.modifyLock.Lock() + c.config.Address = addr + c.config.modifyLock.Unlock() + c.addr = parsedAddr + return nil +} + +// Address returns the Vault URL the client is configured to connect to +func (c *Client) Address() string { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + + return c.addr.String() +} + +// SetLimiter will set the rate limiter for this client. +// This method is thread-safe. +// rateLimit and burst are specified according to https://godoc.org/golang.org/x/time/rate#NewLimiter +func (c *Client) SetLimiter(rateLimit float64, burst int) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.Limiter = rate.NewLimiter(rate.Limit(rateLimit), burst) +} + +func (c *Client) Limiter() *rate.Limiter { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() + + return c.config.Limiter +} + +// SetMinRetryWait sets the minimum time to wait before retrying in the case of certain errors. +func (c *Client) SetMinRetryWait(retryWait time.Duration) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.MinRetryWait = retryWait +} + +func (c *Client) MinRetryWait() time.Duration { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() + + return c.config.MinRetryWait +} + +// SetMaxRetryWait sets the maximum time to wait before retrying in the case of certain errors. 
+func (c *Client) SetMaxRetryWait(retryWait time.Duration) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.MaxRetryWait = retryWait +} + +func (c *Client) MaxRetryWait() time.Duration { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() + + return c.config.MaxRetryWait +} + +// SetMaxRetries sets the number of retries that will be used in the case of certain errors +func (c *Client) SetMaxRetries(retries int) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.MaxRetries = retries +} + +func (c *Client) MaxRetries() int { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() + + return c.config.MaxRetries +} + +func (c *Client) SetSRVLookup(srv bool) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.SRVLookup = srv +} + +func (c *Client) SRVLookup() bool { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() + + return c.config.SRVLookup +} + +// SetCheckRetry sets the CheckRetry function to be used for future requests. +func (c *Client) SetCheckRetry(checkRetry retryablehttp.CheckRetry) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.CheckRetry = checkRetry +} + +func (c *Client) CheckRetry() retryablehttp.CheckRetry { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() + + return c.config.CheckRetry +} + +// SetClientTimeout sets the client request timeout +func (c *Client) SetClientTimeout(timeout time.Duration) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.Timeout = timeout +} + +func (c *Client) ClientTimeout() time.Duration { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() + + return c.config.Timeout +} + +func (c *Client) OutputCurlString() bool { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() + + return c.config.OutputCurlString +} + +func (c *Client) SetOutputCurlString(curl bool) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.OutputCurlString = curl +} + +// CurrentWrappingLookupFunc sets a lookup function that returns desired wrap TTLs +// for a given operation and path. +func (c *Client) CurrentWrappingLookupFunc() WrappingLookupFunc { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + return c.wrappingLookupFunc +} + +// SetWrappingLookupFunc sets a lookup function that returns desired wrap TTLs +// for a given operation and path. +func (c *Client) SetWrappingLookupFunc(lookupFunc WrappingLookupFunc) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + c.wrappingLookupFunc = lookupFunc +} + +// SetMFACreds sets the MFA credentials supplied either via the environment +// variable or via the command line. 
+func (c *Client) SetMFACreds(creds []string) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + c.mfaCreds = creds +} + +// SetNamespace sets the namespace supplied either via the environment +// variable or via the command line. +func (c *Client) SetNamespace(namespace string) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + c.setNamespace(namespace) +} + +func (c *Client) setNamespace(namespace string) { + if c.headers == nil { + c.headers = make(http.Header) + } + + c.headers.Set(consts.NamespaceHeaderName, namespace) +} + +// Token returns the access token being used by this client. It will +// return the empty string if there is no token set. +func (c *Client) Token() string { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + return c.token +} + +// SetToken sets the token directly. This won't perform any auth +// verification, it simply sets the token properly for future requests. +func (c *Client) SetToken(v string) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + c.token = v +} + +// ClearToken deletes the token if it is set or does nothing otherwise. +func (c *Client) ClearToken() { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + c.token = "" +} + +// Headers gets the current set of headers used for requests. This returns a +// copy; to modify it call AddHeader or SetHeaders. +func (c *Client) Headers() http.Header { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + + if c.headers == nil { + return nil + } + + ret := make(http.Header) + for k, v := range c.headers { + for _, val := range v { + ret[k] = append(ret[k], val) + } + } + + return ret +} + +// AddHeader allows a single header key/value pair to be added +// in a race-safe fashion. +func (c *Client) AddHeader(key, value string) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + c.headers.Add(key, value) +} + +// SetHeaders clears all previous headers and uses only the given +// ones going forward. +func (c *Client) SetHeaders(headers http.Header) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + c.headers = headers +} + +// SetBackoff sets the backoff function to be used for future requests. +func (c *Client) SetBackoff(backoff retryablehttp.Backoff) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.Backoff = backoff +} + +func (c *Client) SetLogger(logger retryablehttp.LeveledLogger) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.Logger = logger +} + +// SetCloneHeaders to allow headers to be copied whenever the client is cloned. +func (c *Client) SetCloneHeaders(cloneHeaders bool) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.CloneHeaders = cloneHeaders +} + +// CloneHeaders gets the configured CloneHeaders value. +func (c *Client) CloneHeaders() bool { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() + + return c.config.CloneHeaders +} + +// SetReadYourWrites to prevent reading stale cluster replication state. 
+func (c *Client) SetReadYourWrites(preventStaleReads bool) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + if preventStaleReads && c.replicationStateStore == nil { + c.replicationStateStore = &replicationStateStore{} + } else { + c.replicationStateStore = nil + } + + c.config.ReadYourWrites = preventStaleReads +} + +// ReadYourWrites gets the configured value of ReadYourWrites +func (c *Client) ReadYourWrites() bool { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() + + return c.config.ReadYourWrites +} + +// Clone creates a new client with the same configuration. Note that the same +// underlying http.Client is used; modifying the client from more than one +// goroutine at once may not be safe, so modify the client as needed and then +// clone. +// +// Also, only the client's config is currently copied; this means items not in +// the api.Config struct, such as policy override and wrapping function +// behavior, must currently then be set as desired on the new client. +func (c *Client) Clone() (*Client, error) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + + config := c.config + config.modifyLock.RLock() + defer config.modifyLock.RUnlock() + + newConfig := &Config{ + Address: config.Address, + HttpClient: config.HttpClient, + MinRetryWait: config.MinRetryWait, + MaxRetryWait: config.MaxRetryWait, + MaxRetries: config.MaxRetries, + Timeout: config.Timeout, + Backoff: config.Backoff, + CheckRetry: config.CheckRetry, + Logger: config.Logger, + Limiter: config.Limiter, + OutputCurlString: config.OutputCurlString, + AgentAddress: config.AgentAddress, + SRVLookup: config.SRVLookup, + CloneHeaders: config.CloneHeaders, + ReadYourWrites: config.ReadYourWrites, + } + client, err := NewClient(newConfig) + if err != nil { + return nil, err + } + + if config.CloneHeaders { + client.SetHeaders(c.Headers().Clone()) + } + + client.replicationStateStore = c.replicationStateStore + + return client, nil +} + +// SetPolicyOverride sets whether requests should be sent with the policy +// override flag to request overriding soft-mandatory Sentinel policies (both +// RGPs and EGPs) +func (c *Client) SetPolicyOverride(override bool) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + c.policyOverride = override +} + +// NewRequest creates a new raw request object to query the Vault server +// configured for this client. This is an advanced method and generally +// doesn't need to be called externally. 
+func (c *Client) NewRequest(method, requestPath string) *Request { + c.modifyLock.RLock() + addr := c.addr + token := c.token + mfaCreds := c.mfaCreds + wrappingLookupFunc := c.wrappingLookupFunc + policyOverride := c.policyOverride + c.modifyLock.RUnlock() + + host := addr.Host + // if SRV records exist (see https://tools.ietf.org/html/draft-andrews-http-srv-02), lookup the SRV + // record and take the highest match; this is not designed for high-availability, just discovery + // Internet Draft specifies that the SRV record is ignored if a port is given + if addr.Port() == "" && c.config.SRVLookup { + _, addrs, err := net.LookupSRV("http", "tcp", addr.Hostname()) + if err == nil && len(addrs) > 0 { + host = fmt.Sprintf("%s:%d", addrs[0].Target, addrs[0].Port) + } + } + + req := &Request{ + Method: method, + URL: &url.URL{ + User: addr.User, + Scheme: addr.Scheme, + Host: host, + Path: path.Join(addr.Path, requestPath), + }, + Host: addr.Host, + ClientToken: token, + Params: make(map[string][]string), + } + + var lookupPath string + switch { + case strings.HasPrefix(requestPath, "/v1/"): + lookupPath = strings.TrimPrefix(requestPath, "/v1/") + case strings.HasPrefix(requestPath, "v1/"): + lookupPath = strings.TrimPrefix(requestPath, "v1/") + default: + lookupPath = requestPath + } + + req.MFAHeaderVals = mfaCreds + + if wrappingLookupFunc != nil { + req.WrapTTL = wrappingLookupFunc(method, lookupPath) + } else { + req.WrapTTL = DefaultWrappingLookupFunc(method, lookupPath) + } + + req.Headers = c.Headers() + req.PolicyOverride = policyOverride + + return req +} + +// RawRequest performs the raw request given. This request may be against +// a Vault server not configured with this client. This is an advanced operation +// that generally won't need to be called externally. +func (c *Client) RawRequest(r *Request) (*Response, error) { + return c.RawRequestWithContext(context.Background(), r) +} + +// RawRequestWithContext performs the raw request given. This request may be against +// a Vault server not configured with this client. This is an advanced operation +// that generally won't need to be called externally. 
+func (c *Client) RawRequestWithContext(ctx context.Context, r *Request) (*Response, error) { + c.modifyLock.RLock() + token := c.token + + c.config.modifyLock.RLock() + limiter := c.config.Limiter + minRetryWait := c.config.MinRetryWait + maxRetryWait := c.config.MaxRetryWait + maxRetries := c.config.MaxRetries + checkRetry := c.config.CheckRetry + backoff := c.config.Backoff + httpClient := c.config.HttpClient + timeout := c.config.Timeout + outputCurlString := c.config.OutputCurlString + logger := c.config.Logger + c.config.modifyLock.RUnlock() + + c.modifyLock.RUnlock() + + for _, cb := range c.requestCallbacks { + cb(r) + } + + if c.config.ReadYourWrites { + c.replicationStateStore.requireState(r) + } + + if limiter != nil { + limiter.Wait(ctx) + } + + // Sanity check the token before potentially erroring from the API + idx := strings.IndexFunc(token, func(c rune) bool { + return !unicode.IsPrint(c) + }) + if idx != -1 { + return nil, fmt.Errorf("configured Vault token contains non-printable characters and cannot be used") + } + + redirectCount := 0 +START: + req, err := r.toRetryableHTTP() + if err != nil { + return nil, err + } + if req == nil { + return nil, fmt.Errorf("nil request created") + } + + if outputCurlString { + LastOutputStringError = &OutputStringError{ + Request: req, + TLSSkipVerify: c.config.HttpClient.Transport.(*http.Transport).TLSClientConfig.InsecureSkipVerify, + } + return nil, LastOutputStringError + } + + if timeout != 0 { + // Note: we purposefully do not call cancel manually. The reason is + // when canceled, the request.Body will EOF when reading due to the way + // it streams data in. Cancel will still be run when the timeout is + // hit, so this doesn't really harm anything. + ctx, _ = context.WithTimeout(ctx, timeout) + } + req.Request = req.Request.WithContext(ctx) + + if backoff == nil { + backoff = retryablehttp.LinearJitterBackoff + } + + if checkRetry == nil { + checkRetry = DefaultRetryPolicy + } + + client := &retryablehttp.Client{ + HTTPClient: httpClient, + RetryWaitMin: minRetryWait, + RetryWaitMax: maxRetryWait, + RetryMax: maxRetries, + Backoff: backoff, + CheckRetry: checkRetry, + Logger: logger, + ErrorHandler: retryablehttp.PassthroughErrorHandler, + } + + var result *Response + resp, err := client.Do(req) + if resp != nil { + result = &Response{Response: resp} + } + if err != nil { + if strings.Contains(err.Error(), "tls: oversized") { + err = errwrap.Wrapf( + "{{err}}\n\n"+ + "This error usually means that the server is running with TLS disabled\n"+ + "but the client is configured to use TLS. Please either enable TLS\n"+ + "on the server or run the client with -address set to an address\n"+ + "that uses the http protocol:\n\n"+ + " vault -address http://
\n\n"+ + "You can also set the VAULT_ADDR environment variable:\n\n\n"+ + " VAULT_ADDR=http://
vault \n\n"+ + "where
is replaced by the actual address to the server.", + err) + } + return result, err + } + + // Check for a redirect, only allowing for a single redirect + if (resp.StatusCode == 301 || resp.StatusCode == 302 || resp.StatusCode == 307) && redirectCount == 0 { + // Parse the updated location + respLoc, err := resp.Location() + if err != nil { + return result, err + } + + // Ensure a protocol downgrade doesn't happen + if req.URL.Scheme == "https" && respLoc.Scheme != "https" { + return result, fmt.Errorf("redirect would cause protocol downgrade") + } + + // Update the request + r.URL = respLoc + + // Reset the request body if any + if err := r.ResetJSONBody(); err != nil { + return result, err + } + + // Retry the request + redirectCount++ + goto START + } + + if result != nil { + for _, cb := range c.responseCallbacks { + cb(result) + } + + if c.config.ReadYourWrites { + c.replicationStateStore.recordState(result) + } + } + if err := result.Error(); err != nil { + return result, err + } + + return result, nil +} + +type ( + RequestCallback func(*Request) + ResponseCallback func(*Response) +) + +// WithRequestCallbacks makes a shallow clone of Client, modifies it to use +// the given callbacks, and returns it. Each of the callbacks will be invoked +// on every outgoing request. A client may be used to issue requests +// concurrently; any locking needed by callbacks invoked concurrently is the +// callback's responsibility. +func (c *Client) WithRequestCallbacks(callbacks ...RequestCallback) *Client { + c2 := *c + c2.modifyLock = sync.RWMutex{} + c2.requestCallbacks = callbacks + return &c2 +} + +// WithResponseCallbacks makes a shallow clone of Client, modifies it to use +// the given callbacks, and returns it. Each of the callbacks will be invoked +// on every received response. A client may be used to issue requests +// concurrently; any locking needed by callbacks invoked concurrently is the +// callback's responsibility. +func (c *Client) WithResponseCallbacks(callbacks ...ResponseCallback) *Client { + c2 := *c + c2.modifyLock = sync.RWMutex{} + c2.responseCallbacks = callbacks + return &c2 +} + +// RecordState returns a response callback that will record the state returned +// by Vault in a response header. +func RecordState(state *string) ResponseCallback { + return func(resp *Response) { + *state = resp.Header.Get(HeaderIndex) + } +} + +// RequireState returns a request callback that will add a request header to +// specify the state we require of Vault. This state was obtained from a +// response header seen previous, probably captured with RecordState. +func RequireState(states ...string) RequestCallback { + return func(req *Request) { + for _, s := range states { + req.Headers.Add(HeaderIndex, s) + } + } +} + +// compareReplicationStates returns 1 if s1 is newer or identical, -1 if s1 is older, and 0 +// if neither s1 or s2 is strictly greater. An error is returned if s1 or s2 +// are invalid or from different clusters. 
+func compareReplicationStates(s1, s2 string) (int, error) { + w1, err := ParseReplicationState(s1, nil) + if err != nil { + return 0, err + } + w2, err := ParseReplicationState(s2, nil) + if err != nil { + return 0, err + } + + if w1.ClusterID != w2.ClusterID { + return 0, fmt.Errorf("can't compare replication states with different ClusterIDs") + } + + switch { + case w1.LocalIndex >= w2.LocalIndex && w1.ReplicatedIndex >= w2.ReplicatedIndex: + return 1, nil + // We've already handled the case where both are equal above, so really we're + // asking here if one or both are lesser. + case w1.LocalIndex <= w2.LocalIndex && w1.ReplicatedIndex <= w2.ReplicatedIndex: + return -1, nil + } + + return 0, nil +} + +// MergeReplicationStates returns a merged array of replication states by iterating +// through all states in `old`. An iterated state is merged to the result before `new` +// based on the result of compareReplicationStates +func MergeReplicationStates(old []string, new string) []string { + if len(old) == 0 || len(old) > 2 { + return []string{new} + } + + var ret []string + for _, o := range old { + c, err := compareReplicationStates(o, new) + if err != nil { + return []string{new} + } + switch c { + case 1: + ret = append(ret, o) + case -1: + ret = append(ret, new) + case 0: + ret = append(ret, o, new) + } + } + return strutil.RemoveDuplicates(ret, false) +} + +func ParseReplicationState(raw string, hmacKey []byte) (*logical.WALState, error) { + cooked, err := base64.StdEncoding.DecodeString(raw) + if err != nil { + return nil, err + } + s := string(cooked) + + lastIndex := strings.LastIndexByte(s, ':') + if lastIndex == -1 { + return nil, fmt.Errorf("invalid full state header format") + } + state, stateHMACRaw := s[:lastIndex], s[lastIndex+1:] + stateHMAC, err := hex.DecodeString(stateHMACRaw) + if err != nil { + return nil, fmt.Errorf("invalid state header HMAC: %v, %w", stateHMACRaw, err) + } + + if len(hmacKey) != 0 { + hm := hmac.New(sha256.New, hmacKey) + hm.Write([]byte(state)) + if !hmac.Equal(hm.Sum(nil), stateHMAC) { + return nil, fmt.Errorf("invalid state header HMAC (mismatch)") + } + } + + pieces := strings.Split(state, ":") + if len(pieces) != 4 || pieces[0] != "v1" || pieces[1] == "" { + return nil, fmt.Errorf("invalid state header format") + } + localIndex, err := strconv.ParseUint(pieces[2], 10, 64) + if err != nil { + return nil, fmt.Errorf("invalid local index in state header: %w", err) + } + replicatedIndex, err := strconv.ParseUint(pieces[3], 10, 64) + if err != nil { + return nil, fmt.Errorf("invalid replicated index in state header: %w", err) + } + + return &logical.WALState{ + ClusterID: pieces[1], + LocalIndex: localIndex, + ReplicatedIndex: replicatedIndex, + }, nil +} + +// ForwardInconsistent returns a request callback that will add a request +// header which says: if the state required isn't present on the node receiving +// this request, forward it to the active node. This should be used in +// conjunction with RequireState. +func ForwardInconsistent() RequestCallback { + return func(req *Request) { + req.Headers.Set("X-Vault-Inconsistent", "forward-active-node") + } +} + +// ForwardAlways returns a request callback which adds a header telling any +// performance standbys handling the request to forward it to the active node. +// This feature must be enabled in Vault's configuration. 
+func ForwardAlways() RequestCallback { + return func(req *Request) { + req.Headers.Set("X-Vault-Forward", "active-node") + } +} + +// DefaultRetryPolicy is the default retry policy used by new Client objects. +// It is the same as retryablehttp.DefaultRetryPolicy except that it also retries +// 412 requests, which are returned by Vault when a X-Vault-Index header isn't +// satisfied. +func DefaultRetryPolicy(ctx context.Context, resp *http.Response, err error) (bool, error) { + retry, err := retryablehttp.DefaultRetryPolicy(ctx, resp, err) + if err != nil || retry { + return retry, err + } + if resp != nil && resp.StatusCode == 412 { + return true, nil + } + return false, nil +} + +// replicationStateStore is used to track cluster replication states +// in order to ensure proper read-after-write semantics for a Client. +type replicationStateStore struct { + m sync.RWMutex + store []string +} + +// recordState updates the store's replication states with the merger of all +// states. +func (w *replicationStateStore) recordState(resp *Response) { + w.m.Lock() + defer w.m.Unlock() + newState := resp.Header.Get(HeaderIndex) + if newState != "" { + w.store = MergeReplicationStates(w.store, newState) + } +} + +// requireState updates the Request with the store's current replication states. +func (w *replicationStateStore) requireState(req *Request) { + w.m.RLock() + defer w.m.RUnlock() + for _, s := range w.store { + req.Headers.Add(HeaderIndex, s) + } +} + +// states currently stored. +func (w *replicationStateStore) states() []string { + w.m.RLock() + defer w.m.RUnlock() + c := make([]string, len(w.store)) + copy(c, w.store) + return c +} diff --git a/vendor/github.com/hashicorp/vault/api/go.mod b/vendor/github.com/hashicorp/vault/api/go.mod new file mode 100644 index 0000000000..d2fd451cbe --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/go.mod @@ -0,0 +1,24 @@ +module github.com/hashicorp/vault/api + +go 1.13 + +replace github.com/hashicorp/vault/sdk => ../sdk + +require ( + github.com/cenkalti/backoff/v3 v3.0.0 + github.com/frankban/quicktest v1.13.0 // indirect + github.com/go-test/deep v1.0.2 + github.com/hashicorp/errwrap v1.1.0 + github.com/hashicorp/go-cleanhttp v0.5.1 + github.com/hashicorp/go-hclog v0.16.2 + github.com/hashicorp/go-multierror v1.1.1 + github.com/hashicorp/go-retryablehttp v0.6.6 + github.com/hashicorp/go-rootcerts v1.0.2 + github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1 + github.com/hashicorp/hcl v1.0.0 + github.com/hashicorp/vault/sdk v0.3.0 + github.com/mitchellh/mapstructure v1.4.2 + golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 + golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 + gopkg.in/square/go-jose.v2 v2.5.1 +) diff --git a/vendor/github.com/hashicorp/vault/api/go.sum b/vendor/github.com/hashicorp/vault/api/go.sum new file mode 100644 index 0000000000..d5e55d2f58 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/go.sum @@ -0,0 +1,326 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod 
h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/armon/go-metrics v0.3.9 h1:O2sNqxBdvq8Eq5xmzljcYzAORli6RWCvEym4cJf9m18= +github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c= +github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch/v5 v5.5.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= +github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= +github.com/fatih/color v1.7.0/go.mod 
h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= +github.com/frankban/quicktest v1.13.0 h1:yNZif1OkDfNoDfb9zZa9aXIpejNR4F23Wely0c+Qdqk= +github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-ldap/ldap/v3 v3.1.10/go.mod h1:5Zun81jBTabRaI8lzN7E1JjyEl1g6zI6u9pd8luAK4Q= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw= +github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp 
v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs= +github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-kms-wrapping/entropy v0.1.0/go.mod h1:d1g9WGtAunDNpek8jUIEJnBlbgKS1N2Q61QkHiZyR1g= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-plugin v1.4.3 h1:DXmvivbWD5qdiBts9TpBC7BYL1Aia5sxbRgQB+v6UZM= +github.com/hashicorp/go-plugin v1.4.3/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-retryablehttp v0.6.6 h1:HJunrbHTDDbBb/ay4kxa1n+dLmttUlnP3V9oNE4hmsM= +github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-secure-stdlib/base62 v0.1.1/go.mod h1:EdWO6czbmthiwZ3/PUsDV+UD1D5IRU4ActiaWGwt0Yw= +github.com/hashicorp/go-secure-stdlib/mlock v0.1.1 h1:cCRo8gK7oq6A2L6LICkUZ+/a5rLiRXFMf1Qd4xSwxTc= +github.com/hashicorp/go-secure-stdlib/mlock v0.1.1/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1 h1:78ki3QBevHwYrVxnyVeaEz+7WtifHhauYF23es/0KlI= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= +github.com/hashicorp/go-secure-stdlib/password 
v0.1.1/go.mod h1:9hH302QllNwu1o2TGYtSk8I8kTAN0ca1EHpwhm5Mmzo= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.1 h1:nd0HIW15E6FG1MsnArYaHfuw9C2zgzM8LxkG5Ty/788= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U= +github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.1/go.mod h1:l8slYwnJA26yBz+ErHpp2IRCLr0vuOMGBORIz4rRiAs= +github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= +github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= +github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-isatty v0.0.3/go.mod 
h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.2 h1:6h7AQ0yhTcIsmFmnAwQls75jp2Gzs4iB8W7pjMO+rqo= +github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pierrec/lz4 v2.5.2+incompatible h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUMhxq9m9ZXI= +github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/prometheus/client_golang 
v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= +github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 h1:/UOmuWzQfxxo9UtlXMwuQU8CMgg1eZXqTRwkSQJWKOI= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod 
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 h1:NusfzzA6yGQ+ua51ck7E3omNUX/JuqbFSaRGqU8CcLI= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod 
h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.41.0 h1:f+PlOh7QV4iIJkPrx5NQ7qaNGFQ3OTse67yaDHfju4E= +google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w= +gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/github.com/hashicorp/vault/api/help.go b/vendor/github.com/hashicorp/vault/api/help.go new file mode 100644 index 0000000000..321bd597c1 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/help.go @@ -0,0 +1,30 @@ +package api + +import ( + "context" + "fmt" +) + +// Help reads the help information for the given path. 
+func (c *Client) Help(path string) (*Help, error) { + r := c.NewRequest("GET", fmt.Sprintf("/v1/%s", path)) + r.Params.Add("help", "1") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result Help + err = resp.DecodeJSON(&result) + return &result, err +} + +type Help struct { + Help string `json:"help"` + SeeAlso []string `json:"see_also"` + OpenAPI map[string]interface{} `json:"openapi"` +} diff --git a/vendor/github.com/hashicorp/vault/api/lifetime_watcher.go b/vendor/github.com/hashicorp/vault/api/lifetime_watcher.go new file mode 100644 index 0000000000..b1d81332f1 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/lifetime_watcher.go @@ -0,0 +1,396 @@ +package api + +import ( + "errors" + "math/rand" + "sync" + "time" + + "github.com/cenkalti/backoff/v3" +) + +var ( + ErrLifetimeWatcherMissingInput = errors.New("missing input") + ErrLifetimeWatcherMissingSecret = errors.New("missing secret") + ErrLifetimeWatcherNotRenewable = errors.New("secret is not renewable") + ErrLifetimeWatcherNoSecretData = errors.New("returned empty secret data") + + // Deprecated; kept for compatibility + ErrRenewerMissingInput = errors.New("missing input to renewer") + ErrRenewerMissingSecret = errors.New("missing secret to renew") + ErrRenewerNotRenewable = errors.New("secret is not renewable") + ErrRenewerNoSecretData = errors.New("returned empty secret data") + + // DefaultLifetimeWatcherRenewBuffer is the default size of the buffer for renew + // messages on the channel. + DefaultLifetimeWatcherRenewBuffer = 5 + // Deprecated: kept for backwards compatibility + DefaultRenewerRenewBuffer = 5 +) + +type RenewBehavior uint + +const ( + // RenewBehaviorIgnoreErrors means we will attempt to keep renewing until + // we hit the lifetime threshold. It also ignores errors stemming from + // passing a non-renewable lease in. In practice, this means you simply + // reauthenticate/refetch credentials when the watcher exits. This is the + // default. + RenewBehaviorIgnoreErrors RenewBehavior = iota + + // RenewBehaviorRenewDisabled turns off renewal attempts entirely. This + // allows you to simply watch lifetime and have the watcher return at a + // reasonable threshold without actually making Vault calls. + RenewBehaviorRenewDisabled + + // RenewBehaviorErrorOnErrors is the "legacy" behavior which always exits + // on some kind of error + RenewBehaviorErrorOnErrors +) + +// LifetimeWatcher is a process for watching lifetime of a secret. +// +// watcher, err := client.NewLifetimeWatcher(&LifetimeWatcherInput{ +// Secret: mySecret, +// }) +// go watcher.Start() +// defer watcher.Stop() +// +// for { +// select { +// case err := <-watcher.DoneCh(): +// if err != nil { +// log.Fatal(err) +// } +// +// // Renewal is now over +// case renewal := <-watcher.RenewCh(): +// log.Printf("Successfully renewed: %#v", renewal) +// } +// } +// +// +// `DoneCh` will return if renewal fails, or if the remaining lease duration is +// under a built-in threshold and either renewing is not extending it or +// renewing is disabled. In both cases, the caller should attempt a re-read of +// the secret. Clients should check the return value of the channel to see if +// renewal was successful. 
+type LifetimeWatcher struct { + l sync.Mutex + + client *Client + secret *Secret + grace time.Duration + random *rand.Rand + increment int + doneCh chan error + renewCh chan *RenewOutput + renewBehavior RenewBehavior + + stopped bool + stopCh chan struct{} + + errLifetimeWatcherNotRenewable error + errLifetimeWatcherNoSecretData error +} + +// LifetimeWatcherInput is used as input to the renew function. +type LifetimeWatcherInput struct { + // Secret is the secret to renew + Secret *Secret + + // DEPRECATED: this does not do anything. + Grace time.Duration + + // Rand is the randomizer to use for underlying randomization. If not + // provided, one will be generated and seeded automatically. If provided, it + // is assumed to have already been seeded. + Rand *rand.Rand + + // RenewBuffer is the size of the buffered channel where renew messages are + // dispatched. + RenewBuffer int + + // The new TTL, in seconds, that should be set on the lease. The TTL set + // here may or may not be honored by the vault server, based on Vault + // configuration or any associated max TTL values. + Increment int + + // RenewBehavior controls what happens when a renewal errors or the + // passed-in secret is not renewable. + RenewBehavior RenewBehavior +} + +// RenewOutput is the metadata returned to the client (if it's listening) to +// renew messages. +type RenewOutput struct { + // RenewedAt is the timestamp when the renewal took place (UTC). + RenewedAt time.Time + + // Secret is the underlying renewal data. It's the same struct as all data + // that is returned from Vault, but since this is renewal data, it will not + // usually include the secret itself. + Secret *Secret +} + +// NewLifetimeWatcher creates a new renewer from the given input. +func (c *Client) NewLifetimeWatcher(i *LifetimeWatcherInput) (*LifetimeWatcher, error) { + if i == nil { + return nil, ErrLifetimeWatcherMissingInput + } + + secret := i.Secret + if secret == nil { + return nil, ErrLifetimeWatcherMissingSecret + } + + random := i.Rand + if random == nil { + random = rand.New(rand.NewSource(int64(time.Now().Nanosecond()))) + } + + renewBuffer := i.RenewBuffer + if renewBuffer == 0 { + renewBuffer = DefaultLifetimeWatcherRenewBuffer + } + + return &LifetimeWatcher{ + client: c, + secret: secret, + increment: i.Increment, + random: random, + doneCh: make(chan error, 1), + renewCh: make(chan *RenewOutput, renewBuffer), + renewBehavior: i.RenewBehavior, + + stopped: false, + stopCh: make(chan struct{}), + + errLifetimeWatcherNotRenewable: ErrLifetimeWatcherNotRenewable, + errLifetimeWatcherNoSecretData: ErrLifetimeWatcherNoSecretData, + }, nil +} + +// Deprecated: exists only for backwards compatibility. Calls +// NewLifetimeWatcher, and sets compatibility flags. +func (c *Client) NewRenewer(i *LifetimeWatcherInput) (*LifetimeWatcher, error) { + if i == nil { + return nil, ErrRenewerMissingInput + } + + secret := i.Secret + if secret == nil { + return nil, ErrRenewerMissingSecret + } + + renewer, err := c.NewLifetimeWatcher(i) + if err != nil { + return nil, err + } + + renewer.renewBehavior = RenewBehaviorErrorOnErrors + renewer.errLifetimeWatcherNotRenewable = ErrRenewerNotRenewable + renewer.errLifetimeWatcherNoSecretData = ErrRenewerNoSecretData + return renewer, err +} + +// DoneCh returns the channel where the renewer will publish when renewal stops. +// If there is an error, this will be an error. 
+func (r *LifetimeWatcher) DoneCh() <-chan error { + return r.doneCh +} + +// RenewCh is a channel that receives a message when a successful renewal takes +// place and includes metadata about the renewal. +func (r *LifetimeWatcher) RenewCh() <-chan *RenewOutput { + return r.renewCh +} + +// Stop stops the renewer. +func (r *LifetimeWatcher) Stop() { + r.l.Lock() + defer r.l.Unlock() + + if !r.stopped { + close(r.stopCh) + r.stopped = true + } +} + +// Start starts a background process for watching the lifetime of this secret. +// If renewal is enabled, when the secret has auth data, this attempts to renew +// the auth (token); When the secret has a lease, this attempts to renew the +// lease. +func (r *LifetimeWatcher) Start() { + r.doneCh <- r.doRenew() +} + +// Renew is for comnpatibility with the legacy api.Renewer. Calling Renew +// simply chains to Start. +func (r *LifetimeWatcher) Renew() { + r.Start() +} + +type renewFunc func(string, int) (*Secret, error) + +// doRenew is a helper for renewing authentication. +func (r *LifetimeWatcher) doRenew() error { + defaultInitialRetryInterval := 10 * time.Second + switch { + case r.secret.Auth != nil: + return r.doRenewWithOptions(true, !r.secret.Auth.Renewable, + r.secret.Auth.LeaseDuration, r.secret.Auth.ClientToken, + r.client.Auth().Token().RenewTokenAsSelf, defaultInitialRetryInterval) + default: + return r.doRenewWithOptions(false, !r.secret.Renewable, + r.secret.LeaseDuration, r.secret.LeaseID, + r.client.Sys().Renew, defaultInitialRetryInterval) + } +} + +func (r *LifetimeWatcher) doRenewWithOptions(tokenMode bool, nonRenewable bool, initLeaseDuration int, credString string, + renew renewFunc, initialRetryInterval time.Duration) error { + if credString == "" || + (nonRenewable && r.renewBehavior == RenewBehaviorErrorOnErrors) { + return r.errLifetimeWatcherNotRenewable + } + + initialTime := time.Now() + priorDuration := time.Duration(initLeaseDuration) * time.Second + r.calculateGrace(priorDuration) + var errorBackoff backoff.BackOff + + for { + // Check if we are stopped. + select { + case <-r.stopCh: + return nil + default: + } + + var remainingLeaseDuration time.Duration + fallbackLeaseDuration := initialTime.Add(priorDuration).Sub(time.Now()) + var renewal *Secret + var err error + + switch { + case nonRenewable || r.renewBehavior == RenewBehaviorRenewDisabled: + // Can't or won't renew, just keep the same expiration so we exit + // when it's reauthentication time + remainingLeaseDuration = fallbackLeaseDuration + + default: + // Renew the token + renewal, err = renew(credString, r.increment) + if err != nil || renewal == nil || (tokenMode && renewal.Auth == nil) { + if r.renewBehavior == RenewBehaviorErrorOnErrors { + if err != nil { + return err + } + if renewal == nil || (tokenMode && renewal.Auth == nil) { + return r.errLifetimeWatcherNoSecretData + } + } + + // Calculate remaining duration until initial token lease expires + remainingLeaseDuration = initialTime.Add(time.Duration(initLeaseDuration) * time.Second).Sub(time.Now()) + if errorBackoff == nil { + errorBackoff = &backoff.ExponentialBackOff{ + MaxElapsedTime: remainingLeaseDuration, + RandomizationFactor: backoff.DefaultRandomizationFactor, + InitialInterval: initialRetryInterval, + MaxInterval: 5 * time.Minute, + Multiplier: 2, + Clock: backoff.SystemClock, + } + errorBackoff.Reset() + } + break + } + errorBackoff = nil + + // Push a message that a renewal took place. 
+ select { + case r.renewCh <- &RenewOutput{time.Now().UTC(), renewal}: + default: + } + + // Possibly error if we are not renewable + if ((tokenMode && !renewal.Auth.Renewable) || (!tokenMode && !renewal.Renewable)) && + r.renewBehavior == RenewBehaviorErrorOnErrors { + return r.errLifetimeWatcherNotRenewable + } + + // Reset initial time + initialTime = time.Now() + + // Grab the lease duration + initLeaseDuration = renewal.LeaseDuration + if tokenMode { + initLeaseDuration = renewal.Auth.LeaseDuration + } + + remainingLeaseDuration = time.Duration(initLeaseDuration) * time.Second + } + + var sleepDuration time.Duration + + if errorBackoff != nil { + sleepDuration = errorBackoff.NextBackOff() + if sleepDuration == backoff.Stop { + return err + } + } else { + // We keep evaluating a new grace period so long as the lease is + // extending. Once it stops extending, we've hit the max and need to + // rely on the grace duration. + if remainingLeaseDuration > priorDuration { + r.calculateGrace(remainingLeaseDuration) + } + priorDuration = remainingLeaseDuration + + // The sleep duration is set to 2/3 of the current lease duration plus + // 1/3 of the current grace period, which adds jitter. + sleepDuration = time.Duration(float64(remainingLeaseDuration.Nanoseconds())*2/3 + float64(r.grace.Nanoseconds())/3) + } + + // If we are within grace, return now; or, if the amount of time we + // would sleep would land us in the grace period. This helps with short + // tokens; for example, you don't want a current lease duration of 4 + // seconds, a grace period of 3 seconds, and end up sleeping for more + // than three of those seconds and having a very small budget of time + // to renew. + if remainingLeaseDuration <= r.grace || remainingLeaseDuration-sleepDuration <= r.grace { + return nil + } + + select { + case <-r.stopCh: + return nil + case <-time.After(sleepDuration): + continue + } + } +} + +// calculateGrace calculates the grace period based on a reasonable set of +// assumptions given the total lease time; it also adds some jitter to not have +// clients be in sync. +func (r *LifetimeWatcher) calculateGrace(leaseDuration time.Duration) { + if leaseDuration <= 0 { + r.grace = 0 + return + } + + leaseNanos := float64(leaseDuration.Nanoseconds()) + jitterMax := 0.1 * leaseNanos + + // For a given lease duration, we want to allow 80-90% of that to elapse, + // so the remaining amount is the grace period + r.grace = time.Duration(jitterMax) + time.Duration(uint64(r.random.Int63())%uint64(jitterMax)) +} + +type ( + Renewer = LifetimeWatcher + RenewerInput = LifetimeWatcherInput +) diff --git a/vendor/github.com/hashicorp/vault/api/logical.go b/vendor/github.com/hashicorp/vault/api/logical.go new file mode 100644 index 0000000000..f8f8bc5376 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/logical.go @@ -0,0 +1,311 @@ +package api + +import ( + "bytes" + "context" + "fmt" + "io" + "net/http" + "net/url" + "os" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/vault/sdk/helper/jsonutil" +) + +const ( + wrappedResponseLocation = "cubbyhole/response" +) + +var ( + // The default TTL that will be used with `sys/wrapping/wrap`, can be + // changed + DefaultWrappingTTL = "5m" + + // The default function used if no other function is set. It honors the env + // var to set the wrap TTL. The default wrap TTL will apply when when writing + // to `sys/wrapping/wrap` when the env var is not set. 
+ DefaultWrappingLookupFunc = func(operation, path string) string { + if os.Getenv(EnvVaultWrapTTL) != "" { + return os.Getenv(EnvVaultWrapTTL) + } + + if (operation == "PUT" || operation == "POST") && path == "sys/wrapping/wrap" { + return DefaultWrappingTTL + } + + return "" + } +) + +// Logical is used to perform logical backend operations on Vault. +type Logical struct { + c *Client +} + +// Logical is used to return the client for logical-backend API calls. +func (c *Client) Logical() *Logical { + return &Logical{c: c} +} + +func (c *Logical) Read(path string) (*Secret, error) { + return c.ReadWithData(path, nil) +} + +func (c *Logical) ReadWithData(path string, data map[string][]string) (*Secret, error) { + r := c.c.NewRequest("GET", "/v1/"+path) + + var values url.Values + for k, v := range data { + if values == nil { + values = make(url.Values) + } + for _, val := range v { + values.Add(k, val) + } + } + + if values != nil { + r.Params = values + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if resp != nil { + defer resp.Body.Close() + } + if resp != nil && resp.StatusCode == 404 { + secret, parseErr := ParseSecret(resp.Body) + switch parseErr { + case nil: + case io.EOF: + return nil, nil + default: + return nil, parseErr + } + if secret != nil && (len(secret.Warnings) > 0 || len(secret.Data) > 0) { + return secret, nil + } + return nil, nil + } + if err != nil { + return nil, err + } + + return ParseSecret(resp.Body) +} + +func (c *Logical) List(path string) (*Secret, error) { + r := c.c.NewRequest("LIST", "/v1/"+path) + // Set this for broader compatibility, but we use LIST above to be able to + // handle the wrapping lookup function + r.Method = "GET" + r.Params.Set("list", "true") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if resp != nil { + defer resp.Body.Close() + } + if resp != nil && resp.StatusCode == 404 { + secret, parseErr := ParseSecret(resp.Body) + switch parseErr { + case nil: + case io.EOF: + return nil, nil + default: + return nil, parseErr + } + if secret != nil && (len(secret.Warnings) > 0 || len(secret.Data) > 0) { + return secret, nil + } + return nil, nil + } + if err != nil { + return nil, err + } + + return ParseSecret(resp.Body) +} + +func (c *Logical) Write(path string, data map[string]interface{}) (*Secret, error) { + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + + r := c.c.NewRequest("PUT", "/v1/"+path) + if err := r.SetJSONBody(data); err != nil { + return nil, err + } + + return c.write(ctx, path, r) +} + +func (c *Logical) JSONMergePatch(ctx context.Context, path string, data map[string]interface{}) (*Secret, error) { + r := c.c.NewRequest("PATCH", "/v1/"+path) + r.Headers = http.Header{ + "Content-Type": []string{"application/merge-patch+json"}, + } + if err := r.SetJSONBody(data); err != nil { + return nil, err + } + + return c.write(ctx, path, r) +} + +func (c *Logical) WriteBytes(path string, data []byte) (*Secret, error) { + r := c.c.NewRequest("PUT", "/v1/"+path) + r.BodyBytes = data + + return c.write(context.Background(), path, r) +} + +func (c *Logical) write(ctx context.Context, path string, request *Request) (*Secret, error) { + resp, err := c.c.RawRequestWithContext(ctx, request) + if resp != nil { + defer resp.Body.Close() + } + if resp != nil && resp.StatusCode == 404 { + secret, parseErr := ParseSecret(resp.Body) + 
switch parseErr { + case nil: + case io.EOF: + return nil, nil + default: + return nil, parseErr + } + if secret != nil && (len(secret.Warnings) > 0 || len(secret.Data) > 0) { + return secret, err + } + } + if err != nil { + return nil, err + } + + return ParseSecret(resp.Body) +} + +func (c *Logical) Delete(path string) (*Secret, error) { + return c.DeleteWithData(path, nil) +} + +func (c *Logical) DeleteWithData(path string, data map[string][]string) (*Secret, error) { + r := c.c.NewRequest("DELETE", "/v1/"+path) + + var values url.Values + for k, v := range data { + if values == nil { + values = make(url.Values) + } + for _, val := range v { + values.Add(k, val) + } + } + + if values != nil { + r.Params = values + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if resp != nil { + defer resp.Body.Close() + } + if resp != nil && resp.StatusCode == 404 { + secret, parseErr := ParseSecret(resp.Body) + switch parseErr { + case nil: + case io.EOF: + return nil, nil + default: + return nil, parseErr + } + if secret != nil && (len(secret.Warnings) > 0 || len(secret.Data) > 0) { + return secret, err + } + } + if err != nil { + return nil, err + } + + return ParseSecret(resp.Body) +} + +func (c *Logical) Unwrap(wrappingToken string) (*Secret, error) { + var data map[string]interface{} + if wrappingToken != "" { + if c.c.Token() == "" { + c.c.SetToken(wrappingToken) + } else if wrappingToken != c.c.Token() { + data = map[string]interface{}{ + "token": wrappingToken, + } + } + } + + r := c.c.NewRequest("PUT", "/v1/sys/wrapping/unwrap") + if err := r.SetJSONBody(data); err != nil { + return nil, err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if resp != nil { + defer resp.Body.Close() + } + if resp == nil || resp.StatusCode != 404 { + if err != nil { + return nil, err + } + if resp == nil { + return nil, nil + } + return ParseSecret(resp.Body) + } + + // In the 404 case this may actually be a wrapped 404 error + secret, parseErr := ParseSecret(resp.Body) + switch parseErr { + case nil: + case io.EOF: + return nil, nil + default: + return nil, parseErr + } + if secret != nil && (len(secret.Warnings) > 0 || len(secret.Data) > 0) { + return secret, nil + } + + // Otherwise this might be an old-style wrapping token so attempt the old + // method + if wrappingToken != "" { + origToken := c.c.Token() + defer c.c.SetToken(origToken) + c.c.SetToken(wrappingToken) + } + + secret, err = c.Read(wrappedResponseLocation) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("error reading %q: {{err}}", wrappedResponseLocation), err) + } + if secret == nil { + return nil, fmt.Errorf("no value found at %q", wrappedResponseLocation) + } + if secret.Data == nil { + return nil, fmt.Errorf("\"data\" not found in wrapping response") + } + if _, ok := secret.Data["response"]; !ok { + return nil, fmt.Errorf("\"response\" not found in wrapping response \"data\" map") + } + + wrappedSecret := new(Secret) + buf := bytes.NewBufferString(secret.Data["response"].(string)) + if err := jsonutil.DecodeJSONFromReader(buf, wrappedSecret); err != nil { + return nil, errwrap.Wrapf("error unmarshalling wrapped secret: {{err}}", err) + } + + return wrappedSecret, nil +} diff --git a/vendor/github.com/hashicorp/vault/api/output_string.go b/vendor/github.com/hashicorp/vault/api/output_string.go new file mode 100644 index 0000000000..8b654ad820 --- 
/dev/null +++ b/vendor/github.com/hashicorp/vault/api/output_string.go @@ -0,0 +1,73 @@ +package api + +import ( + "fmt" + "strings" + + retryablehttp "github.com/hashicorp/go-retryablehttp" +) + +const ( + ErrOutputStringRequest = "output a string, please" +) + +var LastOutputStringError *OutputStringError + +type OutputStringError struct { + *retryablehttp.Request + TLSSkipVerify bool + parsingError error + parsedCurlString string +} + +func (d *OutputStringError) Error() string { + if d.parsedCurlString == "" { + d.parseRequest() + if d.parsingError != nil { + return d.parsingError.Error() + } + } + + return ErrOutputStringRequest +} + +func (d *OutputStringError) parseRequest() { + body, err := d.Request.BodyBytes() + if err != nil { + d.parsingError = err + return + } + + // Build cURL string + d.parsedCurlString = "curl " + if d.TLSSkipVerify { + d.parsedCurlString += "--insecure " + } + if d.Request.Method != "GET" { + d.parsedCurlString = fmt.Sprintf("%s-X %s ", d.parsedCurlString, d.Request.Method) + } + for k, v := range d.Request.Header { + for _, h := range v { + if strings.ToLower(k) == "x-vault-token" { + h = `$(vault print token)` + } + d.parsedCurlString = fmt.Sprintf("%s-H \"%s: %s\" ", d.parsedCurlString, k, h) + } + } + + if len(body) > 0 { + // We need to escape single quotes since that's what we're using to + // quote the body + escapedBody := strings.Replace(string(body), "'", "'\"'\"'", -1) + d.parsedCurlString = fmt.Sprintf("%s-d '%s' ", d.parsedCurlString, escapedBody) + } + + d.parsedCurlString = fmt.Sprintf("%s%s", d.parsedCurlString, d.Request.URL.String()) +} + +func (d *OutputStringError) CurlString() string { + if d.parsedCurlString == "" { + d.parseRequest() + } + return d.parsedCurlString +} diff --git a/vendor/github.com/hashicorp/vault/api/plugin_helpers.go b/vendor/github.com/hashicorp/vault/api/plugin_helpers.go new file mode 100644 index 0000000000..c2978b388f --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/plugin_helpers.go @@ -0,0 +1,189 @@ +package api + +import ( + "crypto/tls" + "crypto/x509" + "encoding/base64" + "errors" + "flag" + "net/url" + "os" + + squarejwt "gopkg.in/square/go-jose.v2/jwt" + + "github.com/hashicorp/errwrap" +) + +var ( + // PluginMetadataModeEnv is an ENV name used to disable TLS communication + // to bootstrap mounting plugins. + PluginMetadataModeEnv = "VAULT_PLUGIN_METADATA_MODE" + + // PluginUnwrapTokenEnv is the ENV name used to pass unwrap tokens to the + // plugin. + PluginUnwrapTokenEnv = "VAULT_UNWRAP_TOKEN" +) + +// PluginAPIClientMeta is a helper that plugins can use to configure TLS connections +// back to Vault. +type PluginAPIClientMeta struct { + // These are set by the command line flags. 
+ flagCACert string + flagCAPath string + flagClientCert string + flagClientKey string + flagInsecure bool +} + +// FlagSet returns the flag set for configuring the TLS connection +func (f *PluginAPIClientMeta) FlagSet() *flag.FlagSet { + fs := flag.NewFlagSet("vault plugin settings", flag.ContinueOnError) + + fs.StringVar(&f.flagCACert, "ca-cert", "", "") + fs.StringVar(&f.flagCAPath, "ca-path", "", "") + fs.StringVar(&f.flagClientCert, "client-cert", "", "") + fs.StringVar(&f.flagClientKey, "client-key", "", "") + fs.BoolVar(&f.flagInsecure, "tls-skip-verify", false, "") + + return fs +} + +// GetTLSConfig will return a TLSConfig based off the values from the flags +func (f *PluginAPIClientMeta) GetTLSConfig() *TLSConfig { + // If we need custom TLS configuration, then set it + if f.flagCACert != "" || f.flagCAPath != "" || f.flagClientCert != "" || f.flagClientKey != "" || f.flagInsecure { + t := &TLSConfig{ + CACert: f.flagCACert, + CAPath: f.flagCAPath, + ClientCert: f.flagClientCert, + ClientKey: f.flagClientKey, + TLSServerName: "", + Insecure: f.flagInsecure, + } + + return t + } + + return nil +} + +// VaultPluginTLSProvider is run inside a plugin and retrieves the response +// wrapped TLS certificate from vault. It returns a configured TLS Config. +func VaultPluginTLSProvider(apiTLSConfig *TLSConfig) func() (*tls.Config, error) { + if os.Getenv(PluginMetadataModeEnv) == "true" { + return nil + } + + return func() (*tls.Config, error) { + unwrapToken := os.Getenv(PluginUnwrapTokenEnv) + + parsedJWT, err := squarejwt.ParseSigned(unwrapToken) + if err != nil { + return nil, errwrap.Wrapf("error parsing wrapping token: {{err}}", err) + } + + allClaims := make(map[string]interface{}) + if err = parsedJWT.UnsafeClaimsWithoutVerification(&allClaims); err != nil { + return nil, errwrap.Wrapf("error parsing claims from wrapping token: {{err}}", err) + } + + addrClaimRaw, ok := allClaims["addr"] + if !ok { + return nil, errors.New("could not validate addr claim") + } + vaultAddr, ok := addrClaimRaw.(string) + if !ok { + return nil, errors.New("could not parse addr claim") + } + if vaultAddr == "" { + return nil, errors.New(`no vault api_addr found`) + } + + // Sanity check the value + if _, err := url.Parse(vaultAddr); err != nil { + return nil, errwrap.Wrapf("error parsing the vault api_addr: {{err}}", err) + } + + // Unwrap the token + clientConf := DefaultConfig() + clientConf.Address = vaultAddr + if apiTLSConfig != nil { + err := clientConf.ConfigureTLS(apiTLSConfig) + if err != nil { + return nil, errwrap.Wrapf("error configuring api client {{err}}", err) + } + } + client, err := NewClient(clientConf) + if err != nil { + return nil, errwrap.Wrapf("error during api client creation: {{err}}", err) + } + + // Reset token value to make sure nothing has been set by default + client.ClearToken() + + secret, err := client.Logical().Unwrap(unwrapToken) + if err != nil { + return nil, errwrap.Wrapf("error during token unwrap request: {{err}}", err) + } + if secret == nil { + return nil, errors.New("error during token unwrap request: secret is nil") + } + + // Retrieve and parse the server's certificate + serverCertBytesRaw, ok := secret.Data["ServerCert"].(string) + if !ok { + return nil, errors.New("error unmarshalling certificate") + } + + serverCertBytes, err := base64.StdEncoding.DecodeString(serverCertBytesRaw) + if err != nil { + return nil, errwrap.Wrapf("error parsing certificate: {{err}}", err) + } + + serverCert, err := x509.ParseCertificate(serverCertBytes) + if err != nil { + 
return nil, errwrap.Wrapf("error parsing certificate: {{err}}", err) + } + + // Retrieve and parse the server's private key + serverKeyB64, ok := secret.Data["ServerKey"].(string) + if !ok { + return nil, errors.New("error unmarshalling certificate") + } + + serverKeyRaw, err := base64.StdEncoding.DecodeString(serverKeyB64) + if err != nil { + return nil, errwrap.Wrapf("error parsing certificate: {{err}}", err) + } + + serverKey, err := x509.ParseECPrivateKey(serverKeyRaw) + if err != nil { + return nil, errwrap.Wrapf("error parsing certificate: {{err}}", err) + } + + // Add CA cert to the cert pool + caCertPool := x509.NewCertPool() + caCertPool.AddCert(serverCert) + + // Build a certificate object out of the server's cert and private key. + cert := tls.Certificate{ + Certificate: [][]byte{serverCertBytes}, + PrivateKey: serverKey, + Leaf: serverCert, + } + + // Setup TLS config + tlsConfig := &tls.Config{ + ClientCAs: caCertPool, + RootCAs: caCertPool, + ClientAuth: tls.RequireAndVerifyClientCert, + // TLS 1.2 minimum + MinVersion: tls.VersionTLS12, + Certificates: []tls.Certificate{cert}, + ServerName: serverCert.Subject.CommonName, + } + tlsConfig.BuildNameToCertificate() + + return tlsConfig, nil + } +} diff --git a/vendor/github.com/hashicorp/vault/api/request.go b/vendor/github.com/hashicorp/vault/api/request.go new file mode 100644 index 0000000000..1cbbc62f90 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/request.go @@ -0,0 +1,148 @@ +package api + +import ( + "bytes" + "encoding/json" + "io" + "io/ioutil" + "net/http" + "net/url" + + "github.com/hashicorp/vault/sdk/helper/consts" + + retryablehttp "github.com/hashicorp/go-retryablehttp" +) + +// Request is a raw request configuration structure used to initiate +// API requests to the Vault server. +type Request struct { + Method string + URL *url.URL + Host string + Params url.Values + Headers http.Header + ClientToken string + MFAHeaderVals []string + WrapTTL string + Obj interface{} + + // When possible, use BodyBytes as it is more efficient due to how the + // retry logic works + BodyBytes []byte + + // Fallback + Body io.Reader + BodySize int64 + + // Whether to request overriding soft-mandatory Sentinel policies (RGPs and + // EGPs). If set, the override flag will take effect for all policies + // evaluated during the request. + PolicyOverride bool +} + +// SetJSONBody is used to set a request body that is a JSON-encoded value. +func (r *Request) SetJSONBody(val interface{}) error { + buf, err := json.Marshal(val) + if err != nil { + return err + } + + r.Obj = val + r.BodyBytes = buf + return nil +} + +// ResetJSONBody is used to reset the body for a redirect +func (r *Request) ResetJSONBody() error { + if r.BodyBytes == nil { + return nil + } + return r.SetJSONBody(r.Obj) +} + +// DEPRECATED: ToHTTP turns this request into a valid *http.Request for use +// with the net/http package. 
+func (r *Request) ToHTTP() (*http.Request, error) { + req, err := r.toRetryableHTTP() + if err != nil { + return nil, err + } + + switch { + case r.BodyBytes == nil && r.Body == nil: + // No body + + case r.BodyBytes != nil: + req.Request.Body = ioutil.NopCloser(bytes.NewReader(r.BodyBytes)) + + default: + if c, ok := r.Body.(io.ReadCloser); ok { + req.Request.Body = c + } else { + req.Request.Body = ioutil.NopCloser(r.Body) + } + } + + return req.Request, nil +} + +func (r *Request) toRetryableHTTP() (*retryablehttp.Request, error) { + // Encode the query parameters + r.URL.RawQuery = r.Params.Encode() + + // Create the HTTP request, defaulting to retryable + var req *retryablehttp.Request + + var err error + var body interface{} + + switch { + case r.BodyBytes == nil && r.Body == nil: + // No body + + case r.BodyBytes != nil: + // Use bytes, it's more efficient + body = r.BodyBytes + + default: + body = r.Body + } + + req, err = retryablehttp.NewRequest(r.Method, r.URL.RequestURI(), body) + if err != nil { + return nil, err + } + + req.URL.User = r.URL.User + req.URL.Scheme = r.URL.Scheme + req.URL.Host = r.URL.Host + req.Host = r.Host + + if r.Headers != nil { + for header, vals := range r.Headers { + for _, val := range vals { + req.Header.Add(header, val) + } + } + } + + if len(r.ClientToken) != 0 { + req.Header.Set(consts.AuthHeaderName, r.ClientToken) + } + + if len(r.WrapTTL) != 0 { + req.Header.Set("X-Vault-Wrap-TTL", r.WrapTTL) + } + + if len(r.MFAHeaderVals) != 0 { + for _, mfaHeaderVal := range r.MFAHeaderVals { + req.Header.Add("X-Vault-MFA", mfaHeaderVal) + } + } + + if r.PolicyOverride { + req.Header.Set("X-Vault-Policy-Override", "true") + } + + return req, nil +} diff --git a/vendor/github.com/hashicorp/vault/api/response.go b/vendor/github.com/hashicorp/vault/api/response.go new file mode 100644 index 0000000000..9ce3d12aac --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/response.go @@ -0,0 +1,133 @@ +package api + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/jsonutil" +) + +// Response is a raw response that wraps an HTTP response. +type Response struct { + *http.Response +} + +// DecodeJSON will decode the response body to a JSON structure. This +// will consume the response body, but will not close it. Close must +// still be called. +func (r *Response) DecodeJSON(out interface{}) error { + return jsonutil.DecodeJSONFromReader(r.Body, out) +} + +// Error returns an error response if there is one. If there is an error, +// this will fully consume the response body, but will not close it. The +// body must still be closed manually. +func (r *Response) Error() error { + // 200 to 399 are okay status codes. 429 is the code for health status of + // standby nodes, otherwise, 429 is treated as quota limit reached. + if (r.StatusCode >= 200 && r.StatusCode < 400) || (r.StatusCode == 429 && r.Request.URL.Path == "/v1/sys/health") { + return nil + } + + // We have an error. Let's copy the body into our own buffer first, + // so that if we can't decode JSON, we can at least copy it raw. 
+ bodyBuf := &bytes.Buffer{} + if _, err := io.Copy(bodyBuf, r.Body); err != nil { + return err + } + + r.Body.Close() + r.Body = ioutil.NopCloser(bodyBuf) + ns := r.Header.Get(consts.NamespaceHeaderName) + + // Build up the error object + respErr := &ResponseError{ + HTTPMethod: r.Request.Method, + URL: r.Request.URL.String(), + StatusCode: r.StatusCode, + NamespacePath: ns, + } + + // Decode the error response if we can. Note that we wrap the bodyBuf + // in a bytes.Reader here so that the JSON decoder doesn't move the + // read pointer for the original buffer. + var resp ErrorResponse + if err := jsonutil.DecodeJSON(bodyBuf.Bytes(), &resp); err != nil { + // Store the fact that we couldn't decode the errors + respErr.RawError = true + respErr.Errors = []string{bodyBuf.String()} + } else { + // Store the decoded errors + respErr.Errors = resp.Errors + } + + return respErr +} + +// ErrorResponse is the raw structure of errors when they're returned by the +// HTTP API. +type ErrorResponse struct { + Errors []string +} + +// ResponseError is the error returned when Vault responds with an error or +// non-success HTTP status code. If a request to Vault fails because of a +// network error a different error message will be returned. ResponseError gives +// access to the underlying errors and status code. +type ResponseError struct { + // HTTPMethod is the HTTP method for the request (PUT, GET, etc). + HTTPMethod string + + // URL is the URL of the request. + URL string + + // StatusCode is the HTTP status code. + StatusCode int + + // RawError marks that the underlying error messages returned by Vault were + // not parsable. The Errors slice will contain the raw response body as the + // first and only error string if this value is set to true. + RawError bool + + // Errors are the underlying errors returned by Vault. + Errors []string + + // Namespace path to be reported to the client if it is set to anything other + // than root + NamespacePath string +} + +// Error returns a human-readable error string for the response error. +func (r *ResponseError) Error() string { + errString := "Errors" + if r.RawError { + errString = "Raw Message" + } + + var ns string + if r.NamespacePath != "" && r.NamespacePath != "root/" { + ns = "Namespace: " + r.NamespacePath + "\n" + } + + var errBody bytes.Buffer + errBody.WriteString(fmt.Sprintf( + "Error making API request.\n\n"+ + ns+ + "URL: %s %s\n"+ + "Code: %d. %s:\n\n", + r.HTTPMethod, r.URL, r.StatusCode, errString)) + + if r.RawError && len(r.Errors) == 1 { + errBody.WriteString(r.Errors[0]) + } else { + for _, err := range r.Errors { + errBody.WriteString(fmt.Sprintf("* %s", err)) + } + } + + return errBody.String() +} diff --git a/vendor/github.com/hashicorp/vault/api/secret.go b/vendor/github.com/hashicorp/vault/api/secret.go new file mode 100644 index 0000000000..64865d0ba1 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/secret.go @@ -0,0 +1,322 @@ +package api + +import ( + "bytes" + "fmt" + "io" + "time" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/vault/sdk/helper/jsonutil" +) + +// Secret is the structure returned for every secret within Vault. +type Secret struct { + // The request ID that generated this response + RequestID string `json:"request_id"` + + LeaseID string `json:"lease_id"` + LeaseDuration int `json:"lease_duration"` + Renewable bool `json:"renewable"` + + // Data is the actual contents of the secret. 
The format of the data + // is arbitrary and up to the secret backend. + Data map[string]interface{} `json:"data"` + + // Warnings contains any warnings related to the operation. These + // are not issues that caused the command to fail, but that the + // client should be aware of. + Warnings []string `json:"warnings"` + + // Auth, if non-nil, means that there was authentication information + // attached to this response. + Auth *SecretAuth `json:"auth,omitempty"` + + // WrapInfo, if non-nil, means that the initial response was wrapped in the + // cubbyhole of the given token (which has a TTL of the given number of + // seconds) + WrapInfo *SecretWrapInfo `json:"wrap_info,omitempty"` +} + +// TokenID returns the standardized token ID (token) for the given secret. +func (s *Secret) TokenID() (string, error) { + if s == nil { + return "", nil + } + + if s.Auth != nil && len(s.Auth.ClientToken) > 0 { + return s.Auth.ClientToken, nil + } + + if s.Data == nil || s.Data["id"] == nil { + return "", nil + } + + id, ok := s.Data["id"].(string) + if !ok { + return "", fmt.Errorf("token found but in the wrong format") + } + + return id, nil +} + +// TokenAccessor returns the standardized token accessor for the given secret. +// If the secret is nil or does not contain an accessor, this returns the empty +// string. +func (s *Secret) TokenAccessor() (string, error) { + if s == nil { + return "", nil + } + + if s.Auth != nil && len(s.Auth.Accessor) > 0 { + return s.Auth.Accessor, nil + } + + if s.Data == nil || s.Data["accessor"] == nil { + return "", nil + } + + accessor, ok := s.Data["accessor"].(string) + if !ok { + return "", fmt.Errorf("token found but in the wrong format") + } + + return accessor, nil +} + +// TokenRemainingUses returns the standardized remaining uses for the given +// secret. If the secret is nil or does not contain the "num_uses", this +// returns -1. On error, this will return -1 and a non-nil error. +func (s *Secret) TokenRemainingUses() (int, error) { + if s == nil || s.Data == nil || s.Data["num_uses"] == nil { + return -1, nil + } + + uses, err := parseutil.ParseInt(s.Data["num_uses"]) + if err != nil { + return 0, err + } + + return int(uses), nil +} + +// TokenPolicies returns the standardized list of policies for the given secret. +// If the secret is nil or does not contain any policies, this returns nil. It +// also populates the secret's Auth info with identity/token policy info. 
+func (s *Secret) TokenPolicies() ([]string, error) { + if s == nil { + return nil, nil + } + + if s.Auth != nil && len(s.Auth.Policies) > 0 { + return s.Auth.Policies, nil + } + + if s.Data == nil || s.Data["policies"] == nil { + return nil, nil + } + + var tokenPolicies []string + + // Token policies + { + _, ok := s.Data["policies"] + if !ok { + goto TOKEN_DONE + } + + sList, ok := s.Data["policies"].([]string) + if ok { + tokenPolicies = sList + goto TOKEN_DONE + } + + list, ok := s.Data["policies"].([]interface{}) + if !ok { + return nil, fmt.Errorf("unable to convert token policies to expected format") + } + for _, v := range list { + p, ok := v.(string) + if !ok { + return nil, fmt.Errorf("unable to convert policy %v to string", v) + } + tokenPolicies = append(tokenPolicies, p) + } + } + +TOKEN_DONE: + var identityPolicies []string + + // Identity policies + { + _, ok := s.Data["identity_policies"] + if !ok { + goto DONE + } + + sList, ok := s.Data["identity_policies"].([]string) + if ok { + identityPolicies = sList + goto DONE + } + + list, ok := s.Data["identity_policies"].([]interface{}) + if !ok { + return nil, fmt.Errorf("unable to convert identity policies to expected format") + } + for _, v := range list { + p, ok := v.(string) + if !ok { + return nil, fmt.Errorf("unable to convert policy %v to string", v) + } + identityPolicies = append(identityPolicies, p) + } + } + +DONE: + + if s.Auth == nil { + s.Auth = &SecretAuth{} + } + + policies := append(tokenPolicies, identityPolicies...) + + s.Auth.TokenPolicies = tokenPolicies + s.Auth.IdentityPolicies = identityPolicies + s.Auth.Policies = policies + + return policies, nil +} + +// TokenMetadata returns the map of metadata associated with this token, if any +// exists. If the secret is nil or does not contain the "metadata" key, this +// returns nil. +func (s *Secret) TokenMetadata() (map[string]string, error) { + if s == nil { + return nil, nil + } + + if s.Auth != nil && len(s.Auth.Metadata) > 0 { + return s.Auth.Metadata, nil + } + + if s.Data == nil || (s.Data["metadata"] == nil && s.Data["meta"] == nil) { + return nil, nil + } + + data, ok := s.Data["metadata"].(map[string]interface{}) + if !ok { + data, ok = s.Data["meta"].(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("unable to convert metadata field to expected format") + } + } + + metadata := make(map[string]string, len(data)) + for k, v := range data { + typed, ok := v.(string) + if !ok { + return nil, fmt.Errorf("unable to convert metadata value %v to string", v) + } + metadata[k] = typed + } + + return metadata, nil +} + +// TokenIsRenewable returns the standardized token renewability for the given +// secret. If the secret is nil or does not contain the "renewable" key, this +// returns false. +func (s *Secret) TokenIsRenewable() (bool, error) { + if s == nil { + return false, nil + } + + if s.Auth != nil && s.Auth.Renewable { + return s.Auth.Renewable, nil + } + + if s.Data == nil || s.Data["renewable"] == nil { + return false, nil + } + + renewable, err := parseutil.ParseBool(s.Data["renewable"]) + if err != nil { + return false, errwrap.Wrapf("could not convert renewable value to a boolean: {{err}}", err) + } + + return renewable, nil +} + +// TokenTTL returns the standardized remaining token TTL for the given secret. +// If the secret is nil or does not contain a TTL, this returns 0. 
+func (s *Secret) TokenTTL() (time.Duration, error) { + if s == nil { + return 0, nil + } + + if s.Auth != nil && s.Auth.LeaseDuration > 0 { + return time.Duration(s.Auth.LeaseDuration) * time.Second, nil + } + + if s.Data == nil || s.Data["ttl"] == nil { + return 0, nil + } + + ttl, err := parseutil.ParseDurationSecond(s.Data["ttl"]) + if err != nil { + return 0, err + } + + return ttl, nil +} + +// SecretWrapInfo contains wrapping information if we have it. If what is +// contained is an authentication token, the accessor for the token will be +// available in WrappedAccessor. +type SecretWrapInfo struct { + Token string `json:"token"` + Accessor string `json:"accessor"` + TTL int `json:"ttl"` + CreationTime time.Time `json:"creation_time"` + CreationPath string `json:"creation_path"` + WrappedAccessor string `json:"wrapped_accessor"` +} + +// SecretAuth is the structure containing auth information if we have it. +type SecretAuth struct { + ClientToken string `json:"client_token"` + Accessor string `json:"accessor"` + Policies []string `json:"policies"` + TokenPolicies []string `json:"token_policies"` + IdentityPolicies []string `json:"identity_policies"` + Metadata map[string]string `json:"metadata"` + Orphan bool `json:"orphan"` + EntityID string `json:"entity_id"` + + LeaseDuration int `json:"lease_duration"` + Renewable bool `json:"renewable"` +} + +// ParseSecret is used to parse a secret value from JSON from an io.Reader. +func ParseSecret(r io.Reader) (*Secret, error) { + // First read the data into a buffer. Not super efficient but we want to + // know if we actually have a body or not. + var buf bytes.Buffer + _, err := buf.ReadFrom(r) + if err != nil { + return nil, err + } + if buf.Len() == 0 { + return nil, nil + } + + // First decode the JSON into a map[string]interface{} + var secret Secret + if err := jsonutil.DecodeJSONFromReader(&buf, &secret); err != nil { + return nil, err + } + + return &secret, nil +} diff --git a/vendor/github.com/hashicorp/vault/api/ssh.go b/vendor/github.com/hashicorp/vault/api/ssh.go new file mode 100644 index 0000000000..837eac4ff7 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/ssh.go @@ -0,0 +1,62 @@ +package api + +import ( + "context" + "fmt" +) + +// SSH is used to return a client to invoke operations on SSH backend. +type SSH struct { + c *Client + MountPoint string +} + +// SSH returns the client for logical-backend API calls. +func (c *Client) SSH() *SSH { + return c.SSHWithMountPoint(SSHHelperDefaultMountPoint) +} + +// SSHWithMountPoint returns the client with specific SSH mount point. +func (c *Client) SSHWithMountPoint(mountPoint string) *SSH { + return &SSH{ + c: c, + MountPoint: mountPoint, + } +} + +// Credential invokes the SSH backend API to create a credential to establish an SSH session. +func (c *SSH) Credential(role string, data map[string]interface{}) (*Secret, error) { + r := c.c.NewRequest("PUT", fmt.Sprintf("/v1/%s/creds/%s", c.MountPoint, role)) + if err := r.SetJSONBody(data); err != nil { + return nil, err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +// SignKey signs the given public key and returns a signed public key to pass +// along with the SSH request. 
+func (c *SSH) SignKey(role string, data map[string]interface{}) (*Secret, error) { + r := c.c.NewRequest("PUT", fmt.Sprintf("/v1/%s/sign/%s", c.MountPoint, role)) + if err := r.SetJSONBody(data); err != nil { + return nil, err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} diff --git a/vendor/github.com/hashicorp/vault/api/ssh_agent.go b/vendor/github.com/hashicorp/vault/api/ssh_agent.go new file mode 100644 index 0000000000..fda70bcddd --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/ssh_agent.go @@ -0,0 +1,241 @@ +package api + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "os" + + "github.com/hashicorp/errwrap" + cleanhttp "github.com/hashicorp/go-cleanhttp" + multierror "github.com/hashicorp/go-multierror" + rootcerts "github.com/hashicorp/go-rootcerts" + "github.com/hashicorp/hcl" + "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/vault/sdk/helper/hclutil" + "github.com/mitchellh/mapstructure" +) + +const ( + // SSHHelperDefaultMountPoint is the default path at which SSH backend will be + // mounted in the Vault server. + SSHHelperDefaultMountPoint = "ssh" + + // VerifyEchoRequest is the echo request message sent as OTP by the helper. + VerifyEchoRequest = "verify-echo-request" + + // VerifyEchoResponse is the echo response message sent as a response to OTP + // matching echo request. + VerifyEchoResponse = "verify-echo-response" +) + +// SSHHelper is a structure representing a vault-ssh-helper which can talk to vault server +// in order to verify the OTP entered by the user. It contains the path at which +// SSH backend is mounted at the server. +type SSHHelper struct { + c *Client + MountPoint string +} + +// SSHVerifyResponse is a structure representing the fields in Vault server's +// response. +type SSHVerifyResponse struct { + // Usually empty. If the request OTP is echo request message, this will + // be set to the corresponding echo response message. + Message string `json:"message" mapstructure:"message"` + + // Username associated with the OTP + Username string `json:"username" mapstructure:"username"` + + // IP associated with the OTP + IP string `json:"ip" mapstructure:"ip"` + + // Name of the role against which the OTP was issued + RoleName string `json:"role_name" mapstructure:"role_name"` +} + +// SSHHelperConfig is a structure which represents the entries from the vault-ssh-helper's configuration file. +type SSHHelperConfig struct { + VaultAddr string `hcl:"vault_addr"` + SSHMountPoint string `hcl:"ssh_mount_point"` + Namespace string `hcl:"namespace"` + CACert string `hcl:"ca_cert"` + CAPath string `hcl:"ca_path"` + AllowedCidrList string `hcl:"allowed_cidr_list"` + AllowedRoles string `hcl:"allowed_roles"` + TLSSkipVerify bool `hcl:"tls_skip_verify"` + TLSServerName string `hcl:"tls_server_name"` +} + +// SetTLSParameters sets the TLS parameters for this SSH agent. 
+func (c *SSHHelperConfig) SetTLSParameters(clientConfig *Config, certPool *x509.CertPool) { + tlsConfig := &tls.Config{ + InsecureSkipVerify: c.TLSSkipVerify, + MinVersion: tls.VersionTLS12, + RootCAs: certPool, + ServerName: c.TLSServerName, + } + + transport := cleanhttp.DefaultTransport() + transport.TLSClientConfig = tlsConfig + clientConfig.HttpClient.Transport = transport +} + +// Returns true if any of the following conditions are true: +// * CA cert is configured +// * CA path is configured +// * configured to skip certificate verification +// * TLS server name is configured +// +func (c *SSHHelperConfig) shouldSetTLSParameters() bool { + return c.CACert != "" || c.CAPath != "" || c.TLSServerName != "" || c.TLSSkipVerify +} + +// NewClient returns a new client for the configuration. This client will be used by the +// vault-ssh-helper to communicate with Vault server and verify the OTP entered by user. +// If the configuration supplies Vault SSL certificates, then the client will +// have TLS configured in its transport. +func (c *SSHHelperConfig) NewClient() (*Client, error) { + // Creating a default client configuration for communicating with vault server. + clientConfig := DefaultConfig() + + // Pointing the client to the actual address of vault server. + clientConfig.Address = c.VaultAddr + + // Check if certificates are provided via config file. + if c.shouldSetTLSParameters() { + rootConfig := &rootcerts.Config{ + CAFile: c.CACert, + CAPath: c.CAPath, + } + certPool, err := rootcerts.LoadCACerts(rootConfig) + if err != nil { + return nil, err + } + // Enable TLS on the HTTP client information + c.SetTLSParameters(clientConfig, certPool) + } + + // Creating the client object for the given configuration + client, err := NewClient(clientConfig) + if err != nil { + return nil, err + } + + // Configure namespace + if c.Namespace != "" { + client.SetNamespace(c.Namespace) + } + + return client, nil +} + +// LoadSSHHelperConfig loads ssh-helper's configuration from the file and populates the corresponding +// in-memory structure. +// +// Vault address is a required parameter. +// Mount point defaults to "ssh". +func LoadSSHHelperConfig(path string) (*SSHHelperConfig, error) { + contents, err := ioutil.ReadFile(path) + if err != nil && !os.IsNotExist(err) { + return nil, multierror.Prefix(err, "ssh_helper:") + } + return ParseSSHHelperConfig(string(contents)) +} + +// ParseSSHHelperConfig parses the given contents as a string for the SSHHelper +// configuration. 
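A minimal sketch of ParseSSHHelperConfig (defined below) applied to the defaults described above: vault_addr is mandatory, and ssh_mount_point falls back to "ssh" when omitted. The HCL contents here are an assumed example, not part of the vendored file:

package main

import (
	"fmt"
	"log"

	vaultapi "github.com/hashicorp/vault/api"
)

func main() {
	// Only vault_addr is required; ssh_mount_point defaults to "ssh".
	contents := `vault_addr = "https://vault.example.com:8200"`

	cfg, err := vaultapi.ParseSSHHelperConfig(contents)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(cfg.VaultAddr)     // https://vault.example.com:8200
	fmt.Println(cfg.SSHMountPoint) // ssh
}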
+func ParseSSHHelperConfig(contents string) (*SSHHelperConfig, error) { + root, err := hcl.Parse(string(contents)) + if err != nil { + return nil, errwrap.Wrapf("error parsing config: {{err}}", err) + } + + list, ok := root.Node.(*ast.ObjectList) + if !ok { + return nil, fmt.Errorf("error parsing config: file doesn't contain a root object") + } + + valid := []string{ + "vault_addr", + "ssh_mount_point", + "namespace", + "ca_cert", + "ca_path", + "allowed_cidr_list", + "allowed_roles", + "tls_skip_verify", + "tls_server_name", + } + if err := hclutil.CheckHCLKeys(list, valid); err != nil { + return nil, multierror.Prefix(err, "ssh_helper:") + } + + var c SSHHelperConfig + c.SSHMountPoint = SSHHelperDefaultMountPoint + if err := hcl.DecodeObject(&c, list); err != nil { + return nil, multierror.Prefix(err, "ssh_helper:") + } + + if c.VaultAddr == "" { + return nil, fmt.Errorf(`missing config "vault_addr"`) + } + return &c, nil +} + +// SSHHelper creates an SSHHelper object which can talk to Vault server with SSH backend +// mounted at default path ("ssh"). +func (c *Client) SSHHelper() *SSHHelper { + return c.SSHHelperWithMountPoint(SSHHelperDefaultMountPoint) +} + +// SSHHelperWithMountPoint creates an SSHHelper object which can talk to Vault server with SSH backend +// mounted at a specific mount point. +func (c *Client) SSHHelperWithMountPoint(mountPoint string) *SSHHelper { + return &SSHHelper{ + c: c, + MountPoint: mountPoint, + } +} + +// Verify verifies if the key provided by user is present in Vault server. The response +// will contain the IP address and username associated with the OTP. In case the +// OTP matches the echo request message, instead of searching an entry for the OTP, +// an echo response message is returned. This feature is used by ssh-helper to verify if +// its configured correctly. +func (c *SSHHelper) Verify(otp string) (*SSHVerifyResponse, error) { + data := map[string]interface{}{ + "otp": otp, + } + verifyPath := fmt.Sprintf("/v1/%s/verify", c.MountPoint) + r := c.c.NewRequest("PUT", verifyPath) + if err := r.SetJSONBody(data); err != nil { + return nil, err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + + if secret.Data == nil { + return nil, nil + } + + var verifyResp SSHVerifyResponse + err = mapstructure.Decode(secret.Data, &verifyResp) + if err != nil { + return nil, err + } + return &verifyResp, nil +} diff --git a/vendor/github.com/hashicorp/vault/api/sys.go b/vendor/github.com/hashicorp/vault/api/sys.go new file mode 100644 index 0000000000..5fb111887c --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys.go @@ -0,0 +1,11 @@ +package api + +// Sys is used to perform system-related operations on Vault. +type Sys struct { + c *Client +} + +// Sys is used to return the client for sys-related API calls. 
+func (c *Client) Sys() *Sys { + return &Sys{c: c} +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_audit.go b/vendor/github.com/hashicorp/vault/api/sys_audit.go new file mode 100644 index 0000000000..d0c6408366 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_audit.go @@ -0,0 +1,134 @@ +package api + +import ( + "context" + "errors" + "fmt" + + "github.com/mitchellh/mapstructure" +) + +func (c *Sys) AuditHash(path string, input string) (string, error) { + body := map[string]interface{}{ + "input": input, + } + + r := c.c.NewRequest("PUT", fmt.Sprintf("/v1/sys/audit-hash/%s", path)) + if err := r.SetJSONBody(body); err != nil { + return "", err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return "", err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return "", err + } + if secret == nil || secret.Data == nil { + return "", errors.New("data from server response is empty") + } + + hash, ok := secret.Data["hash"] + if !ok { + return "", errors.New("hash not found in response data") + } + hashStr, ok := hash.(string) + if !ok { + return "", errors.New("could not parse hash in response data") + } + + return hashStr, nil +} + +func (c *Sys) ListAudit() (map[string]*Audit, error) { + r := c.c.NewRequest("GET", "/v1/sys/audit") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + mounts := map[string]*Audit{} + err = mapstructure.Decode(secret.Data, &mounts) + if err != nil { + return nil, err + } + + return mounts, nil +} + +// DEPRECATED: Use EnableAuditWithOptions instead +func (c *Sys) EnableAudit( + path string, auditType string, desc string, opts map[string]string) error { + return c.EnableAuditWithOptions(path, &EnableAuditOptions{ + Type: auditType, + Description: desc, + Options: opts, + }) +} + +func (c *Sys) EnableAuditWithOptions(path string, options *EnableAuditOptions) error { + r := c.c.NewRequest("PUT", fmt.Sprintf("/v1/sys/audit/%s", path)) + if err := r.SetJSONBody(options); err != nil { + return err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +func (c *Sys) DisableAudit(path string) error { + r := c.c.NewRequest("DELETE", fmt.Sprintf("/v1/sys/audit/%s", path)) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + + if err == nil { + defer resp.Body.Close() + } + return err +} + +// Structures for the requests/response are all down here. They aren't +// individually documented because the map almost directly to the raw HTTP API +// documentation. Please refer to that documentation for more details. 
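A minimal sketch of enabling a file audit device through EnableAuditWithOptions (defined above), using the EnableAuditOptions structure declared just below; the token and file path are assumed values, not part of the vendored file or this patch:

package main

import (
	"log"

	vaultapi "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vaultapi.NewClient(vaultapi.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	client.SetToken("example-root-token") // assumed token with sufficient privileges

	// Enable a "file" audit device at the audit path "file".
	err = client.Sys().EnableAuditWithOptions("file", &vaultapi.EnableAuditOptions{
		Type:        "file",
		Description: "example audit device",
		Options: map[string]string{
			"file_path": "/var/log/vault_audit.log", // assumed log path
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}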
+ +type EnableAuditOptions struct { + Type string `json:"type" mapstructure:"type"` + Description string `json:"description" mapstructure:"description"` + Options map[string]string `json:"options" mapstructure:"options"` + Local bool `json:"local" mapstructure:"local"` +} + +type Audit struct { + Type string `json:"type" mapstructure:"type"` + Description string `json:"description" mapstructure:"description"` + Options map[string]string `json:"options" mapstructure:"options"` + Local bool `json:"local" mapstructure:"local"` + Path string `json:"path" mapstructure:"path"` +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_auth.go b/vendor/github.com/hashicorp/vault/api/sys_auth.go new file mode 100644 index 0000000000..46abae4eff --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_auth.go @@ -0,0 +1,82 @@ +package api + +import ( + "context" + "errors" + "fmt" + + "github.com/mitchellh/mapstructure" +) + +func (c *Sys) ListAuth() (map[string]*AuthMount, error) { + r := c.c.NewRequest("GET", "/v1/sys/auth") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + mounts := map[string]*AuthMount{} + err = mapstructure.Decode(secret.Data, &mounts) + if err != nil { + return nil, err + } + + return mounts, nil +} + +// DEPRECATED: Use EnableAuthWithOptions instead +func (c *Sys) EnableAuth(path, authType, desc string) error { + return c.EnableAuthWithOptions(path, &EnableAuthOptions{ + Type: authType, + Description: desc, + }) +} + +func (c *Sys) EnableAuthWithOptions(path string, options *EnableAuthOptions) error { + r := c.c.NewRequest("POST", fmt.Sprintf("/v1/sys/auth/%s", path)) + if err := r.SetJSONBody(options); err != nil { + return err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +func (c *Sys) DisableAuth(path string) error { + r := c.c.NewRequest("DELETE", fmt.Sprintf("/v1/sys/auth/%s", path)) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +// Rather than duplicate, we can use modern Go's type aliasing +type ( + EnableAuthOptions = MountInput + AuthConfigInput = MountConfigInput + AuthMount = MountOutput + AuthConfigOutput = MountConfigOutput +) diff --git a/vendor/github.com/hashicorp/vault/api/sys_capabilities.go b/vendor/github.com/hashicorp/vault/api/sys_capabilities.go new file mode 100644 index 0000000000..64b3951dd1 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_capabilities.go @@ -0,0 +1,64 @@ +package api + +import ( + "context" + "errors" + "fmt" + + "github.com/mitchellh/mapstructure" +) + +func (c *Sys) CapabilitiesSelf(path string) ([]string, error) { + return c.Capabilities(c.c.Token(), path) +} + +func (c *Sys) Capabilities(token, path string) ([]string, error) { + body := map[string]string{ + "token": token, + "path": path, + } + + reqPath := "/v1/sys/capabilities" + if token == c.c.Token() { + reqPath = fmt.Sprintf("%s-self", reqPath) + } + + r := c.c.NewRequest("POST", reqPath) + if 
err := r.SetJSONBody(body); err != nil { + return nil, err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + var res []string + err = mapstructure.Decode(secret.Data[path], &res) + if err != nil { + return nil, err + } + + if len(res) == 0 { + _, ok := secret.Data["capabilities"] + if ok { + err = mapstructure.Decode(secret.Data["capabilities"], &res) + if err != nil { + return nil, err + } + } + } + + return res, nil +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_config_cors.go b/vendor/github.com/hashicorp/vault/api/sys_config_cors.go new file mode 100644 index 0000000000..ef136dcbb6 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_config_cors.go @@ -0,0 +1,75 @@ +package api + +import ( + "context" + "errors" + + "github.com/mitchellh/mapstructure" +) + +func (c *Sys) CORSStatus() (*CORSResponse, error) { + r := c.c.NewRequest("GET", "/v1/sys/config/cors") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + var result CORSResponse + err = mapstructure.Decode(secret.Data, &result) + if err != nil { + return nil, err + } + + return &result, err +} + +func (c *Sys) ConfigureCORS(req *CORSRequest) error { + r := c.c.NewRequest("PUT", "/v1/sys/config/cors") + if err := r.SetJSONBody(req); err != nil { + return err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) DisableCORS() error { + r := c.c.NewRequest("DELETE", "/v1/sys/config/cors") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +type CORSRequest struct { + AllowedOrigins []string `json:"allowed_origins" mapstructure:"allowed_origins"` + AllowedHeaders []string `json:"allowed_headers" mapstructure:"allowed_headers"` + Enabled bool `json:"enabled" mapstructure:"enabled"` +} + +type CORSResponse struct { + AllowedOrigins []string `json:"allowed_origins" mapstructure:"allowed_origins"` + AllowedHeaders []string `json:"allowed_headers" mapstructure:"allowed_headers"` + Enabled bool `json:"enabled" mapstructure:"enabled"` +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_generate_root.go b/vendor/github.com/hashicorp/vault/api/sys_generate_root.go new file mode 100644 index 0000000000..870dacb09e --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_generate_root.go @@ -0,0 +1,140 @@ +package api + +import "context" + +func (c *Sys) GenerateRootStatus() (*GenerateRootStatusResponse, error) { + return c.generateRootStatusCommon("/v1/sys/generate-root/attempt") +} + +func (c *Sys) GenerateDROperationTokenStatus() (*GenerateRootStatusResponse, error) { + return 
c.generateRootStatusCommon("/v1/sys/replication/dr/secondary/generate-operation-token/attempt") +} + +func (c *Sys) GenerateRecoveryOperationTokenStatus() (*GenerateRootStatusResponse, error) { + return c.generateRootStatusCommon("/v1/sys/generate-recovery-token/attempt") +} + +func (c *Sys) generateRootStatusCommon(path string) (*GenerateRootStatusResponse, error) { + r := c.c.NewRequest("GET", path) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result GenerateRootStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) GenerateRootInit(otp, pgpKey string) (*GenerateRootStatusResponse, error) { + return c.generateRootInitCommon("/v1/sys/generate-root/attempt", otp, pgpKey) +} + +func (c *Sys) GenerateDROperationTokenInit(otp, pgpKey string) (*GenerateRootStatusResponse, error) { + return c.generateRootInitCommon("/v1/sys/replication/dr/secondary/generate-operation-token/attempt", otp, pgpKey) +} + +func (c *Sys) GenerateRecoveryOperationTokenInit(otp, pgpKey string) (*GenerateRootStatusResponse, error) { + return c.generateRootInitCommon("/v1/sys/generate-recovery-token/attempt", otp, pgpKey) +} + +func (c *Sys) generateRootInitCommon(path, otp, pgpKey string) (*GenerateRootStatusResponse, error) { + body := map[string]interface{}{ + "otp": otp, + "pgp_key": pgpKey, + } + + r := c.c.NewRequest("PUT", path) + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result GenerateRootStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) GenerateRootCancel() error { + return c.generateRootCancelCommon("/v1/sys/generate-root/attempt") +} + +func (c *Sys) GenerateDROperationTokenCancel() error { + return c.generateRootCancelCommon("/v1/sys/replication/dr/secondary/generate-operation-token/attempt") +} + +func (c *Sys) GenerateRecoveryOperationTokenCancel() error { + return c.generateRootCancelCommon("/v1/sys/generate-recovery-token/attempt") +} + +func (c *Sys) generateRootCancelCommon(path string) error { + r := c.c.NewRequest("DELETE", path) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) GenerateRootUpdate(shard, nonce string) (*GenerateRootStatusResponse, error) { + return c.generateRootUpdateCommon("/v1/sys/generate-root/update", shard, nonce) +} + +func (c *Sys) GenerateDROperationTokenUpdate(shard, nonce string) (*GenerateRootStatusResponse, error) { + return c.generateRootUpdateCommon("/v1/sys/replication/dr/secondary/generate-operation-token/update", shard, nonce) +} + +func (c *Sys) GenerateRecoveryOperationTokenUpdate(shard, nonce string) (*GenerateRootStatusResponse, error) { + return c.generateRootUpdateCommon("/v1/sys/generate-recovery-token/update", shard, nonce) +} + +func (c *Sys) generateRootUpdateCommon(path, shard, nonce string) (*GenerateRootStatusResponse, error) { + body := map[string]interface{}{ + "key": shard, + "nonce": nonce, + } + + r := c.c.NewRequest("PUT", path) + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + ctx, cancelFunc := 
context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result GenerateRootStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +type GenerateRootStatusResponse struct { + Nonce string `json:"nonce"` + Started bool `json:"started"` + Progress int `json:"progress"` + Required int `json:"required"` + Complete bool `json:"complete"` + EncodedToken string `json:"encoded_token"` + EncodedRootToken string `json:"encoded_root_token"` + PGPFingerprint string `json:"pgp_fingerprint"` + OTP string `json:"otp"` + OTPLength int `json:"otp_length"` +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_health.go b/vendor/github.com/hashicorp/vault/api/sys_health.go new file mode 100644 index 0000000000..d5d7796008 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_health.go @@ -0,0 +1,41 @@ +package api + +import "context" + +func (c *Sys) Health() (*HealthResponse, error) { + r := c.c.NewRequest("GET", "/v1/sys/health") + // If the code is 400 or above it will automatically turn into an error, + // but the sys/health API defaults to returning 5xx when not sealed or + // inited, so we force this code to be something else so we parse correctly + r.Params.Add("uninitcode", "299") + r.Params.Add("sealedcode", "299") + r.Params.Add("standbycode", "299") + r.Params.Add("drsecondarycode", "299") + r.Params.Add("performancestandbycode", "299") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result HealthResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +type HealthResponse struct { + Initialized bool `json:"initialized"` + Sealed bool `json:"sealed"` + Standby bool `json:"standby"` + PerformanceStandby bool `json:"performance_standby"` + ReplicationPerformanceMode string `json:"replication_performance_mode"` + ReplicationDRMode string `json:"replication_dr_mode"` + ServerTimeUTC int64 `json:"server_time_utc"` + Version string `json:"version"` + ClusterName string `json:"cluster_name,omitempty"` + ClusterID string `json:"cluster_id,omitempty"` + LastWAL uint64 `json:"last_wal,omitempty"` +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_init.go b/vendor/github.com/hashicorp/vault/api/sys_init.go new file mode 100644 index 0000000000..0e499c6e3c --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_init.go @@ -0,0 +1,61 @@ +package api + +import "context" + +func (c *Sys) InitStatus() (bool, error) { + r := c.c.NewRequest("GET", "/v1/sys/init") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return false, err + } + defer resp.Body.Close() + + var result InitStatusResponse + err = resp.DecodeJSON(&result) + return result.Initialized, err +} + +func (c *Sys) Init(opts *InitRequest) (*InitResponse, error) { + r := c.c.NewRequest("PUT", "/v1/sys/init") + if err := r.SetJSONBody(opts); err != nil { + return nil, err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result InitResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +type InitRequest struct { + SecretShares int 
`json:"secret_shares"` + SecretThreshold int `json:"secret_threshold"` + StoredShares int `json:"stored_shares"` + PGPKeys []string `json:"pgp_keys"` + RecoveryShares int `json:"recovery_shares"` + RecoveryThreshold int `json:"recovery_threshold"` + RecoveryPGPKeys []string `json:"recovery_pgp_keys"` + RootTokenPGPKey string `json:"root_token_pgp_key"` +} + +type InitStatusResponse struct { + Initialized bool +} + +type InitResponse struct { + Keys []string `json:"keys"` + KeysB64 []string `json:"keys_base64"` + RecoveryKeys []string `json:"recovery_keys"` + RecoveryKeysB64 []string `json:"recovery_keys_base64"` + RootToken string `json:"root_token"` +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_leader.go b/vendor/github.com/hashicorp/vault/api/sys_leader.go new file mode 100644 index 0000000000..1c6be8d880 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_leader.go @@ -0,0 +1,35 @@ +package api + +import ( + "context" + "time" +) + +func (c *Sys) Leader() (*LeaderResponse, error) { + r := c.c.NewRequest("GET", "/v1/sys/leader") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result LeaderResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +type LeaderResponse struct { + HAEnabled bool `json:"ha_enabled"` + IsSelf bool `json:"is_self"` + ActiveTime time.Time `json:"active_time"` + LeaderAddress string `json:"leader_address"` + LeaderClusterAddress string `json:"leader_cluster_address"` + PerfStandby bool `json:"performance_standby"` + PerfStandbyLastRemoteWAL uint64 `json:"performance_standby_last_remote_wal"` + LastWAL uint64 `json:"last_wal"` + RaftCommittedIndex uint64 `json:"raft_committed_index,omitempty"` + RaftAppliedIndex uint64 `json:"raft_applied_index,omitempty"` +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_leases.go b/vendor/github.com/hashicorp/vault/api/sys_leases.go new file mode 100644 index 0000000000..e018015ded --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_leases.go @@ -0,0 +1,132 @@ +package api + +import ( + "context" + "errors" +) + +func (c *Sys) Renew(id string, increment int) (*Secret, error) { + r := c.c.NewRequest("PUT", "/v1/sys/leases/renew") + + body := map[string]interface{}{ + "increment": increment, + "lease_id": id, + } + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +func (c *Sys) Lookup(id string) (*Secret, error) { + r := c.c.NewRequest("PUT", "/v1/sys/leases/lookup") + + body := map[string]interface{}{ + "lease_id": id, + } + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +func (c *Sys) Revoke(id string) error { + r := c.c.NewRequest("PUT", "/v1/sys/leases/revoke") + body := map[string]interface{}{ + "lease_id": id, + } + if err := r.SetJSONBody(body); err != nil { + return err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if 
err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) RevokePrefix(id string) error { + r := c.c.NewRequest("PUT", "/v1/sys/leases/revoke-prefix/"+id) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) RevokeForce(id string) error { + r := c.c.NewRequest("PUT", "/v1/sys/leases/revoke-force/"+id) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) RevokeWithOptions(opts *RevokeOptions) error { + if opts == nil { + return errors.New("nil options provided") + } + + // Construct path + path := "/v1/sys/leases/revoke/" + switch { + case opts.Force: + path = "/v1/sys/leases/revoke-force/" + case opts.Prefix: + path = "/v1/sys/leases/revoke-prefix/" + } + path += opts.LeaseID + + r := c.c.NewRequest("PUT", path) + if !opts.Force { + body := map[string]interface{}{ + "sync": opts.Sync, + } + if err := r.SetJSONBody(body); err != nil { + return err + } + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +type RevokeOptions struct { + LeaseID string + Force bool + Prefix bool + Sync bool +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_monitor.go b/vendor/github.com/hashicorp/vault/api/sys_monitor.go new file mode 100644 index 0000000000..ec27f22855 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_monitor.go @@ -0,0 +1,64 @@ +package api + +import ( + "bufio" + "context" + "fmt" +) + +// Monitor returns a channel that outputs strings containing the log messages +// coming from the server. 
+func (c *Sys) Monitor(ctx context.Context, logLevel string) (chan string, error) { + r := c.c.NewRequest("GET", "/v1/sys/monitor") + + if logLevel == "" { + r.Params.Add("log_level", "info") + } else { + r.Params.Add("log_level", logLevel) + } + + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + + logCh := make(chan string, 64) + + go func() { + scanner := bufio.NewScanner(resp.Body) + droppedCount := 0 + + defer close(logCh) + defer resp.Body.Close() + + for { + if ctx.Err() != nil { + return + } + + if !scanner.Scan() { + return + } + + logMessage := scanner.Text() + + if droppedCount > 0 { + select { + case logCh <- fmt.Sprintf("Monitor dropped %d logs during monitor request\n", droppedCount): + droppedCount = 0 + default: + droppedCount++ + continue + } + } + + select { + case logCh <- logMessage: + default: + droppedCount++ + } + } + }() + + return logCh, nil +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_mounts.go b/vendor/github.com/hashicorp/vault/api/sys_mounts.go new file mode 100644 index 0000000000..589df945cf --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_mounts.go @@ -0,0 +1,187 @@ +package api + +import ( + "context" + "errors" + "fmt" + + "github.com/mitchellh/mapstructure" +) + +func (c *Sys) ListMounts() (map[string]*MountOutput, error) { + r := c.c.NewRequest("GET", "/v1/sys/mounts") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + mounts := map[string]*MountOutput{} + err = mapstructure.Decode(secret.Data, &mounts) + if err != nil { + return nil, err + } + + return mounts, nil +} + +func (c *Sys) Mount(path string, mountInfo *MountInput) error { + r := c.c.NewRequest("POST", fmt.Sprintf("/v1/sys/mounts/%s", path)) + if err := r.SetJSONBody(mountInfo); err != nil { + return err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +func (c *Sys) Unmount(path string) error { + r := c.c.NewRequest("DELETE", fmt.Sprintf("/v1/sys/mounts/%s", path)) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) Remount(from, to string) error { + body := map[string]interface{}{ + "from": from, + "to": to, + } + + r := c.c.NewRequest("POST", "/v1/sys/remount") + if err := r.SetJSONBody(body); err != nil { + return err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) TuneMount(path string, config MountConfigInput) error { + r := c.c.NewRequest("POST", fmt.Sprintf("/v1/sys/mounts/%s/tune", path)) + if err := r.SetJSONBody(config); err != nil { + return err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) MountConfig(path string) 
(*MountConfigOutput, error) { + r := c.c.NewRequest("GET", fmt.Sprintf("/v1/sys/mounts/%s/tune", path)) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + var result MountConfigOutput + err = mapstructure.Decode(secret.Data, &result) + if err != nil { + return nil, err + } + + return &result, err +} + +type MountInput struct { + Type string `json:"type"` + Description string `json:"description"` + Config MountConfigInput `json:"config"` + Local bool `json:"local"` + SealWrap bool `json:"seal_wrap" mapstructure:"seal_wrap"` + ExternalEntropyAccess bool `json:"external_entropy_access" mapstructure:"external_entropy_access"` + Options map[string]string `json:"options"` + + // Deprecated: Newer server responses should be returning this information in the + // Type field (json: "type") instead. + PluginName string `json:"plugin_name,omitempty"` +} + +type MountConfigInput struct { + Options map[string]string `json:"options" mapstructure:"options"` + DefaultLeaseTTL string `json:"default_lease_ttl" mapstructure:"default_lease_ttl"` + Description *string `json:"description,omitempty" mapstructure:"description"` + MaxLeaseTTL string `json:"max_lease_ttl" mapstructure:"max_lease_ttl"` + ForceNoCache bool `json:"force_no_cache" mapstructure:"force_no_cache"` + AuditNonHMACRequestKeys []string `json:"audit_non_hmac_request_keys,omitempty" mapstructure:"audit_non_hmac_request_keys"` + AuditNonHMACResponseKeys []string `json:"audit_non_hmac_response_keys,omitempty" mapstructure:"audit_non_hmac_response_keys"` + ListingVisibility string `json:"listing_visibility,omitempty" mapstructure:"listing_visibility"` + PassthroughRequestHeaders []string `json:"passthrough_request_headers,omitempty" mapstructure:"passthrough_request_headers"` + AllowedResponseHeaders []string `json:"allowed_response_headers,omitempty" mapstructure:"allowed_response_headers"` + TokenType string `json:"token_type,omitempty" mapstructure:"token_type"` + + // Deprecated: This field will always be blank for newer server responses. 
+ PluginName string `json:"plugin_name,omitempty" mapstructure:"plugin_name"` +} + +type MountOutput struct { + UUID string `json:"uuid"` + Type string `json:"type"` + Description string `json:"description"` + Accessor string `json:"accessor"` + Config MountConfigOutput `json:"config"` + Options map[string]string `json:"options"` + Local bool `json:"local"` + SealWrap bool `json:"seal_wrap" mapstructure:"seal_wrap"` + ExternalEntropyAccess bool `json:"external_entropy_access" mapstructure:"external_entropy_access"` +} + +type MountConfigOutput struct { + DefaultLeaseTTL int `json:"default_lease_ttl" mapstructure:"default_lease_ttl"` + MaxLeaseTTL int `json:"max_lease_ttl" mapstructure:"max_lease_ttl"` + ForceNoCache bool `json:"force_no_cache" mapstructure:"force_no_cache"` + AuditNonHMACRequestKeys []string `json:"audit_non_hmac_request_keys,omitempty" mapstructure:"audit_non_hmac_request_keys"` + AuditNonHMACResponseKeys []string `json:"audit_non_hmac_response_keys,omitempty" mapstructure:"audit_non_hmac_response_keys"` + ListingVisibility string `json:"listing_visibility,omitempty" mapstructure:"listing_visibility"` + PassthroughRequestHeaders []string `json:"passthrough_request_headers,omitempty" mapstructure:"passthrough_request_headers"` + AllowedResponseHeaders []string `json:"allowed_response_headers,omitempty" mapstructure:"allowed_response_headers"` + TokenType string `json:"token_type,omitempty" mapstructure:"token_type"` + + // Deprecated: This field will always be blank for newer server responses. + PluginName string `json:"plugin_name,omitempty" mapstructure:"plugin_name"` +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_plugins.go b/vendor/github.com/hashicorp/vault/api/sys_plugins.go new file mode 100644 index 0000000000..c17072d958 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_plugins.go @@ -0,0 +1,337 @@ +package api + +import ( + "context" + "errors" + "fmt" + "net/http" + "time" + + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/mitchellh/mapstructure" +) + +// ListPluginsInput is used as input to the ListPlugins function. +type ListPluginsInput struct { + // Type of the plugin. Required. + Type consts.PluginType `json:"type"` +} + +// ListPluginsResponse is the response from the ListPlugins call. +type ListPluginsResponse struct { + // PluginsByType is the list of plugins by type. + PluginsByType map[consts.PluginType][]string `json:"types"` + + // Names is the list of names of the plugins. + // + // Deprecated: Newer server responses should be returning PluginsByType (json: + // "types") instead. + Names []string `json:"names"` +} + +// ListPlugins lists all plugins in the catalog and returns their names as a +// list of strings. 
+func (c *Sys) ListPlugins(i *ListPluginsInput) (*ListPluginsResponse, error) { + path := "" + method := "" + if i.Type == consts.PluginTypeUnknown { + path = "/v1/sys/plugins/catalog" + method = "GET" + } else { + path = fmt.Sprintf("/v1/sys/plugins/catalog/%s", i.Type) + method = "LIST" + } + + req := c.c.NewRequest(method, path) + if method == "LIST" { + // Set this for broader compatibility, but we use LIST above to be able + // to handle the wrapping lookup function + req.Method = "GET" + req.Params.Set("list", "true") + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, req) + if err != nil && resp == nil { + return nil, err + } + if resp == nil { + return nil, nil + } + defer resp.Body.Close() + + // We received an Unsupported Operation response from Vault, indicating + // Vault of an older version that doesn't support the GET method yet; + // switch it to a LIST. + if resp.StatusCode == 405 { + req.Params.Set("list", "true") + resp, err := c.c.RawRequestWithContext(ctx, req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + var result struct { + Data struct { + Keys []string `json:"keys"` + } `json:"data"` + } + if err := resp.DecodeJSON(&result); err != nil { + return nil, err + } + return &ListPluginsResponse{Names: result.Data.Keys}, nil + } + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + result := &ListPluginsResponse{ + PluginsByType: make(map[consts.PluginType][]string), + } + if i.Type == consts.PluginTypeUnknown { + for pluginTypeStr, pluginsRaw := range secret.Data { + pluginType, err := consts.ParsePluginType(pluginTypeStr) + if err != nil { + return nil, err + } + + pluginsIfc, ok := pluginsRaw.([]interface{}) + if !ok { + return nil, fmt.Errorf("unable to parse plugins for %q type", pluginTypeStr) + } + + plugins := make([]string, len(pluginsIfc)) + for i, nameIfc := range pluginsIfc { + name, ok := nameIfc.(string) + if !ok { + } + plugins[i] = name + } + result.PluginsByType[pluginType] = plugins + } + } else { + var respKeys []string + if err := mapstructure.Decode(secret.Data["keys"], &respKeys); err != nil { + return nil, err + } + result.PluginsByType[i.Type] = respKeys + } + + return result, nil +} + +// GetPluginInput is used as input to the GetPlugin function. +type GetPluginInput struct { + Name string `json:"-"` + + // Type of the plugin. Required. + Type consts.PluginType `json:"type"` +} + +// GetPluginResponse is the response from the GetPlugin call. +type GetPluginResponse struct { + Args []string `json:"args"` + Builtin bool `json:"builtin"` + Command string `json:"command"` + Name string `json:"name"` + SHA256 string `json:"sha256"` +} + +// GetPlugin retrieves information about the plugin. +func (c *Sys) GetPlugin(i *GetPluginInput) (*GetPluginResponse, error) { + path := catalogPathByType(i.Type, i.Name) + req := c.c.NewRequest(http.MethodGet, path) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result struct { + Data *GetPluginResponse + } + err = resp.DecodeJSON(&result) + if err != nil { + return nil, err + } + return result.Data, err +} + +// RegisterPluginInput is used as input to the RegisterPlugin function. 
+type RegisterPluginInput struct { + // Name is the name of the plugin. Required. + Name string `json:"-"` + + // Type of the plugin. Required. + Type consts.PluginType `json:"type"` + + // Args is the list of args to spawn the process with. + Args []string `json:"args,omitempty"` + + // Command is the command to run. + Command string `json:"command,omitempty"` + + // SHA256 is the shasum of the plugin. + SHA256 string `json:"sha256,omitempty"` +} + +// RegisterPlugin registers the plugin with the given information. +func (c *Sys) RegisterPlugin(i *RegisterPluginInput) error { + path := catalogPathByType(i.Type, i.Name) + req := c.c.NewRequest(http.MethodPut, path) + + if err := req.SetJSONBody(i); err != nil { + return err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, req) + if err == nil { + defer resp.Body.Close() + } + return err +} + +// DeregisterPluginInput is used as input to the DeregisterPlugin function. +type DeregisterPluginInput struct { + // Name is the name of the plugin. Required. + Name string `json:"-"` + + // Type of the plugin. Required. + Type consts.PluginType `json:"type"` +} + +// DeregisterPlugin removes the plugin with the given name from the plugin +// catalog. +func (c *Sys) DeregisterPlugin(i *DeregisterPluginInput) error { + path := catalogPathByType(i.Type, i.Name) + req := c.c.NewRequest(http.MethodDelete, path) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, req) + if err == nil { + defer resp.Body.Close() + } + return err +} + +// ReloadPluginInput is used as input to the ReloadPlugin function. +type ReloadPluginInput struct { + // Plugin is the name of the plugin to reload, as registered in the plugin catalog + Plugin string `json:"plugin"` + + // Mounts is the array of string mount paths of the plugin backends to reload + Mounts []string `json:"mounts"` + + // Scope is the scope of the plugin reload + Scope string `json:"scope"` +} + +// ReloadPlugin reloads mounted plugin backends, possibly returning +// reloadId for a cluster scoped reload +func (c *Sys) ReloadPlugin(i *ReloadPluginInput) (string, error) { + path := "/v1/sys/plugins/reload/backend" + req := c.c.NewRequest(http.MethodPut, path) + + if err := req.SetJSONBody(i); err != nil { + return "", err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + + resp, err := c.c.RawRequestWithContext(ctx, req) + if err != nil { + return "", err + } + defer resp.Body.Close() + + if i.Scope == "global" { + // Get the reload id + secret, parseErr := ParseSecret(resp.Body) + if parseErr != nil { + return "", parseErr + } + if _, ok := secret.Data["reload_id"]; ok { + return secret.Data["reload_id"].(string), nil + } + } + return "", err +} + +// ReloadStatus is the status of an individual node's plugin reload +type ReloadStatus struct { + Timestamp time.Time `json:"timestamp" mapstructure:"timestamp"` + Error string `json:"error" mapstructure:"error"` +} + +// ReloadStatusResponse is the combined response of all known completed plugin reloads +type ReloadStatusResponse struct { + ReloadID string `mapstructure:"reload_id"` + Results map[string]*ReloadStatus `mapstructure:"results"` +} + +// ReloadPluginStatusInput is used as input to the ReloadStatusPlugin function. 
+type ReloadPluginStatusInput struct { + // ReloadID is the ID of the reload operation + ReloadID string `json:"reload_id"` +} + +// ReloadPluginStatus retrieves the status of a reload operation +func (c *Sys) ReloadPluginStatus(reloadStatusInput *ReloadPluginStatusInput) (*ReloadStatusResponse, error) { + path := "/v1/sys/plugins/reload/backend/status" + req := c.c.NewRequest(http.MethodGet, path) + req.Params.Add("reload_id", reloadStatusInput.ReloadID) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + + resp, err := c.c.RawRequestWithContext(ctx, req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + if resp != nil { + secret, parseErr := ParseSecret(resp.Body) + if parseErr != nil { + return nil, err + } + + var r ReloadStatusResponse + d, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + DecodeHook: mapstructure.StringToTimeHookFunc(time.RFC3339), + Result: &r, + }) + if err != nil { + return nil, err + } + err = d.Decode(secret.Data) + if err != nil { + return nil, err + } + return &r, nil + } + return nil, nil +} + +// catalogPathByType is a helper to construct the proper API path by plugin type +func catalogPathByType(pluginType consts.PluginType, name string) string { + path := fmt.Sprintf("/v1/sys/plugins/catalog/%s/%s", pluginType, name) + + // Backwards compat, if type is not provided then use old path + if pluginType == consts.PluginTypeUnknown { + path = fmt.Sprintf("/v1/sys/plugins/catalog/%s", name) + } + + return path +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_policy.go b/vendor/github.com/hashicorp/vault/api/sys_policy.go new file mode 100644 index 0000000000..c0c239f960 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_policy.go @@ -0,0 +1,113 @@ +package api + +import ( + "context" + "errors" + "fmt" + + "github.com/mitchellh/mapstructure" +) + +func (c *Sys) ListPolicies() ([]string, error) { + r := c.c.NewRequest("LIST", "/v1/sys/policies/acl") + // Set this for broader compatibility, but we use LIST above to be able to + // handle the wrapping lookup function + r.Method = "GET" + r.Params.Set("list", "true") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + var result []string + err = mapstructure.Decode(secret.Data["keys"], &result) + if err != nil { + return nil, err + } + + return result, err +} + +func (c *Sys) GetPolicy(name string) (string, error) { + r := c.c.NewRequest("GET", fmt.Sprintf("/v1/sys/policies/acl/%s", name)) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if resp != nil { + defer resp.Body.Close() + if resp.StatusCode == 404 { + return "", nil + } + } + if err != nil { + return "", err + } + + secret, err := ParseSecret(resp.Body) + if err != nil { + return "", err + } + if secret == nil || secret.Data == nil { + return "", errors.New("data from server response is empty") + } + + if policyRaw, ok := secret.Data["policy"]; ok { + return policyRaw.(string), nil + } + + return "", fmt.Errorf("no policy found in response") +} + +func (c *Sys) PutPolicy(name, rules string) error { + body := map[string]string{ + "policy": rules, + } + 
+ r := c.c.NewRequest("PUT", fmt.Sprintf("/v1/sys/policies/acl/%s", name)) + if err := r.SetJSONBody(body); err != nil { + return err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +func (c *Sys) DeletePolicy(name string) error { + r := c.c.NewRequest("DELETE", fmt.Sprintf("/v1/sys/policies/acl/%s", name)) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +type getPoliciesResp struct { + Rules string `json:"rules"` +} + +type listPoliciesResp struct { + Policies []string `json:"policies"` +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_raft.go b/vendor/github.com/hashicorp/vault/api/sys_raft.go new file mode 100644 index 0000000000..043a698010 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_raft.go @@ -0,0 +1,370 @@ +package api + +import ( + "archive/tar" + "compress/gzip" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "sync" + "time" + + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/mitchellh/mapstructure" +) + +var ErrIncompleteSnapshot = errors.New("incomplete snapshot, unable to read SHA256SUMS.sealed file") + +// RaftJoinResponse represents the response of the raft join API +type RaftJoinResponse struct { + Joined bool `json:"joined"` +} + +// RaftJoinRequest represents the parameters consumed by the raft join API +type RaftJoinRequest struct { + AutoJoin string `json:"auto_join"` + AutoJoinScheme string `json:"auto_join_scheme"` + AutoJoinPort uint `json:"auto_join_port"` + LeaderAPIAddr string `json:"leader_api_addr"` + LeaderCACert string `json:"leader_ca_cert"` + LeaderClientCert string `json:"leader_client_cert"` + LeaderClientKey string `json:"leader_client_key"` + Retry bool `json:"retry"` + NonVoter bool `json:"non_voter"` +} + +// AutopilotConfig is used for querying/setting the Autopilot configuration. 
+type AutopilotConfig struct { + CleanupDeadServers bool `json:"cleanup_dead_servers" mapstructure:"cleanup_dead_servers"` + LastContactThreshold time.Duration `json:"last_contact_threshold" mapstructure:"-"` + DeadServerLastContactThreshold time.Duration `json:"dead_server_last_contact_threshold" mapstructure:"-"` + MaxTrailingLogs uint64 `json:"max_trailing_logs" mapstructure:"max_trailing_logs"` + MinQuorum uint `json:"min_quorum" mapstructure:"min_quorum"` + ServerStabilizationTime time.Duration `json:"server_stabilization_time" mapstructure:"-"` +} + +// MarshalJSON makes the autopilot config fields JSON compatible +func (ac *AutopilotConfig) MarshalJSON() ([]byte, error) { + return json.Marshal(map[string]interface{}{ + "cleanup_dead_servers": ac.CleanupDeadServers, + "last_contact_threshold": ac.LastContactThreshold.String(), + "dead_server_last_contact_threshold": ac.DeadServerLastContactThreshold.String(), + "max_trailing_logs": ac.MaxTrailingLogs, + "min_quorum": ac.MinQuorum, + "server_stabilization_time": ac.ServerStabilizationTime.String(), + }) +} + +// UnmarshalJSON parses the autopilot config JSON blob +func (ac *AutopilotConfig) UnmarshalJSON(b []byte) error { + var data interface{} + err := json.Unmarshal(b, &data) + if err != nil { + return err + } + + conf := data.(map[string]interface{}) + if err = mapstructure.WeakDecode(conf, ac); err != nil { + return err + } + if ac.LastContactThreshold, err = parseutil.ParseDurationSecond(conf["last_contact_threshold"]); err != nil { + return err + } + if ac.DeadServerLastContactThreshold, err = parseutil.ParseDurationSecond(conf["dead_server_last_contact_threshold"]); err != nil { + return err + } + if ac.ServerStabilizationTime, err = parseutil.ParseDurationSecond(conf["server_stabilization_time"]); err != nil { + return err + } + return nil +} + +// AutopilotState represents the response of the raft autopilot state API +type AutopilotState struct { + Healthy bool `mapstructure:"healthy"` + FailureTolerance int `mapstructure:"failure_tolerance"` + Servers map[string]*AutopilotServer `mapstructure:"servers"` + Leader string `mapstructure:"leader"` + Voters []string `mapstructure:"voters"` + NonVoters []string `mapstructure:"non_voters"` +} + +// AutopilotServer represents the server blocks in the response of the raft +// autopilot state API. +type AutopilotServer struct { + ID string `mapstructure:"id"` + Name string `mapstructure:"name"` + Address string `mapstructure:"address"` + NodeStatus string `mapstructure:"node_status"` + LastContact string `mapstructure:"last_contact"` + LastTerm uint64 `mapstructure:"last_term"` + LastIndex uint64 `mapstructure:"last_index"` + Healthy bool `mapstructure:"healthy"` + StableSince string `mapstructure:"stable_since"` + Status string `mapstructure:"status"` + Meta map[string]string `mapstructure:"meta"` +} + +// RaftJoin adds the node from which this call is invoked from to the raft +// cluster represented by the leader address in the parameter. 
+func (c *Sys) RaftJoin(opts *RaftJoinRequest) (*RaftJoinResponse, error) { + r := c.c.NewRequest("POST", "/v1/sys/storage/raft/join") + + if err := r.SetJSONBody(opts); err != nil { + return nil, err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RaftJoinResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +// RaftSnapshot invokes the API that takes the snapshot of the raft cluster and +// writes it to the supplied io.Writer. +func (c *Sys) RaftSnapshot(snapWriter io.Writer) error { + r := c.c.NewRequest("GET", "/v1/sys/storage/raft/snapshot") + r.URL.RawQuery = r.Params.Encode() + + req, err := http.NewRequest(http.MethodGet, r.URL.RequestURI(), nil) + if err != nil { + return err + } + + req.URL.User = r.URL.User + req.URL.Scheme = r.URL.Scheme + req.URL.Host = r.URL.Host + req.Host = r.URL.Host + + if r.Headers != nil { + for header, vals := range r.Headers { + for _, val := range vals { + req.Header.Add(header, val) + } + } + } + + if len(r.ClientToken) != 0 { + req.Header.Set(consts.AuthHeaderName, r.ClientToken) + } + + if len(r.WrapTTL) != 0 { + req.Header.Set("X-Vault-Wrap-TTL", r.WrapTTL) + } + + if len(r.MFAHeaderVals) != 0 { + for _, mfaHeaderVal := range r.MFAHeaderVals { + req.Header.Add("X-Vault-MFA", mfaHeaderVal) + } + } + + if r.PolicyOverride { + req.Header.Set("X-Vault-Policy-Override", "true") + } + + // Avoiding the use of RawRequestWithContext which reads the response body + // to determine if the body contains error message. + var result *Response + resp, err := c.c.config.HttpClient.Do(req) + if err != nil { + return err + } + + if resp == nil { + return nil + } + + // Check for a redirect, only allowing for a single redirect + if resp.StatusCode == 301 || resp.StatusCode == 302 || resp.StatusCode == 307 { + // Parse the updated location + respLoc, err := resp.Location() + if err != nil { + return err + } + + // Ensure a protocol downgrade doesn't happen + if req.URL.Scheme == "https" && respLoc.Scheme != "https" { + return fmt.Errorf("redirect would cause protocol downgrade") + } + + // Update the request + req.URL = respLoc + + // Retry the request + resp, err = c.c.config.HttpClient.Do(req) + if err != nil { + return err + } + } + + result = &Response{Response: resp} + if err := result.Error(); err != nil { + return err + } + + // Make sure that the last file in the archive, SHA256SUMS.sealed, is present + // and non-empty. This is to catch cases where the snapshot failed midstream, + // e.g. due to a problem with the seal that prevented encryption of that file. + var wg sync.WaitGroup + wg.Add(1) + var verified bool + + rPipe, wPipe := io.Pipe() + dup := io.TeeReader(resp.Body, wPipe) + go func() { + defer func() { + io.Copy(ioutil.Discard, rPipe) + rPipe.Close() + wg.Done() + }() + + uncompressed, err := gzip.NewReader(rPipe) + if err != nil { + return + } + + t := tar.NewReader(uncompressed) + var h *tar.Header + for { + h, err = t.Next() + if err != nil { + return + } + if h.Name != "SHA256SUMS.sealed" { + continue + } + var b []byte + b, err = ioutil.ReadAll(t) + if err != nil || len(b) == 0 { + return + } + verified = true + return + } + }() + + // Copy bytes from dup to snapWriter. This will have a side effect that + // everything read from dup will be written to wPipe. 
+ _, err = io.Copy(snapWriter, dup) + wPipe.Close() + if err != nil { + rPipe.CloseWithError(err) + return err + } + wg.Wait() + + if !verified { + return ErrIncompleteSnapshot + } + return nil +} + +// RaftSnapshotRestore reads the snapshot from the io.Reader and installs that +// snapshot, returning the cluster to the state defined by it. +func (c *Sys) RaftSnapshotRestore(snapReader io.Reader, force bool) error { + path := "/v1/sys/storage/raft/snapshot" + if force { + path = "/v1/sys/storage/raft/snapshot-force" + } + r := c.c.NewRequest("POST", path) + + r.Body = snapReader + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +// RaftAutopilotState returns the state of the raft cluster as seen by autopilot. +func (c *Sys) RaftAutopilotState() (*AutopilotState, error) { + r := c.c.NewRequest("GET", "/v1/sys/storage/raft/autopilot/state") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if resp != nil { + defer resp.Body.Close() + if resp.StatusCode == 404 { + return nil, nil + } + } + if err != nil { + return nil, err + } + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + var result AutopilotState + err = mapstructure.Decode(secret.Data, &result) + if err != nil { + return nil, err + } + + return &result, err +} + +// RaftAutopilotConfiguration fetches the autopilot config. +func (c *Sys) RaftAutopilotConfiguration() (*AutopilotConfig, error) { + r := c.c.NewRequest("GET", "/v1/sys/storage/raft/autopilot/configuration") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if resp != nil { + defer resp.Body.Close() + if resp.StatusCode == 404 { + return nil, nil + } + } + if err != nil { + return nil, err + } + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil { + return nil, errors.New("data from server response is empty") + } + + var result AutopilotConfig + if err = mapstructure.Decode(secret.Data, &result); err != nil { + return nil, err + } + if result.LastContactThreshold, err = parseutil.ParseDurationSecond(secret.Data["last_contact_threshold"]); err != nil { + return nil, err + } + if result.DeadServerLastContactThreshold, err = parseutil.ParseDurationSecond(secret.Data["dead_server_last_contact_threshold"]); err != nil { + return nil, err + } + if result.ServerStabilizationTime, err = parseutil.ParseDurationSecond(secret.Data["server_stabilization_time"]); err != nil { + return nil, err + } + + return &result, err +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_rekey.go b/vendor/github.com/hashicorp/vault/api/sys_rekey.go new file mode 100644 index 0000000000..153e486c6d --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_rekey.go @@ -0,0 +1,388 @@ +package api + +import ( + "context" + "errors" + + "github.com/mitchellh/mapstructure" +) + +func (c *Sys) RekeyStatus() (*RekeyStatusResponse, error) { + r := c.c.NewRequest("GET", "/v1/sys/rekey/init") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer 
resp.Body.Close() + + var result RekeyStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) RekeyRecoveryKeyStatus() (*RekeyStatusResponse, error) { + r := c.c.NewRequest("GET", "/v1/sys/rekey-recovery-key/init") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) RekeyVerificationStatus() (*RekeyVerificationStatusResponse, error) { + r := c.c.NewRequest("GET", "/v1/sys/rekey/verify") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyVerificationStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) RekeyRecoveryKeyVerificationStatus() (*RekeyVerificationStatusResponse, error) { + r := c.c.NewRequest("GET", "/v1/sys/rekey-recovery-key/verify") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyVerificationStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) RekeyInit(config *RekeyInitRequest) (*RekeyStatusResponse, error) { + r := c.c.NewRequest("PUT", "/v1/sys/rekey/init") + if err := r.SetJSONBody(config); err != nil { + return nil, err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) RekeyRecoveryKeyInit(config *RekeyInitRequest) (*RekeyStatusResponse, error) { + r := c.c.NewRequest("PUT", "/v1/sys/rekey-recovery-key/init") + if err := r.SetJSONBody(config); err != nil { + return nil, err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) RekeyCancel() error { + r := c.c.NewRequest("DELETE", "/v1/sys/rekey/init") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) RekeyRecoveryKeyCancel() error { + r := c.c.NewRequest("DELETE", "/v1/sys/rekey-recovery-key/init") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) RekeyVerificationCancel() error { + r := c.c.NewRequest("DELETE", "/v1/sys/rekey/verify") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) RekeyRecoveryKeyVerificationCancel() error { + r := c.c.NewRequest("DELETE", "/v1/sys/rekey-recovery-key/verify") + + ctx, cancelFunc := 
context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) RekeyUpdate(shard, nonce string) (*RekeyUpdateResponse, error) { + body := map[string]interface{}{ + "key": shard, + "nonce": nonce, + } + + r := c.c.NewRequest("PUT", "/v1/sys/rekey/update") + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyUpdateResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) RekeyRecoveryKeyUpdate(shard, nonce string) (*RekeyUpdateResponse, error) { + body := map[string]interface{}{ + "key": shard, + "nonce": nonce, + } + + r := c.c.NewRequest("PUT", "/v1/sys/rekey-recovery-key/update") + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyUpdateResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) RekeyRetrieveBackup() (*RekeyRetrieveResponse, error) { + r := c.c.NewRequest("GET", "/v1/sys/rekey/backup") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + var result RekeyRetrieveResponse + err = mapstructure.Decode(secret.Data, &result) + if err != nil { + return nil, err + } + + return &result, err +} + +func (c *Sys) RekeyRetrieveRecoveryBackup() (*RekeyRetrieveResponse, error) { + r := c.c.NewRequest("GET", "/v1/sys/rekey/recovery-key-backup") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + var result RekeyRetrieveResponse + err = mapstructure.Decode(secret.Data, &result) + if err != nil { + return nil, err + } + + return &result, err +} + +func (c *Sys) RekeyDeleteBackup() error { + r := c.c.NewRequest("DELETE", "/v1/sys/rekey/backup") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + + return err +} + +func (c *Sys) RekeyDeleteRecoveryBackup() error { + r := c.c.NewRequest("DELETE", "/v1/sys/rekey/recovery-key-backup") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + + return err +} + +func (c *Sys) RekeyVerificationUpdate(shard, nonce string) (*RekeyVerificationUpdateResponse, error) { + body := map[string]interface{}{ + "key": shard, + "nonce": nonce, + } + + r := c.c.NewRequest("PUT", 
"/v1/sys/rekey/verify") + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyVerificationUpdateResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) RekeyRecoveryKeyVerificationUpdate(shard, nonce string) (*RekeyVerificationUpdateResponse, error) { + body := map[string]interface{}{ + "key": shard, + "nonce": nonce, + } + + r := c.c.NewRequest("PUT", "/v1/sys/rekey-recovery-key/verify") + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyVerificationUpdateResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +type RekeyInitRequest struct { + SecretShares int `json:"secret_shares"` + SecretThreshold int `json:"secret_threshold"` + StoredShares int `json:"stored_shares"` + PGPKeys []string `json:"pgp_keys"` + Backup bool + RequireVerification bool `json:"require_verification"` +} + +type RekeyStatusResponse struct { + Nonce string `json:"nonce"` + Started bool `json:"started"` + T int `json:"t"` + N int `json:"n"` + Progress int `json:"progress"` + Required int `json:"required"` + PGPFingerprints []string `json:"pgp_fingerprints"` + Backup bool `json:"backup"` + VerificationRequired bool `json:"verification_required"` + VerificationNonce string `json:"verification_nonce"` +} + +type RekeyUpdateResponse struct { + Nonce string `json:"nonce"` + Complete bool `json:"complete"` + Keys []string `json:"keys"` + KeysB64 []string `json:"keys_base64"` + PGPFingerprints []string `json:"pgp_fingerprints"` + Backup bool `json:"backup"` + VerificationRequired bool `json:"verification_required"` + VerificationNonce string `json:"verification_nonce,omitempty"` +} + +type RekeyRetrieveResponse struct { + Nonce string `json:"nonce" mapstructure:"nonce"` + Keys map[string][]string `json:"keys" mapstructure:"keys"` + KeysB64 map[string][]string `json:"keys_base64" mapstructure:"keys_base64"` +} + +type RekeyVerificationStatusResponse struct { + Nonce string `json:"nonce"` + Started bool `json:"started"` + T int `json:"t"` + N int `json:"n"` + Progress int `json:"progress"` +} + +type RekeyVerificationUpdateResponse struct { + Nonce string `json:"nonce"` + Complete bool `json:"complete"` +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_rotate.go b/vendor/github.com/hashicorp/vault/api/sys_rotate.go new file mode 100644 index 0000000000..e081587b11 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_rotate.go @@ -0,0 +1,91 @@ +package api + +import ( + "context" + "encoding/json" + "errors" + "time" +) + +func (c *Sys) Rotate() error { + r := c.c.NewRequest("POST", "/v1/sys/rotate") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) KeyStatus() (*KeyStatus, error) { + r := c.c.NewRequest("GET", "/v1/sys/key-status") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err 
:= ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + var result KeyStatus + + termRaw, ok := secret.Data["term"] + if !ok { + return nil, errors.New("term not found in response") + } + term, ok := termRaw.(json.Number) + if !ok { + return nil, errors.New("could not convert term to a number") + } + term64, err := term.Int64() + if err != nil { + return nil, err + } + result.Term = int(term64) + + installTimeRaw, ok := secret.Data["install_time"] + if !ok { + return nil, errors.New("install_time not found in response") + } + installTimeStr, ok := installTimeRaw.(string) + if !ok { + return nil, errors.New("could not convert install_time to a string") + } + installTime, err := time.Parse(time.RFC3339Nano, installTimeStr) + if err != nil { + return nil, err + } + result.InstallTime = installTime + + encryptionsRaw, ok := secret.Data["encryptions"] + if ok { + encryptions, ok := encryptionsRaw.(json.Number) + if !ok { + return nil, errors.New("could not convert encryptions to a number") + } + encryptions64, err := encryptions.Int64() + if err != nil { + return nil, err + } + result.Encryptions = int(encryptions64) + } + + return &result, err +} + +type KeyStatus struct { + Term int `json:"term"` + InstallTime time.Time `json:"install_time"` + Encryptions int `json:"encryptions"` +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_seal.go b/vendor/github.com/hashicorp/vault/api/sys_seal.go new file mode 100644 index 0000000000..20d41a28f3 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_seal.go @@ -0,0 +1,87 @@ +package api + +import "context" + +func (c *Sys) SealStatus() (*SealStatusResponse, error) { + r := c.c.NewRequest("GET", "/v1/sys/seal-status") + return sealStatusRequest(c, r) +} + +func (c *Sys) Seal() error { + r := c.c.NewRequest("PUT", "/v1/sys/seal") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) ResetUnsealProcess() (*SealStatusResponse, error) { + body := map[string]interface{}{"reset": true} + + r := c.c.NewRequest("PUT", "/v1/sys/unseal") + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + return sealStatusRequest(c, r) +} + +func (c *Sys) Unseal(shard string) (*SealStatusResponse, error) { + body := map[string]interface{}{"key": shard} + + r := c.c.NewRequest("PUT", "/v1/sys/unseal") + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + return sealStatusRequest(c, r) +} + +func (c *Sys) UnsealWithOptions(opts *UnsealOpts) (*SealStatusResponse, error) { + r := c.c.NewRequest("PUT", "/v1/sys/unseal") + if err := r.SetJSONBody(opts); err != nil { + return nil, err + } + + return sealStatusRequest(c, r) +} + +func sealStatusRequest(c *Sys, r *Request) (*SealStatusResponse, error) { + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result SealStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +type SealStatusResponse struct { + Type string `json:"type"` + Initialized bool `json:"initialized"` + Sealed bool `json:"sealed"` + T int `json:"t"` + N int `json:"n"` + Progress int `json:"progress"` + Nonce string `json:"nonce"` + Version string `json:"version"` + 
Migration bool `json:"migration"` + ClusterName string `json:"cluster_name,omitempty"` + ClusterID string `json:"cluster_id,omitempty"` + RecoverySeal bool `json:"recovery_seal"` + StorageType string `json:"storage_type,omitempty"` +} + +type UnsealOpts struct { + Key string `json:"key"` + Reset bool `json:"reset"` + Migrate bool `json:"migrate"` +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_stepdown.go b/vendor/github.com/hashicorp/vault/api/sys_stepdown.go new file mode 100644 index 0000000000..55dc6fbcb7 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_stepdown.go @@ -0,0 +1,15 @@ +package api + +import "context" + +func (c *Sys) StepDown() error { + r := c.c.NewRequest("PUT", "/v1/sys/step-down") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if resp != nil && resp.Body != nil { + resp.Body.Close() + } + return err +} diff --git a/vendor/github.com/hashicorp/vault/sdk/LICENSE b/vendor/github.com/hashicorp/vault/sdk/LICENSE new file mode 100644 index 0000000000..e87a115e46 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. 
"Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. 
Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. 
Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. 
Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. 
+ diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/certutil/helpers.go b/vendor/github.com/hashicorp/vault/sdk/helper/certutil/helpers.go new file mode 100644 index 0000000000..e8edcfd291 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/certutil/helpers.go @@ -0,0 +1,1035 @@ +package certutil + +import ( + "bytes" + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/pem" + "errors" + "fmt" + "io" + "io/ioutil" + "math/big" + "net" + "net/url" + "strconv" + "strings" + "time" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/vault/sdk/helper/errutil" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/mitchellh/mapstructure" + "golang.org/x/crypto/cryptobyte" + cbasn1 "golang.org/x/crypto/cryptobyte/asn1" +) + +// GetHexFormatted returns the byte buffer formatted in hex with +// the specified separator between bytes. +func GetHexFormatted(buf []byte, sep string) string { + var ret bytes.Buffer + for _, cur := range buf { + if ret.Len() > 0 { + fmt.Fprintf(&ret, sep) + } + fmt.Fprintf(&ret, "%02x", cur) + } + return ret.String() +} + +// ParseHexFormatted returns the raw bytes from a formatted hex string +func ParseHexFormatted(in, sep string) []byte { + var ret bytes.Buffer + var err error + var inBits int64 + inBytes := strings.Split(in, sep) + for _, inByte := range inBytes { + if inBits, err = strconv.ParseInt(inByte, 16, 8); err != nil { + return nil + } + ret.WriteByte(byte(inBits)) + } + return ret.Bytes() +} + +// GetSubjKeyID returns the subject key ID, e.g. the SHA1 sum +// of the marshaled public key +func GetSubjKeyID(privateKey crypto.Signer) ([]byte, error) { + if privateKey == nil { + return nil, errutil.InternalError{Err: "passed-in private key is nil"} + } + + marshaledKey, err := x509.MarshalPKIXPublicKey(privateKey.Public()) + if err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("error marshalling public key: %s", err)} + } + + subjKeyID := sha1.Sum(marshaledKey) + + return subjKeyID[:], nil +} + +// ParsePKIMap takes a map (for instance, the Secret.Data +// returned from the PKI backend) and returns a ParsedCertBundle. +func ParsePKIMap(data map[string]interface{}) (*ParsedCertBundle, error) { + result := &CertBundle{} + err := mapstructure.Decode(data, result) + if err != nil { + return nil, errutil.UserError{Err: err.Error()} + } + + return result.ToParsedCertBundle() +} + +// ParsePKIJSON takes a JSON-encoded string and returns a ParsedCertBundle. +// +// This can be either the output of an +// issue call from the PKI backend or just its data member; or, +// JSON not coming from the PKI backend. +func ParsePKIJSON(input []byte) (*ParsedCertBundle, error) { + result := &CertBundle{} + err := jsonutil.DecodeJSON(input, &result) + + if err == nil { + return result.ToParsedCertBundle() + } + + var secret Secret + err = jsonutil.DecodeJSON(input, &secret) + + if err == nil { + return ParsePKIMap(secret.Data) + } + + return nil, errutil.UserError{Err: "unable to parse out of either secret data or a secret object"} +} + +// ParsePEMBundle takes a string of concatenated PEM-format certificate +// and private key values and decodes/parses them, checking validity along +// the way. The first certificate must be the subject certificate and issuing +// certificates may follow. There must be at most one private key. 
+func ParsePEMBundle(pemBundle string) (*ParsedCertBundle, error) { + if len(pemBundle) == 0 { + return nil, errutil.UserError{Err: "empty pem bundle"} + } + + pemBytes := []byte(pemBundle) + var pemBlock *pem.Block + parsedBundle := &ParsedCertBundle{} + var certPath []*CertBlock + + for len(pemBytes) > 0 { + pemBlock, pemBytes = pem.Decode(pemBytes) + if pemBlock == nil { + return nil, errutil.UserError{Err: "no data found in PEM block"} + } + + if signer, err := x509.ParseECPrivateKey(pemBlock.Bytes); err == nil { + if parsedBundle.PrivateKeyType != UnknownPrivateKey { + return nil, errutil.UserError{Err: "more than one private key given; provide only one private key in the bundle"} + } + parsedBundle.PrivateKeyFormat = ECBlock + parsedBundle.PrivateKeyType = ECPrivateKey + parsedBundle.PrivateKeyBytes = pemBlock.Bytes + parsedBundle.PrivateKey = signer + + } else if signer, err := x509.ParsePKCS1PrivateKey(pemBlock.Bytes); err == nil { + if parsedBundle.PrivateKeyType != UnknownPrivateKey { + return nil, errutil.UserError{Err: "more than one private key given; provide only one private key in the bundle"} + } + parsedBundle.PrivateKeyType = RSAPrivateKey + parsedBundle.PrivateKeyFormat = PKCS1Block + parsedBundle.PrivateKeyBytes = pemBlock.Bytes + parsedBundle.PrivateKey = signer + } else if signer, err := x509.ParsePKCS8PrivateKey(pemBlock.Bytes); err == nil { + parsedBundle.PrivateKeyFormat = PKCS8Block + + if parsedBundle.PrivateKeyType != UnknownPrivateKey { + return nil, errutil.UserError{Err: "More than one private key given; provide only one private key in the bundle"} + } + switch signer := signer.(type) { + case *rsa.PrivateKey: + parsedBundle.PrivateKey = signer + parsedBundle.PrivateKeyType = RSAPrivateKey + parsedBundle.PrivateKeyBytes = pemBlock.Bytes + case *ecdsa.PrivateKey: + parsedBundle.PrivateKey = signer + parsedBundle.PrivateKeyType = ECPrivateKey + parsedBundle.PrivateKeyBytes = pemBlock.Bytes + case ed25519.PrivateKey: + parsedBundle.PrivateKey = signer + parsedBundle.PrivateKeyType = Ed25519PrivateKey + parsedBundle.PrivateKeyBytes = pemBlock.Bytes + } + } else if certificates, err := x509.ParseCertificates(pemBlock.Bytes); err == nil { + certPath = append(certPath, &CertBlock{ + Certificate: certificates[0], + Bytes: pemBlock.Bytes, + }) + } else if x509.IsEncryptedPEMBlock(pemBlock) { + return nil, errutil.UserError{Err: "Encrypted private key given; provide only decrypted private key in the bundle"} + } + } + + for i, certBlock := range certPath { + if i == 0 { + parsedBundle.Certificate = certBlock.Certificate + parsedBundle.CertificateBytes = certBlock.Bytes + } else { + parsedBundle.CAChain = append(parsedBundle.CAChain, certBlock) + } + } + + if err := parsedBundle.Verify(); err != nil { + return nil, errutil.UserError{Err: fmt.Sprintf("verification of parsed bundle failed: %s", err)} + } + + return parsedBundle, nil +} + +// GeneratePrivateKey generates a private key with the specified type and key bits. +func GeneratePrivateKey(keyType string, keyBits int, container ParsedPrivateKeyContainer) error { + return generatePrivateKey(keyType, keyBits, container, nil) +} + +// GeneratePrivateKeyWithRandomSource generates a private key with the specified type and key bits. +// GeneratePrivateKeyWithRandomSource uses randomness from the entropyReader to generate the private key. 
+func GeneratePrivateKeyWithRandomSource(keyType string, keyBits int, container ParsedPrivateKeyContainer, entropyReader io.Reader) error { + return generatePrivateKey(keyType, keyBits, container, entropyReader) +} + +// generatePrivateKey generates a private key with the specified type and key bits. +// generatePrivateKey uses randomness from the entropyReader to generate the private key. +func generatePrivateKey(keyType string, keyBits int, container ParsedPrivateKeyContainer, entropyReader io.Reader) error { + var err error + var privateKeyType PrivateKeyType + var privateKeyBytes []byte + var privateKey crypto.Signer + + var randReader io.Reader = rand.Reader + if entropyReader != nil { + randReader = entropyReader + } + + switch keyType { + case "rsa": + // XXX: there is a false-positive CodeQL path here around keyBits; + // because of a default zero value in the TypeDurationSecond and + // TypeSignedDurationSecond cases of schema.DefaultOrZero(), it + // thinks it is possible to end up with < 2048 bit RSA Key here. + // While this is true for SSH keys, it isn't true for PKI keys + // due to ValidateKeyTypeLength(...) below. While we could close + // the report as a false-positive, enforcing a minimum keyBits size + // here of 2048 would ensure no other paths exist. + if keyBits < 2048 { + return errutil.InternalError{Err: fmt.Sprintf("insecure bit length for RSA private key: %d", keyBits)} + } + privateKeyType = RSAPrivateKey + privateKey, err = rsa.GenerateKey(randReader, keyBits) + if err != nil { + return errutil.InternalError{Err: fmt.Sprintf("error generating RSA private key: %v", err)} + } + privateKeyBytes = x509.MarshalPKCS1PrivateKey(privateKey.(*rsa.PrivateKey)) + case "ec": + privateKeyType = ECPrivateKey + var curve elliptic.Curve + switch keyBits { + case 224: + curve = elliptic.P224() + case 256: + curve = elliptic.P256() + case 384: + curve = elliptic.P384() + case 521: + curve = elliptic.P521() + default: + return errutil.UserError{Err: fmt.Sprintf("unsupported bit length for EC key: %d", keyBits)} + } + privateKey, err = ecdsa.GenerateKey(curve, randReader) + if err != nil { + return errutil.InternalError{Err: fmt.Sprintf("error generating EC private key: %v", err)} + } + privateKeyBytes, err = x509.MarshalECPrivateKey(privateKey.(*ecdsa.PrivateKey)) + if err != nil { + return errutil.InternalError{Err: fmt.Sprintf("error marshalling EC private key: %v", err)} + } + case "ed25519": + privateKeyType = Ed25519PrivateKey + _, privateKey, err = ed25519.GenerateKey(randReader) + if err != nil { + return errutil.InternalError{Err: fmt.Sprintf("error generating ed25519 private key: %v", err)} + } + privateKeyBytes, err = x509.MarshalPKCS8PrivateKey(privateKey.(ed25519.PrivateKey)) + if err != nil { + return errutil.InternalError{Err: fmt.Sprintf("error marshalling Ed25519 private key: %v", err)} + } + default: + return errutil.UserError{Err: fmt.Sprintf("unknown key type: %s", keyType)} + } + + container.SetParsedPrivateKey(privateKey, privateKeyType, privateKeyBytes) + return nil +} + +// GenerateSerialNumber generates a serial number suitable for a certificate +func GenerateSerialNumber() (*big.Int, error) { + return generateSerialNumber(rand.Reader) +} + +// GenerateSerialNumberWithRandomSource generates a serial number suitable +// for a certificate with custom entropy. 
+func GenerateSerialNumberWithRandomSource(randReader io.Reader) (*big.Int, error) { + return generateSerialNumber(randReader) +} + +func generateSerialNumber(randReader io.Reader) (*big.Int, error) { + serial, err := rand.Int(randReader, (&big.Int{}).Exp(big.NewInt(2), big.NewInt(159), nil)) + if err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("error generating serial number: %v", err)} + } + return serial, nil +} + +// ComparePublicKeys compares two public keys and returns true if they match +func ComparePublicKeys(key1Iface, key2Iface crypto.PublicKey) (bool, error) { + switch key1Iface.(type) { + case *rsa.PublicKey: + key1 := key1Iface.(*rsa.PublicKey) + key2, ok := key2Iface.(*rsa.PublicKey) + if !ok { + return false, fmt.Errorf("key types do not match: %T and %T", key1Iface, key2Iface) + } + if key1.N.Cmp(key2.N) != 0 || + key1.E != key2.E { + return false, nil + } + return true, nil + + case *ecdsa.PublicKey: + key1 := key1Iface.(*ecdsa.PublicKey) + key2, ok := key2Iface.(*ecdsa.PublicKey) + if !ok { + return false, fmt.Errorf("key types do not match: %T and %T", key1Iface, key2Iface) + } + if key1.X.Cmp(key2.X) != 0 || + key1.Y.Cmp(key2.Y) != 0 { + return false, nil + } + key1Params := key1.Params() + key2Params := key2.Params() + if key1Params.P.Cmp(key2Params.P) != 0 || + key1Params.N.Cmp(key2Params.N) != 0 || + key1Params.B.Cmp(key2Params.B) != 0 || + key1Params.Gx.Cmp(key2Params.Gx) != 0 || + key1Params.Gy.Cmp(key2Params.Gy) != 0 || + key1Params.BitSize != key2Params.BitSize { + return false, nil + } + return true, nil + case ed25519.PublicKey: + key1 := key1Iface.(ed25519.PublicKey) + key2, ok := key2Iface.(ed25519.PublicKey) + if !ok { + return false, fmt.Errorf("key types do not match: %T and %T", key1Iface, key2Iface) + } + if !key1.Equal(key2) { + return false, nil + } + return true, nil + default: + return false, fmt.Errorf("cannot compare key with type %T", key1Iface) + } +} + +// ParsePublicKeyPEM is used to parse RSA and ECDSA public keys from PEMs +func ParsePublicKeyPEM(data []byte) (interface{}, error) { + block, data := pem.Decode(data) + if block != nil { + var rawKey interface{} + var err error + if rawKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + if cert, err := x509.ParseCertificate(block.Bytes); err == nil { + rawKey = cert.PublicKey + } else { + return nil, err + } + } + + if rsaPublicKey, ok := rawKey.(*rsa.PublicKey); ok { + return rsaPublicKey, nil + } + if ecPublicKey, ok := rawKey.(*ecdsa.PublicKey); ok { + return ecPublicKey, nil + } + if edPublicKey, ok := rawKey.(ed25519.PublicKey); ok { + return edPublicKey, nil + } + } + + return nil, errors.New("data does not contain any valid public keys") +} + +// addPolicyIdentifiers adds certificate policies extension +// +func AddPolicyIdentifiers(data *CreationBundle, certTemplate *x509.Certificate) { + for _, oidstr := range data.Params.PolicyIdentifiers { + oid, err := StringToOid(oidstr) + if err == nil { + certTemplate.PolicyIdentifiers = append(certTemplate.PolicyIdentifiers, oid) + } + } +} + +// addExtKeyUsageOids adds custom extended key usage OIDs to certificate +func AddExtKeyUsageOids(data *CreationBundle, certTemplate *x509.Certificate) { + for _, oidstr := range data.Params.ExtKeyUsageOIDs { + oid, err := StringToOid(oidstr) + if err == nil { + certTemplate.UnknownExtKeyUsage = append(certTemplate.UnknownExtKeyUsage, oid) + } + } +} + +func HandleOtherCSRSANs(in *x509.CertificateRequest, sans map[string][]string) error { + certTemplate := &x509.Certificate{ + 
DNSNames: in.DNSNames, + IPAddresses: in.IPAddresses, + EmailAddresses: in.EmailAddresses, + URIs: in.URIs, + } + if err := HandleOtherSANs(certTemplate, sans); err != nil { + return err + } + if len(certTemplate.ExtraExtensions) > 0 { + for _, v := range certTemplate.ExtraExtensions { + in.ExtraExtensions = append(in.ExtraExtensions, v) + } + } + return nil +} + +func HandleOtherSANs(in *x509.Certificate, sans map[string][]string) error { + // If other SANs is empty we return which causes normal Go stdlib parsing + // of the other SAN types + if len(sans) == 0 { + return nil + } + + var rawValues []asn1.RawValue + + // We need to generate an IMPLICIT sequence for compatibility with OpenSSL + // -- it's an open question what the default for RFC 5280 actually is, see + // https://github.com/openssl/openssl/issues/5091 -- so we have to use + // cryptobyte because using the asn1 package's marshaling always produces + // an EXPLICIT sequence. Note that asn1 is way too magical according to + // agl, and cryptobyte is modeled after the CBB/CBS bits that agl put into + // boringssl. + for oid, vals := range sans { + for _, val := range vals { + var b cryptobyte.Builder + oidStr, err := StringToOid(oid) + if err != nil { + return err + } + b.AddASN1ObjectIdentifier(oidStr) + b.AddASN1(cbasn1.Tag(0).ContextSpecific().Constructed(), func(b *cryptobyte.Builder) { + b.AddASN1(cbasn1.UTF8String, func(b *cryptobyte.Builder) { + b.AddBytes([]byte(val)) + }) + }) + m, err := b.Bytes() + if err != nil { + return err + } + rawValues = append(rawValues, asn1.RawValue{Tag: 0, Class: 2, IsCompound: true, Bytes: m}) + } + } + + // If other SANs is empty we return which causes normal Go stdlib parsing + // of the other SAN types + if len(rawValues) == 0 { + return nil + } + + // Append any existing SANs, sans marshalling + rawValues = append(rawValues, marshalSANs(in.DNSNames, in.EmailAddresses, in.IPAddresses, in.URIs)...) + + // Marshal and add to ExtraExtensions + ext := pkix.Extension{ + // This is the defined OID for subjectAltName + Id: asn1.ObjectIdentifier{2, 5, 29, 17}, + } + var err error + ext.Value, err = asn1.Marshal(rawValues) + if err != nil { + return err + } + in.ExtraExtensions = append(in.ExtraExtensions, ext) + + return nil +} + +// Note: Taken from the Go source code since it's not public, and used in the +// modified function below (which also uses these consts upstream) +const ( + nameTypeEmail = 1 + nameTypeDNS = 2 + nameTypeURI = 6 + nameTypeIP = 7 +) + +// Note: Taken from the Go source code since it's not public, plus changed to not marshal +// marshalSANs marshals a list of addresses into a the contents of an X.509 +// SubjectAlternativeName extension. +func marshalSANs(dnsNames, emailAddresses []string, ipAddresses []net.IP, uris []*url.URL) []asn1.RawValue { + var rawValues []asn1.RawValue + for _, name := range dnsNames { + rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeDNS, Class: 2, Bytes: []byte(name)}) + } + for _, email := range emailAddresses { + rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeEmail, Class: 2, Bytes: []byte(email)}) + } + for _, rawIP := range ipAddresses { + // If possible, we always want to encode IPv4 addresses in 4 bytes. 
+ ip := rawIP.To4() + if ip == nil { + ip = rawIP + } + rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeIP, Class: 2, Bytes: ip}) + } + for _, uri := range uris { + rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeURI, Class: 2, Bytes: []byte(uri.String())}) + } + return rawValues +} + +func StringToOid(in string) (asn1.ObjectIdentifier, error) { + split := strings.Split(in, ".") + ret := make(asn1.ObjectIdentifier, 0, len(split)) + for _, v := range split { + i, err := strconv.Atoi(v) + if err != nil { + return nil, err + } + ret = append(ret, i) + } + return asn1.ObjectIdentifier(ret), nil +} + +func ValidateSignatureLength(keyBits int) error { + switch keyBits { + case 256: + case 384: + case 512: + default: + return fmt.Errorf("unsupported signature algorithm: %d", keyBits) + } + return nil +} + +func ValidateKeyTypeLength(keyType string, keyBits int) error { + switch keyType { + case "rsa": + switch keyBits { + case 2048: + case 3072: + case 4096: + case 8192: + default: + return fmt.Errorf("unsupported bit length for RSA key: %d", keyBits) + } + case "ec": + switch keyBits { + case 224: + case 256: + case 384: + case 521: + default: + return fmt.Errorf("unsupported bit length for EC key: %d", keyBits) + } + case "any", "ed25519": + default: + return fmt.Errorf("unknown key type %s", keyType) + } + + return nil +} + +// CreateCertificate uses CreationBundle and the default rand.Reader to +// generate a cert/keypair. +func CreateCertificate(data *CreationBundle) (*ParsedCertBundle, error) { + return createCertificate(data, rand.Reader) +} + +// CreateCertificateWithRandomSource uses CreationBundle and a custom +// io.Reader for randomness to generate a cert/keypair. +func CreateCertificateWithRandomSource(data *CreationBundle, randReader io.Reader) (*ParsedCertBundle, error) { + return createCertificate(data, randReader) +} + +func createCertificate(data *CreationBundle, randReader io.Reader) (*ParsedCertBundle, error) { + var err error + result := &ParsedCertBundle{} + + serialNumber, err := GenerateSerialNumber() + if err != nil { + return nil, err + } + + if err := generatePrivateKey(data.Params.KeyType, + data.Params.KeyBits, + result, randReader); err != nil { + return nil, err + } + + subjKeyID, err := GetSubjKeyID(result.PrivateKey) + if err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("error getting subject key ID: %s", err)} + } + + certTemplate := &x509.Certificate{ + SerialNumber: serialNumber, + NotBefore: time.Now().Add(-30 * time.Second), + NotAfter: data.Params.NotAfter, + IsCA: false, + SubjectKeyId: subjKeyID, + Subject: data.Params.Subject, + DNSNames: data.Params.DNSNames, + EmailAddresses: data.Params.EmailAddresses, + IPAddresses: data.Params.IPAddresses, + URIs: data.Params.URIs, + } + if data.Params.NotBeforeDuration > 0 { + certTemplate.NotBefore = time.Now().Add(-1 * data.Params.NotBeforeDuration) + } + + if err := HandleOtherSANs(certTemplate, data.Params.OtherSANs); err != nil { + return nil, errutil.InternalError{Err: errwrap.Wrapf("error marshaling other SANs: {{err}}", err).Error()} + } + + // Add this before calling addKeyUsages + if data.SigningBundle == nil { + certTemplate.IsCA = true + } else if data.Params.BasicConstraintsValidForNonCA { + certTemplate.BasicConstraintsValid = true + certTemplate.IsCA = false + } + + // This will only be filled in from the generation paths + if len(data.Params.PermittedDNSDomains) > 0 { + certTemplate.PermittedDNSDomains = data.Params.PermittedDNSDomains + 
certTemplate.PermittedDNSDomainsCritical = true + } + + AddPolicyIdentifiers(data, certTemplate) + + AddKeyUsages(data, certTemplate) + + AddExtKeyUsageOids(data, certTemplate) + + certTemplate.IssuingCertificateURL = data.Params.URLs.IssuingCertificates + certTemplate.CRLDistributionPoints = data.Params.URLs.CRLDistributionPoints + certTemplate.OCSPServer = data.Params.URLs.OCSPServers + + var certBytes []byte + if data.SigningBundle != nil { + switch data.SigningBundle.PrivateKeyType { + case RSAPrivateKey: + switch data.Params.SignatureBits { + case 256: + certTemplate.SignatureAlgorithm = x509.SHA256WithRSA + case 384: + certTemplate.SignatureAlgorithm = x509.SHA384WithRSA + case 512: + certTemplate.SignatureAlgorithm = x509.SHA512WithRSA + } + case Ed25519PrivateKey: + certTemplate.SignatureAlgorithm = x509.PureEd25519 + case ECPrivateKey: + switch data.Params.SignatureBits { + case 256: + certTemplate.SignatureAlgorithm = x509.ECDSAWithSHA256 + case 384: + certTemplate.SignatureAlgorithm = x509.ECDSAWithSHA384 + case 512: + certTemplate.SignatureAlgorithm = x509.ECDSAWithSHA512 + } + } + + caCert := data.SigningBundle.Certificate + certTemplate.AuthorityKeyId = caCert.SubjectKeyId + + certBytes, err = x509.CreateCertificate(randReader, certTemplate, caCert, result.PrivateKey.Public(), data.SigningBundle.PrivateKey) + } else { + // Creating a self-signed root + if data.Params.MaxPathLength == 0 { + certTemplate.MaxPathLen = 0 + certTemplate.MaxPathLenZero = true + } else { + certTemplate.MaxPathLen = data.Params.MaxPathLength + } + + switch data.Params.KeyType { + case "rsa": + switch data.Params.SignatureBits { + case 256: + certTemplate.SignatureAlgorithm = x509.SHA256WithRSA + case 384: + certTemplate.SignatureAlgorithm = x509.SHA384WithRSA + case 512: + certTemplate.SignatureAlgorithm = x509.SHA512WithRSA + } + case "ed25519": + certTemplate.SignatureAlgorithm = x509.PureEd25519 + case "ec": + switch data.Params.SignatureBits { + case 256: + certTemplate.SignatureAlgorithm = x509.ECDSAWithSHA256 + case 384: + certTemplate.SignatureAlgorithm = x509.ECDSAWithSHA384 + case 512: + certTemplate.SignatureAlgorithm = x509.ECDSAWithSHA512 + } + } + + certTemplate.AuthorityKeyId = subjKeyID + certTemplate.BasicConstraintsValid = true + certBytes, err = x509.CreateCertificate(randReader, certTemplate, certTemplate, result.PrivateKey.Public(), result.PrivateKey) + } + + if err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("unable to create certificate: %s", err)} + } + + result.CertificateBytes = certBytes + result.Certificate, err = x509.ParseCertificate(certBytes) + if err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("unable to parse created certificate: %s", err)} + } + + if data.SigningBundle != nil { + if len(data.SigningBundle.Certificate.AuthorityKeyId) > 0 && + !bytes.Equal(data.SigningBundle.Certificate.AuthorityKeyId, data.SigningBundle.Certificate.SubjectKeyId) { + + result.CAChain = []*CertBlock{ + { + Certificate: data.SigningBundle.Certificate, + Bytes: data.SigningBundle.CertificateBytes, + }, + } + result.CAChain = append(result.CAChain, data.SigningBundle.CAChain...) + } + } + + return result, nil +} + +var oidExtensionBasicConstraints = []int{2, 5, 29, 19} + +// CreateCSR creates a CSR with the default rand.Reader to +// generate a cert/keypair. This is currently only meant +// for use when generating an intermediate certificate. 
+func CreateCSR(data *CreationBundle, addBasicConstraints bool) (*ParsedCSRBundle, error) { + return createCSR(data, addBasicConstraints, rand.Reader) +} + +// CreateCSRWithRandomSource creates a CSR with a custom io.Reader +// for randomness to generate a cert/keypair. +func CreateCSRWithRandomSource(data *CreationBundle, addBasicConstraints bool, randReader io.Reader) (*ParsedCSRBundle, error) { + return createCSR(data, addBasicConstraints, randReader) +} + +func createCSR(data *CreationBundle, addBasicConstraints bool, randReader io.Reader) (*ParsedCSRBundle, error) { + var err error + result := &ParsedCSRBundle{} + + if err := generatePrivateKey(data.Params.KeyType, + data.Params.KeyBits, + result, randReader); err != nil { + return nil, err + } + + // Like many root CAs, other information is ignored + csrTemplate := &x509.CertificateRequest{ + Subject: data.Params.Subject, + DNSNames: data.Params.DNSNames, + EmailAddresses: data.Params.EmailAddresses, + IPAddresses: data.Params.IPAddresses, + URIs: data.Params.URIs, + } + + if err := HandleOtherCSRSANs(csrTemplate, data.Params.OtherSANs); err != nil { + return nil, errutil.InternalError{Err: errwrap.Wrapf("error marshaling other SANs: {{err}}", err).Error()} + } + + if addBasicConstraints { + type basicConstraints struct { + IsCA bool `asn1:"optional"` + MaxPathLen int `asn1:"optional,default:-1"` + } + val, err := asn1.Marshal(basicConstraints{IsCA: true, MaxPathLen: -1}) + if err != nil { + return nil, errutil.InternalError{Err: errwrap.Wrapf("error marshaling basic constraints: {{err}}", err).Error()} + } + ext := pkix.Extension{ + Id: oidExtensionBasicConstraints, + Value: val, + Critical: true, + } + csrTemplate.ExtraExtensions = append(csrTemplate.ExtraExtensions, ext) + } + + switch data.Params.KeyType { + case "rsa": + csrTemplate.SignatureAlgorithm = x509.SHA256WithRSA + case "ec": + csrTemplate.SignatureAlgorithm = x509.ECDSAWithSHA256 + case "ed25519": + csrTemplate.SignatureAlgorithm = x509.PureEd25519 + } + + csr, err := x509.CreateCertificateRequest(randReader, csrTemplate, result.PrivateKey) + if err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("unable to create certificate: %s", err)} + } + + result.CSRBytes = csr + result.CSR, err = x509.ParseCertificateRequest(csr) + if err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("unable to parse created certificate: %v", err)} + } + + return result, nil +} + +// SignCertificate performs the heavy lifting +// of generating a certificate from a CSR. +// Returns a ParsedCertBundle sans private keys. +func SignCertificate(data *CreationBundle) (*ParsedCertBundle, error) { + return signCertificate(data, rand.Reader) +} + +// SignCertificateWithRandomSource generates a certificate +// from a CSR, using custom randomness from the randReader. +// Returns a ParsedCertBundle sans private keys. 
+func SignCertificateWithRandomSource(data *CreationBundle, randReader io.Reader) (*ParsedCertBundle, error) { + return signCertificate(data, randReader) +} + +func signCertificate(data *CreationBundle, randReader io.Reader) (*ParsedCertBundle, error) { + switch { + case data == nil: + return nil, errutil.UserError{Err: "nil data bundle given to signCertificate"} + case data.Params == nil: + return nil, errutil.UserError{Err: "nil parameters given to signCertificate"} + case data.SigningBundle == nil: + return nil, errutil.UserError{Err: "nil signing bundle given to signCertificate"} + case data.CSR == nil: + return nil, errutil.UserError{Err: "nil csr given to signCertificate"} + } + + err := data.CSR.CheckSignature() + if err != nil { + return nil, errutil.UserError{Err: "request signature invalid"} + } + + result := &ParsedCertBundle{} + + serialNumber, err := GenerateSerialNumber() + if err != nil { + return nil, err + } + + marshaledKey, err := x509.MarshalPKIXPublicKey(data.CSR.PublicKey) + if err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("error marshalling public key: %s", err)} + } + subjKeyID := sha1.Sum(marshaledKey) + + caCert := data.SigningBundle.Certificate + + certTemplate := &x509.Certificate{ + SerialNumber: serialNumber, + Subject: data.Params.Subject, + NotBefore: time.Now().Add(-30 * time.Second), + NotAfter: data.Params.NotAfter, + SubjectKeyId: subjKeyID[:], + AuthorityKeyId: caCert.SubjectKeyId, + } + if data.Params.NotBeforeDuration > 0 { + certTemplate.NotBefore = time.Now().Add(-1 * data.Params.NotBeforeDuration) + } + + switch data.SigningBundle.PrivateKeyType { + case RSAPrivateKey: + switch data.Params.SignatureBits { + case 256: + certTemplate.SignatureAlgorithm = x509.SHA256WithRSA + case 384: + certTemplate.SignatureAlgorithm = x509.SHA384WithRSA + case 512: + certTemplate.SignatureAlgorithm = x509.SHA512WithRSA + } + case ECPrivateKey: + switch data.Params.SignatureBits { + case 256: + certTemplate.SignatureAlgorithm = x509.ECDSAWithSHA256 + case 384: + certTemplate.SignatureAlgorithm = x509.ECDSAWithSHA384 + case 512: + certTemplate.SignatureAlgorithm = x509.ECDSAWithSHA512 + } + } + + if data.Params.UseCSRValues { + certTemplate.Subject = data.CSR.Subject + certTemplate.Subject.ExtraNames = certTemplate.Subject.Names + + certTemplate.DNSNames = data.CSR.DNSNames + certTemplate.EmailAddresses = data.CSR.EmailAddresses + certTemplate.IPAddresses = data.CSR.IPAddresses + certTemplate.URIs = data.CSR.URIs + + for _, name := range data.CSR.Extensions { + if !name.Id.Equal(oidExtensionBasicConstraints) { + certTemplate.ExtraExtensions = append(certTemplate.ExtraExtensions, name) + } + } + + } else { + certTemplate.DNSNames = data.Params.DNSNames + certTemplate.EmailAddresses = data.Params.EmailAddresses + certTemplate.IPAddresses = data.Params.IPAddresses + certTemplate.URIs = data.Params.URIs + } + + if err := HandleOtherSANs(certTemplate, data.Params.OtherSANs); err != nil { + return nil, errutil.InternalError{Err: errwrap.Wrapf("error marshaling other SANs: {{err}}", err).Error()} + } + + AddPolicyIdentifiers(data, certTemplate) + + AddKeyUsages(data, certTemplate) + + AddExtKeyUsageOids(data, certTemplate) + + var certBytes []byte + + certTemplate.IssuingCertificateURL = data.Params.URLs.IssuingCertificates + certTemplate.CRLDistributionPoints = data.Params.URLs.CRLDistributionPoints + certTemplate.OCSPServer = data.SigningBundle.URLs.OCSPServers + + if data.Params.IsCA { + certTemplate.BasicConstraintsValid = true + certTemplate.IsCA = 
true + + if data.SigningBundle.Certificate.MaxPathLen == 0 && + data.SigningBundle.Certificate.MaxPathLenZero { + return nil, errutil.UserError{Err: "signing certificate has a max path length of zero, and cannot issue further CA certificates"} + } + + certTemplate.MaxPathLen = data.Params.MaxPathLength + if certTemplate.MaxPathLen == 0 { + certTemplate.MaxPathLenZero = true + } + } else if data.Params.BasicConstraintsValidForNonCA { + certTemplate.BasicConstraintsValid = true + certTemplate.IsCA = false + } + + if len(data.Params.PermittedDNSDomains) > 0 { + certTemplate.PermittedDNSDomains = data.Params.PermittedDNSDomains + certTemplate.PermittedDNSDomainsCritical = true + } + + certBytes, err = x509.CreateCertificate(randReader, certTemplate, caCert, data.CSR.PublicKey, data.SigningBundle.PrivateKey) + + if err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("unable to create certificate: %s", err)} + } + + result.CertificateBytes = certBytes + result.Certificate, err = x509.ParseCertificate(certBytes) + if err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("unable to parse created certificate: %s", err)} + } + + result.CAChain = data.SigningBundle.GetCAChain() + + return result, nil +} + +func NewCertPool(reader io.Reader) (*x509.CertPool, error) { + pemBlock, err := ioutil.ReadAll(reader) + if err != nil { + return nil, err + } + certs, err := parseCertsPEM(pemBlock) + if err != nil { + return nil, fmt.Errorf("error reading certs: %s", err) + } + pool := x509.NewCertPool() + for _, cert := range certs { + pool.AddCert(cert) + } + return pool, nil +} + +// parseCertsPEM returns the x509.Certificates contained in the given PEM-encoded byte array +// Returns an error if a certificate could not be parsed, or if the data does not contain any certificates +func parseCertsPEM(pemCerts []byte) ([]*x509.Certificate, error) { + ok := false + certs := []*x509.Certificate{} + for len(pemCerts) > 0 { + var block *pem.Block + block, pemCerts = pem.Decode(pemCerts) + if block == nil { + break + } + // Only use PEM "CERTIFICATE" blocks without extra headers + if block.Type != "CERTIFICATE" || len(block.Headers) != 0 { + continue + } + + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return certs, err + } + + certs = append(certs, cert) + ok = true + } + + if !ok { + return certs, errors.New("data does not contain any valid RSA or ECDSA certificates") + } + return certs, nil +} + +// GetPublicKeySize returns the key size in bits for a given arbitrary crypto.PublicKey +// Returns -1 for an unsupported key type. +func GetPublicKeySize(key crypto.PublicKey) int { + if key, ok := key.(*rsa.PublicKey); ok { + return key.Size() * 8 + } + if key, ok := key.(*ecdsa.PublicKey); ok { + return key.Params().BitSize + } + if key, ok := key.(ed25519.PublicKey); ok { + return len(key) * 8 + } + if key, ok := key.(dsa.PublicKey); ok { + return key.Y.BitLen() + } + + return -1 +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/certutil/types.go b/vendor/github.com/hashicorp/vault/sdk/helper/certutil/types.go new file mode 100644 index 0000000000..a557b5d73d --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/certutil/types.go @@ -0,0 +1,788 @@ +// Package certutil contains helper functions that are mostly used +// with the PKI backend but can be generally useful. Functionality +// includes helpers for converting a certificate/private key bundle +// between DER and PEM, printing certificate serial numbers, and more. 
+// +// Functionality specific to the PKI backend includes some types +// and helper methods to make requesting certificates from the +// backend easy. +package certutil + +import ( + "bytes" + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "fmt" + "math/big" + "net" + "net/url" + "strings" + "time" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/vault/sdk/helper/errutil" +) + +const ( + PrivateKeyTypeP521 = "p521" +) + +// This can be one of a few key types so the different params may or may not be filled +type ClusterKeyParams struct { + Type string `json:"type" structs:"type" mapstructure:"type"` + X *big.Int `json:"x" structs:"x" mapstructure:"x"` + Y *big.Int `json:"y" structs:"y" mapstructure:"y"` + D *big.Int `json:"d" structs:"d" mapstructure:"d"` +} + +// Secret is used to attempt to unmarshal a Vault secret +// JSON response, as a convenience +type Secret struct { + Data map[string]interface{} `json:"data"` +} + +// PrivateKeyType holds a string representation of the type of private key (ec +// or rsa) referenced in CertBundle and ParsedCertBundle. This uses colloquial +// names rather than official names, to eliminate confusion +type PrivateKeyType string + +// Well-known PrivateKeyTypes +const ( + UnknownPrivateKey PrivateKeyType = "" + RSAPrivateKey PrivateKeyType = "rsa" + ECPrivateKey PrivateKeyType = "ec" + Ed25519PrivateKey PrivateKeyType = "ed25519" +) + +// TLSUsage controls whether the intended usage of a *tls.Config +// returned from ParsedCertBundle.getTLSConfig is for server use, +// client use, or both, which affects which values are set +type TLSUsage int + +// Well-known TLSUsage types +const ( + TLSUnknown TLSUsage = 0 + TLSServer TLSUsage = 1 << iota + TLSClient +) + +// BlockType indicates the serialization format of the key +type BlockType string + +// Well-known formats +const ( + PKCS1Block BlockType = "RSA PRIVATE KEY" + PKCS8Block BlockType = "PRIVATE KEY" + ECBlock BlockType = "EC PRIVATE KEY" +) + +// ParsedPrivateKeyContainer allows common key setting for certs and CSRs +type ParsedPrivateKeyContainer interface { + SetParsedPrivateKey(crypto.Signer, PrivateKeyType, []byte) +} + +// CertBlock contains the DER-encoded certificate and the PEM +// block's byte array +type CertBlock struct { + Certificate *x509.Certificate + Bytes []byte +} + +// CertBundle contains a key type, a PEM-encoded private key, +// a PEM-encoded certificate, and a string-encoded serial number, +// returned from a successful Issue request +type CertBundle struct { + PrivateKeyType PrivateKeyType `json:"private_key_type" structs:"private_key_type" mapstructure:"private_key_type"` + Certificate string `json:"certificate" structs:"certificate" mapstructure:"certificate"` + IssuingCA string `json:"issuing_ca" structs:"issuing_ca" mapstructure:"issuing_ca"` + CAChain []string `json:"ca_chain" structs:"ca_chain" mapstructure:"ca_chain"` + PrivateKey string `json:"private_key" structs:"private_key" mapstructure:"private_key"` + SerialNumber string `json:"serial_number" structs:"serial_number" mapstructure:"serial_number"` +} + +// ParsedCertBundle contains a key type, a DER-encoded private key, +// and a DER-encoded certificate +type ParsedCertBundle struct { + PrivateKeyType PrivateKeyType + PrivateKeyFormat BlockType + PrivateKeyBytes []byte + PrivateKey crypto.Signer + CertificateBytes []byte + Certificate *x509.Certificate + CAChain []*CertBlock +} + +// CSRBundle contains a key type, a 
PEM-encoded private key, +// and a PEM-encoded CSR +type CSRBundle struct { + PrivateKeyType PrivateKeyType `json:"private_key_type" structs:"private_key_type" mapstructure:"private_key_type"` + CSR string `json:"csr" structs:"csr" mapstructure:"csr"` + PrivateKey string `json:"private_key" structs:"private_key" mapstructure:"private_key"` +} + +// ParsedCSRBundle contains a key type, a DER-encoded private key, +// and a DER-encoded certificate request +type ParsedCSRBundle struct { + PrivateKeyType PrivateKeyType + PrivateKeyBytes []byte + PrivateKey crypto.Signer + CSRBytes []byte + CSR *x509.CertificateRequest +} + +// ToPEMBundle converts a string-based certificate bundle +// to a PEM-based string certificate bundle in trust path +// order, leaf certificate first +func (c *CertBundle) ToPEMBundle() string { + var result []string + + if len(c.PrivateKey) > 0 { + result = append(result, c.PrivateKey) + } + if len(c.Certificate) > 0 { + result = append(result, c.Certificate) + } + if len(c.CAChain) > 0 { + result = append(result, c.CAChain...) + } + + return strings.Join(result, "\n") +} + +// ToParsedCertBundle converts a string-based certificate bundle +// to a byte-based raw certificate bundle +func (c *CertBundle) ToParsedCertBundle() (*ParsedCertBundle, error) { + result := &ParsedCertBundle{} + var err error + var pemBlock *pem.Block + + if len(c.PrivateKey) > 0 { + pemBlock, _ = pem.Decode([]byte(c.PrivateKey)) + if pemBlock == nil { + return nil, errutil.UserError{Err: "Error decoding private key from cert bundle"} + } + + result.PrivateKeyBytes = pemBlock.Bytes + result.PrivateKeyFormat = BlockType(strings.TrimSpace(pemBlock.Type)) + + switch result.PrivateKeyFormat { + case ECBlock: + result.PrivateKeyType, c.PrivateKeyType = ECPrivateKey, ECPrivateKey + case PKCS1Block: + c.PrivateKeyType, result.PrivateKeyType = RSAPrivateKey, RSAPrivateKey + case PKCS8Block: + t, err := getPKCS8Type(pemBlock.Bytes) + if err != nil { + return nil, errutil.UserError{Err: fmt.Sprintf("Error getting key type from pkcs#8: %v", err)} + } + result.PrivateKeyType = t + switch t { + case ECPrivateKey: + c.PrivateKeyType = ECPrivateKey + case RSAPrivateKey: + c.PrivateKeyType = RSAPrivateKey + case Ed25519PrivateKey: + c.PrivateKeyType = Ed25519PrivateKey + } + default: + return nil, errutil.UserError{Err: fmt.Sprintf("Unsupported key block type: %s", pemBlock.Type)} + } + + result.PrivateKey, err = result.getSigner() + if err != nil { + return nil, errutil.UserError{Err: fmt.Sprintf("Error getting signer: %s", err)} + } + } + + if len(c.Certificate) > 0 { + pemBlock, _ = pem.Decode([]byte(c.Certificate)) + if pemBlock == nil { + return nil, errutil.UserError{Err: "Error decoding certificate from cert bundle"} + } + result.CertificateBytes = pemBlock.Bytes + result.Certificate, err = x509.ParseCertificate(result.CertificateBytes) + if err != nil { + return nil, errutil.UserError{Err: fmt.Sprintf("Error encountered parsing certificate bytes from raw bundle: %v", err)} + } + } + switch { + case len(c.CAChain) > 0: + for _, cert := range c.CAChain { + pemBlock, _ := pem.Decode([]byte(cert)) + if pemBlock == nil { + return nil, errutil.UserError{Err: "Error decoding certificate from cert bundle"} + } + + parsedCert, err := x509.ParseCertificate(pemBlock.Bytes) + if err != nil { + return nil, errutil.UserError{Err: fmt.Sprintf("Error encountered parsing certificate bytes from raw bundle via CA chain: %v", err)} + } + + certBlock := &CertBlock{ + Bytes: pemBlock.Bytes, + Certificate: parsedCert, + } + 
result.CAChain = append(result.CAChain, certBlock) + } + + // For backwards compatibility + case len(c.IssuingCA) > 0: + pemBlock, _ = pem.Decode([]byte(c.IssuingCA)) + if pemBlock == nil { + return nil, errutil.UserError{Err: "Error decoding ca certificate from cert bundle"} + } + + parsedCert, err := x509.ParseCertificate(pemBlock.Bytes) + if err != nil { + return nil, errutil.UserError{Err: fmt.Sprintf("Error encountered parsing certificate bytes from raw bundle via issuing CA: %v", err)} + } + + certBlock := &CertBlock{ + Bytes: pemBlock.Bytes, + Certificate: parsedCert, + } + result.CAChain = append(result.CAChain, certBlock) + } + + // Populate if it isn't there already + if len(c.SerialNumber) == 0 && len(c.Certificate) > 0 { + c.SerialNumber = GetHexFormatted(result.Certificate.SerialNumber.Bytes(), ":") + } + + return result, nil +} + +// ToCertBundle converts a byte-based raw DER certificate bundle +// to a PEM-based string certificate bundle +func (p *ParsedCertBundle) ToCertBundle() (*CertBundle, error) { + result := &CertBundle{} + block := pem.Block{ + Type: "CERTIFICATE", + } + + if p.Certificate != nil { + result.SerialNumber = strings.TrimSpace(GetHexFormatted(p.Certificate.SerialNumber.Bytes(), ":")) + } + + if p.CertificateBytes != nil && len(p.CertificateBytes) > 0 { + block.Bytes = p.CertificateBytes + result.Certificate = strings.TrimSpace(string(pem.EncodeToMemory(&block))) + } + + for _, caCert := range p.CAChain { + block.Bytes = caCert.Bytes + certificate := strings.TrimSpace(string(pem.EncodeToMemory(&block))) + + result.CAChain = append(result.CAChain, certificate) + } + + if p.PrivateKeyBytes != nil && len(p.PrivateKeyBytes) > 0 { + block.Type = string(p.PrivateKeyFormat) + block.Bytes = p.PrivateKeyBytes + result.PrivateKeyType = p.PrivateKeyType + + // Handle bundle not parsed by us + if block.Type == "" { + switch p.PrivateKeyType { + case ECPrivateKey: + block.Type = string(ECBlock) + case RSAPrivateKey: + block.Type = string(PKCS1Block) + case Ed25519PrivateKey: + block.Type = string(PKCS8Block) + } + } + + result.PrivateKey = strings.TrimSpace(string(pem.EncodeToMemory(&block))) + } + + return result, nil +} + +// Verify checks if the parsed bundle is valid. It validates the public +// key of the certificate to the private key and checks the certificate trust +// chain for path issues. 
+func (p *ParsedCertBundle) Verify() error { + // If private key exists, check if it matches the public key of cert + if p.PrivateKey != nil && p.Certificate != nil { + equal, err := ComparePublicKeys(p.Certificate.PublicKey, p.PrivateKey.Public()) + if err != nil { + return errwrap.Wrapf("could not compare public and private keys: {{err}}", err) + } + if !equal { + return fmt.Errorf("public key of certificate does not match private key") + } + } + + certPath := p.GetCertificatePath() + if len(certPath) > 1 { + for i, caCert := range certPath[1:] { + if !caCert.Certificate.IsCA { + return fmt.Errorf("certificate %d of certificate chain is not a certificate authority", i+1) + } + if !bytes.Equal(certPath[i].Certificate.AuthorityKeyId, caCert.Certificate.SubjectKeyId) { + return fmt.Errorf("certificate %d of certificate chain ca trust path is incorrect (%q/%q) (%X/%X)", + i+1, + certPath[i].Certificate.Subject.CommonName, caCert.Certificate.Subject.CommonName, + certPath[i].Certificate.AuthorityKeyId, caCert.Certificate.SubjectKeyId) + } + } + } + + return nil +} + +// GetCertificatePath returns a slice of certificates making up a path, pulled +// from the parsed cert bundle +func (p *ParsedCertBundle) GetCertificatePath() []*CertBlock { + var certPath []*CertBlock + + certPath = append(certPath, &CertBlock{ + Certificate: p.Certificate, + Bytes: p.CertificateBytes, + }) + + if len(p.CAChain) > 0 { + // Root CA puts itself in the chain + if p.CAChain[0].Certificate.SerialNumber != p.Certificate.SerialNumber { + certPath = append(certPath, p.CAChain...) + } + } + + return certPath +} + +// GetSigner returns a crypto.Signer corresponding to the private key +// contained in this ParsedCertBundle. The Signer contains a Public() function +// for getting the corresponding public. 
The Signer can also be +// type-converted to private keys +func (p *ParsedCertBundle) getSigner() (crypto.Signer, error) { + var signer crypto.Signer + var err error + + if p.PrivateKeyBytes == nil || len(p.PrivateKeyBytes) == 0 { + return nil, errutil.UserError{Err: "Given parsed cert bundle does not have private key information"} + } + + switch p.PrivateKeyFormat { + case ECBlock: + signer, err = x509.ParseECPrivateKey(p.PrivateKeyBytes) + if err != nil { + return nil, errutil.UserError{Err: fmt.Sprintf("Unable to parse CA's private EC key: %s", err)} + } + + case PKCS1Block: + signer, err = x509.ParsePKCS1PrivateKey(p.PrivateKeyBytes) + if err != nil { + return nil, errutil.UserError{Err: fmt.Sprintf("Unable to parse CA's private RSA key: %s", err)} + } + + case PKCS8Block: + if k, err := x509.ParsePKCS8PrivateKey(p.PrivateKeyBytes); err == nil { + switch k := k.(type) { + case *rsa.PrivateKey, *ecdsa.PrivateKey, ed25519.PrivateKey: + return k.(crypto.Signer), nil + default: + return nil, errutil.UserError{Err: "Found unknown private key type in pkcs#8 wrapping"} + } + } + return nil, errutil.UserError{Err: fmt.Sprintf("Failed to parse pkcs#8 key: %v", err)} + default: + return nil, errutil.UserError{Err: "Unable to determine type of private key; only RSA and EC are supported"} + } + return signer, nil +} + +// SetParsedPrivateKey sets the private key parameters on the bundle +func (p *ParsedCertBundle) SetParsedPrivateKey(privateKey crypto.Signer, privateKeyType PrivateKeyType, privateKeyBytes []byte) { + p.PrivateKey = privateKey + p.PrivateKeyType = privateKeyType + p.PrivateKeyBytes = privateKeyBytes +} + +func getPKCS8Type(bs []byte) (PrivateKeyType, error) { + k, err := x509.ParsePKCS8PrivateKey(bs) + if err != nil { + return UnknownPrivateKey, errutil.UserError{Err: fmt.Sprintf("Failed to parse pkcs#8 key: %v", err)} + } + + switch k.(type) { + case *ecdsa.PrivateKey: + return ECPrivateKey, nil + case *rsa.PrivateKey: + return RSAPrivateKey, nil + case ed25519.PrivateKey: + return Ed25519PrivateKey, nil + default: + return UnknownPrivateKey, errutil.UserError{Err: "Found unknown private key type in pkcs#8 wrapping"} + } +} + +// ToParsedCSRBundle converts a string-based CSR bundle +// to a byte-based raw CSR bundle +func (c *CSRBundle) ToParsedCSRBundle() (*ParsedCSRBundle, error) { + result := &ParsedCSRBundle{} + var err error + var pemBlock *pem.Block + + if len(c.PrivateKey) > 0 { + pemBlock, _ = pem.Decode([]byte(c.PrivateKey)) + if pemBlock == nil { + return nil, errutil.UserError{Err: "Error decoding private key from cert bundle"} + } + result.PrivateKeyBytes = pemBlock.Bytes + + switch BlockType(pemBlock.Type) { + case ECBlock: + result.PrivateKeyType = ECPrivateKey + case PKCS1Block: + result.PrivateKeyType = RSAPrivateKey + default: + // Try to figure it out and correct + if _, err := x509.ParseECPrivateKey(pemBlock.Bytes); err == nil { + result.PrivateKeyType = ECPrivateKey + c.PrivateKeyType = "ec" + } else if _, err := x509.ParsePKCS1PrivateKey(pemBlock.Bytes); err == nil { + result.PrivateKeyType = RSAPrivateKey + c.PrivateKeyType = "rsa" + } else if _, err := x509.ParsePKCS8PrivateKey(pemBlock.Bytes); err == nil { + result.PrivateKeyType = Ed25519PrivateKey + c.PrivateKeyType = "ed25519" + } else { + return nil, errutil.UserError{Err: fmt.Sprintf("Unknown private key type in bundle: %s", c.PrivateKeyType)} + } + } + + result.PrivateKey, err = result.getSigner() + if err != nil { + return nil, errutil.UserError{Err: fmt.Sprintf("Error getting signer: %s", err)} + } 
+ } + + if len(c.CSR) > 0 { + pemBlock, _ = pem.Decode([]byte(c.CSR)) + if pemBlock == nil { + return nil, errutil.UserError{Err: "Error decoding certificate from cert bundle"} + } + result.CSRBytes = pemBlock.Bytes + result.CSR, err = x509.ParseCertificateRequest(result.CSRBytes) + if err != nil { + return nil, errutil.UserError{Err: fmt.Sprintf("Error encountered parsing certificate bytes from raw bundle via CSR: %v", err)} + } + } + + return result, nil +} + +// ToCSRBundle converts a byte-based raw DER certificate bundle +// to a PEM-based string certificate bundle +func (p *ParsedCSRBundle) ToCSRBundle() (*CSRBundle, error) { + result := &CSRBundle{} + block := pem.Block{ + Type: "CERTIFICATE REQUEST", + } + + if p.CSRBytes != nil && len(p.CSRBytes) > 0 { + block.Bytes = p.CSRBytes + result.CSR = strings.TrimSpace(string(pem.EncodeToMemory(&block))) + } + + if p.PrivateKeyBytes != nil && len(p.PrivateKeyBytes) > 0 { + block.Bytes = p.PrivateKeyBytes + switch p.PrivateKeyType { + case RSAPrivateKey: + result.PrivateKeyType = "rsa" + block.Type = "RSA PRIVATE KEY" + case ECPrivateKey: + result.PrivateKeyType = "ec" + block.Type = "EC PRIVATE KEY" + case Ed25519PrivateKey: + result.PrivateKeyType = "ed25519" + block.Type = "PRIVATE KEY" + default: + return nil, errutil.InternalError{Err: "Could not determine private key type when creating block"} + } + result.PrivateKey = strings.TrimSpace(string(pem.EncodeToMemory(&block))) + } + + return result, nil +} + +// GetSigner returns a crypto.Signer corresponding to the private key +// contained in this ParsedCSRBundle. The Signer contains a Public() function +// for getting the corresponding public. The Signer can also be +// type-converted to private keys +func (p *ParsedCSRBundle) getSigner() (crypto.Signer, error) { + var signer crypto.Signer + var err error + + if p.PrivateKeyBytes == nil || len(p.PrivateKeyBytes) == 0 { + return nil, errutil.UserError{Err: "Given parsed cert bundle does not have private key information"} + } + + switch p.PrivateKeyType { + case ECPrivateKey: + signer, err = x509.ParseECPrivateKey(p.PrivateKeyBytes) + if err != nil { + return nil, errutil.UserError{Err: fmt.Sprintf("Unable to parse CA's private EC key: %s", err)} + } + + case RSAPrivateKey: + signer, err = x509.ParsePKCS1PrivateKey(p.PrivateKeyBytes) + if err != nil { + return nil, errutil.UserError{Err: fmt.Sprintf("Unable to parse CA's private RSA key: %s", err)} + } + + case Ed25519PrivateKey: + signerd, err := x509.ParsePKCS8PrivateKey(p.PrivateKeyBytes) + signer = signerd.(ed25519.PrivateKey) + if err != nil { + return nil, errutil.UserError{Err: fmt.Sprintf("Unable to parse CA's private Ed25519 key: %s", err)} + } + + default: + return nil, errutil.UserError{Err: "Unable to determine type of private key; only RSA, Ed25519 and EC are supported"} + } + return signer, nil +} + +// SetParsedPrivateKey sets the private key parameters on the bundle +func (p *ParsedCSRBundle) SetParsedPrivateKey(privateKey crypto.Signer, privateKeyType PrivateKeyType, privateKeyBytes []byte) { + p.PrivateKey = privateKey + p.PrivateKeyType = privateKeyType + p.PrivateKeyBytes = privateKeyBytes +} + +// getTLSConfig returns a TLS config generally suitable for client +// authentication. The returned TLS config can be modified slightly +// to be made suitable for a server requiring client authentication; +// specifically, you should set the value of ClientAuth in the returned +// config to match your needs. 
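+//
+// A minimal usage sketch; certBundle is an illustrative *CertBundle obtained
+// elsewhere, not part of this file:
+//
+//	parsed, err := certBundle.ToParsedCertBundle()
+//	if err != nil {
+//		return err
+//	}
+//	tlsConfig, err := parsed.GetTLSConfig(TLSClient | TLSServer)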
+func (p *ParsedCertBundle) GetTLSConfig(usage TLSUsage) (*tls.Config, error) { + tlsCert := tls.Certificate{ + Certificate: [][]byte{}, + } + + tlsConfig := &tls.Config{ + MinVersion: tls.VersionTLS12, + } + + if p.Certificate != nil { + tlsCert.Leaf = p.Certificate + } + + if p.PrivateKey != nil { + tlsCert.PrivateKey = p.PrivateKey + } + + if p.CertificateBytes != nil && len(p.CertificateBytes) > 0 { + tlsCert.Certificate = append(tlsCert.Certificate, p.CertificateBytes) + } + + if len(p.CAChain) > 0 { + for _, cert := range p.CAChain { + tlsCert.Certificate = append(tlsCert.Certificate, cert.Bytes) + } + + // Technically we only need one cert, but this doesn't duplicate code + certBundle, err := p.ToCertBundle() + if err != nil { + return nil, errwrap.Wrapf("error converting parsed bundle to string bundle when getting TLS config: {{err}}", err) + } + + caPool := x509.NewCertPool() + ok := caPool.AppendCertsFromPEM([]byte(certBundle.CAChain[0])) + if !ok { + return nil, fmt.Errorf("could not append CA certificate") + } + + if usage&TLSServer > 0 { + tlsConfig.ClientCAs = caPool + tlsConfig.ClientAuth = tls.VerifyClientCertIfGiven + } + if usage&TLSClient > 0 { + tlsConfig.RootCAs = caPool + } + } + + if tlsCert.Certificate != nil && len(tlsCert.Certificate) > 0 { + tlsConfig.Certificates = []tls.Certificate{tlsCert} + tlsConfig.BuildNameToCertificate() + } + + return tlsConfig, nil +} + +// IssueData is a structure that is suitable for marshaling into a request; +// either via JSON, or into a map[string]interface{} via the structs package +type IssueData struct { + TTL string `json:"ttl" structs:"ttl" mapstructure:"ttl"` + CommonName string `json:"common_name" structs:"common_name" mapstructure:"common_name"` + OU string `json:"ou" structs:"ou" mapstructure:"ou"` + AltNames string `json:"alt_names" structs:"alt_names" mapstructure:"alt_names"` + IPSANs string `json:"ip_sans" structs:"ip_sans" mapstructure:"ip_sans"` + CSR string `json:"csr" structs:"csr" mapstructure:"csr"` + OtherSANs string `json:"other_sans" structs:"other_sans" mapstructure:"other_sans"` +} + +type URLEntries struct { + IssuingCertificates []string `json:"issuing_certificates" structs:"issuing_certificates" mapstructure:"issuing_certificates"` + CRLDistributionPoints []string `json:"crl_distribution_points" structs:"crl_distribution_points" mapstructure:"crl_distribution_points"` + OCSPServers []string `json:"ocsp_servers" structs:"ocsp_servers" mapstructure:"ocsp_servers"` +} + +type CAInfoBundle struct { + ParsedCertBundle + URLs *URLEntries +} + +func (b *CAInfoBundle) GetCAChain() []*CertBlock { + chain := []*CertBlock{} + + // Include issuing CA in Chain, not including Root Authority + if (len(b.Certificate.AuthorityKeyId) > 0 && + !bytes.Equal(b.Certificate.AuthorityKeyId, b.Certificate.SubjectKeyId)) || + (len(b.Certificate.AuthorityKeyId) == 0 && + !bytes.Equal(b.Certificate.RawIssuer, b.Certificate.RawSubject)) { + + chain = append(chain, &CertBlock{ + Certificate: b.Certificate, + Bytes: b.CertificateBytes, + }) + if b.CAChain != nil && len(b.CAChain) > 0 { + chain = append(chain, b.CAChain...) 
+ } + } + + return chain +} + +type CertExtKeyUsage int + +const ( + AnyExtKeyUsage CertExtKeyUsage = 1 << iota + ServerAuthExtKeyUsage + ClientAuthExtKeyUsage + CodeSigningExtKeyUsage + EmailProtectionExtKeyUsage + IpsecEndSystemExtKeyUsage + IpsecTunnelExtKeyUsage + IpsecUserExtKeyUsage + TimeStampingExtKeyUsage + OcspSigningExtKeyUsage + MicrosoftServerGatedCryptoExtKeyUsage + NetscapeServerGatedCryptoExtKeyUsage + MicrosoftCommercialCodeSigningExtKeyUsage + MicrosoftKernelCodeSigningExtKeyUsage +) + +type CreationParameters struct { + Subject pkix.Name + DNSNames []string + EmailAddresses []string + IPAddresses []net.IP + URIs []*url.URL + OtherSANs map[string][]string + IsCA bool + KeyType string + KeyBits int + NotAfter time.Time + KeyUsage x509.KeyUsage + ExtKeyUsage CertExtKeyUsage + ExtKeyUsageOIDs []string + PolicyIdentifiers []string + BasicConstraintsValidForNonCA bool + SignatureBits int + + // Only used when signing a CA cert + UseCSRValues bool + PermittedDNSDomains []string + + // URLs to encode into the certificate + URLs *URLEntries + + // The maximum path length to encode + MaxPathLength int + + // The duration the certificate will use NotBefore + NotBeforeDuration time.Duration +} + +type CreationBundle struct { + Params *CreationParameters + SigningBundle *CAInfoBundle + CSR *x509.CertificateRequest +} + +// addKeyUsages adds appropriate key usages to the template given the creation +// information +func AddKeyUsages(data *CreationBundle, certTemplate *x509.Certificate) { + if data.Params.IsCA { + certTemplate.KeyUsage = x509.KeyUsage(x509.KeyUsageCertSign | x509.KeyUsageCRLSign) + return + } + + certTemplate.KeyUsage = data.Params.KeyUsage + + if data.Params.ExtKeyUsage&AnyExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageAny) + } + + if data.Params.ExtKeyUsage&ServerAuthExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageServerAuth) + } + + if data.Params.ExtKeyUsage&ClientAuthExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageClientAuth) + } + + if data.Params.ExtKeyUsage&CodeSigningExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageCodeSigning) + } + + if data.Params.ExtKeyUsage&EmailProtectionExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageEmailProtection) + } + + if data.Params.ExtKeyUsage&IpsecEndSystemExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageIPSECEndSystem) + } + + if data.Params.ExtKeyUsage&IpsecTunnelExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageIPSECTunnel) + } + + if data.Params.ExtKeyUsage&IpsecUserExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageIPSECUser) + } + + if data.Params.ExtKeyUsage&TimeStampingExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageTimeStamping) + } + + if data.Params.ExtKeyUsage&OcspSigningExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageOCSPSigning) + } + + if data.Params.ExtKeyUsage&MicrosoftServerGatedCryptoExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageMicrosoftServerGatedCrypto) + } + + if data.Params.ExtKeyUsage&NetscapeServerGatedCryptoExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = 
append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageNetscapeServerGatedCrypto) + } + + if data.Params.ExtKeyUsage&MicrosoftCommercialCodeSigningExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageMicrosoftCommercialCodeSigning) + } + + if data.Params.ExtKeyUsage&MicrosoftKernelCodeSigningExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageMicrosoftKernelCodeSigning) + } +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/compressutil/compress.go b/vendor/github.com/hashicorp/vault/sdk/helper/compressutil/compress.go new file mode 100644 index 0000000000..356d4548fa --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/compressutil/compress.go @@ -0,0 +1,207 @@ +package compressutil + +import ( + "bytes" + "compress/gzip" + "compress/lzw" + "fmt" + "io" + + "github.com/golang/snappy" + "github.com/hashicorp/errwrap" + "github.com/pierrec/lz4" +) + +const ( + // A byte value used as a canary prefix for the compressed information + // which is used to distinguish if a JSON input is compressed or not. + // The value of this constant should not be a first character of any + // valid JSON string. + + CompressionTypeGzip = "gzip" + CompressionCanaryGzip byte = 'G' + + CompressionTypeLZW = "lzw" + CompressionCanaryLZW byte = 'L' + + CompressionTypeSnappy = "snappy" + CompressionCanarySnappy byte = 'S' + + CompressionTypeLZ4 = "lz4" + CompressionCanaryLZ4 byte = '4' +) + +// SnappyReadCloser embeds the snappy reader which implements the io.Reader +// interface. The decompress procedure in this utility expects an +// io.ReadCloser. This type implements the io.Closer interface to retain the +// generic way of decompression. +type CompressUtilReadCloser struct { + io.Reader +} + +// Close is a noop method implemented only to satisfy the io.Closer interface +func (c *CompressUtilReadCloser) Close() error { + return nil +} + +// CompressionConfig is used to select a compression type to be performed by +// Compress and Decompress utilities. +// Supported types are: +// * CompressionTypeLZW +// * CompressionTypeGzip +// * CompressionTypeSnappy +// * CompressionTypeLZ4 +// +// When using CompressionTypeGzip, the compression levels can also be chosen: +// * gzip.DefaultCompression +// * gzip.BestSpeed +// * gzip.BestCompression +type CompressionConfig struct { + // Type of the compression algorithm to be used + Type string + + // When using Gzip format, the compression level to employ + GzipCompressionLevel int +} + +// Compress places the canary byte in a buffer and uses the same buffer to fill +// in the compressed information of the given input. The configuration supports +// two type of compression: LZW and Gzip. When using Gzip compression format, +// if GzipCompressionLevel is not specified, the 'gzip.DefaultCompression' will +// be assumed. 
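+//
+// A minimal gzip round-trip sketch; rawBytes and the variable names are
+// illustrative only:
+//
+//	compressed, err := Compress(rawBytes, &CompressionConfig{Type: CompressionTypeGzip})
+//	if err != nil {
+//		return err
+//	}
+//	plain, notCompressed, err := Decompress(compressed)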
+func Compress(data []byte, config *CompressionConfig) ([]byte, error) { + var buf bytes.Buffer + var writer io.WriteCloser + var err error + + if config == nil { + return nil, fmt.Errorf("config is nil") + } + + // Write the canary into the buffer and create writer to compress the + // input data based on the configured type + switch config.Type { + case CompressionTypeLZW: + buf.Write([]byte{CompressionCanaryLZW}) + writer = lzw.NewWriter(&buf, lzw.LSB, 8) + + case CompressionTypeGzip: + buf.Write([]byte{CompressionCanaryGzip}) + + switch { + case config.GzipCompressionLevel == gzip.BestCompression, + config.GzipCompressionLevel == gzip.BestSpeed, + config.GzipCompressionLevel == gzip.DefaultCompression: + // These are valid compression levels + default: + // If compression level is set to NoCompression or to + // any invalid value, fallback to Defaultcompression + config.GzipCompressionLevel = gzip.DefaultCompression + } + writer, err = gzip.NewWriterLevel(&buf, config.GzipCompressionLevel) + + case CompressionTypeSnappy: + buf.Write([]byte{CompressionCanarySnappy}) + writer = snappy.NewBufferedWriter(&buf) + + case CompressionTypeLZ4: + buf.Write([]byte{CompressionCanaryLZ4}) + writer = lz4.NewWriter(&buf) + + default: + return nil, fmt.Errorf("unsupported compression type") + } + + if err != nil { + return nil, errwrap.Wrapf("failed to create a compression writer: {{err}}", err) + } + + if writer == nil { + return nil, fmt.Errorf("failed to create a compression writer") + } + + // Compress the input and place it in the same buffer containing the + // canary byte. + if _, err = writer.Write(data); err != nil { + return nil, errwrap.Wrapf("failed to compress input data: err: {{err}}", err) + } + + // Close the io.WriteCloser + if err = writer.Close(); err != nil { + return nil, err + } + + // Return the compressed bytes with canary byte at the start + return buf.Bytes(), nil +} + +// Decompress checks if the first byte in the input matches the canary byte. +// If the first byte is a canary byte, then the input past the canary byte +// will be decompressed using the method specified in the given configuration. +// If the first byte isn't a canary byte, then the utility returns a boolean +// value indicating that the input was not compressed. +func Decompress(data []byte) ([]byte, bool, error) { + var err error + var reader io.ReadCloser + if data == nil || len(data) == 0 { + return nil, false, fmt.Errorf("'data' being decompressed is empty") + } + + canary := data[0] + cData := data[1:] + + switch canary { + // If the first byte matches the canary byte, remove the canary + // byte and try to decompress the data that is after the canary. 
+ case CompressionCanaryGzip: + if len(data) < 2 { + return nil, false, fmt.Errorf("invalid 'data' after the canary") + } + reader, err = gzip.NewReader(bytes.NewReader(cData)) + + case CompressionCanaryLZW: + if len(data) < 2 { + return nil, false, fmt.Errorf("invalid 'data' after the canary") + } + reader = lzw.NewReader(bytes.NewReader(cData), lzw.LSB, 8) + + case CompressionCanarySnappy: + if len(data) < 2 { + return nil, false, fmt.Errorf("invalid 'data' after the canary") + } + reader = &CompressUtilReadCloser{ + Reader: snappy.NewReader(bytes.NewReader(cData)), + } + + case CompressionCanaryLZ4: + if len(data) < 2 { + return nil, false, fmt.Errorf("invalid 'data' after the canary") + } + reader = &CompressUtilReadCloser{ + Reader: lz4.NewReader(bytes.NewReader(cData)), + } + + default: + // If the first byte doesn't match the canary byte, it means + // that the content was not compressed at all. Indicate the + // caller that the input was not compressed. + return nil, true, nil + } + if err != nil { + return nil, false, errwrap.Wrapf("failed to create a compression reader: {{err}}", err) + } + if reader == nil { + return nil, false, fmt.Errorf("failed to create a compression reader") + } + + // Close the io.ReadCloser + defer reader.Close() + + // Read all the compressed data into a buffer + var buf bytes.Buffer + if _, err = io.Copy(&buf, reader); err != nil { + return nil, false, err + } + + return buf.Bytes(), false, nil +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/consts/agent.go b/vendor/github.com/hashicorp/vault/sdk/helper/consts/agent.go new file mode 100644 index 0000000000..b62962e37e --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/consts/agent.go @@ -0,0 +1,5 @@ +package consts + +// AgentPathCacheClear is the path that the agent will use as its cache-clear +// endpoint. +const AgentPathCacheClear = "/agent/v1/cache-clear" diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/consts/consts.go b/vendor/github.com/hashicorp/vault/sdk/helper/consts/consts.go new file mode 100644 index 0000000000..b10f57a229 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/consts/consts.go @@ -0,0 +1,35 @@ +package consts + +const ( + // ExpirationRestoreWorkerCount specifies the number of workers to use while + // restoring leases into the expiration manager + ExpirationRestoreWorkerCount = 64 + + // NamespaceHeaderName is the header set to specify which namespace the + // request is indented for. + NamespaceHeaderName = "X-Vault-Namespace" + + // AuthHeaderName is the name of the header containing the token. + AuthHeaderName = "X-Vault-Token" + + // RequestHeaderName is the name of the header used by the Agent for + // SSRF protection. + RequestHeaderName = "X-Vault-Request" + + // PerformanceReplicationALPN is the negotiated protocol used for + // performance replication. + PerformanceReplicationALPN = "replication_v1" + + // DRReplicationALPN is the negotiated protocol used for dr replication. 
+ DRReplicationALPN = "replication_dr_v1" + + PerfStandbyALPN = "perf_standby_v1" + + RequestForwardingALPN = "req_fw_sb-act_v1" + + RaftStorageALPN = "raft_storage_v1" + + // ReplicationResolverALPN is the negotiated protocol used for + // resolving replicaiton addresses + ReplicationResolverALPN = "replication_resolver_v1" +) diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/consts/error.go b/vendor/github.com/hashicorp/vault/sdk/helper/consts/error.go new file mode 100644 index 0000000000..1a9175c639 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/consts/error.go @@ -0,0 +1,25 @@ +package consts + +import "errors" + +var ( + // ErrSealed is returned if an operation is performed on a sealed barrier. + // No operation is expected to succeed before unsealing + ErrSealed = errors.New("Vault is sealed") + + // ErrAPILocked is returned if an operation is performed when the API is + // locked for the request namespace. + ErrAPILocked = errors.New("API access to this namespace has been locked by an administrator") + + // ErrStandby is returned if an operation is performed on a standby Vault. + // No operation is expected to succeed until active. + ErrStandby = errors.New("Vault is in standby mode") + + // ErrPathContainsParentReferences is returned when a path contains parent + // references. + ErrPathContainsParentReferences = errors.New("path cannot contain parent references") + + // ErrInvalidWrappingToken is returned when checking for the validity of + // a wrapping token that turns out to be invalid. + ErrInvalidWrappingToken = errors.New("wrapping token is not valid or does not exist") +) diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/consts/plugin_types.go b/vendor/github.com/hashicorp/vault/sdk/helper/consts/plugin_types.go new file mode 100644 index 0000000000..e0a00e4860 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/consts/plugin_types.go @@ -0,0 +1,59 @@ +package consts + +import "fmt" + +var PluginTypes = []PluginType{ + PluginTypeUnknown, + PluginTypeCredential, + PluginTypeDatabase, + PluginTypeSecrets, +} + +type PluginType uint32 + +// This is a list of PluginTypes used by Vault. +// If we need to add any in the future, it would +// be best to add them to the _end_ of the list below +// because they resolve to incrementing numbers, +// which may be saved in state somewhere. Thus if +// the name for one of those numbers changed because +// a value were added to the middle, that could cause +// the wrong plugin types to be read from storage +// for a given underlying number. 
Example of the problem +// here: https://play.golang.org/p/YAaPw5ww3er +const ( + PluginTypeUnknown PluginType = iota + PluginTypeCredential + PluginTypeDatabase + PluginTypeSecrets +) + +func (p PluginType) String() string { + switch p { + case PluginTypeUnknown: + return "unknown" + case PluginTypeCredential: + return "auth" + case PluginTypeDatabase: + return "database" + case PluginTypeSecrets: + return "secret" + default: + return "unsupported" + } +} + +func ParsePluginType(pluginType string) (PluginType, error) { + switch pluginType { + case "unknown": + return PluginTypeUnknown, nil + case "auth": + return PluginTypeCredential, nil + case "database": + return PluginTypeDatabase, nil + case "secret": + return PluginTypeSecrets, nil + default: + return PluginTypeUnknown, fmt.Errorf("%q is not a supported plugin type", pluginType) + } +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/consts/replication.go b/vendor/github.com/hashicorp/vault/sdk/helper/consts/replication.go new file mode 100644 index 0000000000..f72c2f47ae --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/consts/replication.go @@ -0,0 +1,159 @@ +package consts + +const ( + // N.B. This needs to be excluded from replication despite the name; it's + // merely saying that this is cluster information for the replicated + // cluster. + CoreReplicatedClusterPrefix = "core/cluster/replicated/" + CoreReplicatedClusterPrefixDR = "core/cluster/replicated-dr/" + + CoreReplicatedClusterInfoPath = CoreReplicatedClusterPrefix + "info" + CoreReplicatedClusterSecondariesPrefix = CoreReplicatedClusterPrefix + "secondaries/" + CoreReplicatedClusterInfoPathDR = CoreReplicatedClusterPrefixDR + "info" + CoreReplicatedClusterSecondariesPrefixDR = CoreReplicatedClusterPrefixDR + "secondaries/" + + // This is an identifier for the current secondary in the replicated paths + // manager. It should contain a character that is not allowed in secondary + // ids to ensure it doesn't collide. + CurrentReplicatedSecondaryIdentifier = ".current" + CoreFeatureFlagPath = "core/cluster/feature-flags" +) + +type ReplicationState uint32 + +const ( + _ ReplicationState = iota + OldReplicationPrimary + OldReplicationSecondary + OldReplicationBootstrapping + // Don't add anything here. Adding anything to this Old block would cause + // the rest of the values to change below. This was done originally to + // ensure no overlap between old and new values. + + ReplicationUnknown ReplicationState = 0 + ReplicationPerformancePrimary ReplicationState = 1 << iota // Note -- iota is 5 here! 
+ ReplicationPerformanceSecondary + OldSplitReplicationBootstrapping + ReplicationDRPrimary + ReplicationDRSecondary + ReplicationPerformanceBootstrapping + ReplicationDRBootstrapping + ReplicationPerformanceDisabled + ReplicationDRDisabled + ReplicationPerformanceStandby +) + +// We verify no change to the above values are made +func init() { + if OldReplicationBootstrapping != 3 { + panic("Replication Constants have changed") + } + + if ReplicationPerformancePrimary != 1<<5 { + panic("Replication Constants have changed") + } +} + +func (r ReplicationState) string() string { + switch r { + case ReplicationPerformanceSecondary: + return "secondary" + case ReplicationPerformancePrimary: + return "primary" + case ReplicationPerformanceBootstrapping: + return "bootstrapping" + case ReplicationPerformanceDisabled: + return "disabled" + case ReplicationDRPrimary: + return "primary" + case ReplicationDRSecondary: + return "secondary" + case ReplicationDRBootstrapping: + return "bootstrapping" + case ReplicationDRDisabled: + return "disabled" + } + + return "unknown" +} + +func (r ReplicationState) StateStrings() []string { + var ret []string + if r.HasState(ReplicationPerformanceSecondary) { + ret = append(ret, "perf-secondary") + } + if r.HasState(ReplicationPerformancePrimary) { + ret = append(ret, "perf-primary") + } + if r.HasState(ReplicationPerformanceBootstrapping) { + ret = append(ret, "perf-bootstrapping") + } + if r.HasState(ReplicationPerformanceDisabled) { + ret = append(ret, "perf-disabled") + } + if r.HasState(ReplicationDRPrimary) { + ret = append(ret, "dr-primary") + } + if r.HasState(ReplicationDRSecondary) { + ret = append(ret, "dr-secondary") + } + if r.HasState(ReplicationDRBootstrapping) { + ret = append(ret, "dr-bootstrapping") + } + if r.HasState(ReplicationDRDisabled) { + ret = append(ret, "dr-disabled") + } + if r.HasState(ReplicationPerformanceStandby) { + ret = append(ret, "perfstandby") + } + + return ret +} + +func (r ReplicationState) GetDRString() string { + switch { + case r.HasState(ReplicationDRBootstrapping): + return ReplicationDRBootstrapping.string() + case r.HasState(ReplicationDRPrimary): + return ReplicationDRPrimary.string() + case r.HasState(ReplicationDRSecondary): + return ReplicationDRSecondary.string() + case r.HasState(ReplicationDRDisabled): + return ReplicationDRDisabled.string() + default: + return "unknown" + } +} + +func (r ReplicationState) GetPerformanceString() string { + switch { + case r.HasState(ReplicationPerformanceBootstrapping): + return ReplicationPerformanceBootstrapping.string() + case r.HasState(ReplicationPerformancePrimary): + return ReplicationPerformancePrimary.string() + case r.HasState(ReplicationPerformanceSecondary): + return ReplicationPerformanceSecondary.string() + case r.HasState(ReplicationPerformanceDisabled): + return ReplicationPerformanceDisabled.string() + default: + return "unknown" + } +} + +func (r ReplicationState) IsPrimaryState() bool { + return r.HasState(ReplicationPerformancePrimary | ReplicationDRPrimary) +} + +func (r ReplicationState) HasState(flag ReplicationState) bool { return r&flag != 0 } +func (r *ReplicationState) AddState(flag ReplicationState) { *r |= flag } +func (r *ReplicationState) ClearState(flag ReplicationState) { *r &= ^flag } +func (r *ReplicationState) ToggleState(flag ReplicationState) { *r ^= flag } + +type HAState uint32 + +const ( + _ HAState = iota + Standby + PerfStandby + Active +) diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/cryptoutil/cryptoutil.go 
b/vendor/github.com/hashicorp/vault/sdk/helper/cryptoutil/cryptoutil.go new file mode 100644 index 0000000000..a37086c645 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/cryptoutil/cryptoutil.go @@ -0,0 +1,11 @@ +package cryptoutil + +import "golang.org/x/crypto/blake2b" + +func Blake2b256Hash(key string) []byte { + hf, _ := blake2b.New256(nil) + + hf.Write([]byte(key)) + + return hf.Sum(nil) +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/errutil/error.go b/vendor/github.com/hashicorp/vault/sdk/helper/errutil/error.go new file mode 100644 index 0000000000..0b95efb40e --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/errutil/error.go @@ -0,0 +1,20 @@ +package errutil + +// UserError represents an error generated due to invalid user input +type UserError struct { + Err string +} + +func (e UserError) Error() string { + return e.Err +} + +// InternalError represents an error generated internally, +// presumably not due to invalid user input +type InternalError struct { + Err string +} + +func (e InternalError) Error() string { + return e.Err +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/hclutil/hcl.go b/vendor/github.com/hashicorp/vault/sdk/helper/hclutil/hcl.go new file mode 100644 index 0000000000..0b120367d5 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/hclutil/hcl.go @@ -0,0 +1,36 @@ +package hclutil + +import ( + "fmt" + + multierror "github.com/hashicorp/go-multierror" + "github.com/hashicorp/hcl/hcl/ast" +) + +// CheckHCLKeys checks whether the keys in the AST list contains any of the valid keys provided. +func CheckHCLKeys(node ast.Node, valid []string) error { + var list *ast.ObjectList + switch n := node.(type) { + case *ast.ObjectList: + list = n + case *ast.ObjectType: + list = n.List + default: + return fmt.Errorf("cannot check HCL keys of type %T", n) + } + + validMap := make(map[string]struct{}, len(valid)) + for _, v := range valid { + validMap[v] = struct{}{} + } + + var result error + for _, item := range list.Items { + key := item.Keys[0].Token.Value().(string) + if _, ok := validMap[key]; !ok { + result = multierror.Append(result, fmt.Errorf("invalid key %q on line %d", key, item.Assign.Line)) + } + } + + return result +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/jsonutil/json.go b/vendor/github.com/hashicorp/vault/sdk/helper/jsonutil/json.go new file mode 100644 index 0000000000..c03a4f8c8d --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/jsonutil/json.go @@ -0,0 +1,100 @@ +package jsonutil + +import ( + "bytes" + "compress/gzip" + "encoding/json" + "fmt" + "io" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/vault/sdk/helper/compressutil" +) + +// Encodes/Marshals the given object into JSON +func EncodeJSON(in interface{}) ([]byte, error) { + if in == nil { + return nil, fmt.Errorf("input for encoding is nil") + } + var buf bytes.Buffer + enc := json.NewEncoder(&buf) + if err := enc.Encode(in); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// EncodeJSONAndCompress encodes the given input into JSON and compresses the +// encoded value (using Gzip format BestCompression level, by default). A +// canary byte is placed at the beginning of the returned bytes for the logic +// in decompression method to identify compressed input. 
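+//
+// A minimal round-trip sketch; the payload and variable names are
+// illustrative only (a nil config falls back to gzip BestCompression):
+//
+//	blob, err := EncodeJSONAndCompress(map[string]interface{}{"key": "value"}, nil)
+//	if err != nil {
+//		return err
+//	}
+//	var out map[string]interface{}
+//	err = DecodeJSON(blob, &out)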
+func EncodeJSONAndCompress(in interface{}, config *compressutil.CompressionConfig) ([]byte, error) { + if in == nil { + return nil, fmt.Errorf("input for encoding is nil") + } + + // First JSON encode the given input + encodedBytes, err := EncodeJSON(in) + if err != nil { + return nil, err + } + + if config == nil { + config = &compressutil.CompressionConfig{ + Type: compressutil.CompressionTypeGzip, + GzipCompressionLevel: gzip.BestCompression, + } + } + + return compressutil.Compress(encodedBytes, config) +} + +// DecodeJSON tries to decompress the given data. The call to decompress, fails +// if the content was not compressed in the first place, which is identified by +// a canary byte before the compressed data. If the data is not compressed, it +// is JSON decoded directly. Otherwise the decompressed data will be JSON +// decoded. +func DecodeJSON(data []byte, out interface{}) error { + if data == nil || len(data) == 0 { + return fmt.Errorf("'data' being decoded is nil") + } + if out == nil { + return fmt.Errorf("output parameter 'out' is nil") + } + + // Decompress the data if it was compressed in the first place + decompressedBytes, uncompressed, err := compressutil.Decompress(data) + if err != nil { + return errwrap.Wrapf("failed to decompress JSON: {{err}}", err) + } + if !uncompressed && (decompressedBytes == nil || len(decompressedBytes) == 0) { + return fmt.Errorf("decompressed data being decoded is invalid") + } + + // If the input supplied failed to contain the compression canary, it + // will be notified by the compression utility. Decode the decompressed + // input. + if !uncompressed { + data = decompressedBytes + } + + return DecodeJSONFromReader(bytes.NewReader(data), out) +} + +// Decodes/Unmarshals the given io.Reader pointing to a JSON, into a desired object +func DecodeJSONFromReader(r io.Reader, out interface{}) error { + if r == nil { + return fmt.Errorf("'io.Reader' being decoded is nil") + } + if out == nil { + return fmt.Errorf("output parameter 'out' is nil") + } + + dec := json.NewDecoder(r) + + // While decoding JSON values, interpret the integer values as `json.Number`s instead of `float64`. + dec.UseNumber() + + // Since 'out' is an interface representing a pointer, pass it to the decoder without an '&' + return dec.Decode(out) +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/license/feature.go b/vendor/github.com/hashicorp/vault/sdk/helper/license/feature.go new file mode 100644 index 0000000000..c7c000a58a --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/license/feature.go @@ -0,0 +1,10 @@ +package license + +// Features is a bitmask of feature flags +type Features uint + +const FeatureNone Features = 0 + +func (f Features) HasFeature(flag Features) bool { + return false +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/locksutil/locks.go b/vendor/github.com/hashicorp/vault/sdk/helper/locksutil/locks.go new file mode 100644 index 0000000000..1c85402493 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/locksutil/locks.go @@ -0,0 +1,59 @@ +package locksutil + +import ( + "sync" + + "github.com/hashicorp/vault/sdk/helper/cryptoutil" +) + +const ( + LockCount = 256 +) + +type LockEntry struct { + sync.RWMutex +} + +// CreateLocks returns an array so that the locks can be iterated over in +// order. +// +// This is only threadsafe if a process is using a single lock, or iterating +// over the entire lock slice in order. 
Using a consistent order avoids +// deadlocks because you can never have the following: +// +// Lock A, Lock B +// Lock B, Lock A +// +// Where process 1 is now deadlocked trying to lock B, and process 2 deadlocked trying to lock A +// +func CreateLocks() []*LockEntry { + ret := make([]*LockEntry, LockCount) + for i := range ret { + ret[i] = new(LockEntry) + } + return ret +} + +func LockIndexForKey(key string) uint8 { + return uint8(cryptoutil.Blake2b256Hash(key)[0]) +} + +func LockForKey(locks []*LockEntry, key string) *LockEntry { + return locks[LockIndexForKey(key)] +} + +func LocksForKeys(locks []*LockEntry, keys []string) []*LockEntry { + lockIndexes := make(map[uint8]struct{}, len(keys)) + for _, k := range keys { + lockIndexes[LockIndexForKey(k)] = struct{}{} + } + + locksToReturn := make([]*LockEntry, 0, len(keys)) + for i, l := range locks { + if _, ok := lockIndexes[uint8(i)]; ok { + locksToReturn = append(locksToReturn, l) + } + } + + return locksToReturn +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/logging/logging.go b/vendor/github.com/hashicorp/vault/sdk/helper/logging/logging.go new file mode 100644 index 0000000000..a8d30674b1 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/logging/logging.go @@ -0,0 +1,80 @@ +package logging + +import ( + "fmt" + "io" + "os" + "strings" + + log "github.com/hashicorp/go-hclog" +) + +type LogFormat int + +const ( + UnspecifiedFormat LogFormat = iota + StandardFormat + JSONFormat +) + +// Stringer implementation +func (l LogFormat) String() string { + switch l { + case UnspecifiedFormat: + return "unspecified" + case StandardFormat: + return "standard" + case JSONFormat: + return "json" + } + + // unreachable + return "unknown" +} + +// NewVaultLogger creates a new logger with the specified level and a Vault +// formatter +func NewVaultLogger(level log.Level) log.Logger { + return NewVaultLoggerWithWriter(log.DefaultOutput, level) +} + +// NewVaultLoggerWithWriter creates a new logger with the specified level and +// writer and a Vault formatter +func NewVaultLoggerWithWriter(w io.Writer, level log.Level) log.Logger { + opts := &log.LoggerOptions{ + Level: level, + Output: w, + JSONFormat: ParseEnvLogFormat() == JSONFormat, + } + return log.New(opts) +} + +// ParseLogFormat parses the log format from the provided string. +func ParseLogFormat(format string) (LogFormat, error) { + switch strings.ToLower(strings.TrimSpace(format)) { + case "": + return UnspecifiedFormat, nil + case "standard": + return StandardFormat, nil + case "json": + return JSONFormat, nil + default: + return UnspecifiedFormat, fmt.Errorf("Unknown log format: %s", format) + } +} + +// ParseEnvLogFormat parses the log format from an environment variable. 
+func ParseEnvLogFormat() LogFormat { + logFormat := os.Getenv("VAULT_LOG_FORMAT") + if logFormat == "" { + logFormat = os.Getenv("LOGXI_FORMAT") + } + switch strings.ToLower(logFormat) { + case "json", "vault_json", "vault-json", "vaultjson": + return JSONFormat + case "standard": + return StandardFormat + default: + return UnspecifiedFormat + } +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/pathmanager/pathmanager.go b/vendor/github.com/hashicorp/vault/sdk/helper/pathmanager/pathmanager.go new file mode 100644 index 0000000000..e0e39445b2 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/pathmanager/pathmanager.go @@ -0,0 +1,136 @@ +package pathmanager + +import ( + "strings" + "sync" + + iradix "github.com/hashicorp/go-immutable-radix" +) + +// PathManager is a prefix searchable index of paths +type PathManager struct { + l sync.RWMutex + paths *iradix.Tree +} + +// New creates a new path manager +func New() *PathManager { + return &PathManager{ + paths: iradix.New(), + } +} + +// AddPaths adds path to the paths list +func (p *PathManager) AddPaths(paths []string) { + p.l.Lock() + defer p.l.Unlock() + + txn := p.paths.Txn() + for _, prefix := range paths { + if len(prefix) == 0 { + continue + } + + var exception bool + if strings.HasPrefix(prefix, "!") { + prefix = strings.TrimPrefix(prefix, "!") + exception = true + } + + // We trim any trailing *, but we don't touch whether it is a trailing + // slash or not since we want to be able to ignore prefixes that fully + // specify a file + txn.Insert([]byte(strings.TrimSuffix(prefix, "*")), exception) + } + p.paths = txn.Commit() +} + +// RemovePaths removes paths from the paths list +func (p *PathManager) RemovePaths(paths []string) { + p.l.Lock() + defer p.l.Unlock() + + txn := p.paths.Txn() + for _, prefix := range paths { + if len(prefix) == 0 { + continue + } + + // Exceptions aren't stored with the leading ! 
so strip it + if strings.HasPrefix(prefix, "!") { + prefix = strings.TrimPrefix(prefix, "!") + } + + // We trim any trailing *, but we don't touch whether it is a trailing + // slash or not since we want to be able to ignore prefixes that fully + // specify a file + txn.Delete([]byte(strings.TrimSuffix(prefix, "*"))) + } + p.paths = txn.Commit() +} + +// RemovePathPrefix removes all paths with the given prefix +func (p *PathManager) RemovePathPrefix(prefix string) { + p.l.Lock() + defer p.l.Unlock() + + // We trim any trailing *, but we don't touch whether it is a trailing + // slash or not since we want to be able to ignore prefixes that fully + // specify a file + p.paths, _ = p.paths.DeletePrefix([]byte(strings.TrimSuffix(prefix, "*"))) +} + +// Len returns the number of paths +func (p *PathManager) Len() int { + return p.paths.Len() +} + +// Paths returns the path list +func (p *PathManager) Paths() []string { + p.l.RLock() + defer p.l.RUnlock() + + paths := make([]string, 0, p.paths.Len()) + walkFn := func(k []byte, v interface{}) bool { + paths = append(paths, string(k)) + return false + } + p.paths.Root().Walk(walkFn) + return paths +} + +// HasPath returns if the prefix for the path exists regardless if it is a path +// (ending with /) or a prefix for a leaf node +func (p *PathManager) HasPath(path string) bool { + p.l.RLock() + defer p.l.RUnlock() + + if _, exceptionRaw, ok := p.paths.Root().LongestPrefix([]byte(path)); ok { + var exception bool + if exceptionRaw != nil { + exception = exceptionRaw.(bool) + } + return !exception + } + return false +} + +// HasExactPath returns if the longest match is an exact match for the +// full path +func (p *PathManager) HasExactPath(path string) bool { + p.l.RLock() + defer p.l.RUnlock() + + if val, exceptionRaw, ok := p.paths.Root().LongestPrefix([]byte(path)); ok { + var exception bool + if exceptionRaw != nil { + exception = exceptionRaw.(bool) + } + + strVal := string(val) + if strings.HasSuffix(strVal, "/") || strVal == path { + return !exception + } + } + return false +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/env.go b/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/env.go new file mode 100644 index 0000000000..fd0cd4fb83 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/env.go @@ -0,0 +1,69 @@ +package pluginutil + +import ( + "os" + + "github.com/hashicorp/go-secure-stdlib/mlock" + version "github.com/hashicorp/go-version" +) + +var ( + // PluginMlockEnabled is the ENV name used to pass the configuration for + // enabling mlock + PluginMlockEnabled = "VAULT_PLUGIN_MLOCK_ENABLED" + + // PluginVaultVersionEnv is the ENV name used to pass the version of the + // vault server to the plugin + PluginVaultVersionEnv = "VAULT_VERSION" + + // PluginMetadataModeEnv is an ENV name used to disable TLS communication + // to bootstrap mounting plugins. + PluginMetadataModeEnv = "VAULT_PLUGIN_METADATA_MODE" + + // PluginUnwrapTokenEnv is the ENV name used to pass unwrap tokens to the + // plugin. + PluginUnwrapTokenEnv = "VAULT_UNWRAP_TOKEN" + + // PluginCACertPEMEnv is an ENV name used for holding a CA PEM-encoded + // string. Used for testing. + PluginCACertPEMEnv = "VAULT_TESTING_PLUGIN_CA_PEM" +) + +// OptionallyEnableMlock determines if mlock should be called, and if so enables +// mlock. 
+func OptionallyEnableMlock() error { + if os.Getenv(PluginMlockEnabled) == "true" { + return mlock.LockMemory() + } + + return nil +} + +// GRPCSupport defaults to returning true, unless VAULT_VERSION is missing or +// it fails to meet the version constraint. +func GRPCSupport() bool { + verString := os.Getenv(PluginVaultVersionEnv) + // If the env var is empty, we fall back to netrpc for backward compatibility. + if verString == "" { + return false + } + if verString != "unknown" { + ver, err := version.NewVersion(verString) + if err != nil { + return true + } + // Due to some regressions on 0.9.2 & 0.9.3 we now require version 0.9.4 + // to allow the plugin framework to default to gRPC. + constraint, err := version.NewConstraint(">= 0.9.4") + if err != nil { + return true + } + return constraint.Check(ver) + } + return true +} + +// InMetadataMode returns true if the plugin calling this function is running in metadata mode. +func InMetadataMode() bool { + return os.Getenv(PluginMetadataModeEnv) == "true" +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/run_config.go b/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/run_config.go new file mode 100644 index 0000000000..f801287d7d --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/run_config.go @@ -0,0 +1,161 @@ +package pluginutil + +import ( + "context" + "crypto/sha256" + "crypto/tls" + "fmt" + "os/exec" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-plugin" + "github.com/hashicorp/vault/sdk/version" +) + +type runConfig struct { + // Provided by PluginRunner + command string + args []string + sha256 []byte + + // Initialized with what's in PluginRunner.Env, but can be added to + env []string + + wrapper RunnerUtil + pluginSets map[int]plugin.PluginSet + hs plugin.HandshakeConfig + logger log.Logger + isMetadataMode bool + autoMTLS bool +} + +func (rc runConfig) makeConfig(ctx context.Context) (*plugin.ClientConfig, error) { + cmd := exec.Command(rc.command, rc.args...) + cmd.Env = append(cmd.Env, rc.env...) + + // Add the mlock setting to the ENV of the plugin + if rc.wrapper != nil && rc.wrapper.MlockEnabled() { + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", PluginMlockEnabled, "true")) + } + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", PluginVaultVersionEnv, version.GetVersion().Version)) + + if rc.isMetadataMode { + rc.logger = rc.logger.With("metadata", "true") + } + metadataEnv := fmt.Sprintf("%s=%t", PluginMetadataModeEnv, rc.isMetadataMode) + cmd.Env = append(cmd.Env, metadataEnv) + + var clientTLSConfig *tls.Config + if !rc.autoMTLS && !rc.isMetadataMode { + // Get a CA TLS Certificate + certBytes, key, err := generateCert() + if err != nil { + return nil, err + } + + // Use CA to sign a client cert and return a configured TLS config + clientTLSConfig, err = createClientTLSConfig(certBytes, key) + if err != nil { + return nil, err + } + + // Use CA to sign a server cert and wrap the values in a response wrapped + // token. 
+ wrapToken, err := wrapServerConfig(ctx, rc.wrapper, certBytes, key) + if err != nil { + return nil, err + } + + // Add the response wrap token to the ENV of the plugin + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", PluginUnwrapTokenEnv, wrapToken)) + } + + secureConfig := &plugin.SecureConfig{ + Checksum: rc.sha256, + Hash: sha256.New(), + } + + clientConfig := &plugin.ClientConfig{ + HandshakeConfig: rc.hs, + VersionedPlugins: rc.pluginSets, + Cmd: cmd, + SecureConfig: secureConfig, + TLSConfig: clientTLSConfig, + Logger: rc.logger, + AllowedProtocols: []plugin.Protocol{ + plugin.ProtocolNetRPC, + plugin.ProtocolGRPC, + }, + AutoMTLS: rc.autoMTLS, + } + return clientConfig, nil +} + +func (rc runConfig) run(ctx context.Context) (*plugin.Client, error) { + clientConfig, err := rc.makeConfig(ctx) + if err != nil { + return nil, err + } + + client := plugin.NewClient(clientConfig) + return client, nil +} + +type RunOpt func(*runConfig) + +func Env(env ...string) RunOpt { + return func(rc *runConfig) { + rc.env = append(rc.env, env...) + } +} + +func Runner(wrapper RunnerUtil) RunOpt { + return func(rc *runConfig) { + rc.wrapper = wrapper + } +} + +func PluginSets(pluginSets map[int]plugin.PluginSet) RunOpt { + return func(rc *runConfig) { + rc.pluginSets = pluginSets + } +} + +func HandshakeConfig(hs plugin.HandshakeConfig) RunOpt { + return func(rc *runConfig) { + rc.hs = hs + } +} + +func Logger(logger log.Logger) RunOpt { + return func(rc *runConfig) { + rc.logger = logger + } +} + +func MetadataMode(isMetadataMode bool) RunOpt { + return func(rc *runConfig) { + rc.isMetadataMode = isMetadataMode + } +} + +func AutoMTLS(autoMTLS bool) RunOpt { + return func(rc *runConfig) { + rc.autoMTLS = autoMTLS + } +} + +func (r *PluginRunner) RunConfig(ctx context.Context, opts ...RunOpt) (*plugin.Client, error) { + rc := runConfig{ + command: r.Command, + args: r.Args, + sha256: r.Sha256, + env: r.Env, + } + + for _, opt := range opts { + opt(&rc) + } + + return rc.run(ctx) +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/runner.go b/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/runner.go new file mode 100644 index 0000000000..ecd60eeb34 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/runner.go @@ -0,0 +1,88 @@ +package pluginutil + +import ( + "context" + "time" + + log "github.com/hashicorp/go-hclog" + plugin "github.com/hashicorp/go-plugin" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/wrapping" +) + +// Looker defines the plugin Lookup function that looks into the plugin catalog +// for available plugins and returns a PluginRunner +type Looker interface { + LookupPlugin(context.Context, string, consts.PluginType) (*PluginRunner, error) +} + +// RunnerUtil interface defines the functions needed by the runner to wrap the +// metadata needed to run a plugin process. This includes looking up Mlock +// configuration and wrapping data in a response wrapped token. +// logical.SystemView implementations satisfy this interface. +type RunnerUtil interface { + ResponseWrapData(ctx context.Context, data map[string]interface{}, ttl time.Duration, jwt bool) (*wrapping.ResponseWrapInfo, error) + MlockEnabled() bool +} + +// LookRunnerUtil defines the functions for both Looker and Wrapper +type LookRunnerUtil interface { + Looker + RunnerUtil +} + +// PluginRunner defines the metadata needed to run a plugin securely with +// go-plugin. 
+type PluginRunner struct { + Name string `json:"name" structs:"name"` + Type consts.PluginType `json:"type" structs:"type"` + Command string `json:"command" structs:"command"` + Args []string `json:"args" structs:"args"` + Env []string `json:"env" structs:"env"` + Sha256 []byte `json:"sha256" structs:"sha256"` + Builtin bool `json:"builtin" structs:"builtin"` + BuiltinFactory func() (interface{}, error) `json:"-" structs:"-"` +} + +// Run takes a wrapper RunnerUtil instance along with the go-plugin parameters and +// returns a configured plugin.Client with TLS Configured and a wrapping token set +// on PluginUnwrapTokenEnv for plugin process consumption. +func (r *PluginRunner) Run(ctx context.Context, wrapper RunnerUtil, pluginSets map[int]plugin.PluginSet, hs plugin.HandshakeConfig, env []string, logger log.Logger) (*plugin.Client, error) { + return r.RunConfig(ctx, + Runner(wrapper), + PluginSets(pluginSets), + HandshakeConfig(hs), + Env(env...), + Logger(logger), + MetadataMode(false), + ) +} + +// RunMetadataMode returns a configured plugin.Client that will dispense a plugin +// in metadata mode. The PluginMetadataModeEnv is passed in as part of the Cmd to +// plugin.Client, and consumed by the plugin process on api.VaultPluginTLSProvider. +func (r *PluginRunner) RunMetadataMode(ctx context.Context, wrapper RunnerUtil, pluginSets map[int]plugin.PluginSet, hs plugin.HandshakeConfig, env []string, logger log.Logger) (*plugin.Client, error) { + return r.RunConfig(ctx, + Runner(wrapper), + PluginSets(pluginSets), + HandshakeConfig(hs), + Env(env...), + Logger(logger), + MetadataMode(true), + ) +} + +// CtxCancelIfCanceled takes a context cancel func and a context. If the context is +// shutdown the cancelfunc is called. This is useful for merging two cancel +// functions. +func CtxCancelIfCanceled(f context.CancelFunc, ctxCanceler context.Context) chan struct{} { + quitCh := make(chan struct{}) + go func() { + select { + case <-quitCh: + case <-ctxCanceler.Done(): + f() + } + }() + return quitCh +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/tls.go b/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/tls.go new file mode 100644 index 0000000000..f78f04014c --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/pluginutil/tls.go @@ -0,0 +1,108 @@ +package pluginutil + +import ( + "context" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "time" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/sdk/helper/certutil" +) + +// generateCert is used internally to create certificates for the plugin +// client and server. 
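+// The certificate is self-signed, marked as a CA, valid for roughly 30 years,
+// and allows both client and server authentication (see the template below).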
+func generateCert() ([]byte, *ecdsa.PrivateKey, error) { + key, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader) + if err != nil { + return nil, nil, err + } + + host, err := uuid.GenerateUUID() + if err != nil { + return nil, nil, err + } + + sn, err := certutil.GenerateSerialNumber() + if err != nil { + return nil, nil, err + } + + template := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: host, + }, + DNSNames: []string{host}, + ExtKeyUsage: []x509.ExtKeyUsage{ + x509.ExtKeyUsageClientAuth, + x509.ExtKeyUsageServerAuth, + }, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement, + SerialNumber: sn, + NotBefore: time.Now().Add(-30 * time.Second), + NotAfter: time.Now().Add(262980 * time.Hour), + IsCA: true, + } + + certBytes, err := x509.CreateCertificate(rand.Reader, template, template, key.Public(), key) + if err != nil { + return nil, nil, errwrap.Wrapf("unable to generate client certificate: {{err}}", err) + } + + return certBytes, key, nil +} + +// createClientTLSConfig creates a signed certificate and returns a configured +// TLS config. +func createClientTLSConfig(certBytes []byte, key *ecdsa.PrivateKey) (*tls.Config, error) { + clientCert, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, errwrap.Wrapf("error parsing generated plugin certificate: {{err}}", err) + } + + cert := tls.Certificate{ + Certificate: [][]byte{certBytes}, + PrivateKey: key, + Leaf: clientCert, + } + + clientCertPool := x509.NewCertPool() + clientCertPool.AddCert(clientCert) + + tlsConfig := &tls.Config{ + Certificates: []tls.Certificate{cert}, + RootCAs: clientCertPool, + ClientCAs: clientCertPool, + ClientAuth: tls.RequireAndVerifyClientCert, + ServerName: clientCert.Subject.CommonName, + MinVersion: tls.VersionTLS12, + } + + tlsConfig.BuildNameToCertificate() + + return tlsConfig, nil +} + +// wrapServerConfig is used to create a server certificate and private key, then +// wrap them in an unwrap token for later retrieval by the plugin. 
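+// The private key is DER-encoded with x509.MarshalECPrivateKey and the pair is
+// response-wrapped with a 60 second TTL.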
+func wrapServerConfig(ctx context.Context, sys RunnerUtil, certBytes []byte, key *ecdsa.PrivateKey) (string, error) { + rawKey, err := x509.MarshalECPrivateKey(key) + if err != nil { + return "", err + } + + wrapInfo, err := sys.ResponseWrapData(ctx, map[string]interface{}{ + "ServerCert": certBytes, + "ServerKey": rawKey, + }, time.Second*60, true) + if err != nil { + return "", err + } + + return wrapInfo.Token, nil +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/strutil/strutil.go b/vendor/github.com/hashicorp/vault/sdk/helper/strutil/strutil.go new file mode 100644 index 0000000000..09cc9425cb --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/strutil/strutil.go @@ -0,0 +1,94 @@ +// DEPRECATED: this has been moved to go-secure-stdlib and will be removed +package strutil + +import ( + extstrutil "github.com/hashicorp/go-secure-stdlib/strutil" +) + +func StrListContainsGlob(haystack []string, needle string) bool { + return extstrutil.StrListContainsGlob(haystack, needle) +} + +func StrListContains(haystack []string, needle string) bool { + return extstrutil.StrListContains(haystack, needle) +} + +func StrListContainsCaseInsensitive(haystack []string, needle string) bool { + return extstrutil.StrListContainsCaseInsensitive(haystack, needle) +} + +func StrListSubset(super, sub []string) bool { + return extstrutil.StrListSubset(super, sub) +} + +func ParseDedupAndSortStrings(input string, sep string) []string { + return extstrutil.ParseDedupAndSortStrings(input, sep) +} + +func ParseDedupLowercaseAndSortStrings(input string, sep string) []string { + return extstrutil.ParseDedupLowercaseAndSortStrings(input, sep) +} + +func ParseKeyValues(input string, out map[string]string, sep string) error { + return extstrutil.ParseKeyValues(input, out, sep) +} + +func ParseArbitraryKeyValues(input string, out map[string]string, sep string) error { + return extstrutil.ParseArbitraryKeyValues(input, out, sep) +} + +func ParseStringSlice(input string, sep string) []string { + return extstrutil.ParseStringSlice(input, sep) +} + +func ParseArbitraryStringSlice(input string, sep string) []string { + return extstrutil.ParseArbitraryStringSlice(input, sep) +} + +func TrimStrings(items []string) []string { + return extstrutil.TrimStrings(items) +} + +func RemoveDuplicates(items []string, lowercase bool) []string { + return extstrutil.RemoveDuplicates(items, lowercase) +} + +func RemoveDuplicatesStable(items []string, caseInsensitive bool) []string { + return extstrutil.RemoveDuplicatesStable(items, caseInsensitive) +} + +func RemoveEmpty(items []string) []string { + return extstrutil.RemoveEmpty(items) +} + +func EquivalentSlices(a, b []string) bool { + return extstrutil.EquivalentSlices(a, b) +} + +func EqualStringMaps(a, b map[string]string) bool { + return extstrutil.EqualStringMaps(a, b) +} + +func StrListDelete(s []string, d string) []string { + return extstrutil.StrListDelete(s, d) +} + +func GlobbedStringsMatch(item, val string) bool { + return extstrutil.GlobbedStringsMatch(item, val) +} + +func AppendIfMissing(slice []string, i string) []string { + return extstrutil.AppendIfMissing(slice, i) +} + +func MergeSlices(args ...[]string) []string { + return extstrutil.MergeSlices(args...) 
+} + +func Difference(a, b []string, lowercase bool) []string { + return extstrutil.Difference(a, b, lowercase) +} + +func GetString(m map[string]interface{}, key string) (string, error) { + return extstrutil.GetString(m, key) +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/wrapping/wrapinfo.go b/vendor/github.com/hashicorp/vault/sdk/helper/wrapping/wrapinfo.go new file mode 100644 index 0000000000..8d8e63340f --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/wrapping/wrapinfo.go @@ -0,0 +1,37 @@ +package wrapping + +import "time" + +type ResponseWrapInfo struct { + // Setting to non-zero specifies that the response should be wrapped. + // Specifies the desired TTL of the wrapping token. + TTL time.Duration `json:"ttl" structs:"ttl" mapstructure:"ttl" sentinel:""` + + // The token containing the wrapped response + Token string `json:"token" structs:"token" mapstructure:"token" sentinel:""` + + // The token accessor for the wrapped response token + Accessor string `json:"accessor" structs:"accessor" mapstructure:"accessor"` + + // The creation time. This can be used with the TTL to figure out an + // expected expiration. + CreationTime time.Time `json:"creation_time" structs:"creation_time" mapstructure:"creation_time" sentinel:""` + + // If the contained response is the output of a token or approle secret-id creation call, the + // created token's/secret-id's accessor will be accessible here + WrappedAccessor string `json:"wrapped_accessor" structs:"wrapped_accessor" mapstructure:"wrapped_accessor" sentinel:""` + + // WrappedEntityID is the entity identifier of the caller who initiated the + // wrapping request + WrappedEntityID string `json:"wrapped_entity_id" structs:"wrapped_entity_id" mapstructure:"wrapped_entity_id" sentinel:""` + + // The format to use. This doesn't get returned, it's only internal. + Format string `json:"format" structs:"format" mapstructure:"format" sentinel:""` + + // CreationPath is the original request path that was used to create + // the wrapped response. + CreationPath string `json:"creation_path" structs:"creation_path" mapstructure:"creation_path" sentinel:""` + + // Controls seal wrapping behavior downstream for specific use cases + SealWrap bool `json:"seal_wrap" structs:"seal_wrap" mapstructure:"seal_wrap" sentinel:""` +} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/audit.go b/vendor/github.com/hashicorp/vault/sdk/logical/audit.go new file mode 100644 index 0000000000..8ba70f37e0 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/logical/audit.go @@ -0,0 +1,19 @@ +package logical + +type LogInput struct { + Type string + Auth *Auth + Request *Request + Response *Response + OuterErr error + NonHMACReqDataKeys []string + NonHMACRespDataKeys []string +} + +type MarshalOptions struct { + ValueHasher func(string) string +} + +type OptMarshaler interface { + MarshalJSONWithOptions(*MarshalOptions) ([]byte, error) +} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/auth.go b/vendor/github.com/hashicorp/vault/sdk/logical/auth.go new file mode 100644 index 0000000000..2bfb6e0015 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/logical/auth.go @@ -0,0 +1,107 @@ +package logical + +import ( + "fmt" + "time" + + sockaddr "github.com/hashicorp/go-sockaddr" +) + +// Auth is the resulting authentication information that is part of +// Response for credential backends. +type Auth struct { + LeaseOptions + + // InternalData is JSON-encodable data that is stored with the auth struct. 
+ // This will be sent back during a Renew/Revoke for storing internal data + // used for those operations. + InternalData map[string]interface{} `json:"internal_data" mapstructure:"internal_data" structs:"internal_data"` + + // DisplayName is a non-security sensitive identifier that is + // applicable to this Auth. It is used for logging and prefixing + // of dynamic secrets. For example, DisplayName may be "armon" for + // the github credential backend. If the client token is used to + // generate a SQL credential, the user may be "github-armon-uuid". + // This is to help identify the source without using audit tables. + DisplayName string `json:"display_name" mapstructure:"display_name" structs:"display_name"` + + // Policies is the list of policies that the authenticated user + // is associated with. + Policies []string `json:"policies" mapstructure:"policies" structs:"policies"` + + // TokenPolicies and IdentityPolicies break down the list in Policies to + // help determine where a policy was sourced + TokenPolicies []string `json:"token_policies" mapstructure:"token_policies" structs:"token_policies"` + IdentityPolicies []string `json:"identity_policies" mapstructure:"identity_policies" structs:"identity_policies"` + + // ExternalNamespacePolicies represent the policies authorized from + // different namespaces indexed by respective namespace identifiers + ExternalNamespacePolicies map[string][]string `json:"external_namespace_policies" mapstructure:"external_namespace_policies" structs:"external_namespace_policies"` + + // Indicates that the default policy should not be added by core when + // creating a token. The default policy will still be added if it's + // explicitly defined. + NoDefaultPolicy bool `json:"no_default_policy" mapstructure:"no_default_policy" structs:"no_default_policy"` + + // Metadata is used to attach arbitrary string-type metadata to + // an authenticated user. This metadata will be outputted into the + // audit log. + Metadata map[string]string `json:"metadata" mapstructure:"metadata" structs:"metadata"` + + // ClientToken is the token that is generated for the authentication. + // This will be filled in by Vault core when an auth structure is + // returned. Setting this manually will have no effect. + ClientToken string `json:"client_token" mapstructure:"client_token" structs:"client_token"` + + // Accessor is the identifier for the ClientToken. This can be used + // to perform management functionalities (especially revocation) when + // ClientToken in the audit logs are obfuscated. Accessor can be used + // to revoke a ClientToken and to lookup the capabilities of the ClientToken, + // both without actually knowing the ClientToken. + Accessor string `json:"accessor" mapstructure:"accessor" structs:"accessor"` + + // Period indicates that the token generated using this Auth object + // should never expire. The token should be renewed within the duration + // specified by this period. + Period time.Duration `json:"period" mapstructure:"period" structs:"period"` + + // ExplicitMaxTTL is the max TTL that constrains periodic tokens. For normal + // tokens, this value is constrained by the configured max ttl. 
+ ExplicitMaxTTL time.Duration `json:"explicit_max_ttl" mapstructure:"explicit_max_ttl" structs:"explicit_max_ttl"` + + // Number of allowed uses of the issued token + NumUses int `json:"num_uses" mapstructure:"num_uses" structs:"num_uses"` + + // EntityID is the identifier of the entity in identity store to which the + // identity of the authenticating client belongs to. + EntityID string `json:"entity_id" mapstructure:"entity_id" structs:"entity_id"` + + // Alias is the information about the authenticated client returned by + // the auth backend + Alias *Alias `json:"alias" mapstructure:"alias" structs:"alias"` + + // GroupAliases are the informational mappings of external groups which an + // authenticated user belongs to. This is used to check if there are + // mappings groups for the group aliases in identity store. For all the + // matching groups, the entity ID of the user will be added. + GroupAliases []*Alias `json:"group_aliases" mapstructure:"group_aliases" structs:"group_aliases"` + + // The set of CIDRs that this token can be used with + BoundCIDRs []*sockaddr.SockAddrMarshaler `json:"bound_cidrs"` + + // CreationPath is a path that the backend can return to use in the lease. + // This is currently only supported for the token store where roles may + // change the perceived path of the lease, even though they don't change + // the request path itself. + CreationPath string `json:"creation_path"` + + // TokenType is the type of token being requested + TokenType TokenType `json:"token_type"` + + // Orphan is set if the token does not have a parent + Orphan bool `json:"orphan"` +} + +func (a *Auth) GoString() string { + return fmt.Sprintf("*%#v", *a) +} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/connection.go b/vendor/github.com/hashicorp/vault/sdk/logical/connection.go new file mode 100644 index 0000000000..a504b10c39 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/logical/connection.go @@ -0,0 +1,15 @@ +package logical + +import ( + "crypto/tls" +) + +// Connection represents the connection information for a request. This +// is present on the Request structure for credential backends. +type Connection struct { + // RemoteAddr is the network address that sent the request. + RemoteAddr string `json:"remote_addr"` + + // ConnState is the TLS connection state if applicable. + ConnState *tls.ConnectionState `sentinel:""` +} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/controlgroup.go b/vendor/github.com/hashicorp/vault/sdk/logical/controlgroup.go new file mode 100644 index 0000000000..2ed1b07688 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/logical/controlgroup.go @@ -0,0 +1,17 @@ +package logical + +import ( + "time" +) + +type ControlGroup struct { + Authorizations []*Authz `json:"authorizations"` + RequestTime time.Time `json:"request_time"` + Approved bool `json:"approved"` + NamespaceID string `json:"namespace_id"` +} + +type Authz struct { + Token string `json:"token"` + AuthorizationTime time.Time `json:"authorization_time"` +} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/error.go b/vendor/github.com/hashicorp/vault/sdk/logical/error.go new file mode 100644 index 0000000000..02f68dd918 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/logical/error.go @@ -0,0 +1,117 @@ +package logical + +import "errors" + +var ( + // ErrUnsupportedOperation is returned if the operation is not supported + // by the logical backend. 
+ ErrUnsupportedOperation = errors.New("unsupported operation") + + // ErrUnsupportedPath is returned if the path is not supported + // by the logical backend. + ErrUnsupportedPath = errors.New("unsupported path") + + // ErrInvalidRequest is returned if the request is invalid + ErrInvalidRequest = errors.New("invalid request") + + // ErrPermissionDenied is returned if the client is not authorized + ErrPermissionDenied = errors.New("permission denied") + + // ErrMultiAuthzPending is returned if the the request needs more + // authorizations + ErrMultiAuthzPending = errors.New("request needs further approval") + + // ErrUpstreamRateLimited is returned when Vault receives a rate limited + // response from an upstream + ErrUpstreamRateLimited = errors.New("upstream rate limited") + + // ErrPerfStandbyForward is returned when Vault is in a state such that a + // perf standby cannot satisfy a request + ErrPerfStandbyPleaseForward = errors.New("please forward to the active node") + + // ErrLeaseCountQuotaExceeded is returned when a request is rejected due to a lease + // count quota being exceeded. + ErrLeaseCountQuotaExceeded = errors.New("lease count quota exceeded") + + // ErrRateLimitQuotaExceeded is returned when a request is rejected due to a + // rate limit quota being exceeded. + ErrRateLimitQuotaExceeded = errors.New("rate limit quota exceeded") + + // ErrUnrecoverable is returned when a request fails due to something that + // is likely to require manual intervention. This is a generic form of an + // unrecoverable error. + // e.g.: misconfigured or disconnected storage backend. + ErrUnrecoverable = errors.New("unrecoverable error") + + // ErrMissingRequiredState is returned when a request can't be satisfied + // with the data in the local node's storage, based on the provided + // X-Vault-Index request header. + ErrMissingRequiredState = errors.New("required index state not present") + + // Error indicating that the requested path used to serve a purpose in older + // versions, but the functionality has now been removed + ErrPathFunctionalityRemoved = errors.New("functionality on this path has been removed") +) + +type HTTPCodedError interface { + Error() string + Code() int +} + +func CodedError(status int, msg string) HTTPCodedError { + return &codedError{ + Status: status, + Message: msg, + } +} + +var _ HTTPCodedError = (*codedError)(nil) + +type codedError struct { + Status int + Message string +} + +func (e *codedError) Error() string { + return e.Message +} + +func (e *codedError) Code() int { + return e.Status +} + +// Struct to identify user input errors. This is helpful in responding the +// appropriate status codes to clients from the HTTP endpoints. +type StatusBadRequest struct { + Err string +} + +// Implementing error interface +func (s *StatusBadRequest) Error() string { + return s.Err +} + +// This is a new type declared to not cause potential compatibility problems if +// the logic around the CodedError changes; in particular for logical request +// paths it is basically ignored, and changing that behavior might cause +// unforeseen issues. 
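+// ReplicationCodedError carries an error message together with a numeric
+// status code.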
+type ReplicationCodedError struct { + Msg string + Code int +} + +func (r *ReplicationCodedError) Error() string { + return r.Msg +} + +type KeyNotFoundError struct { + Err error +} + +func (e *KeyNotFoundError) WrappedErrors() []error { + return []error{e.Err} +} + +func (e *KeyNotFoundError) Error() string { + return e.Err.Error() +} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/identity.pb.go b/vendor/github.com/hashicorp/vault/sdk/logical/identity.pb.go new file mode 100644 index 0000000000..b221ccc3b3 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/logical/identity.pb.go @@ -0,0 +1,477 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.27.1 +// protoc v3.17.3 +// source: sdk/logical/identity.proto + +package logical + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Entity struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // ID is the unique identifier for the entity + ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` + // Name is the human-friendly unique identifier for the entity + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // Aliases contains thhe alias mappings for the given entity + Aliases []*Alias `protobuf:"bytes,3,rep,name=aliases,proto3" json:"aliases,omitempty"` + // Metadata represents the custom data tied to this entity + Metadata map[string]string `protobuf:"bytes,4,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Disabled is true if the entity is disabled. + Disabled bool `protobuf:"varint,5,opt,name=disabled,proto3" json:"disabled,omitempty"` + // NamespaceID is the identifier of the namespace to which this entity + // belongs to. + NamespaceID string `protobuf:"bytes,6,opt,name=namespace_id,json=namespaceID,proto3" json:"namespace_id,omitempty"` +} + +func (x *Entity) Reset() { + *x = Entity{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_logical_identity_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Entity) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Entity) ProtoMessage() {} + +func (x *Entity) ProtoReflect() protoreflect.Message { + mi := &file_sdk_logical_identity_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Entity.ProtoReflect.Descriptor instead. 
+func (*Entity) Descriptor() ([]byte, []int) { + return file_sdk_logical_identity_proto_rawDescGZIP(), []int{0} +} + +func (x *Entity) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +func (x *Entity) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Entity) GetAliases() []*Alias { + if x != nil { + return x.Aliases + } + return nil +} + +func (x *Entity) GetMetadata() map[string]string { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *Entity) GetDisabled() bool { + if x != nil { + return x.Disabled + } + return false +} + +func (x *Entity) GetNamespaceID() string { + if x != nil { + return x.NamespaceID + } + return "" +} + +type Alias struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // MountType is the backend mount's type to which this identity belongs + MountType string `protobuf:"bytes,1,opt,name=mount_type,json=mountType,proto3" json:"mount_type,omitempty"` + // MountAccessor is the identifier of the mount entry to which this + // identity belongs + MountAccessor string `protobuf:"bytes,2,opt,name=mount_accessor,json=mountAccessor,proto3" json:"mount_accessor,omitempty"` + // Name is the identifier of this identity in its authentication source + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // Metadata represents the custom data tied to this alias. Fields added + // to it should have a low rate of change (or no change) because each + // change incurs a storage write, so quickly-changing fields can have + // a significant performance impact at scale. See the SDK's + // "aliasmetadata" package for a helper that eases and standardizes + // using this safely. + Metadata map[string]string `protobuf:"bytes,4,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // ID is the unique identifier for the alias + ID string `protobuf:"bytes,5,opt,name=ID,proto3" json:"ID,omitempty"` + // NamespaceID is the identifier of the namespace to which this alias + // belongs. + NamespaceID string `protobuf:"bytes,6,opt,name=namespace_id,json=namespaceID,proto3" json:"namespace_id,omitempty"` + // Custom Metadata represents the custom data tied to this alias + CustomMetadata map[string]string `protobuf:"bytes,7,rep,name=custom_metadata,json=customMetadata,proto3" json:"custom_metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Local indicates if the alias only belongs to the cluster where it was + // created. If true, the alias will be stored in a location that are ignored + // by the performance replication subsystem. 
+ Local bool `protobuf:"varint,8,opt,name=local,proto3" json:"local,omitempty"` +} + +func (x *Alias) Reset() { + *x = Alias{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_logical_identity_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Alias) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Alias) ProtoMessage() {} + +func (x *Alias) ProtoReflect() protoreflect.Message { + mi := &file_sdk_logical_identity_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Alias.ProtoReflect.Descriptor instead. +func (*Alias) Descriptor() ([]byte, []int) { + return file_sdk_logical_identity_proto_rawDescGZIP(), []int{1} +} + +func (x *Alias) GetMountType() string { + if x != nil { + return x.MountType + } + return "" +} + +func (x *Alias) GetMountAccessor() string { + if x != nil { + return x.MountAccessor + } + return "" +} + +func (x *Alias) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Alias) GetMetadata() map[string]string { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *Alias) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +func (x *Alias) GetNamespaceID() string { + if x != nil { + return x.NamespaceID + } + return "" +} + +func (x *Alias) GetCustomMetadata() map[string]string { + if x != nil { + return x.CustomMetadata + } + return nil +} + +func (x *Alias) GetLocal() bool { + if x != nil { + return x.Local + } + return false +} + +type Group struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // ID is the unique identifier for the group + ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` + // Name is the human-friendly unique identifier for the group + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // Metadata represents the custom data tied to this group + Metadata map[string]string `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // NamespaceID is the identifier of the namespace to which this group + // belongs to. + NamespaceID string `protobuf:"bytes,4,opt,name=namespace_id,json=namespaceID,proto3" json:"namespace_id,omitempty"` +} + +func (x *Group) Reset() { + *x = Group{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_logical_identity_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Group) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Group) ProtoMessage() {} + +func (x *Group) ProtoReflect() protoreflect.Message { + mi := &file_sdk_logical_identity_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Group.ProtoReflect.Descriptor instead. 
+func (*Group) Descriptor() ([]byte, []int) { + return file_sdk_logical_identity_proto_rawDescGZIP(), []int{2} +} + +func (x *Group) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +func (x *Group) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Group) GetMetadata() map[string]string { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *Group) GetNamespaceID() string { + if x != nil { + return x.NamespaceID + } + return "" +} + +var File_sdk_logical_identity_proto protoreflect.FileDescriptor + +var file_sdk_logical_identity_proto_rawDesc = []byte{ + 0x0a, 0x1a, 0x73, 0x64, 0x6b, 0x2f, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2f, 0x69, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x07, 0x6c, 0x6f, + 0x67, 0x69, 0x63, 0x61, 0x6c, 0x22, 0x8d, 0x02, 0x0a, 0x06, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, + 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x44, + 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x07, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, + 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x07, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x39, + 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x1d, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x69, 0x73, + 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x64, 0x69, 0x73, + 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, + 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb1, 0x03, 0x0a, 0x05, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, + 0x1d, 0x0a, 0x0a, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x25, + 0x0a, 0x0e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x41, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x08, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6c, 0x6f, + 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x2e, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x05, 0x20, 0x01, 0x28, 
0x09, 0x52, + 0x02, 0x49, 0x44, 0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x4b, 0x0a, 0x0f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, + 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x22, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x2e, + 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x0e, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x05, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x41, 0x0a, 0x13, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xc5, 0x01, 0x0a, 0x05, 0x47, 0x72, + 0x6f, 0x75, 0x70, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x02, 0x49, 0x44, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6c, 0x6f, 0x67, 0x69, + 0x63, 0x61, 0x6c, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, + 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x49, 0x64, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x42, 0x28, 0x5a, 0x26, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x2f, + 0x73, 0x64, 0x6b, 0x2f, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_sdk_logical_identity_proto_rawDescOnce sync.Once + file_sdk_logical_identity_proto_rawDescData = file_sdk_logical_identity_proto_rawDesc +) + +func file_sdk_logical_identity_proto_rawDescGZIP() []byte { + file_sdk_logical_identity_proto_rawDescOnce.Do(func() { + file_sdk_logical_identity_proto_rawDescData = 
protoimpl.X.CompressGZIP(file_sdk_logical_identity_proto_rawDescData) + }) + return file_sdk_logical_identity_proto_rawDescData +} + +var file_sdk_logical_identity_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_sdk_logical_identity_proto_goTypes = []interface{}{ + (*Entity)(nil), // 0: logical.Entity + (*Alias)(nil), // 1: logical.Alias + (*Group)(nil), // 2: logical.Group + nil, // 3: logical.Entity.MetadataEntry + nil, // 4: logical.Alias.MetadataEntry + nil, // 5: logical.Alias.CustomMetadataEntry + nil, // 6: logical.Group.MetadataEntry +} +var file_sdk_logical_identity_proto_depIDxs = []int32{ + 1, // 0: logical.Entity.aliases:type_name -> logical.Alias + 3, // 1: logical.Entity.metadata:type_name -> logical.Entity.MetadataEntry + 4, // 2: logical.Alias.metadata:type_name -> logical.Alias.MetadataEntry + 5, // 3: logical.Alias.custom_metadata:type_name -> logical.Alias.CustomMetadataEntry + 6, // 4: logical.Group.metadata:type_name -> logical.Group.MetadataEntry + 5, // [5:5] is the sub-list for method output_type + 5, // [5:5] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { file_sdk_logical_identity_proto_init() } +func file_sdk_logical_identity_proto_init() { + if File_sdk_logical_identity_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_sdk_logical_identity_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Entity); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_logical_identity_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Alias); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_logical_identity_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Group); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_sdk_logical_identity_proto_rawDesc, + NumEnums: 0, + NumMessages: 7, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_sdk_logical_identity_proto_goTypes, + DependencyIndexes: file_sdk_logical_identity_proto_depIDxs, + MessageInfos: file_sdk_logical_identity_proto_msgTypes, + }.Build() + File_sdk_logical_identity_proto = out.File + file_sdk_logical_identity_proto_rawDesc = nil + file_sdk_logical_identity_proto_goTypes = nil + file_sdk_logical_identity_proto_depIDxs = nil +} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/identity.proto b/vendor/github.com/hashicorp/vault/sdk/logical/identity.proto new file mode 100644 index 0000000000..11c7678231 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/logical/identity.proto @@ -0,0 +1,76 @@ +syntax = "proto3"; + +option go_package = "github.com/hashicorp/vault/sdk/logical"; + +package logical; + +message Entity { + // ID is the unique identifier for the entity + string ID = 1; + + // Name is the human-friendly unique identifier for the entity + string name = 2; + + // Aliases contains thhe alias mappings for the given entity + repeated Alias aliases = 3; + + // Metadata represents the 
custom data tied to this entity + map metadata = 4; + + // Disabled is true if the entity is disabled. + bool disabled = 5; + + // NamespaceID is the identifier of the namespace to which this entity + // belongs to. + string namespace_id = 6; +} + +message Alias { + // MountType is the backend mount's type to which this identity belongs + string mount_type = 1; + + // MountAccessor is the identifier of the mount entry to which this + // identity belongs + string mount_accessor = 2; + + // Name is the identifier of this identity in its authentication source + string name = 3; + + // Metadata represents the custom data tied to this alias. Fields added + // to it should have a low rate of change (or no change) because each + // change incurs a storage write, so quickly-changing fields can have + // a significant performance impact at scale. See the SDK's + // "aliasmetadata" package for a helper that eases and standardizes + // using this safely. + map metadata = 4; + + // ID is the unique identifier for the alias + string ID = 5; + + // NamespaceID is the identifier of the namespace to which this alias + // belongs. + string namespace_id = 6; + + // Custom Metadata represents the custom data tied to this alias + map custom_metadata = 7; + + // Local indicates if the alias only belongs to the cluster where it was + // created. If true, the alias will be stored in a location that are ignored + // by the performance replication subsystem. + bool local = 8; +} + +message Group { + // ID is the unique identifier for the group + string ID = 1; + + // Name is the human-friendly unique identifier for the group + string name = 2; + + // Metadata represents the custom data tied to this group + map metadata = 3; + + // NamespaceID is the identifier of the namespace to which this group + // belongs to. + string namespace_id = 4; +} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/lease.go b/vendor/github.com/hashicorp/vault/sdk/logical/lease.go new file mode 100644 index 0000000000..97bbe4f658 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/logical/lease.go @@ -0,0 +1,53 @@ +package logical + +import ( + "time" +) + +// LeaseOptions is an embeddable struct to capture common lease +// settings between a Secret and Auth +type LeaseOptions struct { + // TTL is the duration that this secret is valid for. Vault + // will automatically revoke it after the duration. + TTL time.Duration `json:"lease"` + + // MaxTTL is the maximum duration that this secret is valid for. + MaxTTL time.Duration `json:"max_ttl"` + + // Renewable, if true, means that this secret can be renewed. + Renewable bool `json:"renewable"` + + // Increment will be the lease increment that the user requested. + // This is only available on a Renew operation and has no effect + // when returning a response. + Increment time.Duration `json:"-"` + + // IssueTime is the time of issue for the original lease. This is + // only available on Renew and Revoke operations and has no effect when returning + // a response. It can be used to enforce maximum lease periods by + // a logical backend. 
+ IssueTime time.Time `json:"-"` +} + +// LeaseEnabled checks if leasing is enabled +func (l *LeaseOptions) LeaseEnabled() bool { + return l.TTL > 0 +} + +// LeaseTotal is the lease duration with a guard against a negative TTL +func (l *LeaseOptions) LeaseTotal() time.Duration { + if l.TTL <= 0 { + return 0 + } + + return l.TTL +} + +// ExpirationTime computes the time until expiration including the grace period +func (l *LeaseOptions) ExpirationTime() time.Time { + var expireTime time.Time + if l.LeaseEnabled() { + expireTime = time.Now().Add(l.LeaseTotal()) + } + return expireTime +} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/logical.go b/vendor/github.com/hashicorp/vault/sdk/logical/logical.go new file mode 100644 index 0000000000..cec2d19c0e --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/logical/logical.go @@ -0,0 +1,139 @@ +package logical + +import ( + "context" + + log "github.com/hashicorp/go-hclog" +) + +// BackendType is the type of backend that is being implemented +type BackendType uint32 + +// The these are the types of backends that can be derived from +// logical.Backend +const ( + TypeUnknown BackendType = 0 // This is also the zero-value for BackendType + TypeLogical BackendType = 1 + TypeCredential BackendType = 2 +) + +// Stringer implementation +func (b BackendType) String() string { + switch b { + case TypeLogical: + return "secret" + case TypeCredential: + return "auth" + } + + return "unknown" +} + +// Backend interface must be implemented to be "mountable" at +// a given path. Requests flow through a router which has various mount +// points that flow to a logical backend. The logic of each backend is flexible, +// and this is what allows materialized keys to function. There can be specialized +// logical backends for various upstreams (Consul, PostgreSQL, MySQL, etc) that can +// interact with remote APIs to generate keys dynamically. This interface also +// allows for a "procfs" like interaction, as internal state can be exposed by +// acting like a logical backend and being mounted. +type Backend interface { + + // Initialize is used to initialize a plugin after it has been mounted. + Initialize(context.Context, *InitializationRequest) error + + // HandleRequest is used to handle a request and generate a response. + // The backends must check the operation type and handle appropriately. + HandleRequest(context.Context, *Request) (*Response, error) + + // SpecialPaths is a list of paths that are special in some way. + // See PathType for the types of special paths. The key is the type + // of the special path, and the value is a list of paths for this type. + // This is not a regular expression but is an exact match. If the path + // ends in '*' then it is a prefix-based match. The '*' can only appear + // at the end. + SpecialPaths() *Paths + + // System provides an interface to access certain system configuration + // information, such as globally configured default and max lease TTLs. + System() SystemView + + // Logger provides an interface to access the underlying logger. This + // is useful when a struct embeds a Backend-implemented struct that + // contains a private instance of logger. + Logger() log.Logger + + // HandleExistenceCheck is used to handle a request and generate a response + // indicating whether the given path exists or not; this is used to + // understand whether the request must have a Create or Update capability + // ACL applied. 
The first bool indicates whether an existence check + // function was found for the backend; the second indicates whether, if an + // existence check function was found, the item exists or not. + HandleExistenceCheck(context.Context, *Request) (bool, bool, error) + + // Cleanup is invoked during an unmount of a backend to allow it to + // handle any cleanup like connection closing or releasing of file handles. + Cleanup(context.Context) + + // InvalidateKey may be invoked when an object is modified that belongs + // to the backend. The backend can use this to clear any caches or reset + // internal state as needed. + InvalidateKey(context.Context, string) + + // Setup is used to set up the backend based on the provided backend + // configuration. + Setup(context.Context, *BackendConfig) error + + // Type returns the BackendType for the particular backend + Type() BackendType +} + +// BackendConfig is provided to the factory to initialize the backend +type BackendConfig struct { + // View should not be stored, and should only be used for initialization + StorageView Storage + + // The backend should use this logger. The log should not contain any secrets. + Logger log.Logger + + // System provides a view into a subset of safe system information that + // is useful for backends, such as the default/max lease TTLs + System SystemView + + // BackendUUID is a unique identifier provided to this backend. It's useful + // when a backend needs a consistent and unique string without using storage. + BackendUUID string + + // Config is the opaque user configuration provided when mounting + Config map[string]string +} + +// Factory is the factory function to create a logical backend. +type Factory func(context.Context, *BackendConfig) (Backend, error) + +// Paths is the structure of special paths that is used for SpecialPaths. +type Paths struct { + // Root are the paths that require a root token to access + Root []string + + // Unauthenticated are the paths that can be accessed without any auth. + // These can't be regular expressions, it is either exact match, a prefix + // match and/or a wildcard match. For prefix match, append '*' as a suffix. + // For a wildcard match, use '+' in the segment to match any identifier + // (e.g. 'foo/+/bar'). Note that '+' can't be adjacent to a non-slash. + Unauthenticated []string + + // LocalStorage are paths (prefixes) that are local to this instance; this + // indicates that these paths should not be replicated + LocalStorage []string + + // SealWrapStorage are storage paths that, when using a capable seal, + // should be seal wrapped with extra encryption. It is exact matching + // unless it ends with '/' in which case it will be treated as a prefix. 
+ SealWrapStorage []string +} + +type Auditor interface { + AuditRequest(ctx context.Context, input *LogInput) error + AuditResponse(ctx context.Context, input *LogInput) error +} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/logical_storage.go b/vendor/github.com/hashicorp/vault/sdk/logical/logical_storage.go new file mode 100644 index 0000000000..16b85cd797 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/logical/logical_storage.go @@ -0,0 +1,52 @@ +package logical + +import ( + "context" + + "github.com/hashicorp/vault/sdk/physical" +) + +type LogicalStorage struct { + underlying physical.Backend +} + +func (s *LogicalStorage) Get(ctx context.Context, key string) (*StorageEntry, error) { + entry, err := s.underlying.Get(ctx, key) + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + return &StorageEntry{ + Key: entry.Key, + Value: entry.Value, + SealWrap: entry.SealWrap, + }, nil +} + +func (s *LogicalStorage) Put(ctx context.Context, entry *StorageEntry) error { + return s.underlying.Put(ctx, &physical.Entry{ + Key: entry.Key, + Value: entry.Value, + SealWrap: entry.SealWrap, + }) +} + +func (s *LogicalStorage) Delete(ctx context.Context, key string) error { + return s.underlying.Delete(ctx, key) +} + +func (s *LogicalStorage) List(ctx context.Context, prefix string) ([]string, error) { + return s.underlying.List(ctx, prefix) +} + +func (s *LogicalStorage) Underlying() physical.Backend { + return s.underlying +} + +func NewLogicalStorage(underlying physical.Backend) *LogicalStorage { + return &LogicalStorage{ + underlying: underlying, + } +} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/plugin.pb.go b/vendor/github.com/hashicorp/vault/sdk/logical/plugin.pb.go new file mode 100644 index 0000000000..46de77666d --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/logical/plugin.pb.go @@ -0,0 +1,146 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.27.1 +// protoc v3.17.3 +// source: sdk/logical/plugin.proto + +package logical + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type PluginEnvironment struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // VaultVersion is the version of the Vault server + VaultVersion string `protobuf:"bytes,1,opt,name=vault_version,json=vaultVersion,proto3" json:"vault_version,omitempty"` +} + +func (x *PluginEnvironment) Reset() { + *x = PluginEnvironment{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_logical_plugin_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PluginEnvironment) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PluginEnvironment) ProtoMessage() {} + +func (x *PluginEnvironment) ProtoReflect() protoreflect.Message { + mi := &file_sdk_logical_plugin_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PluginEnvironment.ProtoReflect.Descriptor instead. +func (*PluginEnvironment) Descriptor() ([]byte, []int) { + return file_sdk_logical_plugin_proto_rawDescGZIP(), []int{0} +} + +func (x *PluginEnvironment) GetVaultVersion() string { + if x != nil { + return x.VaultVersion + } + return "" +} + +var File_sdk_logical_plugin_proto protoreflect.FileDescriptor + +var file_sdk_logical_plugin_proto_rawDesc = []byte{ + 0x0a, 0x18, 0x73, 0x64, 0x6b, 0x2f, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2f, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x07, 0x6c, 0x6f, 0x67, 0x69, + 0x63, 0x61, 0x6c, 0x22, 0x38, 0x0a, 0x11, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x45, 0x6e, 0x76, + 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x76, 0x61, 0x75, 0x6c, + 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0c, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x28, 0x5a, + 0x26, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, + 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x2f, 0x73, 0x64, 0x6b, 0x2f, + 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_sdk_logical_plugin_proto_rawDescOnce sync.Once + file_sdk_logical_plugin_proto_rawDescData = file_sdk_logical_plugin_proto_rawDesc +) + +func file_sdk_logical_plugin_proto_rawDescGZIP() []byte { + file_sdk_logical_plugin_proto_rawDescOnce.Do(func() { + file_sdk_logical_plugin_proto_rawDescData = protoimpl.X.CompressGZIP(file_sdk_logical_plugin_proto_rawDescData) + }) + return file_sdk_logical_plugin_proto_rawDescData +} + +var file_sdk_logical_plugin_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_sdk_logical_plugin_proto_goTypes = []interface{}{ + (*PluginEnvironment)(nil), // 0: logical.PluginEnvironment +} +var file_sdk_logical_plugin_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_sdk_logical_plugin_proto_init() } +func file_sdk_logical_plugin_proto_init() { + if File_sdk_logical_plugin_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { 
+ file_sdk_logical_plugin_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PluginEnvironment); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_sdk_logical_plugin_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_sdk_logical_plugin_proto_goTypes, + DependencyIndexes: file_sdk_logical_plugin_proto_depIdxs, + MessageInfos: file_sdk_logical_plugin_proto_msgTypes, + }.Build() + File_sdk_logical_plugin_proto = out.File + file_sdk_logical_plugin_proto_rawDesc = nil + file_sdk_logical_plugin_proto_goTypes = nil + file_sdk_logical_plugin_proto_depIdxs = nil +} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/plugin.proto b/vendor/github.com/hashicorp/vault/sdk/logical/plugin.proto new file mode 100644 index 0000000000..5992c21395 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/logical/plugin.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +option go_package = "github.com/hashicorp/vault/sdk/logical"; + +package logical; + +message PluginEnvironment { + // VaultVersion is the version of the Vault server + string vault_version = 1; +} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/request.go b/vendor/github.com/hashicorp/vault/sdk/logical/request.go new file mode 100644 index 0000000000..829c155fd0 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/logical/request.go @@ -0,0 +1,379 @@ +package logical + +import ( + "context" + "fmt" + "net/http" + "strings" + "time" + + "github.com/mitchellh/copystructure" +) + +// RequestWrapInfo is a struct that stores information about desired response +// and seal wrapping behavior +type RequestWrapInfo struct { + // Setting to non-zero specifies that the response should be wrapped. + // Specifies the desired TTL of the wrapping token. + TTL time.Duration `json:"ttl" structs:"ttl" mapstructure:"ttl" sentinel:""` + + // The format to use for the wrapped response; if not specified it's a bare + // token + Format string `json:"format" structs:"format" mapstructure:"format" sentinel:""` + + // A flag to conforming backends that data for a given request should be + // seal wrapped + SealWrap bool `json:"seal_wrap" structs:"seal_wrap" mapstructure:"seal_wrap" sentinel:""` +} + +func (r *RequestWrapInfo) SentinelGet(key string) (interface{}, error) { + if r == nil { + return nil, nil + } + switch key { + case "ttl": + return r.TTL, nil + case "ttl_seconds": + return int64(r.TTL.Seconds()), nil + } + + return nil, nil +} + +func (r *RequestWrapInfo) SentinelKeys() []string { + return []string{ + "ttl", + "ttl_seconds", + } +} + +type ClientTokenSource uint32 + +const ( + NoClientToken ClientTokenSource = iota + ClientTokenFromVaultHeader + ClientTokenFromAuthzHeader +) + +type WALState struct { + ClusterID string + LocalIndex uint64 + ReplicatedIndex uint64 +} + +const indexStateCtxKey = "index_state" + +// IndexStateContext returns a context with an added value holding the index +// state that should be populated on writes. +func IndexStateContext(ctx context.Context, state *WALState) context.Context { + return context.WithValue(ctx, indexStateCtxKey, state) +} + +// IndexStateFromContext is a helper to look up if the provided context contains +// an index state pointer. 
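+// It returns nil when no *WALState has been attached to the context via
+// IndexStateContext.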
+func IndexStateFromContext(ctx context.Context) *WALState { + s, ok := ctx.Value(indexStateCtxKey).(*WALState) + if !ok { + return nil + } + return s +} + +// Request is a struct that stores the parameters and context of a request +// being made to Vault. It is used to abstract the details of the higher level +// request protocol from the handlers. +// +// Note: Many of these have Sentinel disabled because they are values populated +// by the router after policy checks; the token namespace would be the right +// place to access them via Sentinel +type Request struct { + // Id is the uuid associated with each request + ID string `json:"id" structs:"id" mapstructure:"id" sentinel:""` + + // If set, the name given to the replication secondary where this request + // originated + ReplicationCluster string `json:"replication_cluster" structs:"replication_cluster" mapstructure:"replication_cluster" sentinel:""` + + // Operation is the requested operation type + Operation Operation `json:"operation" structs:"operation" mapstructure:"operation"` + + // Path is the full path of the request + Path string `json:"path" structs:"path" mapstructure:"path" sentinel:""` + + // Request data is an opaque map that must have string keys. + Data map[string]interface{} `json:"map" structs:"data" mapstructure:"data"` + + // Storage can be used to durably store and retrieve state. + Storage Storage `json:"-" sentinel:""` + + // Secret will be non-nil only for Revoke and Renew operations + // to represent the secret that was returned prior. + Secret *Secret `json:"secret" structs:"secret" mapstructure:"secret" sentinel:""` + + // Auth will be non-nil only for Renew operations + // to represent the auth that was returned prior. + Auth *Auth `json:"auth" structs:"auth" mapstructure:"auth" sentinel:""` + + // Headers will contain the http headers from the request. This value will + // be used in the audit broker to ensure we are auditing only the allowed + // headers. + Headers map[string][]string `json:"headers" structs:"headers" mapstructure:"headers" sentinel:""` + + // Connection will be non-nil only for credential providers to + // inspect the connection information and potentially use it for + // authentication/protection. + Connection *Connection `json:"connection" structs:"connection" mapstructure:"connection"` + + // ClientToken is provided to the core so that the identity + // can be verified and ACLs applied. This value is passed + // through to the logical backends but after being salted and + // hashed. + ClientToken string `json:"client_token" structs:"client_token" mapstructure:"client_token" sentinel:""` + + // ClientTokenAccessor is provided to the core so that the it can get + // logged as part of request audit logging. + ClientTokenAccessor string `json:"client_token_accessor" structs:"client_token_accessor" mapstructure:"client_token_accessor" sentinel:""` + + // DisplayName is provided to the logical backend to help associate + // dynamic secrets with the source entity. This is not a sensitive + // name, but is useful for operators. + DisplayName string `json:"display_name" structs:"display_name" mapstructure:"display_name" sentinel:""` + + // MountPoint is provided so that a logical backend can generate + // paths relative to itself. The `Path` is effectively the client + // request path with the MountPoint trimmed off. 
+ MountPoint string `json:"mount_point" structs:"mount_point" mapstructure:"mount_point" sentinel:""` + + // MountType is provided so that a logical backend can make decisions + // based on the specific mount type (e.g., if a mount type has different + // aliases, generating different defaults depending on the alias) + MountType string `json:"mount_type" structs:"mount_type" mapstructure:"mount_type" sentinel:""` + + // MountAccessor is provided so that identities returned by the authentication + // backends can be tied to the mount it belongs to. + MountAccessor string `json:"mount_accessor" structs:"mount_accessor" mapstructure:"mount_accessor" sentinel:""` + + // WrapInfo contains requested response wrapping parameters + WrapInfo *RequestWrapInfo `json:"wrap_info" structs:"wrap_info" mapstructure:"wrap_info" sentinel:""` + + // ClientTokenRemainingUses represents the allowed number of uses left on the + // token supplied + ClientTokenRemainingUses int `json:"client_token_remaining_uses" structs:"client_token_remaining_uses" mapstructure:"client_token_remaining_uses"` + + // EntityID is the identity of the caller extracted out of the token used + // to make this request + EntityID string `json:"entity_id" structs:"entity_id" mapstructure:"entity_id" sentinel:""` + + // PolicyOverride indicates that the requestor wishes to override + // soft-mandatory Sentinel policies + PolicyOverride bool `json:"policy_override" structs:"policy_override" mapstructure:"policy_override"` + + // Whether the request is unauthenticated, as in, had no client token + // attached. Useful in some situations where the client token is not made + // accessible. + Unauthenticated bool `json:"unauthenticated" structs:"unauthenticated" mapstructure:"unauthenticated"` + + // MFACreds holds the parsed MFA information supplied over the API as part of + // X-Vault-MFA header + MFACreds MFACreds `json:"mfa_creds" structs:"mfa_creds" mapstructure:"mfa_creds" sentinel:""` + + // Cached token entry. This avoids another lookup in request handling when + // we've already looked it up at http handling time. Note that this token + // has not been "used", as in it will not properly take into account use + // count limitations. As a result this field should only ever be used for + // transport to a function that would otherwise do a lookup and then + // properly use the token. + tokenEntry *TokenEntry + + // For replication, contains the last WAL on the remote side after handling + // the request, used for best-effort avoidance of stale read-after-write + lastRemoteWAL uint64 + + // ControlGroup holds the authorizations that have happened on this + // request + ControlGroup *ControlGroup `json:"control_group" structs:"control_group" mapstructure:"control_group" sentinel:""` + + // ClientTokenSource tells us where the client token was sourced from, so + // we can delete it before sending off to plugins + ClientTokenSource ClientTokenSource + + // HTTPRequest, if set, can be used to access fields from the HTTP request + // that generated this logical.Request object, such as the request body. + HTTPRequest *http.Request `json:"-" sentinel:""` + + // ResponseWriter if set can be used to stream a response value to the http + // request that generated this logical.Request object. + ResponseWriter *HTTPResponseWriter `json:"-" sentinel:""` + + // requiredState is used internally to propagate the X-Vault-Index request + // header to later levels of request processing that operate only on + // logical.Request. 
+ requiredState []string + + // responseState is used internally to propagate the state that should appear + // in response headers; it's attached to the request rather than the response + // because not all requests yields non-nil responses. + responseState *WALState + + // ClientID is the identity of the caller. If the token is associated with an + // entity, it will be the same as the EntityID . If the token has no entity, + // this will be the sha256(sorted policies + namespace) associated with the + // client token. + ClientID string `json:"client_id" structs:"client_id" mapstructure:"client_id" sentinel:""` +} + +// Clone returns a deep copy of the request by using copystructure +func (r *Request) Clone() (*Request, error) { + cpy, err := copystructure.Copy(r) + if err != nil { + return nil, err + } + return cpy.(*Request), nil +} + +// Get returns a data field and guards for nil Data +func (r *Request) Get(key string) interface{} { + if r.Data == nil { + return nil + } + return r.Data[key] +} + +// GetString returns a data field as a string +func (r *Request) GetString(key string) string { + raw := r.Get(key) + s, _ := raw.(string) + return s +} + +func (r *Request) GoString() string { + return fmt.Sprintf("*%#v", *r) +} + +func (r *Request) SentinelGet(key string) (interface{}, error) { + switch key { + case "path": + // Sanitize it here so that it's consistent in policies + return strings.TrimPrefix(r.Path, "/"), nil + + case "wrapping", "wrap_info": + // If the pointer is nil accessing the wrap info is considered + // "undefined" so this allows us to instead discover a TTL of zero + if r.WrapInfo == nil { + return &RequestWrapInfo{}, nil + } + return r.WrapInfo, nil + } + + return nil, nil +} + +func (r *Request) SentinelKeys() []string { + return []string{ + "path", + "wrapping", + "wrap_info", + } +} + +func (r *Request) LastRemoteWAL() uint64 { + return r.lastRemoteWAL +} + +func (r *Request) SetLastRemoteWAL(last uint64) { + r.lastRemoteWAL = last +} + +func (r *Request) RequiredState() []string { + return r.requiredState +} + +func (r *Request) SetRequiredState(state []string) { + r.requiredState = state +} + +func (r *Request) ResponseState() *WALState { + return r.responseState +} + +func (r *Request) SetResponseState(w *WALState) { + r.responseState = w +} + +func (r *Request) TokenEntry() *TokenEntry { + return r.tokenEntry +} + +func (r *Request) SetTokenEntry(te *TokenEntry) { + r.tokenEntry = te +} + +// RenewRequest creates the structure of the renew request. +func RenewRequest(path string, secret *Secret, data map[string]interface{}) *Request { + return &Request{ + Operation: RenewOperation, + Path: path, + Data: data, + Secret: secret, + } +} + +// RenewAuthRequest creates the structure of the renew request for an auth. +func RenewAuthRequest(path string, auth *Auth, data map[string]interface{}) *Request { + return &Request{ + Operation: RenewOperation, + Path: path, + Data: data, + Auth: auth, + } +} + +// RevokeRequest creates the structure of the revoke request. +func RevokeRequest(path string, secret *Secret, data map[string]interface{}) *Request { + return &Request{ + Operation: RevokeOperation, + Path: path, + Data: data, + Secret: secret, + } +} + +// RollbackRequest creates the structure of the revoke request. 
+func RollbackRequest(path string) *Request { + return &Request{ + Operation: RollbackOperation, + Path: path, + Data: make(map[string]interface{}), + } +} + +// Operation is an enum that is used to specify the type +// of request being made +type Operation string + +const ( + // The operations below are called per path + CreateOperation Operation = "create" + ReadOperation = "read" + UpdateOperation = "update" + PatchOperation = "patch" + DeleteOperation = "delete" + ListOperation = "list" + HelpOperation = "help" + AliasLookaheadOperation = "alias-lookahead" + + // The operations below are called globally, the path is less relevant. + RevokeOperation Operation = "revoke" + RenewOperation = "renew" + RollbackOperation = "rollback" +) + +type MFACreds map[string][]string + +// InitializationRequest stores the parameters and context of an Initialize() +// call being made to a logical.Backend. +type InitializationRequest struct { + + // Storage can be used to durably store and retrieve state. + Storage Storage +} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/response.go b/vendor/github.com/hashicorp/vault/sdk/logical/response.go new file mode 100644 index 0000000000..a675112539 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/logical/response.go @@ -0,0 +1,221 @@ +package logical + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "sync/atomic" + + "github.com/hashicorp/vault/sdk/helper/wrapping" +) + +const ( + // HTTPContentType can be specified in the Data field of a Response + // so that the HTTP front end can specify a custom Content-Type associated + // with the HTTPRawBody. This can only be used for non-secrets, and should + // be avoided unless absolutely necessary, such as implementing a specification. + // The value must be a string. + HTTPContentType = "http_content_type" + + // HTTPRawBody is the raw content of the HTTP body that goes with the HTTPContentType. + // This can only be specified for non-secrets, and should should be similarly + // avoided like the HTTPContentType. The value must be a byte slice. + HTTPRawBody = "http_raw_body" + + // HTTPStatusCode is the response code of the HTTP body that goes with the HTTPContentType. + // This can only be specified for non-secrets, and should should be similarly + // avoided like the HTTPContentType. The value must be an integer. + HTTPStatusCode = "http_status_code" + + // For unwrapping we may need to know whether the value contained in the + // raw body is already JSON-unmarshaled. The presence of this key indicates + // that it has already been unmarshaled. That way we don't need to simply + // ignore errors. + HTTPRawBodyAlreadyJSONDecoded = "http_raw_body_already_json_decoded" + + // If set, HTTPCacheControlHeader will replace the default Cache-Control=no-store header + // set by the generic wrapping handler. The value must be a string. + HTTPCacheControlHeader = "http_raw_cache_control" + + // If set, HTTPPragmaHeader will set the Pragma response header. + // The value must be a string. + HTTPPragmaHeader = "http_raw_pragma" + + // If set, HTTPWWWAuthenticateHeader will set the WWW-Authenticate response header. + // The value must be a string. + HTTPWWWAuthenticateHeader = "http_www_authenticate" +) + +// Response is a struct that stores the response of a request. +// It is used to abstract the details of the higher level request protocol. +type Response struct { + // Secret, if not nil, denotes that this response represents a secret. 
+ Secret *Secret `json:"secret" structs:"secret" mapstructure:"secret"` + + // Auth, if not nil, contains the authentication information for + // this response. This is only checked and means something for + // credential backends. + Auth *Auth `json:"auth" structs:"auth" mapstructure:"auth"` + + // Response data is an opaque map that must have string keys. For + // secrets, this data is sent down to the user as-is. To store internal + // data that you don't want the user to see, store it in + // Secret.InternalData. + Data map[string]interface{} `json:"data" structs:"data" mapstructure:"data"` + + // Redirect is an HTTP URL to redirect to for further authentication. + // This is only valid for credential backends. This will be blanked + // for any logical backend and ignored. + Redirect string `json:"redirect" structs:"redirect" mapstructure:"redirect"` + + // Warnings allow operations or backends to return warnings in response + // to user actions without failing the action outright. + Warnings []string `json:"warnings" structs:"warnings" mapstructure:"warnings"` + + // Information for wrapping the response in a cubbyhole + WrapInfo *wrapping.ResponseWrapInfo `json:"wrap_info" structs:"wrap_info" mapstructure:"wrap_info"` + + // Headers will contain the http headers from the plugin that it wishes to + // have as part of the output + Headers map[string][]string `json:"headers" structs:"headers" mapstructure:"headers"` +} + +// AddWarning adds a warning into the response's warning list +func (r *Response) AddWarning(warning string) { + if r.Warnings == nil { + r.Warnings = make([]string, 0, 1) + } + r.Warnings = append(r.Warnings, warning) +} + +// IsError returns true if this response seems to indicate an error. +func (r *Response) IsError() bool { + return r != nil && r.Data != nil && len(r.Data) == 1 && r.Data["error"] != nil +} + +func (r *Response) Error() error { + if !r.IsError() { + return nil + } + switch r.Data["error"].(type) { + case string: + return errors.New(r.Data["error"].(string)) + case error: + return r.Data["error"].(error) + } + return nil +} + +// HelpResponse is used to format a help response +func HelpResponse(text string, seeAlso []string, oapiDoc interface{}) *Response { + return &Response{ + Data: map[string]interface{}{ + "help": text, + "see_also": seeAlso, + "openapi": oapiDoc, + }, + } +} + +// ErrorResponse is used to format an error response +func ErrorResponse(text string, vargs ...interface{}) *Response { + if len(vargs) > 0 { + text = fmt.Sprintf(text, vargs...) + } + return &Response{ + Data: map[string]interface{}{ + "error": text, + }, + } +} + +// ListResponse is used to format a response to a list operation. +func ListResponse(keys []string) *Response { + resp := &Response{ + Data: map[string]interface{}{}, + } + if len(keys) != 0 { + resp.Data["keys"] = keys + } + return resp +} + +// ListResponseWithInfo is used to format a response to a list operation and +// return the keys as well as a map with corresponding key info. +func ListResponseWithInfo(keys []string, keyInfo map[string]interface{}) *Response { + resp := ListResponse(keys) + + keyInfoData := make(map[string]interface{}) + for _, key := range keys { + val, ok := keyInfo[key] + if ok { + keyInfoData[key] = val + } + } + + if len(keyInfoData) > 0 { + resp.Data["key_info"] = keyInfoData + } + + return resp +} + +// RespondWithStatusCode takes a response and converts it to a raw response with +// the provided Status Code. 
+func RespondWithStatusCode(resp *Response, req *Request, code int) (*Response, error) { + ret := &Response{ + Data: map[string]interface{}{ + HTTPContentType: "application/json", + HTTPStatusCode: code, + }, + } + + if resp != nil { + httpResp := LogicalResponseToHTTPResponse(resp) + + if req != nil { + httpResp.RequestID = req.ID + } + + body, err := json.Marshal(httpResp) + if err != nil { + return nil, err + } + + // We default to string here so that the value is HMAC'd via audit. + // Since this function is always marshaling to JSON, this is + // appropriate. + ret.Data[HTTPRawBody] = string(body) + } + + return ret, nil +} + +// HTTPResponseWriter is optionally added to a request object and can be used to +// write directly to the HTTP response writer. +type HTTPResponseWriter struct { + http.ResponseWriter + written *uint32 +} + +// NewHTTPResponseWriter creates a new HTTPResponseWriter object that wraps the +// provided io.Writer. +func NewHTTPResponseWriter(w http.ResponseWriter) *HTTPResponseWriter { + return &HTTPResponseWriter{ + ResponseWriter: w, + written: new(uint32), + } +} + +// Write will write the bytes to the underlying io.Writer. +func (rw *HTTPResponseWriter) Write(bytes []byte) (int, error) { + atomic.StoreUint32(rw.written, 1) + + return rw.ResponseWriter.Write(bytes) +} + +// Written tells us if the writer has been written to yet. +func (rw *HTTPResponseWriter) Written() bool { + return atomic.LoadUint32(rw.written) == 1 +} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/response_util.go b/vendor/github.com/hashicorp/vault/sdk/logical/response_util.go new file mode 100644 index 0000000000..353ef569a4 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/logical/response_util.go @@ -0,0 +1,180 @@ +package logical + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + + "github.com/hashicorp/errwrap" + multierror "github.com/hashicorp/go-multierror" + "github.com/hashicorp/vault/sdk/helper/consts" +) + +// RespondErrorCommon pulls most of the functionality from http's +// respondErrorCommon and some of http's handleLogical and makes it available +// to both the http package and elsewhere. +func RespondErrorCommon(req *Request, resp *Response, err error) (int, error) { + if err == nil && (resp == nil || !resp.IsError()) { + switch { + case req.Operation == ReadOperation, req.Operation == PatchOperation: + if resp == nil { + return http.StatusNotFound, nil + } + + // Basically: if we have empty "keys" or no keys at all, 404. This + // provides consistency with GET. 
+ case req.Operation == ListOperation && (resp == nil || resp.WrapInfo == nil): + if resp == nil { + return http.StatusNotFound, nil + } + if len(resp.Data) == 0 { + if len(resp.Warnings) > 0 { + return 0, nil + } + return http.StatusNotFound, nil + } + keysRaw, ok := resp.Data["keys"] + if !ok || keysRaw == nil { + // If we don't have keys but have other data, return as-is + if len(resp.Data) > 0 || len(resp.Warnings) > 0 { + return 0, nil + } + return http.StatusNotFound, nil + } + + var keys []string + switch keysRaw.(type) { + case []interface{}: + keys = make([]string, len(keysRaw.([]interface{}))) + for i, el := range keysRaw.([]interface{}) { + s, ok := el.(string) + if !ok { + return http.StatusInternalServerError, nil + } + keys[i] = s + } + + case []string: + keys = keysRaw.([]string) + default: + return http.StatusInternalServerError, nil + } + + if len(keys) == 0 { + return http.StatusNotFound, nil + } + } + + return 0, nil + } + + if errwrap.ContainsType(err, new(ReplicationCodedError)) { + var allErrors error + var codedErr *ReplicationCodedError + errwrap.Walk(err, func(inErr error) { + newErr, ok := inErr.(*ReplicationCodedError) + if ok { + codedErr = newErr + } else { + allErrors = multierror.Append(allErrors, inErr) + } + }) + if allErrors != nil { + return codedErr.Code, multierror.Append(fmt.Errorf("errors from both primary and secondary; primary error was %v; secondary errors follow", codedErr.Msg), allErrors) + } + return codedErr.Code, errors.New(codedErr.Msg) + } + + // Start out with internal server error since in most of these cases there + // won't be a response so this won't be overridden + statusCode := http.StatusInternalServerError + // If we actually have a response, start out with bad request + if resp != nil { + statusCode = http.StatusBadRequest + } + + // Now, check the error itself; if it has a specific logical error, set the + // appropriate code + if err != nil { + switch { + case errwrap.ContainsType(err, new(StatusBadRequest)): + statusCode = http.StatusBadRequest + case errwrap.Contains(err, ErrPermissionDenied.Error()): + statusCode = http.StatusForbidden + case errwrap.Contains(err, consts.ErrInvalidWrappingToken.Error()): + statusCode = http.StatusBadRequest + case errwrap.Contains(err, ErrUnsupportedOperation.Error()): + statusCode = http.StatusMethodNotAllowed + case errwrap.Contains(err, ErrUnsupportedPath.Error()): + statusCode = http.StatusNotFound + case errwrap.Contains(err, ErrInvalidRequest.Error()): + statusCode = http.StatusBadRequest + case errwrap.Contains(err, ErrUpstreamRateLimited.Error()): + statusCode = http.StatusBadGateway + case errwrap.Contains(err, ErrRateLimitQuotaExceeded.Error()): + statusCode = http.StatusTooManyRequests + case errwrap.Contains(err, ErrLeaseCountQuotaExceeded.Error()): + statusCode = http.StatusTooManyRequests + case errwrap.Contains(err, ErrMissingRequiredState.Error()): + statusCode = http.StatusPreconditionFailed + case errwrap.Contains(err, ErrPathFunctionalityRemoved.Error()): + statusCode = http.StatusNotFound + } + } + + if resp != nil && resp.IsError() { + err = fmt.Errorf("%s", resp.Data["error"].(string)) + } + + return statusCode, err +} + +// AdjustErrorStatusCode adjusts the status that will be sent in error +// conditions in a way that can be shared across http's respondError and other +// locations. 
+func AdjustErrorStatusCode(status *int, err error) { + // Handle nested errors + if t, ok := err.(*multierror.Error); ok { + for _, e := range t.Errors { + AdjustErrorStatusCode(status, e) + } + } + + // Adjust status code when sealed + if errwrap.Contains(err, consts.ErrSealed.Error()) { + *status = http.StatusServiceUnavailable + } + + if errwrap.Contains(err, consts.ErrAPILocked.Error()) { + *status = http.StatusServiceUnavailable + } + + // Adjust status code on + if errwrap.Contains(err, "http: request body too large") { + *status = http.StatusRequestEntityTooLarge + } + + // Allow HTTPCoded error passthrough to specify a code + if t, ok := err.(HTTPCodedError); ok { + *status = t.Code() + } +} + +func RespondError(w http.ResponseWriter, status int, err error) { + AdjustErrorStatusCode(&status, err) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(status) + + type ErrorResponse struct { + Errors []string `json:"errors"` + } + resp := &ErrorResponse{Errors: make([]string, 0, 1)} + if err != nil { + resp.Errors = append(resp.Errors, err.Error()) + } + + enc := json.NewEncoder(w) + enc.Encode(resp) +} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/secret.go b/vendor/github.com/hashicorp/vault/sdk/logical/secret.go new file mode 100644 index 0000000000..a2128d8689 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/logical/secret.go @@ -0,0 +1,30 @@ +package logical + +import "fmt" + +// Secret represents the secret part of a response. +type Secret struct { + LeaseOptions + + // InternalData is JSON-encodable data that is stored with the secret. + // This will be sent back during a Renew/Revoke for storing internal data + // used for those operations. + InternalData map[string]interface{} `json:"internal_data" sentinel:""` + + // LeaseID is the ID returned to the user to manage this secret. + // This is generated by Vault core. Any set value will be ignored. + // For requests, this will always be blank. + LeaseID string `sentinel:""` +} + +func (s *Secret) Validate() error { + if s.TTL < 0 { + return fmt.Errorf("ttl duration must not be less than zero") + } + + return nil +} + +func (s *Secret) GoString() string { + return fmt.Sprintf("*%#v", *s) +} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/storage.go b/vendor/github.com/hashicorp/vault/sdk/logical/storage.go new file mode 100644 index 0000000000..0802ad01a0 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/logical/storage.go @@ -0,0 +1,158 @@ +package logical + +import ( + "context" + "errors" + "fmt" + "strings" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/helper/jsonutil" +) + +// ErrReadOnly is returned when a backend does not support +// writing. This can be caused by a read-only replica or secondary +// cluster operation. +var ErrReadOnly = errors.New("cannot write to readonly storage") + +// ErrSetupReadOnly is returned when a write operation is attempted on a +// storage while the backend is still being setup. +var ErrSetupReadOnly = errors.New("cannot write to storage during setup") + +// Storage is the way that logical backends are able read/write data. +type Storage interface { + List(context.Context, string) ([]string, error) + Get(context.Context, string) (*StorageEntry, error) + Put(context.Context, *StorageEntry) error + Delete(context.Context, string) error +} + +// StorageEntry is the entry for an item in a Storage implementation. 
+type StorageEntry struct { + Key string + Value []byte + SealWrap bool +} + +// DecodeJSON decodes the 'Value' present in StorageEntry. +func (e *StorageEntry) DecodeJSON(out interface{}) error { + return jsonutil.DecodeJSON(e.Value, out) +} + +// StorageEntryJSON creates a StorageEntry with a JSON-encoded value. +func StorageEntryJSON(k string, v interface{}) (*StorageEntry, error) { + encodedBytes, err := jsonutil.EncodeJSON(v) + if err != nil { + return nil, errwrap.Wrapf("failed to encode storage entry: {{err}}", err) + } + + return &StorageEntry{ + Key: k, + Value: encodedBytes, + }, nil +} + +type ClearableView interface { + List(context.Context, string) ([]string, error) + Delete(context.Context, string) error +} + +// ScanView is used to scan all the keys in a view iteratively +func ScanView(ctx context.Context, view ClearableView, cb func(path string)) error { + frontier := []string{""} + for len(frontier) > 0 { + n := len(frontier) + current := frontier[n-1] + frontier = frontier[:n-1] + + // List the contents + contents, err := view.List(ctx, current) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("list failed at path %q: {{err}}", current), err) + } + + // Handle the contents in the directory + for _, c := range contents { + // Exit if the context has been canceled + if ctx.Err() != nil { + return ctx.Err() + } + fullPath := current + c + if strings.HasSuffix(c, "/") { + frontier = append(frontier, fullPath) + } else { + cb(fullPath) + } + } + } + return nil +} + +// CollectKeys is used to collect all the keys in a view +func CollectKeys(ctx context.Context, view ClearableView) ([]string, error) { + return CollectKeysWithPrefix(ctx, view, "") +} + +// CollectKeysWithPrefix is used to collect all the keys in a view with a given prefix string +func CollectKeysWithPrefix(ctx context.Context, view ClearableView, prefix string) ([]string, error) { + var keys []string + + cb := func(path string) { + if strings.HasPrefix(path, prefix) { + keys = append(keys, path) + } + } + + // Scan for all the keys + if err := ScanView(ctx, view, cb); err != nil { + return nil, err + } + return keys, nil +} + +// ClearView is used to delete all the keys in a view +func ClearView(ctx context.Context, view ClearableView) error { + return ClearViewWithLogging(ctx, view, nil) +} + +func ClearViewWithLogging(ctx context.Context, view ClearableView, logger hclog.Logger) error { + if view == nil { + return nil + } + + if logger == nil { + logger = hclog.NewNullLogger() + } + + // Collect all the keys + keys, err := CollectKeys(ctx, view) + if err != nil { + return err + } + + logger.Debug("clearing view", "total_keys", len(keys)) + + // Delete all the keys + var pctDone int + for idx, key := range keys { + // Rather than keep trying to do stuff with a canceled context, bail; + // storage will fail anyways + if ctx.Err() != nil { + return ctx.Err() + } + if err := view.Delete(ctx, key); err != nil { + return err + } + + newPctDone := idx * 100.0 / len(keys) + if int(newPctDone) > pctDone { + pctDone = int(newPctDone) + logger.Trace("view deletion progress", "percent", pctDone, "keys_deleted", idx) + } + } + + logger.Debug("view cleared") + + return nil +} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/storage_inmem.go b/vendor/github.com/hashicorp/vault/sdk/logical/storage_inmem.go new file mode 100644 index 0000000000..65368a070f --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/logical/storage_inmem.go @@ -0,0 +1,87 @@ +package logical + +import ( + "context" + "sync" + + 
"github.com/hashicorp/vault/sdk/physical" + "github.com/hashicorp/vault/sdk/physical/inmem" +) + +// InmemStorage implements Storage and stores all data in memory. It is +// basically a straight copy of physical.Inmem, but it prevents backends from +// having to load all of physical's dependencies (which are legion) just to +// have some testing storage. +type InmemStorage struct { + underlying physical.Backend + once sync.Once +} + +func (s *InmemStorage) Get(ctx context.Context, key string) (*StorageEntry, error) { + s.once.Do(s.init) + + entry, err := s.underlying.Get(ctx, key) + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + return &StorageEntry{ + Key: entry.Key, + Value: entry.Value, + SealWrap: entry.SealWrap, + }, nil +} + +func (s *InmemStorage) Put(ctx context.Context, entry *StorageEntry) error { + s.once.Do(s.init) + + return s.underlying.Put(ctx, &physical.Entry{ + Key: entry.Key, + Value: entry.Value, + SealWrap: entry.SealWrap, + }) +} + +func (s *InmemStorage) Delete(ctx context.Context, key string) error { + s.once.Do(s.init) + + return s.underlying.Delete(ctx, key) +} + +func (s *InmemStorage) List(ctx context.Context, prefix string) ([]string, error) { + s.once.Do(s.init) + + return s.underlying.List(ctx, prefix) +} + +func (s *InmemStorage) Underlying() *inmem.InmemBackend { + s.once.Do(s.init) + + return s.underlying.(*inmem.InmemBackend) +} + +func (s *InmemStorage) FailPut(fail bool) *InmemStorage { + s.Underlying().FailPut(fail) + return s +} + +func (s *InmemStorage) FailGet(fail bool) *InmemStorage { + s.Underlying().FailGet(fail) + return s +} + +func (s *InmemStorage) FailDelete(fail bool) *InmemStorage { + s.Underlying().FailDelete(fail) + return s +} + +func (s *InmemStorage) FailList(fail bool) *InmemStorage { + s.Underlying().FailList(fail) + return s +} + +func (s *InmemStorage) init() { + s.underlying, _ = inmem.NewInmem(nil, nil) +} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/storage_view.go b/vendor/github.com/hashicorp/vault/sdk/logical/storage_view.go new file mode 100644 index 0000000000..2cd07715c2 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/logical/storage_view.go @@ -0,0 +1,110 @@ +package logical + +import ( + "context" + "errors" + "strings" +) + +type StorageView struct { + storage Storage + prefix string +} + +var ErrRelativePath = errors.New("relative paths not supported") + +func NewStorageView(storage Storage, prefix string) *StorageView { + return &StorageView{ + storage: storage, + prefix: prefix, + } +} + +// logical.Storage impl. +func (s *StorageView) List(ctx context.Context, prefix string) ([]string, error) { + if err := s.SanityCheck(prefix); err != nil { + return nil, err + } + return s.storage.List(ctx, s.ExpandKey(prefix)) +} + +// logical.Storage impl. +func (s *StorageView) Get(ctx context.Context, key string) (*StorageEntry, error) { + if err := s.SanityCheck(key); err != nil { + return nil, err + } + entry, err := s.storage.Get(ctx, s.ExpandKey(key)) + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + entry.Key = s.TruncateKey(entry.Key) + + return &StorageEntry{ + Key: entry.Key, + Value: entry.Value, + SealWrap: entry.SealWrap, + }, nil +} + +// logical.Storage impl. 
+func (s *StorageView) Put(ctx context.Context, entry *StorageEntry) error { + if entry == nil { + return errors.New("cannot write nil entry") + } + + if err := s.SanityCheck(entry.Key); err != nil { + return err + } + + expandedKey := s.ExpandKey(entry.Key) + + nested := &StorageEntry{ + Key: expandedKey, + Value: entry.Value, + SealWrap: entry.SealWrap, + } + + return s.storage.Put(ctx, nested) +} + +// logical.Storage impl. +func (s *StorageView) Delete(ctx context.Context, key string) error { + if err := s.SanityCheck(key); err != nil { + return err + } + + expandedKey := s.ExpandKey(key) + + return s.storage.Delete(ctx, expandedKey) +} + +func (s *StorageView) Prefix() string { + return s.prefix +} + +// SubView constructs a nested sub-view using the given prefix +func (s *StorageView) SubView(prefix string) *StorageView { + sub := s.ExpandKey(prefix) + return &StorageView{storage: s.storage, prefix: sub} +} + +// SanityCheck is used to perform a sanity check on a key +func (s *StorageView) SanityCheck(key string) error { + if strings.Contains(key, "..") { + return ErrRelativePath + } + return nil +} + +// ExpandKey is used to expand to the full key path with the prefix +func (s *StorageView) ExpandKey(suffix string) string { + return s.prefix + suffix +} + +// TruncateKey is used to remove the prefix of the key +func (s *StorageView) TruncateKey(full string) string { + return strings.TrimPrefix(full, s.prefix) +} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/system_view.go b/vendor/github.com/hashicorp/vault/sdk/logical/system_view.go new file mode 100644 index 0000000000..8ea6766b99 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/logical/system_view.go @@ -0,0 +1,211 @@ +package logical + +import ( + "context" + "errors" + "fmt" + "io" + "time" + + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/license" + "github.com/hashicorp/vault/sdk/helper/pluginutil" + "github.com/hashicorp/vault/sdk/helper/wrapping" +) + +// SystemView exposes system configuration information in a safe way +// for logical backends to consume +type SystemView interface { + // DefaultLeaseTTL returns the default lease TTL set in Vault configuration + DefaultLeaseTTL() time.Duration + + // MaxLeaseTTL returns the max lease TTL set in Vault configuration; backend + // authors should take care not to issue credentials that last longer than + // this value, as Vault will revoke them + MaxLeaseTTL() time.Duration + + // Returns true if the mount is tainted. A mount is tainted if it is in the + // process of being unmounted. This should only be used in special + // circumstances; a primary use-case is as a guard in revocation functions. + // If revocation of a backend's leases fails it can keep the unmounting + // process from being successful. If the reason for this failure is not + // relevant when the mount is tainted (for instance, saving a CRL to disk + // when the stored CRL will be removed during the unmounting process + // anyways), we can ignore the errors to allow unmounting to complete. + Tainted() bool + + // Returns true if caching is disabled. If true, no caches should be used, + // despite known slowdowns. 
+ CachingDisabled() bool + + // When run from a system view attached to a request, indicates whether the + // request is affecting a local mount or not + LocalMount() bool + + // ReplicationState indicates the state of cluster replication + ReplicationState() consts.ReplicationState + + // HasFeature returns true if the feature is currently enabled + HasFeature(feature license.Features) bool + + // ResponseWrapData wraps the given data in a cubbyhole and returns the + // token used to unwrap. + ResponseWrapData(ctx context.Context, data map[string]interface{}, ttl time.Duration, jwt bool) (*wrapping.ResponseWrapInfo, error) + + // LookupPlugin looks into the plugin catalog for a plugin with the given + // name. Returns a PluginRunner or an error if a plugin can not be found. + LookupPlugin(context.Context, string, consts.PluginType) (*pluginutil.PluginRunner, error) + + // MlockEnabled returns the configuration setting for enabling mlock on + // plugins. + MlockEnabled() bool + + // EntityInfo returns a subset of information related to the identity entity + // for the given entity id + EntityInfo(entityID string) (*Entity, error) + + // GroupsForEntity returns the group membership information for the provided + // entity id + GroupsForEntity(entityID string) ([]*Group, error) + + // PluginEnv returns Vault environment information used by plugins + PluginEnv(context.Context) (*PluginEnvironment, error) + + // GeneratePasswordFromPolicy generates a password from the policy referenced. + // If the policy does not exist, this will return an error. + GeneratePasswordFromPolicy(ctx context.Context, policyName string) (password string, err error) +} + +type PasswordPolicy interface { + // Generate a random password + Generate(context.Context, io.Reader) (string, error) +} + +type ExtendedSystemView interface { + Auditor() Auditor + ForwardGenericRequest(context.Context, *Request) (*Response, error) +} + +type PasswordGenerator func() (password string, err error) + +type StaticSystemView struct { + DefaultLeaseTTLVal time.Duration + MaxLeaseTTLVal time.Duration + SudoPrivilegeVal bool + TaintedVal bool + CachingDisabledVal bool + Primary bool + EnableMlock bool + LocalMountVal bool + ReplicationStateVal consts.ReplicationState + EntityVal *Entity + GroupsVal []*Group + Features license.Features + VaultVersion string + PluginEnvironment *PluginEnvironment + PasswordPolicies map[string]PasswordGenerator +} + +type noopAuditor struct{} + +func (a noopAuditor) AuditRequest(ctx context.Context, input *LogInput) error { + return nil +} + +func (a noopAuditor) AuditResponse(ctx context.Context, input *LogInput) error { + return nil +} + +func (d StaticSystemView) Auditor() Auditor { + return noopAuditor{} +} + +func (d StaticSystemView) ForwardGenericRequest(ctx context.Context, req *Request) (*Response, error) { + return nil, errors.New("ForwardGenericRequest is not implemented in StaticSystemView") +} + +func (d StaticSystemView) DefaultLeaseTTL() time.Duration { + return d.DefaultLeaseTTLVal +} + +func (d StaticSystemView) MaxLeaseTTL() time.Duration { + return d.MaxLeaseTTLVal +} + +func (d StaticSystemView) SudoPrivilege(_ context.Context, path string, token string) bool { + return d.SudoPrivilegeVal +} + +func (d StaticSystemView) Tainted() bool { + return d.TaintedVal +} + +func (d StaticSystemView) CachingDisabled() bool { + return d.CachingDisabledVal +} + +func (d StaticSystemView) LocalMount() bool { + return d.LocalMountVal +} + +func (d StaticSystemView) ReplicationState() 
consts.ReplicationState { + return d.ReplicationStateVal +} + +func (d StaticSystemView) ResponseWrapData(_ context.Context, data map[string]interface{}, ttl time.Duration, jwt bool) (*wrapping.ResponseWrapInfo, error) { + return nil, errors.New("ResponseWrapData is not implemented in StaticSystemView") +} + +func (d StaticSystemView) LookupPlugin(_ context.Context, _ string, _ consts.PluginType) (*pluginutil.PluginRunner, error) { + return nil, errors.New("LookupPlugin is not implemented in StaticSystemView") +} + +func (d StaticSystemView) MlockEnabled() bool { + return d.EnableMlock +} + +func (d StaticSystemView) EntityInfo(entityID string) (*Entity, error) { + return d.EntityVal, nil +} + +func (d StaticSystemView) GroupsForEntity(entityID string) ([]*Group, error) { + return d.GroupsVal, nil +} + +func (d StaticSystemView) HasFeature(feature license.Features) bool { + return d.Features.HasFeature(feature) +} + +func (d StaticSystemView) PluginEnv(_ context.Context) (*PluginEnvironment, error) { + return d.PluginEnvironment, nil +} + +func (d StaticSystemView) GeneratePasswordFromPolicy(ctx context.Context, policyName string) (password string, err error) { + select { + case <-ctx.Done(): + return "", fmt.Errorf("context timed out") + default: + } + + if d.PasswordPolicies == nil { + return "", fmt.Errorf("password policy not found") + } + policy, exists := d.PasswordPolicies[policyName] + if !exists { + return "", fmt.Errorf("password policy not found") + } + return policy() +} + +func (d *StaticSystemView) SetPasswordPolicy(name string, generator PasswordGenerator) { + if d.PasswordPolicies == nil { + d.PasswordPolicies = map[string]PasswordGenerator{} + } + d.PasswordPolicies[name] = generator +} + +func (d *StaticSystemView) DeletePasswordPolicy(name string) (existed bool) { + _, existed = d.PasswordPolicies[name] + delete(d.PasswordPolicies, name) + return existed +} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/testing.go b/vendor/github.com/hashicorp/vault/sdk/logical/testing.go new file mode 100644 index 0000000000..765f09826d --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/logical/testing.go @@ -0,0 +1,87 @@ +package logical + +import ( + "context" + "reflect" + "time" + + testing "github.com/mitchellh/go-testing-interface" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/helper/logging" +) + +// TestRequest is a helper to create a purely in-memory Request struct. +func TestRequest(t testing.T, op Operation, path string) *Request { + return &Request{ + Operation: op, + Path: path, + Data: make(map[string]interface{}), + Storage: new(InmemStorage), + Connection: &Connection{}, + } +} + +// TestStorage is a helper that can be used from unit tests to verify +// the behavior of a Storage impl. +func TestStorage(t testing.T, s Storage) { + keys, err := s.List(context.Background(), "") + if err != nil { + t.Fatalf("list error: %s", err) + } + if len(keys) > 0 { + t.Fatalf("should have no keys to start: %#v", keys) + } + + entry := &StorageEntry{Key: "foo", Value: []byte("bar")} + if err := s.Put(context.Background(), entry); err != nil { + t.Fatalf("put error: %s", err) + } + + actual, err := s.Get(context.Background(), "foo") + if err != nil { + t.Fatalf("get error: %s", err) + } + if !reflect.DeepEqual(actual, entry) { + t.Fatalf("wrong value. 
Expected: %#v\nGot: %#v", entry, actual) + } + + keys, err = s.List(context.Background(), "") + if err != nil { + t.Fatalf("list error: %s", err) + } + if !reflect.DeepEqual(keys, []string{"foo"}) { + t.Fatalf("bad keys: %#v", keys) + } + + if err := s.Delete(context.Background(), "foo"); err != nil { + t.Fatalf("put error: %s", err) + } + + keys, err = s.List(context.Background(), "") + if err != nil { + t.Fatalf("list error: %s", err) + } + if len(keys) > 0 { + t.Fatalf("should have no keys to start: %#v", keys) + } +} + +func TestSystemView() *StaticSystemView { + defaultLeaseTTLVal := time.Hour * 24 + maxLeaseTTLVal := time.Hour * 24 * 2 + return &StaticSystemView{ + DefaultLeaseTTLVal: defaultLeaseTTLVal, + MaxLeaseTTLVal: maxLeaseTTLVal, + } +} + +func TestBackendConfig() *BackendConfig { + bc := &BackendConfig{ + Logger: logging.NewVaultLogger(log.Trace), + System: TestSystemView(), + Config: make(map[string]string), + } + + return bc +} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/token.go b/vendor/github.com/hashicorp/vault/sdk/logical/token.go new file mode 100644 index 0000000000..0586d768ea --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/logical/token.go @@ -0,0 +1,245 @@ +package logical + +import ( + "fmt" + "time" + + sockaddr "github.com/hashicorp/go-sockaddr" +) + +type TokenType uint8 + +const ( + // TokenTypeDefault means "use the default, if any, that is currently set + // on the mount". If not set, results in a Service token. + TokenTypeDefault TokenType = iota + + // TokenTypeService is a "normal" Vault token for long-lived services + TokenTypeService + + // TokenTypeBatch is a batch token + TokenTypeBatch + + // TokenTypeDefaultService, configured on a mount, means that if + // TokenTypeDefault is sent back by the mount, create Service tokens + TokenTypeDefaultService + + // TokenTypeDefaultBatch, configured on a mount, means that if + // TokenTypeDefault is sent back by the mount, create Batch tokens + TokenTypeDefaultBatch +) + +func (t *TokenType) UnmarshalJSON(b []byte) error { + if len(b) == 1 { + *t = TokenType(b[0] - '0') + return nil + } + + // Handle upgrade from pre-1.2 where we were serialized as string: + s := string(b) + switch s { + case `"default"`, `""`: + *t = TokenTypeDefault + case `"service"`: + *t = TokenTypeService + case `"batch"`: + *t = TokenTypeBatch + case `"default-service"`: + *t = TokenTypeDefaultService + case `"default-batch"`: + *t = TokenTypeDefaultBatch + default: + return fmt.Errorf("unknown token type %q", s) + } + return nil +} + +func (t TokenType) String() string { + switch t { + case TokenTypeDefault: + return "default" + case TokenTypeService: + return "service" + case TokenTypeBatch: + return "batch" + case TokenTypeDefaultService: + return "default-service" + case TokenTypeDefaultBatch: + return "default-batch" + default: + panic("unreachable") + } +} + +// TokenEntry is used to represent a given token +type TokenEntry struct { + Type TokenType `json:"type" mapstructure:"type" structs:"type" sentinel:""` + + // ID of this entry, generally a random UUID + ID string `json:"id" mapstructure:"id" structs:"id" sentinel:""` + + // Accessor for this token, a random UUID + Accessor string `json:"accessor" mapstructure:"accessor" structs:"accessor" sentinel:""` + + // Parent token, used for revocation trees + Parent string `json:"parent" mapstructure:"parent" structs:"parent" sentinel:""` + + // Which named policies should be used + Policies []string `json:"policies" mapstructure:"policies" 
structs:"policies"` + + // InlinePolicy specifies ACL rules to be applied to this token entry. + InlinePolicy string `json:"inline_policy" mapstructure:"inline_policy" structs:"inline_policy"` + + // Used for audit trails, this is something like "auth/user/login" + Path string `json:"path" mapstructure:"path" structs:"path"` + + // Used for auditing. This could include things like "source", "user", "ip" + Meta map[string]string `json:"meta" mapstructure:"meta" structs:"meta" sentinel:"meta"` + + // InternalMeta is used to store internal metadata. This metadata will not be audit logged or returned from lookup APIs. + InternalMeta map[string]string `json:"internal_meta" mapstructure:"internal_meta" structs:"internal_meta"` + + // Used for operators to be able to associate with the source + DisplayName string `json:"display_name" mapstructure:"display_name" structs:"display_name"` + + // Used to restrict the number of uses (zero is unlimited). This is to + // support one-time-tokens (generalized). There are a few special values: + // if it's -1 it has run through its use counts and is executing its final + // use; if it's -2 it is tainted, which means revocation is currently + // running on it; and if it's -3 it's also tainted but revocation + // previously ran and failed, so this hints the tidy function to try it + // again. + NumUses int `json:"num_uses" mapstructure:"num_uses" structs:"num_uses"` + + // Time of token creation + CreationTime int64 `json:"creation_time" mapstructure:"creation_time" structs:"creation_time" sentinel:""` + + // Duration set when token was created + TTL time.Duration `json:"ttl" mapstructure:"ttl" structs:"ttl" sentinel:""` + + // Explicit maximum TTL on the token + ExplicitMaxTTL time.Duration `json:"explicit_max_ttl" mapstructure:"explicit_max_ttl" structs:"explicit_max_ttl" sentinel:""` + + // If set, the role that was used for parameters at creation time + Role string `json:"role" mapstructure:"role" structs:"role"` + + // If set, the period of the token. This is only used when created directly + // through the create endpoint; periods managed by roles or other auth + // backends are subject to those renewal rules. + Period time.Duration `json:"period" mapstructure:"period" structs:"period" sentinel:""` + + // These are the deprecated fields + DisplayNameDeprecated string `json:"DisplayName" mapstructure:"DisplayName" structs:"DisplayName" sentinel:""` + NumUsesDeprecated int `json:"NumUses" mapstructure:"NumUses" structs:"NumUses" sentinel:""` + CreationTimeDeprecated int64 `json:"CreationTime" mapstructure:"CreationTime" structs:"CreationTime" sentinel:""` + ExplicitMaxTTLDeprecated time.Duration `json:"ExplicitMaxTTL" mapstructure:"ExplicitMaxTTL" structs:"ExplicitMaxTTL" sentinel:""` + + // EntityID is the ID of the entity associated with this token. + EntityID string `json:"entity_id" mapstructure:"entity_id" structs:"entity_id"` + + // If NoIdentityPolicies is true, the token will not inherit + // identity policies from the associated EntityID. + NoIdentityPolicies bool `json:"no_identity_policies" mapstructure:"no_identity_policies" structs:"no_identity_policies"` + + // The set of CIDRs that this token can be used with + BoundCIDRs []*sockaddr.SockAddrMarshaler `json:"bound_cidrs" sentinel:""` + + // NamespaceID is the identifier of the namespace to which this token is + // confined to. Do not return this value over the API when the token is + // being looked up. 
+ NamespaceID string `json:"namespace_id" mapstructure:"namespace_id" structs:"namespace_id" sentinel:""` + + // CubbyholeID is the identifier of the cubbyhole storage belonging to this + // token + CubbyholeID string `json:"cubbyhole_id" mapstructure:"cubbyhole_id" structs:"cubbyhole_id" sentinel:""` +} + +func (te *TokenEntry) SentinelGet(key string) (interface{}, error) { + if te == nil { + return nil, nil + } + switch key { + case "policies": + return te.Policies, nil + + case "path": + return te.Path, nil + + case "display_name": + return te.DisplayName, nil + + case "num_uses": + return te.NumUses, nil + + case "role": + return te.Role, nil + + case "entity_id": + return te.EntityID, nil + + case "period": + return te.Period, nil + + case "period_seconds": + return int64(te.Period.Seconds()), nil + + case "explicit_max_ttl": + return te.ExplicitMaxTTL, nil + + case "explicit_max_ttl_seconds": + return int64(te.ExplicitMaxTTL.Seconds()), nil + + case "creation_ttl": + return te.TTL, nil + + case "creation_ttl_seconds": + return int64(te.TTL.Seconds()), nil + + case "creation_time": + return time.Unix(te.CreationTime, 0).Format(time.RFC3339Nano), nil + + case "creation_time_unix": + return time.Unix(te.CreationTime, 0), nil + + case "meta", "metadata": + return te.Meta, nil + + case "type": + teType := te.Type + switch teType { + case TokenTypeBatch, TokenTypeService: + case TokenTypeDefault: + teType = TokenTypeService + default: + return "unknown", nil + } + return teType.String(), nil + } + + return nil, nil +} + +func (te *TokenEntry) SentinelKeys() []string { + return []string{ + "period", + "period_seconds", + "explicit_max_ttl", + "explicit_max_ttl_seconds", + "creation_ttl", + "creation_ttl_seconds", + "creation_time", + "creation_time_unix", + "meta", + "metadata", + "type", + } +} + +// IsRoot returns false if the token is not root (or doesn't exist) +func (te *TokenEntry) IsRoot() bool { + if te == nil { + return false + } + + return len(te.Policies) == 1 && te.Policies[0] == "root" +} diff --git a/vendor/github.com/hashicorp/vault/sdk/logical/translate_response.go b/vendor/github.com/hashicorp/vault/sdk/logical/translate_response.go new file mode 100644 index 0000000000..6f0ff342f9 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/logical/translate_response.go @@ -0,0 +1,157 @@ +package logical + +import ( + "bytes" + "encoding/json" + "fmt" + "time" +) + +// This logic was pulled from the http package so that it can be used for +// encoding wrapped responses as well. It simply translates the logical +// response to an http response, with the values we want and omitting the +// values we don't. +func LogicalResponseToHTTPResponse(input *Response) *HTTPResponse { + httpResp := &HTTPResponse{ + Data: input.Data, + Warnings: input.Warnings, + Headers: input.Headers, + } + + if input.Secret != nil { + httpResp.LeaseID = input.Secret.LeaseID + httpResp.Renewable = input.Secret.Renewable + httpResp.LeaseDuration = int(input.Secret.TTL.Seconds()) + } + + // If we have authentication information, then + // set up the result structure. 
+ if input.Auth != nil { + httpResp.Auth = &HTTPAuth{ + ClientToken: input.Auth.ClientToken, + Accessor: input.Auth.Accessor, + Policies: input.Auth.Policies, + TokenPolicies: input.Auth.TokenPolicies, + IdentityPolicies: input.Auth.IdentityPolicies, + Metadata: input.Auth.Metadata, + LeaseDuration: int(input.Auth.TTL.Seconds()), + Renewable: input.Auth.Renewable, + EntityID: input.Auth.EntityID, + TokenType: input.Auth.TokenType.String(), + Orphan: input.Auth.Orphan, + } + } + + return httpResp +} + +func HTTPResponseToLogicalResponse(input *HTTPResponse) *Response { + logicalResp := &Response{ + Data: input.Data, + Warnings: input.Warnings, + Headers: input.Headers, + } + + if input.LeaseID != "" { + logicalResp.Secret = &Secret{ + LeaseID: input.LeaseID, + } + logicalResp.Secret.Renewable = input.Renewable + logicalResp.Secret.TTL = time.Second * time.Duration(input.LeaseDuration) + } + + if input.Auth != nil { + logicalResp.Auth = &Auth{ + ClientToken: input.Auth.ClientToken, + Accessor: input.Auth.Accessor, + Policies: input.Auth.Policies, + TokenPolicies: input.Auth.TokenPolicies, + IdentityPolicies: input.Auth.IdentityPolicies, + Metadata: input.Auth.Metadata, + EntityID: input.Auth.EntityID, + Orphan: input.Auth.Orphan, + } + logicalResp.Auth.Renewable = input.Auth.Renewable + logicalResp.Auth.TTL = time.Second * time.Duration(input.Auth.LeaseDuration) + switch input.Auth.TokenType { + case "service": + logicalResp.Auth.TokenType = TokenTypeService + case "batch": + logicalResp.Auth.TokenType = TokenTypeBatch + } + } + + return logicalResp +} + +type HTTPResponse struct { + RequestID string `json:"request_id"` + LeaseID string `json:"lease_id"` + Renewable bool `json:"renewable"` + LeaseDuration int `json:"lease_duration"` + Data map[string]interface{} `json:"data"` + WrapInfo *HTTPWrapInfo `json:"wrap_info"` + Warnings []string `json:"warnings"` + Headers map[string][]string `json:"-"` + Auth *HTTPAuth `json:"auth"` +} + +type HTTPAuth struct { + ClientToken string `json:"client_token"` + Accessor string `json:"accessor"` + Policies []string `json:"policies"` + TokenPolicies []string `json:"token_policies,omitempty"` + IdentityPolicies []string `json:"identity_policies,omitempty"` + Metadata map[string]string `json:"metadata"` + LeaseDuration int `json:"lease_duration"` + Renewable bool `json:"renewable"` + EntityID string `json:"entity_id"` + TokenType string `json:"token_type"` + Orphan bool `json:"orphan"` +} + +type HTTPWrapInfo struct { + Token string `json:"token"` + Accessor string `json:"accessor"` + TTL int `json:"ttl"` + CreationTime string `json:"creation_time"` + CreationPath string `json:"creation_path"` + WrappedAccessor string `json:"wrapped_accessor,omitempty"` +} + +type HTTPSysInjector struct { + Response *HTTPResponse +} + +func (h HTTPSysInjector) MarshalJSON() ([]byte, error) { + j, err := json.Marshal(h.Response) + if err != nil { + return nil, err + } + // Fast path no data or empty data + if h.Response.Data == nil || len(h.Response.Data) == 0 { + return j, nil + } + // Marshaling a response will always be a JSON object, meaning it will + // always start with '{', so we hijack this to prepend necessary values + // Make a guess at the capacity, and write the object opener + buf := bytes.NewBuffer(make([]byte, 0, len(j)*2)) + buf.WriteRune('{') + for k, v := range h.Response.Data { + // Marshal each key/value individually + mk, err := json.Marshal(k) + if err != nil { + return nil, err + } + mv, err := json.Marshal(v) + if err != nil { + return nil, err + } + 
// Write into the final buffer. We'll never have a valid response + // without any fields so we can unconditionally add a comma after each. + buf.WriteString(fmt.Sprintf("%s: %s, ", mk, mv)) + } + // Add the rest, without the first '{' + buf.Write(j[1:]) + return buf.Bytes(), nil +} diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/cache.go b/vendor/github.com/hashicorp/vault/sdk/physical/cache.go new file mode 100644 index 0000000000..52768776a6 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/physical/cache.go @@ -0,0 +1,261 @@ +package physical + +import ( + "context" + "sync/atomic" + + metrics "github.com/armon/go-metrics" + log "github.com/hashicorp/go-hclog" + lru "github.com/hashicorp/golang-lru" + "github.com/hashicorp/vault/sdk/helper/locksutil" + "github.com/hashicorp/vault/sdk/helper/pathmanager" +) + +const ( + // DefaultCacheSize is used if no cache size is specified for NewCache + DefaultCacheSize = 128 * 1024 + + // refreshCacheCtxKey is a ctx value that denotes the cache should be + // refreshed during a Get call. + refreshCacheCtxKey = "refresh_cache" +) + +// These paths don't need to be cached by the LRU cache. This should +// particularly help memory pressure when unsealing. +var cacheExceptionsPaths = []string{ + "wal/logs/", + "index/pages/", + "index-dr/pages/", + "sys/expire/", + "core/poison-pill", + "core/raft/tls", + "core/license", +} + +// CacheRefreshContext returns a context with an added value denoting if the +// cache should attempt a refresh. +func CacheRefreshContext(ctx context.Context, r bool) context.Context { + return context.WithValue(ctx, refreshCacheCtxKey, r) +} + +// cacheRefreshFromContext is a helper to look up if the provided context is +// requesting a cache refresh. +func cacheRefreshFromContext(ctx context.Context) bool { + r, ok := ctx.Value(refreshCacheCtxKey).(bool) + if !ok { + return false + } + return r +} + +// Cache is used to wrap an underlying physical backend +// and provide an LRU cache layer on top. Most of the reads done by +// Vault are for policy objects so there is a large read reduction +// by using a simple write-through cache. +type Cache struct { + backend Backend + lru *lru.TwoQueueCache + locks []*locksutil.LockEntry + logger log.Logger + enabled *uint32 + cacheExceptions *pathmanager.PathManager + metricSink metrics.MetricSink +} + +// TransactionalCache is a Cache that wraps the physical that is transactional +type TransactionalCache struct { + *Cache + Transactional +} + +// Verify Cache satisfies the correct interfaces +var ( + _ ToggleablePurgemonster = (*Cache)(nil) + _ ToggleablePurgemonster = (*TransactionalCache)(nil) + _ Backend = (*Cache)(nil) + _ Transactional = (*TransactionalCache)(nil) +) + +// NewCache returns a physical cache of the given size. +// If no size is provided, the default size is used. +func NewCache(b Backend, size int, logger log.Logger, metricSink metrics.MetricSink) *Cache { + if logger.IsDebug() { + logger.Debug("creating LRU cache", "size", size) + } + if size <= 0 { + size = DefaultCacheSize + } + + pm := pathmanager.New() + pm.AddPaths(cacheExceptionsPaths) + + cache, _ := lru.New2Q(size) + c := &Cache{ + backend: b, + lru: cache, + locks: locksutil.CreateLocks(), + logger: logger, + // This fails safe. 
+ enabled: new(uint32), + cacheExceptions: pm, + metricSink: metricSink, + } + return c +} + +func NewTransactionalCache(b Backend, size int, logger log.Logger, metricSink metrics.MetricSink) *TransactionalCache { + c := &TransactionalCache{ + Cache: NewCache(b, size, logger, metricSink), + Transactional: b.(Transactional), + } + return c +} + +func (c *Cache) ShouldCache(key string) bool { + if atomic.LoadUint32(c.enabled) == 0 { + return false + } + + return !c.cacheExceptions.HasPath(key) +} + +// SetEnabled is used to toggle whether the cache is on or off. It must be +// called with true to actually activate the cache after creation. +func (c *Cache) SetEnabled(enabled bool) { + if enabled { + atomic.StoreUint32(c.enabled, 1) + return + } + atomic.StoreUint32(c.enabled, 0) +} + +// Purge is used to clear the cache +func (c *Cache) Purge(ctx context.Context) { + // Lock the world + for _, lock := range c.locks { + lock.Lock() + defer lock.Unlock() + } + + c.lru.Purge() +} + +func (c *Cache) Put(ctx context.Context, entry *Entry) error { + if entry != nil && !c.ShouldCache(entry.Key) { + return c.backend.Put(ctx, entry) + } + + lock := locksutil.LockForKey(c.locks, entry.Key) + lock.Lock() + defer lock.Unlock() + + err := c.backend.Put(ctx, entry) + if err == nil { + c.lru.Add(entry.Key, entry) + c.metricSink.IncrCounter([]string{"cache", "write"}, 1) + } + return err +} + +func (c *Cache) Get(ctx context.Context, key string) (*Entry, error) { + if !c.ShouldCache(key) { + return c.backend.Get(ctx, key) + } + + lock := locksutil.LockForKey(c.locks, key) + lock.RLock() + defer lock.RUnlock() + + // Check the LRU first + if !cacheRefreshFromContext(ctx) { + if raw, ok := c.lru.Get(key); ok { + if raw == nil { + return nil, nil + } + c.metricSink.IncrCounter([]string{"cache", "hit"}, 1) + return raw.(*Entry), nil + } + } + + c.metricSink.IncrCounter([]string{"cache", "miss"}, 1) + // Read from the underlying backend + ent, err := c.backend.Get(ctx, key) + if err != nil { + return nil, err + } + + // Cache the result + c.lru.Add(key, ent) + + return ent, nil +} + +func (c *Cache) Delete(ctx context.Context, key string) error { + if !c.ShouldCache(key) { + return c.backend.Delete(ctx, key) + } + + lock := locksutil.LockForKey(c.locks, key) + lock.Lock() + defer lock.Unlock() + + err := c.backend.Delete(ctx, key) + if err == nil { + c.lru.Remove(key) + } + return err +} + +func (c *Cache) List(ctx context.Context, prefix string) ([]string, error) { + // Always pass-through as this would be difficult to cache. For the same + // reason we don't lock as we can't reasonably know which locks to readlock + // ahead of time. 
+ return c.backend.List(ctx, prefix) +} + +func (c *TransactionalCache) Locks() []*locksutil.LockEntry { + return c.locks +} + +func (c *TransactionalCache) LRU() *lru.TwoQueueCache { + return c.lru +} + +func (c *TransactionalCache) Transaction(ctx context.Context, txns []*TxnEntry) error { + // Bypass the locking below + if atomic.LoadUint32(c.enabled) == 0 { + return c.Transactional.Transaction(ctx, txns) + } + + // Collect keys that need to be locked + var keys []string + for _, curr := range txns { + keys = append(keys, curr.Entry.Key) + } + // Lock the keys + for _, l := range locksutil.LocksForKeys(c.locks, keys) { + l.Lock() + defer l.Unlock() + } + + if err := c.Transactional.Transaction(ctx, txns); err != nil { + return err + } + + for _, txn := range txns { + if !c.ShouldCache(txn.Entry.Key) { + continue + } + + switch txn.Operation { + case PutOperation: + c.lru.Add(txn.Entry.Key, txn.Entry) + c.metricSink.IncrCounter([]string{"cache", "write"}, 1) + case DeleteOperation: + c.lru.Remove(txn.Entry.Key) + c.metricSink.IncrCounter([]string{"cache", "delete"}, 1) + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/encoding.go b/vendor/github.com/hashicorp/vault/sdk/physical/encoding.go new file mode 100644 index 0000000000..dbde84cc6d --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/physical/encoding.go @@ -0,0 +1,108 @@ +package physical + +import ( + "context" + "errors" + "strings" + "unicode" + "unicode/utf8" +) + +var ( + ErrNonUTF8 = errors.New("key contains invalid UTF-8 characters") + ErrNonPrintable = errors.New("key contains non-printable characters") +) + +// StorageEncoding is used to add errors into underlying physical requests +type StorageEncoding struct { + Backend +} + +// TransactionalStorageEncoding is the transactional version of the error +// injector +type TransactionalStorageEncoding struct { + *StorageEncoding + Transactional +} + +// Verify StorageEncoding satisfies the correct interfaces +var ( + _ Backend = (*StorageEncoding)(nil) + _ Transactional = (*TransactionalStorageEncoding)(nil) +) + +// NewStorageEncoding returns a wrapped physical backend and verifies the key +// encoding +func NewStorageEncoding(b Backend) Backend { + enc := &StorageEncoding{ + Backend: b, + } + + if bTxn, ok := b.(Transactional); ok { + return &TransactionalStorageEncoding{ + StorageEncoding: enc, + Transactional: bTxn, + } + } + + return enc +} + +func (e *StorageEncoding) containsNonPrintableChars(key string) bool { + idx := strings.IndexFunc(key, func(c rune) bool { + return !unicode.IsPrint(c) + }) + + return idx != -1 +} + +func (e *StorageEncoding) Put(ctx context.Context, entry *Entry) error { + if !utf8.ValidString(entry.Key) { + return ErrNonUTF8 + } + + if e.containsNonPrintableChars(entry.Key) { + return ErrNonPrintable + } + + return e.Backend.Put(ctx, entry) +} + +func (e *StorageEncoding) Delete(ctx context.Context, key string) error { + if !utf8.ValidString(key) { + return ErrNonUTF8 + } + + if e.containsNonPrintableChars(key) { + return ErrNonPrintable + } + + return e.Backend.Delete(ctx, key) +} + +func (e *TransactionalStorageEncoding) Transaction(ctx context.Context, txns []*TxnEntry) error { + for _, txn := range txns { + if !utf8.ValidString(txn.Entry.Key) { + return ErrNonUTF8 + } + + if e.containsNonPrintableChars(txn.Entry.Key) { + return ErrNonPrintable + } + + } + + return e.Transactional.Transaction(ctx, txns) +} + +func (e *StorageEncoding) Purge(ctx context.Context) { + if purgeable, ok := 
e.Backend.(ToggleablePurgemonster); ok { + purgeable.Purge(ctx) + } +} + +func (e *StorageEncoding) SetEnabled(enabled bool) { + if purgeable, ok := e.Backend.(ToggleablePurgemonster); ok { + purgeable.SetEnabled(enabled) + } +} diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/entry.go b/vendor/github.com/hashicorp/vault/sdk/physical/entry.go new file mode 100644 index 0000000000..418b0d2ca5 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/physical/entry.go @@ -0,0 +1,11 @@ +package physical + +// Entry is used to represent data stored by the physical backend +type Entry struct { + Key string + Value []byte + SealWrap bool `json:"seal_wrap,omitempty"` + + // Only used in replication + ValueHash []byte +} diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/error.go b/vendor/github.com/hashicorp/vault/sdk/physical/error.go new file mode 100644 index 0000000000..b547e4e428 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/physical/error.go @@ -0,0 +1,110 @@ +package physical + +import ( + "context" + "errors" + "math/rand" + "sync" + "time" + + log "github.com/hashicorp/go-hclog" +) + +const ( + // DefaultErrorPercent is used to determin how often we error + DefaultErrorPercent = 20 +) + +// ErrorInjector is used to add errors into underlying physical requests +type ErrorInjector struct { + backend Backend + errorPercent int + randomLock *sync.Mutex + random *rand.Rand +} + +// TransactionalErrorInjector is the transactional version of the error +// injector +type TransactionalErrorInjector struct { + *ErrorInjector + Transactional +} + +// Verify ErrorInjector satisfies the correct interfaces +var ( + _ Backend = (*ErrorInjector)(nil) + _ Transactional = (*TransactionalErrorInjector)(nil) +) + +// NewErrorInjector returns a wrapped physical backend to inject error +func NewErrorInjector(b Backend, errorPercent int, logger log.Logger) *ErrorInjector { + if errorPercent < 0 || errorPercent > 100 { + errorPercent = DefaultErrorPercent + } + logger.Info("creating error injector") + + return &ErrorInjector{ + backend: b, + errorPercent: errorPercent, + randomLock: new(sync.Mutex), + random: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))), + } +} + +// NewTransactionalErrorInjector creates a new transactional ErrorInjector +func NewTransactionalErrorInjector(b Backend, errorPercent int, logger log.Logger) *TransactionalErrorInjector { + return &TransactionalErrorInjector{ + ErrorInjector: NewErrorInjector(b, errorPercent, logger), + Transactional: b.(Transactional), + } +} + +func (e *ErrorInjector) SetErrorPercentage(p int) { + e.errorPercent = p +} + +func (e *ErrorInjector) addError() error { + e.randomLock.Lock() + roll := e.random.Intn(100) + e.randomLock.Unlock() + if roll < e.errorPercent { + return errors.New("random error") + } + + return nil +} + +func (e *ErrorInjector) Put(ctx context.Context, entry *Entry) error { + if err := e.addError(); err != nil { + return err + } + return e.backend.Put(ctx, entry) +} + +func (e *ErrorInjector) Get(ctx context.Context, key string) (*Entry, error) { + if err := e.addError(); err != nil { + return nil, err + } + return e.backend.Get(ctx, key) +} + +func (e *ErrorInjector) Delete(ctx context.Context, key string) error { + if err := e.addError(); err != nil { + return err + } + return e.backend.Delete(ctx, key) +} + +func (e *ErrorInjector) List(ctx context.Context, prefix string) ([]string, error) { + if err := e.addError(); err != nil { + return nil, err + } + return e.backend.List(ctx, prefix) +} + 
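A minimal usage sketch of the error injector defined above, assuming the in-memory backend (inmem.NewInmem) and go-hclog logger that appear elsewhere in this patch; the wrapping main function and the chosen 50% rate are hypothetical and only illustrate how the wrapper delegates to the underlying backend while randomly returning errors (out-of-range percentages fall back to DefaultErrorPercent).

package main

import (
	"context"
	"fmt"

	log "github.com/hashicorp/go-hclog"
	"github.com/hashicorp/vault/sdk/physical"
	"github.com/hashicorp/vault/sdk/physical/inmem"
)

func main() {
	logger := log.NewNullLogger()

	// Underlying storage; the error injector wraps any physical.Backend.
	inm, err := inmem.NewInmem(nil, logger)
	if err != nil {
		panic(err)
	}

	// Roughly half of all calls will return "random error".
	eb := physical.NewErrorInjector(inm, 50, logger)

	ctx := context.Background()
	if putErr := eb.Put(ctx, &physical.Entry{Key: "foo", Value: []byte("bar")}); putErr != nil {
		fmt.Println("injected error on put:", putErr)
	}
	if entry, getErr := eb.Get(ctx, "foo"); getErr == nil && entry != nil {
		fmt.Println("got:", string(entry.Value))
	}
}

The same pattern applies to the transactional variant below: NewTransactionalErrorInjector additionally asserts that the wrapped backend implements Transactional so that Transaction calls can be intercepted as well.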
+func (e *TransactionalErrorInjector) Transaction(ctx context.Context, txns []*TxnEntry) error { + if err := e.addError(); err != nil { + return err + } + return e.Transactional.Transaction(ctx, txns) +} diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/inmem/inmem.go b/vendor/github.com/hashicorp/vault/sdk/physical/inmem/inmem.go new file mode 100644 index 0000000000..b366eb84bf --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/physical/inmem/inmem.go @@ -0,0 +1,292 @@ +package inmem + +import ( + "context" + "errors" + "fmt" + "os" + "strconv" + "strings" + "sync" + "sync/atomic" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/physical" + + radix "github.com/armon/go-radix" +) + +// Verify interfaces are satisfied +var ( + _ physical.Backend = (*InmemBackend)(nil) + _ physical.HABackend = (*InmemHABackend)(nil) + _ physical.HABackend = (*TransactionalInmemHABackend)(nil) + _ physical.Lock = (*InmemLock)(nil) + _ physical.Transactional = (*TransactionalInmemBackend)(nil) + _ physical.Transactional = (*TransactionalInmemHABackend)(nil) +) + +var ( + PutDisabledError = errors.New("put operations disabled in inmem backend") + GetDisabledError = errors.New("get operations disabled in inmem backend") + DeleteDisabledError = errors.New("delete operations disabled in inmem backend") + ListDisabledError = errors.New("list operations disabled in inmem backend") +) + +// InmemBackend is an in-memory only physical backend. It is useful +// for testing and development situations where the data is not +// expected to be durable. +type InmemBackend struct { + sync.RWMutex + root *radix.Tree + permitPool *physical.PermitPool + logger log.Logger + failGet *uint32 + failPut *uint32 + failDelete *uint32 + failList *uint32 + logOps bool + maxValueSize int +} + +type TransactionalInmemBackend struct { + InmemBackend +} + +// NewInmem constructs a new in-memory backend +func NewInmem(conf map[string]string, logger log.Logger) (physical.Backend, error) { + maxValueSize := 0 + maxValueSizeStr, ok := conf["max_value_size"] + if ok { + var err error + maxValueSize, err = strconv.Atoi(maxValueSizeStr) + if err != nil { + return nil, err + } + } + + return &InmemBackend{ + root: radix.New(), + permitPool: physical.NewPermitPool(physical.DefaultParallelOperations), + logger: logger, + failGet: new(uint32), + failPut: new(uint32), + failDelete: new(uint32), + failList: new(uint32), + logOps: os.Getenv("VAULT_INMEM_LOG_ALL_OPS") != "", + maxValueSize: maxValueSize, + }, nil +} + +// Basically for now just creates a permit pool of size 1 so only one operation +// can run at a time +func NewTransactionalInmem(conf map[string]string, logger log.Logger) (physical.Backend, error) { + maxValueSize := 0 + maxValueSizeStr, ok := conf["max_value_size"] + if ok { + var err error + maxValueSize, err = strconv.Atoi(maxValueSizeStr) + if err != nil { + return nil, err + } + } + + return &TransactionalInmemBackend{ + InmemBackend: InmemBackend{ + root: radix.New(), + permitPool: physical.NewPermitPool(1), + logger: logger, + failGet: new(uint32), + failPut: new(uint32), + failDelete: new(uint32), + failList: new(uint32), + logOps: os.Getenv("VAULT_INMEM_LOG_ALL_OPS") != "", + maxValueSize: maxValueSize, + }, + }, nil +} + +// Put is used to insert or update an entry +func (i *InmemBackend) Put(ctx context.Context, entry *physical.Entry) error { + i.permitPool.Acquire() + defer i.permitPool.Release() + + i.Lock() + defer i.Unlock() + + return i.PutInternal(ctx, entry) +} + +func (i 
*InmemBackend) PutInternal(ctx context.Context, entry *physical.Entry) error { + if i.logOps { + i.logger.Trace("put", "key", entry.Key) + } + if atomic.LoadUint32(i.failPut) != 0 { + return PutDisabledError + } + + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + if i.maxValueSize > 0 && len(entry.Value) > i.maxValueSize { + return fmt.Errorf("%s", physical.ErrValueTooLarge) + } + + i.root.Insert(entry.Key, entry.Value) + return nil +} + +func (i *InmemBackend) FailPut(fail bool) { + var val uint32 + if fail { + val = 1 + } + atomic.StoreUint32(i.failPut, val) +} + +// Get is used to fetch an entry +func (i *InmemBackend) Get(ctx context.Context, key string) (*physical.Entry, error) { + i.permitPool.Acquire() + defer i.permitPool.Release() + + i.RLock() + defer i.RUnlock() + + return i.GetInternal(ctx, key) +} + +func (i *InmemBackend) GetInternal(ctx context.Context, key string) (*physical.Entry, error) { + if i.logOps { + i.logger.Trace("get", "key", key) + } + if atomic.LoadUint32(i.failGet) != 0 { + return nil, GetDisabledError + } + + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + + if raw, ok := i.root.Get(key); ok { + return &physical.Entry{ + Key: key, + Value: raw.([]byte), + }, nil + } + return nil, nil +} + +func (i *InmemBackend) FailGet(fail bool) { + var val uint32 + if fail { + val = 1 + } + atomic.StoreUint32(i.failGet, val) +} + +// Delete is used to permanently delete an entry +func (i *InmemBackend) Delete(ctx context.Context, key string) error { + i.permitPool.Acquire() + defer i.permitPool.Release() + + i.Lock() + defer i.Unlock() + + return i.DeleteInternal(ctx, key) +} + +func (i *InmemBackend) DeleteInternal(ctx context.Context, key string) error { + if i.logOps { + i.logger.Trace("delete", "key", key) + } + if atomic.LoadUint32(i.failDelete) != 0 { + return DeleteDisabledError + } + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + i.root.Delete(key) + return nil +} + +func (i *InmemBackend) FailDelete(fail bool) { + var val uint32 + if fail { + val = 1 + } + atomic.StoreUint32(i.failDelete, val) +} + +// List is used to list all the keys under a given +// prefix, up to the next prefix. 
+func (i *InmemBackend) List(ctx context.Context, prefix string) ([]string, error) { + i.permitPool.Acquire() + defer i.permitPool.Release() + + i.RLock() + defer i.RUnlock() + + return i.ListInternal(ctx, prefix) +} + +func (i *InmemBackend) ListInternal(ctx context.Context, prefix string) ([]string, error) { + if i.logOps { + i.logger.Trace("list", "prefix", prefix) + } + if atomic.LoadUint32(i.failList) != 0 { + return nil, ListDisabledError + } + + var out []string + seen := make(map[string]interface{}) + walkFn := func(s string, v interface{}) bool { + trimmed := strings.TrimPrefix(s, prefix) + sep := strings.Index(trimmed, "/") + if sep == -1 { + out = append(out, trimmed) + } else { + trimmed = trimmed[:sep+1] + if _, ok := seen[trimmed]; !ok { + out = append(out, trimmed) + seen[trimmed] = struct{}{} + } + } + return false + } + i.root.WalkPrefix(prefix, walkFn) + + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + + return out, nil +} + +func (i *InmemBackend) FailList(fail bool) { + var val uint32 + if fail { + val = 1 + } + atomic.StoreUint32(i.failList, val) +} + +// Implements the transaction interface +func (t *TransactionalInmemBackend) Transaction(ctx context.Context, txns []*physical.TxnEntry) error { + t.permitPool.Acquire() + defer t.permitPool.Release() + + t.Lock() + defer t.Unlock() + + return physical.GenericTransactionHandler(ctx, t, txns) +} diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/inmem/inmem_ha.go b/vendor/github.com/hashicorp/vault/sdk/physical/inmem/inmem_ha.go new file mode 100644 index 0000000000..64fcb3a66d --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/physical/inmem/inmem_ha.go @@ -0,0 +1,167 @@ +package inmem + +import ( + "fmt" + "sync" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/physical" +) + +type InmemHABackend struct { + physical.Backend + locks map[string]string + l *sync.Mutex + cond *sync.Cond + logger log.Logger +} + +type TransactionalInmemHABackend struct { + physical.Transactional + InmemHABackend +} + +// NewInmemHA constructs a new in-memory HA backend. This is only for testing. +func NewInmemHA(_ map[string]string, logger log.Logger) (physical.Backend, error) { + be, err := NewInmem(nil, logger) + if err != nil { + return nil, err + } + + in := &InmemHABackend{ + Backend: be, + locks: make(map[string]string), + logger: logger, + l: new(sync.Mutex), + } + in.cond = sync.NewCond(in.l) + return in, nil +} + +func NewTransactionalInmemHA(_ map[string]string, logger log.Logger) (physical.Backend, error) { + transInmem, err := NewTransactionalInmem(nil, logger) + if err != nil { + return nil, err + } + inmemHA := InmemHABackend{ + Backend: transInmem, + locks: make(map[string]string), + logger: logger, + l: new(sync.Mutex), + } + + in := &TransactionalInmemHABackend{ + InmemHABackend: inmemHA, + Transactional: transInmem.(physical.Transactional), + } + in.cond = sync.NewCond(in.l) + return in, nil +} + +// LockWith is used for mutual exclusion based on the given key. +func (i *InmemHABackend) LockWith(key, value string) (physical.Lock, error) { + l := &InmemLock{ + in: i, + key: key, + value: value, + } + return l, nil +} + +// LockMapSize is used in some tests to determine whether this backend has ever +// been used for HA purposes rather than simply for storage +func (i *InmemHABackend) LockMapSize() int { + return len(i.locks) +} + +// HAEnabled indicates whether the HA functionality should be exposed. +// Currently always returns true. 
+func (i *InmemHABackend) HAEnabled() bool { + return true +} + +// InmemLock is an in-memory Lock implementation for the HABackend +type InmemLock struct { + in *InmemHABackend + key string + value string + + held bool + leaderCh chan struct{} + l sync.Mutex +} + +func (i *InmemLock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) { + i.l.Lock() + defer i.l.Unlock() + if i.held { + return nil, fmt.Errorf("lock already held") + } + + // Attempt an async acquisition + didLock := make(chan struct{}) + releaseCh := make(chan bool, 1) + go func() { + // Wait to acquire the lock + i.in.l.Lock() + _, ok := i.in.locks[i.key] + for ok { + i.in.cond.Wait() + _, ok = i.in.locks[i.key] + } + i.in.locks[i.key] = i.value + i.in.l.Unlock() + + // Signal that lock is held + close(didLock) + + // Handle an early abort + release := <-releaseCh + if release { + i.in.l.Lock() + delete(i.in.locks, i.key) + i.in.l.Unlock() + i.in.cond.Broadcast() + } + }() + + // Wait for lock acquisition or shutdown + select { + case <-didLock: + releaseCh <- false + case <-stopCh: + releaseCh <- true + return nil, nil + } + + // Create the leader channel + i.held = true + i.leaderCh = make(chan struct{}) + return i.leaderCh, nil +} + +func (i *InmemLock) Unlock() error { + i.l.Lock() + defer i.l.Unlock() + + if !i.held { + return nil + } + + close(i.leaderCh) + i.leaderCh = nil + i.held = false + + i.in.l.Lock() + delete(i.in.locks, i.key) + i.in.l.Unlock() + i.in.cond.Broadcast() + return nil +} + +func (i *InmemLock) Value() (bool, string, error) { + i.in.l.Lock() + val, ok := i.in.locks[i.key] + i.in.l.Unlock() + return ok, val, nil +} diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/latency.go b/vendor/github.com/hashicorp/vault/sdk/physical/latency.go new file mode 100644 index 0000000000..18b2c4c145 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/physical/latency.go @@ -0,0 +1,113 @@ +package physical + +import ( + "context" + "math/rand" + "sync" + "time" + + log "github.com/hashicorp/go-hclog" + uberAtomic "go.uber.org/atomic" +) + +const ( + // DefaultJitterPercent is used if no cache size is specified for NewCache + DefaultJitterPercent = 20 +) + +// LatencyInjector is used to add latency into underlying physical requests +type LatencyInjector struct { + logger log.Logger + backend Backend + latency *uberAtomic.Duration + jitterPercent int + randomLock *sync.Mutex + random *rand.Rand +} + +// TransactionalLatencyInjector is the transactional version of the latency +// injector +type TransactionalLatencyInjector struct { + *LatencyInjector + Transactional +} + +// Verify LatencyInjector satisfies the correct interfaces +var ( + _ Backend = (*LatencyInjector)(nil) + _ Transactional = (*TransactionalLatencyInjector)(nil) +) + +// NewLatencyInjector returns a wrapped physical backend to simulate latency +func NewLatencyInjector(b Backend, latency time.Duration, jitter int, logger log.Logger) *LatencyInjector { + if jitter < 0 || jitter > 100 { + jitter = DefaultJitterPercent + } + logger.Info("creating latency injector") + + return &LatencyInjector{ + logger: logger, + backend: b, + latency: uberAtomic.NewDuration(latency), + jitterPercent: jitter, + randomLock: new(sync.Mutex), + random: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))), + } +} + +// NewTransactionalLatencyInjector creates a new transactional LatencyInjector +func NewTransactionalLatencyInjector(b Backend, latency time.Duration, jitter int, logger log.Logger) *TransactionalLatencyInjector { + return 
&TransactionalLatencyInjector{ + LatencyInjector: NewLatencyInjector(b, latency, jitter, logger), + Transactional: b.(Transactional), + } +} + +func (l *LatencyInjector) SetLatency(latency time.Duration) { + l.logger.Info("Changing backend latency", "latency", latency) + l.latency.Store(latency) +} + +func (l *LatencyInjector) addLatency() { + // Calculate a value between 1 +- jitter% + percent := 100 + if l.jitterPercent > 0 { + min := 100 - l.jitterPercent + max := 100 + l.jitterPercent + l.randomLock.Lock() + percent = l.random.Intn(max-min) + min + l.randomLock.Unlock() + } + latencyDuration := time.Duration(int(l.latency.Load()) * percent / 100) + time.Sleep(latencyDuration) +} + +// Put is a latent put request +func (l *LatencyInjector) Put(ctx context.Context, entry *Entry) error { + l.addLatency() + return l.backend.Put(ctx, entry) +} + +// Get is a latent get request +func (l *LatencyInjector) Get(ctx context.Context, key string) (*Entry, error) { + l.addLatency() + return l.backend.Get(ctx, key) +} + +// Delete is a latent delete request +func (l *LatencyInjector) Delete(ctx context.Context, key string) error { + l.addLatency() + return l.backend.Delete(ctx, key) +} + +// List is a latent list request +func (l *LatencyInjector) List(ctx context.Context, prefix string) ([]string, error) { + l.addLatency() + return l.backend.List(ctx, prefix) +} + +// Transaction is a latent transaction request +func (l *TransactionalLatencyInjector) Transaction(ctx context.Context, txns []*TxnEntry) error { + l.addLatency() + return l.Transactional.Transaction(ctx, txns) +} diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/physical.go b/vendor/github.com/hashicorp/vault/sdk/physical/physical.go new file mode 100644 index 0000000000..8cc4e9ab17 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/physical/physical.go @@ -0,0 +1,133 @@ +package physical + +import ( + "context" + "strings" + + log "github.com/hashicorp/go-hclog" +) + +const DefaultParallelOperations = 128 + +// The operation type +type Operation string + +const ( + DeleteOperation Operation = "delete" + GetOperation = "get" + ListOperation = "list" + PutOperation = "put" +) + +const ( + ErrValueTooLarge = "put failed due to value being too large" +) + +// Backend is the interface required for a physical +// backend. A physical backend is used to durably store +// data outside of Vault. As such, it is completely untrusted, +// and is only accessed via a security barrier. The backends +// must represent keys in a hierarchical manner. All methods +// are expected to be thread safe. +type Backend interface { + // Put is used to insert or update an entry + Put(ctx context.Context, entry *Entry) error + + // Get is used to fetch an entry + Get(ctx context.Context, key string) (*Entry, error) + + // Delete is used to permanently delete an entry + Delete(ctx context.Context, key string) error + + // List is used to list all the keys under a given + // prefix, up to the next prefix. + List(ctx context.Context, prefix string) ([]string, error) +} + +// HABackend is an extensions to the standard physical +// backend to support high-availability. Vault only expects to +// use mutual exclusion to allow multiple instances to act as a +// hot standby for a leader that services all requests. +type HABackend interface { + // LockWith is used for mutual exclusion based on the given key. 
+ LockWith(key, value string) (Lock, error) + + // Whether or not HA functionality is enabled + HAEnabled() bool +} + +// ToggleablePurgemonster is an interface for backends that can toggle on or +// off special functionality and/or support purging. This is only used for the +// cache, don't use it for other things. +type ToggleablePurgemonster interface { + Purge(ctx context.Context) + SetEnabled(bool) +} + +// RedirectDetect is an optional interface that an HABackend +// can implement. If they do, a redirect address can be automatically +// detected. +type RedirectDetect interface { + // DetectHostAddr is used to detect the host address + DetectHostAddr() (string, error) +} + +type Lock interface { + // Lock is used to acquire the given lock + // The stopCh is optional and if closed should interrupt the lock + // acquisition attempt. The return struct should be closed when + // leadership is lost. + Lock(stopCh <-chan struct{}) (<-chan struct{}, error) + + // Unlock is used to release the lock + Unlock() error + + // Returns the value of the lock and if it is held + Value() (bool, string, error) +} + +// Factory is the factory function to create a physical backend. +type Factory func(config map[string]string, logger log.Logger) (Backend, error) + +// PermitPool is used to limit maximum outstanding requests +type PermitPool struct { + sem chan int +} + +// NewPermitPool returns a new permit pool with the provided +// number of permits +func NewPermitPool(permits int) *PermitPool { + if permits < 1 { + permits = DefaultParallelOperations + } + return &PermitPool{ + sem: make(chan int, permits), + } +} + +// Acquire returns when a permit has been acquired +func (c *PermitPool) Acquire() { + c.sem <- 1 +} + +// Release returns a permit to the pool +func (c *PermitPool) Release() { + <-c.sem +} + +// Get number of requests in the permit pool +func (c *PermitPool) CurrentPermits() int { + return len(c.sem) +} + +// Prefixes is a shared helper function returns all parent 'folders' for a +// given vault key. +// e.g. for 'foo/bar/baz', it returns ['foo', 'foo/bar'] +func Prefixes(s string) []string { + components := strings.Split(s, "/") + result := []string{} + for i := 1; i < len(components); i++ { + result = append(result, strings.Join(components[:i], "/")) + } + return result +} diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/physical_access.go b/vendor/github.com/hashicorp/vault/sdk/physical/physical_access.go new file mode 100644 index 0000000000..7497313afc --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/physical/physical_access.go @@ -0,0 +1,40 @@ +package physical + +import ( + "context" +) + +// PhysicalAccess is a wrapper around physical.Backend that allows Core to +// expose its physical storage operations through PhysicalAccess() while +// restricting the ability to modify Core.physical itself. 
+type PhysicalAccess struct { + physical Backend +} + +var _ Backend = (*PhysicalAccess)(nil) + +func NewPhysicalAccess(physical Backend) *PhysicalAccess { + return &PhysicalAccess{physical: physical} +} + +func (p *PhysicalAccess) Put(ctx context.Context, entry *Entry) error { + return p.physical.Put(ctx, entry) +} + +func (p *PhysicalAccess) Get(ctx context.Context, key string) (*Entry, error) { + return p.physical.Get(ctx, key) +} + +func (p *PhysicalAccess) Delete(ctx context.Context, key string) error { + return p.physical.Delete(ctx, key) +} + +func (p *PhysicalAccess) List(ctx context.Context, prefix string) ([]string, error) { + return p.physical.List(ctx, prefix) +} + +func (p *PhysicalAccess) Purge(ctx context.Context) { + if purgeable, ok := p.physical.(ToggleablePurgemonster); ok { + purgeable.Purge(ctx) + } +} diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/physical_view.go b/vendor/github.com/hashicorp/vault/sdk/physical/physical_view.go new file mode 100644 index 0000000000..189ac93172 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/physical/physical_view.go @@ -0,0 +1,94 @@ +package physical + +import ( + "context" + "errors" + "strings" +) + +var ErrRelativePath = errors.New("relative paths not supported") + +// View represents a prefixed view of a physical backend +type View struct { + backend Backend + prefix string +} + +// Verify View satisfies the correct interfaces +var _ Backend = (*View)(nil) + +// NewView takes an underlying physical backend and returns +// a view of it that can only operate with the given prefix. +func NewView(backend Backend, prefix string) *View { + return &View{ + backend: backend, + prefix: prefix, + } +} + +// List the contents of the prefixed view +func (v *View) List(ctx context.Context, prefix string) ([]string, error) { + if err := v.sanityCheck(prefix); err != nil { + return nil, err + } + return v.backend.List(ctx, v.expandKey(prefix)) +} + +// Get the key of the prefixed view +func (v *View) Get(ctx context.Context, key string) (*Entry, error) { + if err := v.sanityCheck(key); err != nil { + return nil, err + } + entry, err := v.backend.Get(ctx, v.expandKey(key)) + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + entry.Key = v.truncateKey(entry.Key) + + return &Entry{ + Key: entry.Key, + Value: entry.Value, + }, nil +} + +// Put the entry into the prefix view +func (v *View) Put(ctx context.Context, entry *Entry) error { + if err := v.sanityCheck(entry.Key); err != nil { + return err + } + + nested := &Entry{ + Key: v.expandKey(entry.Key), + Value: entry.Value, + } + return v.backend.Put(ctx, nested) +} + +// Delete the entry from the prefix view +func (v *View) Delete(ctx context.Context, key string) error { + if err := v.sanityCheck(key); err != nil { + return err + } + return v.backend.Delete(ctx, v.expandKey(key)) +} + +// sanityCheck is used to perform a sanity check on a key +func (v *View) sanityCheck(key string) error { + if strings.Contains(key, "..") { + return ErrRelativePath + } + return nil +} + +// expandKey is used to expand to the full key path with the prefix +func (v *View) expandKey(suffix string) string { + return v.prefix + suffix +} + +// truncateKey is used to remove the prefix of the key +func (v *View) truncateKey(full string) string { + return strings.TrimPrefix(full, v.prefix) +} diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/testing.go b/vendor/github.com/hashicorp/vault/sdk/physical/testing.go new file mode 100644 index 
0000000000..6e0ddfcc0e --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/physical/testing.go @@ -0,0 +1,497 @@ +package physical + +import ( + "context" + "reflect" + "sort" + "testing" + "time" +) + +func ExerciseBackend(t testing.TB, b Backend) { + t.Helper() + + // Should be empty + keys, err := b.List(context.Background(), "") + if err != nil { + t.Fatalf("initial list failed: %v", err) + } + if len(keys) != 0 { + t.Errorf("initial not empty: %v", keys) + } + + // Delete should work if it does not exist + err = b.Delete(context.Background(), "foo") + if err != nil { + t.Fatalf("idempotent delete: %v", err) + } + + // Get should not fail, but be nil + out, err := b.Get(context.Background(), "foo") + if err != nil { + t.Fatalf("initial get failed: %v", err) + } + if out != nil { + t.Errorf("initial get was not nil: %v", out) + } + + // Make an entry + e := &Entry{Key: "foo", Value: []byte("test")} + err = b.Put(context.Background(), e) + if err != nil { + t.Fatalf("put failed: %v", err) + } + + // Get should work + out, err = b.Get(context.Background(), "foo") + if err != nil { + t.Fatalf("get failed: %v", err) + } + if !reflect.DeepEqual(out, e) { + t.Errorf("bad: %v expected: %v", out, e) + } + + // List should not be empty + keys, err = b.List(context.Background(), "") + if err != nil { + t.Fatalf("list failed: %v", err) + } + if len(keys) != 1 || keys[0] != "foo" { + t.Errorf("keys[0] did not equal foo: %v", keys) + } + + // Delete should work + err = b.Delete(context.Background(), "foo") + if err != nil { + t.Fatalf("delete: %v", err) + } + + // Should be empty + keys, err = b.List(context.Background(), "") + if err != nil { + t.Fatalf("list after delete: %v", err) + } + if len(keys) != 0 { + t.Errorf("list after delete not empty: %v", keys) + } + + // Get should fail + out, err = b.Get(context.Background(), "foo") + if err != nil { + t.Fatalf("get after delete: %v", err) + } + if out != nil { + t.Errorf("get after delete not nil: %v", out) + } + + // Multiple Puts should work; GH-189 + e = &Entry{Key: "foo", Value: []byte("test")} + err = b.Put(context.Background(), e) + if err != nil { + t.Fatalf("multi put 1 failed: %v", err) + } + e = &Entry{Key: "foo", Value: []byte("test")} + err = b.Put(context.Background(), e) + if err != nil { + t.Fatalf("multi put 2 failed: %v", err) + } + + // Make a nested entry + e = &Entry{Key: "foo/bar", Value: []byte("baz")} + err = b.Put(context.Background(), e) + if err != nil { + t.Fatalf("nested put failed: %v", err) + } + + // Get should work + out, err = b.Get(context.Background(), "foo/bar") + if err != nil { + t.Fatalf("get failed: %v", err) + } + if !reflect.DeepEqual(out, e) { + t.Errorf("bad: %v expected: %v", out, e) + } + + keys, err = b.List(context.Background(), "") + if err != nil { + t.Fatalf("list multi failed: %v", err) + } + sort.Strings(keys) + if len(keys) != 2 || keys[0] != "foo" || keys[1] != "foo/" { + t.Errorf("expected 2 keys [foo, foo/]: %v", keys) + } + + // Delete with children should work + err = b.Delete(context.Background(), "foo") + if err != nil { + t.Fatalf("delete after multi: %v", err) + } + + // Get should return the child + out, err = b.Get(context.Background(), "foo/bar") + if err != nil { + t.Fatalf("get after multi delete: %v", err) + } + if out == nil { + t.Errorf("get after multi delete not nil: %v", out) + } + + // Removal of nested secret should not leave artifacts + e = &Entry{Key: "foo/nested1/nested2/nested3", Value: []byte("baz")} + err = b.Put(context.Background(), e) + if err != nil { + 
t.Fatalf("deep nest: %v", err) + } + + err = b.Delete(context.Background(), "foo/nested1/nested2/nested3") + if err != nil { + t.Fatalf("failed to remove deep nest: %v", err) + } + + keys, err = b.List(context.Background(), "foo/") + if err != nil { + t.Fatalf("err: %v", err) + } + if len(keys) != 1 || keys[0] != "bar" { + t.Errorf("should be exactly 1 key == bar: %v", keys) + } + + // Make a second nested entry to test prefix removal + e = &Entry{Key: "foo/zip", Value: []byte("zap")} + err = b.Put(context.Background(), e) + if err != nil { + t.Fatalf("failed to create second nested: %v", err) + } + + // Delete should not remove the prefix + err = b.Delete(context.Background(), "foo/bar") + if err != nil { + t.Fatalf("failed to delete nested prefix: %v", err) + } + + keys, err = b.List(context.Background(), "") + if err != nil { + t.Fatalf("list nested prefix: %v", err) + } + if len(keys) != 1 || keys[0] != "foo/" { + t.Errorf("should be exactly 1 key == foo/: %v", keys) + } + + // Delete should remove the prefix + err = b.Delete(context.Background(), "foo/zip") + if err != nil { + t.Fatalf("failed to delete second prefix: %v", err) + } + + keys, err = b.List(context.Background(), "") + if err != nil { + t.Fatalf("listing after second delete failed: %v", err) + } + if len(keys) != 0 { + t.Errorf("should be empty at end: %v", keys) + } + + // When the root path is empty, adding and removing deep nested values should not break listing + e = &Entry{Key: "foo/nested1/nested2/value1", Value: []byte("baz")} + err = b.Put(context.Background(), e) + if err != nil { + t.Fatalf("deep nest: %v", err) + } + + e = &Entry{Key: "foo/nested1/nested2/value2", Value: []byte("baz")} + err = b.Put(context.Background(), e) + if err != nil { + t.Fatalf("deep nest: %v", err) + } + + err = b.Delete(context.Background(), "foo/nested1/nested2/value2") + if err != nil { + t.Fatalf("failed to remove deep nest: %v", err) + } + + keys, err = b.List(context.Background(), "") + if err != nil { + t.Fatalf("listing of root failed after deletion: %v", err) + } + if len(keys) == 0 { + t.Errorf("root is returning empty after deleting a single nested value, expected nested1/: %v", keys) + keys, err = b.List(context.Background(), "foo/nested1") + if err != nil { + t.Fatalf("listing of expected nested path 'foo/nested1' failed: %v", err) + } + // prove that the root should not be empty and that foo/nested1 exists + if len(keys) != 0 { + t.Logf(" keys can still be listed from nested1/ so it's not empty, expected nested2/: %v", keys) + } + } + + // cleanup left over listing bug test value + err = b.Delete(context.Background(), "foo/nested1/nested2/value1") + if err != nil { + t.Fatalf("failed to remove deep nest: %v", err) + } + + keys, err = b.List(context.Background(), "") + if err != nil { + t.Fatalf("listing of root failed after delete of deep nest: %v", err) + } + if len(keys) != 0 { + t.Errorf("should be empty at end: %v", keys) + } +} + +func ExerciseBackend_ListPrefix(t testing.TB, b Backend) { + t.Helper() + + e1 := &Entry{Key: "foo", Value: []byte("test")} + e2 := &Entry{Key: "foo/bar", Value: []byte("test")} + e3 := &Entry{Key: "foo/bar/baz", Value: []byte("test")} + + defer func() { + b.Delete(context.Background(), "foo") + b.Delete(context.Background(), "foo/bar") + b.Delete(context.Background(), "foo/bar/baz") + }() + + err := b.Put(context.Background(), e1) + if err != nil { + t.Fatalf("failed to put entry 1: %v", err) + } + err = b.Put(context.Background(), e2) + if err != nil { + t.Fatalf("failed to put entry 2: 
%v", err) + } + err = b.Put(context.Background(), e3) + if err != nil { + t.Fatalf("failed to put entry 3: %v", err) + } + + // Scan the root + keys, err := b.List(context.Background(), "") + if err != nil { + t.Fatalf("list root: %v", err) + } + sort.Strings(keys) + if len(keys) != 2 || keys[0] != "foo" || keys[1] != "foo/" { + t.Errorf("root expected [foo foo/]: %v", keys) + } + + // Scan foo/ + keys, err = b.List(context.Background(), "foo/") + if err != nil { + t.Fatalf("list level 1: %v", err) + } + sort.Strings(keys) + if len(keys) != 2 || keys[0] != "bar" || keys[1] != "bar/" { + t.Errorf("level 1 expected [bar bar/]: %v", keys) + } + + // Scan foo/bar/ + keys, err = b.List(context.Background(), "foo/bar/") + if err != nil { + t.Fatalf("list level 2: %v", err) + } + sort.Strings(keys) + if len(keys) != 1 || keys[0] != "baz" { + t.Errorf("level 1 expected [baz]: %v", keys) + } +} + +func ExerciseHABackend(t testing.TB, b HABackend, b2 HABackend) { + t.Helper() + + // Get the lock + lock, err := b.LockWith("foo", "bar") + if err != nil { + t.Fatalf("initial lock: %v", err) + } + + // Attempt to lock + leaderCh, err := lock.Lock(nil) + if err != nil { + t.Fatalf("lock attempt 1: %v", err) + } + if leaderCh == nil { + t.Fatalf("missing leaderCh") + } + + // Check the value + held, val, err := lock.Value() + if err != nil { + t.Fatalf("err: %v", err) + } + if !held { + t.Errorf("should be held") + } + if val != "bar" { + t.Errorf("expected value bar: %v", err) + } + + // Second acquisition should fail + lock2, err := b2.LockWith("foo", "baz") + if err != nil { + t.Fatalf("lock 2: %v", err) + } + + // Cancel attempt in 50 msec + stopCh := make(chan struct{}) + time.AfterFunc(50*time.Millisecond, func() { + close(stopCh) + }) + + // Attempt to lock + leaderCh2, err := lock2.Lock(stopCh) + if err != nil { + t.Fatalf("stop lock 2: %v", err) + } + if leaderCh2 != nil { + t.Errorf("should not have gotten leaderCh: %v", leaderCh2) + } + + // Release the first lock + lock.Unlock() + + // Attempt to lock should work + leaderCh2, err = lock2.Lock(nil) + if err != nil { + t.Fatalf("lock 2 lock: %v", err) + } + if leaderCh2 == nil { + t.Errorf("should get leaderCh") + } + + // Check the value + held, val, err = lock2.Value() + if err != nil { + t.Fatalf("value: %v", err) + } + if !held { + t.Errorf("should still be held") + } + if val != "baz" { + t.Errorf("expected: baz, got: %v", val) + } + + // Cleanup + lock2.Unlock() +} + +func ExerciseTransactionalBackend(t testing.TB, b Backend) { + t.Helper() + tb, ok := b.(Transactional) + if !ok { + t.Fatal("Not a transactional backend") + } + + txns := SetupTestingTransactions(t, b) + + if err := tb.Transaction(context.Background(), txns); err != nil { + t.Fatal(err) + } + + keys, err := b.List(context.Background(), "") + if err != nil { + t.Fatal(err) + } + + expected := []string{"foo", "zip"} + + sort.Strings(keys) + sort.Strings(expected) + if !reflect.DeepEqual(keys, expected) { + t.Fatalf("mismatch: expected\n%#v\ngot\n%#v\n", expected, keys) + } + + entry, err := b.Get(context.Background(), "foo") + if err != nil { + t.Fatal(err) + } + if entry == nil { + t.Fatal("got nil entry") + } + if entry.Value == nil { + t.Fatal("got nil value") + } + if string(entry.Value) != "bar3" { + t.Fatal("updates did not apply correctly") + } + + entry, err = b.Get(context.Background(), "zip") + if err != nil { + t.Fatal(err) + } + if entry == nil { + t.Fatal("got nil entry") + } + if entry.Value == nil { + t.Fatal("got nil value") + } + if string(entry.Value) != 
"zap3" { + t.Fatal("updates did not apply correctly") + } +} + +func SetupTestingTransactions(t testing.TB, b Backend) []*TxnEntry { + t.Helper() + // Add a few keys so that we test rollback with deletion + if err := b.Put(context.Background(), &Entry{ + Key: "foo", + Value: []byte("bar"), + }); err != nil { + t.Fatal(err) + } + if err := b.Put(context.Background(), &Entry{ + Key: "zip", + Value: []byte("zap"), + }); err != nil { + t.Fatal(err) + } + if err := b.Put(context.Background(), &Entry{ + Key: "deleteme", + }); err != nil { + t.Fatal(err) + } + if err := b.Put(context.Background(), &Entry{ + Key: "deleteme2", + }); err != nil { + t.Fatal(err) + } + + txns := []*TxnEntry{ + { + Operation: PutOperation, + Entry: &Entry{ + Key: "foo", + Value: []byte("bar2"), + }, + }, + { + Operation: DeleteOperation, + Entry: &Entry{ + Key: "deleteme", + }, + }, + { + Operation: PutOperation, + Entry: &Entry{ + Key: "foo", + Value: []byte("bar3"), + }, + }, + { + Operation: DeleteOperation, + Entry: &Entry{ + Key: "deleteme2", + }, + }, + { + Operation: PutOperation, + Entry: &Entry{ + Key: "zip", + Value: []byte("zap3"), + }, + }, + } + + return txns +} diff --git a/vendor/github.com/hashicorp/vault/sdk/physical/transactions.go b/vendor/github.com/hashicorp/vault/sdk/physical/transactions.go new file mode 100644 index 0000000000..19f0d2cbed --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/physical/transactions.go @@ -0,0 +1,131 @@ +package physical + +import ( + "context" + + multierror "github.com/hashicorp/go-multierror" +) + +// TxnEntry is an operation that takes atomically as part of +// a transactional update. Only supported by Transactional backends. +type TxnEntry struct { + Operation Operation + Entry *Entry +} + +// Transactional is an optional interface for backends that +// support doing transactional updates of multiple keys. This is +// required for some features such as replication. +type Transactional interface { + // The function to run a transaction + Transaction(context.Context, []*TxnEntry) error +} + +type TransactionalBackend interface { + Backend + Transactional +} + +type PseudoTransactional interface { + // An internal function should do no locking or permit pool acquisition. + // Depending on the backend and if it natively supports transactions, these + // may simply chain to the normal backend functions. + GetInternal(context.Context, string) (*Entry, error) + PutInternal(context.Context, *Entry) error + DeleteInternal(context.Context, string) error +} + +// Implements the transaction interface +func GenericTransactionHandler(ctx context.Context, t PseudoTransactional, txns []*TxnEntry) (retErr error) { + rollbackStack := make([]*TxnEntry, 0, len(txns)) + var dirty bool + + // We walk the transactions in order; each successful operation goes into a + // LIFO for rollback if we hit an error along the way +TxnWalk: + for _, txn := range txns { + switch txn.Operation { + case DeleteOperation: + entry, err := t.GetInternal(ctx, txn.Entry.Key) + if err != nil { + retErr = multierror.Append(retErr, err) + dirty = true + break TxnWalk + } + if entry == nil { + // Nothing to delete or roll back + continue + } + rollbackEntry := &TxnEntry{ + Operation: PutOperation, + Entry: &Entry{ + Key: entry.Key, + Value: entry.Value, + }, + } + err = t.DeleteInternal(ctx, txn.Entry.Key) + if err != nil { + retErr = multierror.Append(retErr, err) + dirty = true + break TxnWalk + } + rollbackStack = append([]*TxnEntry{rollbackEntry}, rollbackStack...) 
+ + case PutOperation: + entry, err := t.GetInternal(ctx, txn.Entry.Key) + if err != nil { + retErr = multierror.Append(retErr, err) + dirty = true + break TxnWalk + } + // Nothing existed so in fact rolling back requires a delete + var rollbackEntry *TxnEntry + if entry == nil { + rollbackEntry = &TxnEntry{ + Operation: DeleteOperation, + Entry: &Entry{ + Key: txn.Entry.Key, + }, + } + } else { + rollbackEntry = &TxnEntry{ + Operation: PutOperation, + Entry: &Entry{ + Key: entry.Key, + Value: entry.Value, + }, + } + } + + err = t.PutInternal(ctx, txn.Entry) + if err != nil { + retErr = multierror.Append(retErr, err) + dirty = true + break TxnWalk + } + rollbackStack = append([]*TxnEntry{rollbackEntry}, rollbackStack...) + } + } + + // Need to roll back because we hit an error along the way + if dirty { + // While traversing this, if we get an error, we continue anyways in + // best-effort fashion + for _, txn := range rollbackStack { + switch txn.Operation { + case DeleteOperation: + err := t.DeleteInternal(ctx, txn.Entry.Key) + if err != nil { + retErr = multierror.Append(retErr, err) + } + case PutOperation: + err := t.PutInternal(ctx, txn.Entry) + if err != nil { + retErr = multierror.Append(retErr, err) + } + } + } + } + + return +} diff --git a/vendor/github.com/hashicorp/vault/sdk/version/cgo.go b/vendor/github.com/hashicorp/vault/sdk/version/cgo.go new file mode 100644 index 0000000000..2ed493a1fb --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/version/cgo.go @@ -0,0 +1,7 @@ +// +build cgo + +package version + +func init() { + CgoEnabled = true +} diff --git a/vendor/github.com/hashicorp/vault/sdk/version/version.go b/vendor/github.com/hashicorp/vault/sdk/version/version.go new file mode 100644 index 0000000000..e0db36e8f0 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/version/version.go @@ -0,0 +1,74 @@ +package version + +import ( + "bytes" + "fmt" +) + +// VersionInfo +type VersionInfo struct { + Revision string + Version string + VersionPrerelease string + VersionMetadata string +} + +func GetVersion() *VersionInfo { + ver := Version + rel := VersionPrerelease + md := VersionMetadata + if GitDescribe != "" { + ver = GitDescribe + } + if GitDescribe == "" && rel == "" && VersionPrerelease != "" { + rel = "dev" + } + + return &VersionInfo{ + Revision: GitCommit, + Version: ver, + VersionPrerelease: rel, + VersionMetadata: md, + } +} + +func (c *VersionInfo) VersionNumber() string { + if Version == "unknown" && VersionPrerelease == "unknown" { + return "(version unknown)" + } + + version := fmt.Sprintf("%s", c.Version) + + if c.VersionPrerelease != "" { + version = fmt.Sprintf("%s-%s", version, c.VersionPrerelease) + } + + if c.VersionMetadata != "" { + version = fmt.Sprintf("%s+%s", version, c.VersionMetadata) + } + + return version +} + +func (c *VersionInfo) FullVersionNumber(rev bool) string { + var versionString bytes.Buffer + + if Version == "unknown" && VersionPrerelease == "unknown" { + return "Vault (version unknown)" + } + + fmt.Fprintf(&versionString, "Vault v%s", c.Version) + if c.VersionPrerelease != "" { + fmt.Fprintf(&versionString, "-%s", c.VersionPrerelease) + } + + if c.VersionMetadata != "" { + fmt.Fprintf(&versionString, "+%s", c.VersionMetadata) + } + + if rev && c.Revision != "" { + fmt.Fprintf(&versionString, " (%s)", c.Revision) + } + + return versionString.String() +} diff --git a/vendor/github.com/hashicorp/vault/sdk/version/version_base.go b/vendor/github.com/hashicorp/vault/sdk/version/version_base.go new file mode 100644 index 
0000000000..272099247c --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/version/version_base.go @@ -0,0 +1,14 @@ +package version + +var ( + // The git commit that was compiled. This will be filled in by the compiler. + GitCommit string + GitDescribe string + + // Whether cgo is enabled or not; set at build time + CgoEnabled bool + + Version = "1.9.0" + VersionPrerelease = "dev" + VersionMetadata = "" +) diff --git a/vendor/github.com/hashicorp/yamux/.gitignore b/vendor/github.com/hashicorp/yamux/.gitignore new file mode 100644 index 0000000000..836562412f --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/.gitignore @@ -0,0 +1,23 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test diff --git a/vendor/github.com/hashicorp/yamux/LICENSE b/vendor/github.com/hashicorp/yamux/LICENSE new file mode 100644 index 0000000000..f0e5c79e18 --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/LICENSE @@ -0,0 +1,362 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. 
"Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. 
Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. 
Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. 
Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. \ No newline at end of file diff --git a/vendor/github.com/hashicorp/yamux/README.md b/vendor/github.com/hashicorp/yamux/README.md new file mode 100644 index 0000000000..d4db7fc99b --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/README.md @@ -0,0 +1,86 @@ +# Yamux + +Yamux (Yet another Multiplexer) is a multiplexing library for Golang. +It relies on an underlying connection to provide reliability +and ordering, such as TCP or Unix domain sockets, and provides +stream-oriented multiplexing. It is inspired by SPDY but is not +interoperable with it. 
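+
+As a minimal, self-contained sketch of that idea, both ends of a single
+connection can be multiplexed as follows (this example assumes an in-memory
+`net.Pipe` connection purely for illustration; the Usage section below shows
+the TCP form):
+
+```go
+package main
+
+import (
+	"fmt"
+	"net"
+
+	"github.com/hashicorp/yamux"
+)
+
+func main() {
+	// net.Pipe stands in for any reliable, ordered connection (e.g. TCP).
+	clientConn, serverConn := net.Pipe()
+
+	// Server side: accept streams and echo the first 4 bytes back.
+	go func() {
+		session, err := yamux.Server(serverConn, nil)
+		if err != nil {
+			panic(err)
+		}
+		for {
+			stream, err := session.Accept()
+			if err != nil {
+				return
+			}
+			go func(s net.Conn) {
+				buf := make([]byte, 4)
+				s.Read(buf)
+				s.Write(buf)
+			}(stream)
+		}
+	}()
+
+	// Client side: open a logical stream, send a message, read the echo.
+	session, err := yamux.Client(clientConn, nil)
+	if err != nil {
+		panic(err)
+	}
+	stream, err := session.Open()
+	if err != nil {
+		panic(err)
+	}
+	stream.Write([]byte("ping"))
+
+	buf := make([]byte, 4)
+	stream.Read(buf)
+	fmt.Println(string(buf)) // "ping"
+}
+```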
+ +Yamux features include: + +* Bi-directional streams + * Streams can be opened by either client or server + * Useful for NAT traversal + * Server-side push support +* Flow control + * Avoid starvation + * Back-pressure to prevent overwhelming a receiver +* Keep Alives + * Enables persistent connections over a load balancer +* Efficient + * Enables thousands of logical streams with low overhead + +## Documentation + +For complete documentation, see the associated [Godoc](http://godoc.org/github.com/hashicorp/yamux). + +## Specification + +The full specification for Yamux is provided in the `spec.md` file. +It can be used as a guide to implementors of interoperable libraries. + +## Usage + +Using Yamux is remarkably simple: + +```go + +func client() { + // Get a TCP connection + conn, err := net.Dial(...) + if err != nil { + panic(err) + } + + // Setup client side of yamux + session, err := yamux.Client(conn, nil) + if err != nil { + panic(err) + } + + // Open a new stream + stream, err := session.Open() + if err != nil { + panic(err) + } + + // Stream implements net.Conn + stream.Write([]byte("ping")) +} + +func server() { + // Accept a TCP connection + conn, err := listener.Accept() + if err != nil { + panic(err) + } + + // Setup server side of yamux + session, err := yamux.Server(conn, nil) + if err != nil { + panic(err) + } + + // Accept a stream + stream, err := session.Accept() + if err != nil { + panic(err) + } + + // Listen for a message + buf := make([]byte, 4) + stream.Read(buf) +} + +``` + diff --git a/vendor/github.com/hashicorp/yamux/addr.go b/vendor/github.com/hashicorp/yamux/addr.go new file mode 100644 index 0000000000..be6ebca9c7 --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/addr.go @@ -0,0 +1,60 @@ +package yamux + +import ( + "fmt" + "net" +) + +// hasAddr is used to get the address from the underlying connection +type hasAddr interface { + LocalAddr() net.Addr + RemoteAddr() net.Addr +} + +// yamuxAddr is used when we cannot get the underlying address +type yamuxAddr struct { + Addr string +} + +func (*yamuxAddr) Network() string { + return "yamux" +} + +func (y *yamuxAddr) String() string { + return fmt.Sprintf("yamux:%s", y.Addr) +} + +// Addr is used to get the address of the listener. +func (s *Session) Addr() net.Addr { + return s.LocalAddr() +} + +// LocalAddr is used to get the local address of the +// underlying connection. 
+func (s *Session) LocalAddr() net.Addr { + addr, ok := s.conn.(hasAddr) + if !ok { + return &yamuxAddr{"local"} + } + return addr.LocalAddr() +} + +// RemoteAddr is used to get the address of remote end +// of the underlying connection +func (s *Session) RemoteAddr() net.Addr { + addr, ok := s.conn.(hasAddr) + if !ok { + return &yamuxAddr{"remote"} + } + return addr.RemoteAddr() +} + +// LocalAddr returns the local address +func (s *Stream) LocalAddr() net.Addr { + return s.session.LocalAddr() +} + +// LocalAddr returns the remote address +func (s *Stream) RemoteAddr() net.Addr { + return s.session.RemoteAddr() +} diff --git a/vendor/github.com/hashicorp/yamux/const.go b/vendor/github.com/hashicorp/yamux/const.go new file mode 100644 index 0000000000..4f52938287 --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/const.go @@ -0,0 +1,157 @@ +package yamux + +import ( + "encoding/binary" + "fmt" +) + +var ( + // ErrInvalidVersion means we received a frame with an + // invalid version + ErrInvalidVersion = fmt.Errorf("invalid protocol version") + + // ErrInvalidMsgType means we received a frame with an + // invalid message type + ErrInvalidMsgType = fmt.Errorf("invalid msg type") + + // ErrSessionShutdown is used if there is a shutdown during + // an operation + ErrSessionShutdown = fmt.Errorf("session shutdown") + + // ErrStreamsExhausted is returned if we have no more + // stream ids to issue + ErrStreamsExhausted = fmt.Errorf("streams exhausted") + + // ErrDuplicateStream is used if a duplicate stream is + // opened inbound + ErrDuplicateStream = fmt.Errorf("duplicate stream initiated") + + // ErrReceiveWindowExceeded indicates the window was exceeded + ErrRecvWindowExceeded = fmt.Errorf("recv window exceeded") + + // ErrTimeout is used when we reach an IO deadline + ErrTimeout = fmt.Errorf("i/o deadline reached") + + // ErrStreamClosed is returned when using a closed stream + ErrStreamClosed = fmt.Errorf("stream closed") + + // ErrUnexpectedFlag is set when we get an unexpected flag + ErrUnexpectedFlag = fmt.Errorf("unexpected flag") + + // ErrRemoteGoAway is used when we get a go away from the other side + ErrRemoteGoAway = fmt.Errorf("remote end is not accepting connections") + + // ErrConnectionReset is sent if a stream is reset. This can happen + // if the backlog is exceeded, or if there was a remote GoAway. + ErrConnectionReset = fmt.Errorf("connection reset") + + // ErrConnectionWriteTimeout indicates that we hit the "safety valve" + // timeout writing to the underlying stream connection. + ErrConnectionWriteTimeout = fmt.Errorf("connection write timeout") + + // ErrKeepAliveTimeout is sent if a missed keepalive caused the stream close + ErrKeepAliveTimeout = fmt.Errorf("keepalive timeout") +) + +const ( + // protoVersion is the only version we support + protoVersion uint8 = 0 +) + +const ( + // Data is used for data frames. They are followed + // by length bytes worth of payload. + typeData uint8 = iota + + // WindowUpdate is used to change the window of + // a given stream. The length indicates the delta + // update to the window. + typeWindowUpdate + + // Ping is sent as a keep-alive or to measure + // the RTT. The StreamID and Length value are echoed + // back in the response. + typePing + + // GoAway is sent to terminate a session. The StreamID + // should be 0 and the length is an error code. + typeGoAway +) + +const ( + // SYN is sent to signal a new stream. May + // be sent with a data payload + flagSYN uint16 = 1 << iota + + // ACK is sent to acknowledge a new stream. 
May + // be sent with a data payload + flagACK + + // FIN is sent to half-close the given stream. + // May be sent with a data payload. + flagFIN + + // RST is used to hard close a given stream. + flagRST +) + +const ( + // initialStreamWindow is the initial stream window size + initialStreamWindow uint32 = 256 * 1024 +) + +const ( + // goAwayNormal is sent on a normal termination + goAwayNormal uint32 = iota + + // goAwayProtoErr sent on a protocol error + goAwayProtoErr + + // goAwayInternalErr sent on an internal error + goAwayInternalErr +) + +const ( + sizeOfVersion = 1 + sizeOfType = 1 + sizeOfFlags = 2 + sizeOfStreamID = 4 + sizeOfLength = 4 + headerSize = sizeOfVersion + sizeOfType + sizeOfFlags + + sizeOfStreamID + sizeOfLength +) + +type header []byte + +func (h header) Version() uint8 { + return h[0] +} + +func (h header) MsgType() uint8 { + return h[1] +} + +func (h header) Flags() uint16 { + return binary.BigEndian.Uint16(h[2:4]) +} + +func (h header) StreamID() uint32 { + return binary.BigEndian.Uint32(h[4:8]) +} + +func (h header) Length() uint32 { + return binary.BigEndian.Uint32(h[8:12]) +} + +func (h header) String() string { + return fmt.Sprintf("Vsn:%d Type:%d Flags:%d StreamID:%d Length:%d", + h.Version(), h.MsgType(), h.Flags(), h.StreamID(), h.Length()) +} + +func (h header) encode(msgType uint8, flags uint16, streamID uint32, length uint32) { + h[0] = protoVersion + h[1] = msgType + binary.BigEndian.PutUint16(h[2:4], flags) + binary.BigEndian.PutUint32(h[4:8], streamID) + binary.BigEndian.PutUint32(h[8:12], length) +} diff --git a/vendor/github.com/hashicorp/yamux/mux.go b/vendor/github.com/hashicorp/yamux/mux.go new file mode 100644 index 0000000000..7abc7c744c --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/mux.go @@ -0,0 +1,87 @@ +package yamux + +import ( + "fmt" + "io" + "os" + "time" +) + +// Config is used to tune the Yamux session +type Config struct { + // AcceptBacklog is used to limit how many streams may be + // waiting an accept. + AcceptBacklog int + + // EnableKeepalive is used to do a period keep alive + // messages using a ping. + EnableKeepAlive bool + + // KeepAliveInterval is how often to perform the keep alive + KeepAliveInterval time.Duration + + // ConnectionWriteTimeout is meant to be a "safety valve" timeout after + // we which will suspect a problem with the underlying connection and + // close it. This is only applied to writes, where's there's generally + // an expectation that things will move along quickly. + ConnectionWriteTimeout time.Duration + + // MaxStreamWindowSize is used to control the maximum + // window size that we allow for a stream. 
+ MaxStreamWindowSize uint32 + + // LogOutput is used to control the log destination + LogOutput io.Writer +} + +// DefaultConfig is used to return a default configuration +func DefaultConfig() *Config { + return &Config{ + AcceptBacklog: 256, + EnableKeepAlive: true, + KeepAliveInterval: 30 * time.Second, + ConnectionWriteTimeout: 10 * time.Second, + MaxStreamWindowSize: initialStreamWindow, + LogOutput: os.Stderr, + } +} + +// VerifyConfig is used to verify the sanity of configuration +func VerifyConfig(config *Config) error { + if config.AcceptBacklog <= 0 { + return fmt.Errorf("backlog must be positive") + } + if config.KeepAliveInterval == 0 { + return fmt.Errorf("keep-alive interval must be positive") + } + if config.MaxStreamWindowSize < initialStreamWindow { + return fmt.Errorf("MaxStreamWindowSize must be larger than %d", initialStreamWindow) + } + return nil +} + +// Server is used to initialize a new server-side connection. +// There must be at most one server-side connection. If a nil config is +// provided, the DefaultConfiguration will be used. +func Server(conn io.ReadWriteCloser, config *Config) (*Session, error) { + if config == nil { + config = DefaultConfig() + } + if err := VerifyConfig(config); err != nil { + return nil, err + } + return newSession(config, conn, false), nil +} + +// Client is used to initialize a new client-side connection. +// There must be at most one client-side connection. +func Client(conn io.ReadWriteCloser, config *Config) (*Session, error) { + if config == nil { + config = DefaultConfig() + } + + if err := VerifyConfig(config); err != nil { + return nil, err + } + return newSession(config, conn, true), nil +} diff --git a/vendor/github.com/hashicorp/yamux/session.go b/vendor/github.com/hashicorp/yamux/session.go new file mode 100644 index 0000000000..32ba02e023 --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/session.go @@ -0,0 +1,648 @@ +package yamux + +import ( + "bufio" + "fmt" + "io" + "io/ioutil" + "log" + "math" + "net" + "strings" + "sync" + "sync/atomic" + "time" +) + +// Session is used to wrap a reliable ordered connection and to +// multiplex it into multiple streams. +type Session struct { + // remoteGoAway indicates the remote side does + // not want futher connections. Must be first for alignment. + remoteGoAway int32 + + // localGoAway indicates that we should stop + // accepting futher connections. Must be first for alignment. + localGoAway int32 + + // nextStreamID is the next stream we should + // send. This depends if we are a client/server. + nextStreamID uint32 + + // config holds our configuration + config *Config + + // logger is used for our logs + logger *log.Logger + + // conn is the underlying connection + conn io.ReadWriteCloser + + // bufRead is a buffered reader + bufRead *bufio.Reader + + // pings is used to track inflight pings + pings map[uint32]chan struct{} + pingID uint32 + pingLock sync.Mutex + + // streams maps a stream id to a stream, and inflight has an entry + // for any outgoing stream that has not yet been established. Both are + // protected by streamLock. + streams map[uint32]*Stream + inflight map[uint32]struct{} + streamLock sync.Mutex + + // synCh acts like a semaphore. It is sized to the AcceptBacklog which + // is assumed to be symmetric between the client and server. This allows + // the client to avoid exceeding the backlog and instead blocks the open. 
+ synCh chan struct{} + + // acceptCh is used to pass ready streams to the client + acceptCh chan *Stream + + // sendCh is used to mark a stream as ready to send, + // or to send a header out directly. + sendCh chan sendReady + + // recvDoneCh is closed when recv() exits to avoid a race + // between stream registration and stream shutdown + recvDoneCh chan struct{} + + // shutdown is used to safely close a session + shutdown bool + shutdownErr error + shutdownCh chan struct{} + shutdownLock sync.Mutex +} + +// sendReady is used to either mark a stream as ready +// or to directly send a header +type sendReady struct { + Hdr []byte + Body io.Reader + Err chan error +} + +// newSession is used to construct a new session +func newSession(config *Config, conn io.ReadWriteCloser, client bool) *Session { + s := &Session{ + config: config, + logger: log.New(config.LogOutput, "", log.LstdFlags), + conn: conn, + bufRead: bufio.NewReader(conn), + pings: make(map[uint32]chan struct{}), + streams: make(map[uint32]*Stream), + inflight: make(map[uint32]struct{}), + synCh: make(chan struct{}, config.AcceptBacklog), + acceptCh: make(chan *Stream, config.AcceptBacklog), + sendCh: make(chan sendReady, 64), + recvDoneCh: make(chan struct{}), + shutdownCh: make(chan struct{}), + } + if client { + s.nextStreamID = 1 + } else { + s.nextStreamID = 2 + } + go s.recv() + go s.send() + if config.EnableKeepAlive { + go s.keepalive() + } + return s +} + +// IsClosed does a safe check to see if we have shutdown +func (s *Session) IsClosed() bool { + select { + case <-s.shutdownCh: + return true + default: + return false + } +} + +// CloseChan returns a read-only channel which is closed as +// soon as the session is closed. +func (s *Session) CloseChan() <-chan struct{} { + return s.shutdownCh +} + +// NumStreams returns the number of currently open streams +func (s *Session) NumStreams() int { + s.streamLock.Lock() + num := len(s.streams) + s.streamLock.Unlock() + return num +} + +// Open is used to create a new stream as a net.Conn +func (s *Session) Open() (net.Conn, error) { + conn, err := s.OpenStream() + if err != nil { + return nil, err + } + return conn, nil +} + +// OpenStream is used to create a new stream +func (s *Session) OpenStream() (*Stream, error) { + if s.IsClosed() { + return nil, ErrSessionShutdown + } + if atomic.LoadInt32(&s.remoteGoAway) == 1 { + return nil, ErrRemoteGoAway + } + + // Block if we have too many inflight SYNs + select { + case s.synCh <- struct{}{}: + case <-s.shutdownCh: + return nil, ErrSessionShutdown + } + +GET_ID: + // Get an ID, and check for stream exhaustion + id := atomic.LoadUint32(&s.nextStreamID) + if id >= math.MaxUint32-1 { + return nil, ErrStreamsExhausted + } + if !atomic.CompareAndSwapUint32(&s.nextStreamID, id, id+2) { + goto GET_ID + } + + // Register the stream + stream := newStream(s, id, streamInit) + s.streamLock.Lock() + s.streams[id] = stream + s.inflight[id] = struct{}{} + s.streamLock.Unlock() + + // Send the window update to create + if err := stream.sendWindowUpdate(); err != nil { + select { + case <-s.synCh: + default: + s.logger.Printf("[ERR] yamux: aborted stream open without inflight syn semaphore") + } + return nil, err + } + return stream, nil +} + +// Accept is used to block until the next available stream +// is ready to be accepted. 
+func (s *Session) Accept() (net.Conn, error) { + conn, err := s.AcceptStream() + if err != nil { + return nil, err + } + return conn, err +} + +// AcceptStream is used to block until the next available stream +// is ready to be accepted. +func (s *Session) AcceptStream() (*Stream, error) { + select { + case stream := <-s.acceptCh: + if err := stream.sendWindowUpdate(); err != nil { + return nil, err + } + return stream, nil + case <-s.shutdownCh: + return nil, s.shutdownErr + } +} + +// Close is used to close the session and all streams. +// Attempts to send a GoAway before closing the connection. +func (s *Session) Close() error { + s.shutdownLock.Lock() + defer s.shutdownLock.Unlock() + + if s.shutdown { + return nil + } + s.shutdown = true + if s.shutdownErr == nil { + s.shutdownErr = ErrSessionShutdown + } + close(s.shutdownCh) + s.conn.Close() + <-s.recvDoneCh + + s.streamLock.Lock() + defer s.streamLock.Unlock() + for _, stream := range s.streams { + stream.forceClose() + } + return nil +} + +// exitErr is used to handle an error that is causing the +// session to terminate. +func (s *Session) exitErr(err error) { + s.shutdownLock.Lock() + if s.shutdownErr == nil { + s.shutdownErr = err + } + s.shutdownLock.Unlock() + s.Close() +} + +// GoAway can be used to prevent accepting further +// connections. It does not close the underlying conn. +func (s *Session) GoAway() error { + return s.waitForSend(s.goAway(goAwayNormal), nil) +} + +// goAway is used to send a goAway message +func (s *Session) goAway(reason uint32) header { + atomic.SwapInt32(&s.localGoAway, 1) + hdr := header(make([]byte, headerSize)) + hdr.encode(typeGoAway, 0, 0, reason) + return hdr +} + +// Ping is used to measure the RTT response time +func (s *Session) Ping() (time.Duration, error) { + // Get a channel for the ping + ch := make(chan struct{}) + + // Get a new ping id, mark as pending + s.pingLock.Lock() + id := s.pingID + s.pingID++ + s.pings[id] = ch + s.pingLock.Unlock() + + // Send the ping request + hdr := header(make([]byte, headerSize)) + hdr.encode(typePing, flagSYN, 0, id) + if err := s.waitForSend(hdr, nil); err != nil { + return 0, err + } + + // Wait for a response + start := time.Now() + select { + case <-ch: + case <-time.After(s.config.ConnectionWriteTimeout): + s.pingLock.Lock() + delete(s.pings, id) // Ignore it if a response comes later. + s.pingLock.Unlock() + return 0, ErrTimeout + case <-s.shutdownCh: + return 0, ErrSessionShutdown + } + + // Compute the RTT + return time.Now().Sub(start), nil +} + +// keepalive is a long running goroutine that periodically does +// a ping to keep the connection alive. +func (s *Session) keepalive() { + for { + select { + case <-time.After(s.config.KeepAliveInterval): + _, err := s.Ping() + if err != nil { + if err != ErrSessionShutdown { + s.logger.Printf("[ERR] yamux: keepalive failed: %v", err) + s.exitErr(ErrKeepAliveTimeout) + } + return + } + case <-s.shutdownCh: + return + } + } +} + +// waitForSendErr waits to send a header, checking for a potential shutdown +func (s *Session) waitForSend(hdr header, body io.Reader) error { + errCh := make(chan error, 1) + return s.waitForSendErr(hdr, body, errCh) +} + +// waitForSendErr waits to send a header with optional data, checking for a +// potential shutdown. Since there's the expectation that sends can happen +// in a timely manner, we enforce the connection write timeout here. 
+func (s *Session) waitForSendErr(hdr header, body io.Reader, errCh chan error) error { + t := timerPool.Get() + timer := t.(*time.Timer) + timer.Reset(s.config.ConnectionWriteTimeout) + defer func() { + timer.Stop() + select { + case <-timer.C: + default: + } + timerPool.Put(t) + }() + + ready := sendReady{Hdr: hdr, Body: body, Err: errCh} + select { + case s.sendCh <- ready: + case <-s.shutdownCh: + return ErrSessionShutdown + case <-timer.C: + return ErrConnectionWriteTimeout + } + + select { + case err := <-errCh: + return err + case <-s.shutdownCh: + return ErrSessionShutdown + case <-timer.C: + return ErrConnectionWriteTimeout + } +} + +// sendNoWait does a send without waiting. Since there's the expectation that +// the send happens right here, we enforce the connection write timeout if we +// can't queue the header to be sent. +func (s *Session) sendNoWait(hdr header) error { + t := timerPool.Get() + timer := t.(*time.Timer) + timer.Reset(s.config.ConnectionWriteTimeout) + defer func() { + timer.Stop() + select { + case <-timer.C: + default: + } + timerPool.Put(t) + }() + + select { + case s.sendCh <- sendReady{Hdr: hdr}: + return nil + case <-s.shutdownCh: + return ErrSessionShutdown + case <-timer.C: + return ErrConnectionWriteTimeout + } +} + +// send is a long running goroutine that sends data +func (s *Session) send() { + for { + select { + case ready := <-s.sendCh: + // Send a header if ready + if ready.Hdr != nil { + sent := 0 + for sent < len(ready.Hdr) { + n, err := s.conn.Write(ready.Hdr[sent:]) + if err != nil { + s.logger.Printf("[ERR] yamux: Failed to write header: %v", err) + asyncSendErr(ready.Err, err) + s.exitErr(err) + return + } + sent += n + } + } + + // Send data from a body if given + if ready.Body != nil { + _, err := io.Copy(s.conn, ready.Body) + if err != nil { + s.logger.Printf("[ERR] yamux: Failed to write body: %v", err) + asyncSendErr(ready.Err, err) + s.exitErr(err) + return + } + } + + // No error, successful send + asyncSendErr(ready.Err, nil) + case <-s.shutdownCh: + return + } + } +} + +// recv is a long running goroutine that accepts new data +func (s *Session) recv() { + if err := s.recvLoop(); err != nil { + s.exitErr(err) + } +} + +// Ensure that the index of the handler (typeData/typeWindowUpdate/etc) matches the message type +var ( + handlers = []func(*Session, header) error{ + typeData: (*Session).handleStreamMessage, + typeWindowUpdate: (*Session).handleStreamMessage, + typePing: (*Session).handlePing, + typeGoAway: (*Session).handleGoAway, + } +) + +// recvLoop continues to receive data until a fatal error is encountered +func (s *Session) recvLoop() error { + defer close(s.recvDoneCh) + hdr := header(make([]byte, headerSize)) + for { + // Read the header + if _, err := io.ReadFull(s.bufRead, hdr); err != nil { + if err != io.EOF && !strings.Contains(err.Error(), "closed") && !strings.Contains(err.Error(), "reset by peer") { + s.logger.Printf("[ERR] yamux: Failed to read header: %v", err) + } + return err + } + + // Verify the version + if hdr.Version() != protoVersion { + s.logger.Printf("[ERR] yamux: Invalid protocol version: %d", hdr.Version()) + return ErrInvalidVersion + } + + mt := hdr.MsgType() + if mt < typeData || mt > typeGoAway { + return ErrInvalidMsgType + } + + if err := handlers[mt](s, hdr); err != nil { + return err + } + } +} + +// handleStreamMessage handles either a data or window update frame +func (s *Session) handleStreamMessage(hdr header) error { + // Check for a new stream creation + id := hdr.StreamID() + flags := 
hdr.Flags() + if flags&flagSYN == flagSYN { + if err := s.incomingStream(id); err != nil { + return err + } + } + + // Get the stream + s.streamLock.Lock() + stream := s.streams[id] + s.streamLock.Unlock() + + // If we do not have a stream, likely we sent a RST + if stream == nil { + // Drain any data on the wire + if hdr.MsgType() == typeData && hdr.Length() > 0 { + s.logger.Printf("[WARN] yamux: Discarding data for stream: %d", id) + if _, err := io.CopyN(ioutil.Discard, s.bufRead, int64(hdr.Length())); err != nil { + s.logger.Printf("[ERR] yamux: Failed to discard data: %v", err) + return nil + } + } else { + s.logger.Printf("[WARN] yamux: frame for missing stream: %v", hdr) + } + return nil + } + + // Check if this is a window update + if hdr.MsgType() == typeWindowUpdate { + if err := stream.incrSendWindow(hdr, flags); err != nil { + if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil { + s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr) + } + return err + } + return nil + } + + // Read the new data + if err := stream.readData(hdr, flags, s.bufRead); err != nil { + if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil { + s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr) + } + return err + } + return nil +} + +// handlePing is invokde for a typePing frame +func (s *Session) handlePing(hdr header) error { + flags := hdr.Flags() + pingID := hdr.Length() + + // Check if this is a query, respond back in a separate context so we + // don't interfere with the receiving thread blocking for the write. + if flags&flagSYN == flagSYN { + go func() { + hdr := header(make([]byte, headerSize)) + hdr.encode(typePing, flagACK, 0, pingID) + if err := s.sendNoWait(hdr); err != nil { + s.logger.Printf("[WARN] yamux: failed to send ping reply: %v", err) + } + }() + return nil + } + + // Handle a response + s.pingLock.Lock() + ch := s.pings[pingID] + if ch != nil { + delete(s.pings, pingID) + close(ch) + } + s.pingLock.Unlock() + return nil +} + +// handleGoAway is invokde for a typeGoAway frame +func (s *Session) handleGoAway(hdr header) error { + code := hdr.Length() + switch code { + case goAwayNormal: + atomic.SwapInt32(&s.remoteGoAway, 1) + case goAwayProtoErr: + s.logger.Printf("[ERR] yamux: received protocol error go away") + return fmt.Errorf("yamux protocol error") + case goAwayInternalErr: + s.logger.Printf("[ERR] yamux: received internal error go away") + return fmt.Errorf("remote yamux internal error") + default: + s.logger.Printf("[ERR] yamux: received unexpected go away") + return fmt.Errorf("unexpected go away received") + } + return nil +} + +// incomingStream is used to create a new incoming stream +func (s *Session) incomingStream(id uint32) error { + // Reject immediately if we are doing a go away + if atomic.LoadInt32(&s.localGoAway) == 1 { + hdr := header(make([]byte, headerSize)) + hdr.encode(typeWindowUpdate, flagRST, id, 0) + return s.sendNoWait(hdr) + } + + // Allocate a new stream + stream := newStream(s, id, streamSYNReceived) + + s.streamLock.Lock() + defer s.streamLock.Unlock() + + // Check if stream already exists + if _, ok := s.streams[id]; ok { + s.logger.Printf("[ERR] yamux: duplicate stream declared") + if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil { + s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr) + } + return ErrDuplicateStream + } + + // Register the stream + s.streams[id] = stream + + // Check if we've exceeded the backlog + select { + case s.acceptCh <- 
stream: + return nil + default: + // Backlog exceeded! RST the stream + s.logger.Printf("[WARN] yamux: backlog exceeded, forcing connection reset") + delete(s.streams, id) + stream.sendHdr.encode(typeWindowUpdate, flagRST, id, 0) + return s.sendNoWait(stream.sendHdr) + } +} + +// closeStream is used to close a stream once both sides have +// issued a close. If there was an in-flight SYN and the stream +// was not yet established, then this will give the credit back. +func (s *Session) closeStream(id uint32) { + s.streamLock.Lock() + if _, ok := s.inflight[id]; ok { + select { + case <-s.synCh: + default: + s.logger.Printf("[ERR] yamux: SYN tracking out of sync") + } + } + delete(s.streams, id) + s.streamLock.Unlock() +} + +// establishStream is used to mark a stream that was in the +// SYN Sent state as established. +func (s *Session) establishStream(id uint32) { + s.streamLock.Lock() + if _, ok := s.inflight[id]; ok { + delete(s.inflight, id) + } else { + s.logger.Printf("[ERR] yamux: established stream without inflight SYN (no tracking entry)") + } + select { + case <-s.synCh: + default: + s.logger.Printf("[ERR] yamux: established stream without inflight SYN (didn't have semaphore)") + } + s.streamLock.Unlock() +} diff --git a/vendor/github.com/hashicorp/yamux/spec.md b/vendor/github.com/hashicorp/yamux/spec.md new file mode 100644 index 0000000000..183d797bde --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/spec.md @@ -0,0 +1,140 @@ +# Specification + +We use this document to detail the internal specification of Yamux. +This is used both as a guide for implementing Yamux, but also for +alternative interoperable libraries to be built. + +# Framing + +Yamux uses a streaming connection underneath, but imposes a message +framing so that it can be shared between many logical streams. Each +frame contains a header like: + +* Version (8 bits) +* Type (8 bits) +* Flags (16 bits) +* StreamID (32 bits) +* Length (32 bits) + +This means that each header has a 12 byte overhead. +All fields are encoded in network order (big endian). +Each field is described below: + +## Version Field + +The version field is used for future backward compatibility. At the +current time, the field is always set to 0, to indicate the initial +version. + +## Type Field + +The type field is used to switch the frame message type. The following +message types are supported: + +* 0x0 Data - Used to transmit data. May transmit zero length payloads + depending on the flags. + +* 0x1 Window Update - Used to updated the senders receive window size. + This is used to implement per-session flow control. + +* 0x2 Ping - Used to measure RTT. It can also be used to heart-beat + and do keep-alives over TCP. + +* 0x3 Go Away - Used to close a session. + +## Flag Field + +The flags field is used to provide additional information related +to the message type. The following flags are supported: + +* 0x1 SYN - Signals the start of a new stream. May be sent with a data or + window update message. Also sent with a ping to indicate outbound. + +* 0x2 ACK - Acknowledges the start of a new stream. May be sent with a data + or window update message. Also sent with a ping to indicate response. + +* 0x4 FIN - Performs a half-close of a stream. May be sent with a data + message or window update. + +* 0x8 RST - Reset a stream immediately. May be sent with a data or + window update message. + +## StreamID Field + +The StreamID field is used to identify the logical stream the frame +is addressing. 
The client side should use odd ID's, and the server even. +This prevents any collisions. Additionally, the 0 ID is reserved to represent +the session. + +Both Ping and Go Away messages should always use the 0 StreamID. + +## Length Field + +The meaning of the length field depends on the message type: + +* Data - provides the length of bytes following the header +* Window update - provides a delta update to the window size +* Ping - Contains an opaque value, echoed back +* Go Away - Contains an error code + +# Message Flow + +There is no explicit connection setup, as Yamux relies on an underlying +transport to be provided. However, there is a distinction between client +and server side of the connection. + +## Opening a stream + +To open a stream, an initial data or window update frame is sent +with a new StreamID. The SYN flag should be set to signal a new stream. + +The receiver must then reply with either a data or window update frame +with the StreamID along with the ACK flag to accept the stream or with +the RST flag to reject the stream. + +Because we are relying on the reliable stream underneath, a connection +can begin sending data once the SYN flag is sent. The corresponding +ACK does not need to be received. This is particularly well suited +for an RPC system where a client wants to open a stream and immediately +fire a request without waiting for the RTT of the ACK. + +This does introduce the possibility of a connection being rejected +after data has been sent already. This is a slight semantic difference +from TCP, where the conection cannot be refused after it is opened. +Clients should be prepared to handle this by checking for an error +that indicates a RST was received. + +## Closing a stream + +To close a stream, either side sends a data or window update frame +along with the FIN flag. This does a half-close indicating the sender +will send no further data. + +Once both sides have closed the connection, the stream is closed. + +Alternatively, if an error occurs, the RST flag can be used to +hard close a stream immediately. + +## Flow Control + +When Yamux is initially starts each stream with a 256KB window size. +There is no window size for the session. + +To prevent the streams from stalling, window update frames should be +sent regularly. Yamux can be configured to provide a larger limit for +windows sizes. Both sides assume the initial 256KB window, but can +immediately send a window update as part of the SYN/ACK indicating a +larger window. + +Both sides should track the number of bytes sent in Data frames +only, as only they are tracked as part of the window size. + +## Session termination + +When a session is being terminated, the Go Away message should +be sent. The Length should be set to one of the following to +provide an error code: + +* 0x0 Normal termination +* 0x1 Protocol error +* 0x2 Internal error diff --git a/vendor/github.com/hashicorp/yamux/stream.go b/vendor/github.com/hashicorp/yamux/stream.go new file mode 100644 index 0000000000..aa23919739 --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/stream.go @@ -0,0 +1,470 @@ +package yamux + +import ( + "bytes" + "io" + "sync" + "sync/atomic" + "time" +) + +type streamState int + +const ( + streamInit streamState = iota + streamSYNSent + streamSYNReceived + streamEstablished + streamLocalClose + streamRemoteClose + streamClosed + streamReset +) + +// Stream is used to represent a logical stream +// within a session. 
+type Stream struct { + recvWindow uint32 + sendWindow uint32 + + id uint32 + session *Session + + state streamState + stateLock sync.Mutex + + recvBuf *bytes.Buffer + recvLock sync.Mutex + + controlHdr header + controlErr chan error + controlHdrLock sync.Mutex + + sendHdr header + sendErr chan error + sendLock sync.Mutex + + recvNotifyCh chan struct{} + sendNotifyCh chan struct{} + + readDeadline atomic.Value // time.Time + writeDeadline atomic.Value // time.Time +} + +// newStream is used to construct a new stream within +// a given session for an ID +func newStream(session *Session, id uint32, state streamState) *Stream { + s := &Stream{ + id: id, + session: session, + state: state, + controlHdr: header(make([]byte, headerSize)), + controlErr: make(chan error, 1), + sendHdr: header(make([]byte, headerSize)), + sendErr: make(chan error, 1), + recvWindow: initialStreamWindow, + sendWindow: initialStreamWindow, + recvNotifyCh: make(chan struct{}, 1), + sendNotifyCh: make(chan struct{}, 1), + } + s.readDeadline.Store(time.Time{}) + s.writeDeadline.Store(time.Time{}) + return s +} + +// Session returns the associated stream session +func (s *Stream) Session() *Session { + return s.session +} + +// StreamID returns the ID of this stream +func (s *Stream) StreamID() uint32 { + return s.id +} + +// Read is used to read from the stream +func (s *Stream) Read(b []byte) (n int, err error) { + defer asyncNotify(s.recvNotifyCh) +START: + s.stateLock.Lock() + switch s.state { + case streamLocalClose: + fallthrough + case streamRemoteClose: + fallthrough + case streamClosed: + s.recvLock.Lock() + if s.recvBuf == nil || s.recvBuf.Len() == 0 { + s.recvLock.Unlock() + s.stateLock.Unlock() + return 0, io.EOF + } + s.recvLock.Unlock() + case streamReset: + s.stateLock.Unlock() + return 0, ErrConnectionReset + } + s.stateLock.Unlock() + + // If there is no data available, block + s.recvLock.Lock() + if s.recvBuf == nil || s.recvBuf.Len() == 0 { + s.recvLock.Unlock() + goto WAIT + } + + // Read any bytes + n, _ = s.recvBuf.Read(b) + s.recvLock.Unlock() + + // Send a window update potentially + err = s.sendWindowUpdate() + return n, err + +WAIT: + var timeout <-chan time.Time + var timer *time.Timer + readDeadline := s.readDeadline.Load().(time.Time) + if !readDeadline.IsZero() { + delay := readDeadline.Sub(time.Now()) + timer = time.NewTimer(delay) + timeout = timer.C + } + select { + case <-s.recvNotifyCh: + if timer != nil { + timer.Stop() + } + goto START + case <-timeout: + return 0, ErrTimeout + } +} + +// Write is used to write to the stream +func (s *Stream) Write(b []byte) (n int, err error) { + s.sendLock.Lock() + defer s.sendLock.Unlock() + total := 0 + for total < len(b) { + n, err := s.write(b[total:]) + total += n + if err != nil { + return total, err + } + } + return total, nil +} + +// write is used to write to the stream, may return on +// a short write. 
+func (s *Stream) write(b []byte) (n int, err error) { + var flags uint16 + var max uint32 + var body io.Reader +START: + s.stateLock.Lock() + switch s.state { + case streamLocalClose: + fallthrough + case streamClosed: + s.stateLock.Unlock() + return 0, ErrStreamClosed + case streamReset: + s.stateLock.Unlock() + return 0, ErrConnectionReset + } + s.stateLock.Unlock() + + // If there is no data available, block + window := atomic.LoadUint32(&s.sendWindow) + if window == 0 { + goto WAIT + } + + // Determine the flags if any + flags = s.sendFlags() + + // Send up to our send window + max = min(window, uint32(len(b))) + body = bytes.NewReader(b[:max]) + + // Send the header + s.sendHdr.encode(typeData, flags, s.id, max) + if err = s.session.waitForSendErr(s.sendHdr, body, s.sendErr); err != nil { + return 0, err + } + + // Reduce our send window + atomic.AddUint32(&s.sendWindow, ^uint32(max-1)) + + // Unlock + return int(max), err + +WAIT: + var timeout <-chan time.Time + writeDeadline := s.writeDeadline.Load().(time.Time) + if !writeDeadline.IsZero() { + delay := writeDeadline.Sub(time.Now()) + timeout = time.After(delay) + } + select { + case <-s.sendNotifyCh: + goto START + case <-timeout: + return 0, ErrTimeout + } + return 0, nil +} + +// sendFlags determines any flags that are appropriate +// based on the current stream state +func (s *Stream) sendFlags() uint16 { + s.stateLock.Lock() + defer s.stateLock.Unlock() + var flags uint16 + switch s.state { + case streamInit: + flags |= flagSYN + s.state = streamSYNSent + case streamSYNReceived: + flags |= flagACK + s.state = streamEstablished + } + return flags +} + +// sendWindowUpdate potentially sends a window update enabling +// further writes to take place. Must be invoked with the lock. +func (s *Stream) sendWindowUpdate() error { + s.controlHdrLock.Lock() + defer s.controlHdrLock.Unlock() + + // Determine the delta update + max := s.session.config.MaxStreamWindowSize + var bufLen uint32 + s.recvLock.Lock() + if s.recvBuf != nil { + bufLen = uint32(s.recvBuf.Len()) + } + delta := (max - bufLen) - s.recvWindow + + // Determine the flags if any + flags := s.sendFlags() + + // Check if we can omit the update + if delta < (max/2) && flags == 0 { + s.recvLock.Unlock() + return nil + } + + // Update our window + s.recvWindow += delta + s.recvLock.Unlock() + + // Send the header + s.controlHdr.encode(typeWindowUpdate, flags, s.id, delta) + if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil { + return err + } + return nil +} + +// sendClose is used to send a FIN +func (s *Stream) sendClose() error { + s.controlHdrLock.Lock() + defer s.controlHdrLock.Unlock() + + flags := s.sendFlags() + flags |= flagFIN + s.controlHdr.encode(typeWindowUpdate, flags, s.id, 0) + if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil { + return err + } + return nil +} + +// Close is used to close the stream +func (s *Stream) Close() error { + closeStream := false + s.stateLock.Lock() + switch s.state { + // Opened means we need to signal a close + case streamSYNSent: + fallthrough + case streamSYNReceived: + fallthrough + case streamEstablished: + s.state = streamLocalClose + goto SEND_CLOSE + + case streamLocalClose: + case streamRemoteClose: + s.state = streamClosed + closeStream = true + goto SEND_CLOSE + + case streamClosed: + case streamReset: + default: + panic("unhandled state") + } + s.stateLock.Unlock() + return nil +SEND_CLOSE: + s.stateLock.Unlock() + s.sendClose() + s.notifyWaiting() + if 
closeStream { + s.session.closeStream(s.id) + } + return nil +} + +// forceClose is used for when the session is exiting +func (s *Stream) forceClose() { + s.stateLock.Lock() + s.state = streamClosed + s.stateLock.Unlock() + s.notifyWaiting() +} + +// processFlags is used to update the state of the stream +// based on set flags, if any. Lock must be held +func (s *Stream) processFlags(flags uint16) error { + // Close the stream without holding the state lock + closeStream := false + defer func() { + if closeStream { + s.session.closeStream(s.id) + } + }() + + s.stateLock.Lock() + defer s.stateLock.Unlock() + if flags&flagACK == flagACK { + if s.state == streamSYNSent { + s.state = streamEstablished + } + s.session.establishStream(s.id) + } + if flags&flagFIN == flagFIN { + switch s.state { + case streamSYNSent: + fallthrough + case streamSYNReceived: + fallthrough + case streamEstablished: + s.state = streamRemoteClose + s.notifyWaiting() + case streamLocalClose: + s.state = streamClosed + closeStream = true + s.notifyWaiting() + default: + s.session.logger.Printf("[ERR] yamux: unexpected FIN flag in state %d", s.state) + return ErrUnexpectedFlag + } + } + if flags&flagRST == flagRST { + s.state = streamReset + closeStream = true + s.notifyWaiting() + } + return nil +} + +// notifyWaiting notifies all the waiting channels +func (s *Stream) notifyWaiting() { + asyncNotify(s.recvNotifyCh) + asyncNotify(s.sendNotifyCh) +} + +// incrSendWindow updates the size of our send window +func (s *Stream) incrSendWindow(hdr header, flags uint16) error { + if err := s.processFlags(flags); err != nil { + return err + } + + // Increase window, unblock a sender + atomic.AddUint32(&s.sendWindow, hdr.Length()) + asyncNotify(s.sendNotifyCh) + return nil +} + +// readData is used to handle a data frame +func (s *Stream) readData(hdr header, flags uint16, conn io.Reader) error { + if err := s.processFlags(flags); err != nil { + return err + } + + // Check that our recv window is not exceeded + length := hdr.Length() + if length == 0 { + return nil + } + + // Wrap in a limited reader + conn = &io.LimitedReader{R: conn, N: int64(length)} + + // Copy into buffer + s.recvLock.Lock() + + if length > s.recvWindow { + s.session.logger.Printf("[ERR] yamux: receive window exceeded (stream: %d, remain: %d, recv: %d)", s.id, s.recvWindow, length) + return ErrRecvWindowExceeded + } + + if s.recvBuf == nil { + // Allocate the receive buffer just-in-time to fit the full data frame. + // This way we can read in the whole packet without further allocations. + s.recvBuf = bytes.NewBuffer(make([]byte, 0, length)) + } + if _, err := io.Copy(s.recvBuf, conn); err != nil { + s.session.logger.Printf("[ERR] yamux: Failed to read stream data: %v", err) + s.recvLock.Unlock() + return err + } + + // Decrement the receive window + s.recvWindow -= length + s.recvLock.Unlock() + + // Unblock any readers + asyncNotify(s.recvNotifyCh) + return nil +} + +// SetDeadline sets the read and write deadlines +func (s *Stream) SetDeadline(t time.Time) error { + if err := s.SetReadDeadline(t); err != nil { + return err + } + if err := s.SetWriteDeadline(t); err != nil { + return err + } + return nil +} + +// SetReadDeadline sets the deadline for future Read calls. 
+func (s *Stream) SetReadDeadline(t time.Time) error { + s.readDeadline.Store(t) + return nil +} + +// SetWriteDeadline sets the deadline for future Write calls +func (s *Stream) SetWriteDeadline(t time.Time) error { + s.writeDeadline.Store(t) + return nil +} + +// Shrink is used to compact the amount of buffers utilized +// This is useful when using Yamux in a connection pool to reduce +// the idle memory utilization. +func (s *Stream) Shrink() { + s.recvLock.Lock() + if s.recvBuf != nil && s.recvBuf.Len() == 0 { + s.recvBuf = nil + } + s.recvLock.Unlock() +} diff --git a/vendor/github.com/hashicorp/yamux/util.go b/vendor/github.com/hashicorp/yamux/util.go new file mode 100644 index 0000000000..8a73e9249a --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/util.go @@ -0,0 +1,43 @@ +package yamux + +import ( + "sync" + "time" +) + +var ( + timerPool = &sync.Pool{ + New: func() interface{} { + timer := time.NewTimer(time.Hour * 1e6) + timer.Stop() + return timer + }, + } +) + +// asyncSendErr is used to try an async send of an error +func asyncSendErr(ch chan error, err error) { + if ch == nil { + return + } + select { + case ch <- err: + default: + } +} + +// asyncNotify is used to signal a waiting goroutine +func asyncNotify(ch chan struct{}) { + select { + case ch <- struct{}{}: + default: + } +} + +// min computes the minimum of two values +func min(a, b uint32) uint32 { + if a < b { + return a + } + return b +} diff --git a/vendor/github.com/mattn/go-colorable/.travis.yml b/vendor/github.com/mattn/go-colorable/.travis.yml index 98db8f060b..7942c565ce 100644 --- a/vendor/github.com/mattn/go-colorable/.travis.yml +++ b/vendor/github.com/mattn/go-colorable/.travis.yml @@ -1,9 +1,15 @@ language: go +sudo: false go: + - 1.13.x - tip before_install: - - go get github.com/mattn/goveralls - - go get golang.org/x/tools/cmd/cover + - go get -t -v ./... + script: - - $HOME/gopath/bin/goveralls -repotoken xnXqRGwgW3SXIguzxf90ZSK1GPYZPaGrw + - ./go.test.sh + +after_success: + - bash <(curl -s https://codecov.io/bash) + diff --git a/vendor/github.com/mattn/go-colorable/README.md b/vendor/github.com/mattn/go-colorable/README.md index 56729a92ca..e055952b66 100644 --- a/vendor/github.com/mattn/go-colorable/README.md +++ b/vendor/github.com/mattn/go-colorable/README.md @@ -1,8 +1,8 @@ # go-colorable -[![Godoc Reference](https://godoc.org/github.com/mattn/go-colorable?status.svg)](http://godoc.org/github.com/mattn/go-colorable) [![Build Status](https://travis-ci.org/mattn/go-colorable.svg?branch=master)](https://travis-ci.org/mattn/go-colorable) -[![Coverage Status](https://coveralls.io/repos/github/mattn/go-colorable/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-colorable?branch=master) +[![Codecov](https://codecov.io/gh/mattn/go-colorable/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-colorable) +[![GoDoc](https://godoc.org/github.com/mattn/go-colorable?status.svg)](http://godoc.org/github.com/mattn/go-colorable) [![Go Report Card](https://goreportcard.com/badge/mattn/go-colorable)](https://goreportcard.com/report/mattn/go-colorable) Colorable writer for windows. 
diff --git a/vendor/github.com/mattn/go-colorable/colorable_appengine.go b/vendor/github.com/mattn/go-colorable/colorable_appengine.go index 0b0aef8370..1f7806fe16 100644 --- a/vendor/github.com/mattn/go-colorable/colorable_appengine.go +++ b/vendor/github.com/mattn/go-colorable/colorable_appengine.go @@ -27,3 +27,11 @@ func NewColorableStdout() io.Writer { func NewColorableStderr() io.Writer { return os.Stderr } + +// EnableColorsStdout enable colors if possible. +func EnableColorsStdout(enabled *bool) func() { + if enabled != nil { + *enabled = true + } + return func() {} +} diff --git a/vendor/github.com/mattn/go-colorable/colorable_others.go b/vendor/github.com/mattn/go-colorable/colorable_others.go index 3fb771dcca..08cbd1e0fa 100644 --- a/vendor/github.com/mattn/go-colorable/colorable_others.go +++ b/vendor/github.com/mattn/go-colorable/colorable_others.go @@ -28,3 +28,11 @@ func NewColorableStdout() io.Writer { func NewColorableStderr() io.Writer { return os.Stderr } + +// EnableColorsStdout enable colors if possible. +func EnableColorsStdout(enabled *bool) func() { + if enabled != nil { + *enabled = true + } + return func() {} +} diff --git a/vendor/github.com/mattn/go-colorable/colorable_windows.go b/vendor/github.com/mattn/go-colorable/colorable_windows.go index 1bd628f25c..b9e936344c 100644 --- a/vendor/github.com/mattn/go-colorable/colorable_windows.go +++ b/vendor/github.com/mattn/go-colorable/colorable_windows.go @@ -27,6 +27,8 @@ const ( backgroundRed = 0x40 backgroundIntensity = 0x80 backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity) + + cENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x4 ) const ( @@ -78,6 +80,8 @@ var ( procGetConsoleCursorInfo = kernel32.NewProc("GetConsoleCursorInfo") procSetConsoleCursorInfo = kernel32.NewProc("SetConsoleCursorInfo") procSetConsoleTitle = kernel32.NewProc("SetConsoleTitleW") + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") + procSetConsoleMode = kernel32.NewProc("SetConsoleMode") procCreateConsoleScreenBuffer = kernel32.NewProc("CreateConsoleScreenBuffer") ) @@ -98,6 +102,10 @@ func NewColorable(file *os.File) io.Writer { } if isatty.IsTerminal(file.Fd()) { + var mode uint32 + if r, _, _ := procGetConsoleMode.Call(file.Fd(), uintptr(unsafe.Pointer(&mode))); r != 0 && mode&cENABLE_VIRTUAL_TERMINAL_PROCESSING != 0 { + return file + } var csbi consoleScreenBufferInfo handle := syscall.Handle(file.Fd()) procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) @@ -1003,3 +1011,23 @@ func n256setup() { n256backAttr[i] = c.backgroundAttr() } } + +// EnableColorsStdout enable colors if possible. 
+func EnableColorsStdout(enabled *bool) func() { + var mode uint32 + h := os.Stdout.Fd() + if r, _, _ := procGetConsoleMode.Call(h, uintptr(unsafe.Pointer(&mode))); r != 0 { + if r, _, _ = procSetConsoleMode.Call(h, uintptr(mode|cENABLE_VIRTUAL_TERMINAL_PROCESSING)); r != 0 { + if enabled != nil { + *enabled = true + } + return func() { + procSetConsoleMode.Call(h, uintptr(mode)) + } + } + } + if enabled != nil { + *enabled = true + } + return func() {} +} diff --git a/vendor/github.com/mattn/go-colorable/go.mod b/vendor/github.com/mattn/go-colorable/go.mod index ef3ca9d4c3..1e590b8199 100644 --- a/vendor/github.com/mattn/go-colorable/go.mod +++ b/vendor/github.com/mattn/go-colorable/go.mod @@ -1,3 +1,8 @@ module github.com/mattn/go-colorable -require github.com/mattn/go-isatty v0.0.8 +require ( + github.com/mattn/go-isatty v0.0.12 + golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae // indirect +) + +go 1.13 diff --git a/vendor/github.com/mattn/go-colorable/go.sum b/vendor/github.com/mattn/go-colorable/go.sum index 2c12960ec7..cf5b95d97c 100644 --- a/vendor/github.com/mattn/go-colorable/go.sum +++ b/vendor/github.com/mattn/go-colorable/go.sum @@ -1,4 +1,5 @@ -github.com/mattn/go-isatty v0.0.5 h1:tHXDdz1cpzGaovsTB+TVB8q90WEokoVmfMqoVcrLUgw= -github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae h1:/WDfKMnPU+m5M4xB+6x4kaepxRw6jWvR5iDRdvjHgy8= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/mattn/go-colorable/go.test.sh b/vendor/github.com/mattn/go-colorable/go.test.sh new file mode 100644 index 0000000000..012162b077 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/go.test.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -e +echo "" > coverage.txt + +for d in $(go list ./... | grep -v vendor); do + go test -race -coverprofile=profile.out -covermode=atomic "$d" + if [ -f profile.out ]; then + cat profile.out >> coverage.txt + rm profile.out + fi +done diff --git a/vendor/github.com/mattn/go-isatty/.travis.yml b/vendor/github.com/mattn/go-isatty/.travis.yml index 5597e026dd..604314dd44 100644 --- a/vendor/github.com/mattn/go-isatty/.travis.yml +++ b/vendor/github.com/mattn/go-isatty/.travis.yml @@ -1,13 +1,14 @@ language: go +sudo: false go: + - 1.13.x - tip -os: - - linux - - osx - before_install: - - go get github.com/mattn/goveralls - - go get golang.org/x/tools/cmd/cover + - go get -t -v ./... 
+ script: - - $HOME/gopath/bin/goveralls -repotoken 3gHdORO5k5ziZcWMBxnd9LrMZaJs8m9x5 + - ./go.test.sh + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/mattn/go-isatty/README.md b/vendor/github.com/mattn/go-isatty/README.md index 1e69004bb0..38418353e3 100644 --- a/vendor/github.com/mattn/go-isatty/README.md +++ b/vendor/github.com/mattn/go-isatty/README.md @@ -1,7 +1,7 @@ # go-isatty [![Godoc Reference](https://godoc.org/github.com/mattn/go-isatty?status.svg)](http://godoc.org/github.com/mattn/go-isatty) -[![Build Status](https://travis-ci.org/mattn/go-isatty.svg?branch=master)](https://travis-ci.org/mattn/go-isatty) +[![Codecov](https://codecov.io/gh/mattn/go-isatty/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-isatty) [![Coverage Status](https://coveralls.io/repos/github/mattn/go-isatty/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-isatty?branch=master) [![Go Report Card](https://goreportcard.com/badge/mattn/go-isatty)](https://goreportcard.com/report/mattn/go-isatty) diff --git a/vendor/github.com/mattn/go-isatty/go.mod b/vendor/github.com/mattn/go-isatty/go.mod index a8ddf404fc..605c4c2210 100644 --- a/vendor/github.com/mattn/go-isatty/go.mod +++ b/vendor/github.com/mattn/go-isatty/go.mod @@ -1,5 +1,5 @@ module github.com/mattn/go-isatty -require golang.org/x/sys v0.0.0-20191008105621-543471e840be +go 1.12 -go 1.14 +require golang.org/x/sys v0.0.0-20200116001909-b77594299b42 diff --git a/vendor/github.com/mattn/go-isatty/go.sum b/vendor/github.com/mattn/go-isatty/go.sum index c141fc53a9..912e29cbc1 100644 --- a/vendor/github.com/mattn/go-isatty/go.sum +++ b/vendor/github.com/mattn/go-isatty/go.sum @@ -1,4 +1,2 @@ -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a h1:aYOabOQFp6Vj6W1F80affTUvO9UxmJRx8K0gsfABByQ= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191008105621-543471e840be h1:QAcqgptGM8IQBC9K/RC4o+O9YmqEm0diQn9QmZw/0mU= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42 h1:vEOn+mP2zCOVzKckCZy6YsCtDblrpj/w7B9nxGNELpg= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/mattn/go-isatty/go.test.sh b/vendor/github.com/mattn/go-isatty/go.test.sh new file mode 100644 index 0000000000..012162b077 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/go.test.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -e +echo "" > coverage.txt + +for d in $(go list ./... | grep -v vendor); do + go test -race -coverprofile=profile.out -covermode=atomic "$d" + if [ -f profile.out ]; then + cat profile.out >> coverage.txt + rm profile.out + fi +done diff --git a/vendor/github.com/mattn/go-isatty/isatty_android.go b/vendor/github.com/mattn/go-isatty/isatty_android.go deleted file mode 100644 index d3567cb5bf..0000000000 --- a/vendor/github.com/mattn/go-isatty/isatty_android.go +++ /dev/null @@ -1,23 +0,0 @@ -// +build android - -package isatty - -import ( - "syscall" - "unsafe" -) - -const ioctlReadTermios = syscall.TCGETS - -// IsTerminal return true if the file descriptor is terminal. 
-func IsTerminal(fd uintptr) bool { - var termios syscall.Termios - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) - return err == 0 -} - -// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 -// terminal. This is also always false on this environment. -func IsCygwinTerminal(fd uintptr) bool { - return false -} diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go index 07e93039db..711f288085 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_bsd.go +++ b/vendor/github.com/mattn/go-isatty/isatty_bsd.go @@ -3,18 +3,12 @@ package isatty -import ( - "syscall" - "unsafe" -) - -const ioctlReadTermios = syscall.TIOCGETA +import "golang.org/x/sys/unix" // IsTerminal return true if the file descriptor is terminal. func IsTerminal(fd uintptr) bool { - var termios syscall.Termios - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) - return err == 0 + _, err := unix.IoctlGetTermios(int(fd), unix.TIOCGETA) + return err == nil } // IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 diff --git a/vendor/github.com/mattn/go-isatty/isatty_plan9.go b/vendor/github.com/mattn/go-isatty/isatty_plan9.go index bc0a70920f..c5b6e0c084 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_plan9.go +++ b/vendor/github.com/mattn/go-isatty/isatty_plan9.go @@ -8,7 +8,7 @@ import ( // IsTerminal returns true if the given file descriptor is a terminal. func IsTerminal(fd uintptr) bool { - path, err := syscall.Fd2path(fd) + path, err := syscall.Fd2path(int(fd)) if err != nil { return false } diff --git a/vendor/github.com/mattn/go-isatty/isatty_tcgets.go b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go index 453b025d0d..31a1ca973c 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_tcgets.go +++ b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go @@ -1,6 +1,5 @@ // +build linux aix // +build !appengine -// +build !android package isatty diff --git a/vendor/github.com/mattn/go-isatty/renovate.json b/vendor/github.com/mattn/go-isatty/renovate.json new file mode 100644 index 0000000000..5ae9d96b74 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/renovate.json @@ -0,0 +1,8 @@ +{ + "extends": [ + "config:base" + ], + "postUpdateOptions": [ + "gomodTidy" + ] +} diff --git a/vendor/github.com/mitchellh/copystructure/.travis.yml b/vendor/github.com/mitchellh/copystructure/.travis.yml new file mode 100644 index 0000000000..d7b9589ab1 --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/.travis.yml @@ -0,0 +1,12 @@ +language: go + +go: + - 1.7 + - tip + +script: + - go test + +matrix: + allow_failures: + - go: tip diff --git a/vendor/github.com/mitchellh/copystructure/LICENSE b/vendor/github.com/mitchellh/copystructure/LICENSE new file mode 100644 index 0000000000..2298515904 --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above 
copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/copystructure/README.md b/vendor/github.com/mitchellh/copystructure/README.md new file mode 100644 index 0000000000..bcb8c8d2cb --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/README.md @@ -0,0 +1,21 @@ +# copystructure + +copystructure is a Go library for deep copying values in Go. + +This allows you to copy Go values that may contain reference values +such as maps, slices, or pointers, and copy their data as well instead +of just their references. + +## Installation + +Standard `go get`: + +``` +$ go get github.com/mitchellh/copystructure +``` + +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/copystructure). + +The `Copy` function has examples associated with it there. diff --git a/vendor/github.com/mitchellh/copystructure/copier_time.go b/vendor/github.com/mitchellh/copystructure/copier_time.go new file mode 100644 index 0000000000..db6a6aa1a1 --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/copier_time.go @@ -0,0 +1,15 @@ +package copystructure + +import ( + "reflect" + "time" +) + +func init() { + Copiers[reflect.TypeOf(time.Time{})] = timeCopier +} + +func timeCopier(v interface{}) (interface{}, error) { + // Just... copy it. + return v.(time.Time), nil +} diff --git a/vendor/github.com/mitchellh/copystructure/copystructure.go b/vendor/github.com/mitchellh/copystructure/copystructure.go new file mode 100644 index 0000000000..140435255e --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/copystructure.go @@ -0,0 +1,548 @@ +package copystructure + +import ( + "errors" + "reflect" + "sync" + + "github.com/mitchellh/reflectwalk" +) + +// Copy returns a deep copy of v. +func Copy(v interface{}) (interface{}, error) { + return Config{}.Copy(v) +} + +// CopierFunc is a function that knows how to deep copy a specific type. +// Register these globally with the Copiers variable. +type CopierFunc func(interface{}) (interface{}, error) + +// Copiers is a map of types that behave specially when they are copied. +// If a type is found in this map while deep copying, this function +// will be called to copy it instead of attempting to copy all fields. +// +// The key should be the type, obtained using: reflect.TypeOf(value with type). +// +// It is unsafe to write to this map after Copies have started. If you +// are writing to this map while also copying, wrap all modifications to +// this map as well as to Copy in a mutex. +var Copiers map[reflect.Type]CopierFunc = make(map[reflect.Type]CopierFunc) + +// Must is a helper that wraps a call to a function returning +// (interface{}, error) and panics if the error is non-nil. It is intended +// for use in variable initializations and should only be used when a copy +// error should be a crashing case. 
+func Must(v interface{}, err error) interface{} { + if err != nil { + panic("copy error: " + err.Error()) + } + + return v +} + +var errPointerRequired = errors.New("Copy argument must be a pointer when Lock is true") + +type Config struct { + // Lock any types that are a sync.Locker and are not a mutex while copying. + // If there is an RLocker method, use that to get the sync.Locker. + Lock bool + + // Copiers is a map of types associated with a CopierFunc. Use the global + // Copiers map if this is nil. + Copiers map[reflect.Type]CopierFunc +} + +func (c Config) Copy(v interface{}) (interface{}, error) { + if c.Lock && reflect.ValueOf(v).Kind() != reflect.Ptr { + return nil, errPointerRequired + } + + w := new(walker) + if c.Lock { + w.useLocks = true + } + + if c.Copiers == nil { + c.Copiers = Copiers + } + + err := reflectwalk.Walk(v, w) + if err != nil { + return nil, err + } + + // Get the result. If the result is nil, then we want to turn it + // into a typed nil if we can. + result := w.Result + if result == nil { + val := reflect.ValueOf(v) + result = reflect.Indirect(reflect.New(val.Type())).Interface() + } + + return result, nil +} + +// Return the key used to index interfaces types we've seen. Store the number +// of pointers in the upper 32bits, and the depth in the lower 32bits. This is +// easy to calculate, easy to match a key with our current depth, and we don't +// need to deal with initializing and cleaning up nested maps or slices. +func ifaceKey(pointers, depth int) uint64 { + return uint64(pointers)<<32 | uint64(depth) +} + +type walker struct { + Result interface{} + + depth int + ignoreDepth int + vals []reflect.Value + cs []reflect.Value + + // This stores the number of pointers we've walked over, indexed by depth. + ps []int + + // If an interface is indirected by a pointer, we need to know the type of + // interface to create when creating the new value. Store the interface + // types here, indexed by both the walk depth and the number of pointers + // already seen at that depth. Use ifaceKey to calculate the proper uint64 + // value. + ifaceTypes map[uint64]reflect.Type + + // any locks we've taken, indexed by depth + locks []sync.Locker + // take locks while walking the structure + useLocks bool +} + +func (w *walker) Enter(l reflectwalk.Location) error { + w.depth++ + + // ensure we have enough elements to index via w.depth + for w.depth >= len(w.locks) { + w.locks = append(w.locks, nil) + } + + for len(w.ps) < w.depth+1 { + w.ps = append(w.ps, 0) + } + + return nil +} + +func (w *walker) Exit(l reflectwalk.Location) error { + locker := w.locks[w.depth] + w.locks[w.depth] = nil + if locker != nil { + defer locker.Unlock() + } + + // clear out pointers and interfaces as we exit the stack + w.ps[w.depth] = 0 + + for k := range w.ifaceTypes { + mask := uint64(^uint32(0)) + if k&mask == uint64(w.depth) { + delete(w.ifaceTypes, k) + } + } + + w.depth-- + if w.ignoreDepth > w.depth { + w.ignoreDepth = 0 + } + + if w.ignoring() { + return nil + } + + switch l { + case reflectwalk.Array: + fallthrough + case reflectwalk.Map: + fallthrough + case reflectwalk.Slice: + w.replacePointerMaybe() + + // Pop map off our container + w.cs = w.cs[:len(w.cs)-1] + case reflectwalk.MapValue: + // Pop off the key and value + mv := w.valPop() + mk := w.valPop() + m := w.cs[len(w.cs)-1] + + // If mv is the zero value, SetMapIndex deletes the key form the map, + // or in this case never adds it. We need to create a properly typed + // zero value so that this key can be set. 
+ if !mv.IsValid() { + mv = reflect.Zero(m.Elem().Type().Elem()) + } + m.Elem().SetMapIndex(mk, mv) + case reflectwalk.ArrayElem: + // Pop off the value and the index and set it on the array + v := w.valPop() + i := w.valPop().Interface().(int) + if v.IsValid() { + a := w.cs[len(w.cs)-1] + ae := a.Elem().Index(i) // storing array as pointer on stack - so need Elem() call + if ae.CanSet() { + ae.Set(v) + } + } + case reflectwalk.SliceElem: + // Pop off the value and the index and set it on the slice + v := w.valPop() + i := w.valPop().Interface().(int) + if v.IsValid() { + s := w.cs[len(w.cs)-1] + se := s.Elem().Index(i) + if se.CanSet() { + se.Set(v) + } + } + case reflectwalk.Struct: + w.replacePointerMaybe() + + // Remove the struct from the container stack + w.cs = w.cs[:len(w.cs)-1] + case reflectwalk.StructField: + // Pop off the value and the field + v := w.valPop() + f := w.valPop().Interface().(reflect.StructField) + if v.IsValid() { + s := w.cs[len(w.cs)-1] + sf := reflect.Indirect(s).FieldByName(f.Name) + + if sf.CanSet() { + sf.Set(v) + } + } + case reflectwalk.WalkLoc: + // Clear out the slices for GC + w.cs = nil + w.vals = nil + } + + return nil +} + +func (w *walker) Map(m reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(m) + + // Create the map. If the map itself is nil, then just make a nil map + var newMap reflect.Value + if m.IsNil() { + newMap = reflect.New(m.Type()) + } else { + newMap = wrapPtr(reflect.MakeMap(m.Type())) + } + + w.cs = append(w.cs, newMap) + w.valPush(newMap) + return nil +} + +func (w *walker) MapElem(m, k, v reflect.Value) error { + return nil +} + +func (w *walker) PointerEnter(v bool) error { + if v { + w.ps[w.depth]++ + } + return nil +} + +func (w *walker) PointerExit(v bool) error { + if v { + w.ps[w.depth]-- + } + return nil +} + +func (w *walker) Interface(v reflect.Value) error { + if !v.IsValid() { + return nil + } + if w.ifaceTypes == nil { + w.ifaceTypes = make(map[uint64]reflect.Type) + } + + w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)] = v.Type() + return nil +} + +func (w *walker) Primitive(v reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(v) + + // IsValid verifies the v is non-zero and CanInterface verifies + // that we're allowed to read this value (unexported fields). + var newV reflect.Value + if v.IsValid() && v.CanInterface() { + newV = reflect.New(v.Type()) + newV.Elem().Set(v) + } + + w.valPush(newV) + w.replacePointerMaybe() + return nil +} + +func (w *walker) Slice(s reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(s) + + var newS reflect.Value + if s.IsNil() { + newS = reflect.New(s.Type()) + } else { + newS = wrapPtr(reflect.MakeSlice(s.Type(), s.Len(), s.Cap())) + } + + w.cs = append(w.cs, newS) + w.valPush(newS) + return nil +} + +func (w *walker) SliceElem(i int, elem reflect.Value) error { + if w.ignoring() { + return nil + } + + // We don't write the slice here because elem might still be + // arbitrarily complex. Just record the index and continue on. + w.valPush(reflect.ValueOf(i)) + + return nil +} + +func (w *walker) Array(a reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(a) + + newA := reflect.New(a.Type()) + + w.cs = append(w.cs, newA) + w.valPush(newA) + return nil +} + +func (w *walker) ArrayElem(i int, elem reflect.Value) error { + if w.ignoring() { + return nil + } + + // We don't write the array here because elem might still be + // arbitrarily complex. Just record the index and continue on. 
+ w.valPush(reflect.ValueOf(i)) + + return nil +} + +func (w *walker) Struct(s reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(s) + + var v reflect.Value + if c, ok := Copiers[s.Type()]; ok { + // We have a Copier for this struct, so we use that copier to + // get the copy, and we ignore anything deeper than this. + w.ignoreDepth = w.depth + + dup, err := c(s.Interface()) + if err != nil { + return err + } + + // We need to put a pointer to the value on the value stack, + // so allocate a new pointer and set it. + v = reflect.New(s.Type()) + reflect.Indirect(v).Set(reflect.ValueOf(dup)) + } else { + // No copier, we copy ourselves and allow reflectwalk to guide + // us deeper into the structure for copying. + v = reflect.New(s.Type()) + } + + // Push the value onto the value stack for setting the struct field, + // and add the struct itself to the containers stack in case we walk + // deeper so that its own fields can be modified. + w.valPush(v) + w.cs = append(w.cs, v) + + return nil +} + +func (w *walker) StructField(f reflect.StructField, v reflect.Value) error { + if w.ignoring() { + return nil + } + + // If PkgPath is non-empty, this is a private (unexported) field. + // We do not set this unexported since the Go runtime doesn't allow us. + if f.PkgPath != "" { + return reflectwalk.SkipEntry + } + + // Push the field onto the stack, we'll handle it when we exit + // the struct field in Exit... + w.valPush(reflect.ValueOf(f)) + return nil +} + +// ignore causes the walker to ignore any more values until we exit this on +func (w *walker) ignore() { + w.ignoreDepth = w.depth +} + +func (w *walker) ignoring() bool { + return w.ignoreDepth > 0 && w.depth >= w.ignoreDepth +} + +func (w *walker) pointerPeek() bool { + return w.ps[w.depth] > 0 +} + +func (w *walker) valPop() reflect.Value { + result := w.vals[len(w.vals)-1] + w.vals = w.vals[:len(w.vals)-1] + + // If we're out of values, that means we popped everything off. In + // this case, we reset the result so the next pushed value becomes + // the result. + if len(w.vals) == 0 { + w.Result = nil + } + + return result +} + +func (w *walker) valPush(v reflect.Value) { + w.vals = append(w.vals, v) + + // If we haven't set the result yet, then this is the result since + // it is the first (outermost) value we're seeing. + if w.Result == nil && v.IsValid() { + w.Result = v.Interface() + } +} + +func (w *walker) replacePointerMaybe() { + // Determine the last pointer value. If it is NOT a pointer, then + // we need to push that onto the stack. + if !w.pointerPeek() { + w.valPush(reflect.Indirect(w.valPop())) + return + } + + v := w.valPop() + + // If the expected type is a pointer to an interface of any depth, + // such as *interface{}, **interface{}, etc., then we need to convert + // the value "v" from *CONCRETE to *interface{} so types match for + // Set. + // + // Example if v is type *Foo where Foo is a struct, v would become + // *interface{} instead. This only happens if we have an interface expectation + // at this depth. 
+ // + // For more info, see GH-16 + if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)]; ok && iType.Kind() == reflect.Interface { + y := reflect.New(iType) // Create *interface{} + y.Elem().Set(reflect.Indirect(v)) // Assign "Foo" to interface{} (dereferenced) + v = y // v is now typed *interface{} (where *v = Foo) + } + + for i := 1; i < w.ps[w.depth]; i++ { + if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth]-i, w.depth)]; ok { + iface := reflect.New(iType).Elem() + iface.Set(v) + v = iface + } + + p := reflect.New(v.Type()) + p.Elem().Set(v) + v = p + } + + w.valPush(v) +} + +// if this value is a Locker, lock it and add it to the locks slice +func (w *walker) lock(v reflect.Value) { + if !w.useLocks { + return + } + + if !v.IsValid() || !v.CanInterface() { + return + } + + type rlocker interface { + RLocker() sync.Locker + } + + var locker sync.Locker + + // We can't call Interface() on a value directly, since that requires + // a copy. This is OK, since the pointer to a value which is a sync.Locker + // is also a sync.Locker. + if v.Kind() == reflect.Ptr { + switch l := v.Interface().(type) { + case rlocker: + // don't lock a mutex directly + if _, ok := l.(*sync.RWMutex); !ok { + locker = l.RLocker() + } + case sync.Locker: + locker = l + } + } else if v.CanAddr() { + switch l := v.Addr().Interface().(type) { + case rlocker: + // don't lock a mutex directly + if _, ok := l.(*sync.RWMutex); !ok { + locker = l.RLocker() + } + case sync.Locker: + locker = l + } + } + + // still no callable locker + if locker == nil { + return + } + + // don't lock a mutex directly + switch locker.(type) { + case *sync.Mutex, *sync.RWMutex: + return + } + + locker.Lock() + w.locks[w.depth] = locker +} + +// wrapPtr is a helper that takes v and always make it *v. 
copystructure +// stores things internally as pointers until the last moment before unwrapping +func wrapPtr(v reflect.Value) reflect.Value { + if !v.IsValid() { + return v + } + vPtr := reflect.New(v.Type()) + vPtr.Elem().Set(v) + return vPtr +} diff --git a/vendor/github.com/mitchellh/copystructure/go.mod b/vendor/github.com/mitchellh/copystructure/go.mod new file mode 100644 index 0000000000..d01864309b --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/go.mod @@ -0,0 +1,3 @@ +module github.com/mitchellh/copystructure + +require github.com/mitchellh/reflectwalk v1.0.0 diff --git a/vendor/github.com/mitchellh/copystructure/go.sum b/vendor/github.com/mitchellh/copystructure/go.sum new file mode 100644 index 0000000000..be57245619 --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/go.sum @@ -0,0 +1,2 @@ +github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= diff --git a/vendor/github.com/mitchellh/go-homedir/LICENSE b/vendor/github.com/mitchellh/go-homedir/LICENSE new file mode 100644 index 0000000000..f9c841a51e --- /dev/null +++ b/vendor/github.com/mitchellh/go-homedir/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/go-homedir/README.md b/vendor/github.com/mitchellh/go-homedir/README.md new file mode 100644 index 0000000000..d70706d5b3 --- /dev/null +++ b/vendor/github.com/mitchellh/go-homedir/README.md @@ -0,0 +1,14 @@ +# go-homedir + +This is a Go library for detecting the user's home directory without +the use of cgo, so the library can be used in cross-compilation environments. + +Usage is incredibly simple, just call `homedir.Dir()` to get the home directory +for a user, and `homedir.Expand()` to expand the `~` in a path to the home +directory. + +**Why not just use `os/user`?** The built-in `os/user` package requires +cgo on Darwin systems. This means that any Go code that uses that package +cannot cross compile. But 99% of the time the use for `os/user` is just to +retrieve the home directory, which we can do for the current user without +cgo. This library does that, enabling cross-compilation. 
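
The go-homedir README above describes the whole API: `homedir.Dir()` to detect the current user's home directory without cgo, and `homedir.Expand()` to expand a leading `~` in a path. A minimal sketch of that usage, assuming only the two functions shown in this patch (the expanded path below is an arbitrary illustration, not something this PR reads):

```go
package main

import (
	"fmt"
	"log"

	homedir "github.com/mitchellh/go-homedir"
)

func main() {
	// Detect the executing user's home directory (cached after the first call).
	home, err := homedir.Dir()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("home:", home)

	// Expand a "~"-prefixed path into an absolute one under that home directory.
	cfg, err := homedir.Expand("~/.config/example.json")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("config path:", cfg)
}
```
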
diff --git a/vendor/github.com/mitchellh/go-homedir/go.mod b/vendor/github.com/mitchellh/go-homedir/go.mod new file mode 100644 index 0000000000..7efa09a043 --- /dev/null +++ b/vendor/github.com/mitchellh/go-homedir/go.mod @@ -0,0 +1 @@ +module github.com/mitchellh/go-homedir diff --git a/vendor/github.com/mitchellh/go-homedir/homedir.go b/vendor/github.com/mitchellh/go-homedir/homedir.go new file mode 100644 index 0000000000..25378537ea --- /dev/null +++ b/vendor/github.com/mitchellh/go-homedir/homedir.go @@ -0,0 +1,167 @@ +package homedir + +import ( + "bytes" + "errors" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" +) + +// DisableCache will disable caching of the home directory. Caching is enabled +// by default. +var DisableCache bool + +var homedirCache string +var cacheLock sync.RWMutex + +// Dir returns the home directory for the executing user. +// +// This uses an OS-specific method for discovering the home directory. +// An error is returned if a home directory cannot be detected. +func Dir() (string, error) { + if !DisableCache { + cacheLock.RLock() + cached := homedirCache + cacheLock.RUnlock() + if cached != "" { + return cached, nil + } + } + + cacheLock.Lock() + defer cacheLock.Unlock() + + var result string + var err error + if runtime.GOOS == "windows" { + result, err = dirWindows() + } else { + // Unix-like system, so just assume Unix + result, err = dirUnix() + } + + if err != nil { + return "", err + } + homedirCache = result + return result, nil +} + +// Expand expands the path to include the home directory if the path +// is prefixed with `~`. If it isn't prefixed with `~`, the path is +// returned as-is. +func Expand(path string) (string, error) { + if len(path) == 0 { + return path, nil + } + + if path[0] != '~' { + return path, nil + } + + if len(path) > 1 && path[1] != '/' && path[1] != '\\' { + return "", errors.New("cannot expand user-specific home dir") + } + + dir, err := Dir() + if err != nil { + return "", err + } + + return filepath.Join(dir, path[1:]), nil +} + +// Reset clears the cache, forcing the next call to Dir to re-detect +// the home directory. This generally never has to be called, but can be +// useful in tests if you're modifying the home directory via the HOME +// env var or something. +func Reset() { + cacheLock.Lock() + defer cacheLock.Unlock() + homedirCache = "" +} + +func dirUnix() (string, error) { + homeEnv := "HOME" + if runtime.GOOS == "plan9" { + // On plan9, env vars are lowercase. + homeEnv = "home" + } + + // First prefer the HOME environmental variable + if home := os.Getenv(homeEnv); home != "" { + return home, nil + } + + var stdout bytes.Buffer + + // If that fails, try OS specific commands + if runtime.GOOS == "darwin" { + cmd := exec.Command("sh", "-c", `dscl -q . -read /Users/"$(whoami)" NFSHomeDirectory | sed 's/^[^ ]*: //'`) + cmd.Stdout = &stdout + if err := cmd.Run(); err == nil { + result := strings.TrimSpace(stdout.String()) + if result != "" { + return result, nil + } + } + } else { + cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid())) + cmd.Stdout = &stdout + if err := cmd.Run(); err != nil { + // If the error is ErrNotFound, we ignore it. Otherwise, return it. 
+ if err != exec.ErrNotFound { + return "", err + } + } else { + if passwd := strings.TrimSpace(stdout.String()); passwd != "" { + // username:password:uid:gid:gecos:home:shell + passwdParts := strings.SplitN(passwd, ":", 7) + if len(passwdParts) > 5 { + return passwdParts[5], nil + } + } + } + } + + // If all else fails, try the shell + stdout.Reset() + cmd := exec.Command("sh", "-c", "cd && pwd") + cmd.Stdout = &stdout + if err := cmd.Run(); err != nil { + return "", err + } + + result := strings.TrimSpace(stdout.String()) + if result == "" { + return "", errors.New("blank output when reading home directory") + } + + return result, nil +} + +func dirWindows() (string, error) { + // First prefer the HOME environmental variable + if home := os.Getenv("HOME"); home != "" { + return home, nil + } + + // Prefer standard environment variable USERPROFILE + if home := os.Getenv("USERPROFILE"); home != "" { + return home, nil + } + + drive := os.Getenv("HOMEDRIVE") + path := os.Getenv("HOMEPATH") + home := drive + path + if drive == "" || path == "" { + return "", errors.New("HOMEDRIVE, HOMEPATH, or USERPROFILE are blank") + } + + return home, nil +} diff --git a/vendor/github.com/mitchellh/go-testing-interface/.travis.yml b/vendor/github.com/mitchellh/go-testing-interface/.travis.yml new file mode 100644 index 0000000000..928d000ec4 --- /dev/null +++ b/vendor/github.com/mitchellh/go-testing-interface/.travis.yml @@ -0,0 +1,13 @@ +language: go + +go: + - 1.8 + - 1.x + - tip + +script: + - go test + +matrix: + allow_failures: + - go: tip diff --git a/vendor/github.com/mitchellh/go-testing-interface/LICENSE b/vendor/github.com/mitchellh/go-testing-interface/LICENSE new file mode 100644 index 0000000000..a3866a291f --- /dev/null +++ b/vendor/github.com/mitchellh/go-testing-interface/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/go-testing-interface/README.md b/vendor/github.com/mitchellh/go-testing-interface/README.md new file mode 100644 index 0000000000..26781bbae8 --- /dev/null +++ b/vendor/github.com/mitchellh/go-testing-interface/README.md @@ -0,0 +1,52 @@ +# go-testing-interface + +go-testing-interface is a Go library that exports an interface that +`*testing.T` implements as well as a runtime version you can use in its +place. 
+ +The purpose of this library is so that you can export test helpers as a +public API without depending on the "testing" package, since you can't +create a `*testing.T` struct manually. This lets you, for example, use the +public testing APIs to generate mock data at runtime, rather than just at +test time. + +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/go-testing-interface). + +Given a test helper written using `go-testing-interface` like this: + + import "github.com/mitchellh/go-testing-interface" + + func TestHelper(t testing.T) { + t.Fatal("I failed") + } + +You can call the test helper in a real test easily: + + import "testing" + + func TestThing(t *testing.T) { + TestHelper(t) + } + +You can also call the test helper at runtime if needed: + + import "github.com/mitchellh/go-testing-interface" + + func main() { + TestHelper(&testing.RuntimeT{}) + } + +## Why?! + +**Why would I call a test helper that takes a *testing.T at runtime?** + +You probably shouldn't. The only use case I've seen (and I've had) for this +is to implement a "dev mode" for a service where the test helpers are used +to populate mock data, create a mock DB, perhaps run service dependencies +in-memory, etc. + +Outside of a "dev mode", I've never seen a use case for this and I think +there shouldn't be one since the point of the `testing.T` interface is that +you can fail immediately. diff --git a/vendor/github.com/mitchellh/go-testing-interface/go.mod b/vendor/github.com/mitchellh/go-testing-interface/go.mod new file mode 100644 index 0000000000..062796de72 --- /dev/null +++ b/vendor/github.com/mitchellh/go-testing-interface/go.mod @@ -0,0 +1 @@ +module github.com/mitchellh/go-testing-interface diff --git a/vendor/github.com/mitchellh/go-testing-interface/testing.go b/vendor/github.com/mitchellh/go-testing-interface/testing.go new file mode 100644 index 0000000000..204afb4200 --- /dev/null +++ b/vendor/github.com/mitchellh/go-testing-interface/testing.go @@ -0,0 +1,84 @@ +// +build !go1.9 + +package testing + +import ( + "fmt" + "log" +) + +// T is the interface that mimics the standard library *testing.T. +// +// In unit tests you can just pass a *testing.T struct. At runtime, outside +// of tests, you can pass in a RuntimeT struct from this package. +type T interface { + Error(args ...interface{}) + Errorf(format string, args ...interface{}) + Fail() + FailNow() + Failed() bool + Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) + Log(args ...interface{}) + Logf(format string, args ...interface{}) + Name() string + Skip(args ...interface{}) + SkipNow() + Skipf(format string, args ...interface{}) + Skipped() bool +} + +// RuntimeT implements T and can be instantiated and run at runtime to +// mimic *testing.T behavior. Unlike *testing.T, this will simply panic +// for calls to Fatal. For calls to Error, you'll have to check the errors +// list to determine whether to exit yourself. Name and Skip methods are +// unimplemented noops. 
+type RuntimeT struct { + failed bool +} + +func (t *RuntimeT) Error(args ...interface{}) { + log.Println(fmt.Sprintln(args...)) + t.Fail() +} + +func (t *RuntimeT) Errorf(format string, args ...interface{}) { + log.Println(fmt.Sprintf(format, args...)) + t.Fail() +} + +func (t *RuntimeT) Fatal(args ...interface{}) { + log.Println(fmt.Sprintln(args...)) + t.FailNow() +} + +func (t *RuntimeT) Fatalf(format string, args ...interface{}) { + log.Println(fmt.Sprintf(format, args...)) + t.FailNow() +} + +func (t *RuntimeT) Fail() { + t.failed = true +} + +func (t *RuntimeT) FailNow() { + panic("testing.T failed, see logs for output (if any)") +} + +func (t *RuntimeT) Failed() bool { + return t.failed +} + +func (t *RuntimeT) Log(args ...interface{}) { + log.Println(fmt.Sprintln(args...)) +} + +func (t *RuntimeT) Logf(format string, args ...interface{}) { + log.Println(fmt.Sprintf(format, args...)) +} + +func (t *RuntimeT) Name() string { return "" } +func (t *RuntimeT) Skip(args ...interface{}) {} +func (t *RuntimeT) SkipNow() {} +func (t *RuntimeT) Skipf(format string, args ...interface{}) {} +func (t *RuntimeT) Skipped() bool { return false } diff --git a/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go b/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go new file mode 100644 index 0000000000..31b42cadf8 --- /dev/null +++ b/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go @@ -0,0 +1,108 @@ +// +build go1.9 + +// NOTE: This is a temporary copy of testing.go for Go 1.9 with the addition +// of "Helper" to the T interface. Go 1.9 at the time of typing is in RC +// and is set for release shortly. We'll support this on master as the default +// as soon as 1.9 is released. + +package testing + +import ( + "fmt" + "log" +) + +// T is the interface that mimics the standard library *testing.T. +// +// In unit tests you can just pass a *testing.T struct. At runtime, outside +// of tests, you can pass in a RuntimeT struct from this package. +type T interface { + Error(args ...interface{}) + Errorf(format string, args ...interface{}) + Fail() + FailNow() + Failed() bool + Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) + Log(args ...interface{}) + Logf(format string, args ...interface{}) + Name() string + Skip(args ...interface{}) + SkipNow() + Skipf(format string, args ...interface{}) + Skipped() bool + Helper() +} + +// RuntimeT implements T and can be instantiated and run at runtime to +// mimic *testing.T behavior. Unlike *testing.T, this will simply panic +// for calls to Fatal. For calls to Error, you'll have to check the errors +// list to determine whether to exit yourself. +type RuntimeT struct { + skipped bool + failed bool +} + +func (t *RuntimeT) Error(args ...interface{}) { + log.Println(fmt.Sprintln(args...)) + t.Fail() +} + +func (t *RuntimeT) Errorf(format string, args ...interface{}) { + log.Printf(format, args...) + t.Fail() +} + +func (t *RuntimeT) Fail() { + t.failed = true +} + +func (t *RuntimeT) FailNow() { + panic("testing.T failed, see logs for output (if any)") +} + +func (t *RuntimeT) Failed() bool { + return t.failed +} + +func (t *RuntimeT) Fatal(args ...interface{}) { + log.Print(args...) + t.FailNow() +} + +func (t *RuntimeT) Fatalf(format string, args ...interface{}) { + log.Printf(format, args...) 
+ t.FailNow() +} + +func (t *RuntimeT) Log(args ...interface{}) { + log.Println(fmt.Sprintln(args...)) +} + +func (t *RuntimeT) Logf(format string, args ...interface{}) { + log.Println(fmt.Sprintf(format, args...)) +} + +func (t *RuntimeT) Name() string { + return "" +} + +func (t *RuntimeT) Skip(args ...interface{}) { + log.Print(args...) + t.SkipNow() +} + +func (t *RuntimeT) SkipNow() { + t.skipped = true +} + +func (t *RuntimeT) Skipf(format string, args ...interface{}) { + log.Printf(format, args...) + t.SkipNow() +} + +func (t *RuntimeT) Skipped() bool { + return t.skipped +} + +func (t *RuntimeT) Helper() {} diff --git a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md index 3b3cb723f8..9fe803a5e9 100644 --- a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md +++ b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md @@ -1,3 +1,61 @@ +## 1.4.2 + +* Custom name matchers to support any sort of casing, formatting, etc. for + field names. [GH-250] +* Fix possible panic in ComposeDecodeHookFunc [GH-251] + +## 1.4.1 + +* Fix regression where `*time.Time` value would be set to empty and not be sent + to decode hooks properly [GH-232] + +## 1.4.0 + +* A new decode hook type `DecodeHookFuncValue` has been added that has + access to the full values. [GH-183] +* Squash is now supported with embedded fields that are struct pointers [GH-205] +* Empty strings will convert to 0 for all numeric types when weakly decoding [GH-206] + +## 1.3.3 + +* Decoding maps from maps creates a settable value for decode hooks [GH-203] + +## 1.3.2 + +* Decode into interface type with a struct value is supported [GH-187] + +## 1.3.1 + +* Squash should only squash embedded structs. [GH-194] + +## 1.3.0 + +* Added `",omitempty"` support. This will ignore zero values in the source + structure when encoding. [GH-145] + +## 1.2.3 + +* Fix duplicate entries in Keys list with pointer values. [GH-185] + +## 1.2.2 + +* Do not add unsettable (unexported) values to the unused metadata key + or "remain" value. [GH-150] + +## 1.2.1 + +* Go modules checksum mismatch fix + +## 1.2.0 + +* Added support to capture unused values in a field using the `",remain"` value + in the mapstructure tag. There is an example to showcase usage. +* Added `DecoderConfig` option to always squash embedded structs +* `json.Number` can decode into `uint` types +* Empty slices are preserved and not replaced with nil slices +* Fix panic that can occur in when decoding a map into a nil slice of structs +* Improved package documentation for godoc + ## 1.1.2 * Fix error when decode hook decodes interface implementation into interface diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go index 1f0abc65ab..4d4bbc733b 100644 --- a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go +++ b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go @@ -1,6 +1,7 @@ package mapstructure import ( + "encoding" "errors" "fmt" "net" @@ -16,10 +17,11 @@ func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc { // Create variables here so we can reference them with the reflect pkg var f1 DecodeHookFuncType var f2 DecodeHookFuncKind + var f3 DecodeHookFuncValue // Fill in the variables into this interface and the rest is done // automatically using the reflect package. 
- potential := []interface{}{f1, f2} + potential := []interface{}{f1, f2, f3} v := reflect.ValueOf(h) vt := v.Type() @@ -38,13 +40,15 @@ func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc { // that took reflect.Kind instead of reflect.Type. func DecodeHookExec( raw DecodeHookFunc, - from reflect.Type, to reflect.Type, - data interface{}) (interface{}, error) { + from reflect.Value, to reflect.Value) (interface{}, error) { + switch f := typedDecodeHook(raw).(type) { case DecodeHookFuncType: - return f(from, to, data) + return f(from.Type(), to.Type(), from.Interface()) case DecodeHookFuncKind: - return f(from.Kind(), to.Kind(), data) + return f(from.Kind(), to.Kind(), from.Interface()) + case DecodeHookFuncValue: + return f(from, to) default: return nil, errors.New("invalid decode hook signature") } @@ -56,22 +60,17 @@ func DecodeHookExec( // The composed funcs are called in order, with the result of the // previous transformation. func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { + return func(f reflect.Value, t reflect.Value) (interface{}, error) { var err error + data := f.Interface() + + newFrom := f for _, f1 := range fs { - data, err = DecodeHookExec(f1, f, t, data) + data, err = DecodeHookExec(f1, newFrom, t) if err != nil { return nil, err } - - // Modify the from kind to be correct with the new data - f = nil - if val := reflect.ValueOf(data); val.IsValid() { - f = val.Type() - } + newFrom = reflect.ValueOf(data) } return data, nil @@ -215,3 +214,44 @@ func WeaklyTypedHook( return data, nil } + +func RecursiveStructToMapHookFunc() DecodeHookFunc { + return func(f reflect.Value, t reflect.Value) (interface{}, error) { + if f.Kind() != reflect.Struct { + return f.Interface(), nil + } + + var i interface{} = struct{}{} + if t.Type() != reflect.TypeOf(&i).Elem() { + return f.Interface(), nil + } + + m := make(map[string]interface{}) + t.Set(reflect.ValueOf(m)) + + return f.Interface(), nil + } +} + +// TextUnmarshallerHookFunc returns a DecodeHookFunc that applies +// strings to the UnmarshalText function, when the target type +// implements the encoding.TextUnmarshaler interface +func TextUnmarshallerHookFunc() DecodeHookFuncType { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + result := reflect.New(t).Interface() + unmarshaller, ok := result.(encoding.TextUnmarshaler) + if !ok { + return data, nil + } + if err := unmarshaller.UnmarshalText([]byte(data.(string))); err != nil { + return nil, err + } + return result, nil + } +} diff --git a/vendor/github.com/mitchellh/mapstructure/go.mod b/vendor/github.com/mitchellh/mapstructure/go.mod index d2a7125620..a03ae97308 100644 --- a/vendor/github.com/mitchellh/mapstructure/go.mod +++ b/vendor/github.com/mitchellh/mapstructure/go.mod @@ -1 +1,3 @@ module github.com/mitchellh/mapstructure + +go 1.14 diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go index 256ee63fbf..dcee0f2d63 100644 --- a/vendor/github.com/mitchellh/mapstructure/mapstructure.go +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go @@ -1,10 +1,161 @@ -// Package mapstructure exposes functionality to convert an arbitrary -// map[string]interface{} into a native Go structure. 
+// Package mapstructure exposes functionality to convert one arbitrary +// Go type into another, typically to convert a map[string]interface{} +// into a native Go structure. // // The Go structure can be arbitrarily complex, containing slices, // other structs, etc. and the decoder will properly decode nested // maps and so on into the proper structures in the native Go struct. // See the examples to see what the decoder is capable of. +// +// The simplest function to start with is Decode. +// +// Field Tags +// +// When decoding to a struct, mapstructure will use the field name by +// default to perform the mapping. For example, if a struct has a field +// "Username" then mapstructure will look for a key in the source value +// of "username" (case insensitive). +// +// type User struct { +// Username string +// } +// +// You can change the behavior of mapstructure by using struct tags. +// The default struct tag that mapstructure looks for is "mapstructure" +// but you can customize it using DecoderConfig. +// +// Renaming Fields +// +// To rename the key that mapstructure looks for, use the "mapstructure" +// tag and set a value directly. For example, to change the "username" example +// above to "user": +// +// type User struct { +// Username string `mapstructure:"user"` +// } +// +// Embedded Structs and Squashing +// +// Embedded structs are treated as if they're another field with that name. +// By default, the two structs below are equivalent when decoding with +// mapstructure: +// +// type Person struct { +// Name string +// } +// +// type Friend struct { +// Person +// } +// +// type Friend struct { +// Person Person +// } +// +// This would require an input that looks like below: +// +// map[string]interface{}{ +// "person": map[string]interface{}{"name": "alice"}, +// } +// +// If your "person" value is NOT nested, then you can append ",squash" to +// your tag value and mapstructure will treat it as if the embedded struct +// were part of the struct directly. Example: +// +// type Friend struct { +// Person `mapstructure:",squash"` +// } +// +// Now the following input would be accepted: +// +// map[string]interface{}{ +// "name": "alice", +// } +// +// When decoding from a struct to a map, the squash tag squashes the struct +// fields into a single map. Using the example structs from above: +// +// Friend{Person: Person{Name: "alice"}} +// +// Will be decoded into a map: +// +// map[string]interface{}{ +// "name": "alice", +// } +// +// DecoderConfig has a field that changes the behavior of mapstructure +// to always squash embedded structs. +// +// Remainder Values +// +// If there are any unmapped keys in the source value, mapstructure by +// default will silently ignore them. You can error by setting ErrorUnused +// in DecoderConfig. If you're using Metadata you can also maintain a slice +// of the unused keys. +// +// You can also use the ",remain" suffix on your tag to collect all unused +// values in a map. The field with this tag MUST be a map type and should +// probably be a "map[string]interface{}" or "map[interface{}]interface{}". 
+// See example below: +// +// type Friend struct { +// Name string +// Other map[string]interface{} `mapstructure:",remain"` +// } +// +// Given the input below, Other would be populated with the other +// values that weren't used (everything but "name"): +// +// map[string]interface{}{ +// "name": "bob", +// "address": "123 Maple St.", +// } +// +// Omit Empty Values +// +// When decoding from a struct to any other value, you may use the +// ",omitempty" suffix on your tag to omit that value if it equates to +// the zero value. The zero value of all types is specified in the Go +// specification. +// +// For example, the zero type of a numeric type is zero ("0"). If the struct +// field value is zero and a numeric type, the field is empty, and it won't +// be encoded into the destination type. +// +// type Source { +// Age int `mapstructure:",omitempty"` +// } +// +// Unexported fields +// +// Since unexported (private) struct fields cannot be set outside the package +// where they are defined, the decoder will simply skip them. +// +// For this output type definition: +// +// type Exported struct { +// private string // this unexported field will be skipped +// Public string +// } +// +// Using this map as input: +// +// map[string]interface{}{ +// "private": "I will be ignored", +// "Public": "I made it through!", +// } +// +// The following struct will be decoded: +// +// type Exported struct { +// private: "" // field is left with an empty string (zero value) +// Public: "I made it through!" +// } +// +// Other Configuration +// +// mapstructure is highly configurable. See the DecoderConfig struct +// for other features and options that are supported. package mapstructure import ( @@ -21,10 +172,11 @@ import ( // data transformations. See "DecodeHook" in the DecoderConfig // struct. // -// The type should be DecodeHookFuncType or DecodeHookFuncKind. -// Either is accepted. Types are a superset of Kinds (Types can return -// Kinds) and are generally a richer thing to use, but Kinds are simpler -// if you only need those. +// The type must be one of DecodeHookFuncType, DecodeHookFuncKind, or +// DecodeHookFuncValue. +// Values are a superset of Types (Values can return types), and Types are a +// superset of Kinds (Types can return Kinds) and are generally a richer thing +// to use, but Kinds are simpler if you only need those. // // The reason DecodeHookFunc is multi-typed is for backwards compatibility: // we started with Kinds and then realized Types were the better solution, @@ -40,15 +192,22 @@ type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface // source and target types. type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) +// DecodeHookFuncValue is a DecodeHookFunc which has complete access to both the source and target +// values. +type DecodeHookFuncValue func(from reflect.Value, to reflect.Value) (interface{}, error) + // DecoderConfig is the configuration that is used to create a new decoder // and allows customization of various aspects of decoding. type DecoderConfig struct { // DecodeHook, if set, will be called before any decoding and any // type conversion (if WeaklyTypedInput is on). This lets you modify - // the values before they're set down onto the resulting struct. + // the values before they're set down onto the resulting struct. The + // DecodeHook is called for every map and value in the input. 
This means + // that if a struct has embedded fields with squash tags the decode hook + // is called only once with all of the input data, not once for each + // embedded struct. // - // If an error is returned, the entire decode will fail with that - // error. + // If an error is returned, the entire decode will fail with that error. DecodeHook DecodeHookFunc // If ErrorUnused is true, then it is an error for there to exist @@ -80,6 +239,14 @@ type DecoderConfig struct { // WeaklyTypedInput bool + // Squash will squash embedded structs. A squash tag may also be + // added to an individual struct field using a tag. For example: + // + // type Parent struct { + // Child `mapstructure:",squash"` + // } + Squash bool + // Metadata is the struct that will contain extra metadata about // the decoding. If this is nil, then no metadata will be tracked. Metadata *Metadata @@ -91,6 +258,11 @@ type DecoderConfig struct { // The tag name that mapstructure reads for field names. This // defaults to "mapstructure" TagName string + + // MatchName is the function used to match the map key to the struct + // field name or tag. Defaults to `strings.EqualFold`. This can be used + // to implement case-sensitive tag values, support snake casing, etc. + MatchName func(mapKey, fieldName string) bool } // A Decoder takes a raw interface value and turns it into structured @@ -209,6 +381,10 @@ func NewDecoder(config *DecoderConfig) (*Decoder, error) { config.TagName = "mapstructure" } + if config.MatchName == nil { + config.MatchName = strings.EqualFold + } + result := &Decoder{ config: config, } @@ -261,9 +437,7 @@ func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) e if d.config.DecodeHook != nil { // We have a DecodeHook, so let's pre-process the input. var err error - input, err = DecodeHookExec( - d.config.DecodeHook, - inputVal.Type(), outVal.Type(), input) + input, err = DecodeHookExec(d.config.DecodeHook, inputVal, outVal) if err != nil { return fmt.Errorf("error decoding '%s': %s", name, err) } @@ -271,6 +445,7 @@ func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) e var err error outputKind := getKind(outVal) + addMetaKey := true switch outputKind { case reflect.Bool: err = d.decodeBool(name, input, outVal) @@ -289,7 +464,7 @@ func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) e case reflect.Map: err = d.decodeMap(name, input, outVal) case reflect.Ptr: - err = d.decodePtr(name, input, outVal) + addMetaKey, err = d.decodePtr(name, input, outVal) case reflect.Slice: err = d.decodeSlice(name, input, outVal) case reflect.Array: @@ -303,7 +478,7 @@ func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) e // If we reached here, then we successfully decoded SOMETHING, so // mark the key as used if we're tracking metainput. - if d.config.Metadata != nil && name != "" { + if addMetaKey && d.config.Metadata != nil && name != "" { d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) } @@ -314,7 +489,34 @@ func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) e // value to "data" of that type. func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error { if val.IsValid() && val.Elem().IsValid() { - return d.decode(name, data, val.Elem()) + elem := val.Elem() + + // If we can't address this element, then its not writable. 
Instead, + // we make a copy of the value (which is a pointer and therefore + // writable), decode into that, and replace the whole value. + copied := false + if !elem.CanAddr() { + copied = true + + // Make *T + copy := reflect.New(elem.Type()) + + // *T = elem + copy.Elem().Set(elem) + + // Set elem so we decode into it + elem = copy + } + + // Decode. If we have an error then return. We also return right + // away if we're not a copy because that means we decoded directly. + if err := d.decode(name, data, elem); err != nil || !copied { + return err + } + + // If we're a copy, we need to set te final result + val.Set(elem.Elem()) + return nil } dataVal := reflect.ValueOf(data) @@ -386,8 +588,8 @@ func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) if !converted { return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - name, val.Type(), dataVal.Type()) + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) } return nil @@ -412,7 +614,12 @@ func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) er val.SetInt(0) } case dataKind == reflect.String && d.config.WeaklyTypedInput: - i, err := strconv.ParseInt(dataVal.String(), 0, val.Type().Bits()) + str := dataVal.String() + if str == "" { + str = "0" + } + + i, err := strconv.ParseInt(str, 0, val.Type().Bits()) if err == nil { val.SetInt(i) } else { @@ -428,8 +635,8 @@ func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) er val.SetInt(i) default: return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - name, val.Type(), dataVal.Type()) + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) } return nil @@ -438,6 +645,7 @@ func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) er func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error { dataVal := reflect.Indirect(reflect.ValueOf(data)) dataKind := getKind(dataVal) + dataType := dataVal.Type() switch { case dataKind == reflect.Int: @@ -463,16 +671,33 @@ func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) e val.SetUint(0) } case dataKind == reflect.String && d.config.WeaklyTypedInput: - i, err := strconv.ParseUint(dataVal.String(), 0, val.Type().Bits()) + str := dataVal.String() + if str == "" { + str = "0" + } + + i, err := strconv.ParseUint(str, 0, val.Type().Bits()) if err == nil { val.SetUint(i) } else { return fmt.Errorf("cannot parse '%s' as uint: %s", name, err) } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := jn.Int64() + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + if i < 0 && !d.config.WeaklyTypedInput { + return fmt.Errorf("cannot parse '%s', %d overflows uint", + name, i) + } + val.SetUint(uint64(i)) default: return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - name, val.Type(), dataVal.Type()) + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) } return nil @@ -502,8 +727,8 @@ func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) e } default: return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - name, val.Type(), dataVal.Type()) + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), 
dataVal.Type(), data) } return nil @@ -528,7 +753,12 @@ func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) val.SetFloat(0) } case dataKind == reflect.String && d.config.WeaklyTypedInput: - f, err := strconv.ParseFloat(dataVal.String(), val.Type().Bits()) + str := dataVal.String() + if str == "" { + str = "0" + } + + f, err := strconv.ParseFloat(str, val.Type().Bits()) if err == nil { val.SetFloat(f) } else { @@ -544,8 +774,8 @@ func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) val.SetFloat(i) default: return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - name, val.Type(), dataVal.Type()) + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) } return nil @@ -596,7 +826,7 @@ func (d *Decoder) decodeMapFromSlice(name string, dataVal reflect.Value, val ref for i := 0; i < dataVal.Len(); i++ { err := d.decode( - fmt.Sprintf("%s[%d]", name, i), + name+"["+strconv.Itoa(i)+"]", dataVal.Index(i).Interface(), val) if err != nil { return err @@ -629,7 +859,7 @@ func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val refle } for _, k := range dataVal.MapKeys() { - fieldName := fmt.Sprintf("%s[%s]", name, k) + fieldName := name + "[" + k.String() + "]" // First decode the key into the proper type currentKey := reflect.Indirect(reflect.New(valKeyType)) @@ -678,27 +908,40 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re } tagValue := f.Tag.Get(d.config.TagName) - tagParts := strings.Split(tagValue, ",") + keyName := f.Name + + // If Squash is set in the config, we squash the field down. + squash := d.config.Squash && v.Kind() == reflect.Struct && f.Anonymous // Determine the name of the key in the map - keyName := f.Name - if tagParts[0] != "" { - if tagParts[0] == "-" { + if index := strings.Index(tagValue, ","); index != -1 { + if tagValue[:index] == "-" { + continue + } + // If "omitempty" is specified in the tag, it ignores empty values. + if strings.Index(tagValue[index+1:], "omitempty") != -1 && isEmptyValue(v) { continue } - keyName = tagParts[0] - } - // If "squash" is specified in the tag, we squash the field down. - squash := false - for _, tag := range tagParts[1:] { - if tag == "squash" { - squash = true - break + // If "squash" is specified in the tag, we squash the field down. + squash = !squash && strings.Index(tagValue[index+1:], "squash") != -1 + if squash { + // When squashing, the embedded type can be a pointer to a struct. + if v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct { + v = v.Elem() + } + + // The final type must be a struct + if v.Kind() != reflect.Struct { + return fmt.Errorf("cannot squash non-struct type '%s'", v.Type()) + } } - } - if squash && v.Kind() != reflect.Struct { - return fmt.Errorf("cannot squash non-struct type '%s'", v.Type()) + keyName = tagValue[:index] + } else if len(tagValue) > 0 { + if tagValue == "-" { + continue + } + keyName = tagValue } switch v.Kind() { @@ -713,11 +956,22 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re mType := reflect.MapOf(vKeyType, vElemType) vMap := reflect.MakeMap(mType) - err := d.decode(keyName, x.Interface(), vMap) + // Creating a pointer to a map so that other methods can completely + // overwrite the map if need be (looking at you decodeMapFromMap). 
The + // indirection allows the underlying map to be settable (CanSet() == true) + // where as reflect.MakeMap returns an unsettable map. + addrVal := reflect.New(vMap.Type()) + reflect.Indirect(addrVal).Set(vMap) + + err := d.decode(keyName, x.Interface(), reflect.Indirect(addrVal)) if err != nil { return err } + // the underlying map may have been completely overwritten so pull + // it indirectly out of the enclosing value. + vMap = reflect.Indirect(addrVal) + if squash { for _, k := range vMap.MapKeys() { valMap.SetMapIndex(k, vMap.MapIndex(k)) @@ -738,7 +992,7 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re return nil } -func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) error { +func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) (bool, error) { // If the input data is nil, then we want to just set the output // pointer to be nil as well. isNil := data == nil @@ -759,7 +1013,7 @@ func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) er val.Set(nilValue) } - return nil + return true, nil } // Create an element of the concrete (non pointer) type and decode @@ -773,16 +1027,16 @@ func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) er } if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil { - return err + return false, err } val.Set(realVal) } else { if err := d.decode(name, data, reflect.Indirect(val)); err != nil { - return err + return false, err } } - return nil + return false, nil } func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error { @@ -791,8 +1045,8 @@ func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) e dataVal := reflect.Indirect(reflect.ValueOf(data)) if val.Type() != dataVal.Type() { return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - name, val.Type(), dataVal.Type()) + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) } val.Set(dataVal) return nil @@ -805,8 +1059,8 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) valElemType := valType.Elem() sliceType := reflect.SliceOf(valElemType) - valSlice := val - if valSlice.IsNil() || d.config.ZeroFields { + // If we have a non array/slice type then we first attempt to convert. + if dataValKind != reflect.Array && dataValKind != reflect.Slice { if d.config.WeaklyTypedInput { switch { // Slice and array we use the normal logic @@ -833,18 +1087,17 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) } } - // Check input type - if dataValKind != reflect.Array && dataValKind != reflect.Slice { - return fmt.Errorf( - "'%s': source data must be an array or slice, got %s", name, dataValKind) - - } + return fmt.Errorf( + "'%s': source data must be an array or slice, got %s", name, dataValKind) + } - // If the input value is empty, then don't allocate since non-nil != nil - if dataVal.Len() == 0 { - return nil - } + // If the input value is nil, then don't allocate since empty != nil + if dataVal.IsNil() { + return nil + } + valSlice := val + if valSlice.IsNil() || d.config.ZeroFields { // Make a new slice to hold our result, same size as the original data. 
valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) } @@ -859,7 +1112,7 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) } currentField := valSlice.Index(i) - fieldName := fmt.Sprintf("%s[%d]", name, i) + fieldName := name + "[" + strconv.Itoa(i) + "]" if err := d.decode(fieldName, currentData, currentField); err != nil { errors = appendErrors(errors, err) } @@ -926,7 +1179,7 @@ func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) currentData := dataVal.Index(i).Interface() currentField := valArray.Index(i) - fieldName := fmt.Sprintf("%s[%d]", name, i) + fieldName := name + "[" + strconv.Itoa(i) + "]" if err := d.decode(fieldName, currentData, currentField); err != nil { errors = appendErrors(errors, err) } @@ -962,13 +1215,23 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) // Not the most efficient way to do this but we can optimize later if // we want to. To convert from struct to struct we go to map first // as an intermediary. - m := make(map[string]interface{}) - mval := reflect.Indirect(reflect.ValueOf(&m)) - if err := d.decodeMapFromStruct(name, dataVal, mval, mval); err != nil { + + // Make a new map to hold our result + mapType := reflect.TypeOf((map[string]interface{})(nil)) + mval := reflect.MakeMap(mapType) + + // Creating a pointer to a map so that other methods can completely + // overwrite the map if need be (looking at you decodeMapFromMap). The + // indirection allows the underlying map to be settable (CanSet() == true) + // where as reflect.MakeMap returns an unsettable map. + addrVal := reflect.New(mval.Type()) + + reflect.Indirect(addrVal).Set(mval) + if err := d.decodeMapFromStruct(name, dataVal, reflect.Indirect(addrVal), mval); err != nil { return err } - result := d.decodeStructFromMap(name, mval, val) + result := d.decodeStructFromMap(name, reflect.Indirect(addrVal), val) return result default: @@ -1005,6 +1268,11 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e field reflect.StructField val reflect.Value } + + // remainField is set to a valid field set with the "remain" tag if + // we are keeping track of remaining values. + var remainField *field + fields := []field{} for len(structs) > 0 { structVal := structs[0] @@ -1014,30 +1282,47 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e for i := 0; i < structType.NumField(); i++ { fieldType := structType.Field(i) - fieldKind := fieldType.Type.Kind() + fieldVal := structVal.Field(i) + if fieldVal.Kind() == reflect.Ptr && fieldVal.Elem().Kind() == reflect.Struct { + // Handle embedded struct pointers as embedded structs. + fieldVal = fieldVal.Elem() + } // If "squash" is specified in the tag, we squash the field down. 
- squash := false + squash := d.config.Squash && fieldVal.Kind() == reflect.Struct && fieldType.Anonymous + remain := false + + // We always parse the tags cause we're looking for other tags too tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",") for _, tag := range tagParts[1:] { if tag == "squash" { squash = true break } + + if tag == "remain" { + remain = true + break + } } if squash { - if fieldKind != reflect.Struct { + if fieldVal.Kind() != reflect.Struct { errors = appendErrors(errors, - fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldKind)) + fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldVal.Kind())) } else { - structs = append(structs, structVal.FieldByName(fieldType.Name)) + structs = append(structs, fieldVal) } continue } - // Normal struct field, store it away - fields = append(fields, field{fieldType, structVal.Field(i)}) + // Build our field + if remain { + remainField = &field{fieldType, fieldVal} + } else { + // Normal struct field, store it away + fields = append(fields, field{fieldType, fieldVal}) + } } } @@ -1064,7 +1349,7 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e continue } - if strings.EqualFold(mK, fieldName) { + if d.config.MatchName(mK, fieldName) { rawMapKey = dataValKey rawMapVal = dataVal.MapIndex(dataValKey) break @@ -1078,9 +1363,6 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e } } - // Delete the key we're using from the unused map so we stop tracking - delete(dataValKeysUnused, rawMapKey.Interface()) - if !fieldValue.IsValid() { // This should never happen panic("field is not valid") @@ -1092,10 +1374,13 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e continue } + // Delete the key we're using from the unused map so we stop tracking + delete(dataValKeysUnused, rawMapKey.Interface()) + // If the name is empty string, then we're at the root, and we // don't dot-join the fields. if name != "" { - fieldName = fmt.Sprintf("%s.%s", name, fieldName) + fieldName = name + "." + fieldName } if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil { @@ -1103,6 +1388,25 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e } } + // If we have a "remain"-tagged field and we have unused keys then + // we put the unused keys directly into the remain field. + if remainField != nil && len(dataValKeysUnused) > 0 { + // Build a map of only the unused values + remain := map[interface{}]interface{}{} + for key := range dataValKeysUnused { + remain[key] = dataVal.MapIndex(reflect.ValueOf(key)).Interface() + } + + // Decode it as-if we were just decoding this map onto our map. + if err := d.decodeMap(name, remain, remainField.val); err != nil { + errors = appendErrors(errors, err) + } + + // Set the map to nil so we have none so that the next check will + // not error (ErrorUnused) + dataValKeysUnused = nil + } + if d.config.ErrorUnused && len(dataValKeysUnused) > 0 { keys := make([]string, 0, len(dataValKeysUnused)) for rawKey := range dataValKeysUnused { @@ -1123,7 +1427,7 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e for rawKey := range dataValKeysUnused { key := rawKey.(string) if name != "" { - key = fmt.Sprintf("%s.%s", name, key) + key = name + "." 
+ key } d.config.Metadata.Unused = append(d.config.Metadata.Unused, key) @@ -1133,6 +1437,24 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e return nil } +func isEmptyValue(v reflect.Value) bool { + switch getKind(v) { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} + func getKind(val reflect.Value) reflect.Kind { kind := val.Kind() diff --git a/vendor/github.com/mitchellh/reflectwalk/.travis.yml b/vendor/github.com/mitchellh/reflectwalk/.travis.yml new file mode 100644 index 0000000000..4f2ee4d973 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/.travis.yml @@ -0,0 +1 @@ +language: go diff --git a/vendor/github.com/mitchellh/reflectwalk/LICENSE b/vendor/github.com/mitchellh/reflectwalk/LICENSE new file mode 100644 index 0000000000..f9c841a51e --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/reflectwalk/README.md b/vendor/github.com/mitchellh/reflectwalk/README.md new file mode 100644 index 0000000000..ac82cd2e15 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/README.md @@ -0,0 +1,6 @@ +# reflectwalk + +reflectwalk is a Go library for "walking" a value in Go using reflection, +in the same way a directory tree can be "walked" on the filesystem. Walking +a complex structure can allow you to do manipulations on unknown structures +such as those decoded from JSON. 
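Editor's note (not part of the vendored files): the reflectwalk package added below works by having the caller implement one or more walker interfaces (PrimitiveWalker, MapWalker, StructWalker, ...) and hand the value plus the walker to reflectwalk.Walk. A minimal sketch of a hypothetical stringCollector walker that gathers every string in an arbitrary value, using only the interfaces vendored in this diff:

```go
package main

import (
	"fmt"
	"reflect"

	"github.com/mitchellh/reflectwalk"
)

// stringCollector implements reflectwalk.PrimitiveWalker and records every
// string it encounters while Walk traverses an arbitrary value.
type stringCollector struct {
	strings []string
}

func (c *stringCollector) Primitive(v reflect.Value) error {
	// Map values and slice elements typed as interface{} arrive as
	// interface-kind values, so unwrap them before inspecting the kind.
	if v.Kind() == reflect.Interface {
		v = v.Elem()
	}
	if v.Kind() == reflect.String {
		c.strings = append(c.strings, v.String())
	}
	return nil
}

func main() {
	data := map[string]interface{}{
		"name": "bob",
		"tags": []interface{}{"validator", "archive"},
	}

	var c stringCollector
	if err := reflectwalk.Walk(data, &c); err != nil {
		panic(err)
	}
	// Keys and values are both walked; map order varies between runs.
	fmt.Println(c.strings)
}
```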
diff --git a/vendor/github.com/mitchellh/reflectwalk/go.mod b/vendor/github.com/mitchellh/reflectwalk/go.mod new file mode 100644 index 0000000000..52bb7c469e --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/go.mod @@ -0,0 +1 @@ +module github.com/mitchellh/reflectwalk diff --git a/vendor/github.com/mitchellh/reflectwalk/location.go b/vendor/github.com/mitchellh/reflectwalk/location.go new file mode 100644 index 0000000000..6a7f176117 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/location.go @@ -0,0 +1,19 @@ +package reflectwalk + +//go:generate stringer -type=Location location.go + +type Location uint + +const ( + None Location = iota + Map + MapKey + MapValue + Slice + SliceElem + Array + ArrayElem + Struct + StructField + WalkLoc +) diff --git a/vendor/github.com/mitchellh/reflectwalk/location_string.go b/vendor/github.com/mitchellh/reflectwalk/location_string.go new file mode 100644 index 0000000000..70760cf4c7 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/location_string.go @@ -0,0 +1,16 @@ +// Code generated by "stringer -type=Location location.go"; DO NOT EDIT. + +package reflectwalk + +import "fmt" + +const _Location_name = "NoneMapMapKeyMapValueSliceSliceElemArrayArrayElemStructStructFieldWalkLoc" + +var _Location_index = [...]uint8{0, 4, 7, 13, 21, 26, 35, 40, 49, 55, 66, 73} + +func (i Location) String() string { + if i >= Location(len(_Location_index)-1) { + return fmt.Sprintf("Location(%d)", i) + } + return _Location_name[_Location_index[i]:_Location_index[i+1]] +} diff --git a/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go new file mode 100644 index 0000000000..d7ab7b6d78 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go @@ -0,0 +1,401 @@ +// reflectwalk is a package that allows you to "walk" complex structures +// similar to how you may "walk" a filesystem: visiting every element one +// by one and calling callback functions allowing you to handle and manipulate +// those elements. +package reflectwalk + +import ( + "errors" + "reflect" +) + +// PrimitiveWalker implementations are able to handle primitive values +// within complex structures. Primitive values are numbers, strings, +// booleans, funcs, chans. +// +// These primitive values are often members of more complex +// structures (slices, maps, etc.) that are walkable by other interfaces. +type PrimitiveWalker interface { + Primitive(reflect.Value) error +} + +// InterfaceWalker implementations are able to handle interface values as they +// are encountered during the walk. +type InterfaceWalker interface { + Interface(reflect.Value) error +} + +// MapWalker implementations are able to handle individual elements +// found within a map structure. +type MapWalker interface { + Map(m reflect.Value) error + MapElem(m, k, v reflect.Value) error +} + +// SliceWalker implementations are able to handle slice elements found +// within complex structures. +type SliceWalker interface { + Slice(reflect.Value) error + SliceElem(int, reflect.Value) error +} + +// ArrayWalker implementations are able to handle array elements found +// within complex structures. +type ArrayWalker interface { + Array(reflect.Value) error + ArrayElem(int, reflect.Value) error +} + +// StructWalker is an interface that has methods that are called for +// structs when a Walk is done. 
+type StructWalker interface { + Struct(reflect.Value) error + StructField(reflect.StructField, reflect.Value) error +} + +// EnterExitWalker implementations are notified before and after +// they walk deeper into complex structures (into struct fields, +// into slice elements, etc.) +type EnterExitWalker interface { + Enter(Location) error + Exit(Location) error +} + +// PointerWalker implementations are notified when the value they're +// walking is a pointer or not. Pointer is called for _every_ value whether +// it is a pointer or not. +type PointerWalker interface { + PointerEnter(bool) error + PointerExit(bool) error +} + +// SkipEntry can be returned from walk functions to skip walking +// the value of this field. This is only valid in the following functions: +// +// - Struct: skips all fields from being walked +// - StructField: skips walking the struct value +// +var SkipEntry = errors.New("skip this entry") + +// Walk takes an arbitrary value and an interface and traverses the +// value, calling callbacks on the interface if they are supported. +// The interface should implement one or more of the walker interfaces +// in this package, such as PrimitiveWalker, StructWalker, etc. +func Walk(data, walker interface{}) (err error) { + v := reflect.ValueOf(data) + ew, ok := walker.(EnterExitWalker) + if ok { + err = ew.Enter(WalkLoc) + } + + if err == nil { + err = walk(v, walker) + } + + if ok && err == nil { + err = ew.Exit(WalkLoc) + } + + return +} + +func walk(v reflect.Value, w interface{}) (err error) { + // Determine if we're receiving a pointer and if so notify the walker. + // The logic here is convoluted but very important (tests will fail if + // almost any part is changed). I will try to explain here. + // + // First, we check if the value is an interface, if so, we really need + // to check the interface's VALUE to see whether it is a pointer. + // + // Check whether the value is then a pointer. If so, then set pointer + // to true to notify the user. + // + // If we still have a pointer or an interface after the indirections, then + // we unwrap another level + // + // At this time, we also set "v" to be the dereferenced value. This is + // because once we've unwrapped the pointer we want to use that value. + pointer := false + pointerV := v + + for { + if pointerV.Kind() == reflect.Interface { + if iw, ok := w.(InterfaceWalker); ok { + if err = iw.Interface(pointerV); err != nil { + return + } + } + + pointerV = pointerV.Elem() + } + + if pointerV.Kind() == reflect.Ptr { + pointer = true + v = reflect.Indirect(pointerV) + } + if pw, ok := w.(PointerWalker); ok { + if err = pw.PointerEnter(pointer); err != nil { + return + } + + defer func(pointer bool) { + if err != nil { + return + } + + err = pw.PointerExit(pointer) + }(pointer) + } + + if pointer { + pointerV = v + } + pointer = false + + // If we still have a pointer or interface we have to indirect another level. + switch pointerV.Kind() { + case reflect.Ptr, reflect.Interface: + continue + } + break + } + + // We preserve the original value here because if it is an interface + // type, we want to pass that directly into the walkPrimitive, so that + // we can set it. 
+ originalV := v + if v.Kind() == reflect.Interface { + v = v.Elem() + } + + k := v.Kind() + if k >= reflect.Int && k <= reflect.Complex128 { + k = reflect.Int + } + + switch k { + // Primitives + case reflect.Bool, reflect.Chan, reflect.Func, reflect.Int, reflect.String, reflect.Invalid: + err = walkPrimitive(originalV, w) + return + case reflect.Map: + err = walkMap(v, w) + return + case reflect.Slice: + err = walkSlice(v, w) + return + case reflect.Struct: + err = walkStruct(v, w) + return + case reflect.Array: + err = walkArray(v, w) + return + default: + panic("unsupported type: " + k.String()) + } +} + +func walkMap(v reflect.Value, w interface{}) error { + ew, ewok := w.(EnterExitWalker) + if ewok { + ew.Enter(Map) + } + + if mw, ok := w.(MapWalker); ok { + if err := mw.Map(v); err != nil { + return err + } + } + + for _, k := range v.MapKeys() { + kv := v.MapIndex(k) + + if mw, ok := w.(MapWalker); ok { + if err := mw.MapElem(v, k, kv); err != nil { + return err + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(MapKey) + } + + if err := walk(k, w); err != nil { + return err + } + + if ok { + ew.Exit(MapKey) + ew.Enter(MapValue) + } + + if err := walk(kv, w); err != nil { + return err + } + + if ok { + ew.Exit(MapValue) + } + } + + if ewok { + ew.Exit(Map) + } + + return nil +} + +func walkPrimitive(v reflect.Value, w interface{}) error { + if pw, ok := w.(PrimitiveWalker); ok { + return pw.Primitive(v) + } + + return nil +} + +func walkSlice(v reflect.Value, w interface{}) (err error) { + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(Slice) + } + + if sw, ok := w.(SliceWalker); ok { + if err := sw.Slice(v); err != nil { + return err + } + } + + for i := 0; i < v.Len(); i++ { + elem := v.Index(i) + + if sw, ok := w.(SliceWalker); ok { + if err := sw.SliceElem(i, elem); err != nil { + return err + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(SliceElem) + } + + if err := walk(elem, w); err != nil { + return err + } + + if ok { + ew.Exit(SliceElem) + } + } + + ew, ok = w.(EnterExitWalker) + if ok { + ew.Exit(Slice) + } + + return nil +} + +func walkArray(v reflect.Value, w interface{}) (err error) { + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(Array) + } + + if aw, ok := w.(ArrayWalker); ok { + if err := aw.Array(v); err != nil { + return err + } + } + + for i := 0; i < v.Len(); i++ { + elem := v.Index(i) + + if aw, ok := w.(ArrayWalker); ok { + if err := aw.ArrayElem(i, elem); err != nil { + return err + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(ArrayElem) + } + + if err := walk(elem, w); err != nil { + return err + } + + if ok { + ew.Exit(ArrayElem) + } + } + + ew, ok = w.(EnterExitWalker) + if ok { + ew.Exit(Array) + } + + return nil +} + +func walkStruct(v reflect.Value, w interface{}) (err error) { + ew, ewok := w.(EnterExitWalker) + if ewok { + ew.Enter(Struct) + } + + skip := false + if sw, ok := w.(StructWalker); ok { + err = sw.Struct(v) + if err == SkipEntry { + skip = true + err = nil + } + if err != nil { + return + } + } + + if !skip { + vt := v.Type() + for i := 0; i < vt.NumField(); i++ { + sf := vt.Field(i) + f := v.FieldByIndex([]int{i}) + + if sw, ok := w.(StructWalker); ok { + err = sw.StructField(sf, f) + + // SkipEntry just pretends this field doesn't even exist + if err == SkipEntry { + continue + } + + if err != nil { + return + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(StructField) + } + + err = walk(f, w) + if err != nil { + return + } + + if ok { + ew.Exit(StructField) + } + } + } 
+ + if ewok { + ew.Exit(Struct) + } + + return nil +} diff --git a/vendor/github.com/oklog/run/.gitignore b/vendor/github.com/oklog/run/.gitignore new file mode 100644 index 0000000000..a1338d6851 --- /dev/null +++ b/vendor/github.com/oklog/run/.gitignore @@ -0,0 +1,14 @@ +# Binaries for programs and plugins +*.exe +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 +.glide/ diff --git a/vendor/github.com/oklog/run/.travis.yml b/vendor/github.com/oklog/run/.travis.yml new file mode 100644 index 0000000000..362bdd41c0 --- /dev/null +++ b/vendor/github.com/oklog/run/.travis.yml @@ -0,0 +1,12 @@ +language: go +sudo: false +go: + - 1.x + - tip +install: + - go get -v github.com/golang/lint/golint + - go build ./... +script: + - go vet ./... + - $HOME/gopath/bin/golint . + - go test -v -race ./... diff --git a/vendor/github.com/oklog/run/LICENSE b/vendor/github.com/oklog/run/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/github.com/oklog/run/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/oklog/run/README.md b/vendor/github.com/oklog/run/README.md new file mode 100644 index 0000000000..a7228cd9a3 --- /dev/null +++ b/vendor/github.com/oklog/run/README.md @@ -0,0 +1,73 @@ +# run + +[![GoDoc](https://godoc.org/github.com/oklog/run?status.svg)](https://godoc.org/github.com/oklog/run) +[![Build Status](https://travis-ci.org/oklog/run.svg?branch=master)](https://travis-ci.org/oklog/run) +[![Go Report Card](https://goreportcard.com/badge/github.com/oklog/run)](https://goreportcard.com/report/github.com/oklog/run) +[![Apache 2 licensed](https://img.shields.io/badge/license-Apache2-blue.svg)](https://raw.githubusercontent.com/oklog/run/master/LICENSE) + +run.Group is a universal mechanism to manage goroutine lifecycles. + +Create a zero-value run.Group, and then add actors to it. 
Actors are defined as +a pair of functions: an **execute** function, which should run synchronously; +and an **interrupt** function, which, when invoked, should cause the execute +function to return. Finally, invoke Run, which blocks until the first actor +returns. This general-purpose API allows callers to model pretty much any +runnable task, and achieve well-defined lifecycle semantics for the group. + +run.Group was written to manage component lifecycles in func main for +[OK Log](https://github.com/oklog/oklog). +But it's useful in any circumstance where you need to orchestrate multiple +goroutines as a unit whole. +[Click here](https://www.youtube.com/watch?v=LHe1Cb_Ud_M&t=15m45s) to see a +video of a talk where run.Group is described. + +## Examples + +### context.Context + +```go +ctx, cancel := context.WithCancel(context.Background()) +g.Add(func() error { + return myProcess(ctx, ...) +}, func(error) { + cancel() +}) +``` + +### net.Listener + +```go +ln, _ := net.Listen("tcp", ":8080") +g.Add(func() error { + return http.Serve(ln, nil) +}, func(error) { + ln.Close() +}) +``` + +### io.ReadCloser + +```go +var conn io.ReadCloser = ... +g.Add(func() error { + s := bufio.NewScanner(conn) + for s.Scan() { + println(s.Text()) + } + return s.Err() +}, func(error) { + conn.Close() +}) +``` + +## Comparisons + +Package run is somewhat similar to package +[errgroup](https://godoc.org/golang.org/x/sync/errgroup), +except it doesn't require actor goroutines to understand context semantics. + +It's somewhat similar to package +[tomb.v1](https://godoc.org/gopkg.in/tomb.v1) or +[tomb.v2](https://godoc.org/gopkg.in/tomb.v2), +except it has a much smaller API surface, delegating e.g. staged shutdown of +goroutines to the caller. diff --git a/vendor/github.com/oklog/run/group.go b/vendor/github.com/oklog/run/group.go new file mode 100644 index 0000000000..832d47dd16 --- /dev/null +++ b/vendor/github.com/oklog/run/group.go @@ -0,0 +1,62 @@ +// Package run implements an actor-runner with deterministic teardown. It is +// somewhat similar to package errgroup, except it does not require actor +// goroutines to understand context semantics. This makes it suitable for use in +// more circumstances; for example, goroutines which are handling connections +// from net.Listeners, or scanning input from a closable io.Reader. +package run + +// Group collects actors (functions) and runs them concurrently. +// When one actor (function) returns, all actors are interrupted. +// The zero value of a Group is useful. +type Group struct { + actors []actor +} + +// Add an actor (function) to the group. Each actor must be pre-emptable by an +// interrupt function. That is, if interrupt is invoked, execute should return. +// Also, it must be safe to call interrupt even after execute has returned. +// +// The first actor (function) to return interrupts all running actors. +// The error is passed to the interrupt functions, and is returned by Run. +func (g *Group) Add(execute func() error, interrupt func(error)) { + g.actors = append(g.actors, actor{execute, interrupt}) +} + +// Run all actors (functions) concurrently. +// When the first actor returns, all others are interrupted. +// Run only returns when all actors have exited. +// Run returns the error returned by the first exiting actor. +func (g *Group) Run() error { + if len(g.actors) == 0 { + return nil + } + + // Run each actor. 
+ errors := make(chan error, len(g.actors)) + for _, a := range g.actors { + go func(a actor) { + errors <- a.execute() + }(a) + } + + // Wait for the first actor to stop. + err := <-errors + + // Signal all actors to stop. + for _, a := range g.actors { + a.interrupt(err) + } + + // Wait for all actors to stop. + for i := 1; i < cap(errors); i++ { + <-errors + } + + // Return the original error. + return err +} + +type actor struct { + execute func() error + interrupt func(error) +} diff --git a/vendor/github.com/pierrec/lz4/.gitignore b/vendor/github.com/pierrec/lz4/.gitignore new file mode 100644 index 0000000000..5e98735047 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/.gitignore @@ -0,0 +1,34 @@ +# Created by https://www.gitignore.io/api/macos + +### macOS ### +*.DS_Store +.AppleDouble +.LSOverride + +# Icon must end with two \r +Icon + + +# Thumbnails +._* + +# Files that might appear in the root of a volume +.DocumentRevisions-V100 +.fseventsd +.Spotlight-V100 +.TemporaryItems +.Trashes +.VolumeIcon.icns +.com.apple.timemachine.donotpresent + +# Directories potentially created on remote AFP share +.AppleDB +.AppleDesktop +Network Trash Folder +Temporary Items +.apdisk + +# End of https://www.gitignore.io/api/macos + +cmd/*/*exe +.idea \ No newline at end of file diff --git a/vendor/github.com/pierrec/lz4/.travis.yml b/vendor/github.com/pierrec/lz4/.travis.yml new file mode 100644 index 0000000000..fd6c6db713 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/.travis.yml @@ -0,0 +1,24 @@ +language: go + +env: + - GO111MODULE=off + +go: + - 1.9.x + - 1.10.x + - 1.11.x + - 1.12.x + - master + +matrix: + fast_finish: true + allow_failures: + - go: master + +sudo: false + +script: + - go test -v -cpu=2 + - go test -v -cpu=2 -race + - go test -v -cpu=2 -tags noasm + - go test -v -cpu=2 -race -tags noasm diff --git a/vendor/github.com/pierrec/lz4/LICENSE b/vendor/github.com/pierrec/lz4/LICENSE new file mode 100644 index 0000000000..bd899d8353 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2015, Pierre Curto +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of xxHash nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
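Editor's note (not part of the vendored files): besides the streaming Reader/Writer shown in its README, the pierrec/lz4 block.go vendored further down exposes a block-level API through CompressBlockBound, CompressBlock and UncompressBlock. A minimal round-trip sketch of how that API is typically driven:

```go
package main

import (
	"fmt"

	"github.com/pierrec/lz4"
)

func main() {
	src := []byte("hello hello hello hello hello hello world")

	// A destination of CompressBlockBound(len(src)) bytes is always large
	// enough; passing a nil hash table lets CompressBlock allocate its own.
	// With a smaller destination, a returned size of 0 and no error means
	// the input is incompressible.
	dst := make([]byte, lz4.CompressBlockBound(len(src)))
	n, err := lz4.CompressBlock(src, dst, nil)
	if err != nil {
		panic(err)
	}

	// Round-trip: the destination must be sized for the uncompressed data.
	out := make([]byte, len(src))
	m, err := lz4.UncompressBlock(dst[:n], out)
	if err != nil {
		panic(err)
	}
	fmt.Printf("compressed %d -> %d bytes, uncompressed back to %d bytes\n", len(src), n, m)
}
```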
+ diff --git a/vendor/github.com/pierrec/lz4/README.md b/vendor/github.com/pierrec/lz4/README.md new file mode 100644 index 0000000000..4ee388e81b --- /dev/null +++ b/vendor/github.com/pierrec/lz4/README.md @@ -0,0 +1,90 @@ +# lz4 : LZ4 compression in pure Go + +[![GoDoc](https://godoc.org/github.com/pierrec/lz4?status.svg)](https://godoc.org/github.com/pierrec/lz4) +[![Build Status](https://travis-ci.org/pierrec/lz4.svg?branch=master)](https://travis-ci.org/pierrec/lz4) +[![Go Report Card](https://goreportcard.com/badge/github.com/pierrec/lz4)](https://goreportcard.com/report/github.com/pierrec/lz4) +[![GitHub tag (latest SemVer)](https://img.shields.io/github/tag/pierrec/lz4.svg?style=social)](https://github.com/pierrec/lz4/tags) + +## Overview + +This package provides a streaming interface to [LZ4 data streams](http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html) as well as low level compress and uncompress functions for LZ4 data blocks. +The implementation is based on the reference C [one](https://github.com/lz4/lz4). + +## Install + +Assuming you have the go toolchain installed: + +``` +go get github.com/pierrec/lz4 +``` + +There is a command line interface tool to compress and decompress LZ4 files. + +``` +go install github.com/pierrec/lz4/cmd/lz4c +``` + +Usage + +``` +Usage of lz4c: + -version + print the program version + +Subcommands: +Compress the given files or from stdin to stdout. +compress [arguments] [ ...] + -bc + enable block checksum + -l int + compression level (0=fastest) + -sc + disable stream checksum + -size string + block max size [64K,256K,1M,4M] (default "4M") + +Uncompress the given files or from stdin to stdout. +uncompress [arguments] [ ...] + +``` + + +## Example + +``` +// Compress and uncompress an input string. +s := "hello world" +r := strings.NewReader(s) + +// The pipe will uncompress the data from the writer. +pr, pw := io.Pipe() +zw := lz4.NewWriter(pw) +zr := lz4.NewReader(pr) + +go func() { + // Compress the input string. + _, _ = io.Copy(zw, r) + _ = zw.Close() // Make sure the writer is closed + _ = pw.Close() // Terminate the pipe +}() + +_, _ = io.Copy(os.Stdout, zr) + +// Output: +// hello world +``` + +## Contributing + +Contributions are very welcome for bug fixing, performance improvements...! + +- Open an issue with a proper description +- Send a pull request with appropriate test case(s) + +## Contributors + +Thanks to all [contributors](https://github.com/pierrec/lz4/graphs/contributors) so far! + +Special thanks to [@Zariel](https://github.com/Zariel) for his asm implementation of the decoder. + +Special thanks to [@klauspost](https://github.com/klauspost) for his work on optimizing the code. diff --git a/vendor/github.com/pierrec/lz4/block.go b/vendor/github.com/pierrec/lz4/block.go new file mode 100644 index 0000000000..664d9be580 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/block.go @@ -0,0 +1,413 @@ +package lz4 + +import ( + "encoding/binary" + "math/bits" + "sync" +) + +// blockHash hashes the lower 6 bytes into a value < htSize. +func blockHash(x uint64) uint32 { + const prime6bytes = 227718039650203 + return uint32(((x << (64 - 48)) * prime6bytes) >> (64 - hashLog)) +} + +// CompressBlockBound returns the maximum size of a given buffer of size n, when not compressible. +func CompressBlockBound(n int) int { + return n + n/255 + 16 +} + +// UncompressBlock uncompresses the source buffer into the destination one, +// and returns the uncompressed size. 
+// +// The destination buffer must be sized appropriately. +// +// An error is returned if the source data is invalid or the destination buffer is too small. +func UncompressBlock(src, dst []byte) (int, error) { + if len(src) == 0 { + return 0, nil + } + if di := decodeBlock(dst, src); di >= 0 { + return di, nil + } + return 0, ErrInvalidSourceShortBuffer +} + +// CompressBlock compresses the source buffer into the destination one. +// This is the fast version of LZ4 compression and also the default one. +// +// The argument hashTable is scratch space for a hash table used by the +// compressor. If provided, it should have length at least 1<<16. If it is +// shorter (or nil), CompressBlock allocates its own hash table. +// +// The size of the compressed data is returned. +// +// If the destination buffer size is lower than CompressBlockBound and +// the compressed size is 0 and no error, then the data is incompressible. +// +// An error is returned if the destination buffer is too small. +func CompressBlock(src, dst []byte, hashTable []int) (_ int, err error) { + defer recoverBlock(&err) + + // Return 0, nil only if the destination buffer size is < CompressBlockBound. + isNotCompressible := len(dst) < CompressBlockBound(len(src)) + + // adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible. + // This significantly speeds up incompressible data and usually has very small impact on compression. + // bytes to skip = 1 + (bytes since last match >> adaptSkipLog) + const adaptSkipLog = 7 + if len(hashTable) < htSize { + htIface := htPool.Get() + defer htPool.Put(htIface) + hashTable = (*(htIface).(*[htSize]int))[:] + } + // Prove to the compiler the table has at least htSize elements. + // The compiler can see that "uint32() >> hashShift" cannot be out of bounds. + hashTable = hashTable[:htSize] + + // si: Current position of the search. + // anchor: Position of the current literals. + var si, di, anchor int + sn := len(src) - mfLimit + if sn <= 0 { + goto lastLiterals + } + + // Fast scan strategy: the hash table only stores the last 4 bytes sequences. + for si < sn { + // Hash the next 6 bytes (sequence)... + match := binary.LittleEndian.Uint64(src[si:]) + h := blockHash(match) + h2 := blockHash(match >> 8) + + // We check a match at s, s+1 and s+2 and pick the first one we get. + // Checking 3 only requires us to load the source one. + ref := hashTable[h] + ref2 := hashTable[h2] + hashTable[h] = si + hashTable[h2] = si + 1 + offset := si - ref + + // If offset <= 0 we got an old entry in the hash table. + if offset <= 0 || offset >= winSize || // Out of window. + uint32(match) != binary.LittleEndian.Uint32(src[ref:]) { // Hash collision on different matches. + // No match. Start calculating another hash. + // The processor can usually do this out-of-order. + h = blockHash(match >> 16) + ref = hashTable[h] + + // Check the second match at si+1 + si += 1 + offset = si - ref2 + + if offset <= 0 || offset >= winSize || + uint32(match>>8) != binary.LittleEndian.Uint32(src[ref2:]) { + // No match. Check the third match at si+2 + si += 1 + offset = si - ref + hashTable[h] = si + + if offset <= 0 || offset >= winSize || + uint32(match>>16) != binary.LittleEndian.Uint32(src[ref:]) { + // Skip one extra byte (at si+3) before we check 3 matches again. + si += 2 + (si-anchor)>>adaptSkipLog + continue + } + } + } + + // Match found. + lLen := si - anchor // Literal length. + // We already matched 4 bytes. 
+ mLen := 4 + + // Extend backwards if we can, reducing literals. + tOff := si - offset - 1 + for lLen > 0 && tOff >= 0 && src[si-1] == src[tOff] { + si-- + tOff-- + lLen-- + mLen++ + } + + // Add the match length, so we continue search at the end. + // Use mLen to store the offset base. + si, mLen = si+mLen, si+minMatch + + // Find the longest match by looking by batches of 8 bytes. + for si+8 < sn { + x := binary.LittleEndian.Uint64(src[si:]) ^ binary.LittleEndian.Uint64(src[si-offset:]) + if x == 0 { + si += 8 + } else { + // Stop is first non-zero byte. + si += bits.TrailingZeros64(x) >> 3 + break + } + } + + mLen = si - mLen + if mLen < 0xF { + dst[di] = byte(mLen) + } else { + dst[di] = 0xF + } + + // Encode literals length. + if lLen < 0xF { + dst[di] |= byte(lLen << 4) + } else { + dst[di] |= 0xF0 + di++ + l := lLen - 0xF + for ; l >= 0xFF; l -= 0xFF { + dst[di] = 0xFF + di++ + } + dst[di] = byte(l) + } + di++ + + // Literals. + copy(dst[di:di+lLen], src[anchor:anchor+lLen]) + di += lLen + 2 + anchor = si + + // Encode offset. + _ = dst[di] // Bound check elimination. + dst[di-2], dst[di-1] = byte(offset), byte(offset>>8) + + // Encode match length part 2. + if mLen >= 0xF { + for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF { + dst[di] = 0xFF + di++ + } + dst[di] = byte(mLen) + di++ + } + // Check if we can load next values. + if si >= sn { + break + } + // Hash match end-2 + h = blockHash(binary.LittleEndian.Uint64(src[si-2:])) + hashTable[h] = si - 2 + } + +lastLiterals: + if isNotCompressible && anchor == 0 { + // Incompressible. + return 0, nil + } + + // Last literals. + lLen := len(src) - anchor + if lLen < 0xF { + dst[di] = byte(lLen << 4) + } else { + dst[di] = 0xF0 + di++ + for lLen -= 0xF; lLen >= 0xFF; lLen -= 0xFF { + dst[di] = 0xFF + di++ + } + dst[di] = byte(lLen) + } + di++ + + // Write the last literals. + if isNotCompressible && di >= anchor { + // Incompressible. + return 0, nil + } + di += copy(dst[di:di+len(src)-anchor], src[anchor:]) + return di, nil +} + +// Pool of hash tables for CompressBlock. +var htPool = sync.Pool{ + New: func() interface{} { + return new([htSize]int) + }, +} + +// blockHash hashes 4 bytes into a value < winSize. +func blockHashHC(x uint32) uint32 { + const hasher uint32 = 2654435761 // Knuth multiplicative hash. + return x * hasher >> (32 - winSizeLog) +} + +// CompressBlockHC compresses the source buffer src into the destination dst +// with max search depth (use 0 or negative value for no max). +// +// CompressBlockHC compression ratio is better than CompressBlock but it is also slower. +// +// The size of the compressed data is returned. +// +// If the destination buffer size is lower than CompressBlockBound and +// the compressed size is 0 and no error, then the data is incompressible. +// +// An error is returned if the destination buffer is too small. +func CompressBlockHC(src, dst []byte, depth int) (_ int, err error) { + defer recoverBlock(&err) + + // Return 0, nil only if the destination buffer size is < CompressBlockBound. + isNotCompressible := len(dst) < CompressBlockBound(len(src)) + + // adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible. + // This significantly speeds up incompressible data and usually has very small impact on compression. 
+ // bytes to skip = 1 + (bytes since last match >> adaptSkipLog) + const adaptSkipLog = 7 + + var si, di, anchor int + + // hashTable: stores the last position found for a given hash + // chainTable: stores previous positions for a given hash + var hashTable, chainTable [winSize]int + + if depth <= 0 { + depth = winSize + } + + sn := len(src) - mfLimit + if sn <= 0 { + goto lastLiterals + } + + for si < sn { + // Hash the next 4 bytes (sequence). + match := binary.LittleEndian.Uint32(src[si:]) + h := blockHashHC(match) + + // Follow the chain until out of window and give the longest match. + mLen := 0 + offset := 0 + for next, try := hashTable[h], depth; try > 0 && next > 0 && si-next < winSize; next = chainTable[next&winMask] { + // The first (mLen==0) or next byte (mLen>=minMatch) at current match length + // must match to improve on the match length. + if src[next+mLen] != src[si+mLen] { + continue + } + ml := 0 + // Compare the current position with a previous with the same hash. + for ml < sn-si { + x := binary.LittleEndian.Uint64(src[next+ml:]) ^ binary.LittleEndian.Uint64(src[si+ml:]) + if x == 0 { + ml += 8 + } else { + // Stop is first non-zero byte. + ml += bits.TrailingZeros64(x) >> 3 + break + } + } + if ml < minMatch || ml <= mLen { + // Match too small (>adaptSkipLog + continue + } + + // Match found. + // Update hash/chain tables with overlapping bytes: + // si already hashed, add everything from si+1 up to the match length. + winStart := si + 1 + if ws := si + mLen - winSize; ws > winStart { + winStart = ws + } + for si, ml := winStart, si+mLen; si < ml; { + match >>= 8 + match |= uint32(src[si+3]) << 24 + h := blockHashHC(match) + chainTable[si&winMask] = hashTable[h] + hashTable[h] = si + si++ + } + + lLen := si - anchor + si += mLen + mLen -= minMatch // Match length does not include minMatch. + + if mLen < 0xF { + dst[di] = byte(mLen) + } else { + dst[di] = 0xF + } + + // Encode literals length. + if lLen < 0xF { + dst[di] |= byte(lLen << 4) + } else { + dst[di] |= 0xF0 + di++ + l := lLen - 0xF + for ; l >= 0xFF; l -= 0xFF { + dst[di] = 0xFF + di++ + } + dst[di] = byte(l) + } + di++ + + // Literals. + copy(dst[di:di+lLen], src[anchor:anchor+lLen]) + di += lLen + anchor = si + + // Encode offset. + di += 2 + dst[di-2], dst[di-1] = byte(offset), byte(offset>>8) + + // Encode match length part 2. + if mLen >= 0xF { + for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF { + dst[di] = 0xFF + di++ + } + dst[di] = byte(mLen) + di++ + } + } + + if isNotCompressible && anchor == 0 { + // Incompressible. + return 0, nil + } + + // Last literals. +lastLiterals: + lLen := len(src) - anchor + if lLen < 0xF { + dst[di] = byte(lLen << 4) + } else { + dst[di] = 0xF0 + di++ + lLen -= 0xF + for ; lLen >= 0xFF; lLen -= 0xFF { + dst[di] = 0xFF + di++ + } + dst[di] = byte(lLen) + } + di++ + + // Write the last literals. + if isNotCompressible && di >= anchor { + // Incompressible. 
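+		// The output produced so far is already at least as large as the input it
+		// covers, so the block is reported as incompressible.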
+ return 0, nil + } + di += copy(dst[di:di+len(src)-anchor], src[anchor:]) + return di, nil +} diff --git a/vendor/github.com/pierrec/lz4/debug.go b/vendor/github.com/pierrec/lz4/debug.go new file mode 100644 index 0000000000..bc5e78d40f --- /dev/null +++ b/vendor/github.com/pierrec/lz4/debug.go @@ -0,0 +1,23 @@ +// +build lz4debug + +package lz4 + +import ( + "fmt" + "os" + "path/filepath" + "runtime" +) + +const debugFlag = true + +func debug(args ...interface{}) { + _, file, line, _ := runtime.Caller(1) + file = filepath.Base(file) + + f := fmt.Sprintf("LZ4: %s:%d %s", file, line, args[0]) + if f[len(f)-1] != '\n' { + f += "\n" + } + fmt.Fprintf(os.Stderr, f, args[1:]...) +} diff --git a/vendor/github.com/pierrec/lz4/debug_stub.go b/vendor/github.com/pierrec/lz4/debug_stub.go new file mode 100644 index 0000000000..44211ad964 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/debug_stub.go @@ -0,0 +1,7 @@ +// +build !lz4debug + +package lz4 + +const debugFlag = false + +func debug(args ...interface{}) {} diff --git a/vendor/github.com/pierrec/lz4/decode_amd64.go b/vendor/github.com/pierrec/lz4/decode_amd64.go new file mode 100644 index 0000000000..43cc14fbe2 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/decode_amd64.go @@ -0,0 +1,8 @@ +// +build !appengine +// +build gc +// +build !noasm + +package lz4 + +//go:noescape +func decodeBlock(dst, src []byte) int diff --git a/vendor/github.com/pierrec/lz4/decode_amd64.s b/vendor/github.com/pierrec/lz4/decode_amd64.s new file mode 100644 index 0000000000..20fef39759 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/decode_amd64.s @@ -0,0 +1,375 @@ +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// AX scratch +// BX scratch +// CX scratch +// DX token +// +// DI &dst +// SI &src +// R8 &dst + len(dst) +// R9 &src + len(src) +// R11 &dst +// R12 short output end +// R13 short input end +// func decodeBlock(dst, src []byte) int +// using 50 bytes of stack currently +TEXT ·decodeBlock(SB), NOSPLIT, $64-56 + MOVQ dst_base+0(FP), DI + MOVQ DI, R11 + MOVQ dst_len+8(FP), R8 + ADDQ DI, R8 + + MOVQ src_base+24(FP), SI + MOVQ src_len+32(FP), R9 + ADDQ SI, R9 + + // shortcut ends + // short output end + MOVQ R8, R12 + SUBQ $32, R12 + // short input end + MOVQ R9, R13 + SUBQ $16, R13 + +loop: + // for si < len(src) + CMPQ SI, R9 + JGE end + + // token := uint32(src[si]) + MOVBQZX (SI), DX + INCQ SI + + // lit_len = token >> 4 + // if lit_len > 0 + // CX = lit_len + MOVQ DX, CX + SHRQ $4, CX + + // if lit_len != 0xF + CMPQ CX, $0xF + JEQ lit_len_loop_pre + CMPQ DI, R12 + JGE lit_len_loop_pre + CMPQ SI, R13 + JGE lit_len_loop_pre + + // copy shortcut + + // A two-stage shortcut for the most common case: + // 1) If the literal length is 0..14, and there is enough space, + // enter the shortcut and copy 16 bytes on behalf of the literals + // (in the fast mode, only 8 bytes can be safely copied this way). + // 2) Further if the match length is 4..18, copy 18 bytes in a similar + // manner; but we ensure that there's enough space in the output for + // those 18 bytes earlier, upon entering the shortcut (in other words, + // there is a combined check for both stages). + + // copy literal + MOVOU (SI), X0 + MOVOU X0, (DI) + ADDQ CX, DI + ADDQ CX, SI + + MOVQ DX, CX + ANDQ $0xF, CX + + // The second stage: prepare for match copying, decode full info. + // If it doesn't work out, the info won't be wasted. 
+ // offset := uint16(data[:2]) + MOVWQZX (SI), DX + ADDQ $2, SI + + MOVQ DI, AX + SUBQ DX, AX + CMPQ AX, DI + JGT err_short_buf + + // if we can't do the second stage then jump straight to read the + // match length, we already have the offset. + CMPQ CX, $0xF + JEQ match_len_loop_pre + CMPQ DX, $8 + JLT match_len_loop_pre + CMPQ AX, R11 + JLT err_short_buf + + // memcpy(op + 0, match + 0, 8); + MOVQ (AX), BX + MOVQ BX, (DI) + // memcpy(op + 8, match + 8, 8); + MOVQ 8(AX), BX + MOVQ BX, 8(DI) + // memcpy(op +16, match +16, 2); + MOVW 16(AX), BX + MOVW BX, 16(DI) + + ADDQ $4, DI // minmatch + ADDQ CX, DI + + // shortcut complete, load next token + JMP loop + +lit_len_loop_pre: + // if lit_len > 0 + CMPQ CX, $0 + JEQ offset + CMPQ CX, $0xF + JNE copy_literal + +lit_len_loop: + // for src[si] == 0xFF + CMPB (SI), $0xFF + JNE lit_len_finalise + + // bounds check src[si+1] + MOVQ SI, AX + ADDQ $1, AX + CMPQ AX, R9 + JGT err_short_buf + + // lit_len += 0xFF + ADDQ $0xFF, CX + INCQ SI + JMP lit_len_loop + +lit_len_finalise: + // lit_len += int(src[si]) + // si++ + MOVBQZX (SI), AX + ADDQ AX, CX + INCQ SI + +copy_literal: + // bounds check src and dst + MOVQ SI, AX + ADDQ CX, AX + CMPQ AX, R9 + JGT err_short_buf + + MOVQ DI, AX + ADDQ CX, AX + CMPQ AX, R8 + JGT err_short_buf + + // whats a good cut off to call memmove? + CMPQ CX, $16 + JGT memmove_lit + + // if len(dst[di:]) < 16 + MOVQ R8, AX + SUBQ DI, AX + CMPQ AX, $16 + JLT memmove_lit + + // if len(src[si:]) < 16 + MOVQ R9, AX + SUBQ SI, AX + CMPQ AX, $16 + JLT memmove_lit + + MOVOU (SI), X0 + MOVOU X0, (DI) + + JMP finish_lit_copy + +memmove_lit: + // memmove(to, from, len) + MOVQ DI, 0(SP) + MOVQ SI, 8(SP) + MOVQ CX, 16(SP) + // spill + MOVQ DI, 24(SP) + MOVQ SI, 32(SP) + MOVQ CX, 40(SP) // need len to inc SI, DI after + MOVB DX, 48(SP) + CALL runtime·memmove(SB) + + // restore registers + MOVQ 24(SP), DI + MOVQ 32(SP), SI + MOVQ 40(SP), CX + MOVB 48(SP), DX + + // recalc initial values + MOVQ dst_base+0(FP), R8 + MOVQ R8, R11 + ADDQ dst_len+8(FP), R8 + MOVQ src_base+24(FP), R9 + ADDQ src_len+32(FP), R9 + MOVQ R8, R12 + SUBQ $32, R12 + MOVQ R9, R13 + SUBQ $16, R13 + +finish_lit_copy: + ADDQ CX, SI + ADDQ CX, DI + + CMPQ SI, R9 + JGE end + +offset: + // CX := mLen + // free up DX to use for offset + MOVQ DX, CX + + MOVQ SI, AX + ADDQ $2, AX + CMPQ AX, R9 + JGT err_short_buf + + // offset + // DX := int(src[si]) | int(src[si+1])<<8 + MOVWQZX (SI), DX + ADDQ $2, SI + + // 0 offset is invalid + CMPQ DX, $0 + JEQ err_corrupt + + ANDB $0xF, CX + +match_len_loop_pre: + // if mlen != 0xF + CMPB CX, $0xF + JNE copy_match + +match_len_loop: + // for src[si] == 0xFF + // lit_len += 0xFF + CMPB (SI), $0xFF + JNE match_len_finalise + + // bounds check src[si+1] + MOVQ SI, AX + ADDQ $1, AX + CMPQ AX, R9 + JGT err_short_buf + + ADDQ $0xFF, CX + INCQ SI + JMP match_len_loop + +match_len_finalise: + // lit_len += int(src[si]) + // si++ + MOVBQZX (SI), AX + ADDQ AX, CX + INCQ SI + +copy_match: + // mLen += minMatch + ADDQ $4, CX + + // check we have match_len bytes left in dst + // di+match_len < len(dst) + MOVQ DI, AX + ADDQ CX, AX + CMPQ AX, R8 + JGT err_short_buf + + // DX = offset + // CX = match_len + // BX = &dst + (di - offset) + MOVQ DI, BX + SUBQ DX, BX + + // check BX is within dst + // if BX < &dst + CMPQ BX, R11 + JLT err_short_buf + + // if offset + match_len < di + MOVQ BX, AX + ADDQ CX, AX + CMPQ DI, AX + JGT copy_interior_match + + // AX := len(dst[:di]) + // MOVQ DI, AX + // SUBQ R11, AX + + // copy 16 bytes at a time + // if di-offset < 16 
copy 16-(di-offset) bytes to di + // then do the remaining + +copy_match_loop: + // for match_len >= 0 + // dst[di] = dst[i] + // di++ + // i++ + MOVB (BX), AX + MOVB AX, (DI) + INCQ DI + INCQ BX + DECQ CX + + CMPQ CX, $0 + JGT copy_match_loop + + JMP loop + +copy_interior_match: + CMPQ CX, $16 + JGT memmove_match + + // if len(dst[di:]) < 16 + MOVQ R8, AX + SUBQ DI, AX + CMPQ AX, $16 + JLT memmove_match + + MOVOU (BX), X0 + MOVOU X0, (DI) + + ADDQ CX, DI + JMP loop + +memmove_match: + // memmove(to, from, len) + MOVQ DI, 0(SP) + MOVQ BX, 8(SP) + MOVQ CX, 16(SP) + // spill + MOVQ DI, 24(SP) + MOVQ SI, 32(SP) + MOVQ CX, 40(SP) // need len to inc SI, DI after + CALL runtime·memmove(SB) + + // restore registers + MOVQ 24(SP), DI + MOVQ 32(SP), SI + MOVQ 40(SP), CX + + // recalc initial values + MOVQ dst_base+0(FP), R8 + MOVQ R8, R11 // TODO: make these sensible numbers + ADDQ dst_len+8(FP), R8 + MOVQ src_base+24(FP), R9 + ADDQ src_len+32(FP), R9 + MOVQ R8, R12 + SUBQ $32, R12 + MOVQ R9, R13 + SUBQ $16, R13 + + ADDQ CX, DI + JMP loop + +err_corrupt: + MOVQ $-1, ret+48(FP) + RET + +err_short_buf: + MOVQ $-2, ret+48(FP) + RET + +end: + SUBQ R11, DI + MOVQ DI, ret+48(FP) + RET diff --git a/vendor/github.com/pierrec/lz4/decode_other.go b/vendor/github.com/pierrec/lz4/decode_other.go new file mode 100644 index 0000000000..919888edf7 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/decode_other.go @@ -0,0 +1,98 @@ +// +build !amd64 appengine !gc noasm + +package lz4 + +func decodeBlock(dst, src []byte) (ret int) { + const hasError = -2 + defer func() { + if recover() != nil { + ret = hasError + } + }() + + var si, di int + for { + // Literals and match lengths (token). + b := int(src[si]) + si++ + + // Literals. + if lLen := b >> 4; lLen > 0 { + switch { + case lLen < 0xF && si+16 < len(src): + // Shortcut 1 + // if we have enough room in src and dst, and the literals length + // is small enough (0..14) then copy all 16 bytes, even if not all + // are part of the literals. + copy(dst[di:], src[si:si+16]) + si += lLen + di += lLen + if mLen := b & 0xF; mLen < 0xF { + // Shortcut 2 + // if the match length (4..18) fits within the literals, then copy + // all 18 bytes, even if not all are part of the literals. + mLen += 4 + if offset := int(src[si]) | int(src[si+1])<<8; mLen <= offset { + i := di - offset + end := i + 18 + if end > len(dst) { + // The remaining buffer may not hold 18 bytes. + // See https://github.com/pierrec/lz4/issues/51. + end = len(dst) + } + copy(dst[di:], dst[i:end]) + si += 2 + di += mLen + continue + } + } + case lLen == 0xF: + for src[si] == 0xFF { + lLen += 0xFF + si++ + } + lLen += int(src[si]) + si++ + fallthrough + default: + copy(dst[di:di+lLen], src[si:si+lLen]) + si += lLen + di += lLen + } + } + if si >= len(src) { + return di + } + + offset := int(src[si]) | int(src[si+1])<<8 + if offset == 0 { + return hasError + } + si += 2 + + // Match. + mLen := b & 0xF + if mLen == 0xF { + for src[si] == 0xFF { + mLen += 0xFF + si++ + } + mLen += int(src[si]) + si++ + } + mLen += minMatch + + // Copy the match. + expanded := dst[di-offset:] + if mLen > offset { + // Efficiently copy the match dst[di-offset:di] into the dst slice. 
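+			// The match overlaps the bytes being written (mLen > offset), so the already
+			// decoded region is replicated in doubling chunks before the final partial copy.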
+ bytesToCopy := offset * (mLen / offset) + for n := offset; n <= bytesToCopy+offset; n *= 2 { + copy(expanded[n:], expanded[:n]) + } + di += bytesToCopy + mLen -= bytesToCopy + } + di += copy(dst[di:di+mLen], expanded[:mLen]) + } +} diff --git a/vendor/github.com/pierrec/lz4/errors.go b/vendor/github.com/pierrec/lz4/errors.go new file mode 100644 index 0000000000..1c45d1813c --- /dev/null +++ b/vendor/github.com/pierrec/lz4/errors.go @@ -0,0 +1,30 @@ +package lz4 + +import ( + "errors" + "fmt" + "os" + rdebug "runtime/debug" +) + +var ( + // ErrInvalidSourceShortBuffer is returned by UncompressBlock or CompressBLock when a compressed + // block is corrupted or the destination buffer is not large enough for the uncompressed data. + ErrInvalidSourceShortBuffer = errors.New("lz4: invalid source or destination buffer too short") + // ErrInvalid is returned when reading an invalid LZ4 archive. + ErrInvalid = errors.New("lz4: bad magic number") + // ErrBlockDependency is returned when attempting to decompress an archive created with block dependency. + ErrBlockDependency = errors.New("lz4: block dependency not supported") + // ErrUnsupportedSeek is returned when attempting to Seek any way but forward from the current position. + ErrUnsupportedSeek = errors.New("lz4: can only seek forward from io.SeekCurrent") +) + +func recoverBlock(e *error) { + if r := recover(); r != nil && *e == nil { + if debugFlag { + fmt.Fprintln(os.Stderr, r) + rdebug.PrintStack() + } + *e = ErrInvalidSourceShortBuffer + } +} diff --git a/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go b/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go new file mode 100644 index 0000000000..7a76a6bce2 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go @@ -0,0 +1,223 @@ +// Package xxh32 implements the very fast XXH hashing algorithm (32 bits version). +// (https://github.com/Cyan4973/XXH/) +package xxh32 + +import ( + "encoding/binary" +) + +const ( + prime1 uint32 = 2654435761 + prime2 uint32 = 2246822519 + prime3 uint32 = 3266489917 + prime4 uint32 = 668265263 + prime5 uint32 = 374761393 + + primeMask = 0xFFFFFFFF + prime1plus2 = uint32((uint64(prime1) + uint64(prime2)) & primeMask) // 606290984 + prime1minus = uint32((-int64(prime1)) & primeMask) // 1640531535 +) + +// XXHZero represents an xxhash32 object with seed 0. +type XXHZero struct { + v1 uint32 + v2 uint32 + v3 uint32 + v4 uint32 + totalLen uint64 + buf [16]byte + bufused int +} + +// Sum appends the current hash to b and returns the resulting slice. +// It does not change the underlying hash state. +func (xxh XXHZero) Sum(b []byte) []byte { + h32 := xxh.Sum32() + return append(b, byte(h32), byte(h32>>8), byte(h32>>16), byte(h32>>24)) +} + +// Reset resets the Hash to its initial state. +func (xxh *XXHZero) Reset() { + xxh.v1 = prime1plus2 + xxh.v2 = prime2 + xxh.v3 = 0 + xxh.v4 = prime1minus + xxh.totalLen = 0 + xxh.bufused = 0 +} + +// Size returns the number of bytes returned by Sum(). +func (xxh *XXHZero) Size() int { + return 4 +} + +// BlockSize gives the minimum number of bytes accepted by Write(). +func (xxh *XXHZero) BlockSize() int { + return 1 +} + +// Write adds input bytes to the Hash. +// It never returns an error. 
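+//
+// A minimal streaming sketch:
+//
+//	var h xxh32.XXHZero
+//	_, _ = h.Write([]byte("hello, "))
+//	_, _ = h.Write([]byte("world"))
+//	sum := h.Sum32() // equals xxh32.ChecksumZero([]byte("hello, world"))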
+func (xxh *XXHZero) Write(input []byte) (int, error) { + if xxh.totalLen == 0 { + xxh.Reset() + } + n := len(input) + m := xxh.bufused + + xxh.totalLen += uint64(n) + + r := len(xxh.buf) - m + if n < r { + copy(xxh.buf[m:], input) + xxh.bufused += len(input) + return n, nil + } + + p := 0 + // Causes compiler to work directly from registers instead of stack: + v1, v2, v3, v4 := xxh.v1, xxh.v2, xxh.v3, xxh.v4 + if m > 0 { + // some data left from previous update + copy(xxh.buf[xxh.bufused:], input[:r]) + xxh.bufused += len(input) - r + + // fast rotl(13) + buf := xxh.buf[:16] // BCE hint. + v1 = rol13(v1+binary.LittleEndian.Uint32(buf[:])*prime2) * prime1 + v2 = rol13(v2+binary.LittleEndian.Uint32(buf[4:])*prime2) * prime1 + v3 = rol13(v3+binary.LittleEndian.Uint32(buf[8:])*prime2) * prime1 + v4 = rol13(v4+binary.LittleEndian.Uint32(buf[12:])*prime2) * prime1 + p = r + xxh.bufused = 0 + } + + for n := n - 16; p <= n; p += 16 { + sub := input[p:][:16] //BCE hint for compiler + v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1 + v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1 + v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1 + v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1 + } + xxh.v1, xxh.v2, xxh.v3, xxh.v4 = v1, v2, v3, v4 + + copy(xxh.buf[xxh.bufused:], input[p:]) + xxh.bufused += len(input) - p + + return n, nil +} + +// Sum32 returns the 32 bits Hash value. +func (xxh *XXHZero) Sum32() uint32 { + h32 := uint32(xxh.totalLen) + if h32 >= 16 { + h32 += rol1(xxh.v1) + rol7(xxh.v2) + rol12(xxh.v3) + rol18(xxh.v4) + } else { + h32 += prime5 + } + + p := 0 + n := xxh.bufused + buf := xxh.buf + for n := n - 4; p <= n; p += 4 { + h32 += binary.LittleEndian.Uint32(buf[p:p+4]) * prime3 + h32 = rol17(h32) * prime4 + } + for ; p < n; p++ { + h32 += uint32(buf[p]) * prime5 + h32 = rol11(h32) * prime1 + } + + h32 ^= h32 >> 15 + h32 *= prime2 + h32 ^= h32 >> 13 + h32 *= prime3 + h32 ^= h32 >> 16 + + return h32 +} + +// ChecksumZero returns the 32bits Hash value. +func ChecksumZero(input []byte) uint32 { + n := len(input) + h32 := uint32(n) + + if n < 16 { + h32 += prime5 + } else { + v1 := prime1plus2 + v2 := prime2 + v3 := uint32(0) + v4 := prime1minus + p := 0 + for n := n - 16; p <= n; p += 16 { + sub := input[p:][:16] //BCE hint for compiler + v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1 + v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1 + v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1 + v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1 + } + input = input[p:] + n -= p + h32 += rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + } + + p := 0 + for n := n - 4; p <= n; p += 4 { + h32 += binary.LittleEndian.Uint32(input[p:p+4]) * prime3 + h32 = rol17(h32) * prime4 + } + for p < n { + h32 += uint32(input[p]) * prime5 + h32 = rol11(h32) * prime1 + p++ + } + + h32 ^= h32 >> 15 + h32 *= prime2 + h32 ^= h32 >> 13 + h32 *= prime3 + h32 ^= h32 >> 16 + + return h32 +} + +// Uint32Zero hashes x with seed 0. 
+func Uint32Zero(x uint32) uint32 { + h := prime5 + 4 + x*prime3 + h = rol17(h) * prime4 + h ^= h >> 15 + h *= prime2 + h ^= h >> 13 + h *= prime3 + h ^= h >> 16 + return h +} + +func rol1(u uint32) uint32 { + return u<<1 | u>>31 +} + +func rol7(u uint32) uint32 { + return u<<7 | u>>25 +} + +func rol11(u uint32) uint32 { + return u<<11 | u>>21 +} + +func rol12(u uint32) uint32 { + return u<<12 | u>>20 +} + +func rol13(u uint32) uint32 { + return u<<13 | u>>19 +} + +func rol17(u uint32) uint32 { + return u<<17 | u>>15 +} + +func rol18(u uint32) uint32 { + return u<<18 | u>>14 +} diff --git a/vendor/github.com/pierrec/lz4/lz4.go b/vendor/github.com/pierrec/lz4/lz4.go new file mode 100644 index 0000000000..6c73539a34 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/lz4.go @@ -0,0 +1,113 @@ +// Package lz4 implements reading and writing lz4 compressed data (a frame), +// as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html. +// +// Although the block level compression and decompression functions are exposed and are fully compatible +// with the lz4 block format definition, they are low level and should not be used directly. +// For a complete description of an lz4 compressed block, see: +// http://fastcompression.blogspot.fr/2011/05/lz4-explained.html +// +// See https://github.com/Cyan4973/lz4 for the reference C implementation. +// +package lz4 + +import "math/bits" + +import "sync" + +const ( + // Extension is the LZ4 frame file name extension + Extension = ".lz4" + // Version is the LZ4 frame format version + Version = 1 + + frameMagic uint32 = 0x184D2204 + frameSkipMagic uint32 = 0x184D2A50 + + // The following constants are used to setup the compression algorithm. + minMatch = 4 // the minimum size of the match sequence size (4 bytes) + winSizeLog = 16 // LZ4 64Kb window size limit + winSize = 1 << winSizeLog + winMask = winSize - 1 // 64Kb window of previous data for dependent blocks + compressedBlockFlag = 1 << 31 + compressedBlockMask = compressedBlockFlag - 1 + + // hashLog determines the size of the hash table used to quickly find a previous match position. + // Its value influences the compression speed and memory usage, the lower the faster, + // but at the expense of the compression ratio. + // 16 seems to be the best compromise for fast compression. + hashLog = 16 + htSize = 1 << hashLog + + mfLimit = 10 + minMatch // The last match cannot start within the last 14 bytes. +) + +// map the block max size id with its value in bytes: 64Kb, 256Kb, 1Mb and 4Mb. +const ( + blockSize64K = 1 << (16 + 2*iota) + blockSize256K + blockSize1M + blockSize4M +) + +var ( + // Keep a pool of buffers for each valid block sizes. + bsMapValue = [...]*sync.Pool{ + newBufferPool(2 * blockSize64K), + newBufferPool(2 * blockSize256K), + newBufferPool(2 * blockSize1M), + newBufferPool(2 * blockSize4M), + } +) + +// newBufferPool returns a pool for buffers of the given size. +func newBufferPool(size int) *sync.Pool { + return &sync.Pool{ + New: func() interface{} { + return make([]byte, size) + }, + } +} + +// getBuffer returns a buffer to its pool. +func getBuffer(size int) []byte { + idx := blockSizeValueToIndex(size) - 4 + return bsMapValue[idx].Get().([]byte) +} + +// putBuffer returns a buffer to its pool. 
+func putBuffer(size int, buf []byte) { + if cap(buf) > 0 { + idx := blockSizeValueToIndex(size) - 4 + bsMapValue[idx].Put(buf[:cap(buf)]) + } +} +func blockSizeIndexToValue(i byte) int { + return 1 << (16 + 2*uint(i)) +} +func isValidBlockSize(size int) bool { + const blockSizeMask = blockSize64K | blockSize256K | blockSize1M | blockSize4M + + return size&blockSizeMask > 0 && bits.OnesCount(uint(size)) == 1 +} +func blockSizeValueToIndex(size int) byte { + return 4 + byte(bits.TrailingZeros(uint(size)>>16)/2) +} + +// Header describes the various flags that can be set on a Writer or obtained from a Reader. +// The default values match those of the LZ4 frame format definition +// (http://fastcompression.blogspot.com/2013/04/lz4-streaming-format-final.html). +// +// NB. in a Reader, in case of concatenated frames, the Header values may change between Read() calls. +// It is the caller's responsibility to check them if necessary. +type Header struct { + BlockChecksum bool // Compressed blocks checksum flag. + NoChecksum bool // Frame checksum flag. + BlockMaxSize int // Size of the uncompressed data block (one of [64KB, 256KB, 1MB, 4MB]). Default=4MB. + Size uint64 // Frame total size. It is _not_ computed by the Writer. + CompressionLevel int // Compression level (higher is better, use 0 for fastest compression). + done bool // Header processed flag (Read or Write and checked). +} + +func (h *Header) Reset() { + h.done = false +} diff --git a/vendor/github.com/pierrec/lz4/lz4_go1.10.go b/vendor/github.com/pierrec/lz4/lz4_go1.10.go new file mode 100644 index 0000000000..9a0fb00709 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/lz4_go1.10.go @@ -0,0 +1,29 @@ +//+build go1.10 + +package lz4 + +import ( + "fmt" + "strings" +) + +func (h Header) String() string { + var s strings.Builder + + s.WriteString(fmt.Sprintf("%T{", h)) + if h.BlockChecksum { + s.WriteString("BlockChecksum: true ") + } + if h.NoChecksum { + s.WriteString("NoChecksum: true ") + } + if bs := h.BlockMaxSize; bs != 0 && bs != 4<<20 { + s.WriteString(fmt.Sprintf("BlockMaxSize: %d ", bs)) + } + if l := h.CompressionLevel; l != 0 { + s.WriteString(fmt.Sprintf("CompressionLevel: %d ", l)) + } + s.WriteByte('}') + + return s.String() +} diff --git a/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go b/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go new file mode 100644 index 0000000000..12c761a2e7 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go @@ -0,0 +1,29 @@ +//+build !go1.10 + +package lz4 + +import ( + "bytes" + "fmt" +) + +func (h Header) String() string { + var s bytes.Buffer + + s.WriteString(fmt.Sprintf("%T{", h)) + if h.BlockChecksum { + s.WriteString("BlockChecksum: true ") + } + if h.NoChecksum { + s.WriteString("NoChecksum: true ") + } + if bs := h.BlockMaxSize; bs != 0 && bs != 4<<20 { + s.WriteString(fmt.Sprintf("BlockMaxSize: %d ", bs)) + } + if l := h.CompressionLevel; l != 0 { + s.WriteString(fmt.Sprintf("CompressionLevel: %d ", l)) + } + s.WriteByte('}') + + return s.String() +} diff --git a/vendor/github.com/pierrec/lz4/reader.go b/vendor/github.com/pierrec/lz4/reader.go new file mode 100644 index 0000000000..87dd72bd0d --- /dev/null +++ b/vendor/github.com/pierrec/lz4/reader.go @@ -0,0 +1,335 @@ +package lz4 + +import ( + "encoding/binary" + "fmt" + "io" + "io/ioutil" + + "github.com/pierrec/lz4/internal/xxh32" +) + +// Reader implements the LZ4 frame decoder. +// The Header is set after the first call to Read(). +// The Header may change between Read() calls in case of concatenated frames. 
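+//
+// A minimal decompression sketch, assuming `in` is an io.Reader over an LZ4 frame
+// and `out` is an io.Writer:
+//
+//	zr := lz4.NewReader(in)
+//	if _, err := io.Copy(out, zr); err != nil {
+//		// handle the error
+//	}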
+type Reader struct { + Header + // Handler called when a block has been successfully read. + // It provides the number of bytes read. + OnBlockDone func(size int) + + buf [8]byte // Scrap buffer. + pos int64 // Current position in src. + src io.Reader // Source. + zdata []byte // Compressed data. + data []byte // Uncompressed data. + idx int // Index of unread bytes into data. + checksum xxh32.XXHZero // Frame hash. + skip int64 // Bytes to skip before next read. + dpos int64 // Position in dest +} + +// NewReader returns a new LZ4 frame decoder. +// No access to the underlying io.Reader is performed. +func NewReader(src io.Reader) *Reader { + r := &Reader{src: src} + return r +} + +// readHeader checks the frame magic number and parses the frame descriptoz. +// Skippable frames are supported even as a first frame although the LZ4 +// specifications recommends skippable frames not to be used as first frames. +func (z *Reader) readHeader(first bool) error { + defer z.checksum.Reset() + + buf := z.buf[:] + for { + magic, err := z.readUint32() + if err != nil { + z.pos += 4 + if !first && err == io.ErrUnexpectedEOF { + return io.EOF + } + return err + } + if magic == frameMagic { + break + } + if magic>>8 != frameSkipMagic>>8 { + return ErrInvalid + } + skipSize, err := z.readUint32() + if err != nil { + return err + } + z.pos += 4 + m, err := io.CopyN(ioutil.Discard, z.src, int64(skipSize)) + if err != nil { + return err + } + z.pos += m + } + + // Header. + if _, err := io.ReadFull(z.src, buf[:2]); err != nil { + return err + } + z.pos += 8 + + b := buf[0] + if v := b >> 6; v != Version { + return fmt.Errorf("lz4: invalid version: got %d; expected %d", v, Version) + } + if b>>5&1 == 0 { + return ErrBlockDependency + } + z.BlockChecksum = b>>4&1 > 0 + frameSize := b>>3&1 > 0 + z.NoChecksum = b>>2&1 == 0 + + bmsID := buf[1] >> 4 & 0x7 + if bmsID < 4 || bmsID > 7 { + return fmt.Errorf("lz4: invalid block max size ID: %d", bmsID) + } + bSize := blockSizeIndexToValue(bmsID - 4) + z.BlockMaxSize = bSize + + // Allocate the compressed/uncompressed buffers. + // The compressed buffer cannot exceed the uncompressed one. + if n := 2 * bSize; cap(z.zdata) < n { + z.zdata = make([]byte, n, n) + } + if debugFlag { + debug("header block max size id=%d size=%d", bmsID, bSize) + } + z.zdata = z.zdata[:bSize] + z.data = z.zdata[:cap(z.zdata)][bSize:] + z.idx = len(z.data) + + _, _ = z.checksum.Write(buf[0:2]) + + if frameSize { + buf := buf[:8] + if _, err := io.ReadFull(z.src, buf); err != nil { + return err + } + z.Size = binary.LittleEndian.Uint64(buf) + z.pos += 8 + _, _ = z.checksum.Write(buf) + } + + // Header checksum. + if _, err := io.ReadFull(z.src, buf[:1]); err != nil { + return err + } + z.pos++ + if h := byte(z.checksum.Sum32() >> 8 & 0xFF); h != buf[0] { + return fmt.Errorf("lz4: invalid header checksum: got %x; expected %x", buf[0], h) + } + + z.Header.done = true + if debugFlag { + debug("header read: %v", z.Header) + } + + return nil +} + +// Read decompresses data from the underlying source into the supplied buffer. +// +// Since there can be multiple streams concatenated, Header values may +// change between calls to Read(). If that is the case, no data is actually read from +// the underlying io.Reader, to allow for potential input buffer resizing. 
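+// In that case Read returns (0, nil) once the next frame header has been parsed,
+// and the caller should simply call Read again.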
+func (z *Reader) Read(buf []byte) (int, error) { + if debugFlag { + debug("Read buf len=%d", len(buf)) + } + if !z.Header.done { + if err := z.readHeader(true); err != nil { + return 0, err + } + if debugFlag { + debug("header read OK compressed buffer %d / %d uncompressed buffer %d : %d index=%d", + len(z.zdata), cap(z.zdata), len(z.data), cap(z.data), z.idx) + } + } + + if len(buf) == 0 { + return 0, nil + } + + if z.idx == len(z.data) { + // No data ready for reading, process the next block. + if debugFlag { + debug("reading block from writer") + } + // Reset uncompressed buffer + z.data = z.zdata[:cap(z.zdata)][len(z.zdata):] + + // Block length: 0 = end of frame, highest bit set: uncompressed. + bLen, err := z.readUint32() + if err != nil { + return 0, err + } + z.pos += 4 + + if bLen == 0 { + // End of frame reached. + if !z.NoChecksum { + // Validate the frame checksum. + checksum, err := z.readUint32() + if err != nil { + return 0, err + } + if debugFlag { + debug("frame checksum got=%x / want=%x", z.checksum.Sum32(), checksum) + } + z.pos += 4 + if h := z.checksum.Sum32(); checksum != h { + return 0, fmt.Errorf("lz4: invalid frame checksum: got %x; expected %x", h, checksum) + } + } + + // Get ready for the next concatenated frame and keep the position. + pos := z.pos + z.Reset(z.src) + z.pos = pos + + // Since multiple frames can be concatenated, check for more. + return 0, z.readHeader(false) + } + + if debugFlag { + debug("raw block size %d", bLen) + } + if bLen&compressedBlockFlag > 0 { + // Uncompressed block. + bLen &= compressedBlockMask + if debugFlag { + debug("uncompressed block size %d", bLen) + } + if int(bLen) > cap(z.data) { + return 0, fmt.Errorf("lz4: invalid block size: %d", bLen) + } + z.data = z.data[:bLen] + if _, err := io.ReadFull(z.src, z.data); err != nil { + return 0, err + } + z.pos += int64(bLen) + if z.OnBlockDone != nil { + z.OnBlockDone(int(bLen)) + } + + if z.BlockChecksum { + checksum, err := z.readUint32() + if err != nil { + return 0, err + } + z.pos += 4 + + if h := xxh32.ChecksumZero(z.data); h != checksum { + return 0, fmt.Errorf("lz4: invalid block checksum: got %x; expected %x", h, checksum) + } + } + + } else { + // Compressed block. + if debugFlag { + debug("compressed block size %d", bLen) + } + if int(bLen) > cap(z.data) { + return 0, fmt.Errorf("lz4: invalid block size: %d", bLen) + } + zdata := z.zdata[:bLen] + if _, err := io.ReadFull(z.src, zdata); err != nil { + return 0, err + } + z.pos += int64(bLen) + + if z.BlockChecksum { + checksum, err := z.readUint32() + if err != nil { + return 0, err + } + z.pos += 4 + + if h := xxh32.ChecksumZero(zdata); h != checksum { + return 0, fmt.Errorf("lz4: invalid block checksum: got %x; expected %x", h, checksum) + } + } + + n, err := UncompressBlock(zdata, z.data) + if err != nil { + return 0, err + } + z.data = z.data[:n] + if z.OnBlockDone != nil { + z.OnBlockDone(n) + } + } + + if !z.NoChecksum { + _, _ = z.checksum.Write(z.data) + if debugFlag { + debug("current frame checksum %x", z.checksum.Sum32()) + } + } + z.idx = 0 + } + + if z.skip > int64(len(z.data[z.idx:])) { + z.skip -= int64(len(z.data[z.idx:])) + z.dpos += int64(len(z.data[z.idx:])) + z.idx = len(z.data) + return 0, nil + } + + z.idx += int(z.skip) + z.dpos += z.skip + z.skip = 0 + + n := copy(buf, z.data[z.idx:]) + z.idx += n + z.dpos += int64(n) + if debugFlag { + debug("copied %d bytes to input", n) + } + + return n, nil +} + +// Seek implements io.Seeker, but supports seeking forward from the current +// position only. 
Any other seek will return an error. Allows skipping output +// bytes which aren't needed, which in some scenarios is faster than reading +// and discarding them. +// Note this may cause future calls to Read() to read 0 bytes if all of the +// data they would have returned is skipped. +func (z *Reader) Seek(offset int64, whence int) (int64, error) { + if offset < 0 || whence != io.SeekCurrent { + return z.dpos + z.skip, ErrUnsupportedSeek + } + z.skip += offset + return z.dpos + z.skip, nil +} + +// Reset discards the Reader's state and makes it equivalent to the +// result of its original state from NewReader, but reading from r instead. +// This permits reusing a Reader rather than allocating a new one. +func (z *Reader) Reset(r io.Reader) { + z.Header = Header{} + z.pos = 0 + z.src = r + z.zdata = z.zdata[:0] + z.data = z.data[:0] + z.idx = 0 + z.checksum.Reset() +} + +// readUint32 reads an uint32 into the supplied buffer. +// The idea is to make use of the already allocated buffers avoiding additional allocations. +func (z *Reader) readUint32() (uint32, error) { + buf := z.buf[:4] + _, err := io.ReadFull(z.src, buf) + x := binary.LittleEndian.Uint32(buf) + return x, err +} diff --git a/vendor/github.com/pierrec/lz4/writer.go b/vendor/github.com/pierrec/lz4/writer.go new file mode 100644 index 0000000000..324f1386b8 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/writer.go @@ -0,0 +1,408 @@ +package lz4 + +import ( + "encoding/binary" + "fmt" + "github.com/pierrec/lz4/internal/xxh32" + "io" + "runtime" +) + +// zResult contains the results of compressing a block. +type zResult struct { + size uint32 // Block header + data []byte // Compressed data + checksum uint32 // Data checksum +} + +// Writer implements the LZ4 frame encoder. +type Writer struct { + Header + // Handler called when a block has been successfully written out. + // It provides the number of bytes written. + OnBlockDone func(size int) + + buf [19]byte // magic number(4) + header(flags(2)+[Size(8)+DictID(4)]+checksum(1)) does not exceed 19 bytes + dst io.Writer // Destination. + checksum xxh32.XXHZero // Frame checksum. + data []byte // Data to be compressed + buffer for compressed data. + idx int // Index into data. + hashtable [winSize]int // Hash table used in CompressBlock(). + + // For concurrency. + c chan chan zResult // Channel for block compression goroutines and writer goroutine. + err error // Any error encountered while writing to the underlying destination. +} + +// NewWriter returns a new LZ4 frame encoder. +// No access to the underlying io.Writer is performed. +// The supplied Header is checked at the first Write. +// It is ok to change it before the first Write but then not until a Reset() is performed. +func NewWriter(dst io.Writer) *Writer { + z := new(Writer) + z.Reset(dst) + return z +} + +// WithConcurrency sets the number of concurrent go routines used for compression. +// A negative value sets the concurrency to GOMAXPROCS. +func (z *Writer) WithConcurrency(n int) *Writer { + switch { + case n == 0 || n == 1: + z.c = nil + return z + case n < 0: + n = runtime.GOMAXPROCS(0) + } + z.c = make(chan chan zResult, n) + // Writer goroutine managing concurrent block compression goroutines. + go func() { + // Process next block compression item. + for c := range z.c { + // Read the next compressed block result. + // Waiting here ensures that the blocks are output in the order they were sent. + // The incoming channel is always closed as it indicates to the caller that + // the block has been processed. 
+ res := <-c + n := len(res.data) + if n == 0 { + // Notify the block compression routine that we are done with its result. + // This is used when a sentinel block is sent to terminate the compression. + close(c) + return + } + // Write the block. + if err := z.writeUint32(res.size); err != nil && z.err == nil { + z.err = err + } + if _, err := z.dst.Write(res.data); err != nil && z.err == nil { + z.err = err + } + if z.BlockChecksum { + if err := z.writeUint32(res.checksum); err != nil && z.err == nil { + z.err = err + } + } + if isCompressed := res.size&compressedBlockFlag == 0; isCompressed { + // It is now safe to release the buffer as no longer in use by any goroutine. + putBuffer(cap(res.data), res.data) + } + if h := z.OnBlockDone; h != nil { + h(n) + } + close(c) + } + }() + return z +} + +// newBuffers instantiates new buffers which size matches the one in Header. +// The returned buffers are for decompression and compression respectively. +func (z *Writer) newBuffers() { + bSize := z.Header.BlockMaxSize + buf := getBuffer(bSize) + z.data = buf[:bSize] // Uncompressed buffer is the first half. +} + +// freeBuffers puts the writer's buffers back to the pool. +func (z *Writer) freeBuffers() { + // Put the buffer back into the pool, if any. + putBuffer(z.Header.BlockMaxSize, z.data) + z.data = nil +} + +// writeHeader builds and writes the header (magic+header) to the underlying io.Writer. +func (z *Writer) writeHeader() error { + // Default to 4Mb if BlockMaxSize is not set. + if z.Header.BlockMaxSize == 0 { + z.Header.BlockMaxSize = blockSize4M + } + // The only option that needs to be validated. + bSize := z.Header.BlockMaxSize + if !isValidBlockSize(z.Header.BlockMaxSize) { + return fmt.Errorf("lz4: invalid block max size: %d", bSize) + } + // Allocate the compressed/uncompressed buffers. + // The compressed buffer cannot exceed the uncompressed one. + z.newBuffers() + z.idx = 0 + + // Size is optional. + buf := z.buf[:] + + // Set the fixed size data: magic number, block max size and flags. + binary.LittleEndian.PutUint32(buf[0:], frameMagic) + flg := byte(Version << 6) + flg |= 1 << 5 // No block dependency. + if z.Header.BlockChecksum { + flg |= 1 << 4 + } + if z.Header.Size > 0 { + flg |= 1 << 3 + } + if !z.Header.NoChecksum { + flg |= 1 << 2 + } + buf[4] = flg + buf[5] = blockSizeValueToIndex(z.Header.BlockMaxSize) << 4 + + // Current buffer size: magic(4) + flags(1) + block max size (1). + n := 6 + // Optional items. + if z.Header.Size > 0 { + binary.LittleEndian.PutUint64(buf[n:], z.Header.Size) + n += 8 + } + + // The header checksum includes the flags, block max size and optional Size. + buf[n] = byte(xxh32.ChecksumZero(buf[4:n]) >> 8 & 0xFF) + z.checksum.Reset() + + // Header ready, write it out. + if _, err := z.dst.Write(buf[0 : n+1]); err != nil { + return err + } + z.Header.done = true + if debugFlag { + debug("wrote header %v", z.Header) + } + + return nil +} + +// Write compresses data from the supplied buffer into the underlying io.Writer. +// Write does not return until the data has been written. +func (z *Writer) Write(buf []byte) (int, error) { + if !z.Header.done { + if err := z.writeHeader(); err != nil { + return 0, err + } + } + if debugFlag { + debug("input buffer len=%d index=%d", len(buf), z.idx) + } + + zn := len(z.data) + var n int + for len(buf) > 0 { + if z.idx == 0 && len(buf) >= zn { + // Avoid a copy as there is enough data for a block. 
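+			// Compress a full block straight from the caller's buffer instead of
+			// staging it in z.data first.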
+ if err := z.compressBlock(buf[:zn]); err != nil { + return n, err + } + n += zn + buf = buf[zn:] + continue + } + // Accumulate the data to be compressed. + m := copy(z.data[z.idx:], buf) + n += m + z.idx += m + buf = buf[m:] + if debugFlag { + debug("%d bytes copied to buf, current index %d", n, z.idx) + } + + if z.idx < len(z.data) { + // Buffer not filled. + if debugFlag { + debug("need more data for compression") + } + return n, nil + } + + // Buffer full. + if err := z.compressBlock(z.data); err != nil { + return n, err + } + z.idx = 0 + } + + return n, nil +} + +// compressBlock compresses a block. +func (z *Writer) compressBlock(data []byte) error { + if !z.NoChecksum { + _, _ = z.checksum.Write(data) + } + + if z.c != nil { + c := make(chan zResult) + z.c <- c // Send now to guarantee order + go writerCompressBlock(c, z.Header, data) + return nil + } + + zdata := z.data[z.Header.BlockMaxSize:cap(z.data)] + // The compressed block size cannot exceed the input's. + var zn int + + if level := z.Header.CompressionLevel; level != 0 { + zn, _ = CompressBlockHC(data, zdata, level) + } else { + zn, _ = CompressBlock(data, zdata, z.hashtable[:]) + } + + var bLen uint32 + if debugFlag { + debug("block compression %d => %d", len(data), zn) + } + if zn > 0 && zn < len(data) { + // Compressible and compressed size smaller than uncompressed: ok! + bLen = uint32(zn) + zdata = zdata[:zn] + } else { + // Uncompressed block. + bLen = uint32(len(data)) | compressedBlockFlag + zdata = data + } + if debugFlag { + debug("block compression to be written len=%d data len=%d", bLen, len(zdata)) + } + + // Write the block. + if err := z.writeUint32(bLen); err != nil { + return err + } + written, err := z.dst.Write(zdata) + if err != nil { + return err + } + if h := z.OnBlockDone; h != nil { + h(written) + } + + if !z.BlockChecksum { + if debugFlag { + debug("current frame checksum %x", z.checksum.Sum32()) + } + return nil + } + checksum := xxh32.ChecksumZero(zdata) + if debugFlag { + debug("block checksum %x", checksum) + defer func() { debug("current frame checksum %x", z.checksum.Sum32()) }() + } + return z.writeUint32(checksum) +} + +// Flush flushes any pending compressed data to the underlying writer. +// Flush does not return until the data has been written. +// If the underlying writer returns an error, Flush returns that error. +func (z *Writer) Flush() error { + if debugFlag { + debug("flush with index %d", z.idx) + } + if z.idx == 0 { + return nil + } + + data := z.data[:z.idx] + z.idx = 0 + if z.c == nil { + return z.compressBlock(data) + } + if !z.NoChecksum { + _, _ = z.checksum.Write(data) + } + c := make(chan zResult) + z.c <- c + writerCompressBlock(c, z.Header, data) + return nil +} + +func (z *Writer) close() error { + if z.c == nil { + return nil + } + // Send a sentinel block (no data to compress) to terminate the writer main goroutine. + c := make(chan zResult) + z.c <- c + c <- zResult{} + // Wait for the main goroutine to complete. + <-c + // At this point the main goroutine has shut down or is about to return. + z.c = nil + return z.err +} + +// Close closes the Writer, flushing any unwritten data to the underlying io.Writer, but does not close the underlying io.Writer. 
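+//
+// A minimal end-to-end compression sketch, assuming `src` is an io.Reader and `dst`
+// is an io.Writer:
+//
+//	zw := lz4.NewWriter(dst)
+//	if _, err := io.Copy(zw, src); err != nil {
+//		// handle the error
+//	}
+//	if err := zw.Close(); err != nil { // flush remaining data and write the end mark
+//		// handle the error
+//	}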
+func (z *Writer) Close() error { + if !z.Header.done { + if err := z.writeHeader(); err != nil { + return err + } + } + if err := z.Flush(); err != nil { + return err + } + if err := z.close(); err != nil { + return err + } + z.freeBuffers() + + if debugFlag { + debug("writing last empty block") + } + if err := z.writeUint32(0); err != nil { + return err + } + if z.NoChecksum { + return nil + } + checksum := z.checksum.Sum32() + if debugFlag { + debug("stream checksum %x", checksum) + } + return z.writeUint32(checksum) +} + +// Reset clears the state of the Writer z such that it is equivalent to its +// initial state from NewWriter, but instead writing to w. +// No access to the underlying io.Writer is performed. +func (z *Writer) Reset(w io.Writer) { + n := cap(z.c) + _ = z.close() + z.freeBuffers() + z.Header.Reset() + z.dst = w + z.checksum.Reset() + z.idx = 0 + z.err = nil + z.WithConcurrency(n) +} + +// writeUint32 writes a uint32 to the underlying writer. +func (z *Writer) writeUint32(x uint32) error { + buf := z.buf[:4] + binary.LittleEndian.PutUint32(buf, x) + _, err := z.dst.Write(buf) + return err +} + +// writerCompressBlock compresses data into a pooled buffer and writes its result +// out to the input channel. +func writerCompressBlock(c chan zResult, header Header, data []byte) { + zdata := getBuffer(header.BlockMaxSize) + // The compressed block size cannot exceed the input's. + var zn int + if level := header.CompressionLevel; level != 0 { + zn, _ = CompressBlockHC(data, zdata, level) + } else { + var hashTable [winSize]int + zn, _ = CompressBlock(data, zdata, hashTable[:]) + } + var res zResult + if zn > 0 && zn < len(data) { + res.size = uint32(zn) + res.data = zdata[:zn] + } else { + res.size = uint32(len(data)) | compressedBlockFlag + res.data = data + } + if header.BlockChecksum { + res.checksum = xxh32.ChecksumZero(res.data) + } + c <- res +} diff --git a/vendor/github.com/mitchellh/mapstructure/.travis.yml b/vendor/github.com/ryanuber/go-glob/.travis.yml similarity index 55% rename from vendor/github.com/mitchellh/mapstructure/.travis.yml rename to vendor/github.com/ryanuber/go-glob/.travis.yml index 1689c7d735..9d1ca3c378 100644 --- a/vendor/github.com/mitchellh/mapstructure/.travis.yml +++ b/vendor/github.com/ryanuber/go-glob/.travis.yml @@ -1,8 +1,5 @@ language: go - go: - - "1.11.x" - tip - script: - - go test + - go test -v ./... diff --git a/vendor/github.com/ryanuber/go-glob/LICENSE b/vendor/github.com/ryanuber/go-glob/LICENSE new file mode 100644 index 0000000000..bdfbd95149 --- /dev/null +++ b/vendor/github.com/ryanuber/go-glob/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Ryan Uber + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/ryanuber/go-glob/README.md b/vendor/github.com/ryanuber/go-glob/README.md new file mode 100644 index 0000000000..48f7fcb05a --- /dev/null +++ b/vendor/github.com/ryanuber/go-glob/README.md @@ -0,0 +1,29 @@ +# String globbing in golang [![Build Status](https://travis-ci.org/ryanuber/go-glob.svg)](https://travis-ci.org/ryanuber/go-glob) + +`go-glob` is a single-function library implementing basic string glob support. + +Globs are an extremely user-friendly way of supporting string matching without +requiring knowledge of regular expressions or Go's particular regex engine. Most +people understand that if you put a `*` character somewhere in a string, it is +treated as a wildcard. Surprisingly, this functionality isn't found in Go's +standard library, except for `path.Match`, which is intended to be used while +comparing paths (not arbitrary strings), and contains specialized logic for this +use case. A better solution might be a POSIX basic (non-ERE) regular expression +engine for Go, which doesn't exist currently. + +Example +======= + +``` +package main + +import "github.com/ryanuber/go-glob" + +func main() { + glob.Glob("*World!", "Hello, World!") // true + glob.Glob("Hello,*", "Hello, World!") // true + glob.Glob("*ello,*", "Hello, World!") // true + glob.Glob("World!", "Hello, World!") // false + glob.Glob("/home/*", "/home/ryanuber/.bashrc") // true +} +``` diff --git a/vendor/github.com/ryanuber/go-glob/glob.go b/vendor/github.com/ryanuber/go-glob/glob.go new file mode 100644 index 0000000000..e67db3be18 --- /dev/null +++ b/vendor/github.com/ryanuber/go-glob/glob.go @@ -0,0 +1,56 @@ +package glob + +import "strings" + +// The character which is treated like a glob +const GLOB = "*" + +// Glob will test a string pattern, potentially containing globs, against a +// subject string. The result is a simple true/false, determining whether or +// not the glob pattern matched the subject text. +func Glob(pattern, subj string) bool { + // Empty pattern can only match empty subject + if pattern == "" { + return subj == pattern + } + + // If the pattern _is_ a glob, it matches everything + if pattern == GLOB { + return true + } + + parts := strings.Split(pattern, GLOB) + + if len(parts) == 1 { + // No globs in pattern, so test for equality + return subj == pattern + } + + leadingGlob := strings.HasPrefix(pattern, GLOB) + trailingGlob := strings.HasSuffix(pattern, GLOB) + end := len(parts) - 1 + + // Go over the leading parts and ensure they match. + for i := 0; i < end; i++ { + idx := strings.Index(subj, parts[i]) + + switch i { + case 0: + // Check the first section. Requires special handling. + if !leadingGlob && idx != 0 { + return false + } + default: + // Check that the middle parts match. + if idx < 0 { + return false + } + } + + // Trim evaluated text from subj as we loop over the pattern. + subj = subj[idx+len(parts[i]):] + } + + // Reached the last section. Requires special handling. 
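+	// A trailing glob matches whatever remains; otherwise the remaining subject
+	// must end with the final literal part of the pattern.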
+ return trailingGlob || strings.HasSuffix(subj, parts[end]) +} diff --git a/vendor/github.com/ryanuber/go-glob/go.mod b/vendor/github.com/ryanuber/go-glob/go.mod new file mode 100644 index 0000000000..f38203593c --- /dev/null +++ b/vendor/github.com/ryanuber/go-glob/go.mod @@ -0,0 +1 @@ +module github.com/ryanuber/go-glob diff --git a/vendor/go.uber.org/atomic/.codecov.yml b/vendor/go.uber.org/atomic/.codecov.yml index 6d4d1be7b5..571116cc39 100644 --- a/vendor/go.uber.org/atomic/.codecov.yml +++ b/vendor/go.uber.org/atomic/.codecov.yml @@ -13,3 +13,7 @@ coverage: if_not_found: success # if parent is not found report status as success, error, or failure if_ci_failed: error # if ci fails report status as success, error, or failure +# Also update COVER_IGNORE_PKGS in the Makefile. +ignore: + - /internal/gen-atomicint/ + - /internal/gen-valuewrapper/ diff --git a/vendor/go.uber.org/atomic/.gitignore b/vendor/go.uber.org/atomic/.gitignore index c3fa253893..2e337a0ed5 100644 --- a/vendor/go.uber.org/atomic/.gitignore +++ b/vendor/go.uber.org/atomic/.gitignore @@ -10,3 +10,6 @@ lint.log # Profiling output *.prof + +# Output of fossa analyzer +/fossa diff --git a/vendor/go.uber.org/atomic/.travis.yml b/vendor/go.uber.org/atomic/.travis.yml deleted file mode 100644 index 4e73268b60..0000000000 --- a/vendor/go.uber.org/atomic/.travis.yml +++ /dev/null @@ -1,27 +0,0 @@ -sudo: false -language: go -go_import_path: go.uber.org/atomic - -env: - global: - - GO111MODULE=on - -matrix: - include: - - go: 1.12.x - - go: 1.13.x - env: LINT=1 - -cache: - directories: - - vendor - -before_install: - - go version - -script: - - test -z "$LINT" || make lint - - make cover - -after_success: - - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/go.uber.org/atomic/CHANGELOG.md b/vendor/go.uber.org/atomic/CHANGELOG.md index aef8b6ebc4..38f564e2b3 100644 --- a/vendor/go.uber.org/atomic/CHANGELOG.md +++ b/vendor/go.uber.org/atomic/CHANGELOG.md @@ -4,32 +4,71 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [1.9.0] - 2021-07-15 +### Added +- Add `Float64.Swap` to match int atomic operations. +- Add `atomic.Time` type for atomic operations on `time.Time` values. + +[1.9.0]: https://github.com/uber-go/atomic/compare/v1.8.0...v1.9.0 + +## [1.8.0] - 2021-06-09 +### Added +- Add `atomic.Uintptr` type for atomic operations on `uintptr` values. +- Add `atomic.UnsafePointer` type for atomic operations on `unsafe.Pointer` values. + +[1.8.0]: https://github.com/uber-go/atomic/compare/v1.7.0...v1.8.0 + +## [1.7.0] - 2020-09-14 +### Added +- Support JSON serialization and deserialization of primitive atomic types. +- Support Text marshalling and unmarshalling for string atomics. + +### Changed +- Disallow incorrect comparison of atomic values in a non-atomic way. + +### Removed +- Remove dependency on `golang.org/x/{lint, tools}`. + +[1.7.0]: https://github.com/uber-go/atomic/compare/v1.6.0...v1.7.0 + ## [1.6.0] - 2020-02-24 ### Changed - Drop library dependency on `golang.org/x/{lint, tools}`. +[1.6.0]: https://github.com/uber-go/atomic/compare/v1.5.1...v1.6.0 + ## [1.5.1] - 2019-11-19 - Fix bug where `Bool.CAS` and `Bool.Toggle` do work correctly together causing `CAS` to fail even though the old value matches. 
+[1.5.1]: https://github.com/uber-go/atomic/compare/v1.5.0...v1.5.1 + ## [1.5.0] - 2019-10-29 ### Changed - With Go modules, only the `go.uber.org/atomic` import path is supported now. If you need to use the old import path, please add a `replace` directive to your `go.mod`. +[1.5.0]: https://github.com/uber-go/atomic/compare/v1.4.0...v1.5.0 + ## [1.4.0] - 2019-05-01 ### Added - Add `atomic.Error` type for atomic operations on `error` values. +[1.4.0]: https://github.com/uber-go/atomic/compare/v1.3.2...v1.4.0 + ## [1.3.2] - 2018-05-02 ### Added - Add `atomic.Duration` type for atomic operations on `time.Duration` values. +[1.3.2]: https://github.com/uber-go/atomic/compare/v1.3.1...v1.3.2 + ## [1.3.1] - 2017-11-14 ### Fixed - Revert optimization for `atomic.String.Store("")` which caused data races. +[1.3.1]: https://github.com/uber-go/atomic/compare/v1.3.0...v1.3.1 + ## [1.3.0] - 2017-11-13 ### Added - Add `atomic.Bool.CAS` for compare-and-swap semantics on bools. @@ -37,10 +76,14 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Changed - Optimize `atomic.String.Store("")` by avoiding an allocation. +[1.3.0]: https://github.com/uber-go/atomic/compare/v1.2.0...v1.3.0 + ## [1.2.0] - 2017-04-12 ### Added - Shadow `atomic.Value` from `sync/atomic`. +[1.2.0]: https://github.com/uber-go/atomic/compare/v1.1.0...v1.2.0 + ## [1.1.0] - 2017-03-10 ### Added - Add atomic `Float64` type. @@ -48,17 +91,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Changed - Support new `go.uber.org/atomic` import path. +[1.1.0]: https://github.com/uber-go/atomic/compare/v1.0.0...v1.1.0 + ## [1.0.0] - 2016-07-18 - Initial release. -[1.6.0]: https://github.com/uber-go/atomic/compare/v1.5.1...v1.6.0 -[1.5.1]: https://github.com/uber-go/atomic/compare/v1.5.0...v1.5.1 -[1.5.0]: https://github.com/uber-go/atomic/compare/v1.4.0...v1.5.0 -[1.4.0]: https://github.com/uber-go/atomic/compare/v1.3.2...v1.4.0 -[1.3.2]: https://github.com/uber-go/atomic/compare/v1.3.1...v1.3.2 -[1.3.1]: https://github.com/uber-go/atomic/compare/v1.3.0...v1.3.1 -[1.3.0]: https://github.com/uber-go/atomic/compare/v1.2.0...v1.3.0 -[1.2.0]: https://github.com/uber-go/atomic/compare/v1.1.0...v1.2.0 -[1.1.0]: https://github.com/uber-go/atomic/compare/v1.0.0...v1.1.0 [1.0.0]: https://github.com/uber-go/atomic/releases/tag/v1.0.0 diff --git a/vendor/go.uber.org/atomic/Makefile b/vendor/go.uber.org/atomic/Makefile index 39af0fb63f..46c945b32b 100644 --- a/vendor/go.uber.org/atomic/Makefile +++ b/vendor/go.uber.org/atomic/Makefile @@ -2,8 +2,16 @@ export GOBIN ?= $(shell pwd)/bin GOLINT = $(GOBIN)/golint +GEN_ATOMICINT = $(GOBIN)/gen-atomicint +GEN_ATOMICWRAPPER = $(GOBIN)/gen-atomicwrapper +STATICCHECK = $(GOBIN)/staticcheck -GO_FILES ?= *.go +GO_FILES ?= $(shell find . '(' -path .git -o -path vendor ')' -prune -o -name '*.go' -print) + +# Also update ignore section in .codecov.yml. +COVER_IGNORE_PKGS = \ + go.uber.org/atomic/internal/gen-atomicint \ + go.uber.org/atomic/internal/gen-atomicwrapper .PHONY: build build: @@ -20,16 +28,52 @@ gofmt: @[ ! 
-s "$(FMT_LOG)" ] || (echo "gofmt failed:" && cat $(FMT_LOG) && false) $(GOLINT): - go install golang.org/x/lint/golint + cd tools && go install golang.org/x/lint/golint + +$(STATICCHECK): + cd tools && go install honnef.co/go/tools/cmd/staticcheck + +$(GEN_ATOMICWRAPPER): $(wildcard ./internal/gen-atomicwrapper/*) + go build -o $@ ./internal/gen-atomicwrapper + +$(GEN_ATOMICINT): $(wildcard ./internal/gen-atomicint/*) + go build -o $@ ./internal/gen-atomicint .PHONY: golint golint: $(GOLINT) $(GOLINT) ./... +.PHONY: staticcheck +staticcheck: $(STATICCHECK) + $(STATICCHECK) ./... + .PHONY: lint -lint: gofmt golint +lint: gofmt golint staticcheck generatenodirty + +# comma separated list of packages to consider for code coverage. +COVER_PKG = $(shell \ + go list -find ./... | \ + grep -v $(foreach pkg,$(COVER_IGNORE_PKGS),-e "^$(pkg)$$") | \ + paste -sd, -) .PHONY: cover cover: - go test -coverprofile=cover.out -coverpkg ./... -v ./... + go test -coverprofile=cover.out -coverpkg $(COVER_PKG) -v ./... go tool cover -html=cover.out -o cover.html + +.PHONY: generate +generate: $(GEN_ATOMICINT) $(GEN_ATOMICWRAPPER) + go generate ./... + +.PHONY: generatenodirty +generatenodirty: + @[ -z "$$(git status --porcelain)" ] || ( \ + echo "Working tree is dirty. Commit your changes first."; \ + git status; \ + exit 1 ) + @make generate + @status=$$(git status --porcelain); \ + [ -z "$$status" ] || ( \ + echo "Working tree is dirty after `make generate`:"; \ + echo "$$status"; \ + echo "Please ensure that the generated code is up-to-date." ) diff --git a/vendor/go.uber.org/atomic/README.md b/vendor/go.uber.org/atomic/README.md index ade0c20f16..96b47a1f12 100644 --- a/vendor/go.uber.org/atomic/README.md +++ b/vendor/go.uber.org/atomic/README.md @@ -55,8 +55,8 @@ Released under the [MIT License](LICENSE.txt). [doc-img]: https://godoc.org/github.com/uber-go/atomic?status.svg [doc]: https://godoc.org/go.uber.org/atomic -[ci-img]: https://travis-ci.com/uber-go/atomic.svg?branch=master -[ci]: https://travis-ci.com/uber-go/atomic +[ci-img]: https://github.com/uber-go/atomic/actions/workflows/go.yml/badge.svg +[ci]: https://github.com/uber-go/atomic/actions/workflows/go.yml [cov-img]: https://codecov.io/gh/uber-go/atomic/branch/master/graph/badge.svg [cov]: https://codecov.io/gh/uber-go/atomic [reportcard-img]: https://goreportcard.com/badge/go.uber.org/atomic diff --git a/vendor/go.uber.org/atomic/atomic.go b/vendor/go.uber.org/atomic/atomic.go deleted file mode 100644 index ad5fa0980a..0000000000 --- a/vendor/go.uber.org/atomic/atomic.go +++ /dev/null @@ -1,356 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Package atomic provides simple wrappers around numerics to enforce atomic -// access. -package atomic - -import ( - "math" - "sync/atomic" - "time" -) - -// Int32 is an atomic wrapper around an int32. -type Int32 struct{ v int32 } - -// NewInt32 creates an Int32. -func NewInt32(i int32) *Int32 { - return &Int32{i} -} - -// Load atomically loads the wrapped value. -func (i *Int32) Load() int32 { - return atomic.LoadInt32(&i.v) -} - -// Add atomically adds to the wrapped int32 and returns the new value. -func (i *Int32) Add(n int32) int32 { - return atomic.AddInt32(&i.v, n) -} - -// Sub atomically subtracts from the wrapped int32 and returns the new value. -func (i *Int32) Sub(n int32) int32 { - return atomic.AddInt32(&i.v, -n) -} - -// Inc atomically increments the wrapped int32 and returns the new value. -func (i *Int32) Inc() int32 { - return i.Add(1) -} - -// Dec atomically decrements the wrapped int32 and returns the new value. -func (i *Int32) Dec() int32 { - return i.Sub(1) -} - -// CAS is an atomic compare-and-swap. -func (i *Int32) CAS(old, new int32) bool { - return atomic.CompareAndSwapInt32(&i.v, old, new) -} - -// Store atomically stores the passed value. -func (i *Int32) Store(n int32) { - atomic.StoreInt32(&i.v, n) -} - -// Swap atomically swaps the wrapped int32 and returns the old value. -func (i *Int32) Swap(n int32) int32 { - return atomic.SwapInt32(&i.v, n) -} - -// Int64 is an atomic wrapper around an int64. -type Int64 struct{ v int64 } - -// NewInt64 creates an Int64. -func NewInt64(i int64) *Int64 { - return &Int64{i} -} - -// Load atomically loads the wrapped value. -func (i *Int64) Load() int64 { - return atomic.LoadInt64(&i.v) -} - -// Add atomically adds to the wrapped int64 and returns the new value. -func (i *Int64) Add(n int64) int64 { - return atomic.AddInt64(&i.v, n) -} - -// Sub atomically subtracts from the wrapped int64 and returns the new value. -func (i *Int64) Sub(n int64) int64 { - return atomic.AddInt64(&i.v, -n) -} - -// Inc atomically increments the wrapped int64 and returns the new value. -func (i *Int64) Inc() int64 { - return i.Add(1) -} - -// Dec atomically decrements the wrapped int64 and returns the new value. -func (i *Int64) Dec() int64 { - return i.Sub(1) -} - -// CAS is an atomic compare-and-swap. -func (i *Int64) CAS(old, new int64) bool { - return atomic.CompareAndSwapInt64(&i.v, old, new) -} - -// Store atomically stores the passed value. -func (i *Int64) Store(n int64) { - atomic.StoreInt64(&i.v, n) -} - -// Swap atomically swaps the wrapped int64 and returns the old value. -func (i *Int64) Swap(n int64) int64 { - return atomic.SwapInt64(&i.v, n) -} - -// Uint32 is an atomic wrapper around an uint32. -type Uint32 struct{ v uint32 } - -// NewUint32 creates a Uint32. -func NewUint32(i uint32) *Uint32 { - return &Uint32{i} -} - -// Load atomically loads the wrapped value. -func (i *Uint32) Load() uint32 { - return atomic.LoadUint32(&i.v) -} - -// Add atomically adds to the wrapped uint32 and returns the new value. -func (i *Uint32) Add(n uint32) uint32 { - return atomic.AddUint32(&i.v, n) -} - -// Sub atomically subtracts from the wrapped uint32 and returns the new value. 
-func (i *Uint32) Sub(n uint32) uint32 { - return atomic.AddUint32(&i.v, ^(n - 1)) -} - -// Inc atomically increments the wrapped uint32 and returns the new value. -func (i *Uint32) Inc() uint32 { - return i.Add(1) -} - -// Dec atomically decrements the wrapped int32 and returns the new value. -func (i *Uint32) Dec() uint32 { - return i.Sub(1) -} - -// CAS is an atomic compare-and-swap. -func (i *Uint32) CAS(old, new uint32) bool { - return atomic.CompareAndSwapUint32(&i.v, old, new) -} - -// Store atomically stores the passed value. -func (i *Uint32) Store(n uint32) { - atomic.StoreUint32(&i.v, n) -} - -// Swap atomically swaps the wrapped uint32 and returns the old value. -func (i *Uint32) Swap(n uint32) uint32 { - return atomic.SwapUint32(&i.v, n) -} - -// Uint64 is an atomic wrapper around a uint64. -type Uint64 struct{ v uint64 } - -// NewUint64 creates a Uint64. -func NewUint64(i uint64) *Uint64 { - return &Uint64{i} -} - -// Load atomically loads the wrapped value. -func (i *Uint64) Load() uint64 { - return atomic.LoadUint64(&i.v) -} - -// Add atomically adds to the wrapped uint64 and returns the new value. -func (i *Uint64) Add(n uint64) uint64 { - return atomic.AddUint64(&i.v, n) -} - -// Sub atomically subtracts from the wrapped uint64 and returns the new value. -func (i *Uint64) Sub(n uint64) uint64 { - return atomic.AddUint64(&i.v, ^(n - 1)) -} - -// Inc atomically increments the wrapped uint64 and returns the new value. -func (i *Uint64) Inc() uint64 { - return i.Add(1) -} - -// Dec atomically decrements the wrapped uint64 and returns the new value. -func (i *Uint64) Dec() uint64 { - return i.Sub(1) -} - -// CAS is an atomic compare-and-swap. -func (i *Uint64) CAS(old, new uint64) bool { - return atomic.CompareAndSwapUint64(&i.v, old, new) -} - -// Store atomically stores the passed value. -func (i *Uint64) Store(n uint64) { - atomic.StoreUint64(&i.v, n) -} - -// Swap atomically swaps the wrapped uint64 and returns the old value. -func (i *Uint64) Swap(n uint64) uint64 { - return atomic.SwapUint64(&i.v, n) -} - -// Bool is an atomic Boolean. -type Bool struct{ v uint32 } - -// NewBool creates a Bool. -func NewBool(initial bool) *Bool { - return &Bool{boolToInt(initial)} -} - -// Load atomically loads the Boolean. -func (b *Bool) Load() bool { - return truthy(atomic.LoadUint32(&b.v)) -} - -// CAS is an atomic compare-and-swap. -func (b *Bool) CAS(old, new bool) bool { - return atomic.CompareAndSwapUint32(&b.v, boolToInt(old), boolToInt(new)) -} - -// Store atomically stores the passed value. -func (b *Bool) Store(new bool) { - atomic.StoreUint32(&b.v, boolToInt(new)) -} - -// Swap sets the given value and returns the previous value. -func (b *Bool) Swap(new bool) bool { - return truthy(atomic.SwapUint32(&b.v, boolToInt(new))) -} - -// Toggle atomically negates the Boolean and returns the previous value. -func (b *Bool) Toggle() bool { - for { - old := b.Load() - if b.CAS(old, !old) { - return old - } - } -} - -func truthy(n uint32) bool { - return n == 1 -} - -func boolToInt(b bool) uint32 { - if b { - return 1 - } - return 0 -} - -// Float64 is an atomic wrapper around float64. -type Float64 struct { - v uint64 -} - -// NewFloat64 creates a Float64. -func NewFloat64(f float64) *Float64 { - return &Float64{math.Float64bits(f)} -} - -// Load atomically loads the wrapped value. -func (f *Float64) Load() float64 { - return math.Float64frombits(atomic.LoadUint64(&f.v)) -} - -// Store atomically stores the passed value. 
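The unsigned Sub implementations above (and the regenerated versions further down) rely on two's-complement arithmetic: for an unsigned n, ^(n - 1) equals 2^32 - n, so adding it subtracts n modulo 2^32. A minimal standalone sketch of that identity, assuming nothing beyond the standard library:

package main

import "fmt"

func main() {
	var x uint32 = 10
	const n uint32 = 3

	// ^(n - 1) is the two's complement of n, i.e. 2^32 - n,
	// so adding it wraps around to x - n.
	fmt.Println(x + ^(n - 1)) // 7
}
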
-func (f *Float64) Store(s float64) { - atomic.StoreUint64(&f.v, math.Float64bits(s)) -} - -// Add atomically adds to the wrapped float64 and returns the new value. -func (f *Float64) Add(s float64) float64 { - for { - old := f.Load() - new := old + s - if f.CAS(old, new) { - return new - } - } -} - -// Sub atomically subtracts from the wrapped float64 and returns the new value. -func (f *Float64) Sub(s float64) float64 { - return f.Add(-s) -} - -// CAS is an atomic compare-and-swap. -func (f *Float64) CAS(old, new float64) bool { - return atomic.CompareAndSwapUint64(&f.v, math.Float64bits(old), math.Float64bits(new)) -} - -// Duration is an atomic wrapper around time.Duration -// https://godoc.org/time#Duration -type Duration struct { - v Int64 -} - -// NewDuration creates a Duration. -func NewDuration(d time.Duration) *Duration { - return &Duration{v: *NewInt64(int64(d))} -} - -// Load atomically loads the wrapped value. -func (d *Duration) Load() time.Duration { - return time.Duration(d.v.Load()) -} - -// Store atomically stores the passed value. -func (d *Duration) Store(n time.Duration) { - d.v.Store(int64(n)) -} - -// Add atomically adds to the wrapped time.Duration and returns the new value. -func (d *Duration) Add(n time.Duration) time.Duration { - return time.Duration(d.v.Add(int64(n))) -} - -// Sub atomically subtracts from the wrapped time.Duration and returns the new value. -func (d *Duration) Sub(n time.Duration) time.Duration { - return time.Duration(d.v.Sub(int64(n))) -} - -// Swap atomically swaps the wrapped time.Duration and returns the old value. -func (d *Duration) Swap(n time.Duration) time.Duration { - return time.Duration(d.v.Swap(int64(n))) -} - -// CAS is an atomic compare-and-swap. -func (d *Duration) CAS(old, new time.Duration) bool { - return d.v.CAS(int64(old), int64(new)) -} - -// Value shadows the type of the same name from sync/atomic -// https://godoc.org/sync/atomic#Value -type Value struct{ atomic.Value } diff --git a/vendor/go.uber.org/atomic/bool.go b/vendor/go.uber.org/atomic/bool.go new file mode 100644 index 0000000000..209df7bbcd --- /dev/null +++ b/vendor/go.uber.org/atomic/bool.go @@ -0,0 +1,81 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020-2021 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" +) + +// Bool is an atomic type-safe wrapper for bool values. 
+type Bool struct { + _ nocmp // disallow non-atomic comparison + + v Uint32 +} + +var _zeroBool bool + +// NewBool creates a new Bool. +func NewBool(val bool) *Bool { + x := &Bool{} + if val != _zeroBool { + x.Store(val) + } + return x +} + +// Load atomically loads the wrapped bool. +func (x *Bool) Load() bool { + return truthy(x.v.Load()) +} + +// Store atomically stores the passed bool. +func (x *Bool) Store(val bool) { + x.v.Store(boolToInt(val)) +} + +// CAS is an atomic compare-and-swap for bool values. +func (x *Bool) CAS(old, new bool) (swapped bool) { + return x.v.CAS(boolToInt(old), boolToInt(new)) +} + +// Swap atomically stores the given bool and returns the old +// value. +func (x *Bool) Swap(val bool) (old bool) { + return truthy(x.v.Swap(boolToInt(val))) +} + +// MarshalJSON encodes the wrapped bool into JSON. +func (x *Bool) MarshalJSON() ([]byte, error) { + return json.Marshal(x.Load()) +} + +// UnmarshalJSON decodes a bool from JSON. +func (x *Bool) UnmarshalJSON(b []byte) error { + var v bool + if err := json.Unmarshal(b, &v); err != nil { + return err + } + x.Store(v) + return nil +} diff --git a/vendor/go.uber.org/atomic/bool_ext.go b/vendor/go.uber.org/atomic/bool_ext.go new file mode 100644 index 0000000000..a2e60e9873 --- /dev/null +++ b/vendor/go.uber.org/atomic/bool_ext.go @@ -0,0 +1,53 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "strconv" +) + +//go:generate bin/gen-atomicwrapper -name=Bool -type=bool -wrapped=Uint32 -pack=boolToInt -unpack=truthy -cas -swap -json -file=bool.go + +func truthy(n uint32) bool { + return n == 1 +} + +func boolToInt(b bool) uint32 { + if b { + return 1 + } + return 0 +} + +// Toggle atomically negates the Boolean and returns the previous value. +func (b *Bool) Toggle() (old bool) { + for { + old := b.Load() + if b.CAS(old, !old) { + return old + } + } +} + +// String encodes the wrapped value as a string. +func (b *Bool) String() string { + return strconv.FormatBool(b.Load()) +} diff --git a/vendor/go.uber.org/atomic/doc.go b/vendor/go.uber.org/atomic/doc.go new file mode 100644 index 0000000000..ae7390ee68 --- /dev/null +++ b/vendor/go.uber.org/atomic/doc.go @@ -0,0 +1,23 @@ +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package atomic provides simple wrappers around numerics to enforce atomic +// access. +package atomic diff --git a/vendor/go.uber.org/atomic/duration.go b/vendor/go.uber.org/atomic/duration.go new file mode 100644 index 0000000000..207594f5e8 --- /dev/null +++ b/vendor/go.uber.org/atomic/duration.go @@ -0,0 +1,82 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020-2021 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "time" +) + +// Duration is an atomic type-safe wrapper for time.Duration values. +type Duration struct { + _ nocmp // disallow non-atomic comparison + + v Int64 +} + +var _zeroDuration time.Duration + +// NewDuration creates a new Duration. +func NewDuration(val time.Duration) *Duration { + x := &Duration{} + if val != _zeroDuration { + x.Store(val) + } + return x +} + +// Load atomically loads the wrapped time.Duration. +func (x *Duration) Load() time.Duration { + return time.Duration(x.v.Load()) +} + +// Store atomically stores the passed time.Duration. +func (x *Duration) Store(val time.Duration) { + x.v.Store(int64(val)) +} + +// CAS is an atomic compare-and-swap for time.Duration values. 
+func (x *Duration) CAS(old, new time.Duration) (swapped bool) { + return x.v.CAS(int64(old), int64(new)) +} + +// Swap atomically stores the given time.Duration and returns the old +// value. +func (x *Duration) Swap(val time.Duration) (old time.Duration) { + return time.Duration(x.v.Swap(int64(val))) +} + +// MarshalJSON encodes the wrapped time.Duration into JSON. +func (x *Duration) MarshalJSON() ([]byte, error) { + return json.Marshal(x.Load()) +} + +// UnmarshalJSON decodes a time.Duration from JSON. +func (x *Duration) UnmarshalJSON(b []byte) error { + var v time.Duration + if err := json.Unmarshal(b, &v); err != nil { + return err + } + x.Store(v) + return nil +} diff --git a/vendor/go.uber.org/atomic/duration_ext.go b/vendor/go.uber.org/atomic/duration_ext.go new file mode 100644 index 0000000000..4c18b0a9ed --- /dev/null +++ b/vendor/go.uber.org/atomic/duration_ext.go @@ -0,0 +1,40 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import "time" + +//go:generate bin/gen-atomicwrapper -name=Duration -type=time.Duration -wrapped=Int64 -pack=int64 -unpack=time.Duration -cas -swap -json -imports time -file=duration.go + +// Add atomically adds to the wrapped time.Duration and returns the new value. +func (d *Duration) Add(delta time.Duration) time.Duration { + return time.Duration(d.v.Add(int64(delta))) +} + +// Sub atomically subtracts from the wrapped time.Duration and returns the new value. +func (d *Duration) Sub(delta time.Duration) time.Duration { + return time.Duration(d.v.Sub(int64(delta))) +} + +// String encodes the wrapped value as a string. +func (d *Duration) String() string { + return d.Load().String() +} diff --git a/vendor/go.uber.org/atomic/error.go b/vendor/go.uber.org/atomic/error.go index 0489d19bad..3be19c35ee 100644 --- a/vendor/go.uber.org/atomic/error.go +++ b/vendor/go.uber.org/atomic/error.go @@ -1,4 +1,6 @@ -// Copyright (c) 2016 Uber Technologies, Inc. +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020-2021 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -20,36 +22,30 @@ package atomic -// Error is an atomic type-safe wrapper around Value for errors -type Error struct{ v Value } - -// errorHolder is non-nil holder for error object. 
-// atomic.Value panics on saving nil object, so err object needs to be -// wrapped with valid object first. -type errorHolder struct{ err error } +// Error is an atomic type-safe wrapper for error values. +type Error struct { + _ nocmp // disallow non-atomic comparison -// NewError creates new atomic error object -func NewError(err error) *Error { - e := &Error{} - if err != nil { - e.Store(err) - } - return e + v Value } -// Load atomically loads the wrapped error -func (e *Error) Load() error { - v := e.v.Load() - if v == nil { - return nil +var _zeroError error + +// NewError creates a new Error. +func NewError(val error) *Error { + x := &Error{} + if val != _zeroError { + x.Store(val) } + return x +} - eh := v.(errorHolder) - return eh.err +// Load atomically loads the wrapped error. +func (x *Error) Load() error { + return unpackError(x.v.Load()) } -// Store atomically stores error. -// NOTE: a holder object is allocated on each Store call. -func (e *Error) Store(err error) { - e.v.Store(errorHolder{err: err}) +// Store atomically stores the passed error. +func (x *Error) Store(val error) { + x.v.Store(packError(val)) } diff --git a/vendor/go.uber.org/atomic/error_ext.go b/vendor/go.uber.org/atomic/error_ext.go new file mode 100644 index 0000000000..ffe0be21cb --- /dev/null +++ b/vendor/go.uber.org/atomic/error_ext.go @@ -0,0 +1,39 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +// atomic.Value panics on nil inputs, or if the underlying type changes. +// Stabilize by always storing a custom struct that we control. + +//go:generate bin/gen-atomicwrapper -name=Error -type=error -wrapped=Value -pack=packError -unpack=unpackError -file=error.go + +type packedError struct{ Value error } + +func packError(v error) interface{} { + return packedError{v} +} + +func unpackError(v interface{}) error { + if err, ok := v.(packedError); ok { + return err.Value + } + return nil +} diff --git a/vendor/go.uber.org/atomic/float64.go b/vendor/go.uber.org/atomic/float64.go new file mode 100644 index 0000000000..8a13671847 --- /dev/null +++ b/vendor/go.uber.org/atomic/float64.go @@ -0,0 +1,77 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020-2021 Uber Technologies, Inc. 
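The packError/unpackError indirection in error_ext.go above works around two sync/atomic.Value restrictions: it panics when storing nil and when the stored concrete type changes between calls. A small sketch of that behavior, using only the standard library:

package main

import "sync/atomic"

type packedError struct{ Value error }

func main() {
	var v atomic.Value

	// Always storing the same wrapper type keeps atomic.Value happy,
	// even when the wrapped error is nil.
	v.Store(packedError{nil})
	_ = v.Load().(packedError).Value

	// Storing a bare nil error would panic instead:
	// v.Store(error(nil)) // panic: sync/atomic: store of nil value into Value
}
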
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "math" +) + +// Float64 is an atomic type-safe wrapper for float64 values. +type Float64 struct { + _ nocmp // disallow non-atomic comparison + + v Uint64 +} + +var _zeroFloat64 float64 + +// NewFloat64 creates a new Float64. +func NewFloat64(val float64) *Float64 { + x := &Float64{} + if val != _zeroFloat64 { + x.Store(val) + } + return x +} + +// Load atomically loads the wrapped float64. +func (x *Float64) Load() float64 { + return math.Float64frombits(x.v.Load()) +} + +// Store atomically stores the passed float64. +func (x *Float64) Store(val float64) { + x.v.Store(math.Float64bits(val)) +} + +// Swap atomically stores the given float64 and returns the old +// value. +func (x *Float64) Swap(val float64) (old float64) { + return math.Float64frombits(x.v.Swap(math.Float64bits(val))) +} + +// MarshalJSON encodes the wrapped float64 into JSON. +func (x *Float64) MarshalJSON() ([]byte, error) { + return json.Marshal(x.Load()) +} + +// UnmarshalJSON decodes a float64 from JSON. +func (x *Float64) UnmarshalJSON(b []byte) error { + var v float64 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + x.Store(v) + return nil +} diff --git a/vendor/go.uber.org/atomic/float64_ext.go b/vendor/go.uber.org/atomic/float64_ext.go new file mode 100644 index 0000000000..df36b0107f --- /dev/null +++ b/vendor/go.uber.org/atomic/float64_ext.go @@ -0,0 +1,69 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "math" + "strconv" +) + +//go:generate bin/gen-atomicwrapper -name=Float64 -type=float64 -wrapped=Uint64 -pack=math.Float64bits -unpack=math.Float64frombits -swap -json -imports math -file=float64.go + +// Add atomically adds to the wrapped float64 and returns the new value. +func (f *Float64) Add(delta float64) float64 { + for { + old := f.Load() + new := old + delta + if f.CAS(old, new) { + return new + } + } +} + +// Sub atomically subtracts from the wrapped float64 and returns the new value. +func (f *Float64) Sub(delta float64) float64 { + return f.Add(-delta) +} + +// CAS is an atomic compare-and-swap for float64 values. +// +// Note: CAS handles NaN incorrectly. NaN != NaN using Go's inbuilt operators +// but CAS allows a stored NaN to compare equal to a passed in NaN. +// This avoids typical CAS loops from blocking forever, e.g., +// +// for { +// old := atom.Load() +// new = f(old) +// if atom.CAS(old, new) { +// break +// } +// } +// +// If CAS did not match NaN to match, then the above would loop forever. +func (f *Float64) CAS(old, new float64) (swapped bool) { + return f.v.CAS(math.Float64bits(old), math.Float64bits(new)) +} + +// String encodes the wrapped value as a string. +func (f *Float64) String() string { + // 'g' is the behavior for floats with %v. + return strconv.FormatFloat(f.Load(), 'g', -1, 64) +} diff --git a/vendor/go.uber.org/atomic/gen.go b/vendor/go.uber.org/atomic/gen.go new file mode 100644 index 0000000000..1e9ef4f879 --- /dev/null +++ b/vendor/go.uber.org/atomic/gen.go @@ -0,0 +1,27 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
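The NaN note on Float64.CAS above matters because Float64 compares raw IEEE-754 bit patterns (through Uint64.CAS) rather than using ==, under which NaN never equals itself. A short sketch of the difference, assuming the go.uber.org/atomic package vendored in this change:

package main

import (
	"fmt"
	"math"

	"go.uber.org/atomic"
)

func main() {
	f := atomic.NewFloat64(math.NaN())

	// Go's == never matches NaN against itself ...
	fmt.Println(math.NaN() == math.NaN()) // false

	// ... but the bit-level CAS does, so a Load/CAS retry loop
	// over a stored NaN still makes progress.
	old := f.Load()
	fmt.Println(f.CAS(old, 42)) // true
}
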
+ +package atomic + +//go:generate bin/gen-atomicint -name=Int32 -wrapped=int32 -file=int32.go +//go:generate bin/gen-atomicint -name=Int64 -wrapped=int64 -file=int64.go +//go:generate bin/gen-atomicint -name=Uint32 -wrapped=uint32 -unsigned -file=uint32.go +//go:generate bin/gen-atomicint -name=Uint64 -wrapped=uint64 -unsigned -file=uint64.go +//go:generate bin/gen-atomicint -name=Uintptr -wrapped=uintptr -unsigned -file=uintptr.go diff --git a/vendor/go.uber.org/atomic/go.mod b/vendor/go.uber.org/atomic/go.mod index a935daebb9..daa7599fe1 100644 --- a/vendor/go.uber.org/atomic/go.mod +++ b/vendor/go.uber.org/atomic/go.mod @@ -3,8 +3,6 @@ module go.uber.org/atomic require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/stretchr/testify v1.3.0 - golang.org/x/lint v0.0.0-20190930215403-16217165b5de - golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c // indirect ) go 1.13 diff --git a/vendor/go.uber.org/atomic/go.sum b/vendor/go.uber.org/atomic/go.sum index 51b2b62afb..4f89841505 100644 --- a/vendor/go.uber.org/atomic/go.sum +++ b/vendor/go.uber.org/atomic/go.sum @@ -1,4 +1,3 @@ -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -7,16 +6,3 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd h1:/e+gpKk9r3dJobndpTytxS2gOy6m5uvpg+ISQoEcusQ= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c h1:IGkKhmfzcztjm6gYkykvu/NiS8kaqbCWAEWWAyf8J5U= -golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/vendor/go.uber.org/atomic/int32.go b/vendor/go.uber.org/atomic/int32.go new file mode 100644 index 0000000000..640ea36a17 --- /dev/null +++ b/vendor/go.uber.org/atomic/int32.go @@ -0,0 +1,102 @@ +// @generated Code generated by gen-atomicint. + +// Copyright (c) 2020-2021 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "strconv" + "sync/atomic" +) + +// Int32 is an atomic wrapper around int32. +type Int32 struct { + _ nocmp // disallow non-atomic comparison + + v int32 +} + +// NewInt32 creates a new Int32. +func NewInt32(val int32) *Int32 { + return &Int32{v: val} +} + +// Load atomically loads the wrapped value. +func (i *Int32) Load() int32 { + return atomic.LoadInt32(&i.v) +} + +// Add atomically adds to the wrapped int32 and returns the new value. +func (i *Int32) Add(delta int32) int32 { + return atomic.AddInt32(&i.v, delta) +} + +// Sub atomically subtracts from the wrapped int32 and returns the new value. +func (i *Int32) Sub(delta int32) int32 { + return atomic.AddInt32(&i.v, -delta) +} + +// Inc atomically increments the wrapped int32 and returns the new value. +func (i *Int32) Inc() int32 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped int32 and returns the new value. +func (i *Int32) Dec() int32 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Int32) CAS(old, new int32) (swapped bool) { + return atomic.CompareAndSwapInt32(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Int32) Store(val int32) { + atomic.StoreInt32(&i.v, val) +} + +// Swap atomically swaps the wrapped int32 and returns the old value. +func (i *Int32) Swap(val int32) (old int32) { + return atomic.SwapInt32(&i.v, val) +} + +// MarshalJSON encodes the wrapped int32 into JSON. +func (i *Int32) MarshalJSON() ([]byte, error) { + return json.Marshal(i.Load()) +} + +// UnmarshalJSON decodes JSON into the wrapped int32. +func (i *Int32) UnmarshalJSON(b []byte) error { + var v int32 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + i.Store(v) + return nil +} + +// String encodes the wrapped value as a string. +func (i *Int32) String() string { + v := i.Load() + return strconv.FormatInt(int64(v), 10) +} diff --git a/vendor/go.uber.org/atomic/int64.go b/vendor/go.uber.org/atomic/int64.go new file mode 100644 index 0000000000..9ab66b9809 --- /dev/null +++ b/vendor/go.uber.org/atomic/int64.go @@ -0,0 +1,102 @@ +// @generated Code generated by gen-atomicint. + +// Copyright (c) 2020-2021 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "strconv" + "sync/atomic" +) + +// Int64 is an atomic wrapper around int64. +type Int64 struct { + _ nocmp // disallow non-atomic comparison + + v int64 +} + +// NewInt64 creates a new Int64. +func NewInt64(val int64) *Int64 { + return &Int64{v: val} +} + +// Load atomically loads the wrapped value. +func (i *Int64) Load() int64 { + return atomic.LoadInt64(&i.v) +} + +// Add atomically adds to the wrapped int64 and returns the new value. +func (i *Int64) Add(delta int64) int64 { + return atomic.AddInt64(&i.v, delta) +} + +// Sub atomically subtracts from the wrapped int64 and returns the new value. +func (i *Int64) Sub(delta int64) int64 { + return atomic.AddInt64(&i.v, -delta) +} + +// Inc atomically increments the wrapped int64 and returns the new value. +func (i *Int64) Inc() int64 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped int64 and returns the new value. +func (i *Int64) Dec() int64 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Int64) CAS(old, new int64) (swapped bool) { + return atomic.CompareAndSwapInt64(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Int64) Store(val int64) { + atomic.StoreInt64(&i.v, val) +} + +// Swap atomically swaps the wrapped int64 and returns the old value. +func (i *Int64) Swap(val int64) (old int64) { + return atomic.SwapInt64(&i.v, val) +} + +// MarshalJSON encodes the wrapped int64 into JSON. +func (i *Int64) MarshalJSON() ([]byte, error) { + return json.Marshal(i.Load()) +} + +// UnmarshalJSON decodes JSON into the wrapped int64. +func (i *Int64) UnmarshalJSON(b []byte) error { + var v int64 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + i.Store(v) + return nil +} + +// String encodes the wrapped value as a string. +func (i *Int64) String() string { + v := i.Load() + return strconv.FormatInt(int64(v), 10) +} diff --git a/vendor/go.uber.org/atomic/nocmp.go b/vendor/go.uber.org/atomic/nocmp.go new file mode 100644 index 0000000000..a8201cb4a1 --- /dev/null +++ b/vendor/go.uber.org/atomic/nocmp.go @@ -0,0 +1,35 @@ +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +// nocmp is an uncomparable struct. Embed this inside another struct to make +// it uncomparable. +// +// type Foo struct { +// nocmp +// // ... +// } +// +// This DOES NOT: +// +// - Disallow shallow copies of structs +// - Disallow comparison of pointers to uncomparable structs +type nocmp [0]func() diff --git a/vendor/go.uber.org/atomic/string.go b/vendor/go.uber.org/atomic/string.go index ede8136fac..80df93d094 100644 --- a/vendor/go.uber.org/atomic/string.go +++ b/vendor/go.uber.org/atomic/string.go @@ -1,4 +1,6 @@ -// Copyright (c) 2016 Uber Technologies, Inc. +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020-2021 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -20,30 +22,33 @@ package atomic -// String is an atomic type-safe wrapper around Value for strings. -type String struct{ v Value } +// String is an atomic type-safe wrapper for string values. +type String struct { + _ nocmp // disallow non-atomic comparison + + v Value +} + +var _zeroString string -// NewString creates a String. -func NewString(str string) *String { - s := &String{} - if str != "" { - s.Store(str) +// NewString creates a new String. +func NewString(val string) *String { + x := &String{} + if val != _zeroString { + x.Store(val) } - return s + return x } // Load atomically loads the wrapped string. -func (s *String) Load() string { - v := s.v.Load() - if v == nil { - return "" +func (x *String) Load() string { + if v := x.v.Load(); v != nil { + return v.(string) } - return v.(string) + return _zeroString } // Store atomically stores the passed string. -// Note: Converting the string to an interface{} to store in the Value -// requires an allocation. -func (s *String) Store(str string) { - s.v.Store(str) +func (x *String) Store(val string) { + x.v.Store(val) } diff --git a/vendor/go.uber.org/atomic/string_ext.go b/vendor/go.uber.org/atomic/string_ext.go new file mode 100644 index 0000000000..83d92edafc --- /dev/null +++ b/vendor/go.uber.org/atomic/string_ext.go @@ -0,0 +1,45 @@ +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +//go:generate bin/gen-atomicwrapper -name=String -type=string -wrapped=Value -file=string.go +// Note: No Swap as String wraps Value, which wraps the stdlib sync/atomic.Value which +// only supports Swap as of go1.17: https://github.com/golang/go/issues/39351 + +// String returns the wrapped value. +func (s *String) String() string { + return s.Load() +} + +// MarshalText encodes the wrapped string into a textual form. +// +// This makes it encodable as JSON, YAML, XML, and more. +func (s *String) MarshalText() ([]byte, error) { + return []byte(s.Load()), nil +} + +// UnmarshalText decodes text and replaces the wrapped string with it. +// +// This makes it decodable from JSON, YAML, XML, and more. +func (s *String) UnmarshalText(b []byte) error { + s.Store(string(b)) + return nil +} diff --git a/vendor/go.uber.org/atomic/time.go b/vendor/go.uber.org/atomic/time.go new file mode 100644 index 0000000000..33460fc37e --- /dev/null +++ b/vendor/go.uber.org/atomic/time.go @@ -0,0 +1,55 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020-2021 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "time" +) + +// Time is an atomic type-safe wrapper for time.Time values. 
+type Time struct { + _ nocmp // disallow non-atomic comparison + + v Value +} + +var _zeroTime time.Time + +// NewTime creates a new Time. +func NewTime(val time.Time) *Time { + x := &Time{} + if val != _zeroTime { + x.Store(val) + } + return x +} + +// Load atomically loads the wrapped time.Time. +func (x *Time) Load() time.Time { + return unpackTime(x.v.Load()) +} + +// Store atomically stores the passed time.Time. +func (x *Time) Store(val time.Time) { + x.v.Store(packTime(val)) +} diff --git a/vendor/go.uber.org/atomic/time_ext.go b/vendor/go.uber.org/atomic/time_ext.go new file mode 100644 index 0000000000..1e3dc978aa --- /dev/null +++ b/vendor/go.uber.org/atomic/time_ext.go @@ -0,0 +1,36 @@ +// Copyright (c) 2021 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import "time" + +//go:generate bin/gen-atomicwrapper -name=Time -type=time.Time -wrapped=Value -pack=packTime -unpack=unpackTime -imports time -file=time.go + +func packTime(t time.Time) interface{} { + return t +} + +func unpackTime(v interface{}) time.Time { + if t, ok := v.(time.Time); ok { + return t + } + return time.Time{} +} diff --git a/vendor/go.uber.org/atomic/uint32.go b/vendor/go.uber.org/atomic/uint32.go new file mode 100644 index 0000000000..7859a9cc3b --- /dev/null +++ b/vendor/go.uber.org/atomic/uint32.go @@ -0,0 +1,102 @@ +// @generated Code generated by gen-atomicint. + +// Copyright (c) 2020-2021 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "strconv" + "sync/atomic" +) + +// Uint32 is an atomic wrapper around uint32. +type Uint32 struct { + _ nocmp // disallow non-atomic comparison + + v uint32 +} + +// NewUint32 creates a new Uint32. +func NewUint32(val uint32) *Uint32 { + return &Uint32{v: val} +} + +// Load atomically loads the wrapped value. +func (i *Uint32) Load() uint32 { + return atomic.LoadUint32(&i.v) +} + +// Add atomically adds to the wrapped uint32 and returns the new value. +func (i *Uint32) Add(delta uint32) uint32 { + return atomic.AddUint32(&i.v, delta) +} + +// Sub atomically subtracts from the wrapped uint32 and returns the new value. +func (i *Uint32) Sub(delta uint32) uint32 { + return atomic.AddUint32(&i.v, ^(delta - 1)) +} + +// Inc atomically increments the wrapped uint32 and returns the new value. +func (i *Uint32) Inc() uint32 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped uint32 and returns the new value. +func (i *Uint32) Dec() uint32 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Uint32) CAS(old, new uint32) (swapped bool) { + return atomic.CompareAndSwapUint32(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Uint32) Store(val uint32) { + atomic.StoreUint32(&i.v, val) +} + +// Swap atomically swaps the wrapped uint32 and returns the old value. +func (i *Uint32) Swap(val uint32) (old uint32) { + return atomic.SwapUint32(&i.v, val) +} + +// MarshalJSON encodes the wrapped uint32 into JSON. +func (i *Uint32) MarshalJSON() ([]byte, error) { + return json.Marshal(i.Load()) +} + +// UnmarshalJSON decodes JSON into the wrapped uint32. +func (i *Uint32) UnmarshalJSON(b []byte) error { + var v uint32 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + i.Store(v) + return nil +} + +// String encodes the wrapped value as a string. +func (i *Uint32) String() string { + v := i.Load() + return strconv.FormatUint(uint64(v), 10) +} diff --git a/vendor/go.uber.org/atomic/uint64.go b/vendor/go.uber.org/atomic/uint64.go new file mode 100644 index 0000000000..2f2a7db638 --- /dev/null +++ b/vendor/go.uber.org/atomic/uint64.go @@ -0,0 +1,102 @@ +// @generated Code generated by gen-atomicint. + +// Copyright (c) 2020-2021 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "strconv" + "sync/atomic" +) + +// Uint64 is an atomic wrapper around uint64. +type Uint64 struct { + _ nocmp // disallow non-atomic comparison + + v uint64 +} + +// NewUint64 creates a new Uint64. +func NewUint64(val uint64) *Uint64 { + return &Uint64{v: val} +} + +// Load atomically loads the wrapped value. +func (i *Uint64) Load() uint64 { + return atomic.LoadUint64(&i.v) +} + +// Add atomically adds to the wrapped uint64 and returns the new value. +func (i *Uint64) Add(delta uint64) uint64 { + return atomic.AddUint64(&i.v, delta) +} + +// Sub atomically subtracts from the wrapped uint64 and returns the new value. +func (i *Uint64) Sub(delta uint64) uint64 { + return atomic.AddUint64(&i.v, ^(delta - 1)) +} + +// Inc atomically increments the wrapped uint64 and returns the new value. +func (i *Uint64) Inc() uint64 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped uint64 and returns the new value. +func (i *Uint64) Dec() uint64 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Uint64) CAS(old, new uint64) (swapped bool) { + return atomic.CompareAndSwapUint64(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Uint64) Store(val uint64) { + atomic.StoreUint64(&i.v, val) +} + +// Swap atomically swaps the wrapped uint64 and returns the old value. +func (i *Uint64) Swap(val uint64) (old uint64) { + return atomic.SwapUint64(&i.v, val) +} + +// MarshalJSON encodes the wrapped uint64 into JSON. +func (i *Uint64) MarshalJSON() ([]byte, error) { + return json.Marshal(i.Load()) +} + +// UnmarshalJSON decodes JSON into the wrapped uint64. +func (i *Uint64) UnmarshalJSON(b []byte) error { + var v uint64 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + i.Store(v) + return nil +} + +// String encodes the wrapped value as a string. +func (i *Uint64) String() string { + v := i.Load() + return strconv.FormatUint(uint64(v), 10) +} diff --git a/vendor/go.uber.org/atomic/uintptr.go b/vendor/go.uber.org/atomic/uintptr.go new file mode 100644 index 0000000000..ecf7a77273 --- /dev/null +++ b/vendor/go.uber.org/atomic/uintptr.go @@ -0,0 +1,102 @@ +// @generated Code generated by gen-atomicint. + +// Copyright (c) 2020-2021 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "strconv" + "sync/atomic" +) + +// Uintptr is an atomic wrapper around uintptr. +type Uintptr struct { + _ nocmp // disallow non-atomic comparison + + v uintptr +} + +// NewUintptr creates a new Uintptr. +func NewUintptr(val uintptr) *Uintptr { + return &Uintptr{v: val} +} + +// Load atomically loads the wrapped value. +func (i *Uintptr) Load() uintptr { + return atomic.LoadUintptr(&i.v) +} + +// Add atomically adds to the wrapped uintptr and returns the new value. +func (i *Uintptr) Add(delta uintptr) uintptr { + return atomic.AddUintptr(&i.v, delta) +} + +// Sub atomically subtracts from the wrapped uintptr and returns the new value. +func (i *Uintptr) Sub(delta uintptr) uintptr { + return atomic.AddUintptr(&i.v, ^(delta - 1)) +} + +// Inc atomically increments the wrapped uintptr and returns the new value. +func (i *Uintptr) Inc() uintptr { + return i.Add(1) +} + +// Dec atomically decrements the wrapped uintptr and returns the new value. +func (i *Uintptr) Dec() uintptr { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Uintptr) CAS(old, new uintptr) (swapped bool) { + return atomic.CompareAndSwapUintptr(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Uintptr) Store(val uintptr) { + atomic.StoreUintptr(&i.v, val) +} + +// Swap atomically swaps the wrapped uintptr and returns the old value. +func (i *Uintptr) Swap(val uintptr) (old uintptr) { + return atomic.SwapUintptr(&i.v, val) +} + +// MarshalJSON encodes the wrapped uintptr into JSON. +func (i *Uintptr) MarshalJSON() ([]byte, error) { + return json.Marshal(i.Load()) +} + +// UnmarshalJSON decodes JSON into the wrapped uintptr. +func (i *Uintptr) UnmarshalJSON(b []byte) error { + var v uintptr + if err := json.Unmarshal(b, &v); err != nil { + return err + } + i.Store(v) + return nil +} + +// String encodes the wrapped value as a string. +func (i *Uintptr) String() string { + v := i.Load() + return strconv.FormatUint(uint64(v), 10) +} diff --git a/vendor/go.uber.org/atomic/unsafe_pointer.go b/vendor/go.uber.org/atomic/unsafe_pointer.go new file mode 100644 index 0000000000..169f793dcf --- /dev/null +++ b/vendor/go.uber.org/atomic/unsafe_pointer.go @@ -0,0 +1,58 @@ +// Copyright (c) 2021 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "sync/atomic" + "unsafe" +) + +// UnsafePointer is an atomic wrapper around unsafe.Pointer. +type UnsafePointer struct { + _ nocmp // disallow non-atomic comparison + + v unsafe.Pointer +} + +// NewUnsafePointer creates a new UnsafePointer. +func NewUnsafePointer(val unsafe.Pointer) *UnsafePointer { + return &UnsafePointer{v: val} +} + +// Load atomically loads the wrapped value. +func (p *UnsafePointer) Load() unsafe.Pointer { + return atomic.LoadPointer(&p.v) +} + +// Store atomically stores the passed value. +func (p *UnsafePointer) Store(val unsafe.Pointer) { + atomic.StorePointer(&p.v, val) +} + +// Swap atomically swaps the wrapped unsafe.Pointer and returns the old value. +func (p *UnsafePointer) Swap(val unsafe.Pointer) (old unsafe.Pointer) { + return atomic.SwapPointer(&p.v, val) +} + +// CAS is an atomic compare-and-swap. +func (p *UnsafePointer) CAS(old, new unsafe.Pointer) (swapped bool) { + return atomic.CompareAndSwapPointer(&p.v, old, new) +} diff --git a/vendor/go.uber.org/atomic/value.go b/vendor/go.uber.org/atomic/value.go new file mode 100644 index 0000000000..671f3a3824 --- /dev/null +++ b/vendor/go.uber.org/atomic/value.go @@ -0,0 +1,31 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import "sync/atomic" + +// Value shadows the type of the same name from sync/atomic +// https://godoc.org/sync/atomic#Value +type Value struct { + atomic.Value + + _ nocmp // disallow non-atomic comparison +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go index 8a893fdfff..56bfaaa17d 100644 --- a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go +++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build go1.7 && amd64 && gc && !purego // +build go1.7,amd64,gc,!purego package blake2b diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s index 8608a7f7d1..4b9daa18d9 100644 --- a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s +++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build go1.7 && amd64 && gc && !purego // +build go1.7,amd64,gc,!purego #include "textflag.h" @@ -282,14 +283,12 @@ TEXT ·hashBlocksAVX2(SB), 4, $320-48 // frame size = 288 + 32 byte alignment MOVQ blocks_len+32(FP), DI MOVQ SP, DX - MOVQ SP, R9 - ADDQ $31, R9 - ANDQ $~31, R9 - MOVQ R9, SP + ADDQ $31, DX + ANDQ $~31, DX - MOVQ CX, 16(SP) + MOVQ CX, 16(DX) XORQ CX, CX - MOVQ CX, 24(SP) + MOVQ CX, 24(DX) VMOVDQU ·AVX2_c40<>(SB), Y4 VMOVDQU ·AVX2_c48<>(SB), Y5 @@ -301,33 +300,33 @@ TEXT ·hashBlocksAVX2(SB), 4, $320-48 // frame size = 288 + 32 byte alignment MOVQ 0(BX), R8 MOVQ 8(BX), R9 - MOVQ R9, 8(SP) + MOVQ R9, 8(DX) loop: ADDQ $128, R8 - MOVQ R8, 0(SP) + MOVQ R8, 0(DX) CMPQ R8, $128 JGE noinc INCQ R9 - MOVQ R9, 8(SP) + MOVQ R9, 8(DX) noinc: VMOVDQA Y8, Y0 VMOVDQA Y9, Y1 VMOVDQA Y6, Y2 - VPXOR 0(SP), Y7, Y3 + VPXOR 0(DX), Y7, Y3 LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() - VMOVDQA Y12, 32(SP) - VMOVDQA Y13, 64(SP) - VMOVDQA Y14, 96(SP) - VMOVDQA Y15, 128(SP) + VMOVDQA Y12, 32(DX) + VMOVDQA Y13, 64(DX) + VMOVDQA Y14, 96(DX) + VMOVDQA Y15, 128(DX) ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() - VMOVDQA Y12, 160(SP) - VMOVDQA Y13, 192(SP) - VMOVDQA Y14, 224(SP) - VMOVDQA Y15, 256(SP) + VMOVDQA Y12, 160(DX) + VMOVDQA Y13, 192(DX) + VMOVDQA Y14, 224(DX) + VMOVDQA Y15, 256(DX) ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() @@ -347,8 +346,8 @@ noinc: LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - ROUND_AVX2(32(SP), 64(SP), 96(SP), 128(SP), Y10, Y4, Y5) - ROUND_AVX2(160(SP), 192(SP), 224(SP), 256(SP), Y10, Y4, Y5) + ROUND_AVX2(32(DX), 64(DX), 96(DX), 128(DX), Y10, Y4, Y5) + ROUND_AVX2(160(DX), 192(DX), 224(DX), 256(DX), Y10, Y4, Y5) VPXOR Y0, Y8, Y8 VPXOR Y1, Y9, Y9 @@ -366,7 +365,6 @@ noinc: VMOVDQU Y9, 32(AX) VZEROUPPER - MOVQ DX, SP RET #define VPUNPCKLQDQ_X2_X2_X15 BYTE $0xC5; BYTE $0x69; BYTE $0x6C; BYTE $0xFA @@ -584,11 +582,9 @@ TEXT ·hashBlocksAVX(SB), 4, $288-48 // frame size = 272 + 16 byte alignment MOVQ blocks_base+24(FP), SI MOVQ blocks_len+32(FP), DI - MOVQ SP, BP - MOVQ SP, R9 - ADDQ $15, R9 - ANDQ $~15, R9 - MOVQ R9, SP + MOVQ SP, R10 + ADDQ $15, R10 + ANDQ $~15, R10 VMOVDQU ·AVX_c40<>(SB), X0 VMOVDQU ·AVX_c48<>(SB), X1 @@ -596,8 +592,8 @@ TEXT ·hashBlocksAVX(SB), 4, $288-48 // frame size = 272 + 16 byte alignment VMOVDQA X1, X9 VMOVDQU ·AVX_iv3<>(SB), X0 - VMOVDQA X0, 0(SP) - XORQ CX, 0(SP) // 0(SP) = ·AVX_iv3 ^ (CX || 0) + VMOVDQA X0, 0(R10) + XORQ CX, 0(R10) // 0(R10) = ·AVX_iv3 ^ (CX || 0) VMOVDQU 0(AX), X10 VMOVDQU 16(AX), X11 @@ -624,35 +620,35 @@ noinc: VMOVDQU ·AVX_iv2<>(SB), X6 VPXOR X15, X6, X6 - VMOVDQA 0(SP), X7 + VMOVDQA 0(R10), X7 LOAD_MSG_AVX_0_2_4_6_1_3_5_7() - VMOVDQA X12, 16(SP) - VMOVDQA X13, 32(SP) - VMOVDQA X14, 48(SP) - VMOVDQA X15, 64(SP) + VMOVDQA X12, 16(R10) + VMOVDQA X13, 32(R10) + VMOVDQA X14, 48(R10) + VMOVDQA X15, 64(R10) HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, 
X9) SHUFFLE_AVX() LOAD_MSG_AVX(8, 10, 12, 14, 9, 11, 13, 15) - VMOVDQA X12, 80(SP) - VMOVDQA X13, 96(SP) - VMOVDQA X14, 112(SP) - VMOVDQA X15, 128(SP) + VMOVDQA X12, 80(R10) + VMOVDQA X13, 96(R10) + VMOVDQA X14, 112(R10) + VMOVDQA X15, 128(R10) HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) SHUFFLE_AVX_INV() LOAD_MSG_AVX(14, 4, 9, 13, 10, 8, 15, 6) - VMOVDQA X12, 144(SP) - VMOVDQA X13, 160(SP) - VMOVDQA X14, 176(SP) - VMOVDQA X15, 192(SP) + VMOVDQA X12, 144(R10) + VMOVDQA X13, 160(R10) + VMOVDQA X14, 176(R10) + VMOVDQA X15, 192(R10) HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) SHUFFLE_AVX() LOAD_MSG_AVX_1_0_11_5_12_2_7_3() - VMOVDQA X12, 208(SP) - VMOVDQA X13, 224(SP) - VMOVDQA X14, 240(SP) - VMOVDQA X15, 256(SP) + VMOVDQA X12, 208(R10) + VMOVDQA X13, 224(R10) + VMOVDQA X14, 240(R10) + VMOVDQA X15, 256(R10) HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) SHUFFLE_AVX_INV() @@ -712,14 +708,14 @@ noinc: HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) SHUFFLE_AVX_INV() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 16(SP), 32(SP), 48(SP), 64(SP), X15, X8, X9) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 16(R10), 32(R10), 48(R10), 64(R10), X15, X8, X9) SHUFFLE_AVX() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 80(SP), 96(SP), 112(SP), 128(SP), X15, X8, X9) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 80(R10), 96(R10), 112(R10), 128(R10), X15, X8, X9) SHUFFLE_AVX_INV() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 144(SP), 160(SP), 176(SP), 192(SP), X15, X8, X9) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 144(R10), 160(R10), 176(R10), 192(R10), X15, X8, X9) SHUFFLE_AVX() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 208(SP), 224(SP), 240(SP), 256(SP), X15, X8, X9) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 208(R10), 224(R10), 240(R10), 256(R10), X15, X8, X9) SHUFFLE_AVX_INV() VMOVDQU 32(AX), X14 @@ -746,5 +742,4 @@ noinc: MOVQ R9, 8(BX) VZEROUPPER - MOVQ BP, SP RET diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go index a52c887fcb..5fa1b32841 100644 --- a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !go1.7 && amd64 && gc && !purego // +build !go1.7,amd64,gc,!purego package blake2b diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s index 1f4c6a9279..ae75eb9afc 100644 --- a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build amd64 && gc && !purego // +build amd64,gc,!purego #include "textflag.h" @@ -118,15 +119,13 @@ TEXT ·hashBlocksSSE4(SB), 4, $288-48 // frame size = 272 + 16 byte alignment MOVQ blocks_base+24(FP), SI MOVQ blocks_len+32(FP), DI - MOVQ SP, BP - MOVQ SP, R9 - ADDQ $15, R9 - ANDQ $~15, R9 - MOVQ R9, SP + MOVQ SP, R10 + ADDQ $15, R10 + ANDQ $~15, R10 MOVOU ·iv3<>(SB), X0 - MOVO X0, 0(SP) - XORQ CX, 0(SP) // 0(SP) = ·iv3 ^ (CX || 0) + MOVO X0, 0(R10) + XORQ CX, 0(R10) // 0(R10) = ·iv3 ^ (CX || 0) MOVOU ·c40<>(SB), X13 MOVOU ·c48<>(SB), X14 @@ -156,35 +155,35 @@ noinc: MOVOU ·iv2<>(SB), X6 PXOR X8, X6 - MOVO 0(SP), X7 + MOVO 0(R10), X7 LOAD_MSG(X8, X9, X10, X11, SI, 0, 2, 4, 6, 1, 3, 5, 7) - MOVO X8, 16(SP) - MOVO X9, 32(SP) - MOVO X10, 48(SP) - MOVO X11, 64(SP) + MOVO X8, 16(R10) + MOVO X9, 32(R10) + MOVO X10, 48(R10) + MOVO X11, 64(R10) HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) LOAD_MSG(X8, X9, X10, X11, SI, 8, 10, 12, 14, 9, 11, 13, 15) - MOVO X8, 80(SP) - MOVO X9, 96(SP) - MOVO X10, 112(SP) - MOVO X11, 128(SP) + MOVO X8, 80(R10) + MOVO X9, 96(R10) + MOVO X10, 112(R10) + MOVO X11, 128(R10) HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) LOAD_MSG(X8, X9, X10, X11, SI, 14, 4, 9, 13, 10, 8, 15, 6) - MOVO X8, 144(SP) - MOVO X9, 160(SP) - MOVO X10, 176(SP) - MOVO X11, 192(SP) + MOVO X8, 144(R10) + MOVO X9, 160(R10) + MOVO X10, 176(R10) + MOVO X11, 192(R10) HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) LOAD_MSG(X8, X9, X10, X11, SI, 1, 0, 11, 5, 12, 2, 7, 3) - MOVO X8, 208(SP) - MOVO X9, 224(SP) - MOVO X10, 240(SP) - MOVO X11, 256(SP) + MOVO X8, 208(R10) + MOVO X9, 224(R10) + MOVO X10, 240(R10) + MOVO X11, 256(R10) HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) @@ -244,14 +243,14 @@ noinc: HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 16(SP), 32(SP), 48(SP), 64(SP), X11, X13, X14) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 16(R10), 32(R10), 48(R10), 64(R10), X11, X13, X14) SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 80(SP), 96(SP), 112(SP), 128(SP), X11, X13, X14) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 80(R10), 96(R10), 112(R10), 128(R10), X11, X13, X14) SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 144(SP), 160(SP), 176(SP), 192(SP), X11, X13, X14) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 144(R10), 160(R10), 176(R10), 192(R10), X11, X13, X14) SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 208(SP), 224(SP), 240(SP), 256(SP), X11, X13, X14) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 208(R10), 224(R10), 240(R10), 256(R10), X11, X13, X14) SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) MOVOU 32(AX), X10 @@ -277,5 +276,4 @@ noinc: MOVQ R8, 0(BX) MOVQ R9, 8(BX) - MOVQ BP, SP RET diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go b/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go index 8597457781..b0137cdf02 100644 --- a/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build !amd64 || purego || !gc // +build !amd64 purego !gc package blake2b diff --git a/vendor/golang.org/x/crypto/blake2b/register.go b/vendor/golang.org/x/crypto/blake2b/register.go index efd689af4b..9d8633963c 100644 --- a/vendor/golang.org/x/crypto/blake2b/register.go +++ b/vendor/golang.org/x/crypto/blake2b/register.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build go1.9 // +build go1.9 package blake2b diff --git a/vendor/golang.org/x/crypto/blake2s/blake2s_386.go b/vendor/golang.org/x/crypto/blake2s/blake2s_386.go index 2d8ee63c1a..b4463fb4dc 100644 --- a/vendor/golang.org/x/crypto/blake2s/blake2s_386.go +++ b/vendor/golang.org/x/crypto/blake2s/blake2s_386.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build 386 && gc && !purego // +build 386,gc,!purego package blake2s diff --git a/vendor/golang.org/x/crypto/blake2s/blake2s_386.s b/vendor/golang.org/x/crypto/blake2s/blake2s_386.s index 023532b46d..603d00ca32 100644 --- a/vendor/golang.org/x/crypto/blake2s/blake2s_386.s +++ b/vendor/golang.org/x/crypto/blake2s/blake2s_386.s @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build 386 && gc && !purego // +build 386,gc,!purego #include "textflag.h" @@ -297,19 +298,17 @@ TEXT ·hashBlocksSSE2(SB), 0, $672-24 // frame = 656 + 16 byte alignment MOVL blocks_base+12(FP), SI MOVL blocks_len+16(FP), DX - MOVL SP, BP MOVL SP, DI ADDL $15, DI ANDL $~15, DI - MOVL DI, SP - MOVL CX, 8(SP) + MOVL CX, 8(DI) MOVL 0(BX), CX - MOVL CX, 0(SP) + MOVL CX, 0(DI) MOVL 4(BX), CX - MOVL CX, 4(SP) + MOVL CX, 4(DI) XORL CX, CX - MOVL CX, 12(SP) + MOVL CX, 12(DI) MOVOU 0(AX), X0 MOVOU 16(AX), X1 @@ -321,22 +320,22 @@ loop: MOVOU iv0<>(SB), X6 MOVOU iv1<>(SB), X7 - MOVO 0(SP), X3 + MOVO 0(DI), X3 PADDQ X2, X3 PXOR X3, X7 - MOVO X3, 0(SP) - - PRECOMPUTE(SP, 16, SI, CX) - ROUND_SSE2(X4, X5, X6, X7, 16(SP), 32(SP), 48(SP), 64(SP), X3) - ROUND_SSE2(X4, X5, X6, X7, 16+64(SP), 32+64(SP), 48+64(SP), 64+64(SP), X3) - ROUND_SSE2(X4, X5, X6, X7, 16+128(SP), 32+128(SP), 48+128(SP), 64+128(SP), X3) - ROUND_SSE2(X4, X5, X6, X7, 16+192(SP), 32+192(SP), 48+192(SP), 64+192(SP), X3) - ROUND_SSE2(X4, X5, X6, X7, 16+256(SP), 32+256(SP), 48+256(SP), 64+256(SP), X3) - ROUND_SSE2(X4, X5, X6, X7, 16+320(SP), 32+320(SP), 48+320(SP), 64+320(SP), X3) - ROUND_SSE2(X4, X5, X6, X7, 16+384(SP), 32+384(SP), 48+384(SP), 64+384(SP), X3) - ROUND_SSE2(X4, X5, X6, X7, 16+448(SP), 32+448(SP), 48+448(SP), 64+448(SP), X3) - ROUND_SSE2(X4, X5, X6, X7, 16+512(SP), 32+512(SP), 48+512(SP), 64+512(SP), X3) - ROUND_SSE2(X4, X5, X6, X7, 16+576(SP), 32+576(SP), 48+576(SP), 64+576(SP), X3) + MOVO X3, 0(DI) + + PRECOMPUTE(DI, 16, SI, CX) + ROUND_SSE2(X4, X5, X6, X7, 16(DI), 32(DI), 48(DI), 64(DI), X3) + ROUND_SSE2(X4, X5, X6, X7, 16+64(DI), 32+64(DI), 48+64(DI), 64+64(DI), X3) + ROUND_SSE2(X4, X5, X6, X7, 16+128(DI), 32+128(DI), 48+128(DI), 64+128(DI), X3) + ROUND_SSE2(X4, X5, X6, X7, 16+192(DI), 32+192(DI), 48+192(DI), 64+192(DI), X3) + ROUND_SSE2(X4, X5, X6, X7, 16+256(DI), 32+256(DI), 48+256(DI), 64+256(DI), X3) + ROUND_SSE2(X4, X5, X6, X7, 16+320(DI), 32+320(DI), 48+320(DI), 64+320(DI), X3) + ROUND_SSE2(X4, X5, X6, X7, 16+384(DI), 32+384(DI), 48+384(DI), 64+384(DI), X3) + ROUND_SSE2(X4, X5, X6, X7, 16+448(DI), 32+448(DI), 48+448(DI), 64+448(DI), X3) + ROUND_SSE2(X4, X5, X6, X7, 16+512(DI), 32+512(DI), 48+512(DI), 
64+512(DI), X3) + ROUND_SSE2(X4, X5, X6, X7, 16+576(DI), 32+576(DI), 48+576(DI), 64+576(DI), X3) PXOR X4, X0 PXOR X5, X1 @@ -347,15 +346,14 @@ loop: SUBL $64, DX JNE loop - MOVL 0(SP), CX + MOVL 0(DI), CX MOVL CX, 0(BX) - MOVL 4(SP), CX + MOVL 4(DI), CX MOVL CX, 4(BX) MOVOU X0, 0(AX) MOVOU X1, 16(AX) - MOVL BP, SP RET // func hashBlocksSSSE3(h *[8]uint32, c *[2]uint32, flag uint32, blocks []byte) @@ -366,54 +364,52 @@ TEXT ·hashBlocksSSSE3(SB), 0, $704-24 // frame = 688 + 16 byte alignment MOVL blocks_base+12(FP), SI MOVL blocks_len+16(FP), DX - MOVL SP, BP MOVL SP, DI ADDL $15, DI ANDL $~15, DI - MOVL DI, SP - MOVL CX, 8(SP) + MOVL CX, 8(DI) MOVL 0(BX), CX - MOVL CX, 0(SP) + MOVL CX, 0(DI) MOVL 4(BX), CX - MOVL CX, 4(SP) + MOVL CX, 4(DI) XORL CX, CX - MOVL CX, 12(SP) + MOVL CX, 12(DI) MOVOU 0(AX), X0 MOVOU 16(AX), X1 MOVOU counter<>(SB), X2 loop: - MOVO X0, 656(SP) - MOVO X1, 672(SP) + MOVO X0, 656(DI) + MOVO X1, 672(DI) MOVO X0, X4 MOVO X1, X5 MOVOU iv0<>(SB), X6 MOVOU iv1<>(SB), X7 - MOVO 0(SP), X3 + MOVO 0(DI), X3 PADDQ X2, X3 PXOR X3, X7 - MOVO X3, 0(SP) + MOVO X3, 0(DI) MOVOU rol16<>(SB), X0 MOVOU rol8<>(SB), X1 - PRECOMPUTE(SP, 16, SI, CX) - ROUND_SSSE3(X4, X5, X6, X7, 16(SP), 32(SP), 48(SP), 64(SP), X3, X0, X1) - ROUND_SSSE3(X4, X5, X6, X7, 16+64(SP), 32+64(SP), 48+64(SP), 64+64(SP), X3, X0, X1) - ROUND_SSSE3(X4, X5, X6, X7, 16+128(SP), 32+128(SP), 48+128(SP), 64+128(SP), X3, X0, X1) - ROUND_SSSE3(X4, X5, X6, X7, 16+192(SP), 32+192(SP), 48+192(SP), 64+192(SP), X3, X0, X1) - ROUND_SSSE3(X4, X5, X6, X7, 16+256(SP), 32+256(SP), 48+256(SP), 64+256(SP), X3, X0, X1) - ROUND_SSSE3(X4, X5, X6, X7, 16+320(SP), 32+320(SP), 48+320(SP), 64+320(SP), X3, X0, X1) - ROUND_SSSE3(X4, X5, X6, X7, 16+384(SP), 32+384(SP), 48+384(SP), 64+384(SP), X3, X0, X1) - ROUND_SSSE3(X4, X5, X6, X7, 16+448(SP), 32+448(SP), 48+448(SP), 64+448(SP), X3, X0, X1) - ROUND_SSSE3(X4, X5, X6, X7, 16+512(SP), 32+512(SP), 48+512(SP), 64+512(SP), X3, X0, X1) - ROUND_SSSE3(X4, X5, X6, X7, 16+576(SP), 32+576(SP), 48+576(SP), 64+576(SP), X3, X0, X1) - - MOVO 656(SP), X0 - MOVO 672(SP), X1 + PRECOMPUTE(DI, 16, SI, CX) + ROUND_SSSE3(X4, X5, X6, X7, 16(DI), 32(DI), 48(DI), 64(DI), X3, X0, X1) + ROUND_SSSE3(X4, X5, X6, X7, 16+64(DI), 32+64(DI), 48+64(DI), 64+64(DI), X3, X0, X1) + ROUND_SSSE3(X4, X5, X6, X7, 16+128(DI), 32+128(DI), 48+128(DI), 64+128(DI), X3, X0, X1) + ROUND_SSSE3(X4, X5, X6, X7, 16+192(DI), 32+192(DI), 48+192(DI), 64+192(DI), X3, X0, X1) + ROUND_SSSE3(X4, X5, X6, X7, 16+256(DI), 32+256(DI), 48+256(DI), 64+256(DI), X3, X0, X1) + ROUND_SSSE3(X4, X5, X6, X7, 16+320(DI), 32+320(DI), 48+320(DI), 64+320(DI), X3, X0, X1) + ROUND_SSSE3(X4, X5, X6, X7, 16+384(DI), 32+384(DI), 48+384(DI), 64+384(DI), X3, X0, X1) + ROUND_SSSE3(X4, X5, X6, X7, 16+448(DI), 32+448(DI), 48+448(DI), 64+448(DI), X3, X0, X1) + ROUND_SSSE3(X4, X5, X6, X7, 16+512(DI), 32+512(DI), 48+512(DI), 64+512(DI), X3, X0, X1) + ROUND_SSSE3(X4, X5, X6, X7, 16+576(DI), 32+576(DI), 48+576(DI), 64+576(DI), X3, X0, X1) + + MOVO 656(DI), X0 + MOVO 672(DI), X1 PXOR X4, X0 PXOR X5, X1 PXOR X6, X0 @@ -423,13 +419,12 @@ loop: SUBL $64, DX JNE loop - MOVL 0(SP), CX + MOVL 0(DI), CX MOVL CX, 0(BX) - MOVL 4(SP), CX + MOVL 4(DI), CX MOVL CX, 4(BX) MOVOU X0, 0(AX) MOVOU X1, 16(AX) - MOVL BP, SP RET diff --git a/vendor/golang.org/x/crypto/blake2s/blake2s_amd64.go b/vendor/golang.org/x/crypto/blake2s/blake2s_amd64.go index 267bdce1e3..becdaa120f 100644 --- a/vendor/golang.org/x/crypto/blake2s/blake2s_amd64.go +++ b/vendor/golang.org/x/crypto/blake2s/blake2s_amd64.go @@ -2,6 +2,7 
@@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build amd64 && gc && !purego // +build amd64,gc,!purego package blake2s diff --git a/vendor/golang.org/x/crypto/blake2s/blake2s_amd64.s b/vendor/golang.org/x/crypto/blake2s/blake2s_amd64.s index b905944fd0..e9df7a7c21 100644 --- a/vendor/golang.org/x/crypto/blake2s/blake2s_amd64.s +++ b/vendor/golang.org/x/crypto/blake2s/blake2s_amd64.s @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build amd64 && gc && !purego // +build amd64,gc,!purego #include "textflag.h" @@ -317,30 +318,30 @@ GLOBL counter<>(SB), (NOPTR+RODATA), $16 MOVL R15, 8*4+off+576(dst) #define BLAKE2s_SSE2() \ - PRECOMPUTE_MSG(SP, 16, SI, R8, R9, R10, R11, R12, R13, R14, R15); \ - ROUND_SSE2(X4, X5, X6, X7, 16(SP), 32(SP), 48(SP), 64(SP), X8); \ - ROUND_SSE2(X4, X5, X6, X7, 16+64(SP), 32+64(SP), 48+64(SP), 64+64(SP), X8); \ - ROUND_SSE2(X4, X5, X6, X7, 16+128(SP), 32+128(SP), 48+128(SP), 64+128(SP), X8); \ - ROUND_SSE2(X4, X5, X6, X7, 16+192(SP), 32+192(SP), 48+192(SP), 64+192(SP), X8); \ - ROUND_SSE2(X4, X5, X6, X7, 16+256(SP), 32+256(SP), 48+256(SP), 64+256(SP), X8); \ - ROUND_SSE2(X4, X5, X6, X7, 16+320(SP), 32+320(SP), 48+320(SP), 64+320(SP), X8); \ - ROUND_SSE2(X4, X5, X6, X7, 16+384(SP), 32+384(SP), 48+384(SP), 64+384(SP), X8); \ - ROUND_SSE2(X4, X5, X6, X7, 16+448(SP), 32+448(SP), 48+448(SP), 64+448(SP), X8); \ - ROUND_SSE2(X4, X5, X6, X7, 16+512(SP), 32+512(SP), 48+512(SP), 64+512(SP), X8); \ - ROUND_SSE2(X4, X5, X6, X7, 16+576(SP), 32+576(SP), 48+576(SP), 64+576(SP), X8) + PRECOMPUTE_MSG(BP, 16, SI, R8, R9, R10, R11, R12, R13, R14, R15); \ + ROUND_SSE2(X4, X5, X6, X7, 16(BP), 32(BP), 48(BP), 64(BP), X8); \ + ROUND_SSE2(X4, X5, X6, X7, 16+64(BP), 32+64(BP), 48+64(BP), 64+64(BP), X8); \ + ROUND_SSE2(X4, X5, X6, X7, 16+128(BP), 32+128(BP), 48+128(BP), 64+128(BP), X8); \ + ROUND_SSE2(X4, X5, X6, X7, 16+192(BP), 32+192(BP), 48+192(BP), 64+192(BP), X8); \ + ROUND_SSE2(X4, X5, X6, X7, 16+256(BP), 32+256(BP), 48+256(BP), 64+256(BP), X8); \ + ROUND_SSE2(X4, X5, X6, X7, 16+320(BP), 32+320(BP), 48+320(BP), 64+320(BP), X8); \ + ROUND_SSE2(X4, X5, X6, X7, 16+384(BP), 32+384(BP), 48+384(BP), 64+384(BP), X8); \ + ROUND_SSE2(X4, X5, X6, X7, 16+448(BP), 32+448(BP), 48+448(BP), 64+448(BP), X8); \ + ROUND_SSE2(X4, X5, X6, X7, 16+512(BP), 32+512(BP), 48+512(BP), 64+512(BP), X8); \ + ROUND_SSE2(X4, X5, X6, X7, 16+576(BP), 32+576(BP), 48+576(BP), 64+576(BP), X8) #define BLAKE2s_SSSE3() \ - PRECOMPUTE_MSG(SP, 16, SI, R8, R9, R10, R11, R12, R13, R14, R15); \ - ROUND_SSSE3(X4, X5, X6, X7, 16(SP), 32(SP), 48(SP), 64(SP), X8, X13, X14); \ - ROUND_SSSE3(X4, X5, X6, X7, 16+64(SP), 32+64(SP), 48+64(SP), 64+64(SP), X8, X13, X14); \ - ROUND_SSSE3(X4, X5, X6, X7, 16+128(SP), 32+128(SP), 48+128(SP), 64+128(SP), X8, X13, X14); \ - ROUND_SSSE3(X4, X5, X6, X7, 16+192(SP), 32+192(SP), 48+192(SP), 64+192(SP), X8, X13, X14); \ - ROUND_SSSE3(X4, X5, X6, X7, 16+256(SP), 32+256(SP), 48+256(SP), 64+256(SP), X8, X13, X14); \ - ROUND_SSSE3(X4, X5, X6, X7, 16+320(SP), 32+320(SP), 48+320(SP), 64+320(SP), X8, X13, X14); \ - ROUND_SSSE3(X4, X5, X6, X7, 16+384(SP), 32+384(SP), 48+384(SP), 64+384(SP), X8, X13, X14); \ - ROUND_SSSE3(X4, X5, X6, X7, 16+448(SP), 32+448(SP), 48+448(SP), 64+448(SP), X8, X13, X14); \ - ROUND_SSSE3(X4, X5, X6, X7, 16+512(SP), 32+512(SP), 48+512(SP), 64+512(SP), X8, X13, X14); \ - ROUND_SSSE3(X4, X5, X6, X7, 16+576(SP), 32+576(SP), 48+576(SP), 64+576(SP), 
X8, X13, X14) + PRECOMPUTE_MSG(BP, 16, SI, R8, R9, R10, R11, R12, R13, R14, R15); \ + ROUND_SSSE3(X4, X5, X6, X7, 16(BP), 32(BP), 48(BP), 64(BP), X8, X13, X14); \ + ROUND_SSSE3(X4, X5, X6, X7, 16+64(BP), 32+64(BP), 48+64(BP), 64+64(BP), X8, X13, X14); \ + ROUND_SSSE3(X4, X5, X6, X7, 16+128(BP), 32+128(BP), 48+128(BP), 64+128(BP), X8, X13, X14); \ + ROUND_SSSE3(X4, X5, X6, X7, 16+192(BP), 32+192(BP), 48+192(BP), 64+192(BP), X8, X13, X14); \ + ROUND_SSSE3(X4, X5, X6, X7, 16+256(BP), 32+256(BP), 48+256(BP), 64+256(BP), X8, X13, X14); \ + ROUND_SSSE3(X4, X5, X6, X7, 16+320(BP), 32+320(BP), 48+320(BP), 64+320(BP), X8, X13, X14); \ + ROUND_SSSE3(X4, X5, X6, X7, 16+384(BP), 32+384(BP), 48+384(BP), 64+384(BP), X8, X13, X14); \ + ROUND_SSSE3(X4, X5, X6, X7, 16+448(BP), 32+448(BP), 48+448(BP), 64+448(BP), X8, X13, X14); \ + ROUND_SSSE3(X4, X5, X6, X7, 16+512(BP), 32+512(BP), 48+512(BP), 64+512(BP), X8, X13, X14); \ + ROUND_SSSE3(X4, X5, X6, X7, 16+576(BP), 32+576(BP), 48+576(BP), 64+576(BP), X8, X13, X14) #define BLAKE2s_SSE4() \ LOAD_MSG_SSE4(X8, X9, X10, X11, SI, 0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15); \ @@ -372,16 +373,12 @@ GLOBL counter<>(SB), (NOPTR+RODATA), $16 MOVQ blocks_len, DX; \ \ MOVQ SP, BP; \ - MOVQ SP, R9; \ - ADDQ $15, R9; \ - ANDQ $~15, R9; \ - MOVQ R9, SP; \ + ADDQ $15, BP; \ + ANDQ $~15, BP; \ \ MOVQ 0(BX), R9; \ - MOVQ R9, 0(SP); \ - XORQ R9, R9; \ - MOVQ R9, 8(SP); \ - MOVL CX, 8(SP); \ + MOVQ R9, 0(BP); \ + MOVQ CX, 8(BP); \ \ MOVOU 0(AX), X0; \ MOVOU 16(AX), X1; \ @@ -391,7 +388,7 @@ GLOBL counter<>(SB), (NOPTR+RODATA), $16 MOVOU counter<>(SB), X12; \ MOVOU rol16<>(SB), X13; \ MOVOU rol8<>(SB), X14; \ - MOVO 0(SP), X15; \ + MOVO 0(BP), X15; \ \ loop: \ MOVO X0, X4; \ @@ -413,14 +410,12 @@ GLOBL counter<>(SB), (NOPTR+RODATA), $16 SUBQ $64, DX; \ JNE loop; \ \ - MOVO X15, 0(SP); \ - MOVQ 0(SP), R9; \ + MOVO X15, 0(BP); \ + MOVQ 0(BP), R9; \ MOVQ R9, 0(BX); \ \ MOVOU X0, 0(AX); \ - MOVOU X1, 16(AX); \ - \ - MOVQ BP, SP + MOVOU X1, 16(AX) // func hashBlocksSSE2(h *[8]uint32, c *[2]uint32, flag uint32, blocks []byte) TEXT ·hashBlocksSSE2(SB), 0, $672-48 // frame = 656 + 16 byte alignment diff --git a/vendor/golang.org/x/crypto/blake2s/blake2s_ref.go b/vendor/golang.org/x/crypto/blake2s/blake2s_ref.go index 94ef01d96e..799dba0c41 100644 --- a/vendor/golang.org/x/crypto/blake2s/blake2s_ref.go +++ b/vendor/golang.org/x/crypto/blake2s/blake2s_ref.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build (!amd64 && !386) || !gc || purego // +build !amd64,!386 !gc purego package blake2s diff --git a/vendor/golang.org/x/crypto/blake2s/register.go b/vendor/golang.org/x/crypto/blake2s/register.go index d277459a1c..ef79ff3c67 100644 --- a/vendor/golang.org/x/crypto/blake2s/register.go +++ b/vendor/golang.org/x/crypto/blake2s/register.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build go1.9 // +build go1.9 package blake2s diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go index c474e5a804..94c71ac1ac 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go +++ b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build go1.11 && gc && !purego // +build go1.11,gc,!purego package chacha20 diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s index 8fb49a13e3..63cae9e6f0 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s +++ b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build go1.11 && gc && !purego // +build go1.11,gc,!purego #include "textflag.h" diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go b/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go index 3e8a609fbd..025b49897e 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go +++ b/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build (!arm64 && !s390x && !ppc64le) || (arm64 && !go1.11) || !gc || purego // +build !arm64,!s390x,!ppc64le arm64,!go1.11 !gc purego package chacha20 diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go index 2806c6325d..da420b2e97 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go +++ b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build gc && !purego // +build gc,!purego package chacha20 diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s index 3dad4b2fa2..5c0fed26f8 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s +++ b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s @@ -19,6 +19,7 @@ // The differences in this and the original implementation are // due to the calling conventions and initialization of constants. +//go:build gc && !purego // +build gc,!purego #include "textflag.h" diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go b/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go index a0774dde1c..c5898db465 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go +++ b/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build gc && !purego // +build gc,!purego package chacha20 diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s b/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s index 818161189b..f3ef5a019d 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s +++ b/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build gc && !purego // +build gc,!purego #include "go_asm.h" diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go index c41d061485..25959b9a6e 100644 --- a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go +++ b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build gc && !purego // +build gc,!purego package chacha20poly1305 diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s index 55226b0e6c..867c181a14 100644 --- a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s +++ b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s @@ -4,6 +4,7 @@ // This file was originally from https://golang.org/cl/24717 by Vlad Krasnov of CloudFlare. +//go:build gc && !purego // +build gc,!purego #include "textflag.h" diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go index 13941c476f..f832b33d45 100644 --- a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go +++ b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !amd64 || !gc || purego // +build !amd64 !gc purego package chacha20poly1305 diff --git a/vendor/golang.org/x/crypto/cryptobyte/asn1.go b/vendor/golang.org/x/crypto/cryptobyte/asn1.go new file mode 100644 index 0000000000..83c776de08 --- /dev/null +++ b/vendor/golang.org/x/crypto/cryptobyte/asn1.go @@ -0,0 +1,804 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cryptobyte + +import ( + encoding_asn1 "encoding/asn1" + "fmt" + "math/big" + "reflect" + "time" + + "golang.org/x/crypto/cryptobyte/asn1" +) + +// This file contains ASN.1-related methods for String and Builder. + +// Builder + +// AddASN1Int64 appends a DER-encoded ASN.1 INTEGER. +func (b *Builder) AddASN1Int64(v int64) { + b.addASN1Signed(asn1.INTEGER, v) +} + +// AddASN1Int64WithTag appends a DER-encoded ASN.1 INTEGER with the +// given tag. +func (b *Builder) AddASN1Int64WithTag(v int64, tag asn1.Tag) { + b.addASN1Signed(tag, v) +} + +// AddASN1Enum appends a DER-encoded ASN.1 ENUMERATION. +func (b *Builder) AddASN1Enum(v int64) { + b.addASN1Signed(asn1.ENUM, v) +} + +func (b *Builder) addASN1Signed(tag asn1.Tag, v int64) { + b.AddASN1(tag, func(c *Builder) { + length := 1 + for i := v; i >= 0x80 || i < -0x80; i >>= 8 { + length++ + } + + for ; length > 0; length-- { + i := v >> uint((length-1)*8) & 0xff + c.AddUint8(uint8(i)) + } + }) +} + +// AddASN1Uint64 appends a DER-encoded ASN.1 INTEGER. +func (b *Builder) AddASN1Uint64(v uint64) { + b.AddASN1(asn1.INTEGER, func(c *Builder) { + length := 1 + for i := v; i >= 0x80; i >>= 8 { + length++ + } + + for ; length > 0; length-- { + i := v >> uint((length-1)*8) & 0xff + c.AddUint8(uint8(i)) + } + }) +} + +// AddASN1BigInt appends a DER-encoded ASN.1 INTEGER. +func (b *Builder) AddASN1BigInt(n *big.Int) { + if b.err != nil { + return + } + + b.AddASN1(asn1.INTEGER, func(c *Builder) { + if n.Sign() < 0 { + // A negative number has to be converted to two's-complement form. So we + // invert and subtract 1. If the most-significant-bit isn't set then + // we'll need to pad the beginning with 0xff in order to keep the number + // negative. + nMinus1 := new(big.Int).Neg(n) + nMinus1.Sub(nMinus1, bigOne) + bytes := nMinus1.Bytes() + for i := range bytes { + bytes[i] ^= 0xff + } + if len(bytes) == 0 || bytes[0]&0x80 == 0 { + c.add(0xff) + } + c.add(bytes...) 
+ } else if n.Sign() == 0 { + c.add(0) + } else { + bytes := n.Bytes() + if bytes[0]&0x80 != 0 { + c.add(0) + } + c.add(bytes...) + } + }) +} + +// AddASN1OctetString appends a DER-encoded ASN.1 OCTET STRING. +func (b *Builder) AddASN1OctetString(bytes []byte) { + b.AddASN1(asn1.OCTET_STRING, func(c *Builder) { + c.AddBytes(bytes) + }) +} + +const generalizedTimeFormatStr = "20060102150405Z0700" + +// AddASN1GeneralizedTime appends a DER-encoded ASN.1 GENERALIZEDTIME. +func (b *Builder) AddASN1GeneralizedTime(t time.Time) { + if t.Year() < 0 || t.Year() > 9999 { + b.err = fmt.Errorf("cryptobyte: cannot represent %v as a GeneralizedTime", t) + return + } + b.AddASN1(asn1.GeneralizedTime, func(c *Builder) { + c.AddBytes([]byte(t.Format(generalizedTimeFormatStr))) + }) +} + +// AddASN1UTCTime appends a DER-encoded ASN.1 UTCTime. +func (b *Builder) AddASN1UTCTime(t time.Time) { + b.AddASN1(asn1.UTCTime, func(c *Builder) { + // As utilized by the X.509 profile, UTCTime can only + // represent the years 1950 through 2049. + if t.Year() < 1950 || t.Year() >= 2050 { + b.err = fmt.Errorf("cryptobyte: cannot represent %v as a UTCTime", t) + return + } + c.AddBytes([]byte(t.Format(defaultUTCTimeFormatStr))) + }) +} + +// AddASN1BitString appends a DER-encoded ASN.1 BIT STRING. This does not +// support BIT STRINGs that are not a whole number of bytes. +func (b *Builder) AddASN1BitString(data []byte) { + b.AddASN1(asn1.BIT_STRING, func(b *Builder) { + b.AddUint8(0) + b.AddBytes(data) + }) +} + +func (b *Builder) addBase128Int(n int64) { + var length int + if n == 0 { + length = 1 + } else { + for i := n; i > 0; i >>= 7 { + length++ + } + } + + for i := length - 1; i >= 0; i-- { + o := byte(n >> uint(i*7)) + o &= 0x7f + if i != 0 { + o |= 0x80 + } + + b.add(o) + } +} + +func isValidOID(oid encoding_asn1.ObjectIdentifier) bool { + if len(oid) < 2 { + return false + } + + if oid[0] > 2 || (oid[0] <= 1 && oid[1] >= 40) { + return false + } + + for _, v := range oid { + if v < 0 { + return false + } + } + + return true +} + +func (b *Builder) AddASN1ObjectIdentifier(oid encoding_asn1.ObjectIdentifier) { + b.AddASN1(asn1.OBJECT_IDENTIFIER, func(b *Builder) { + if !isValidOID(oid) { + b.err = fmt.Errorf("cryptobyte: invalid OID: %v", oid) + return + } + + b.addBase128Int(int64(oid[0])*40 + int64(oid[1])) + for _, v := range oid[2:] { + b.addBase128Int(int64(v)) + } + }) +} + +func (b *Builder) AddASN1Boolean(v bool) { + b.AddASN1(asn1.BOOLEAN, func(b *Builder) { + if v { + b.AddUint8(0xff) + } else { + b.AddUint8(0) + } + }) +} + +func (b *Builder) AddASN1NULL() { + b.add(uint8(asn1.NULL), 0) +} + +// MarshalASN1 calls encoding_asn1.Marshal on its input and appends the result if +// successful or records an error if one occurred. +func (b *Builder) MarshalASN1(v interface{}) { + // NOTE(martinkr): This is somewhat of a hack to allow propagation of + // encoding_asn1.Marshal errors into Builder.err. N.B. if you call MarshalASN1 with a + // value embedded into a struct, its tag information is lost. + if b.err != nil { + return + } + bytes, err := encoding_asn1.Marshal(v) + if err != nil { + b.err = err + return + } + b.AddBytes(bytes) +} + +// AddASN1 appends an ASN.1 object. The object is prefixed with the given tag. +// Tags greater than 30 are not supported and result in an error (i.e. +// low-tag-number form only). The child builder passed to the +// BuilderContinuation can be used to build the content of the ASN.1 object. 
+func (b *Builder) AddASN1(tag asn1.Tag, f BuilderContinuation) { + if b.err != nil { + return + } + // Identifiers with the low five bits set indicate high-tag-number format + // (two or more octets), which we don't support. + if tag&0x1f == 0x1f { + b.err = fmt.Errorf("cryptobyte: high-tag number identifier octects not supported: 0x%x", tag) + return + } + b.AddUint8(uint8(tag)) + b.addLengthPrefixed(1, true, f) +} + +// String + +// ReadASN1Boolean decodes an ASN.1 BOOLEAN and converts it to a boolean +// representation into out and advances. It reports whether the read +// was successful. +func (s *String) ReadASN1Boolean(out *bool) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.BOOLEAN) || len(bytes) != 1 { + return false + } + + switch bytes[0] { + case 0: + *out = false + case 0xff: + *out = true + default: + return false + } + + return true +} + +var bigIntType = reflect.TypeOf((*big.Int)(nil)).Elem() + +// ReadASN1Integer decodes an ASN.1 INTEGER into out and advances. If out does +// not point to an integer or to a big.Int, it panics. It reports whether the +// read was successful. +func (s *String) ReadASN1Integer(out interface{}) bool { + if reflect.TypeOf(out).Kind() != reflect.Ptr { + panic("out is not a pointer") + } + switch reflect.ValueOf(out).Elem().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + var i int64 + if !s.readASN1Int64(&i) || reflect.ValueOf(out).Elem().OverflowInt(i) { + return false + } + reflect.ValueOf(out).Elem().SetInt(i) + return true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + var u uint64 + if !s.readASN1Uint64(&u) || reflect.ValueOf(out).Elem().OverflowUint(u) { + return false + } + reflect.ValueOf(out).Elem().SetUint(u) + return true + case reflect.Struct: + if reflect.TypeOf(out).Elem() == bigIntType { + return s.readASN1BigInt(out.(*big.Int)) + } + } + panic("out does not point to an integer type") +} + +func checkASN1Integer(bytes []byte) bool { + if len(bytes) == 0 { + // An INTEGER is encoded with at least one octet. + return false + } + if len(bytes) == 1 { + return true + } + if bytes[0] == 0 && bytes[1]&0x80 == 0 || bytes[0] == 0xff && bytes[1]&0x80 == 0x80 { + // Value is not minimally encoded. + return false + } + return true +} + +var bigOne = big.NewInt(1) + +func (s *String) readASN1BigInt(out *big.Int) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) { + return false + } + if bytes[0]&0x80 == 0x80 { + // Negative number. + neg := make([]byte, len(bytes)) + for i, b := range bytes { + neg[i] = ^b + } + out.SetBytes(neg) + out.Add(out, bigOne) + out.Neg(out) + } else { + out.SetBytes(bytes) + } + return true +} + +func (s *String) readASN1Int64(out *int64) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Signed(out, bytes) { + return false + } + return true +} + +func asn1Signed(out *int64, n []byte) bool { + length := len(n) + if length > 8 { + return false + } + for i := 0; i < length; i++ { + *out <<= 8 + *out |= int64(n[i]) + } + // Shift up and down in order to sign extend the result. 
+ *out <<= 64 - uint8(length)*8 + *out >>= 64 - uint8(length)*8 + return true +} + +func (s *String) readASN1Uint64(out *uint64) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Unsigned(out, bytes) { + return false + } + return true +} + +func asn1Unsigned(out *uint64, n []byte) bool { + length := len(n) + if length > 9 || length == 9 && n[0] != 0 { + // Too large for uint64. + return false + } + if n[0]&0x80 != 0 { + // Negative number. + return false + } + for i := 0; i < length; i++ { + *out <<= 8 + *out |= uint64(n[i]) + } + return true +} + +// ReadASN1Int64WithTag decodes an ASN.1 INTEGER with the given tag into out +// and advances. It reports whether the read was successful and resulted in a +// value that can be represented in an int64. +func (s *String) ReadASN1Int64WithTag(out *int64, tag asn1.Tag) bool { + var bytes String + return s.ReadASN1(&bytes, tag) && checkASN1Integer(bytes) && asn1Signed(out, bytes) +} + +// ReadASN1Enum decodes an ASN.1 ENUMERATION into out and advances. It reports +// whether the read was successful. +func (s *String) ReadASN1Enum(out *int) bool { + var bytes String + var i int64 + if !s.ReadASN1(&bytes, asn1.ENUM) || !checkASN1Integer(bytes) || !asn1Signed(&i, bytes) { + return false + } + if int64(int(i)) != i { + return false + } + *out = int(i) + return true +} + +func (s *String) readBase128Int(out *int) bool { + ret := 0 + for i := 0; len(*s) > 0; i++ { + if i == 4 { + return false + } + ret <<= 7 + b := s.read(1)[0] + ret |= int(b & 0x7f) + if b&0x80 == 0 { + *out = ret + return true + } + } + return false // truncated +} + +// ReadASN1ObjectIdentifier decodes an ASN.1 OBJECT IDENTIFIER into out and +// advances. It reports whether the read was successful. +func (s *String) ReadASN1ObjectIdentifier(out *encoding_asn1.ObjectIdentifier) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.OBJECT_IDENTIFIER) || len(bytes) == 0 { + return false + } + + // In the worst case, we get two elements from the first byte (which is + // encoded differently) and then every varint is a single byte long. + components := make([]int, len(bytes)+1) + + // The first varint is 40*value1 + value2: + // According to this packing, value1 can take the values 0, 1 and 2 only. + // When value1 = 0 or value1 = 1, then value2 is <= 39. When value1 = 2, + // then there are no restrictions on value2. + var v int + if !bytes.readBase128Int(&v) { + return false + } + if v < 80 { + components[0] = v / 40 + components[1] = v % 40 + } else { + components[0] = 2 + components[1] = v - 80 + } + + i := 2 + for ; len(bytes) > 0; i++ { + if !bytes.readBase128Int(&v) { + return false + } + components[i] = v + } + *out = components[:i] + return true +} + +// ReadASN1GeneralizedTime decodes an ASN.1 GENERALIZEDTIME into out and +// advances. It reports whether the read was successful. +func (s *String) ReadASN1GeneralizedTime(out *time.Time) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.GeneralizedTime) { + return false + } + t := string(bytes) + res, err := time.Parse(generalizedTimeFormatStr, t) + if err != nil { + return false + } + if serialized := res.Format(generalizedTimeFormatStr); serialized != t { + return false + } + *out = res + return true +} + +const defaultUTCTimeFormatStr = "060102150405Z0700" + +// ReadASN1UTCTime decodes an ASN.1 UTCTime into out and advances. +// It reports whether the read was successful. 
+func (s *String) ReadASN1UTCTime(out *time.Time) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.UTCTime) { + return false + } + t := string(bytes) + + formatStr := defaultUTCTimeFormatStr + var err error + res, err := time.Parse(formatStr, t) + if err != nil { + // Fallback to minute precision if we can't parse second + // precision. If we are following X.509 or X.690 we shouldn't + // support this, but we do. + formatStr = "0601021504Z0700" + res, err = time.Parse(formatStr, t) + } + if err != nil { + return false + } + + if serialized := res.Format(formatStr); serialized != t { + return false + } + + if res.Year() >= 2050 { + // UTCTime interprets the low order digits 50-99 as 1950-99. + // This only applies to its use in the X.509 profile. + // See https://tools.ietf.org/html/rfc5280#section-4.1.2.5.1 + res = res.AddDate(-100, 0, 0) + } + *out = res + return true +} + +// ReadASN1BitString decodes an ASN.1 BIT STRING into out and advances. +// It reports whether the read was successful. +func (s *String) ReadASN1BitString(out *encoding_asn1.BitString) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.BIT_STRING) || len(bytes) == 0 || + len(bytes)*8/8 != len(bytes) { + return false + } + + paddingBits := uint8(bytes[0]) + bytes = bytes[1:] + if paddingBits > 7 || + len(bytes) == 0 && paddingBits != 0 || + len(bytes) > 0 && bytes[len(bytes)-1]&(1< 4 || len(*s) < int(2+lenLen) { + return false + } + + lenBytes := String((*s)[2 : 2+lenLen]) + if !lenBytes.readUnsigned(&len32, int(lenLen)) { + return false + } + + // ITU-T X.690 section 10.1 (DER length forms) requires encoding the length + // with the minimum number of octets. + if len32 < 128 { + // Length should have used short-form encoding. + return false + } + if len32>>((lenLen-1)*8) == 0 { + // Leading octet is 0. Length should have been at least one byte shorter. + return false + } + + headerLen = 2 + uint32(lenLen) + if headerLen+len32 < len32 { + // Overflow. + return false + } + length = headerLen + len32 + } + + if int(length) < 0 || !s.ReadBytes((*[]byte)(out), int(length)) { + return false + } + if skipHeader && !out.Skip(int(headerLen)) { + panic("cryptobyte: internal error") + } + + return true +} diff --git a/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go b/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go new file mode 100644 index 0000000000..cda8e3edfd --- /dev/null +++ b/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go @@ -0,0 +1,46 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package asn1 contains supporting types for parsing and building ASN.1 +// messages with the cryptobyte package. +package asn1 // import "golang.org/x/crypto/cryptobyte/asn1" + +// Tag represents an ASN.1 identifier octet, consisting of a tag number +// (indicating a type) and class (such as context-specific or constructed). +// +// Methods in the cryptobyte package only support the low-tag-number form, i.e. +// a single identifier octet with bits 7-8 encoding the class and bits 1-6 +// encoding the tag number. +type Tag uint8 + +const ( + classConstructed = 0x20 + classContextSpecific = 0x80 +) + +// Constructed returns t with the constructed class bit set. +func (t Tag) Constructed() Tag { return t | classConstructed } + +// ContextSpecific returns t with the context-specific class bit set. 
+func (t Tag) ContextSpecific() Tag { return t | classContextSpecific } + +// The following is a list of standard tag and class combinations. +const ( + BOOLEAN = Tag(1) + INTEGER = Tag(2) + BIT_STRING = Tag(3) + OCTET_STRING = Tag(4) + NULL = Tag(5) + OBJECT_IDENTIFIER = Tag(6) + ENUM = Tag(10) + UTF8String = Tag(12) + SEQUENCE = Tag(16 | classConstructed) + SET = Tag(17 | classConstructed) + PrintableString = Tag(19) + T61String = Tag(20) + IA5String = Tag(22) + UTCTime = Tag(23) + GeneralizedTime = Tag(24) + GeneralString = Tag(27) +) diff --git a/vendor/golang.org/x/crypto/cryptobyte/builder.go b/vendor/golang.org/x/crypto/cryptobyte/builder.go new file mode 100644 index 0000000000..ca7b1db5ce --- /dev/null +++ b/vendor/golang.org/x/crypto/cryptobyte/builder.go @@ -0,0 +1,337 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cryptobyte + +import ( + "errors" + "fmt" +) + +// A Builder builds byte strings from fixed-length and length-prefixed values. +// Builders either allocate space as needed, or are ‘fixed’, which means that +// they write into a given buffer and produce an error if it's exhausted. +// +// The zero value is a usable Builder that allocates space as needed. +// +// Simple values are marshaled and appended to a Builder using methods on the +// Builder. Length-prefixed values are marshaled by providing a +// BuilderContinuation, which is a function that writes the inner contents of +// the value to a given Builder. See the documentation for BuilderContinuation +// for details. +type Builder struct { + err error + result []byte + fixedSize bool + child *Builder + offset int + pendingLenLen int + pendingIsASN1 bool + inContinuation *bool +} + +// NewBuilder creates a Builder that appends its output to the given buffer. +// Like append(), the slice will be reallocated if its capacity is exceeded. +// Use Bytes to get the final buffer. +func NewBuilder(buffer []byte) *Builder { + return &Builder{ + result: buffer, + } +} + +// NewFixedBuilder creates a Builder that appends its output into the given +// buffer. This builder does not reallocate the output buffer. Writes that +// would exceed the buffer's capacity are treated as an error. +func NewFixedBuilder(buffer []byte) *Builder { + return &Builder{ + result: buffer, + fixedSize: true, + } +} + +// SetError sets the value to be returned as the error from Bytes. Writes +// performed after calling SetError are ignored. +func (b *Builder) SetError(err error) { + b.err = err +} + +// Bytes returns the bytes written by the builder or an error if one has +// occurred during building. +func (b *Builder) Bytes() ([]byte, error) { + if b.err != nil { + return nil, b.err + } + return b.result[b.offset:], nil +} + +// BytesOrPanic returns the bytes written by the builder or panics if an error +// has occurred during building. +func (b *Builder) BytesOrPanic() []byte { + if b.err != nil { + panic(b.err) + } + return b.result[b.offset:] +} + +// AddUint8 appends an 8-bit value to the byte string. +func (b *Builder) AddUint8(v uint8) { + b.add(byte(v)) +} + +// AddUint16 appends a big-endian, 16-bit value to the byte string. +func (b *Builder) AddUint16(v uint16) { + b.add(byte(v>>8), byte(v)) +} + +// AddUint24 appends a big-endian, 24-bit value to the byte string. The highest +// byte of the 32-bit input value is silently truncated. 
+func (b *Builder) AddUint24(v uint32) { + b.add(byte(v>>16), byte(v>>8), byte(v)) +} + +// AddUint32 appends a big-endian, 32-bit value to the byte string. +func (b *Builder) AddUint32(v uint32) { + b.add(byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) +} + +// AddBytes appends a sequence of bytes to the byte string. +func (b *Builder) AddBytes(v []byte) { + b.add(v...) +} + +// BuilderContinuation is a continuation-passing interface for building +// length-prefixed byte sequences. Builder methods for length-prefixed +// sequences (AddUint8LengthPrefixed etc) will invoke the BuilderContinuation +// supplied to them. The child builder passed to the continuation can be used +// to build the content of the length-prefixed sequence. For example: +// +// parent := cryptobyte.NewBuilder() +// parent.AddUint8LengthPrefixed(func (child *Builder) { +// child.AddUint8(42) +// child.AddUint8LengthPrefixed(func (grandchild *Builder) { +// grandchild.AddUint8(5) +// }) +// }) +// +// It is an error to write more bytes to the child than allowed by the reserved +// length prefix. After the continuation returns, the child must be considered +// invalid, i.e. users must not store any copies or references of the child +// that outlive the continuation. +// +// If the continuation panics with a value of type BuildError then the inner +// error will be returned as the error from Bytes. If the child panics +// otherwise then Bytes will repanic with the same value. +type BuilderContinuation func(child *Builder) + +// BuildError wraps an error. If a BuilderContinuation panics with this value, +// the panic will be recovered and the inner error will be returned from +// Builder.Bytes. +type BuildError struct { + Err error +} + +// AddUint8LengthPrefixed adds a 8-bit length-prefixed byte sequence. +func (b *Builder) AddUint8LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(1, false, f) +} + +// AddUint16LengthPrefixed adds a big-endian, 16-bit length-prefixed byte sequence. +func (b *Builder) AddUint16LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(2, false, f) +} + +// AddUint24LengthPrefixed adds a big-endian, 24-bit length-prefixed byte sequence. +func (b *Builder) AddUint24LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(3, false, f) +} + +// AddUint32LengthPrefixed adds a big-endian, 32-bit length-prefixed byte sequence. +func (b *Builder) AddUint32LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(4, false, f) +} + +func (b *Builder) callContinuation(f BuilderContinuation, arg *Builder) { + if !*b.inContinuation { + *b.inContinuation = true + + defer func() { + *b.inContinuation = false + + r := recover() + if r == nil { + return + } + + if buildError, ok := r.(BuildError); ok { + b.err = buildError.Err + } else { + panic(r) + } + }() + } + + f(arg) +} + +func (b *Builder) addLengthPrefixed(lenLen int, isASN1 bool, f BuilderContinuation) { + // Subsequent writes can be ignored if the builder has encountered an error. + if b.err != nil { + return + } + + offset := len(b.result) + b.add(make([]byte, lenLen)...) 
+ + if b.inContinuation == nil { + b.inContinuation = new(bool) + } + + b.child = &Builder{ + result: b.result, + fixedSize: b.fixedSize, + offset: offset, + pendingLenLen: lenLen, + pendingIsASN1: isASN1, + inContinuation: b.inContinuation, + } + + b.callContinuation(f, b.child) + b.flushChild() + if b.child != nil { + panic("cryptobyte: internal error") + } +} + +func (b *Builder) flushChild() { + if b.child == nil { + return + } + b.child.flushChild() + child := b.child + b.child = nil + + if child.err != nil { + b.err = child.err + return + } + + length := len(child.result) - child.pendingLenLen - child.offset + + if length < 0 { + panic("cryptobyte: internal error") // result unexpectedly shrunk + } + + if child.pendingIsASN1 { + // For ASN.1, we reserved a single byte for the length. If that turned out + // to be incorrect, we have to move the contents along in order to make + // space. + if child.pendingLenLen != 1 { + panic("cryptobyte: internal error") + } + var lenLen, lenByte uint8 + if int64(length) > 0xfffffffe { + b.err = errors.New("pending ASN.1 child too long") + return + } else if length > 0xffffff { + lenLen = 5 + lenByte = 0x80 | 4 + } else if length > 0xffff { + lenLen = 4 + lenByte = 0x80 | 3 + } else if length > 0xff { + lenLen = 3 + lenByte = 0x80 | 2 + } else if length > 0x7f { + lenLen = 2 + lenByte = 0x80 | 1 + } else { + lenLen = 1 + lenByte = uint8(length) + length = 0 + } + + // Insert the initial length byte, make space for successive length bytes, + // and adjust the offset. + child.result[child.offset] = lenByte + extraBytes := int(lenLen - 1) + if extraBytes != 0 { + child.add(make([]byte, extraBytes)...) + childStart := child.offset + child.pendingLenLen + copy(child.result[childStart+extraBytes:], child.result[childStart:]) + } + child.offset++ + child.pendingLenLen = extraBytes + } + + l := length + for i := child.pendingLenLen - 1; i >= 0; i-- { + child.result[child.offset+i] = uint8(l) + l >>= 8 + } + if l != 0 { + b.err = fmt.Errorf("cryptobyte: pending child length %d exceeds %d-byte length prefix", length, child.pendingLenLen) + return + } + + if b.fixedSize && &b.result[0] != &child.result[0] { + panic("cryptobyte: BuilderContinuation reallocated a fixed-size buffer") + } + + b.result = child.result +} + +func (b *Builder) add(bytes ...byte) { + if b.err != nil { + return + } + if b.child != nil { + panic("cryptobyte: attempted write while child is pending") + } + if len(b.result)+len(bytes) < len(bytes) { + b.err = errors.New("cryptobyte: length overflow") + } + if b.fixedSize && len(b.result)+len(bytes) > cap(b.result) { + b.err = errors.New("cryptobyte: Builder is exceeding its fixed-size buffer") + return + } + b.result = append(b.result, bytes...) +} + +// Unwrite rolls back n bytes written directly to the Builder. An attempt by a +// child builder passed to a continuation to unwrite bytes from its parent will +// panic. +func (b *Builder) Unwrite(n int) { + if b.err != nil { + return + } + if b.child != nil { + panic("cryptobyte: attempted unwrite while child is pending") + } + length := len(b.result) - b.pendingLenLen - b.offset + if length < 0 { + panic("cryptobyte: internal error") + } + if n > length { + panic("cryptobyte: attempted to unwrite more than was written") + } + b.result = b.result[:len(b.result)-n] +} + +// A MarshalingValue marshals itself into a Builder. +type MarshalingValue interface { + // Marshal is called by Builder.AddValue. It receives a pointer to a builder + // to marshal itself into. 
It may return an error that occurred during + // marshaling, such as unset or invalid values. + Marshal(b *Builder) error +} + +// AddValue calls Marshal on v, passing a pointer to the builder to append to. +// If Marshal returns an error, it is set on the Builder so that subsequent +// appends don't have an effect. +func (b *Builder) AddValue(v MarshalingValue) { + err := v.Marshal(b) + if err != nil { + b.err = err + } +} diff --git a/vendor/golang.org/x/crypto/cryptobyte/string.go b/vendor/golang.org/x/crypto/cryptobyte/string.go new file mode 100644 index 0000000000..589d297e6b --- /dev/null +++ b/vendor/golang.org/x/crypto/cryptobyte/string.go @@ -0,0 +1,161 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cryptobyte contains types that help with parsing and constructing +// length-prefixed, binary messages, including ASN.1 DER. (The asn1 subpackage +// contains useful ASN.1 constants.) +// +// The String type is for parsing. It wraps a []byte slice and provides helper +// functions for consuming structures, value by value. +// +// The Builder type is for constructing messages. It providers helper functions +// for appending values and also for appending length-prefixed submessages – +// without having to worry about calculating the length prefix ahead of time. +// +// See the documentation and examples for the Builder and String types to get +// started. +package cryptobyte // import "golang.org/x/crypto/cryptobyte" + +// String represents a string of bytes. It provides methods for parsing +// fixed-length and length-prefixed values from it. +type String []byte + +// read advances a String by n bytes and returns them. If less than n bytes +// remain, it returns nil. +func (s *String) read(n int) []byte { + if len(*s) < n || n < 0 { + return nil + } + v := (*s)[:n] + *s = (*s)[n:] + return v +} + +// Skip advances the String by n byte and reports whether it was successful. +func (s *String) Skip(n int) bool { + return s.read(n) != nil +} + +// ReadUint8 decodes an 8-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint8(out *uint8) bool { + v := s.read(1) + if v == nil { + return false + } + *out = uint8(v[0]) + return true +} + +// ReadUint16 decodes a big-endian, 16-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint16(out *uint16) bool { + v := s.read(2) + if v == nil { + return false + } + *out = uint16(v[0])<<8 | uint16(v[1]) + return true +} + +// ReadUint24 decodes a big-endian, 24-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint24(out *uint32) bool { + v := s.read(3) + if v == nil { + return false + } + *out = uint32(v[0])<<16 | uint32(v[1])<<8 | uint32(v[2]) + return true +} + +// ReadUint32 decodes a big-endian, 32-bit value into out and advances over it. +// It reports whether the read was successful. 
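// The String helpers in this file mirror the Builder methods above. A minimal
// round-trip sketch using both types together — build a length-prefixed record,
// then consume it value by value. Illustrative only: the record layout (one type
// byte followed by a 16-bit length-prefixed body) is an assumption for the
// example, not something defined by this patch.
package main

import (
	"fmt"

	"golang.org/x/crypto/cryptobyte"
)

func main() {
	// Build: one type byte, then a 16-bit length-prefixed body.
	var b cryptobyte.Builder
	b.AddUint8(0x01)
	b.AddUint16LengthPrefixed(func(body *cryptobyte.Builder) {
		body.AddBytes([]byte("hello"))
	})
	msg, err := b.Bytes()
	if err != nil {
		panic(err)
	}

	// Parse: String consumes the same layout in the same order.
	s := cryptobyte.String(msg)
	var typ uint8
	var body cryptobyte.String
	if !s.ReadUint8(&typ) || !s.ReadUint16LengthPrefixed(&body) || !s.Empty() {
		panic("malformed message")
	}
	fmt.Printf("type=%d body=%q\n", typ, body)
}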
+func (s *String) ReadUint32(out *uint32) bool { + v := s.read(4) + if v == nil { + return false + } + *out = uint32(v[0])<<24 | uint32(v[1])<<16 | uint32(v[2])<<8 | uint32(v[3]) + return true +} + +func (s *String) readUnsigned(out *uint32, length int) bool { + v := s.read(length) + if v == nil { + return false + } + var result uint32 + for i := 0; i < length; i++ { + result <<= 8 + result |= uint32(v[i]) + } + *out = result + return true +} + +func (s *String) readLengthPrefixed(lenLen int, outChild *String) bool { + lenBytes := s.read(lenLen) + if lenBytes == nil { + return false + } + var length uint32 + for _, b := range lenBytes { + length = length << 8 + length = length | uint32(b) + } + v := s.read(int(length)) + if v == nil { + return false + } + *outChild = v + return true +} + +// ReadUint8LengthPrefixed reads the content of an 8-bit length-prefixed value +// into out and advances over it. It reports whether the read was successful. +func (s *String) ReadUint8LengthPrefixed(out *String) bool { + return s.readLengthPrefixed(1, out) +} + +// ReadUint16LengthPrefixed reads the content of a big-endian, 16-bit +// length-prefixed value into out and advances over it. It reports whether the +// read was successful. +func (s *String) ReadUint16LengthPrefixed(out *String) bool { + return s.readLengthPrefixed(2, out) +} + +// ReadUint24LengthPrefixed reads the content of a big-endian, 24-bit +// length-prefixed value into out and advances over it. It reports whether +// the read was successful. +func (s *String) ReadUint24LengthPrefixed(out *String) bool { + return s.readLengthPrefixed(3, out) +} + +// ReadBytes reads n bytes into out and advances over them. It reports +// whether the read was successful. +func (s *String) ReadBytes(out *[]byte, n int) bool { + v := s.read(n) + if v == nil { + return false + } + *out = v + return true +} + +// CopyBytes copies len(out) bytes into out and advances over them. It reports +// whether the copy operation was successful +func (s *String) CopyBytes(out []byte) bool { + n := len(out) + v := s.read(n) + if v == nil { + return false + } + return copy(out, v) == n +} + +// Empty reports whether the string does not contain any bytes. +func (s String) Empty() bool { + return len(s) == 0 +} diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519.go b/vendor/golang.org/x/crypto/curve25519/curve25519.go index 4b9a655d1b..cda3fdd354 100644 --- a/vendor/golang.org/x/crypto/curve25519/curve25519.go +++ b/vendor/golang.org/x/crypto/curve25519/curve25519.go @@ -10,6 +10,8 @@ package curve25519 // import "golang.org/x/crypto/curve25519" import ( "crypto/subtle" "fmt" + + "golang.org/x/crypto/curve25519/internal/field" ) // ScalarMult sets dst to the product scalar * point. @@ -18,7 +20,55 @@ import ( // zeroes, irrespective of the scalar. Instead, use the X25519 function, which // will return an error. 
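// The doc comment above steers callers to X25519 rather than ScalarMult. A
// minimal caller-side sketch of that preferred flow — deriving a public key from
// a random private scalar — assuming the package's public X25519 function and
// Basepoint variable, which are defined elsewhere in this file and not shown in
// this hunk. Illustrative only:
package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/curve25519"
)

func main() {
	priv := make([]byte, 32)
	if _, err := rand.Read(priv); err != nil {
		panic(err)
	}

	// Public key: priv * basepoint.
	pub, err := curve25519.X25519(priv, curve25519.Basepoint)
	if err != nil {
		panic(err)
	}

	// A shared secret with a peer's public key would be computed the same way:
	//   shared, err := curve25519.X25519(priv, peerPub)
	// X25519 reports an all-zero output as an error, which ScalarMult cannot.
	fmt.Printf("public key: %x\n", pub)
}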
func ScalarMult(dst, scalar, point *[32]byte) { - scalarMult(dst, scalar, point) + var e [32]byte + + copy(e[:], scalar[:]) + e[0] &= 248 + e[31] &= 127 + e[31] |= 64 + + var x1, x2, z2, x3, z3, tmp0, tmp1 field.Element + x1.SetBytes(point[:]) + x2.One() + x3.Set(&x1) + z3.One() + + swap := 0 + for pos := 254; pos >= 0; pos-- { + b := e[pos/8] >> uint(pos&7) + b &= 1 + swap ^= int(b) + x2.Swap(&x3, swap) + z2.Swap(&z3, swap) + swap = int(b) + + tmp0.Subtract(&x3, &z3) + tmp1.Subtract(&x2, &z2) + x2.Add(&x2, &z2) + z2.Add(&x3, &z3) + z3.Multiply(&tmp0, &x2) + z2.Multiply(&z2, &tmp1) + tmp0.Square(&tmp1) + tmp1.Square(&x2) + x3.Add(&z3, &z2) + z2.Subtract(&z3, &z2) + x2.Multiply(&tmp1, &tmp0) + tmp1.Subtract(&tmp1, &tmp0) + z2.Square(&z2) + + z3.Mult32(&tmp1, 121666) + x3.Square(&x3) + tmp0.Add(&tmp0, &z3) + z3.Multiply(&x1, &z2) + z2.Multiply(&tmp1, &tmp0) + } + + x2.Swap(&x3, swap) + z2.Swap(&z3, swap) + + z2.Invert(&z2) + x2.Multiply(&x2, &z2) + copy(dst[:], x2.Bytes()) } // ScalarBaseMult sets dst to the product scalar * base where base is the diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519_amd64.go b/vendor/golang.org/x/crypto/curve25519/curve25519_amd64.go deleted file mode 100644 index 877b6de292..0000000000 --- a/vendor/golang.org/x/crypto/curve25519/curve25519_amd64.go +++ /dev/null @@ -1,240 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build amd64,gc,!purego - -package curve25519 - -// These functions are implemented in the .s files. The names of the functions -// in the rest of the file are also taken from the SUPERCOP sources to help -// people following along. - -//go:noescape - -func cswap(inout *[5]uint64, v uint64) - -//go:noescape - -func ladderstep(inout *[5][5]uint64) - -//go:noescape - -func freeze(inout *[5]uint64) - -//go:noescape - -func mul(dest, a, b *[5]uint64) - -//go:noescape - -func square(out, in *[5]uint64) - -// mladder uses a Montgomery ladder to calculate (xr/zr) *= s. -func mladder(xr, zr *[5]uint64, s *[32]byte) { - var work [5][5]uint64 - - work[0] = *xr - setint(&work[1], 1) - setint(&work[2], 0) - work[3] = *xr - setint(&work[4], 1) - - j := uint(6) - var prevbit byte - - for i := 31; i >= 0; i-- { - for j < 8 { - bit := ((*s)[i] >> j) & 1 - swap := bit ^ prevbit - prevbit = bit - cswap(&work[1], uint64(swap)) - ladderstep(&work) - j-- - } - j = 7 - } - - *xr = work[1] - *zr = work[2] -} - -func scalarMult(out, in, base *[32]byte) { - var e [32]byte - copy(e[:], (*in)[:]) - e[0] &= 248 - e[31] &= 127 - e[31] |= 64 - - var t, z [5]uint64 - unpack(&t, base) - mladder(&t, &z, &e) - invert(&z, &z) - mul(&t, &t, &z) - pack(out, &t) -} - -func setint(r *[5]uint64, v uint64) { - r[0] = v - r[1] = 0 - r[2] = 0 - r[3] = 0 - r[4] = 0 -} - -// unpack sets r = x where r consists of 5, 51-bit limbs in little-endian -// order. 
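// Both the field.Element.Swap calls in the new ScalarMult above and the deleted
// cswap in the amd64 path select the Montgomery-ladder variables without
// branching on the secret bit. A standalone sketch of the masking trick on plain
// uint64 limbs — illustrative only, not the package's implementation:
package main

import "fmt"

// condSwap swaps a and b when bit == 1 and leaves them unchanged when bit == 0,
// doing the same amount of work in both cases.
func condSwap(a, b *[5]uint64, bit uint64) {
	mask := -bit // all zero bits when bit==0, all one bits when bit==1
	for i := range a {
		t := mask & (a[i] ^ b[i])
		a[i] ^= t
		b[i] ^= t
	}
}

func main() {
	x := [5]uint64{1, 2, 3, 4, 5}
	y := [5]uint64{6, 7, 8, 9, 10}
	condSwap(&x, &y, 1) // swapped
	condSwap(&x, &y, 0) // unchanged
	fmt.Println(x, y)
}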
-func unpack(r *[5]uint64, x *[32]byte) { - r[0] = uint64(x[0]) | - uint64(x[1])<<8 | - uint64(x[2])<<16 | - uint64(x[3])<<24 | - uint64(x[4])<<32 | - uint64(x[5])<<40 | - uint64(x[6]&7)<<48 - - r[1] = uint64(x[6])>>3 | - uint64(x[7])<<5 | - uint64(x[8])<<13 | - uint64(x[9])<<21 | - uint64(x[10])<<29 | - uint64(x[11])<<37 | - uint64(x[12]&63)<<45 - - r[2] = uint64(x[12])>>6 | - uint64(x[13])<<2 | - uint64(x[14])<<10 | - uint64(x[15])<<18 | - uint64(x[16])<<26 | - uint64(x[17])<<34 | - uint64(x[18])<<42 | - uint64(x[19]&1)<<50 - - r[3] = uint64(x[19])>>1 | - uint64(x[20])<<7 | - uint64(x[21])<<15 | - uint64(x[22])<<23 | - uint64(x[23])<<31 | - uint64(x[24])<<39 | - uint64(x[25]&15)<<47 - - r[4] = uint64(x[25])>>4 | - uint64(x[26])<<4 | - uint64(x[27])<<12 | - uint64(x[28])<<20 | - uint64(x[29])<<28 | - uint64(x[30])<<36 | - uint64(x[31]&127)<<44 -} - -// pack sets out = x where out is the usual, little-endian form of the 5, -// 51-bit limbs in x. -func pack(out *[32]byte, x *[5]uint64) { - t := *x - freeze(&t) - - out[0] = byte(t[0]) - out[1] = byte(t[0] >> 8) - out[2] = byte(t[0] >> 16) - out[3] = byte(t[0] >> 24) - out[4] = byte(t[0] >> 32) - out[5] = byte(t[0] >> 40) - out[6] = byte(t[0] >> 48) - - out[6] ^= byte(t[1]<<3) & 0xf8 - out[7] = byte(t[1] >> 5) - out[8] = byte(t[1] >> 13) - out[9] = byte(t[1] >> 21) - out[10] = byte(t[1] >> 29) - out[11] = byte(t[1] >> 37) - out[12] = byte(t[1] >> 45) - - out[12] ^= byte(t[2]<<6) & 0xc0 - out[13] = byte(t[2] >> 2) - out[14] = byte(t[2] >> 10) - out[15] = byte(t[2] >> 18) - out[16] = byte(t[2] >> 26) - out[17] = byte(t[2] >> 34) - out[18] = byte(t[2] >> 42) - out[19] = byte(t[2] >> 50) - - out[19] ^= byte(t[3]<<1) & 0xfe - out[20] = byte(t[3] >> 7) - out[21] = byte(t[3] >> 15) - out[22] = byte(t[3] >> 23) - out[23] = byte(t[3] >> 31) - out[24] = byte(t[3] >> 39) - out[25] = byte(t[3] >> 47) - - out[25] ^= byte(t[4]<<4) & 0xf0 - out[26] = byte(t[4] >> 4) - out[27] = byte(t[4] >> 12) - out[28] = byte(t[4] >> 20) - out[29] = byte(t[4] >> 28) - out[30] = byte(t[4] >> 36) - out[31] = byte(t[4] >> 44) -} - -// invert calculates r = x^-1 mod p using Fermat's little theorem. 
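// Fermat's little theorem gives x^-1 = x^(p-2) mod p for prime p; the deleted
// addition chain below computes exactly x^(2^255 - 21), i.e. x^(p-2) for
// p = 2^255 - 19. A non-constant-time math/big sketch of the same identity, for
// illustration only:
package main

import (
	"fmt"
	"math/big"
)

func main() {
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19)) // 2^255 - 19
	x := big.NewInt(123456789)

	// Inverse via exponentiation by p-2, which is what the addition chain implements.
	inv := new(big.Int).Exp(x, new(big.Int).Sub(p, big.NewInt(2)), p)

	// Check: x * x^-1 == 1 (mod p).
	check := new(big.Int).Mul(x, inv)
	check.Mod(check, p)
	fmt.Println(check) // 1
}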
-func invert(r *[5]uint64, x *[5]uint64) { - var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t [5]uint64 - - square(&z2, x) /* 2 */ - square(&t, &z2) /* 4 */ - square(&t, &t) /* 8 */ - mul(&z9, &t, x) /* 9 */ - mul(&z11, &z9, &z2) /* 11 */ - square(&t, &z11) /* 22 */ - mul(&z2_5_0, &t, &z9) /* 2^5 - 2^0 = 31 */ - - square(&t, &z2_5_0) /* 2^6 - 2^1 */ - for i := 1; i < 5; i++ { /* 2^20 - 2^10 */ - square(&t, &t) - } - mul(&z2_10_0, &t, &z2_5_0) /* 2^10 - 2^0 */ - - square(&t, &z2_10_0) /* 2^11 - 2^1 */ - for i := 1; i < 10; i++ { /* 2^20 - 2^10 */ - square(&t, &t) - } - mul(&z2_20_0, &t, &z2_10_0) /* 2^20 - 2^0 */ - - square(&t, &z2_20_0) /* 2^21 - 2^1 */ - for i := 1; i < 20; i++ { /* 2^40 - 2^20 */ - square(&t, &t) - } - mul(&t, &t, &z2_20_0) /* 2^40 - 2^0 */ - - square(&t, &t) /* 2^41 - 2^1 */ - for i := 1; i < 10; i++ { /* 2^50 - 2^10 */ - square(&t, &t) - } - mul(&z2_50_0, &t, &z2_10_0) /* 2^50 - 2^0 */ - - square(&t, &z2_50_0) /* 2^51 - 2^1 */ - for i := 1; i < 50; i++ { /* 2^100 - 2^50 */ - square(&t, &t) - } - mul(&z2_100_0, &t, &z2_50_0) /* 2^100 - 2^0 */ - - square(&t, &z2_100_0) /* 2^101 - 2^1 */ - for i := 1; i < 100; i++ { /* 2^200 - 2^100 */ - square(&t, &t) - } - mul(&t, &t, &z2_100_0) /* 2^200 - 2^0 */ - - square(&t, &t) /* 2^201 - 2^1 */ - for i := 1; i < 50; i++ { /* 2^250 - 2^50 */ - square(&t, &t) - } - mul(&t, &t, &z2_50_0) /* 2^250 - 2^0 */ - - square(&t, &t) /* 2^251 - 2^1 */ - square(&t, &t) /* 2^252 - 2^2 */ - square(&t, &t) /* 2^253 - 2^3 */ - - square(&t, &t) /* 2^254 - 2^4 */ - - square(&t, &t) /* 2^255 - 2^5 */ - mul(r, &t, &z11) /* 2^255 - 21 */ -} diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519_amd64.s b/vendor/golang.org/x/crypto/curve25519/curve25519_amd64.s deleted file mode 100644 index 6c53380926..0000000000 --- a/vendor/golang.org/x/crypto/curve25519/curve25519_amd64.s +++ /dev/null @@ -1,1793 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This code was translated into a form compatible with 6a from the public -// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html - -// +build amd64,gc,!purego - -#define REDMASK51 0x0007FFFFFFFFFFFF - -// These constants cannot be encoded in non-MOVQ immediates. -// We access them directly from memory instead. 
- -DATA ·_121666_213(SB)/8, $996687872 -GLOBL ·_121666_213(SB), 8, $8 - -DATA ·_2P0(SB)/8, $0xFFFFFFFFFFFDA -GLOBL ·_2P0(SB), 8, $8 - -DATA ·_2P1234(SB)/8, $0xFFFFFFFFFFFFE -GLOBL ·_2P1234(SB), 8, $8 - -// func freeze(inout *[5]uint64) -TEXT ·freeze(SB),7,$0-8 - MOVQ inout+0(FP), DI - - MOVQ 0(DI),SI - MOVQ 8(DI),DX - MOVQ 16(DI),CX - MOVQ 24(DI),R8 - MOVQ 32(DI),R9 - MOVQ $REDMASK51,AX - MOVQ AX,R10 - SUBQ $18,R10 - MOVQ $3,R11 -REDUCELOOP: - MOVQ SI,R12 - SHRQ $51,R12 - ANDQ AX,SI - ADDQ R12,DX - MOVQ DX,R12 - SHRQ $51,R12 - ANDQ AX,DX - ADDQ R12,CX - MOVQ CX,R12 - SHRQ $51,R12 - ANDQ AX,CX - ADDQ R12,R8 - MOVQ R8,R12 - SHRQ $51,R12 - ANDQ AX,R8 - ADDQ R12,R9 - MOVQ R9,R12 - SHRQ $51,R12 - ANDQ AX,R9 - IMUL3Q $19,R12,R12 - ADDQ R12,SI - SUBQ $1,R11 - JA REDUCELOOP - MOVQ $1,R12 - CMPQ R10,SI - CMOVQLT R11,R12 - CMPQ AX,DX - CMOVQNE R11,R12 - CMPQ AX,CX - CMOVQNE R11,R12 - CMPQ AX,R8 - CMOVQNE R11,R12 - CMPQ AX,R9 - CMOVQNE R11,R12 - NEGQ R12 - ANDQ R12,AX - ANDQ R12,R10 - SUBQ R10,SI - SUBQ AX,DX - SUBQ AX,CX - SUBQ AX,R8 - SUBQ AX,R9 - MOVQ SI,0(DI) - MOVQ DX,8(DI) - MOVQ CX,16(DI) - MOVQ R8,24(DI) - MOVQ R9,32(DI) - RET - -// func ladderstep(inout *[5][5]uint64) -TEXT ·ladderstep(SB),0,$296-8 - MOVQ inout+0(FP),DI - - MOVQ 40(DI),SI - MOVQ 48(DI),DX - MOVQ 56(DI),CX - MOVQ 64(DI),R8 - MOVQ 72(DI),R9 - MOVQ SI,AX - MOVQ DX,R10 - MOVQ CX,R11 - MOVQ R8,R12 - MOVQ R9,R13 - ADDQ ·_2P0(SB),AX - ADDQ ·_2P1234(SB),R10 - ADDQ ·_2P1234(SB),R11 - ADDQ ·_2P1234(SB),R12 - ADDQ ·_2P1234(SB),R13 - ADDQ 80(DI),SI - ADDQ 88(DI),DX - ADDQ 96(DI),CX - ADDQ 104(DI),R8 - ADDQ 112(DI),R9 - SUBQ 80(DI),AX - SUBQ 88(DI),R10 - SUBQ 96(DI),R11 - SUBQ 104(DI),R12 - SUBQ 112(DI),R13 - MOVQ SI,0(SP) - MOVQ DX,8(SP) - MOVQ CX,16(SP) - MOVQ R8,24(SP) - MOVQ R9,32(SP) - MOVQ AX,40(SP) - MOVQ R10,48(SP) - MOVQ R11,56(SP) - MOVQ R12,64(SP) - MOVQ R13,72(SP) - MOVQ 40(SP),AX - MULQ 40(SP) - MOVQ AX,SI - MOVQ DX,CX - MOVQ 40(SP),AX - SHLQ $1,AX - MULQ 48(SP) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 40(SP),AX - SHLQ $1,AX - MULQ 56(SP) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 40(SP),AX - SHLQ $1,AX - MULQ 64(SP) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 40(SP),AX - SHLQ $1,AX - MULQ 72(SP) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 48(SP),AX - MULQ 48(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 48(SP),AX - SHLQ $1,AX - MULQ 56(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 48(SP),AX - SHLQ $1,AX - MULQ 64(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 48(SP),DX - IMUL3Q $38,DX,AX - MULQ 72(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 56(SP),AX - MULQ 56(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 56(SP),DX - IMUL3Q $38,DX,AX - MULQ 64(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 56(SP),DX - IMUL3Q $38,DX,AX - MULQ 72(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 64(SP),DX - IMUL3Q $19,DX,AX - MULQ 64(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 64(SP),DX - IMUL3Q $38,DX,AX - MULQ 72(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 72(SP),DX - IMUL3Q $19,DX,AX - MULQ 72(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ $REDMASK51,DX - SHLQ $13,SI,CX - ANDQ DX,SI - SHLQ $13,R8,R9 - ANDQ DX,R8 - ADDQ CX,R8 - SHLQ $13,R10,R11 - ANDQ DX,R10 - ADDQ R9,R10 - SHLQ $13,R12,R13 - ANDQ DX,R12 - ADDQ R11,R12 - SHLQ $13,R14,R15 - ANDQ DX,R14 - ADDQ R13,R14 - IMUL3Q $19,R15,CX - ADDQ CX,SI - MOVQ SI,CX - SHRQ $51,CX - ADDQ R8,CX - ANDQ DX,SI - MOVQ CX,R8 - SHRQ $51,CX - ADDQ R10,CX - ANDQ DX,R8 - MOVQ CX,R9 - SHRQ $51,CX - ADDQ R12,CX - ANDQ DX,R9 - MOVQ CX,AX - SHRQ $51,CX - ADDQ R14,CX - ANDQ DX,AX - MOVQ CX,R10 - SHRQ $51,CX - IMUL3Q $19,CX,CX - ADDQ CX,SI - ANDQ DX,R10 - MOVQ SI,80(SP) - MOVQ R8,88(SP) - MOVQ R9,96(SP) - MOVQ AX,104(SP) - 
MOVQ R10,112(SP) - MOVQ 0(SP),AX - MULQ 0(SP) - MOVQ AX,SI - MOVQ DX,CX - MOVQ 0(SP),AX - SHLQ $1,AX - MULQ 8(SP) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 0(SP),AX - SHLQ $1,AX - MULQ 16(SP) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 0(SP),AX - SHLQ $1,AX - MULQ 24(SP) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 0(SP),AX - SHLQ $1,AX - MULQ 32(SP) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 8(SP),AX - MULQ 8(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 8(SP),AX - SHLQ $1,AX - MULQ 16(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 8(SP),AX - SHLQ $1,AX - MULQ 24(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 8(SP),DX - IMUL3Q $38,DX,AX - MULQ 32(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 16(SP),AX - MULQ 16(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 16(SP),DX - IMUL3Q $38,DX,AX - MULQ 24(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 16(SP),DX - IMUL3Q $38,DX,AX - MULQ 32(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 24(SP),DX - IMUL3Q $19,DX,AX - MULQ 24(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 24(SP),DX - IMUL3Q $38,DX,AX - MULQ 32(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 32(SP),DX - IMUL3Q $19,DX,AX - MULQ 32(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ $REDMASK51,DX - SHLQ $13,SI,CX - ANDQ DX,SI - SHLQ $13,R8,R9 - ANDQ DX,R8 - ADDQ CX,R8 - SHLQ $13,R10,R11 - ANDQ DX,R10 - ADDQ R9,R10 - SHLQ $13,R12,R13 - ANDQ DX,R12 - ADDQ R11,R12 - SHLQ $13,R14,R15 - ANDQ DX,R14 - ADDQ R13,R14 - IMUL3Q $19,R15,CX - ADDQ CX,SI - MOVQ SI,CX - SHRQ $51,CX - ADDQ R8,CX - ANDQ DX,SI - MOVQ CX,R8 - SHRQ $51,CX - ADDQ R10,CX - ANDQ DX,R8 - MOVQ CX,R9 - SHRQ $51,CX - ADDQ R12,CX - ANDQ DX,R9 - MOVQ CX,AX - SHRQ $51,CX - ADDQ R14,CX - ANDQ DX,AX - MOVQ CX,R10 - SHRQ $51,CX - IMUL3Q $19,CX,CX - ADDQ CX,SI - ANDQ DX,R10 - MOVQ SI,120(SP) - MOVQ R8,128(SP) - MOVQ R9,136(SP) - MOVQ AX,144(SP) - MOVQ R10,152(SP) - MOVQ SI,SI - MOVQ R8,DX - MOVQ R9,CX - MOVQ AX,R8 - MOVQ R10,R9 - ADDQ ·_2P0(SB),SI - ADDQ ·_2P1234(SB),DX - ADDQ ·_2P1234(SB),CX - ADDQ ·_2P1234(SB),R8 - ADDQ ·_2P1234(SB),R9 - SUBQ 80(SP),SI - SUBQ 88(SP),DX - SUBQ 96(SP),CX - SUBQ 104(SP),R8 - SUBQ 112(SP),R9 - MOVQ SI,160(SP) - MOVQ DX,168(SP) - MOVQ CX,176(SP) - MOVQ R8,184(SP) - MOVQ R9,192(SP) - MOVQ 120(DI),SI - MOVQ 128(DI),DX - MOVQ 136(DI),CX - MOVQ 144(DI),R8 - MOVQ 152(DI),R9 - MOVQ SI,AX - MOVQ DX,R10 - MOVQ CX,R11 - MOVQ R8,R12 - MOVQ R9,R13 - ADDQ ·_2P0(SB),AX - ADDQ ·_2P1234(SB),R10 - ADDQ ·_2P1234(SB),R11 - ADDQ ·_2P1234(SB),R12 - ADDQ ·_2P1234(SB),R13 - ADDQ 160(DI),SI - ADDQ 168(DI),DX - ADDQ 176(DI),CX - ADDQ 184(DI),R8 - ADDQ 192(DI),R9 - SUBQ 160(DI),AX - SUBQ 168(DI),R10 - SUBQ 176(DI),R11 - SUBQ 184(DI),R12 - SUBQ 192(DI),R13 - MOVQ SI,200(SP) - MOVQ DX,208(SP) - MOVQ CX,216(SP) - MOVQ R8,224(SP) - MOVQ R9,232(SP) - MOVQ AX,240(SP) - MOVQ R10,248(SP) - MOVQ R11,256(SP) - MOVQ R12,264(SP) - MOVQ R13,272(SP) - MOVQ 224(SP),SI - IMUL3Q $19,SI,AX - MOVQ AX,280(SP) - MULQ 56(SP) - MOVQ AX,SI - MOVQ DX,CX - MOVQ 232(SP),DX - IMUL3Q $19,DX,AX - MOVQ AX,288(SP) - MULQ 48(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 200(SP),AX - MULQ 40(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 200(SP),AX - MULQ 48(SP) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 200(SP),AX - MULQ 56(SP) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 200(SP),AX - MULQ 64(SP) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 200(SP),AX - MULQ 72(SP) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 208(SP),AX - MULQ 40(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 208(SP),AX - MULQ 48(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 208(SP),AX - MULQ 56(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 208(SP),AX - MULQ 64(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 208(SP),DX - IMUL3Q $19,DX,AX - MULQ 72(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 216(SP),AX - MULQ 40(SP) - 
ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 216(SP),AX - MULQ 48(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 216(SP),AX - MULQ 56(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 216(SP),DX - IMUL3Q $19,DX,AX - MULQ 64(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 216(SP),DX - IMUL3Q $19,DX,AX - MULQ 72(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 224(SP),AX - MULQ 40(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 224(SP),AX - MULQ 48(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 280(SP),AX - MULQ 64(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 280(SP),AX - MULQ 72(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 232(SP),AX - MULQ 40(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 288(SP),AX - MULQ 56(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 288(SP),AX - MULQ 64(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 288(SP),AX - MULQ 72(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ $REDMASK51,DX - SHLQ $13,SI,CX - ANDQ DX,SI - SHLQ $13,R8,R9 - ANDQ DX,R8 - ADDQ CX,R8 - SHLQ $13,R10,R11 - ANDQ DX,R10 - ADDQ R9,R10 - SHLQ $13,R12,R13 - ANDQ DX,R12 - ADDQ R11,R12 - SHLQ $13,R14,R15 - ANDQ DX,R14 - ADDQ R13,R14 - IMUL3Q $19,R15,CX - ADDQ CX,SI - MOVQ SI,CX - SHRQ $51,CX - ADDQ R8,CX - MOVQ CX,R8 - SHRQ $51,CX - ANDQ DX,SI - ADDQ R10,CX - MOVQ CX,R9 - SHRQ $51,CX - ANDQ DX,R8 - ADDQ R12,CX - MOVQ CX,AX - SHRQ $51,CX - ANDQ DX,R9 - ADDQ R14,CX - MOVQ CX,R10 - SHRQ $51,CX - ANDQ DX,AX - IMUL3Q $19,CX,CX - ADDQ CX,SI - ANDQ DX,R10 - MOVQ SI,40(SP) - MOVQ R8,48(SP) - MOVQ R9,56(SP) - MOVQ AX,64(SP) - MOVQ R10,72(SP) - MOVQ 264(SP),SI - IMUL3Q $19,SI,AX - MOVQ AX,200(SP) - MULQ 16(SP) - MOVQ AX,SI - MOVQ DX,CX - MOVQ 272(SP),DX - IMUL3Q $19,DX,AX - MOVQ AX,208(SP) - MULQ 8(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 240(SP),AX - MULQ 0(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 240(SP),AX - MULQ 8(SP) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 240(SP),AX - MULQ 16(SP) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 240(SP),AX - MULQ 24(SP) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 240(SP),AX - MULQ 32(SP) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 248(SP),AX - MULQ 0(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 248(SP),AX - MULQ 8(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 248(SP),AX - MULQ 16(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 248(SP),AX - MULQ 24(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 248(SP),DX - IMUL3Q $19,DX,AX - MULQ 32(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 256(SP),AX - MULQ 0(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 256(SP),AX - MULQ 8(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 256(SP),AX - MULQ 16(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 256(SP),DX - IMUL3Q $19,DX,AX - MULQ 24(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 256(SP),DX - IMUL3Q $19,DX,AX - MULQ 32(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 264(SP),AX - MULQ 0(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 264(SP),AX - MULQ 8(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 200(SP),AX - MULQ 24(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 200(SP),AX - MULQ 32(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 272(SP),AX - MULQ 0(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 208(SP),AX - MULQ 16(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 208(SP),AX - MULQ 24(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 208(SP),AX - MULQ 32(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ $REDMASK51,DX - SHLQ $13,SI,CX - ANDQ DX,SI - SHLQ $13,R8,R9 - ANDQ DX,R8 - ADDQ CX,R8 - SHLQ $13,R10,R11 - ANDQ DX,R10 - ADDQ R9,R10 - SHLQ $13,R12,R13 - ANDQ DX,R12 - ADDQ R11,R12 - SHLQ $13,R14,R15 - ANDQ DX,R14 - ADDQ R13,R14 - IMUL3Q $19,R15,CX - ADDQ CX,SI - MOVQ SI,CX - SHRQ $51,CX - ADDQ R8,CX - MOVQ CX,R8 - SHRQ $51,CX - ANDQ DX,SI - ADDQ R10,CX - MOVQ CX,R9 - SHRQ $51,CX - ANDQ DX,R8 - ADDQ R12,CX - MOVQ CX,AX - SHRQ $51,CX - ANDQ DX,R9 - ADDQ R14,CX - MOVQ CX,R10 - SHRQ $51,CX - ANDQ DX,AX - IMUL3Q $19,CX,CX 
- ADDQ CX,SI - ANDQ DX,R10 - MOVQ SI,DX - MOVQ R8,CX - MOVQ R9,R11 - MOVQ AX,R12 - MOVQ R10,R13 - ADDQ ·_2P0(SB),DX - ADDQ ·_2P1234(SB),CX - ADDQ ·_2P1234(SB),R11 - ADDQ ·_2P1234(SB),R12 - ADDQ ·_2P1234(SB),R13 - ADDQ 40(SP),SI - ADDQ 48(SP),R8 - ADDQ 56(SP),R9 - ADDQ 64(SP),AX - ADDQ 72(SP),R10 - SUBQ 40(SP),DX - SUBQ 48(SP),CX - SUBQ 56(SP),R11 - SUBQ 64(SP),R12 - SUBQ 72(SP),R13 - MOVQ SI,120(DI) - MOVQ R8,128(DI) - MOVQ R9,136(DI) - MOVQ AX,144(DI) - MOVQ R10,152(DI) - MOVQ DX,160(DI) - MOVQ CX,168(DI) - MOVQ R11,176(DI) - MOVQ R12,184(DI) - MOVQ R13,192(DI) - MOVQ 120(DI),AX - MULQ 120(DI) - MOVQ AX,SI - MOVQ DX,CX - MOVQ 120(DI),AX - SHLQ $1,AX - MULQ 128(DI) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 120(DI),AX - SHLQ $1,AX - MULQ 136(DI) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 120(DI),AX - SHLQ $1,AX - MULQ 144(DI) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 120(DI),AX - SHLQ $1,AX - MULQ 152(DI) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 128(DI),AX - MULQ 128(DI) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 128(DI),AX - SHLQ $1,AX - MULQ 136(DI) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 128(DI),AX - SHLQ $1,AX - MULQ 144(DI) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 128(DI),DX - IMUL3Q $38,DX,AX - MULQ 152(DI) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 136(DI),AX - MULQ 136(DI) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 136(DI),DX - IMUL3Q $38,DX,AX - MULQ 144(DI) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 136(DI),DX - IMUL3Q $38,DX,AX - MULQ 152(DI) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 144(DI),DX - IMUL3Q $19,DX,AX - MULQ 144(DI) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 144(DI),DX - IMUL3Q $38,DX,AX - MULQ 152(DI) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 152(DI),DX - IMUL3Q $19,DX,AX - MULQ 152(DI) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ $REDMASK51,DX - SHLQ $13,SI,CX - ANDQ DX,SI - SHLQ $13,R8,R9 - ANDQ DX,R8 - ADDQ CX,R8 - SHLQ $13,R10,R11 - ANDQ DX,R10 - ADDQ R9,R10 - SHLQ $13,R12,R13 - ANDQ DX,R12 - ADDQ R11,R12 - SHLQ $13,R14,R15 - ANDQ DX,R14 - ADDQ R13,R14 - IMUL3Q $19,R15,CX - ADDQ CX,SI - MOVQ SI,CX - SHRQ $51,CX - ADDQ R8,CX - ANDQ DX,SI - MOVQ CX,R8 - SHRQ $51,CX - ADDQ R10,CX - ANDQ DX,R8 - MOVQ CX,R9 - SHRQ $51,CX - ADDQ R12,CX - ANDQ DX,R9 - MOVQ CX,AX - SHRQ $51,CX - ADDQ R14,CX - ANDQ DX,AX - MOVQ CX,R10 - SHRQ $51,CX - IMUL3Q $19,CX,CX - ADDQ CX,SI - ANDQ DX,R10 - MOVQ SI,120(DI) - MOVQ R8,128(DI) - MOVQ R9,136(DI) - MOVQ AX,144(DI) - MOVQ R10,152(DI) - MOVQ 160(DI),AX - MULQ 160(DI) - MOVQ AX,SI - MOVQ DX,CX - MOVQ 160(DI),AX - SHLQ $1,AX - MULQ 168(DI) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 160(DI),AX - SHLQ $1,AX - MULQ 176(DI) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 160(DI),AX - SHLQ $1,AX - MULQ 184(DI) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 160(DI),AX - SHLQ $1,AX - MULQ 192(DI) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 168(DI),AX - MULQ 168(DI) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 168(DI),AX - SHLQ $1,AX - MULQ 176(DI) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 168(DI),AX - SHLQ $1,AX - MULQ 184(DI) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 168(DI),DX - IMUL3Q $38,DX,AX - MULQ 192(DI) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 176(DI),AX - MULQ 176(DI) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 176(DI),DX - IMUL3Q $38,DX,AX - MULQ 184(DI) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 176(DI),DX - IMUL3Q $38,DX,AX - MULQ 192(DI) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 184(DI),DX - IMUL3Q $19,DX,AX - MULQ 184(DI) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 184(DI),DX - IMUL3Q $38,DX,AX - MULQ 192(DI) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 192(DI),DX - IMUL3Q $19,DX,AX - MULQ 192(DI) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ $REDMASK51,DX - SHLQ $13,SI,CX - ANDQ DX,SI - SHLQ $13,R8,R9 - ANDQ DX,R8 - ADDQ CX,R8 - SHLQ $13,R10,R11 - ANDQ DX,R10 - ADDQ R9,R10 - 
SHLQ $13,R12,R13 - ANDQ DX,R12 - ADDQ R11,R12 - SHLQ $13,R14,R15 - ANDQ DX,R14 - ADDQ R13,R14 - IMUL3Q $19,R15,CX - ADDQ CX,SI - MOVQ SI,CX - SHRQ $51,CX - ADDQ R8,CX - ANDQ DX,SI - MOVQ CX,R8 - SHRQ $51,CX - ADDQ R10,CX - ANDQ DX,R8 - MOVQ CX,R9 - SHRQ $51,CX - ADDQ R12,CX - ANDQ DX,R9 - MOVQ CX,AX - SHRQ $51,CX - ADDQ R14,CX - ANDQ DX,AX - MOVQ CX,R10 - SHRQ $51,CX - IMUL3Q $19,CX,CX - ADDQ CX,SI - ANDQ DX,R10 - MOVQ SI,160(DI) - MOVQ R8,168(DI) - MOVQ R9,176(DI) - MOVQ AX,184(DI) - MOVQ R10,192(DI) - MOVQ 184(DI),SI - IMUL3Q $19,SI,AX - MOVQ AX,0(SP) - MULQ 16(DI) - MOVQ AX,SI - MOVQ DX,CX - MOVQ 192(DI),DX - IMUL3Q $19,DX,AX - MOVQ AX,8(SP) - MULQ 8(DI) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 160(DI),AX - MULQ 0(DI) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 160(DI),AX - MULQ 8(DI) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 160(DI),AX - MULQ 16(DI) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 160(DI),AX - MULQ 24(DI) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 160(DI),AX - MULQ 32(DI) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 168(DI),AX - MULQ 0(DI) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 168(DI),AX - MULQ 8(DI) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 168(DI),AX - MULQ 16(DI) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 168(DI),AX - MULQ 24(DI) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 168(DI),DX - IMUL3Q $19,DX,AX - MULQ 32(DI) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 176(DI),AX - MULQ 0(DI) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 176(DI),AX - MULQ 8(DI) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 176(DI),AX - MULQ 16(DI) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 176(DI),DX - IMUL3Q $19,DX,AX - MULQ 24(DI) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 176(DI),DX - IMUL3Q $19,DX,AX - MULQ 32(DI) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 184(DI),AX - MULQ 0(DI) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 184(DI),AX - MULQ 8(DI) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 0(SP),AX - MULQ 24(DI) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 0(SP),AX - MULQ 32(DI) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 192(DI),AX - MULQ 0(DI) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 8(SP),AX - MULQ 16(DI) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 8(SP),AX - MULQ 24(DI) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 8(SP),AX - MULQ 32(DI) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ $REDMASK51,DX - SHLQ $13,SI,CX - ANDQ DX,SI - SHLQ $13,R8,R9 - ANDQ DX,R8 - ADDQ CX,R8 - SHLQ $13,R10,R11 - ANDQ DX,R10 - ADDQ R9,R10 - SHLQ $13,R12,R13 - ANDQ DX,R12 - ADDQ R11,R12 - SHLQ $13,R14,R15 - ANDQ DX,R14 - ADDQ R13,R14 - IMUL3Q $19,R15,CX - ADDQ CX,SI - MOVQ SI,CX - SHRQ $51,CX - ADDQ R8,CX - MOVQ CX,R8 - SHRQ $51,CX - ANDQ DX,SI - ADDQ R10,CX - MOVQ CX,R9 - SHRQ $51,CX - ANDQ DX,R8 - ADDQ R12,CX - MOVQ CX,AX - SHRQ $51,CX - ANDQ DX,R9 - ADDQ R14,CX - MOVQ CX,R10 - SHRQ $51,CX - ANDQ DX,AX - IMUL3Q $19,CX,CX - ADDQ CX,SI - ANDQ DX,R10 - MOVQ SI,160(DI) - MOVQ R8,168(DI) - MOVQ R9,176(DI) - MOVQ AX,184(DI) - MOVQ R10,192(DI) - MOVQ 144(SP),SI - IMUL3Q $19,SI,AX - MOVQ AX,0(SP) - MULQ 96(SP) - MOVQ AX,SI - MOVQ DX,CX - MOVQ 152(SP),DX - IMUL3Q $19,DX,AX - MOVQ AX,8(SP) - MULQ 88(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 120(SP),AX - MULQ 80(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 120(SP),AX - MULQ 88(SP) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 120(SP),AX - MULQ 96(SP) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 120(SP),AX - MULQ 104(SP) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 120(SP),AX - MULQ 112(SP) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 128(SP),AX - MULQ 80(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 128(SP),AX - MULQ 88(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 128(SP),AX - MULQ 96(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 128(SP),AX - MULQ 104(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 128(SP),DX - IMUL3Q $19,DX,AX - MULQ 112(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 
136(SP),AX - MULQ 80(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 136(SP),AX - MULQ 88(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 136(SP),AX - MULQ 96(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 136(SP),DX - IMUL3Q $19,DX,AX - MULQ 104(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 136(SP),DX - IMUL3Q $19,DX,AX - MULQ 112(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 144(SP),AX - MULQ 80(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 144(SP),AX - MULQ 88(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 0(SP),AX - MULQ 104(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 0(SP),AX - MULQ 112(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 152(SP),AX - MULQ 80(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 8(SP),AX - MULQ 96(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 8(SP),AX - MULQ 104(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 8(SP),AX - MULQ 112(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ $REDMASK51,DX - SHLQ $13,SI,CX - ANDQ DX,SI - SHLQ $13,R8,R9 - ANDQ DX,R8 - ADDQ CX,R8 - SHLQ $13,R10,R11 - ANDQ DX,R10 - ADDQ R9,R10 - SHLQ $13,R12,R13 - ANDQ DX,R12 - ADDQ R11,R12 - SHLQ $13,R14,R15 - ANDQ DX,R14 - ADDQ R13,R14 - IMUL3Q $19,R15,CX - ADDQ CX,SI - MOVQ SI,CX - SHRQ $51,CX - ADDQ R8,CX - MOVQ CX,R8 - SHRQ $51,CX - ANDQ DX,SI - ADDQ R10,CX - MOVQ CX,R9 - SHRQ $51,CX - ANDQ DX,R8 - ADDQ R12,CX - MOVQ CX,AX - SHRQ $51,CX - ANDQ DX,R9 - ADDQ R14,CX - MOVQ CX,R10 - SHRQ $51,CX - ANDQ DX,AX - IMUL3Q $19,CX,CX - ADDQ CX,SI - ANDQ DX,R10 - MOVQ SI,40(DI) - MOVQ R8,48(DI) - MOVQ R9,56(DI) - MOVQ AX,64(DI) - MOVQ R10,72(DI) - MOVQ 160(SP),AX - MULQ ·_121666_213(SB) - SHRQ $13,AX - MOVQ AX,SI - MOVQ DX,CX - MOVQ 168(SP),AX - MULQ ·_121666_213(SB) - SHRQ $13,AX - ADDQ AX,CX - MOVQ DX,R8 - MOVQ 176(SP),AX - MULQ ·_121666_213(SB) - SHRQ $13,AX - ADDQ AX,R8 - MOVQ DX,R9 - MOVQ 184(SP),AX - MULQ ·_121666_213(SB) - SHRQ $13,AX - ADDQ AX,R9 - MOVQ DX,R10 - MOVQ 192(SP),AX - MULQ ·_121666_213(SB) - SHRQ $13,AX - ADDQ AX,R10 - IMUL3Q $19,DX,DX - ADDQ DX,SI - ADDQ 80(SP),SI - ADDQ 88(SP),CX - ADDQ 96(SP),R8 - ADDQ 104(SP),R9 - ADDQ 112(SP),R10 - MOVQ SI,80(DI) - MOVQ CX,88(DI) - MOVQ R8,96(DI) - MOVQ R9,104(DI) - MOVQ R10,112(DI) - MOVQ 104(DI),SI - IMUL3Q $19,SI,AX - MOVQ AX,0(SP) - MULQ 176(SP) - MOVQ AX,SI - MOVQ DX,CX - MOVQ 112(DI),DX - IMUL3Q $19,DX,AX - MOVQ AX,8(SP) - MULQ 168(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 80(DI),AX - MULQ 160(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 80(DI),AX - MULQ 168(SP) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 80(DI),AX - MULQ 176(SP) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 80(DI),AX - MULQ 184(SP) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 80(DI),AX - MULQ 192(SP) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 88(DI),AX - MULQ 160(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 88(DI),AX - MULQ 168(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 88(DI),AX - MULQ 176(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 88(DI),AX - MULQ 184(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 88(DI),DX - IMUL3Q $19,DX,AX - MULQ 192(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 96(DI),AX - MULQ 160(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 96(DI),AX - MULQ 168(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 96(DI),AX - MULQ 176(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 96(DI),DX - IMUL3Q $19,DX,AX - MULQ 184(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 96(DI),DX - IMUL3Q $19,DX,AX - MULQ 192(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 104(DI),AX - MULQ 160(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 104(DI),AX - MULQ 168(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 0(SP),AX - MULQ 184(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 0(SP),AX - MULQ 192(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 112(DI),AX - MULQ 160(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 8(SP),AX - MULQ 176(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 
8(SP),AX - MULQ 184(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 8(SP),AX - MULQ 192(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ $REDMASK51,DX - SHLQ $13,SI,CX - ANDQ DX,SI - SHLQ $13,R8,R9 - ANDQ DX,R8 - ADDQ CX,R8 - SHLQ $13,R10,R11 - ANDQ DX,R10 - ADDQ R9,R10 - SHLQ $13,R12,R13 - ANDQ DX,R12 - ADDQ R11,R12 - SHLQ $13,R14,R15 - ANDQ DX,R14 - ADDQ R13,R14 - IMUL3Q $19,R15,CX - ADDQ CX,SI - MOVQ SI,CX - SHRQ $51,CX - ADDQ R8,CX - MOVQ CX,R8 - SHRQ $51,CX - ANDQ DX,SI - ADDQ R10,CX - MOVQ CX,R9 - SHRQ $51,CX - ANDQ DX,R8 - ADDQ R12,CX - MOVQ CX,AX - SHRQ $51,CX - ANDQ DX,R9 - ADDQ R14,CX - MOVQ CX,R10 - SHRQ $51,CX - ANDQ DX,AX - IMUL3Q $19,CX,CX - ADDQ CX,SI - ANDQ DX,R10 - MOVQ SI,80(DI) - MOVQ R8,88(DI) - MOVQ R9,96(DI) - MOVQ AX,104(DI) - MOVQ R10,112(DI) - RET - -// func cswap(inout *[4][5]uint64, v uint64) -TEXT ·cswap(SB),7,$0 - MOVQ inout+0(FP),DI - MOVQ v+8(FP),SI - - SUBQ $1, SI - NOTQ SI - MOVQ SI, X15 - PSHUFD $0x44, X15, X15 - - MOVOU 0(DI), X0 - MOVOU 16(DI), X2 - MOVOU 32(DI), X4 - MOVOU 48(DI), X6 - MOVOU 64(DI), X8 - MOVOU 80(DI), X1 - MOVOU 96(DI), X3 - MOVOU 112(DI), X5 - MOVOU 128(DI), X7 - MOVOU 144(DI), X9 - - MOVO X1, X10 - MOVO X3, X11 - MOVO X5, X12 - MOVO X7, X13 - MOVO X9, X14 - - PXOR X0, X10 - PXOR X2, X11 - PXOR X4, X12 - PXOR X6, X13 - PXOR X8, X14 - PAND X15, X10 - PAND X15, X11 - PAND X15, X12 - PAND X15, X13 - PAND X15, X14 - PXOR X10, X0 - PXOR X10, X1 - PXOR X11, X2 - PXOR X11, X3 - PXOR X12, X4 - PXOR X12, X5 - PXOR X13, X6 - PXOR X13, X7 - PXOR X14, X8 - PXOR X14, X9 - - MOVOU X0, 0(DI) - MOVOU X2, 16(DI) - MOVOU X4, 32(DI) - MOVOU X6, 48(DI) - MOVOU X8, 64(DI) - MOVOU X1, 80(DI) - MOVOU X3, 96(DI) - MOVOU X5, 112(DI) - MOVOU X7, 128(DI) - MOVOU X9, 144(DI) - RET - -// func mul(dest, a, b *[5]uint64) -TEXT ·mul(SB),0,$16-24 - MOVQ dest+0(FP), DI - MOVQ a+8(FP), SI - MOVQ b+16(FP), DX - - MOVQ DX,CX - MOVQ 24(SI),DX - IMUL3Q $19,DX,AX - MOVQ AX,0(SP) - MULQ 16(CX) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 32(SI),DX - IMUL3Q $19,DX,AX - MOVQ AX,8(SP) - MULQ 8(CX) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 0(SI),AX - MULQ 0(CX) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 0(SI),AX - MULQ 8(CX) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 0(SI),AX - MULQ 16(CX) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 0(SI),AX - MULQ 24(CX) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 0(SI),AX - MULQ 32(CX) - MOVQ AX,BX - MOVQ DX,BP - MOVQ 8(SI),AX - MULQ 0(CX) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 8(SI),AX - MULQ 8(CX) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 8(SI),AX - MULQ 16(CX) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 8(SI),AX - MULQ 24(CX) - ADDQ AX,BX - ADCQ DX,BP - MOVQ 8(SI),DX - IMUL3Q $19,DX,AX - MULQ 32(CX) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 16(SI),AX - MULQ 0(CX) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 16(SI),AX - MULQ 8(CX) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 16(SI),AX - MULQ 16(CX) - ADDQ AX,BX - ADCQ DX,BP - MOVQ 16(SI),DX - IMUL3Q $19,DX,AX - MULQ 24(CX) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 16(SI),DX - IMUL3Q $19,DX,AX - MULQ 32(CX) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 24(SI),AX - MULQ 0(CX) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 24(SI),AX - MULQ 8(CX) - ADDQ AX,BX - ADCQ DX,BP - MOVQ 0(SP),AX - MULQ 24(CX) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 0(SP),AX - MULQ 32(CX) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 32(SI),AX - MULQ 0(CX) - ADDQ AX,BX - ADCQ DX,BP - MOVQ 8(SP),AX - MULQ 16(CX) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 8(SP),AX - MULQ 24(CX) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 8(SP),AX - MULQ 32(CX) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ $REDMASK51,SI - SHLQ $13,R8,R9 - ANDQ SI,R8 - SHLQ $13,R10,R11 - ANDQ SI,R10 - ADDQ R9,R10 - SHLQ $13,R12,R13 - ANDQ SI,R12 - ADDQ 
R11,R12 - SHLQ $13,R14,R15 - ANDQ SI,R14 - ADDQ R13,R14 - SHLQ $13,BX,BP - ANDQ SI,BX - ADDQ R15,BX - IMUL3Q $19,BP,DX - ADDQ DX,R8 - MOVQ R8,DX - SHRQ $51,DX - ADDQ R10,DX - MOVQ DX,CX - SHRQ $51,DX - ANDQ SI,R8 - ADDQ R12,DX - MOVQ DX,R9 - SHRQ $51,DX - ANDQ SI,CX - ADDQ R14,DX - MOVQ DX,AX - SHRQ $51,DX - ANDQ SI,R9 - ADDQ BX,DX - MOVQ DX,R10 - SHRQ $51,DX - ANDQ SI,AX - IMUL3Q $19,DX,DX - ADDQ DX,R8 - ANDQ SI,R10 - MOVQ R8,0(DI) - MOVQ CX,8(DI) - MOVQ R9,16(DI) - MOVQ AX,24(DI) - MOVQ R10,32(DI) - RET - -// func square(out, in *[5]uint64) -TEXT ·square(SB),7,$0-16 - MOVQ out+0(FP), DI - MOVQ in+8(FP), SI - - MOVQ 0(SI),AX - MULQ 0(SI) - MOVQ AX,CX - MOVQ DX,R8 - MOVQ 0(SI),AX - SHLQ $1,AX - MULQ 8(SI) - MOVQ AX,R9 - MOVQ DX,R10 - MOVQ 0(SI),AX - SHLQ $1,AX - MULQ 16(SI) - MOVQ AX,R11 - MOVQ DX,R12 - MOVQ 0(SI),AX - SHLQ $1,AX - MULQ 24(SI) - MOVQ AX,R13 - MOVQ DX,R14 - MOVQ 0(SI),AX - SHLQ $1,AX - MULQ 32(SI) - MOVQ AX,R15 - MOVQ DX,BX - MOVQ 8(SI),AX - MULQ 8(SI) - ADDQ AX,R11 - ADCQ DX,R12 - MOVQ 8(SI),AX - SHLQ $1,AX - MULQ 16(SI) - ADDQ AX,R13 - ADCQ DX,R14 - MOVQ 8(SI),AX - SHLQ $1,AX - MULQ 24(SI) - ADDQ AX,R15 - ADCQ DX,BX - MOVQ 8(SI),DX - IMUL3Q $38,DX,AX - MULQ 32(SI) - ADDQ AX,CX - ADCQ DX,R8 - MOVQ 16(SI),AX - MULQ 16(SI) - ADDQ AX,R15 - ADCQ DX,BX - MOVQ 16(SI),DX - IMUL3Q $38,DX,AX - MULQ 24(SI) - ADDQ AX,CX - ADCQ DX,R8 - MOVQ 16(SI),DX - IMUL3Q $38,DX,AX - MULQ 32(SI) - ADDQ AX,R9 - ADCQ DX,R10 - MOVQ 24(SI),DX - IMUL3Q $19,DX,AX - MULQ 24(SI) - ADDQ AX,R9 - ADCQ DX,R10 - MOVQ 24(SI),DX - IMUL3Q $38,DX,AX - MULQ 32(SI) - ADDQ AX,R11 - ADCQ DX,R12 - MOVQ 32(SI),DX - IMUL3Q $19,DX,AX - MULQ 32(SI) - ADDQ AX,R13 - ADCQ DX,R14 - MOVQ $REDMASK51,SI - SHLQ $13,CX,R8 - ANDQ SI,CX - SHLQ $13,R9,R10 - ANDQ SI,R9 - ADDQ R8,R9 - SHLQ $13,R11,R12 - ANDQ SI,R11 - ADDQ R10,R11 - SHLQ $13,R13,R14 - ANDQ SI,R13 - ADDQ R12,R13 - SHLQ $13,R15,BX - ANDQ SI,R15 - ADDQ R14,R15 - IMUL3Q $19,BX,DX - ADDQ DX,CX - MOVQ CX,DX - SHRQ $51,DX - ADDQ R9,DX - ANDQ SI,CX - MOVQ DX,R8 - SHRQ $51,DX - ADDQ R11,DX - ANDQ SI,R8 - MOVQ DX,R9 - SHRQ $51,DX - ADDQ R13,DX - ANDQ SI,R9 - MOVQ DX,AX - SHRQ $51,DX - ADDQ R15,DX - ANDQ SI,AX - MOVQ DX,R10 - SHRQ $51,DX - IMUL3Q $19,DX,DX - ADDQ DX,CX - ANDQ SI,R10 - MOVQ CX,0(DI) - MOVQ R8,8(DI) - MOVQ R9,16(DI) - MOVQ AX,24(DI) - MOVQ R10,32(DI) - RET diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519_generic.go b/vendor/golang.org/x/crypto/curve25519/curve25519_generic.go deleted file mode 100644 index c43b13fc83..0000000000 --- a/vendor/golang.org/x/crypto/curve25519/curve25519_generic.go +++ /dev/null @@ -1,828 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package curve25519 - -import "encoding/binary" - -// This code is a port of the public domain, "ref10" implementation of -// curve25519 from SUPERCOP 20130419 by D. J. Bernstein. - -// fieldElement represents an element of the field GF(2^255 - 19). An element -// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77 -// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on -// context. 
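// The limbs described in the comment above alternate 26- and 25-bit widths
// ("radix 2^25.5"); the offsets given there are 0, 26, 51, 77, 102 and 230, and
// the remaining ones (128, 153, 179, 204) follow the same alternating pattern.
// A sketch that re-assembles the represented integer from the ten limbs with
// math/big — illustrative only, not part of the deleted code:
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Bit offsets of t[0]..t[9].
	offsets := []uint{0, 26, 51, 77, 102, 128, 153, 179, 204, 230}
	t := [10]int64{1, 0, 0, 0, 0, 0, 0, 0, 0, 2} // represents 1 + 2*2^230

	v := new(big.Int)
	for i, off := range offsets {
		term := new(big.Int).Lsh(big.NewInt(t[i]), off)
		v.Add(v, term)
	}
	fmt.Println(v) // = 1 + 2*2^230
}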
-type fieldElement [10]int32 - -func feZero(fe *fieldElement) { - for i := range fe { - fe[i] = 0 - } -} - -func feOne(fe *fieldElement) { - feZero(fe) - fe[0] = 1 -} - -func feAdd(dst, a, b *fieldElement) { - for i := range dst { - dst[i] = a[i] + b[i] - } -} - -func feSub(dst, a, b *fieldElement) { - for i := range dst { - dst[i] = a[i] - b[i] - } -} - -func feCopy(dst, src *fieldElement) { - for i := range dst { - dst[i] = src[i] - } -} - -// feCSwap replaces (f,g) with (g,f) if b == 1; replaces (f,g) with (f,g) if b == 0. -// -// Preconditions: b in {0,1}. -func feCSwap(f, g *fieldElement, b int32) { - b = -b - for i := range f { - t := b & (f[i] ^ g[i]) - f[i] ^= t - g[i] ^= t - } -} - -// load3 reads a 24-bit, little-endian value from in. -func load3(in []byte) int64 { - var r int64 - r = int64(in[0]) - r |= int64(in[1]) << 8 - r |= int64(in[2]) << 16 - return r -} - -// load4 reads a 32-bit, little-endian value from in. -func load4(in []byte) int64 { - return int64(binary.LittleEndian.Uint32(in)) -} - -func feFromBytes(dst *fieldElement, src *[32]byte) { - h0 := load4(src[:]) - h1 := load3(src[4:]) << 6 - h2 := load3(src[7:]) << 5 - h3 := load3(src[10:]) << 3 - h4 := load3(src[13:]) << 2 - h5 := load4(src[16:]) - h6 := load3(src[20:]) << 7 - h7 := load3(src[23:]) << 5 - h8 := load3(src[26:]) << 4 - h9 := (load3(src[29:]) & 0x7fffff) << 2 - - var carry [10]int64 - carry[9] = (h9 + 1<<24) >> 25 - h0 += carry[9] * 19 - h9 -= carry[9] << 25 - carry[1] = (h1 + 1<<24) >> 25 - h2 += carry[1] - h1 -= carry[1] << 25 - carry[3] = (h3 + 1<<24) >> 25 - h4 += carry[3] - h3 -= carry[3] << 25 - carry[5] = (h5 + 1<<24) >> 25 - h6 += carry[5] - h5 -= carry[5] << 25 - carry[7] = (h7 + 1<<24) >> 25 - h8 += carry[7] - h7 -= carry[7] << 25 - - carry[0] = (h0 + 1<<25) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - carry[2] = (h2 + 1<<25) >> 26 - h3 += carry[2] - h2 -= carry[2] << 26 - carry[4] = (h4 + 1<<25) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - carry[6] = (h6 + 1<<25) >> 26 - h7 += carry[6] - h6 -= carry[6] << 26 - carry[8] = (h8 + 1<<25) >> 26 - h9 += carry[8] - h8 -= carry[8] << 26 - - dst[0] = int32(h0) - dst[1] = int32(h1) - dst[2] = int32(h2) - dst[3] = int32(h3) - dst[4] = int32(h4) - dst[5] = int32(h5) - dst[6] = int32(h6) - dst[7] = int32(h7) - dst[8] = int32(h8) - dst[9] = int32(h9) -} - -// feToBytes marshals h to s. -// Preconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -// -// Write p=2^255-19; q=floor(h/p). -// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))). -// -// Proof: -// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4. -// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4. -// -// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9). -// Then 0> 25 - q = (h[0] + q) >> 26 - q = (h[1] + q) >> 25 - q = (h[2] + q) >> 26 - q = (h[3] + q) >> 25 - q = (h[4] + q) >> 26 - q = (h[5] + q) >> 25 - q = (h[6] + q) >> 26 - q = (h[7] + q) >> 25 - q = (h[8] + q) >> 26 - q = (h[9] + q) >> 25 - - // Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20. - h[0] += 19 * q - // Goal: Output h-2^255 q, which is between 0 and 2^255-20. 
- - carry[0] = h[0] >> 26 - h[1] += carry[0] - h[0] -= carry[0] << 26 - carry[1] = h[1] >> 25 - h[2] += carry[1] - h[1] -= carry[1] << 25 - carry[2] = h[2] >> 26 - h[3] += carry[2] - h[2] -= carry[2] << 26 - carry[3] = h[3] >> 25 - h[4] += carry[3] - h[3] -= carry[3] << 25 - carry[4] = h[4] >> 26 - h[5] += carry[4] - h[4] -= carry[4] << 26 - carry[5] = h[5] >> 25 - h[6] += carry[5] - h[5] -= carry[5] << 25 - carry[6] = h[6] >> 26 - h[7] += carry[6] - h[6] -= carry[6] << 26 - carry[7] = h[7] >> 25 - h[8] += carry[7] - h[7] -= carry[7] << 25 - carry[8] = h[8] >> 26 - h[9] += carry[8] - h[8] -= carry[8] << 26 - carry[9] = h[9] >> 25 - h[9] -= carry[9] << 25 - // h10 = carry9 - - // Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20. - // Have h[0]+...+2^230 h[9] between 0 and 2^255-1; - // evidently 2^255 h10-2^255 q = 0. - // Goal: Output h[0]+...+2^230 h[9]. - - s[0] = byte(h[0] >> 0) - s[1] = byte(h[0] >> 8) - s[2] = byte(h[0] >> 16) - s[3] = byte((h[0] >> 24) | (h[1] << 2)) - s[4] = byte(h[1] >> 6) - s[5] = byte(h[1] >> 14) - s[6] = byte((h[1] >> 22) | (h[2] << 3)) - s[7] = byte(h[2] >> 5) - s[8] = byte(h[2] >> 13) - s[9] = byte((h[2] >> 21) | (h[3] << 5)) - s[10] = byte(h[3] >> 3) - s[11] = byte(h[3] >> 11) - s[12] = byte((h[3] >> 19) | (h[4] << 6)) - s[13] = byte(h[4] >> 2) - s[14] = byte(h[4] >> 10) - s[15] = byte(h[4] >> 18) - s[16] = byte(h[5] >> 0) - s[17] = byte(h[5] >> 8) - s[18] = byte(h[5] >> 16) - s[19] = byte((h[5] >> 24) | (h[6] << 1)) - s[20] = byte(h[6] >> 7) - s[21] = byte(h[6] >> 15) - s[22] = byte((h[6] >> 23) | (h[7] << 3)) - s[23] = byte(h[7] >> 5) - s[24] = byte(h[7] >> 13) - s[25] = byte((h[7] >> 21) | (h[8] << 4)) - s[26] = byte(h[8] >> 4) - s[27] = byte(h[8] >> 12) - s[28] = byte((h[8] >> 20) | (h[9] << 6)) - s[29] = byte(h[9] >> 2) - s[30] = byte(h[9] >> 10) - s[31] = byte(h[9] >> 18) -} - -// feMul calculates h = f * g -// Can overlap h with f or g. -// -// Preconditions: -// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -// -// Notes on implementation strategy: -// -// Using schoolbook multiplication. -// Karatsuba would save a little in some cost models. -// -// Most multiplications by 2 and 19 are 32-bit precomputations; -// cheaper than 64-bit postcomputations. -// -// There is one remaining multiplication by 19 in the carry chain; -// one *19 precomputation can be merged into this, -// but the resulting data flow is considerably less clean. -// -// There are 12 carries below. -// 10 of them are 2-way parallelizable and vectorizable. -// Can get away with 11 carries, but then data flow is much deeper. -// -// With tighter constraints on inputs can squeeze carries into int32. 
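// The 19-scaled precomputations used below (g1_19, f9_38, and so on) come from
// the identity 2^255 = 19 (mod 2^255 - 19): any partial product that lands at
// bit 255 or above folds back into the low limbs scaled by 19, and by 2*19 = 38
// when a doubled limb is also involved. A one-line math/big check of the
// identity, for illustration only:
package main

import (
	"fmt"
	"math/big"
)

func main() {
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))
	r := new(big.Int).Mod(new(big.Int).Lsh(big.NewInt(1), 255), p)
	fmt.Println(r) // 19
}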
-func feMul(h, f, g *fieldElement) { - f0 := f[0] - f1 := f[1] - f2 := f[2] - f3 := f[3] - f4 := f[4] - f5 := f[5] - f6 := f[6] - f7 := f[7] - f8 := f[8] - f9 := f[9] - g0 := g[0] - g1 := g[1] - g2 := g[2] - g3 := g[3] - g4 := g[4] - g5 := g[5] - g6 := g[6] - g7 := g[7] - g8 := g[8] - g9 := g[9] - g1_19 := 19 * g1 // 1.4*2^29 - g2_19 := 19 * g2 // 1.4*2^30; still ok - g3_19 := 19 * g3 - g4_19 := 19 * g4 - g5_19 := 19 * g5 - g6_19 := 19 * g6 - g7_19 := 19 * g7 - g8_19 := 19 * g8 - g9_19 := 19 * g9 - f1_2 := 2 * f1 - f3_2 := 2 * f3 - f5_2 := 2 * f5 - f7_2 := 2 * f7 - f9_2 := 2 * f9 - f0g0 := int64(f0) * int64(g0) - f0g1 := int64(f0) * int64(g1) - f0g2 := int64(f0) * int64(g2) - f0g3 := int64(f0) * int64(g3) - f0g4 := int64(f0) * int64(g4) - f0g5 := int64(f0) * int64(g5) - f0g6 := int64(f0) * int64(g6) - f0g7 := int64(f0) * int64(g7) - f0g8 := int64(f0) * int64(g8) - f0g9 := int64(f0) * int64(g9) - f1g0 := int64(f1) * int64(g0) - f1g1_2 := int64(f1_2) * int64(g1) - f1g2 := int64(f1) * int64(g2) - f1g3_2 := int64(f1_2) * int64(g3) - f1g4 := int64(f1) * int64(g4) - f1g5_2 := int64(f1_2) * int64(g5) - f1g6 := int64(f1) * int64(g6) - f1g7_2 := int64(f1_2) * int64(g7) - f1g8 := int64(f1) * int64(g8) - f1g9_38 := int64(f1_2) * int64(g9_19) - f2g0 := int64(f2) * int64(g0) - f2g1 := int64(f2) * int64(g1) - f2g2 := int64(f2) * int64(g2) - f2g3 := int64(f2) * int64(g3) - f2g4 := int64(f2) * int64(g4) - f2g5 := int64(f2) * int64(g5) - f2g6 := int64(f2) * int64(g6) - f2g7 := int64(f2) * int64(g7) - f2g8_19 := int64(f2) * int64(g8_19) - f2g9_19 := int64(f2) * int64(g9_19) - f3g0 := int64(f3) * int64(g0) - f3g1_2 := int64(f3_2) * int64(g1) - f3g2 := int64(f3) * int64(g2) - f3g3_2 := int64(f3_2) * int64(g3) - f3g4 := int64(f3) * int64(g4) - f3g5_2 := int64(f3_2) * int64(g5) - f3g6 := int64(f3) * int64(g6) - f3g7_38 := int64(f3_2) * int64(g7_19) - f3g8_19 := int64(f3) * int64(g8_19) - f3g9_38 := int64(f3_2) * int64(g9_19) - f4g0 := int64(f4) * int64(g0) - f4g1 := int64(f4) * int64(g1) - f4g2 := int64(f4) * int64(g2) - f4g3 := int64(f4) * int64(g3) - f4g4 := int64(f4) * int64(g4) - f4g5 := int64(f4) * int64(g5) - f4g6_19 := int64(f4) * int64(g6_19) - f4g7_19 := int64(f4) * int64(g7_19) - f4g8_19 := int64(f4) * int64(g8_19) - f4g9_19 := int64(f4) * int64(g9_19) - f5g0 := int64(f5) * int64(g0) - f5g1_2 := int64(f5_2) * int64(g1) - f5g2 := int64(f5) * int64(g2) - f5g3_2 := int64(f5_2) * int64(g3) - f5g4 := int64(f5) * int64(g4) - f5g5_38 := int64(f5_2) * int64(g5_19) - f5g6_19 := int64(f5) * int64(g6_19) - f5g7_38 := int64(f5_2) * int64(g7_19) - f5g8_19 := int64(f5) * int64(g8_19) - f5g9_38 := int64(f5_2) * int64(g9_19) - f6g0 := int64(f6) * int64(g0) - f6g1 := int64(f6) * int64(g1) - f6g2 := int64(f6) * int64(g2) - f6g3 := int64(f6) * int64(g3) - f6g4_19 := int64(f6) * int64(g4_19) - f6g5_19 := int64(f6) * int64(g5_19) - f6g6_19 := int64(f6) * int64(g6_19) - f6g7_19 := int64(f6) * int64(g7_19) - f6g8_19 := int64(f6) * int64(g8_19) - f6g9_19 := int64(f6) * int64(g9_19) - f7g0 := int64(f7) * int64(g0) - f7g1_2 := int64(f7_2) * int64(g1) - f7g2 := int64(f7) * int64(g2) - f7g3_38 := int64(f7_2) * int64(g3_19) - f7g4_19 := int64(f7) * int64(g4_19) - f7g5_38 := int64(f7_2) * int64(g5_19) - f7g6_19 := int64(f7) * int64(g6_19) - f7g7_38 := int64(f7_2) * int64(g7_19) - f7g8_19 := int64(f7) * int64(g8_19) - f7g9_38 := int64(f7_2) * int64(g9_19) - f8g0 := int64(f8) * int64(g0) - f8g1 := int64(f8) * int64(g1) - f8g2_19 := int64(f8) * int64(g2_19) - f8g3_19 := int64(f8) * int64(g3_19) - f8g4_19 := int64(f8) * int64(g4_19) - 
f8g5_19 := int64(f8) * int64(g5_19) - f8g6_19 := int64(f8) * int64(g6_19) - f8g7_19 := int64(f8) * int64(g7_19) - f8g8_19 := int64(f8) * int64(g8_19) - f8g9_19 := int64(f8) * int64(g9_19) - f9g0 := int64(f9) * int64(g0) - f9g1_38 := int64(f9_2) * int64(g1_19) - f9g2_19 := int64(f9) * int64(g2_19) - f9g3_38 := int64(f9_2) * int64(g3_19) - f9g4_19 := int64(f9) * int64(g4_19) - f9g5_38 := int64(f9_2) * int64(g5_19) - f9g6_19 := int64(f9) * int64(g6_19) - f9g7_38 := int64(f9_2) * int64(g7_19) - f9g8_19 := int64(f9) * int64(g8_19) - f9g9_38 := int64(f9_2) * int64(g9_19) - h0 := f0g0 + f1g9_38 + f2g8_19 + f3g7_38 + f4g6_19 + f5g5_38 + f6g4_19 + f7g3_38 + f8g2_19 + f9g1_38 - h1 := f0g1 + f1g0 + f2g9_19 + f3g8_19 + f4g7_19 + f5g6_19 + f6g5_19 + f7g4_19 + f8g3_19 + f9g2_19 - h2 := f0g2 + f1g1_2 + f2g0 + f3g9_38 + f4g8_19 + f5g7_38 + f6g6_19 + f7g5_38 + f8g4_19 + f9g3_38 - h3 := f0g3 + f1g2 + f2g1 + f3g0 + f4g9_19 + f5g8_19 + f6g7_19 + f7g6_19 + f8g5_19 + f9g4_19 - h4 := f0g4 + f1g3_2 + f2g2 + f3g1_2 + f4g0 + f5g9_38 + f6g8_19 + f7g7_38 + f8g6_19 + f9g5_38 - h5 := f0g5 + f1g4 + f2g3 + f3g2 + f4g1 + f5g0 + f6g9_19 + f7g8_19 + f8g7_19 + f9g6_19 - h6 := f0g6 + f1g5_2 + f2g4 + f3g3_2 + f4g2 + f5g1_2 + f6g0 + f7g9_38 + f8g8_19 + f9g7_38 - h7 := f0g7 + f1g6 + f2g5 + f3g4 + f4g3 + f5g2 + f6g1 + f7g0 + f8g9_19 + f9g8_19 - h8 := f0g8 + f1g7_2 + f2g6 + f3g5_2 + f4g4 + f5g3_2 + f6g2 + f7g1_2 + f8g0 + f9g9_38 - h9 := f0g9 + f1g8 + f2g7 + f3g6 + f4g5 + f5g4 + f6g3 + f7g2 + f8g1 + f9g0 - var carry [10]int64 - - // |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38)) - // i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8 - // |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19)) - // i.e. |h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9 - - carry[0] = (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - // |h0| <= 2^25 - // |h4| <= 2^25 - // |h1| <= 1.51*2^58 - // |h5| <= 1.51*2^58 - - carry[1] = (h1 + (1 << 24)) >> 25 - h2 += carry[1] - h1 -= carry[1] << 25 - carry[5] = (h5 + (1 << 24)) >> 25 - h6 += carry[5] - h5 -= carry[5] << 25 - // |h1| <= 2^24; from now on fits into int32 - // |h5| <= 2^24; from now on fits into int32 - // |h2| <= 1.21*2^59 - // |h6| <= 1.21*2^59 - - carry[2] = (h2 + (1 << 25)) >> 26 - h3 += carry[2] - h2 -= carry[2] << 26 - carry[6] = (h6 + (1 << 25)) >> 26 - h7 += carry[6] - h6 -= carry[6] << 26 - // |h2| <= 2^25; from now on fits into int32 unchanged - // |h6| <= 2^25; from now on fits into int32 unchanged - // |h3| <= 1.51*2^58 - // |h7| <= 1.51*2^58 - - carry[3] = (h3 + (1 << 24)) >> 25 - h4 += carry[3] - h3 -= carry[3] << 25 - carry[7] = (h7 + (1 << 24)) >> 25 - h8 += carry[7] - h7 -= carry[7] << 25 - // |h3| <= 2^24; from now on fits into int32 unchanged - // |h7| <= 2^24; from now on fits into int32 unchanged - // |h4| <= 1.52*2^33 - // |h8| <= 1.52*2^33 - - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - carry[8] = (h8 + (1 << 25)) >> 26 - h9 += carry[8] - h8 -= carry[8] << 26 - // |h4| <= 2^25; from now on fits into int32 unchanged - // |h8| <= 2^25; from now on fits into int32 unchanged - // |h5| <= 1.01*2^24 - // |h9| <= 1.51*2^58 - - carry[9] = (h9 + (1 << 24)) >> 25 - h0 += carry[9] * 19 - h9 -= carry[9] << 25 - // |h9| <= 2^24; from now on fits into int32 unchanged - // |h0| <= 1.8*2^37 - - carry[0] = (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - // |h0| <= 2^25; from now on fits into int32 unchanged - // |h1| <= 
1.01*2^24 - - h[0] = int32(h0) - h[1] = int32(h1) - h[2] = int32(h2) - h[3] = int32(h3) - h[4] = int32(h4) - h[5] = int32(h5) - h[6] = int32(h6) - h[7] = int32(h7) - h[8] = int32(h8) - h[9] = int32(h9) -} - -// feSquare calculates h = f*f. Can overlap h with f. -// -// Preconditions: -// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -func feSquare(h, f *fieldElement) { - f0 := f[0] - f1 := f[1] - f2 := f[2] - f3 := f[3] - f4 := f[4] - f5 := f[5] - f6 := f[6] - f7 := f[7] - f8 := f[8] - f9 := f[9] - f0_2 := 2 * f0 - f1_2 := 2 * f1 - f2_2 := 2 * f2 - f3_2 := 2 * f3 - f4_2 := 2 * f4 - f5_2 := 2 * f5 - f6_2 := 2 * f6 - f7_2 := 2 * f7 - f5_38 := 38 * f5 // 1.31*2^30 - f6_19 := 19 * f6 // 1.31*2^30 - f7_38 := 38 * f7 // 1.31*2^30 - f8_19 := 19 * f8 // 1.31*2^30 - f9_38 := 38 * f9 // 1.31*2^30 - f0f0 := int64(f0) * int64(f0) - f0f1_2 := int64(f0_2) * int64(f1) - f0f2_2 := int64(f0_2) * int64(f2) - f0f3_2 := int64(f0_2) * int64(f3) - f0f4_2 := int64(f0_2) * int64(f4) - f0f5_2 := int64(f0_2) * int64(f5) - f0f6_2 := int64(f0_2) * int64(f6) - f0f7_2 := int64(f0_2) * int64(f7) - f0f8_2 := int64(f0_2) * int64(f8) - f0f9_2 := int64(f0_2) * int64(f9) - f1f1_2 := int64(f1_2) * int64(f1) - f1f2_2 := int64(f1_2) * int64(f2) - f1f3_4 := int64(f1_2) * int64(f3_2) - f1f4_2 := int64(f1_2) * int64(f4) - f1f5_4 := int64(f1_2) * int64(f5_2) - f1f6_2 := int64(f1_2) * int64(f6) - f1f7_4 := int64(f1_2) * int64(f7_2) - f1f8_2 := int64(f1_2) * int64(f8) - f1f9_76 := int64(f1_2) * int64(f9_38) - f2f2 := int64(f2) * int64(f2) - f2f3_2 := int64(f2_2) * int64(f3) - f2f4_2 := int64(f2_2) * int64(f4) - f2f5_2 := int64(f2_2) * int64(f5) - f2f6_2 := int64(f2_2) * int64(f6) - f2f7_2 := int64(f2_2) * int64(f7) - f2f8_38 := int64(f2_2) * int64(f8_19) - f2f9_38 := int64(f2) * int64(f9_38) - f3f3_2 := int64(f3_2) * int64(f3) - f3f4_2 := int64(f3_2) * int64(f4) - f3f5_4 := int64(f3_2) * int64(f5_2) - f3f6_2 := int64(f3_2) * int64(f6) - f3f7_76 := int64(f3_2) * int64(f7_38) - f3f8_38 := int64(f3_2) * int64(f8_19) - f3f9_76 := int64(f3_2) * int64(f9_38) - f4f4 := int64(f4) * int64(f4) - f4f5_2 := int64(f4_2) * int64(f5) - f4f6_38 := int64(f4_2) * int64(f6_19) - f4f7_38 := int64(f4) * int64(f7_38) - f4f8_38 := int64(f4_2) * int64(f8_19) - f4f9_38 := int64(f4) * int64(f9_38) - f5f5_38 := int64(f5) * int64(f5_38) - f5f6_38 := int64(f5_2) * int64(f6_19) - f5f7_76 := int64(f5_2) * int64(f7_38) - f5f8_38 := int64(f5_2) * int64(f8_19) - f5f9_76 := int64(f5_2) * int64(f9_38) - f6f6_19 := int64(f6) * int64(f6_19) - f6f7_38 := int64(f6) * int64(f7_38) - f6f8_38 := int64(f6_2) * int64(f8_19) - f6f9_38 := int64(f6) * int64(f9_38) - f7f7_38 := int64(f7) * int64(f7_38) - f7f8_38 := int64(f7_2) * int64(f8_19) - f7f9_76 := int64(f7_2) * int64(f9_38) - f8f8_19 := int64(f8) * int64(f8_19) - f8f9_38 := int64(f8) * int64(f9_38) - f9f9_38 := int64(f9) * int64(f9_38) - h0 := f0f0 + f1f9_76 + f2f8_38 + f3f7_76 + f4f6_38 + f5f5_38 - h1 := f0f1_2 + f2f9_38 + f3f8_38 + f4f7_38 + f5f6_38 - h2 := f0f2_2 + f1f1_2 + f3f9_76 + f4f8_38 + f5f7_76 + f6f6_19 - h3 := f0f3_2 + f1f2_2 + f4f9_38 + f5f8_38 + f6f7_38 - h4 := f0f4_2 + f1f3_4 + f2f2 + f5f9_76 + f6f8_38 + f7f7_38 - h5 := f0f5_2 + f1f4_2 + f2f3_2 + f6f9_38 + f7f8_38 - h6 := f0f6_2 + f1f5_4 + f2f4_2 + f3f3_2 + f7f9_76 + f8f8_19 - h7 := f0f7_2 + f1f6_2 + f2f5_2 + f3f4_2 + f8f9_38 - h8 := f0f8_2 + f1f7_4 + f2f6_2 + f3f5_4 + f4f4 + f9f9_38 - h9 := f0f9_2 + f1f8_2 + f2f7_2 + f3f6_2 + f4f5_2 - var carry [10]int64 - - carry[0] 
= (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - - carry[1] = (h1 + (1 << 24)) >> 25 - h2 += carry[1] - h1 -= carry[1] << 25 - carry[5] = (h5 + (1 << 24)) >> 25 - h6 += carry[5] - h5 -= carry[5] << 25 - - carry[2] = (h2 + (1 << 25)) >> 26 - h3 += carry[2] - h2 -= carry[2] << 26 - carry[6] = (h6 + (1 << 25)) >> 26 - h7 += carry[6] - h6 -= carry[6] << 26 - - carry[3] = (h3 + (1 << 24)) >> 25 - h4 += carry[3] - h3 -= carry[3] << 25 - carry[7] = (h7 + (1 << 24)) >> 25 - h8 += carry[7] - h7 -= carry[7] << 25 - - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - carry[8] = (h8 + (1 << 25)) >> 26 - h9 += carry[8] - h8 -= carry[8] << 26 - - carry[9] = (h9 + (1 << 24)) >> 25 - h0 += carry[9] * 19 - h9 -= carry[9] << 25 - - carry[0] = (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - - h[0] = int32(h0) - h[1] = int32(h1) - h[2] = int32(h2) - h[3] = int32(h3) - h[4] = int32(h4) - h[5] = int32(h5) - h[6] = int32(h6) - h[7] = int32(h7) - h[8] = int32(h8) - h[9] = int32(h9) -} - -// feMul121666 calculates h = f * 121666. Can overlap h with f. -// -// Preconditions: -// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -func feMul121666(h, f *fieldElement) { - h0 := int64(f[0]) * 121666 - h1 := int64(f[1]) * 121666 - h2 := int64(f[2]) * 121666 - h3 := int64(f[3]) * 121666 - h4 := int64(f[4]) * 121666 - h5 := int64(f[5]) * 121666 - h6 := int64(f[6]) * 121666 - h7 := int64(f[7]) * 121666 - h8 := int64(f[8]) * 121666 - h9 := int64(f[9]) * 121666 - var carry [10]int64 - - carry[9] = (h9 + (1 << 24)) >> 25 - h0 += carry[9] * 19 - h9 -= carry[9] << 25 - carry[1] = (h1 + (1 << 24)) >> 25 - h2 += carry[1] - h1 -= carry[1] << 25 - carry[3] = (h3 + (1 << 24)) >> 25 - h4 += carry[3] - h3 -= carry[3] << 25 - carry[5] = (h5 + (1 << 24)) >> 25 - h6 += carry[5] - h5 -= carry[5] << 25 - carry[7] = (h7 + (1 << 24)) >> 25 - h8 += carry[7] - h7 -= carry[7] << 25 - - carry[0] = (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - carry[2] = (h2 + (1 << 25)) >> 26 - h3 += carry[2] - h2 -= carry[2] << 26 - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - carry[6] = (h6 + (1 << 25)) >> 26 - h7 += carry[6] - h6 -= carry[6] << 26 - carry[8] = (h8 + (1 << 25)) >> 26 - h9 += carry[8] - h8 -= carry[8] << 26 - - h[0] = int32(h0) - h[1] = int32(h1) - h[2] = int32(h2) - h[3] = int32(h3) - h[4] = int32(h4) - h[5] = int32(h5) - h[6] = int32(h6) - h[7] = int32(h7) - h[8] = int32(h8) - h[9] = int32(h9) -} - -// feInvert sets out = z^-1. 
-func feInvert(out, z *fieldElement) { - var t0, t1, t2, t3 fieldElement - var i int - - feSquare(&t0, z) - for i = 1; i < 1; i++ { - feSquare(&t0, &t0) - } - feSquare(&t1, &t0) - for i = 1; i < 2; i++ { - feSquare(&t1, &t1) - } - feMul(&t1, z, &t1) - feMul(&t0, &t0, &t1) - feSquare(&t2, &t0) - for i = 1; i < 1; i++ { - feSquare(&t2, &t2) - } - feMul(&t1, &t1, &t2) - feSquare(&t2, &t1) - for i = 1; i < 5; i++ { - feSquare(&t2, &t2) - } - feMul(&t1, &t2, &t1) - feSquare(&t2, &t1) - for i = 1; i < 10; i++ { - feSquare(&t2, &t2) - } - feMul(&t2, &t2, &t1) - feSquare(&t3, &t2) - for i = 1; i < 20; i++ { - feSquare(&t3, &t3) - } - feMul(&t2, &t3, &t2) - feSquare(&t2, &t2) - for i = 1; i < 10; i++ { - feSquare(&t2, &t2) - } - feMul(&t1, &t2, &t1) - feSquare(&t2, &t1) - for i = 1; i < 50; i++ { - feSquare(&t2, &t2) - } - feMul(&t2, &t2, &t1) - feSquare(&t3, &t2) - for i = 1; i < 100; i++ { - feSquare(&t3, &t3) - } - feMul(&t2, &t3, &t2) - feSquare(&t2, &t2) - for i = 1; i < 50; i++ { - feSquare(&t2, &t2) - } - feMul(&t1, &t2, &t1) - feSquare(&t1, &t1) - for i = 1; i < 5; i++ { - feSquare(&t1, &t1) - } - feMul(out, &t1, &t0) -} - -func scalarMultGeneric(out, in, base *[32]byte) { - var e [32]byte - - copy(e[:], in[:]) - e[0] &= 248 - e[31] &= 127 - e[31] |= 64 - - var x1, x2, z2, x3, z3, tmp0, tmp1 fieldElement - feFromBytes(&x1, base) - feOne(&x2) - feCopy(&x3, &x1) - feOne(&z3) - - swap := int32(0) - for pos := 254; pos >= 0; pos-- { - b := e[pos/8] >> uint(pos&7) - b &= 1 - swap ^= int32(b) - feCSwap(&x2, &x3, swap) - feCSwap(&z2, &z3, swap) - swap = int32(b) - - feSub(&tmp0, &x3, &z3) - feSub(&tmp1, &x2, &z2) - feAdd(&x2, &x2, &z2) - feAdd(&z2, &x3, &z3) - feMul(&z3, &tmp0, &x2) - feMul(&z2, &z2, &tmp1) - feSquare(&tmp0, &tmp1) - feSquare(&tmp1, &x2) - feAdd(&x3, &z3, &z2) - feSub(&z2, &z3, &z2) - feMul(&x2, &tmp1, &tmp0) - feSub(&tmp1, &tmp1, &tmp0) - feSquare(&z2, &z2) - feMul121666(&z3, &tmp1) - feSquare(&x3, &x3) - feAdd(&tmp0, &tmp0, &z3) - feMul(&z3, &x1, &z2) - feMul(&z2, &tmp1, &tmp0) - } - - feCSwap(&x2, &x3, swap) - feCSwap(&z2, &z3, swap) - - feInvert(&z2, &z2) - feMul(&x2, &x2, &z2) - feToBytes(out, &x2) -} diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519_noasm.go b/vendor/golang.org/x/crypto/curve25519/curve25519_noasm.go deleted file mode 100644 index 80d3300af5..0000000000 --- a/vendor/golang.org/x/crypto/curve25519/curve25519_noasm.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !amd64 !gc purego - -package curve25519 - -func scalarMult(out, in, base *[32]byte) { - scalarMultGeneric(out, in, base) -} diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/README b/vendor/golang.org/x/crypto/curve25519/internal/field/README new file mode 100644 index 0000000000..e25bca7dc8 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/internal/field/README @@ -0,0 +1,7 @@ +This package is kept in sync with crypto/ed25519/internal/edwards25519/field in +the standard library. + +If there are any changes in the standard library that need to be synced to this +package, run sync.sh. It will not overwrite any local changes made since the +previous sync, so it's ok to land changes in this package first, and then sync +to the standard library later. 
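The feInvert routine removed above and the Invert method introduced in the new field package below both compute z^-1 by raising z to p - 2 (Fermat's little theorem), where p = 2^255 - 19. As an illustrative sketch only, separate from the vendored files and assuming nothing beyond the standard library's math/big, the identity can be checked numerically like this:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// p = 2^255 - 19, the prime modulus used throughout curve25519.
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))

	// Any nonzero element works; 123456789 is an arbitrary illustrative value.
	z := big.NewInt(123456789)

	// The route taken by feInvert/Invert: z^(p-2) mod p.
	invByExp := new(big.Int).Exp(z, new(big.Int).Sub(p, big.NewInt(2)), p)

	// Independent check via the extended Euclidean algorithm.
	invByGCD := new(big.Int).ModInverse(z, p)

	fmt.Println(invByExp.Cmp(invByGCD) == 0)                                 // true
	fmt.Println(new(big.Int).Mod(new(big.Int).Mul(z, invByExp), p).String()) // 1
}

math/big is used here only to illustrate the arithmetic; the vendored implementation instead works on fixed 51-bit limbs with a fixed chain of squarings and multiplications so that inversion runs in constant time.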
diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe.go b/vendor/golang.org/x/crypto/curve25519/internal/field/fe.go new file mode 100644 index 0000000000..ca841ad99e --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/internal/field/fe.go @@ -0,0 +1,416 @@ +// Copyright (c) 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package field implements fast arithmetic modulo 2^255-19. +package field + +import ( + "crypto/subtle" + "encoding/binary" + "math/bits" +) + +// Element represents an element of the field GF(2^255-19). Note that this +// is not a cryptographically secure group, and should only be used to interact +// with edwards25519.Point coordinates. +// +// This type works similarly to math/big.Int, and all arguments and receivers +// are allowed to alias. +// +// The zero value is a valid zero element. +type Element struct { + // An element t represents the integer + // t.l0 + t.l1*2^51 + t.l2*2^102 + t.l3*2^153 + t.l4*2^204 + // + // Between operations, all limbs are expected to be lower than 2^52. + l0 uint64 + l1 uint64 + l2 uint64 + l3 uint64 + l4 uint64 +} + +const maskLow51Bits uint64 = (1 << 51) - 1 + +var feZero = &Element{0, 0, 0, 0, 0} + +// Zero sets v = 0, and returns v. +func (v *Element) Zero() *Element { + *v = *feZero + return v +} + +var feOne = &Element{1, 0, 0, 0, 0} + +// One sets v = 1, and returns v. +func (v *Element) One() *Element { + *v = *feOne + return v +} + +// reduce reduces v modulo 2^255 - 19 and returns it. +func (v *Element) reduce() *Element { + v.carryPropagate() + + // After the light reduction we now have a field element representation + // v < 2^255 + 2^13 * 19, but need v < 2^255 - 19. + + // If v >= 2^255 - 19, then v + 19 >= 2^255, which would overflow 2^255 - 1, + // generating a carry. That is, c will be 0 if v < 2^255 - 19, and 1 otherwise. + c := (v.l0 + 19) >> 51 + c = (v.l1 + c) >> 51 + c = (v.l2 + c) >> 51 + c = (v.l3 + c) >> 51 + c = (v.l4 + c) >> 51 + + // If v < 2^255 - 19 and c = 0, this will be a no-op. Otherwise, it's + // effectively applying the reduction identity to the carry. + v.l0 += 19 * c + + v.l1 += v.l0 >> 51 + v.l0 = v.l0 & maskLow51Bits + v.l2 += v.l1 >> 51 + v.l1 = v.l1 & maskLow51Bits + v.l3 += v.l2 >> 51 + v.l2 = v.l2 & maskLow51Bits + v.l4 += v.l3 >> 51 + v.l3 = v.l3 & maskLow51Bits + // no additional carry + v.l4 = v.l4 & maskLow51Bits + + return v +} + +// Add sets v = a + b, and returns v. +func (v *Element) Add(a, b *Element) *Element { + v.l0 = a.l0 + b.l0 + v.l1 = a.l1 + b.l1 + v.l2 = a.l2 + b.l2 + v.l3 = a.l3 + b.l3 + v.l4 = a.l4 + b.l4 + // Using the generic implementation here is actually faster than the + // assembly. Probably because the body of this function is so simple that + // the compiler can figure out better optimizations by inlining the carry + // propagation. TODO + return v.carryPropagateGeneric() +} + +// Subtract sets v = a - b, and returns v. +func (v *Element) Subtract(a, b *Element) *Element { + // We first add 2 * p, to guarantee the subtraction won't underflow, and + // then subtract b (which can be up to 2^255 + 2^13 * 19). + v.l0 = (a.l0 + 0xFFFFFFFFFFFDA) - b.l0 + v.l1 = (a.l1 + 0xFFFFFFFFFFFFE) - b.l1 + v.l2 = (a.l2 + 0xFFFFFFFFFFFFE) - b.l2 + v.l3 = (a.l3 + 0xFFFFFFFFFFFFE) - b.l3 + v.l4 = (a.l4 + 0xFFFFFFFFFFFFE) - b.l4 + return v.carryPropagate() +} + +// Negate sets v = -a, and returns v. 
+func (v *Element) Negate(a *Element) *Element { + return v.Subtract(feZero, a) +} + +// Invert sets v = 1/z mod p, and returns v. +// +// If z == 0, Invert returns v = 0. +func (v *Element) Invert(z *Element) *Element { + // Inversion is implemented as exponentiation with exponent p − 2. It uses the + // same sequence of 255 squarings and 11 multiplications as [Curve25519]. + var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t Element + + z2.Square(z) // 2 + t.Square(&z2) // 4 + t.Square(&t) // 8 + z9.Multiply(&t, z) // 9 + z11.Multiply(&z9, &z2) // 11 + t.Square(&z11) // 22 + z2_5_0.Multiply(&t, &z9) // 31 = 2^5 - 2^0 + + t.Square(&z2_5_0) // 2^6 - 2^1 + for i := 0; i < 4; i++ { + t.Square(&t) // 2^10 - 2^5 + } + z2_10_0.Multiply(&t, &z2_5_0) // 2^10 - 2^0 + + t.Square(&z2_10_0) // 2^11 - 2^1 + for i := 0; i < 9; i++ { + t.Square(&t) // 2^20 - 2^10 + } + z2_20_0.Multiply(&t, &z2_10_0) // 2^20 - 2^0 + + t.Square(&z2_20_0) // 2^21 - 2^1 + for i := 0; i < 19; i++ { + t.Square(&t) // 2^40 - 2^20 + } + t.Multiply(&t, &z2_20_0) // 2^40 - 2^0 + + t.Square(&t) // 2^41 - 2^1 + for i := 0; i < 9; i++ { + t.Square(&t) // 2^50 - 2^10 + } + z2_50_0.Multiply(&t, &z2_10_0) // 2^50 - 2^0 + + t.Square(&z2_50_0) // 2^51 - 2^1 + for i := 0; i < 49; i++ { + t.Square(&t) // 2^100 - 2^50 + } + z2_100_0.Multiply(&t, &z2_50_0) // 2^100 - 2^0 + + t.Square(&z2_100_0) // 2^101 - 2^1 + for i := 0; i < 99; i++ { + t.Square(&t) // 2^200 - 2^100 + } + t.Multiply(&t, &z2_100_0) // 2^200 - 2^0 + + t.Square(&t) // 2^201 - 2^1 + for i := 0; i < 49; i++ { + t.Square(&t) // 2^250 - 2^50 + } + t.Multiply(&t, &z2_50_0) // 2^250 - 2^0 + + t.Square(&t) // 2^251 - 2^1 + t.Square(&t) // 2^252 - 2^2 + t.Square(&t) // 2^253 - 2^3 + t.Square(&t) // 2^254 - 2^4 + t.Square(&t) // 2^255 - 2^5 + + return v.Multiply(&t, &z11) // 2^255 - 21 +} + +// Set sets v = a, and returns v. +func (v *Element) Set(a *Element) *Element { + *v = *a + return v +} + +// SetBytes sets v to x, which must be a 32-byte little-endian encoding. +// +// Consistent with RFC 7748, the most significant bit (the high bit of the +// last byte) is ignored, and non-canonical values (2^255-19 through 2^255-1) +// are accepted. Note that this is laxer than specified by RFC 8032. +func (v *Element) SetBytes(x []byte) *Element { + if len(x) != 32 { + panic("edwards25519: invalid field element input size") + } + + // Bits 0:51 (bytes 0:8, bits 0:64, shift 0, mask 51). + v.l0 = binary.LittleEndian.Uint64(x[0:8]) + v.l0 &= maskLow51Bits + // Bits 51:102 (bytes 6:14, bits 48:112, shift 3, mask 51). + v.l1 = binary.LittleEndian.Uint64(x[6:14]) >> 3 + v.l1 &= maskLow51Bits + // Bits 102:153 (bytes 12:20, bits 96:160, shift 6, mask 51). + v.l2 = binary.LittleEndian.Uint64(x[12:20]) >> 6 + v.l2 &= maskLow51Bits + // Bits 153:204 (bytes 19:27, bits 152:216, shift 1, mask 51). + v.l3 = binary.LittleEndian.Uint64(x[19:27]) >> 1 + v.l3 &= maskLow51Bits + // Bits 204:251 (bytes 24:32, bits 192:256, shift 12, mask 51). + // Note: not bytes 25:33, shift 4, to avoid overread. + v.l4 = binary.LittleEndian.Uint64(x[24:32]) >> 12 + v.l4 &= maskLow51Bits + + return v +} + +// Bytes returns the canonical 32-byte little-endian encoding of v. +func (v *Element) Bytes() []byte { + // This function is outlined to make the allocations inline in the caller + // rather than happen on the heap. 
+ var out [32]byte + return v.bytes(&out) +} + +func (v *Element) bytes(out *[32]byte) []byte { + t := *v + t.reduce() + + var buf [8]byte + for i, l := range [5]uint64{t.l0, t.l1, t.l2, t.l3, t.l4} { + bitsOffset := i * 51 + binary.LittleEndian.PutUint64(buf[:], l<= len(out) { + break + } + out[off] |= bb + } + } + + return out[:] +} + +// Equal returns 1 if v and u are equal, and 0 otherwise. +func (v *Element) Equal(u *Element) int { + sa, sv := u.Bytes(), v.Bytes() + return subtle.ConstantTimeCompare(sa, sv) +} + +// mask64Bits returns 0xffffffff if cond is 1, and 0 otherwise. +func mask64Bits(cond int) uint64 { return ^(uint64(cond) - 1) } + +// Select sets v to a if cond == 1, and to b if cond == 0. +func (v *Element) Select(a, b *Element, cond int) *Element { + m := mask64Bits(cond) + v.l0 = (m & a.l0) | (^m & b.l0) + v.l1 = (m & a.l1) | (^m & b.l1) + v.l2 = (m & a.l2) | (^m & b.l2) + v.l3 = (m & a.l3) | (^m & b.l3) + v.l4 = (m & a.l4) | (^m & b.l4) + return v +} + +// Swap swaps v and u if cond == 1 or leaves them unchanged if cond == 0, and returns v. +func (v *Element) Swap(u *Element, cond int) { + m := mask64Bits(cond) + t := m & (v.l0 ^ u.l0) + v.l0 ^= t + u.l0 ^= t + t = m & (v.l1 ^ u.l1) + v.l1 ^= t + u.l1 ^= t + t = m & (v.l2 ^ u.l2) + v.l2 ^= t + u.l2 ^= t + t = m & (v.l3 ^ u.l3) + v.l3 ^= t + u.l3 ^= t + t = m & (v.l4 ^ u.l4) + v.l4 ^= t + u.l4 ^= t +} + +// IsNegative returns 1 if v is negative, and 0 otherwise. +func (v *Element) IsNegative() int { + return int(v.Bytes()[0] & 1) +} + +// Absolute sets v to |u|, and returns v. +func (v *Element) Absolute(u *Element) *Element { + return v.Select(new(Element).Negate(u), u, u.IsNegative()) +} + +// Multiply sets v = x * y, and returns v. +func (v *Element) Multiply(x, y *Element) *Element { + feMul(v, x, y) + return v +} + +// Square sets v = x * x, and returns v. +func (v *Element) Square(x *Element) *Element { + feSquare(v, x) + return v +} + +// Mult32 sets v = x * y, and returns v. +func (v *Element) Mult32(x *Element, y uint32) *Element { + x0lo, x0hi := mul51(x.l0, y) + x1lo, x1hi := mul51(x.l1, y) + x2lo, x2hi := mul51(x.l2, y) + x3lo, x3hi := mul51(x.l3, y) + x4lo, x4hi := mul51(x.l4, y) + v.l0 = x0lo + 19*x4hi // carried over per the reduction identity + v.l1 = x1lo + x0hi + v.l2 = x2lo + x1hi + v.l3 = x3lo + x2hi + v.l4 = x4lo + x3hi + // The hi portions are going to be only 32 bits, plus any previous excess, + // so we can skip the carry propagation. + return v +} + +// mul51 returns lo + hi * 2âµÂ¹ = a * b. +func mul51(a uint64, b uint32) (lo uint64, hi uint64) { + mh, ml := bits.Mul64(a, uint64(b)) + lo = ml & maskLow51Bits + hi = (mh << 13) | (ml >> 51) + return +} + +// Pow22523 set v = x^((p-5)/8), and returns v. (p-5)/8 is 2^252-3. 
+func (v *Element) Pow22523(x *Element) *Element { + var t0, t1, t2 Element + + t0.Square(x) // x^2 + t1.Square(&t0) // x^4 + t1.Square(&t1) // x^8 + t1.Multiply(x, &t1) // x^9 + t0.Multiply(&t0, &t1) // x^11 + t0.Square(&t0) // x^22 + t0.Multiply(&t1, &t0) // x^31 + t1.Square(&t0) // x^62 + for i := 1; i < 5; i++ { // x^992 + t1.Square(&t1) + } + t0.Multiply(&t1, &t0) // x^1023 -> 1023 = 2^10 - 1 + t1.Square(&t0) // 2^11 - 2 + for i := 1; i < 10; i++ { // 2^20 - 2^10 + t1.Square(&t1) + } + t1.Multiply(&t1, &t0) // 2^20 - 1 + t2.Square(&t1) // 2^21 - 2 + for i := 1; i < 20; i++ { // 2^40 - 2^20 + t2.Square(&t2) + } + t1.Multiply(&t2, &t1) // 2^40 - 1 + t1.Square(&t1) // 2^41 - 2 + for i := 1; i < 10; i++ { // 2^50 - 2^10 + t1.Square(&t1) + } + t0.Multiply(&t1, &t0) // 2^50 - 1 + t1.Square(&t0) // 2^51 - 2 + for i := 1; i < 50; i++ { // 2^100 - 2^50 + t1.Square(&t1) + } + t1.Multiply(&t1, &t0) // 2^100 - 1 + t2.Square(&t1) // 2^101 - 2 + for i := 1; i < 100; i++ { // 2^200 - 2^100 + t2.Square(&t2) + } + t1.Multiply(&t2, &t1) // 2^200 - 1 + t1.Square(&t1) // 2^201 - 2 + for i := 1; i < 50; i++ { // 2^250 - 2^50 + t1.Square(&t1) + } + t0.Multiply(&t1, &t0) // 2^250 - 1 + t0.Square(&t0) // 2^251 - 2 + t0.Square(&t0) // 2^252 - 4 + return v.Multiply(&t0, x) // 2^252 - 3 -> x^(2^252-3) +} + +// sqrtM1 is 2^((p-1)/4), which squared is equal to -1 by Euler's Criterion. +var sqrtM1 = &Element{1718705420411056, 234908883556509, + 2233514472574048, 2117202627021982, 765476049583133} + +// SqrtRatio sets r to the non-negative square root of the ratio of u and v. +// +// If u/v is square, SqrtRatio returns r and 1. If u/v is not square, SqrtRatio +// sets r according to Section 4.3 of draft-irtf-cfrg-ristretto255-decaf448-00, +// and returns r and 0. +func (r *Element) SqrtRatio(u, v *Element) (rr *Element, wasSquare int) { + var a, b Element + + // r = (u * v3) * (u * v7)^((p-5)/8) + v2 := a.Square(v) + uv3 := b.Multiply(u, b.Multiply(v2, v)) + uv7 := a.Multiply(uv3, a.Square(v2)) + r.Multiply(uv3, r.Pow22523(uv7)) + + check := a.Multiply(v, a.Square(r)) // check = v * r^2 + + uNeg := b.Negate(u) + correctSignSqrt := check.Equal(u) + flippedSignSqrt := check.Equal(uNeg) + flippedSignSqrtI := check.Equal(uNeg.Multiply(uNeg, sqrtM1)) + + rPrime := b.Multiply(r, sqrtM1) // r_prime = SQRT_M1 * r + // r = CT_SELECT(r_prime IF flipped_sign_sqrt | flipped_sign_sqrt_i ELSE r) + r.Select(rPrime, r, flippedSignSqrt|flippedSignSqrtI) + + r.Absolute(r) // Choose the nonnegative square root. + return r, correctSignSqrt | flippedSignSqrt +} diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go new file mode 100644 index 0000000000..44dc8e8caf --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go @@ -0,0 +1,13 @@ +// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT. + +// +build amd64,gc,!purego + +package field + +// feMul sets out = a * b. It works like feMulGeneric. +//go:noescape +func feMul(out *Element, a *Element, b *Element) + +// feSquare sets out = a * a. It works like feSquareGeneric. 
+//go:noescape +func feSquare(out *Element, a *Element) diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.s b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.s new file mode 100644 index 0000000000..293f013c94 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.s @@ -0,0 +1,379 @@ +// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT. + +//go:build amd64 && gc && !purego +// +build amd64,gc,!purego + +#include "textflag.h" + +// func feMul(out *Element, a *Element, b *Element) +TEXT ·feMul(SB), NOSPLIT, $0-24 + MOVQ a+8(FP), CX + MOVQ b+16(FP), BX + + // r0 = a0×b0 + MOVQ (CX), AX + MULQ (BX) + MOVQ AX, DI + MOVQ DX, SI + + // r0 += 19×a1×b4 + MOVQ 8(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 32(BX) + ADDQ AX, DI + ADCQ DX, SI + + // r0 += 19×a2×b3 + MOVQ 16(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 24(BX) + ADDQ AX, DI + ADCQ DX, SI + + // r0 += 19×a3×b2 + MOVQ 24(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 16(BX) + ADDQ AX, DI + ADCQ DX, SI + + // r0 += 19×a4×b1 + MOVQ 32(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 8(BX) + ADDQ AX, DI + ADCQ DX, SI + + // r1 = a0×b1 + MOVQ (CX), AX + MULQ 8(BX) + MOVQ AX, R9 + MOVQ DX, R8 + + // r1 += a1×b0 + MOVQ 8(CX), AX + MULQ (BX) + ADDQ AX, R9 + ADCQ DX, R8 + + // r1 += 19×a2×b4 + MOVQ 16(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 32(BX) + ADDQ AX, R9 + ADCQ DX, R8 + + // r1 += 19×a3×b3 + MOVQ 24(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 24(BX) + ADDQ AX, R9 + ADCQ DX, R8 + + // r1 += 19×a4×b2 + MOVQ 32(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 16(BX) + ADDQ AX, R9 + ADCQ DX, R8 + + // r2 = a0×b2 + MOVQ (CX), AX + MULQ 16(BX) + MOVQ AX, R11 + MOVQ DX, R10 + + // r2 += a1×b1 + MOVQ 8(CX), AX + MULQ 8(BX) + ADDQ AX, R11 + ADCQ DX, R10 + + // r2 += a2×b0 + MOVQ 16(CX), AX + MULQ (BX) + ADDQ AX, R11 + ADCQ DX, R10 + + // r2 += 19×a3×b4 + MOVQ 24(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 32(BX) + ADDQ AX, R11 + ADCQ DX, R10 + + // r2 += 19×a4×b3 + MOVQ 32(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 24(BX) + ADDQ AX, R11 + ADCQ DX, R10 + + // r3 = a0×b3 + MOVQ (CX), AX + MULQ 24(BX) + MOVQ AX, R13 + MOVQ DX, R12 + + // r3 += a1×b2 + MOVQ 8(CX), AX + MULQ 16(BX) + ADDQ AX, R13 + ADCQ DX, R12 + + // r3 += a2×b1 + MOVQ 16(CX), AX + MULQ 8(BX) + ADDQ AX, R13 + ADCQ DX, R12 + + // r3 += a3×b0 + MOVQ 24(CX), AX + MULQ (BX) + ADDQ AX, R13 + ADCQ DX, R12 + + // r3 += 19×a4×b4 + MOVQ 32(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 32(BX) + ADDQ AX, R13 + ADCQ DX, R12 + + // r4 = a0×b4 + MOVQ (CX), AX + MULQ 32(BX) + MOVQ AX, R15 + MOVQ DX, R14 + + // r4 += a1×b3 + MOVQ 8(CX), AX + MULQ 24(BX) + ADDQ AX, R15 + ADCQ DX, R14 + + // r4 += a2×b2 + MOVQ 16(CX), AX + MULQ 16(BX) + ADDQ AX, R15 + ADCQ DX, R14 + + // r4 += a3×b1 + MOVQ 24(CX), AX + MULQ 8(BX) + ADDQ AX, R15 + ADCQ DX, R14 + + // r4 += a4×b0 + MOVQ 32(CX), AX + MULQ (BX) + ADDQ AX, R15 + ADCQ DX, R14 + + // First reduction chain + MOVQ $0x0007ffffffffffff, AX + SHLQ $0x0d, DI, SI + SHLQ $0x0d, R9, R8 + SHLQ $0x0d, R11, R10 + SHLQ $0x0d, R13, R12 + SHLQ $0x0d, R15, R14 + ANDQ AX, DI + IMUL3Q $0x13, R14, R14 + ADDQ R14, DI + ANDQ AX, R9 + ADDQ SI, R9 + ANDQ AX, R11 + ADDQ R8, R11 + ANDQ AX, R13 + ADDQ R10, R13 + ANDQ AX, R15 + ADDQ R12, R15 + + // Second reduction chain (carryPropagate) + MOVQ DI, SI + SHRQ $0x33, SI + MOVQ R9, R8 + SHRQ $0x33, R8 + MOVQ R11, R10 + SHRQ $0x33, R10 + MOVQ R13, R12 + SHRQ $0x33, R12 + MOVQ R15, R14 + SHRQ $0x33, R14 + ANDQ AX, DI + IMUL3Q $0x13, R14, R14 + ADDQ R14, DI + ANDQ AX, R9 + ADDQ SI, 
R9 + ANDQ AX, R11 + ADDQ R8, R11 + ANDQ AX, R13 + ADDQ R10, R13 + ANDQ AX, R15 + ADDQ R12, R15 + + // Store output + MOVQ out+0(FP), AX + MOVQ DI, (AX) + MOVQ R9, 8(AX) + MOVQ R11, 16(AX) + MOVQ R13, 24(AX) + MOVQ R15, 32(AX) + RET + +// func feSquare(out *Element, a *Element) +TEXT ·feSquare(SB), NOSPLIT, $0-16 + MOVQ a+8(FP), CX + + // r0 = l0×l0 + MOVQ (CX), AX + MULQ (CX) + MOVQ AX, SI + MOVQ DX, BX + + // r0 += 38×l1×l4 + MOVQ 8(CX), AX + IMUL3Q $0x26, AX, AX + MULQ 32(CX) + ADDQ AX, SI + ADCQ DX, BX + + // r0 += 38×l2×l3 + MOVQ 16(CX), AX + IMUL3Q $0x26, AX, AX + MULQ 24(CX) + ADDQ AX, SI + ADCQ DX, BX + + // r1 = 2×l0×l1 + MOVQ (CX), AX + SHLQ $0x01, AX + MULQ 8(CX) + MOVQ AX, R8 + MOVQ DX, DI + + // r1 += 38×l2×l4 + MOVQ 16(CX), AX + IMUL3Q $0x26, AX, AX + MULQ 32(CX) + ADDQ AX, R8 + ADCQ DX, DI + + // r1 += 19×l3×l3 + MOVQ 24(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 24(CX) + ADDQ AX, R8 + ADCQ DX, DI + + // r2 = 2×l0×l2 + MOVQ (CX), AX + SHLQ $0x01, AX + MULQ 16(CX) + MOVQ AX, R10 + MOVQ DX, R9 + + // r2 += l1×l1 + MOVQ 8(CX), AX + MULQ 8(CX) + ADDQ AX, R10 + ADCQ DX, R9 + + // r2 += 38×l3×l4 + MOVQ 24(CX), AX + IMUL3Q $0x26, AX, AX + MULQ 32(CX) + ADDQ AX, R10 + ADCQ DX, R9 + + // r3 = 2×l0×l3 + MOVQ (CX), AX + SHLQ $0x01, AX + MULQ 24(CX) + MOVQ AX, R12 + MOVQ DX, R11 + + // r3 += 2×l1×l2 + MOVQ 8(CX), AX + IMUL3Q $0x02, AX, AX + MULQ 16(CX) + ADDQ AX, R12 + ADCQ DX, R11 + + // r3 += 19×l4×l4 + MOVQ 32(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 32(CX) + ADDQ AX, R12 + ADCQ DX, R11 + + // r4 = 2×l0×l4 + MOVQ (CX), AX + SHLQ $0x01, AX + MULQ 32(CX) + MOVQ AX, R14 + MOVQ DX, R13 + + // r4 += 2×l1×l3 + MOVQ 8(CX), AX + IMUL3Q $0x02, AX, AX + MULQ 24(CX) + ADDQ AX, R14 + ADCQ DX, R13 + + // r4 += l2×l2 + MOVQ 16(CX), AX + MULQ 16(CX) + ADDQ AX, R14 + ADCQ DX, R13 + + // First reduction chain + MOVQ $0x0007ffffffffffff, AX + SHLQ $0x0d, SI, BX + SHLQ $0x0d, R8, DI + SHLQ $0x0d, R10, R9 + SHLQ $0x0d, R12, R11 + SHLQ $0x0d, R14, R13 + ANDQ AX, SI + IMUL3Q $0x13, R13, R13 + ADDQ R13, SI + ANDQ AX, R8 + ADDQ BX, R8 + ANDQ AX, R10 + ADDQ DI, R10 + ANDQ AX, R12 + ADDQ R9, R12 + ANDQ AX, R14 + ADDQ R11, R14 + + // Second reduction chain (carryPropagate) + MOVQ SI, BX + SHRQ $0x33, BX + MOVQ R8, DI + SHRQ $0x33, DI + MOVQ R10, R9 + SHRQ $0x33, R9 + MOVQ R12, R11 + SHRQ $0x33, R11 + MOVQ R14, R13 + SHRQ $0x33, R13 + ANDQ AX, SI + IMUL3Q $0x13, R13, R13 + ADDQ R13, SI + ANDQ AX, R8 + ADDQ BX, R8 + ANDQ AX, R10 + ADDQ DI, R10 + ANDQ AX, R12 + ADDQ R9, R12 + ANDQ AX, R14 + ADDQ R11, R14 + + // Store output + MOVQ out+0(FP), AX + MOVQ SI, (AX) + MOVQ R8, 8(AX) + MOVQ R10, 16(AX) + MOVQ R12, 24(AX) + MOVQ R14, 32(AX) + RET diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64_noasm.go b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64_noasm.go new file mode 100644 index 0000000000..ddb6c9b8f7 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64_noasm.go @@ -0,0 +1,12 @@ +// Copyright (c) 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !amd64 || !gc || purego +// +build !amd64 !gc purego + +package field + +func feMul(v, x, y *Element) { feMulGeneric(v, x, y) } + +func feSquare(v, x *Element) { feSquareGeneric(v, x) } diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.go b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.go new file mode 100644 index 0000000000..af459ef515 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.go @@ -0,0 +1,16 @@ +// Copyright (c) 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build arm64 && gc && !purego +// +build arm64,gc,!purego + +package field + +//go:noescape +func carryPropagate(v *Element) + +func (v *Element) carryPropagate() *Element { + carryPropagate(v) + return v +} diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.s b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.s new file mode 100644 index 0000000000..5c91e45892 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.s @@ -0,0 +1,43 @@ +// Copyright (c) 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build arm64 && gc && !purego +// +build arm64,gc,!purego + +#include "textflag.h" + +// carryPropagate works exactly like carryPropagateGeneric and uses the +// same AND, ADD, and LSR+MADD instructions emitted by the compiler, but +// avoids loading R0-R4 twice and uses LDP and STP. +// +// See https://golang.org/issues/43145 for the main compiler issue. +// +// func carryPropagate(v *Element) +TEXT ·carryPropagate(SB),NOFRAME|NOSPLIT,$0-8 + MOVD v+0(FP), R20 + + LDP 0(R20), (R0, R1) + LDP 16(R20), (R2, R3) + MOVD 32(R20), R4 + + AND $0x7ffffffffffff, R0, R10 + AND $0x7ffffffffffff, R1, R11 + AND $0x7ffffffffffff, R2, R12 + AND $0x7ffffffffffff, R3, R13 + AND $0x7ffffffffffff, R4, R14 + + ADD R0>>51, R11, R11 + ADD R1>>51, R12, R12 + ADD R2>>51, R13, R13 + ADD R3>>51, R14, R14 + // R4>>51 * 19 + R10 -> R10 + LSR $51, R4, R21 + MOVD $19, R22 + MADD R22, R10, R21, R10 + + STP (R10, R11), 0(R20) + STP (R12, R13), 16(R20) + MOVD R14, 32(R20) + + RET diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go new file mode 100644 index 0000000000..234a5b2e5d --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go @@ -0,0 +1,12 @@ +// Copyright (c) 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !arm64 || !gc || purego +// +build !arm64 !gc purego + +package field + +func (v *Element) carryPropagate() *Element { + return v.carryPropagateGeneric() +} diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_generic.go b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_generic.go new file mode 100644 index 0000000000..7b5b78cbd6 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_generic.go @@ -0,0 +1,264 @@ +// Copyright (c) 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package field + +import "math/bits" + +// uint128 holds a 128-bit number as two 64-bit limbs, for use with the +// bits.Mul64 and bits.Add64 intrinsics. +type uint128 struct { + lo, hi uint64 +} + +// mul64 returns a * b. +func mul64(a, b uint64) uint128 { + hi, lo := bits.Mul64(a, b) + return uint128{lo, hi} +} + +// addMul64 returns v + a * b. +func addMul64(v uint128, a, b uint64) uint128 { + hi, lo := bits.Mul64(a, b) + lo, c := bits.Add64(lo, v.lo, 0) + hi, _ = bits.Add64(hi, v.hi, c) + return uint128{lo, hi} +} + +// shiftRightBy51 returns a >> 51. a is assumed to be at most 115 bits. +func shiftRightBy51(a uint128) uint64 { + return (a.hi << (64 - 51)) | (a.lo >> 51) +} + +func feMulGeneric(v, a, b *Element) { + a0 := a.l0 + a1 := a.l1 + a2 := a.l2 + a3 := a.l3 + a4 := a.l4 + + b0 := b.l0 + b1 := b.l1 + b2 := b.l2 + b3 := b.l3 + b4 := b.l4 + + // Limb multiplication works like pen-and-paper columnar multiplication, but + // with 51-bit limbs instead of digits. + // + // a4 a3 a2 a1 a0 x + // b4 b3 b2 b1 b0 = + // ------------------------ + // a4b0 a3b0 a2b0 a1b0 a0b0 + + // a4b1 a3b1 a2b1 a1b1 a0b1 + + // a4b2 a3b2 a2b2 a1b2 a0b2 + + // a4b3 a3b3 a2b3 a1b3 a0b3 + + // a4b4 a3b4 a2b4 a1b4 a0b4 = + // ---------------------------------------------- + // r8 r7 r6 r5 r4 r3 r2 r1 r0 + // + // We can then use the reduction identity (a * 2²âµâµ + b = a * 19 + b) to + // reduce the limbs that would overflow 255 bits. r5 * 2²âµâµ becomes 19 * r5, + // r6 * 2³â°â¶ becomes 19 * r6 * 2âµÂ¹, etc. + // + // Reduction can be carried out simultaneously to multiplication. For + // example, we do not compute r5: whenever the result of a multiplication + // belongs to r5, like a1b4, we multiply it by 19 and add the result to r0. + // + // a4b0 a3b0 a2b0 a1b0 a0b0 + + // a3b1 a2b1 a1b1 a0b1 19×a4b1 + + // a2b2 a1b2 a0b2 19×a4b2 19×a3b2 + + // a1b3 a0b3 19×a4b3 19×a3b3 19×a2b3 + + // a0b4 19×a4b4 19×a3b4 19×a2b4 19×a1b4 = + // -------------------------------------- + // r4 r3 r2 r1 r0 + // + // Finally we add up the columns into wide, overlapping limbs. + + a1_19 := a1 * 19 + a2_19 := a2 * 19 + a3_19 := a3 * 19 + a4_19 := a4 * 19 + + // r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1) + r0 := mul64(a0, b0) + r0 = addMul64(r0, a1_19, b4) + r0 = addMul64(r0, a2_19, b3) + r0 = addMul64(r0, a3_19, b2) + r0 = addMul64(r0, a4_19, b1) + + // r1 = a0×b1 + a1×b0 + 19×(a2×b4 + a3×b3 + a4×b2) + r1 := mul64(a0, b1) + r1 = addMul64(r1, a1, b0) + r1 = addMul64(r1, a2_19, b4) + r1 = addMul64(r1, a3_19, b3) + r1 = addMul64(r1, a4_19, b2) + + // r2 = a0×b2 + a1×b1 + a2×b0 + 19×(a3×b4 + a4×b3) + r2 := mul64(a0, b2) + r2 = addMul64(r2, a1, b1) + r2 = addMul64(r2, a2, b0) + r2 = addMul64(r2, a3_19, b4) + r2 = addMul64(r2, a4_19, b3) + + // r3 = a0×b3 + a1×b2 + a2×b1 + a3×b0 + 19×a4×b4 + r3 := mul64(a0, b3) + r3 = addMul64(r3, a1, b2) + r3 = addMul64(r3, a2, b1) + r3 = addMul64(r3, a3, b0) + r3 = addMul64(r3, a4_19, b4) + + // r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0 + r4 := mul64(a0, b4) + r4 = addMul64(r4, a1, b3) + r4 = addMul64(r4, a2, b2) + r4 = addMul64(r4, a3, b1) + r4 = addMul64(r4, a4, b0) + + // After the multiplication, we need to reduce (carry) the five coefficients + // to obtain a result with limbs that are at most slightly larger than 2âµÂ¹, + // to respect the Element invariant. + // + // Overall, the reduction works the same as carryPropagate, except with + // wider inputs: we take the carry for each coefficient by shifting it right + // by 51, and add it to the limb above it. 
The top carry is multiplied by 19 + // according to the reduction identity and added to the lowest limb. + // + // The largest coefficient (r0) will be at most 111 bits, which guarantees + // that all carries are at most 111 - 51 = 60 bits, which fits in a uint64. + // + // r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1) + // r0 < 2âµÂ²Ã—2âµÂ² + 19×(2âµÂ²Ã—2âµÂ² + 2âµÂ²Ã—2âµÂ² + 2âµÂ²Ã—2âµÂ² + 2âµÂ²Ã—2âµÂ²) + // r0 < (1 + 19 × 4) × 2âµÂ² × 2âµÂ² + // r0 < 2ⷠ× 2âµÂ² × 2âµÂ² + // r0 < 2¹¹¹ + // + // Moreover, the top coefficient (r4) is at most 107 bits, so c4 is at most + // 56 bits, and c4 * 19 is at most 61 bits, which again fits in a uint64 and + // allows us to easily apply the reduction identity. + // + // r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0 + // r4 < 5 × 2âµÂ² × 2âµÂ² + // r4 < 2¹â°â· + // + + c0 := shiftRightBy51(r0) + c1 := shiftRightBy51(r1) + c2 := shiftRightBy51(r2) + c3 := shiftRightBy51(r3) + c4 := shiftRightBy51(r4) + + rr0 := r0.lo&maskLow51Bits + c4*19 + rr1 := r1.lo&maskLow51Bits + c0 + rr2 := r2.lo&maskLow51Bits + c1 + rr3 := r3.lo&maskLow51Bits + c2 + rr4 := r4.lo&maskLow51Bits + c3 + + // Now all coefficients fit into 64-bit registers but are still too large to + // be passed around as a Element. We therefore do one last carry chain, + // where the carries will be small enough to fit in the wiggle room above 2âµÂ¹. + *v = Element{rr0, rr1, rr2, rr3, rr4} + v.carryPropagate() +} + +func feSquareGeneric(v, a *Element) { + l0 := a.l0 + l1 := a.l1 + l2 := a.l2 + l3 := a.l3 + l4 := a.l4 + + // Squaring works precisely like multiplication above, but thanks to its + // symmetry we get to group a few terms together. + // + // l4 l3 l2 l1 l0 x + // l4 l3 l2 l1 l0 = + // ------------------------ + // l4l0 l3l0 l2l0 l1l0 l0l0 + + // l4l1 l3l1 l2l1 l1l1 l0l1 + + // l4l2 l3l2 l2l2 l1l2 l0l2 + + // l4l3 l3l3 l2l3 l1l3 l0l3 + + // l4l4 l3l4 l2l4 l1l4 l0l4 = + // ---------------------------------------------- + // r8 r7 r6 r5 r4 r3 r2 r1 r0 + // + // l4l0 l3l0 l2l0 l1l0 l0l0 + + // l3l1 l2l1 l1l1 l0l1 19×l4l1 + + // l2l2 l1l2 l0l2 19×l4l2 19×l3l2 + + // l1l3 l0l3 19×l4l3 19×l3l3 19×l2l3 + + // l0l4 19×l4l4 19×l3l4 19×l2l4 19×l1l4 = + // -------------------------------------- + // r4 r3 r2 r1 r0 + // + // With precomputed 2×, 19×, and 2×19× terms, we can compute each limb with + // only three Mul64 and four Add64, instead of five and eight. 
+ + l0_2 := l0 * 2 + l1_2 := l1 * 2 + + l1_38 := l1 * 38 + l2_38 := l2 * 38 + l3_38 := l3 * 38 + + l3_19 := l3 * 19 + l4_19 := l4 * 19 + + // r0 = l0×l0 + 19×(l1×l4 + l2×l3 + l3×l2 + l4×l1) = l0×l0 + 19×2×(l1×l4 + l2×l3) + r0 := mul64(l0, l0) + r0 = addMul64(r0, l1_38, l4) + r0 = addMul64(r0, l2_38, l3) + + // r1 = l0×l1 + l1×l0 + 19×(l2×l4 + l3×l3 + l4×l2) = 2×l0×l1 + 19×2×l2×l4 + 19×l3×l3 + r1 := mul64(l0_2, l1) + r1 = addMul64(r1, l2_38, l4) + r1 = addMul64(r1, l3_19, l3) + + // r2 = l0×l2 + l1×l1 + l2×l0 + 19×(l3×l4 + l4×l3) = 2×l0×l2 + l1×l1 + 19×2×l3×l4 + r2 := mul64(l0_2, l2) + r2 = addMul64(r2, l1, l1) + r2 = addMul64(r2, l3_38, l4) + + // r3 = l0×l3 + l1×l2 + l2×l1 + l3×l0 + 19×l4×l4 = 2×l0×l3 + 2×l1×l2 + 19×l4×l4 + r3 := mul64(l0_2, l3) + r3 = addMul64(r3, l1_2, l2) + r3 = addMul64(r3, l4_19, l4) + + // r4 = l0×l4 + l1×l3 + l2×l2 + l3×l1 + l4×l0 = 2×l0×l4 + 2×l1×l3 + l2×l2 + r4 := mul64(l0_2, l4) + r4 = addMul64(r4, l1_2, l3) + r4 = addMul64(r4, l2, l2) + + c0 := shiftRightBy51(r0) + c1 := shiftRightBy51(r1) + c2 := shiftRightBy51(r2) + c3 := shiftRightBy51(r3) + c4 := shiftRightBy51(r4) + + rr0 := r0.lo&maskLow51Bits + c4*19 + rr1 := r1.lo&maskLow51Bits + c0 + rr2 := r2.lo&maskLow51Bits + c1 + rr3 := r3.lo&maskLow51Bits + c2 + rr4 := r4.lo&maskLow51Bits + c3 + + *v = Element{rr0, rr1, rr2, rr3, rr4} + v.carryPropagate() +} + +// carryPropagate brings the limbs below 52 bits by applying the reduction +// identity (a * 2²âµâµ + b = a * 19 + b) to the l4 carry. TODO inline +func (v *Element) carryPropagateGeneric() *Element { + c0 := v.l0 >> 51 + c1 := v.l1 >> 51 + c2 := v.l2 >> 51 + c3 := v.l3 >> 51 + c4 := v.l4 >> 51 + + v.l0 = v.l0&maskLow51Bits + c4*19 + v.l1 = v.l1&maskLow51Bits + c0 + v.l2 = v.l2&maskLow51Bits + c1 + v.l3 = v.l3&maskLow51Bits + c2 + v.l4 = v.l4&maskLow51Bits + c3 + + return v +} diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/sync.checkpoint b/vendor/golang.org/x/crypto/curve25519/internal/field/sync.checkpoint new file mode 100644 index 0000000000..e3685f95ca --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/internal/field/sync.checkpoint @@ -0,0 +1 @@ +b0c49ae9f59d233526f8934262c5bbbe14d4358d diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/sync.sh b/vendor/golang.org/x/crypto/curve25519/internal/field/sync.sh new file mode 100644 index 0000000000..1ba22a8b4c --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/internal/field/sync.sh @@ -0,0 +1,19 @@ +#! /bin/bash +set -euo pipefail + +cd "$(git rev-parse --show-toplevel)" + +STD_PATH=src/crypto/ed25519/internal/edwards25519/field +LOCAL_PATH=curve25519/internal/field +LAST_SYNC_REF=$(cat $LOCAL_PATH/sync.checkpoint) + +git fetch https://go.googlesource.com/go master + +if git diff --quiet $LAST_SYNC_REF:$STD_PATH FETCH_HEAD:$STD_PATH; then + echo "No changes." +else + NEW_REF=$(git rev-parse FETCH_HEAD | tee $LOCAL_PATH/sync.checkpoint) + echo "Applying changes from $LAST_SYNC_REF to $NEW_REF..." + git diff $LAST_SYNC_REF:$STD_PATH FETCH_HEAD:$STD_PATH | \ + git apply -3 --directory=$LOCAL_PATH +fi diff --git a/vendor/golang.org/x/crypto/ed25519/ed25519.go b/vendor/golang.org/x/crypto/ed25519/ed25519.go new file mode 100644 index 0000000000..71ad917dad --- /dev/null +++ b/vendor/golang.org/x/crypto/ed25519/ed25519.go @@ -0,0 +1,223 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// In Go 1.13, the ed25519 package was promoted to the standard library as +// crypto/ed25519, and this package became a wrapper for the standard library one. +// +//go:build !go1.13 +// +build !go1.13 + +// Package ed25519 implements the Ed25519 signature algorithm. See +// https://ed25519.cr.yp.to/. +// +// These functions are also compatible with the “Ed25519†function defined in +// RFC 8032. However, unlike RFC 8032's formulation, this package's private key +// representation includes a public key suffix to make multiple signing +// operations with the same key more efficient. This package refers to the RFC +// 8032 private key as the “seedâ€. +package ed25519 + +// This code is a port of the public domain, “ref10†implementation of ed25519 +// from SUPERCOP. + +import ( + "bytes" + "crypto" + cryptorand "crypto/rand" + "crypto/sha512" + "errors" + "io" + "strconv" + + "golang.org/x/crypto/ed25519/internal/edwards25519" +) + +const ( + // PublicKeySize is the size, in bytes, of public keys as used in this package. + PublicKeySize = 32 + // PrivateKeySize is the size, in bytes, of private keys as used in this package. + PrivateKeySize = 64 + // SignatureSize is the size, in bytes, of signatures generated and verified by this package. + SignatureSize = 64 + // SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032. + SeedSize = 32 +) + +// PublicKey is the type of Ed25519 public keys. +type PublicKey []byte + +// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer. +type PrivateKey []byte + +// Public returns the PublicKey corresponding to priv. +func (priv PrivateKey) Public() crypto.PublicKey { + publicKey := make([]byte, PublicKeySize) + copy(publicKey, priv[32:]) + return PublicKey(publicKey) +} + +// Seed returns the private key seed corresponding to priv. It is provided for +// interoperability with RFC 8032. RFC 8032's private keys correspond to seeds +// in this package. +func (priv PrivateKey) Seed() []byte { + seed := make([]byte, SeedSize) + copy(seed, priv[:32]) + return seed +} + +// Sign signs the given message with priv. +// Ed25519 performs two passes over messages to be signed and therefore cannot +// handle pre-hashed messages. Thus opts.HashFunc() must return zero to +// indicate the message hasn't been hashed. This can be achieved by passing +// crypto.Hash(0) as the value for opts. +func (priv PrivateKey) Sign(rand io.Reader, message []byte, opts crypto.SignerOpts) (signature []byte, err error) { + if opts.HashFunc() != crypto.Hash(0) { + return nil, errors.New("ed25519: cannot sign hashed message") + } + + return Sign(priv, message), nil +} + +// GenerateKey generates a public/private key pair using entropy from rand. +// If rand is nil, crypto/rand.Reader will be used. +func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) { + if rand == nil { + rand = cryptorand.Reader + } + + seed := make([]byte, SeedSize) + if _, err := io.ReadFull(rand, seed); err != nil { + return nil, nil, err + } + + privateKey := NewKeyFromSeed(seed) + publicKey := make([]byte, PublicKeySize) + copy(publicKey, privateKey[32:]) + + return publicKey, privateKey, nil +} + +// NewKeyFromSeed calculates a private key from a seed. It will panic if +// len(seed) is not SeedSize. This function is provided for interoperability +// with RFC 8032. RFC 8032's private keys correspond to seeds in this +// package. 
+func NewKeyFromSeed(seed []byte) PrivateKey { + if l := len(seed); l != SeedSize { + panic("ed25519: bad seed length: " + strconv.Itoa(l)) + } + + digest := sha512.Sum512(seed) + digest[0] &= 248 + digest[31] &= 127 + digest[31] |= 64 + + var A edwards25519.ExtendedGroupElement + var hBytes [32]byte + copy(hBytes[:], digest[:]) + edwards25519.GeScalarMultBase(&A, &hBytes) + var publicKeyBytes [32]byte + A.ToBytes(&publicKeyBytes) + + privateKey := make([]byte, PrivateKeySize) + copy(privateKey, seed) + copy(privateKey[32:], publicKeyBytes[:]) + + return privateKey +} + +// Sign signs the message with privateKey and returns a signature. It will +// panic if len(privateKey) is not PrivateKeySize. +func Sign(privateKey PrivateKey, message []byte) []byte { + if l := len(privateKey); l != PrivateKeySize { + panic("ed25519: bad private key length: " + strconv.Itoa(l)) + } + + h := sha512.New() + h.Write(privateKey[:32]) + + var digest1, messageDigest, hramDigest [64]byte + var expandedSecretKey [32]byte + h.Sum(digest1[:0]) + copy(expandedSecretKey[:], digest1[:]) + expandedSecretKey[0] &= 248 + expandedSecretKey[31] &= 63 + expandedSecretKey[31] |= 64 + + h.Reset() + h.Write(digest1[32:]) + h.Write(message) + h.Sum(messageDigest[:0]) + + var messageDigestReduced [32]byte + edwards25519.ScReduce(&messageDigestReduced, &messageDigest) + var R edwards25519.ExtendedGroupElement + edwards25519.GeScalarMultBase(&R, &messageDigestReduced) + + var encodedR [32]byte + R.ToBytes(&encodedR) + + h.Reset() + h.Write(encodedR[:]) + h.Write(privateKey[32:]) + h.Write(message) + h.Sum(hramDigest[:0]) + var hramDigestReduced [32]byte + edwards25519.ScReduce(&hramDigestReduced, &hramDigest) + + var s [32]byte + edwards25519.ScMulAdd(&s, &hramDigestReduced, &expandedSecretKey, &messageDigestReduced) + + signature := make([]byte, SignatureSize) + copy(signature[:], encodedR[:]) + copy(signature[32:], s[:]) + + return signature +} + +// Verify reports whether sig is a valid signature of message by publicKey. It +// will panic if len(publicKey) is not PublicKeySize. +func Verify(publicKey PublicKey, message, sig []byte) bool { + if l := len(publicKey); l != PublicKeySize { + panic("ed25519: bad public key length: " + strconv.Itoa(l)) + } + + if len(sig) != SignatureSize || sig[63]&224 != 0 { + return false + } + + var A edwards25519.ExtendedGroupElement + var publicKeyBytes [32]byte + copy(publicKeyBytes[:], publicKey) + if !A.FromBytes(&publicKeyBytes) { + return false + } + edwards25519.FeNeg(&A.X, &A.X) + edwards25519.FeNeg(&A.T, &A.T) + + h := sha512.New() + h.Write(sig[:32]) + h.Write(publicKey[:]) + h.Write(message) + var digest [64]byte + h.Sum(digest[:0]) + + var hReduced [32]byte + edwards25519.ScReduce(&hReduced, &digest) + + var R edwards25519.ProjectiveGroupElement + var s [32]byte + copy(s[:], sig[32:]) + + // https://tools.ietf.org/html/rfc8032#section-5.1.7 requires that s be in + // the range [0, order) in order to prevent signature malleability. + if !edwards25519.ScMinimal(&s) { + return false + } + + edwards25519.GeDoubleScalarMultVartime(&R, &hReduced, &A, &s) + + var checkR [32]byte + R.ToBytes(&checkR) + return bytes.Equal(sig[:32], checkR[:]) +} diff --git a/vendor/golang.org/x/crypto/ed25519/ed25519_go113.go b/vendor/golang.org/x/crypto/ed25519/ed25519_go113.go new file mode 100644 index 0000000000..b5974dc8b2 --- /dev/null +++ b/vendor/golang.org/x/crypto/ed25519/ed25519_go113.go @@ -0,0 +1,74 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.13 +// +build go1.13 + +// Package ed25519 implements the Ed25519 signature algorithm. See +// https://ed25519.cr.yp.to/. +// +// These functions are also compatible with the “Ed25519†function defined in +// RFC 8032. However, unlike RFC 8032's formulation, this package's private key +// representation includes a public key suffix to make multiple signing +// operations with the same key more efficient. This package refers to the RFC +// 8032 private key as the “seedâ€. +// +// Beginning with Go 1.13, the functionality of this package was moved to the +// standard library as crypto/ed25519. This package only acts as a compatibility +// wrapper. +package ed25519 + +import ( + "crypto/ed25519" + "io" +) + +const ( + // PublicKeySize is the size, in bytes, of public keys as used in this package. + PublicKeySize = 32 + // PrivateKeySize is the size, in bytes, of private keys as used in this package. + PrivateKeySize = 64 + // SignatureSize is the size, in bytes, of signatures generated and verified by this package. + SignatureSize = 64 + // SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032. + SeedSize = 32 +) + +// PublicKey is the type of Ed25519 public keys. +// +// This type is an alias for crypto/ed25519's PublicKey type. +// See the crypto/ed25519 package for the methods on this type. +type PublicKey = ed25519.PublicKey + +// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer. +// +// This type is an alias for crypto/ed25519's PrivateKey type. +// See the crypto/ed25519 package for the methods on this type. +type PrivateKey = ed25519.PrivateKey + +// GenerateKey generates a public/private key pair using entropy from rand. +// If rand is nil, crypto/rand.Reader will be used. +func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) { + return ed25519.GenerateKey(rand) +} + +// NewKeyFromSeed calculates a private key from a seed. It will panic if +// len(seed) is not SeedSize. This function is provided for interoperability +// with RFC 8032. RFC 8032's private keys correspond to seeds in this +// package. +func NewKeyFromSeed(seed []byte) PrivateKey { + return ed25519.NewKeyFromSeed(seed) +} + +// Sign signs the message with privateKey and returns a signature. It will +// panic if len(privateKey) is not PrivateKeySize. +func Sign(privateKey PrivateKey, message []byte) []byte { + return ed25519.Sign(privateKey, message) +} + +// Verify reports whether sig is a valid signature of message by publicKey. It +// will panic if len(publicKey) is not PublicKeySize. +func Verify(publicKey PublicKey, message, sig []byte) bool { + return ed25519.Verify(publicKey, message, sig) +} diff --git a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go new file mode 100644 index 0000000000..e39f086c1d --- /dev/null +++ b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go @@ -0,0 +1,1422 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package edwards25519 + +// These values are from the public domain, “ref10†implementation of ed25519 +// from SUPERCOP. + +// d is a constant in the Edwards curve equation. 
+var d = FieldElement{ + -10913610, 13857413, -15372611, 6949391, 114729, -8787816, -6275908, -3247719, -18696448, -12055116, +} + +// d2 is 2*d. +var d2 = FieldElement{ + -21827239, -5839606, -30745221, 13898782, 229458, 15978800, -12551817, -6495438, 29715968, 9444199, +} + +// SqrtM1 is the square-root of -1 in the field. +var SqrtM1 = FieldElement{ + -32595792, -7943725, 9377950, 3500415, 12389472, -272473, -25146209, -2005654, 326686, 11406482, +} + +// A is a constant in the Montgomery-form of curve25519. +var A = FieldElement{ + 486662, 0, 0, 0, 0, 0, 0, 0, 0, 0, +} + +// bi contains precomputed multiples of the base-point. See the Ed25519 paper +// for a discussion about how these values are used. +var bi = [8]PreComputedGroupElement{ + { + FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605}, + FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378}, + FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546}, + }, + { + FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024}, + FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574}, + FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357}, + }, + { + FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380}, + FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306}, + FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942}, + }, + { + FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766}, + FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701}, + FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300}, + }, + { + FieldElement{-22518993, -6692182, 14201702, -8745502, -23510406, 8844726, 18474211, -1361450, -13062696, 13821877}, + FieldElement{-6455177, -7839871, 3374702, -4740862, -27098617, -10571707, 31655028, -7212327, 18853322, -14220951}, + FieldElement{4566830, -12963868, -28974889, -12240689, -7602672, -2830569, -8514358, -10431137, 2207753, -3209784}, + }, + { + FieldElement{-25154831, -4185821, 29681144, 7868801, -6854661, -9423865, -12437364, -663000, -31111463, -16132436}, + FieldElement{25576264, -2703214, 7349804, -11814844, 16472782, 9300885, 3844789, 15725684, 171356, 6466918}, + FieldElement{23103977, 13316479, 9739013, -16149481, 817875, -15038942, 8965339, -14088058, -30714912, 16193877}, + }, + { + FieldElement{-33521811, 3180713, -2394130, 14003687, -16903474, -16270840, 17238398, 4729455, -18074513, 9256800}, + FieldElement{-25182317, -4174131, 32336398, 5036987, -21236817, 11360617, 22616405, 9761698, -19827198, 630305}, + FieldElement{-13720693, 2639453, -24237460, -7406481, 9494427, -5774029, -6554551, -15960994, -2449256, -14291300}, + }, + { + FieldElement{-3151181, -5046075, 9282714, 6866145, -31907062, -863023, -18940575, 15033784, 25105118, -7894876}, + FieldElement{-24326370, 15950226, -31801215, -14592823, -11662737, -5090925, 1573892, -2625887, 2198790, -15804619}, + FieldElement{-3099351, 10324967, -2241613, 7453183, -5446979, 
-2735503, -13812022, -16236442, -32461234, -12290683}, + }, +} + +// base contains precomputed multiples of the base-point. See the Ed25519 paper +// for a discussion about how these values are used. +var base = [32][8]PreComputedGroupElement{ + { + { + FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605}, + FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378}, + FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546}, + }, + { + FieldElement{-12815894, -12976347, -21581243, 11784320, -25355658, -2750717, -11717903, -3814571, -358445, -10211303}, + FieldElement{-21703237, 6903825, 27185491, 6451973, -29577724, -9554005, -15616551, 11189268, -26829678, -5319081}, + FieldElement{26966642, 11152617, 32442495, 15396054, 14353839, -12752335, -3128826, -9541118, -15472047, -4166697}, + }, + { + FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024}, + FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574}, + FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357}, + }, + { + FieldElement{-17036878, 13921892, 10945806, -6033431, 27105052, -16084379, -28926210, 15006023, 3284568, -6276540}, + FieldElement{23599295, -8306047, -11193664, -7687416, 13236774, 10506355, 7464579, 9656445, 13059162, 10374397}, + FieldElement{7798556, 16710257, 3033922, 2874086, 28997861, 2835604, 32406664, -3839045, -641708, -101325}, + }, + { + FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380}, + FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306}, + FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942}, + }, + { + FieldElement{-15371964, -12862754, 32573250, 4720197, -26436522, 5875511, -19188627, -15224819, -9818940, -12085777}, + FieldElement{-8549212, 109983, 15149363, 2178705, 22900618, 4543417, 3044240, -15689887, 1762328, 14866737}, + FieldElement{-18199695, -15951423, -10473290, 1707278, -17185920, 3916101, -28236412, 3959421, 27914454, 4383652}, + }, + { + FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766}, + FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701}, + FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300}, + }, + { + FieldElement{14499471, -2729599, -33191113, -4254652, 28494862, 14271267, 30290735, 10876454, -33154098, 2381726}, + FieldElement{-7195431, -2655363, -14730155, 462251, -27724326, 3941372, -6236617, 3696005, -32300832, 15351955}, + FieldElement{27431194, 8222322, 16448760, -3907995, -18707002, 11938355, -32961401, -2970515, 29551813, 10109425}, + }, + }, + { + { + FieldElement{-13657040, -13155431, -31283750, 11777098, 21447386, 6519384, -2378284, -1627556, 10092783, -4764171}, + FieldElement{27939166, 14210322, 4677035, 16277044, -22964462, -12398139, -32508754, 12005538, -17810127, 12803510}, + FieldElement{17228999, -15661624, -1233527, 300140, -1224870, -11714777, 30364213, -9038194, 18016357, 4397660}, + }, + { + FieldElement{-10958843, -7690207, 4776341, -14954238, 27850028, -15602212, 
-26619106, 14544525, -17477504, 982639}, + FieldElement{29253598, 15796703, -2863982, -9908884, 10057023, 3163536, 7332899, -4120128, -21047696, 9934963}, + FieldElement{5793303, 16271923, -24131614, -10116404, 29188560, 1206517, -14747930, 4559895, -30123922, -10897950}, + }, + { + FieldElement{-27643952, -11493006, 16282657, -11036493, 28414021, -15012264, 24191034, 4541697, -13338309, 5500568}, + FieldElement{12650548, -1497113, 9052871, 11355358, -17680037, -8400164, -17430592, 12264343, 10874051, 13524335}, + FieldElement{25556948, -3045990, 714651, 2510400, 23394682, -10415330, 33119038, 5080568, -22528059, 5376628}, + }, + { + FieldElement{-26088264, -4011052, -17013699, -3537628, -6726793, 1920897, -22321305, -9447443, 4535768, 1569007}, + FieldElement{-2255422, 14606630, -21692440, -8039818, 28430649, 8775819, -30494562, 3044290, 31848280, 12543772}, + FieldElement{-22028579, 2943893, -31857513, 6777306, 13784462, -4292203, -27377195, -2062731, 7718482, 14474653}, + }, + { + FieldElement{2385315, 2454213, -22631320, 46603, -4437935, -15680415, 656965, -7236665, 24316168, -5253567}, + FieldElement{13741529, 10911568, -33233417, -8603737, -20177830, -1033297, 33040651, -13424532, -20729456, 8321686}, + FieldElement{21060490, -2212744, 15712757, -4336099, 1639040, 10656336, 23845965, -11874838, -9984458, 608372}, + }, + { + FieldElement{-13672732, -15087586, -10889693, -7557059, -6036909, 11305547, 1123968, -6780577, 27229399, 23887}, + FieldElement{-23244140, -294205, -11744728, 14712571, -29465699, -2029617, 12797024, -6440308, -1633405, 16678954}, + FieldElement{-29500620, 4770662, -16054387, 14001338, 7830047, 9564805, -1508144, -4795045, -17169265, 4904953}, + }, + { + FieldElement{24059557, 14617003, 19037157, -15039908, 19766093, -14906429, 5169211, 16191880, 2128236, -4326833}, + FieldElement{-16981152, 4124966, -8540610, -10653797, 30336522, -14105247, -29806336, 916033, -6882542, -2986532}, + FieldElement{-22630907, 12419372, -7134229, -7473371, -16478904, 16739175, 285431, 2763829, 15736322, 4143876}, + }, + { + FieldElement{2379352, 11839345, -4110402, -5988665, 11274298, 794957, 212801, -14594663, 23527084, -16458268}, + FieldElement{33431127, -11130478, -17838966, -15626900, 8909499, 8376530, -32625340, 4087881, -15188911, -14416214}, + FieldElement{1767683, 7197987, -13205226, -2022635, -13091350, 448826, 5799055, 4357868, -4774191, -16323038}, + }, + }, + { + { + FieldElement{6721966, 13833823, -23523388, -1551314, 26354293, -11863321, 23365147, -3949732, 7390890, 2759800}, + FieldElement{4409041, 2052381, 23373853, 10530217, 7676779, -12885954, 21302353, -4264057, 1244380, -12919645}, + FieldElement{-4421239, 7169619, 4982368, -2957590, 30256825, -2777540, 14086413, 9208236, 15886429, 16489664}, + }, + { + FieldElement{1996075, 10375649, 14346367, 13311202, -6874135, -16438411, -13693198, 398369, -30606455, -712933}, + FieldElement{-25307465, 9795880, -2777414, 14878809, -33531835, 14780363, 13348553, 12076947, -30836462, 5113182}, + FieldElement{-17770784, 11797796, 31950843, 13929123, -25888302, 12288344, -30341101, -7336386, 13847711, 5387222}, + }, + { + FieldElement{-18582163, -3416217, 17824843, -2340966, 22744343, -10442611, 8763061, 3617786, -19600662, 10370991}, + FieldElement{20246567, -14369378, 22358229, -543712, 18507283, -10413996, 14554437, -8746092, 32232924, 16763880}, + FieldElement{9648505, 10094563, 26416693, 14745928, -30374318, -6472621, 11094161, 15689506, 3140038, -16510092}, + }, + { + FieldElement{-16160072, 5472695, 31895588, 4744994, 
8823515, 10365685, -27224800, 9448613, -28774454, 366295}, + FieldElement{19153450, 11523972, -11096490, -6503142, -24647631, 5420647, 28344573, 8041113, 719605, 11671788}, + FieldElement{8678025, 2694440, -6808014, 2517372, 4964326, 11152271, -15432916, -15266516, 27000813, -10195553}, + }, + { + FieldElement{-15157904, 7134312, 8639287, -2814877, -7235688, 10421742, 564065, 5336097, 6750977, -14521026}, + FieldElement{11836410, -3979488, 26297894, 16080799, 23455045, 15735944, 1695823, -8819122, 8169720, 16220347}, + FieldElement{-18115838, 8653647, 17578566, -6092619, -8025777, -16012763, -11144307, -2627664, -5990708, -14166033}, + }, + { + FieldElement{-23308498, -10968312, 15213228, -10081214, -30853605, -11050004, 27884329, 2847284, 2655861, 1738395}, + FieldElement{-27537433, -14253021, -25336301, -8002780, -9370762, 8129821, 21651608, -3239336, -19087449, -11005278}, + FieldElement{1533110, 3437855, 23735889, 459276, 29970501, 11335377, 26030092, 5821408, 10478196, 8544890}, + }, + { + FieldElement{32173121, -16129311, 24896207, 3921497, 22579056, -3410854, 19270449, 12217473, 17789017, -3395995}, + FieldElement{-30552961, -2228401, -15578829, -10147201, 13243889, 517024, 15479401, -3853233, 30460520, 1052596}, + FieldElement{-11614875, 13323618, 32618793, 8175907, -15230173, 12596687, 27491595, -4612359, 3179268, -9478891}, + }, + { + FieldElement{31947069, -14366651, -4640583, -15339921, -15125977, -6039709, -14756777, -16411740, 19072640, -9511060}, + FieldElement{11685058, 11822410, 3158003, -13952594, 33402194, -4165066, 5977896, -5215017, 473099, 5040608}, + FieldElement{-20290863, 8198642, -27410132, 11602123, 1290375, -2799760, 28326862, 1721092, -19558642, -3131606}, + }, + }, + { + { + FieldElement{7881532, 10687937, 7578723, 7738378, -18951012, -2553952, 21820786, 8076149, -27868496, 11538389}, + FieldElement{-19935666, 3899861, 18283497, -6801568, -15728660, -11249211, 8754525, 7446702, -5676054, 5797016}, + FieldElement{-11295600, -3793569, -15782110, -7964573, 12708869, -8456199, 2014099, -9050574, -2369172, -5877341}, + }, + { + FieldElement{-22472376, -11568741, -27682020, 1146375, 18956691, 16640559, 1192730, -3714199, 15123619, 10811505}, + FieldElement{14352098, -3419715, -18942044, 10822655, 32750596, 4699007, -70363, 15776356, -28886779, -11974553}, + FieldElement{-28241164, -8072475, -4978962, -5315317, 29416931, 1847569, -20654173, -16484855, 4714547, -9600655}, + }, + { + FieldElement{15200332, 8368572, 19679101, 15970074, -31872674, 1959451, 24611599, -4543832, -11745876, 12340220}, + FieldElement{12876937, -10480056, 33134381, 6590940, -6307776, 14872440, 9613953, 8241152, 15370987, 9608631}, + FieldElement{-4143277, -12014408, 8446281, -391603, 4407738, 13629032, -7724868, 15866074, -28210621, -8814099}, + }, + { + FieldElement{26660628, -15677655, 8393734, 358047, -7401291, 992988, -23904233, 858697, 20571223, 8420556}, + FieldElement{14620715, 13067227, -15447274, 8264467, 14106269, 15080814, 33531827, 12516406, -21574435, -12476749}, + FieldElement{236881, 10476226, 57258, -14677024, 6472998, 2466984, 17258519, 7256740, 8791136, 15069930}, + }, + { + FieldElement{1276410, -9371918, 22949635, -16322807, -23493039, -5702186, 14711875, 4874229, -30663140, -2331391}, + FieldElement{5855666, 4990204, -13711848, 7294284, -7804282, 1924647, -1423175, -7912378, -33069337, 9234253}, + FieldElement{20590503, -9018988, 31529744, -7352666, -2706834, 10650548, 31559055, -11609587, 18979186, 13396066}, + }, + { + FieldElement{24474287, 4968103, 22267082, 4407354, 
24063882, -8325180, -18816887, 13594782, 33514650, 7021958}, + FieldElement{-11566906, -6565505, -21365085, 15928892, -26158305, 4315421, -25948728, -3916677, -21480480, 12868082}, + FieldElement{-28635013, 13504661, 19988037, -2132761, 21078225, 6443208, -21446107, 2244500, -12455797, -8089383}, + }, + { + FieldElement{-30595528, 13793479, -5852820, 319136, -25723172, -6263899, 33086546, 8957937, -15233648, 5540521}, + FieldElement{-11630176, -11503902, -8119500, -7643073, 2620056, 1022908, -23710744, -1568984, -16128528, -14962807}, + FieldElement{23152971, 775386, 27395463, 14006635, -9701118, 4649512, 1689819, 892185, -11513277, -15205948}, + }, + { + FieldElement{9770129, 9586738, 26496094, 4324120, 1556511, -3550024, 27453819, 4763127, -19179614, 5867134}, + FieldElement{-32765025, 1927590, 31726409, -4753295, 23962434, -16019500, 27846559, 5931263, -29749703, -16108455}, + FieldElement{27461885, -2977536, 22380810, 1815854, -23033753, -3031938, 7283490, -15148073, -19526700, 7734629}, + }, + }, + { + { + FieldElement{-8010264, -9590817, -11120403, 6196038, 29344158, -13430885, 7585295, -3176626, 18549497, 15302069}, + FieldElement{-32658337, -6171222, -7672793, -11051681, 6258878, 13504381, 10458790, -6418461, -8872242, 8424746}, + FieldElement{24687205, 8613276, -30667046, -3233545, 1863892, -1830544, 19206234, 7134917, -11284482, -828919}, + }, + { + FieldElement{11334899, -9218022, 8025293, 12707519, 17523892, -10476071, 10243738, -14685461, -5066034, 16498837}, + FieldElement{8911542, 6887158, -9584260, -6958590, 11145641, -9543680, 17303925, -14124238, 6536641, 10543906}, + FieldElement{-28946384, 15479763, -17466835, 568876, -1497683, 11223454, -2669190, -16625574, -27235709, 8876771}, + }, + { + FieldElement{-25742899, -12566864, -15649966, -846607, -33026686, -796288, -33481822, 15824474, -604426, -9039817}, + FieldElement{10330056, 70051, 7957388, -9002667, 9764902, 15609756, 27698697, -4890037, 1657394, 3084098}, + FieldElement{10477963, -7470260, 12119566, -13250805, 29016247, -5365589, 31280319, 14396151, -30233575, 15272409}, + }, + { + FieldElement{-12288309, 3169463, 28813183, 16658753, 25116432, -5630466, -25173957, -12636138, -25014757, 1950504}, + FieldElement{-26180358, 9489187, 11053416, -14746161, -31053720, 5825630, -8384306, -8767532, 15341279, 8373727}, + FieldElement{28685821, 7759505, -14378516, -12002860, -31971820, 4079242, 298136, -10232602, -2878207, 15190420}, + }, + { + FieldElement{-32932876, 13806336, -14337485, -15794431, -24004620, 10940928, 8669718, 2742393, -26033313, -6875003}, + FieldElement{-1580388, -11729417, -25979658, -11445023, -17411874, -10912854, 9291594, -16247779, -12154742, 6048605}, + FieldElement{-30305315, 14843444, 1539301, 11864366, 20201677, 1900163, 13934231, 5128323, 11213262, 9168384}, + }, + { + FieldElement{-26280513, 11007847, 19408960, -940758, -18592965, -4328580, -5088060, -11105150, 20470157, -16398701}, + FieldElement{-23136053, 9282192, 14855179, -15390078, -7362815, -14408560, -22783952, 14461608, 14042978, 5230683}, + FieldElement{29969567, -2741594, -16711867, -8552442, 9175486, -2468974, 21556951, 3506042, -5933891, -12449708}, + }, + { + FieldElement{-3144746, 8744661, 19704003, 4581278, -20430686, 6830683, -21284170, 8971513, -28539189, 15326563}, + FieldElement{-19464629, 10110288, -17262528, -3503892, -23500387, 1355669, -15523050, 15300988, -20514118, 9168260}, + FieldElement{-5353335, 4488613, -23803248, 16314347, 7780487, -15638939, -28948358, 9601605, 33087103, -9011387}, + }, + { + 
FieldElement{-19443170, -15512900, -20797467, -12445323, -29824447, 10229461, -27444329, -15000531, -5996870, 15664672}, + FieldElement{23294591, -16632613, -22650781, -8470978, 27844204, 11461195, 13099750, -2460356, 18151676, 13417686}, + FieldElement{-24722913, -4176517, -31150679, 5988919, -26858785, 6685065, 1661597, -12551441, 15271676, -15452665}, + }, + }, + { + { + FieldElement{11433042, -13228665, 8239631, -5279517, -1985436, -725718, -18698764, 2167544, -6921301, -13440182}, + FieldElement{-31436171, 15575146, 30436815, 12192228, -22463353, 9395379, -9917708, -8638997, 12215110, 12028277}, + FieldElement{14098400, 6555944, 23007258, 5757252, -15427832, -12950502, 30123440, 4617780, -16900089, -655628}, + }, + { + FieldElement{-4026201, -15240835, 11893168, 13718664, -14809462, 1847385, -15819999, 10154009, 23973261, -12684474}, + FieldElement{-26531820, -3695990, -1908898, 2534301, -31870557, -16550355, 18341390, -11419951, 32013174, -10103539}, + FieldElement{-25479301, 10876443, -11771086, -14625140, -12369567, 1838104, 21911214, 6354752, 4425632, -837822}, + }, + { + FieldElement{-10433389, -14612966, 22229858, -3091047, -13191166, 776729, -17415375, -12020462, 4725005, 14044970}, + FieldElement{19268650, -7304421, 1555349, 8692754, -21474059, -9910664, 6347390, -1411784, -19522291, -16109756}, + FieldElement{-24864089, 12986008, -10898878, -5558584, -11312371, -148526, 19541418, 8180106, 9282262, 10282508}, + }, + { + FieldElement{-26205082, 4428547, -8661196, -13194263, 4098402, -14165257, 15522535, 8372215, 5542595, -10702683}, + FieldElement{-10562541, 14895633, 26814552, -16673850, -17480754, -2489360, -2781891, 6993761, -18093885, 10114655}, + FieldElement{-20107055, -929418, 31422704, 10427861, -7110749, 6150669, -29091755, -11529146, 25953725, -106158}, + }, + { + FieldElement{-4234397, -8039292, -9119125, 3046000, 2101609, -12607294, 19390020, 6094296, -3315279, 12831125}, + FieldElement{-15998678, 7578152, 5310217, 14408357, -33548620, -224739, 31575954, 6326196, 7381791, -2421839}, + FieldElement{-20902779, 3296811, 24736065, -16328389, 18374254, 7318640, 6295303, 8082724, -15362489, 12339664}, + }, + { + FieldElement{27724736, 2291157, 6088201, -14184798, 1792727, 5857634, 13848414, 15768922, 25091167, 14856294}, + FieldElement{-18866652, 8331043, 24373479, 8541013, -701998, -9269457, 12927300, -12695493, -22182473, -9012899}, + FieldElement{-11423429, -5421590, 11632845, 3405020, 30536730, -11674039, -27260765, 13866390, 30146206, 9142070}, + }, + { + FieldElement{3924129, -15307516, -13817122, -10054960, 12291820, -668366, -27702774, 9326384, -8237858, 4171294}, + FieldElement{-15921940, 16037937, 6713787, 16606682, -21612135, 2790944, 26396185, 3731949, 345228, -5462949}, + FieldElement{-21327538, 13448259, 25284571, 1143661, 20614966, -8849387, 2031539, -12391231, -16253183, -13582083}, + }, + { + FieldElement{31016211, -16722429, 26371392, -14451233, -5027349, 14854137, 17477601, 3842657, 28012650, -16405420}, + FieldElement{-5075835, 9368966, -8562079, -4600902, -15249953, 6970560, -9189873, 16292057, -8867157, 3507940}, + FieldElement{29439664, 3537914, 23333589, 6997794, -17555561, -11018068, -15209202, -15051267, -9164929, 6580396}, + }, + }, + { + { + FieldElement{-12185861, -7679788, 16438269, 10826160, -8696817, -6235611, 17860444, -9273846, -2095802, 9304567}, + FieldElement{20714564, -4336911, 29088195, 7406487, 11426967, -5095705, 14792667, -14608617, 5289421, -477127}, + FieldElement{-16665533, -10650790, -6160345, -13305760, 9192020, -1802462, 
17271490, 12349094, 26939669, -3752294}, + }, + { + FieldElement{-12889898, 9373458, 31595848, 16374215, 21471720, 13221525, -27283495, -12348559, -3698806, 117887}, + FieldElement{22263325, -6560050, 3984570, -11174646, -15114008, -566785, 28311253, 5358056, -23319780, 541964}, + FieldElement{16259219, 3261970, 2309254, -15534474, -16885711, -4581916, 24134070, -16705829, -13337066, -13552195}, + }, + { + FieldElement{9378160, -13140186, -22845982, -12745264, 28198281, -7244098, -2399684, -717351, 690426, 14876244}, + FieldElement{24977353, -314384, -8223969, -13465086, 28432343, -1176353, -13068804, -12297348, -22380984, 6618999}, + FieldElement{-1538174, 11685646, 12944378, 13682314, -24389511, -14413193, 8044829, -13817328, 32239829, -5652762}, + }, + { + FieldElement{-18603066, 4762990, -926250, 8885304, -28412480, -3187315, 9781647, -10350059, 32779359, 5095274}, + FieldElement{-33008130, -5214506, -32264887, -3685216, 9460461, -9327423, -24601656, 14506724, 21639561, -2630236}, + FieldElement{-16400943, -13112215, 25239338, 15531969, 3987758, -4499318, -1289502, -6863535, 17874574, 558605}, + }, + { + FieldElement{-13600129, 10240081, 9171883, 16131053, -20869254, 9599700, 33499487, 5080151, 2085892, 5119761}, + FieldElement{-22205145, -2519528, -16381601, 414691, -25019550, 2170430, 30634760, -8363614, -31999993, -5759884}, + FieldElement{-6845704, 15791202, 8550074, -1312654, 29928809, -12092256, 27534430, -7192145, -22351378, 12961482}, + }, + { + FieldElement{-24492060, -9570771, 10368194, 11582341, -23397293, -2245287, 16533930, 8206996, -30194652, -5159638}, + FieldElement{-11121496, -3382234, 2307366, 6362031, -135455, 8868177, -16835630, 7031275, 7589640, 8945490}, + FieldElement{-32152748, 8917967, 6661220, -11677616, -1192060, -15793393, 7251489, -11182180, 24099109, -14456170}, + }, + { + FieldElement{5019558, -7907470, 4244127, -14714356, -26933272, 6453165, -19118182, -13289025, -6231896, -10280736}, + FieldElement{10853594, 10721687, 26480089, 5861829, -22995819, 1972175, -1866647, -10557898, -3363451, -6441124}, + FieldElement{-17002408, 5906790, 221599, -6563147, 7828208, -13248918, 24362661, -2008168, -13866408, 7421392}, + }, + { + FieldElement{8139927, -6546497, 32257646, -5890546, 30375719, 1886181, -21175108, 15441252, 28826358, -4123029}, + FieldElement{6267086, 9695052, 7709135, -16603597, -32869068, -1886135, 14795160, -7840124, 13746021, -1742048}, + FieldElement{28584902, 7787108, -6732942, -15050729, 22846041, -7571236, -3181936, -363524, 4771362, -8419958}, + }, + }, + { + { + FieldElement{24949256, 6376279, -27466481, -8174608, -18646154, -9930606, 33543569, -12141695, 3569627, 11342593}, + FieldElement{26514989, 4740088, 27912651, 3697550, 19331575, -11472339, 6809886, 4608608, 7325975, -14801071}, + FieldElement{-11618399, -14554430, -24321212, 7655128, -1369274, 5214312, -27400540, 10258390, -17646694, -8186692}, + }, + { + FieldElement{11431204, 15823007, 26570245, 14329124, 18029990, 4796082, -31446179, 15580664, 9280358, -3973687}, + FieldElement{-160783, -10326257, -22855316, -4304997, -20861367, -13621002, -32810901, -11181622, -15545091, 4387441}, + FieldElement{-20799378, 12194512, 3937617, -5805892, -27154820, 9340370, -24513992, 8548137, 20617071, -7482001}, + }, + { + FieldElement{-938825, -3930586, -8714311, 16124718, 24603125, -6225393, -13775352, -11875822, 24345683, 10325460}, + FieldElement{-19855277, -1568885, -22202708, 8714034, 14007766, 6928528, 16318175, -1010689, 4766743, 3552007}, + FieldElement{-21751364, -16730916, 1351763, 
-803421, -4009670, 3950935, 3217514, 14481909, 10988822, -3994762}, + }, + { + FieldElement{15564307, -14311570, 3101243, 5684148, 30446780, -8051356, 12677127, -6505343, -8295852, 13296005}, + FieldElement{-9442290, 6624296, -30298964, -11913677, -4670981, -2057379, 31521204, 9614054, -30000824, 12074674}, + FieldElement{4771191, -135239, 14290749, -13089852, 27992298, 14998318, -1413936, -1556716, 29832613, -16391035}, + }, + { + FieldElement{7064884, -7541174, -19161962, -5067537, -18891269, -2912736, 25825242, 5293297, -27122660, 13101590}, + FieldElement{-2298563, 2439670, -7466610, 1719965, -27267541, -16328445, 32512469, -5317593, -30356070, -4190957}, + FieldElement{-30006540, 10162316, -33180176, 3981723, -16482138, -13070044, 14413974, 9515896, 19568978, 9628812}, + }, + { + FieldElement{33053803, 199357, 15894591, 1583059, 27380243, -4580435, -17838894, -6106839, -6291786, 3437740}, + FieldElement{-18978877, 3884493, 19469877, 12726490, 15913552, 13614290, -22961733, 70104, 7463304, 4176122}, + FieldElement{-27124001, 10659917, 11482427, -16070381, 12771467, -6635117, -32719404, -5322751, 24216882, 5944158}, + }, + { + FieldElement{8894125, 7450974, -2664149, -9765752, -28080517, -12389115, 19345746, 14680796, 11632993, 5847885}, + FieldElement{26942781, -2315317, 9129564, -4906607, 26024105, 11769399, -11518837, 6367194, -9727230, 4782140}, + FieldElement{19916461, -4828410, -22910704, -11414391, 25606324, -5972441, 33253853, 8220911, 6358847, -1873857}, + }, + { + FieldElement{801428, -2081702, 16569428, 11065167, 29875704, 96627, 7908388, -4480480, -13538503, 1387155}, + FieldElement{19646058, 5720633, -11416706, 12814209, 11607948, 12749789, 14147075, 15156355, -21866831, 11835260}, + FieldElement{19299512, 1155910, 28703737, 14890794, 2925026, 7269399, 26121523, 15467869, -26560550, 5052483}, + }, + }, + { + { + FieldElement{-3017432, 10058206, 1980837, 3964243, 22160966, 12322533, -6431123, -12618185, 12228557, -7003677}, + FieldElement{32944382, 14922211, -22844894, 5188528, 21913450, -8719943, 4001465, 13238564, -6114803, 8653815}, + FieldElement{22865569, -4652735, 27603668, -12545395, 14348958, 8234005, 24808405, 5719875, 28483275, 2841751}, + }, + { + FieldElement{-16420968, -1113305, -327719, -12107856, 21886282, -15552774, -1887966, -315658, 19932058, -12739203}, + FieldElement{-11656086, 10087521, -8864888, -5536143, -19278573, -3055912, 3999228, 13239134, -4777469, -13910208}, + FieldElement{1382174, -11694719, 17266790, 9194690, -13324356, 9720081, 20403944, 11284705, -14013818, 3093230}, + }, + { + FieldElement{16650921, -11037932, -1064178, 1570629, -8329746, 7352753, -302424, 16271225, -24049421, -6691850}, + FieldElement{-21911077, -5927941, -4611316, -5560156, -31744103, -10785293, 24123614, 15193618, -21652117, -16739389}, + FieldElement{-9935934, -4289447, -25279823, 4372842, 2087473, 10399484, 31870908, 14690798, 17361620, 11864968}, + }, + { + FieldElement{-11307610, 6210372, 13206574, 5806320, -29017692, -13967200, -12331205, -7486601, -25578460, -16240689}, + FieldElement{14668462, -12270235, 26039039, 15305210, 25515617, 4542480, 10453892, 6577524, 9145645, -6443880}, + FieldElement{5974874, 3053895, -9433049, -10385191, -31865124, 3225009, -7972642, 3936128, -5652273, -3050304}, + }, + { + FieldElement{30625386, -4729400, -25555961, -12792866, -20484575, 7695099, 17097188, -16303496, -27999779, 1803632}, + FieldElement{-3553091, 9865099, -5228566, 4272701, -5673832, -16689700, 14911344, 12196514, -21405489, 7047412}, + FieldElement{20093277, 9920966, 
-11138194, -5343857, 13161587, 12044805, -32856851, 4124601, -32343828, -10257566}, + }, + { + FieldElement{-20788824, 14084654, -13531713, 7842147, 19119038, -13822605, 4752377, -8714640, -21679658, 2288038}, + FieldElement{-26819236, -3283715, 29965059, 3039786, -14473765, 2540457, 29457502, 14625692, -24819617, 12570232}, + FieldElement{-1063558, -11551823, 16920318, 12494842, 1278292, -5869109, -21159943, -3498680, -11974704, 4724943}, + }, + { + FieldElement{17960970, -11775534, -4140968, -9702530, -8876562, -1410617, -12907383, -8659932, -29576300, 1903856}, + FieldElement{23134274, -14279132, -10681997, -1611936, 20684485, 15770816, -12989750, 3190296, 26955097, 14109738}, + FieldElement{15308788, 5320727, -30113809, -14318877, 22902008, 7767164, 29425325, -11277562, 31960942, 11934971}, + }, + { + FieldElement{-27395711, 8435796, 4109644, 12222639, -24627868, 14818669, 20638173, 4875028, 10491392, 1379718}, + FieldElement{-13159415, 9197841, 3875503, -8936108, -1383712, -5879801, 33518459, 16176658, 21432314, 12180697}, + FieldElement{-11787308, 11500838, 13787581, -13832590, -22430679, 10140205, 1465425, 12689540, -10301319, -13872883}, + }, + }, + { + { + FieldElement{5414091, -15386041, -21007664, 9643570, 12834970, 1186149, -2622916, -1342231, 26128231, 6032912}, + FieldElement{-26337395, -13766162, 32496025, -13653919, 17847801, -12669156, 3604025, 8316894, -25875034, -10437358}, + FieldElement{3296484, 6223048, 24680646, -12246460, -23052020, 5903205, -8862297, -4639164, 12376617, 3188849}, + }, + { + FieldElement{29190488, -14659046, 27549113, -1183516, 3520066, -10697301, 32049515, -7309113, -16109234, -9852307}, + FieldElement{-14744486, -9309156, 735818, -598978, -20407687, -5057904, 25246078, -15795669, 18640741, -960977}, + FieldElement{-6928835, -16430795, 10361374, 5642961, 4910474, 12345252, -31638386, -494430, 10530747, 1053335}, + }, + { + FieldElement{-29265967, -14186805, -13538216, -12117373, -19457059, -10655384, -31462369, -2948985, 24018831, 15026644}, + FieldElement{-22592535, -3145277, -2289276, 5953843, -13440189, 9425631, 25310643, 13003497, -2314791, -15145616}, + FieldElement{-27419985, -603321, -8043984, -1669117, -26092265, 13987819, -27297622, 187899, -23166419, -2531735}, + }, + { + FieldElement{-21744398, -13810475, 1844840, 5021428, -10434399, -15911473, 9716667, 16266922, -5070217, 726099}, + FieldElement{29370922, -6053998, 7334071, -15342259, 9385287, 2247707, -13661962, -4839461, 30007388, -15823341}, + FieldElement{-936379, 16086691, 23751945, -543318, -1167538, -5189036, 9137109, 730663, 9835848, 4555336}, + }, + { + FieldElement{-23376435, 1410446, -22253753, -12899614, 30867635, 15826977, 17693930, 544696, -11985298, 12422646}, + FieldElement{31117226, -12215734, -13502838, 6561947, -9876867, -12757670, -5118685, -4096706, 29120153, 13924425}, + FieldElement{-17400879, -14233209, 19675799, -2734756, -11006962, -5858820, -9383939, -11317700, 7240931, -237388}, + }, + { + FieldElement{-31361739, -11346780, -15007447, -5856218, -22453340, -12152771, 1222336, 4389483, 3293637, -15551743}, + FieldElement{-16684801, -14444245, 11038544, 11054958, -13801175, -3338533, -24319580, 7733547, 12796905, -6335822}, + FieldElement{-8759414, -10817836, -25418864, 10783769, -30615557, -9746811, -28253339, 3647836, 3222231, -11160462}, + }, + { + FieldElement{18606113, 1693100, -25448386, -15170272, 4112353, 10045021, 23603893, -2048234, -7550776, 2484985}, + FieldElement{9255317, -3131197, -12156162, -1004256, 13098013, -9214866, 16377220, -2102812, 
-19802075, -3034702}, + FieldElement{-22729289, 7496160, -5742199, 11329249, 19991973, -3347502, -31718148, 9936966, -30097688, -10618797}, + }, + { + FieldElement{21878590, -5001297, 4338336, 13643897, -3036865, 13160960, 19708896, 5415497, -7360503, -4109293}, + FieldElement{27736861, 10103576, 12500508, 8502413, -3413016, -9633558, 10436918, -1550276, -23659143, -8132100}, + FieldElement{19492550, -12104365, -29681976, -852630, -3208171, 12403437, 30066266, 8367329, 13243957, 8709688}, + }, + }, + { + { + FieldElement{12015105, 2801261, 28198131, 10151021, 24818120, -4743133, -11194191, -5645734, 5150968, 7274186}, + FieldElement{2831366, -12492146, 1478975, 6122054, 23825128, -12733586, 31097299, 6083058, 31021603, -9793610}, + FieldElement{-2529932, -2229646, 445613, 10720828, -13849527, -11505937, -23507731, 16354465, 15067285, -14147707}, + }, + { + FieldElement{7840942, 14037873, -33364863, 15934016, -728213, -3642706, 21403988, 1057586, -19379462, -12403220}, + FieldElement{915865, -16469274, 15608285, -8789130, -24357026, 6060030, -17371319, 8410997, -7220461, 16527025}, + FieldElement{32922597, -556987, 20336074, -16184568, 10903705, -5384487, 16957574, 52992, 23834301, 6588044}, + }, + { + FieldElement{32752030, 11232950, 3381995, -8714866, 22652988, -10744103, 17159699, 16689107, -20314580, -1305992}, + FieldElement{-4689649, 9166776, -25710296, -10847306, 11576752, 12733943, 7924251, -2752281, 1976123, -7249027}, + FieldElement{21251222, 16309901, -2983015, -6783122, 30810597, 12967303, 156041, -3371252, 12331345, -8237197}, + }, + { + FieldElement{8651614, -4477032, -16085636, -4996994, 13002507, 2950805, 29054427, -5106970, 10008136, -4667901}, + FieldElement{31486080, 15114593, -14261250, 12951354, 14369431, -7387845, 16347321, -13662089, 8684155, -10532952}, + FieldElement{19443825, 11385320, 24468943, -9659068, -23919258, 2187569, -26263207, -6086921, 31316348, 14219878}, + }, + { + FieldElement{-28594490, 1193785, 32245219, 11392485, 31092169, 15722801, 27146014, 6992409, 29126555, 9207390}, + FieldElement{32382935, 1110093, 18477781, 11028262, -27411763, -7548111, -4980517, 10843782, -7957600, -14435730}, + FieldElement{2814918, 7836403, 27519878, -7868156, -20894015, -11553689, -21494559, 8550130, 28346258, 1994730}, + }, + { + FieldElement{-19578299, 8085545, -14000519, -3948622, 2785838, -16231307, -19516951, 7174894, 22628102, 8115180}, + FieldElement{-30405132, 955511, -11133838, -15078069, -32447087, -13278079, -25651578, 3317160, -9943017, 930272}, + FieldElement{-15303681, -6833769, 28856490, 1357446, 23421993, 1057177, 24091212, -1388970, -22765376, -10650715}, + }, + { + FieldElement{-22751231, -5303997, -12907607, -12768866, -15811511, -7797053, -14839018, -16554220, -1867018, 8398970}, + FieldElement{-31969310, 2106403, -4736360, 1362501, 12813763, 16200670, 22981545, -6291273, 18009408, -15772772}, + FieldElement{-17220923, -9545221, -27784654, 14166835, 29815394, 7444469, 29551787, -3727419, 19288549, 1325865}, + }, + { + FieldElement{15100157, -15835752, -23923978, -1005098, -26450192, 15509408, 12376730, -3479146, 33166107, -8042750}, + FieldElement{20909231, 13023121, -9209752, 16251778, -5778415, -8094914, 12412151, 10018715, 2213263, -13878373}, + FieldElement{32529814, -11074689, 30361439, -16689753, -9135940, 1513226, 22922121, 6382134, -5766928, 8371348}, + }, + }, + { + { + FieldElement{9923462, 11271500, 12616794, 3544722, -29998368, -1721626, 12891687, -8193132, -26442943, 10486144}, + FieldElement{-22597207, -7012665, 8587003, -8257861, 
4084309, -12970062, 361726, 2610596, -23921530, -11455195}, + FieldElement{5408411, -1136691, -4969122, 10561668, 24145918, 14240566, 31319731, -4235541, 19985175, -3436086}, + }, + { + FieldElement{-13994457, 16616821, 14549246, 3341099, 32155958, 13648976, -17577068, 8849297, 65030, 8370684}, + FieldElement{-8320926, -12049626, 31204563, 5839400, -20627288, -1057277, -19442942, 6922164, 12743482, -9800518}, + FieldElement{-2361371, 12678785, 28815050, 4759974, -23893047, 4884717, 23783145, 11038569, 18800704, 255233}, + }, + { + FieldElement{-5269658, -1773886, 13957886, 7990715, 23132995, 728773, 13393847, 9066957, 19258688, -14753793}, + FieldElement{-2936654, -10827535, -10432089, 14516793, -3640786, 4372541, -31934921, 2209390, -1524053, 2055794}, + FieldElement{580882, 16705327, 5468415, -2683018, -30926419, -14696000, -7203346, -8994389, -30021019, 7394435}, + }, + { + FieldElement{23838809, 1822728, -15738443, 15242727, 8318092, -3733104, -21672180, -3492205, -4821741, 14799921}, + FieldElement{13345610, 9759151, 3371034, -16137791, 16353039, 8577942, 31129804, 13496856, -9056018, 7402518}, + FieldElement{2286874, -4435931, -20042458, -2008336, -13696227, 5038122, 11006906, -15760352, 8205061, 1607563}, + }, + { + FieldElement{14414086, -8002132, 3331830, -3208217, 22249151, -5594188, 18364661, -2906958, 30019587, -9029278}, + FieldElement{-27688051, 1585953, -10775053, 931069, -29120221, -11002319, -14410829, 12029093, 9944378, 8024}, + FieldElement{4368715, -3709630, 29874200, -15022983, -20230386, -11410704, -16114594, -999085, -8142388, 5640030}, + }, + { + FieldElement{10299610, 13746483, 11661824, 16234854, 7630238, 5998374, 9809887, -16694564, 15219798, -14327783}, + FieldElement{27425505, -5719081, 3055006, 10660664, 23458024, 595578, -15398605, -1173195, -18342183, 9742717}, + FieldElement{6744077, 2427284, 26042789, 2720740, -847906, 1118974, 32324614, 7406442, 12420155, 1994844}, + }, + { + FieldElement{14012521, -5024720, -18384453, -9578469, -26485342, -3936439, -13033478, -10909803, 24319929, -6446333}, + FieldElement{16412690, -4507367, 10772641, 15929391, -17068788, -4658621, 10555945, -10484049, -30102368, -4739048}, + FieldElement{22397382, -7767684, -9293161, -12792868, 17166287, -9755136, -27333065, 6199366, 21880021, -12250760}, + }, + { + FieldElement{-4283307, 5368523, -31117018, 8163389, -30323063, 3209128, 16557151, 8890729, 8840445, 4957760}, + FieldElement{-15447727, 709327, -6919446, -10870178, -29777922, 6522332, -21720181, 12130072, -14796503, 5005757}, + FieldElement{-2114751, -14308128, 23019042, 15765735, -25269683, 6002752, 10183197, -13239326, -16395286, -2176112}, + }, + }, + { + { + FieldElement{-19025756, 1632005, 13466291, -7995100, -23640451, 16573537, -32013908, -3057104, 22208662, 2000468}, + FieldElement{3065073, -1412761, -25598674, -361432, -17683065, -5703415, -8164212, 11248527, -3691214, -7414184}, + FieldElement{10379208, -6045554, 8877319, 1473647, -29291284, -12507580, 16690915, 2553332, -3132688, 16400289}, + }, + { + FieldElement{15716668, 1254266, -18472690, 7446274, -8448918, 6344164, -22097271, -7285580, 26894937, 9132066}, + FieldElement{24158887, 12938817, 11085297, -8177598, -28063478, -4457083, -30576463, 64452, -6817084, -2692882}, + FieldElement{13488534, 7794716, 22236231, 5989356, 25426474, -12578208, 2350710, -3418511, -4688006, 2364226}, + }, + { + FieldElement{16335052, 9132434, 25640582, 6678888, 1725628, 8517937, -11807024, -11697457, 15445875, -7798101}, + FieldElement{29004207, -7867081, 28661402, -640412, 
-12794003, -7943086, 31863255, -4135540, -278050, -15759279}, + FieldElement{-6122061, -14866665, -28614905, 14569919, -10857999, -3591829, 10343412, -6976290, -29828287, -10815811}, + }, + { + FieldElement{27081650, 3463984, 14099042, -4517604, 1616303, -6205604, 29542636, 15372179, 17293797, 960709}, + FieldElement{20263915, 11434237, -5765435, 11236810, 13505955, -10857102, -16111345, 6493122, -19384511, 7639714}, + FieldElement{-2830798, -14839232, 25403038, -8215196, -8317012, -16173699, 18006287, -16043750, 29994677, -15808121}, + }, + { + FieldElement{9769828, 5202651, -24157398, -13631392, -28051003, -11561624, -24613141, -13860782, -31184575, 709464}, + FieldElement{12286395, 13076066, -21775189, -1176622, -25003198, 4057652, -32018128, -8890874, 16102007, 13205847}, + FieldElement{13733362, 5599946, 10557076, 3195751, -5557991, 8536970, -25540170, 8525972, 10151379, 10394400}, + }, + { + FieldElement{4024660, -16137551, 22436262, 12276534, -9099015, -2686099, 19698229, 11743039, -33302334, 8934414}, + FieldElement{-15879800, -4525240, -8580747, -2934061, 14634845, -698278, -9449077, 3137094, -11536886, 11721158}, + FieldElement{17555939, -5013938, 8268606, 2331751, -22738815, 9761013, 9319229, 8835153, -9205489, -1280045}, + }, + { + FieldElement{-461409, -7830014, 20614118, 16688288, -7514766, -4807119, 22300304, 505429, 6108462, -6183415}, + FieldElement{-5070281, 12367917, -30663534, 3234473, 32617080, -8422642, 29880583, -13483331, -26898490, -7867459}, + FieldElement{-31975283, 5726539, 26934134, 10237677, -3173717, -605053, 24199304, 3795095, 7592688, -14992079}, + }, + { + FieldElement{21594432, -14964228, 17466408, -4077222, 32537084, 2739898, 6407723, 12018833, -28256052, 4298412}, + FieldElement{-20650503, -11961496, -27236275, 570498, 3767144, -1717540, 13891942, -1569194, 13717174, 10805743}, + FieldElement{-14676630, -15644296, 15287174, 11927123, 24177847, -8175568, -796431, 14860609, -26938930, -5863836}, + }, + }, + { + { + FieldElement{12962541, 5311799, -10060768, 11658280, 18855286, -7954201, 13286263, -12808704, -4381056, 9882022}, + FieldElement{18512079, 11319350, -20123124, 15090309, 18818594, 5271736, -22727904, 3666879, -23967430, -3299429}, + FieldElement{-6789020, -3146043, 16192429, 13241070, 15898607, -14206114, -10084880, -6661110, -2403099, 5276065}, + }, + { + FieldElement{30169808, -5317648, 26306206, -11750859, 27814964, 7069267, 7152851, 3684982, 1449224, 13082861}, + FieldElement{10342826, 3098505, 2119311, 193222, 25702612, 12233820, 23697382, 15056736, -21016438, -8202000}, + FieldElement{-33150110, 3261608, 22745853, 7948688, 19370557, -15177665, -26171976, 6482814, -10300080, -11060101}, + }, + { + FieldElement{32869458, -5408545, 25609743, 15678670, -10687769, -15471071, 26112421, 2521008, -22664288, 6904815}, + FieldElement{29506923, 4457497, 3377935, -9796444, -30510046, 12935080, 1561737, 3841096, -29003639, -6657642}, + FieldElement{10340844, -6630377, -18656632, -2278430, 12621151, -13339055, 30878497, -11824370, -25584551, 5181966}, + }, + { + FieldElement{25940115, -12658025, 17324188, -10307374, -8671468, 15029094, 24396252, -16450922, -2322852, -12388574}, + FieldElement{-21765684, 9916823, -1300409, 4079498, -1028346, 11909559, 1782390, 12641087, 20603771, -6561742}, + FieldElement{-18882287, -11673380, 24849422, 11501709, 13161720, -4768874, 1925523, 11914390, 4662781, 7820689}, + }, + { + FieldElement{12241050, -425982, 8132691, 9393934, 32846760, -1599620, 29749456, 12172924, 16136752, 15264020}, + FieldElement{-10349955, 
-14680563, -8211979, 2330220, -17662549, -14545780, 10658213, 6671822, 19012087, 3772772}, + FieldElement{3753511, -3421066, 10617074, 2028709, 14841030, -6721664, 28718732, -15762884, 20527771, 12988982}, + }, + { + FieldElement{-14822485, -5797269, -3707987, 12689773, -898983, -10914866, -24183046, -10564943, 3299665, -12424953}, + FieldElement{-16777703, -15253301, -9642417, 4978983, 3308785, 8755439, 6943197, 6461331, -25583147, 8991218}, + FieldElement{-17226263, 1816362, -1673288, -6086439, 31783888, -8175991, -32948145, 7417950, -30242287, 1507265}, + }, + { + FieldElement{29692663, 6829891, -10498800, 4334896, 20945975, -11906496, -28887608, 8209391, 14606362, -10647073}, + FieldElement{-3481570, 8707081, 32188102, 5672294, 22096700, 1711240, -33020695, 9761487, 4170404, -2085325}, + FieldElement{-11587470, 14855945, -4127778, -1531857, -26649089, 15084046, 22186522, 16002000, -14276837, -8400798}, + }, + { + FieldElement{-4811456, 13761029, -31703877, -2483919, -3312471, 7869047, -7113572, -9620092, 13240845, 10965870}, + FieldElement{-7742563, -8256762, -14768334, -13656260, -23232383, 12387166, 4498947, 14147411, 29514390, 4302863}, + FieldElement{-13413405, -12407859, 20757302, -13801832, 14785143, 8976368, -5061276, -2144373, 17846988, -13971927}, + }, + }, + { + { + FieldElement{-2244452, -754728, -4597030, -1066309, -6247172, 1455299, -21647728, -9214789, -5222701, 12650267}, + FieldElement{-9906797, -16070310, 21134160, 12198166, -27064575, 708126, 387813, 13770293, -19134326, 10958663}, + FieldElement{22470984, 12369526, 23446014, -5441109, -21520802, -9698723, -11772496, -11574455, -25083830, 4271862}, + }, + { + FieldElement{-25169565, -10053642, -19909332, 15361595, -5984358, 2159192, 75375, -4278529, -32526221, 8469673}, + FieldElement{15854970, 4148314, -8893890, 7259002, 11666551, 13824734, -30531198, 2697372, 24154791, -9460943}, + FieldElement{15446137, -15806644, 29759747, 14019369, 30811221, -9610191, -31582008, 12840104, 24913809, 9815020}, + }, + { + FieldElement{-4709286, -5614269, -31841498, -12288893, -14443537, 10799414, -9103676, 13438769, 18735128, 9466238}, + FieldElement{11933045, 9281483, 5081055, -5183824, -2628162, -4905629, -7727821, -10896103, -22728655, 16199064}, + FieldElement{14576810, 379472, -26786533, -8317236, -29426508, -10812974, -102766, 1876699, 30801119, 2164795}, + }, + { + FieldElement{15995086, 3199873, 13672555, 13712240, -19378835, -4647646, -13081610, -15496269, -13492807, 1268052}, + FieldElement{-10290614, -3659039, -3286592, 10948818, 23037027, 3794475, -3470338, -12600221, -17055369, 3565904}, + FieldElement{29210088, -9419337, -5919792, -4952785, 10834811, -13327726, -16512102, -10820713, -27162222, -14030531}, + }, + { + FieldElement{-13161890, 15508588, 16663704, -8156150, -28349942, 9019123, -29183421, -3769423, 2244111, -14001979}, + FieldElement{-5152875, -3800936, -9306475, -6071583, 16243069, 14684434, -25673088, -16180800, 13491506, 4641841}, + FieldElement{10813417, 643330, -19188515, -728916, 30292062, -16600078, 27548447, -7721242, 14476989, -12767431}, + }, + { + FieldElement{10292079, 9984945, 6481436, 8279905, -7251514, 7032743, 27282937, -1644259, -27912810, 12651324}, + FieldElement{-31185513, -813383, 22271204, 11835308, 10201545, 15351028, 17099662, 3988035, 21721536, -3148940}, + FieldElement{10202177, -6545839, -31373232, -9574638, -32150642, -8119683, -12906320, 3852694, 13216206, 14842320}, + }, + { + FieldElement{-15815640, -10601066, -6538952, -7258995, -6984659, -6581778, -31500847, 13765824, 
-27434397, 9900184}, + FieldElement{14465505, -13833331, -32133984, -14738873, -27443187, 12990492, 33046193, 15796406, -7051866, -8040114}, + FieldElement{30924417, -8279620, 6359016, -12816335, 16508377, 9071735, -25488601, 15413635, 9524356, -7018878}, + }, + { + FieldElement{12274201, -13175547, 32627641, -1785326, 6736625, 13267305, 5237659, -5109483, 15663516, 4035784}, + FieldElement{-2951309, 8903985, 17349946, 601635, -16432815, -4612556, -13732739, -15889334, -22258478, 4659091}, + FieldElement{-16916263, -4952973, -30393711, -15158821, 20774812, 15897498, 5736189, 15026997, -2178256, -13455585}, + }, + }, + { + { + FieldElement{-8858980, -2219056, 28571666, -10155518, -474467, -10105698, -3801496, 278095, 23440562, -290208}, + FieldElement{10226241, -5928702, 15139956, 120818, -14867693, 5218603, 32937275, 11551483, -16571960, -7442864}, + FieldElement{17932739, -12437276, -24039557, 10749060, 11316803, 7535897, 22503767, 5561594, -3646624, 3898661}, + }, + { + FieldElement{7749907, -969567, -16339731, -16464, -25018111, 15122143, -1573531, 7152530, 21831162, 1245233}, + FieldElement{26958459, -14658026, 4314586, 8346991, -5677764, 11960072, -32589295, -620035, -30402091, -16716212}, + FieldElement{-12165896, 9166947, 33491384, 13673479, 29787085, 13096535, 6280834, 14587357, -22338025, 13987525}, + }, + { + FieldElement{-24349909, 7778775, 21116000, 15572597, -4833266, -5357778, -4300898, -5124639, -7469781, -2858068}, + FieldElement{9681908, -6737123, -31951644, 13591838, -6883821, 386950, 31622781, 6439245, -14581012, 4091397}, + FieldElement{-8426427, 1470727, -28109679, -1596990, 3978627, -5123623, -19622683, 12092163, 29077877, -14741988}, + }, + { + FieldElement{5269168, -6859726, -13230211, -8020715, 25932563, 1763552, -5606110, -5505881, -20017847, 2357889}, + FieldElement{32264008, -15407652, -5387735, -1160093, -2091322, -3946900, 23104804, -12869908, 5727338, 189038}, + FieldElement{14609123, -8954470, -6000566, -16622781, -14577387, -7743898, -26745169, 10942115, -25888931, -14884697}, + }, + { + FieldElement{20513500, 5557931, -15604613, 7829531, 26413943, -2019404, -21378968, 7471781, 13913677, -5137875}, + FieldElement{-25574376, 11967826, 29233242, 12948236, -6754465, 4713227, -8940970, 14059180, 12878652, 8511905}, + FieldElement{-25656801, 3393631, -2955415, -7075526, -2250709, 9366908, -30223418, 6812974, 5568676, -3127656}, + }, + { + FieldElement{11630004, 12144454, 2116339, 13606037, 27378885, 15676917, -17408753, -13504373, -14395196, 8070818}, + FieldElement{27117696, -10007378, -31282771, -5570088, 1127282, 12772488, -29845906, 10483306, -11552749, -1028714}, + FieldElement{10637467, -5688064, 5674781, 1072708, -26343588, -6982302, -1683975, 9177853, -27493162, 15431203}, + }, + { + FieldElement{20525145, 10892566, -12742472, 12779443, -29493034, 16150075, -28240519, 14943142, -15056790, -7935931}, + FieldElement{-30024462, 5626926, -551567, -9981087, 753598, 11981191, 25244767, -3239766, -3356550, 9594024}, + FieldElement{-23752644, 2636870, -5163910, -10103818, 585134, 7877383, 11345683, -6492290, 13352335, -10977084}, + }, + { + FieldElement{-1931799, -5407458, 3304649, -12884869, 17015806, -4877091, -29783850, -7752482, -13215537, -319204}, + FieldElement{20239939, 6607058, 6203985, 3483793, -18386976, -779229, -20723742, 15077870, -22750759, 14523817}, + FieldElement{27406042, -6041657, 27423596, -4497394, 4996214, 10002360, -28842031, -4545494, -30172742, -4805667}, + }, + }, + { + { + FieldElement{11374242, 12660715, 17861383, -12540833, 
10935568, 1099227, -13886076, -9091740, -27727044, 11358504}, + FieldElement{-12730809, 10311867, 1510375, 10778093, -2119455, -9145702, 32676003, 11149336, -26123651, 4985768}, + FieldElement{-19096303, 341147, -6197485, -239033, 15756973, -8796662, -983043, 13794114, -19414307, -15621255}, + }, + { + FieldElement{6490081, 11940286, 25495923, -7726360, 8668373, -8751316, 3367603, 6970005, -1691065, -9004790}, + FieldElement{1656497, 13457317, 15370807, 6364910, 13605745, 8362338, -19174622, -5475723, -16796596, -5031438}, + FieldElement{-22273315, -13524424, -64685, -4334223, -18605636, -10921968, -20571065, -7007978, -99853, -10237333}, + }, + { + FieldElement{17747465, 10039260, 19368299, -4050591, -20630635, -16041286, 31992683, -15857976, -29260363, -5511971}, + FieldElement{31932027, -4986141, -19612382, 16366580, 22023614, 88450, 11371999, -3744247, 4882242, -10626905}, + FieldElement{29796507, 37186, 19818052, 10115756, -11829032, 3352736, 18551198, 3272828, -5190932, -4162409}, + }, + { + FieldElement{12501286, 4044383, -8612957, -13392385, -32430052, 5136599, -19230378, -3529697, 330070, -3659409}, + FieldElement{6384877, 2899513, 17807477, 7663917, -2358888, 12363165, 25366522, -8573892, -271295, 12071499}, + FieldElement{-8365515, -4042521, 25133448, -4517355, -6211027, 2265927, -32769618, 1936675, -5159697, 3829363}, + }, + { + FieldElement{28425966, -5835433, -577090, -4697198, -14217555, 6870930, 7921550, -6567787, 26333140, 14267664}, + FieldElement{-11067219, 11871231, 27385719, -10559544, -4585914, -11189312, 10004786, -8709488, -21761224, 8930324}, + FieldElement{-21197785, -16396035, 25654216, -1725397, 12282012, 11008919, 1541940, 4757911, -26491501, -16408940}, + }, + { + FieldElement{13537262, -7759490, -20604840, 10961927, -5922820, -13218065, -13156584, 6217254, -15943699, 13814990}, + FieldElement{-17422573, 15157790, 18705543, 29619, 24409717, -260476, 27361681, 9257833, -1956526, -1776914}, + FieldElement{-25045300, -10191966, 15366585, 15166509, -13105086, 8423556, -29171540, 12361135, -18685978, 4578290}, + }, + { + FieldElement{24579768, 3711570, 1342322, -11180126, -27005135, 14124956, -22544529, 14074919, 21964432, 8235257}, + FieldElement{-6528613, -2411497, 9442966, -5925588, 12025640, -1487420, -2981514, -1669206, 13006806, 2355433}, + FieldElement{-16304899, -13605259, -6632427, -5142349, 16974359, -10911083, 27202044, 1719366, 1141648, -12796236}, + }, + { + FieldElement{-12863944, -13219986, -8318266, -11018091, -6810145, -4843894, 13475066, -3133972, 32674895, 13715045}, + FieldElement{11423335, -5468059, 32344216, 8962751, 24989809, 9241752, -13265253, 16086212, -28740881, -15642093}, + FieldElement{-1409668, 12530728, -6368726, 10847387, 19531186, -14132160, -11709148, 7791794, -27245943, 4383347}, + }, + }, + { + { + FieldElement{-28970898, 5271447, -1266009, -9736989, -12455236, 16732599, -4862407, -4906449, 27193557, 6245191}, + FieldElement{-15193956, 5362278, -1783893, 2695834, 4960227, 12840725, 23061898, 3260492, 22510453, 8577507}, + FieldElement{-12632451, 11257346, -32692994, 13548177, -721004, 10879011, 31168030, 13952092, -29571492, -3635906}, + }, + { + FieldElement{3877321, -9572739, 32416692, 5405324, -11004407, -13656635, 3759769, 11935320, 5611860, 8164018}, + FieldElement{-16275802, 14667797, 15906460, 12155291, -22111149, -9039718, 32003002, -8832289, 5773085, -8422109}, + FieldElement{-23788118, -8254300, 1950875, 8937633, 18686727, 16459170, -905725, 12376320, 31632953, 190926}, + }, + { + FieldElement{-24593607, -16138885, 
-8423991, 13378746, 14162407, 6901328, -8288749, 4508564, -25341555, -3627528}, + FieldElement{8884438, -5884009, 6023974, 10104341, -6881569, -4941533, 18722941, -14786005, -1672488, 827625}, + FieldElement{-32720583, -16289296, -32503547, 7101210, 13354605, 2659080, -1800575, -14108036, -24878478, 1541286}, + }, + { + FieldElement{2901347, -1117687, 3880376, -10059388, -17620940, -3612781, -21802117, -3567481, 20456845, -1885033}, + FieldElement{27019610, 12299467, -13658288, -1603234, -12861660, -4861471, -19540150, -5016058, 29439641, 15138866}, + FieldElement{21536104, -6626420, -32447818, -10690208, -22408077, 5175814, -5420040, -16361163, 7779328, 109896}, + }, + { + FieldElement{30279744, 14648750, -8044871, 6425558, 13639621, -743509, 28698390, 12180118, 23177719, -554075}, + FieldElement{26572847, 3405927, -31701700, 12890905, -19265668, 5335866, -6493768, 2378492, 4439158, -13279347}, + FieldElement{-22716706, 3489070, -9225266, -332753, 18875722, -1140095, 14819434, -12731527, -17717757, -5461437}, + }, + { + FieldElement{-5056483, 16566551, 15953661, 3767752, -10436499, 15627060, -820954, 2177225, 8550082, -15114165}, + FieldElement{-18473302, 16596775, -381660, 15663611, 22860960, 15585581, -27844109, -3582739, -23260460, -8428588}, + FieldElement{-32480551, 15707275, -8205912, -5652081, 29464558, 2713815, -22725137, 15860482, -21902570, 1494193}, + }, + { + FieldElement{-19562091, -14087393, -25583872, -9299552, 13127842, 759709, 21923482, 16529112, 8742704, 12967017}, + FieldElement{-28464899, 1553205, 32536856, -10473729, -24691605, -406174, -8914625, -2933896, -29903758, 15553883}, + FieldElement{21877909, 3230008, 9881174, 10539357, -4797115, 2841332, 11543572, 14513274, 19375923, -12647961}, + }, + { + FieldElement{8832269, -14495485, 13253511, 5137575, 5037871, 4078777, 24880818, -6222716, 2862653, 9455043}, + FieldElement{29306751, 5123106, 20245049, -14149889, 9592566, 8447059, -2077124, -2990080, 15511449, 4789663}, + FieldElement{-20679756, 7004547, 8824831, -9434977, -4045704, -3750736, -5754762, 108893, 23513200, 16652362}, + }, + }, + { + { + FieldElement{-33256173, 4144782, -4476029, -6579123, 10770039, -7155542, -6650416, -12936300, -18319198, 10212860}, + FieldElement{2756081, 8598110, 7383731, -6859892, 22312759, -1105012, 21179801, 2600940, -9988298, -12506466}, + FieldElement{-24645692, 13317462, -30449259, -15653928, 21365574, -10869657, 11344424, 864440, -2499677, -16710063}, + }, + { + FieldElement{-26432803, 6148329, -17184412, -14474154, 18782929, -275997, -22561534, 211300, 2719757, 4940997}, + FieldElement{-1323882, 3911313, -6948744, 14759765, -30027150, 7851207, 21690126, 8518463, 26699843, 5276295}, + FieldElement{-13149873, -6429067, 9396249, 365013, 24703301, -10488939, 1321586, 149635, -15452774, 7159369}, + }, + { + FieldElement{9987780, -3404759, 17507962, 9505530, 9731535, -2165514, 22356009, 8312176, 22477218, -8403385}, + FieldElement{18155857, -16504990, 19744716, 9006923, 15154154, -10538976, 24256460, -4864995, -22548173, 9334109}, + FieldElement{2986088, -4911893, 10776628, -3473844, 10620590, -7083203, -21413845, 14253545, -22587149, 536906}, + }, + { + FieldElement{4377756, 8115836, 24567078, 15495314, 11625074, 13064599, 7390551, 10589625, 10838060, -15420424}, + FieldElement{-19342404, 867880, 9277171, -3218459, -14431572, -1986443, 19295826, -15796950, 6378260, 699185}, + FieldElement{7895026, 4057113, -7081772, -13077756, -17886831, -323126, -716039, 15693155, -5045064, -13373962}, + }, + { + FieldElement{-7737563, -5869402, 
-14566319, -7406919, 11385654, 13201616, 31730678, -10962840, -3918636, -9669325}, + FieldElement{10188286, -15770834, -7336361, 13427543, 22223443, 14896287, 30743455, 7116568, -21786507, 5427593}, + FieldElement{696102, 13206899, 27047647, -10632082, 15285305, -9853179, 10798490, -4578720, 19236243, 12477404}, + }, + { + FieldElement{-11229439, 11243796, -17054270, -8040865, -788228, -8167967, -3897669, 11180504, -23169516, 7733644}, + FieldElement{17800790, -14036179, -27000429, -11766671, 23887827, 3149671, 23466177, -10538171, 10322027, 15313801}, + FieldElement{26246234, 11968874, 32263343, -5468728, 6830755, -13323031, -15794704, -101982, -24449242, 10890804}, + }, + { + FieldElement{-31365647, 10271363, -12660625, -6267268, 16690207, -13062544, -14982212, 16484931, 25180797, -5334884}, + FieldElement{-586574, 10376444, -32586414, -11286356, 19801893, 10997610, 2276632, 9482883, 316878, 13820577}, + FieldElement{-9882808, -4510367, -2115506, 16457136, -11100081, 11674996, 30756178, -7515054, 30696930, -3712849}, + }, + { + FieldElement{32988917, -9603412, 12499366, 7910787, -10617257, -11931514, -7342816, -9985397, -32349517, 7392473}, + FieldElement{-8855661, 15927861, 9866406, -3649411, -2396914, -16655781, -30409476, -9134995, 25112947, -2926644}, + FieldElement{-2504044, -436966, 25621774, -5678772, 15085042, -5479877, -24884878, -13526194, 5537438, -13914319}, + }, + }, + { + { + FieldElement{-11225584, 2320285, -9584280, 10149187, -33444663, 5808648, -14876251, -1729667, 31234590, 6090599}, + FieldElement{-9633316, 116426, 26083934, 2897444, -6364437, -2688086, 609721, 15878753, -6970405, -9034768}, + FieldElement{-27757857, 247744, -15194774, -9002551, 23288161, -10011936, -23869595, 6503646, 20650474, 1804084}, + }, + { + FieldElement{-27589786, 15456424, 8972517, 8469608, 15640622, 4439847, 3121995, -10329713, 27842616, -202328}, + FieldElement{-15306973, 2839644, 22530074, 10026331, 4602058, 5048462, 28248656, 5031932, -11375082, 12714369}, + FieldElement{20807691, -7270825, 29286141, 11421711, -27876523, -13868230, -21227475, 1035546, -19733229, 12796920}, + }, + { + FieldElement{12076899, -14301286, -8785001, -11848922, -25012791, 16400684, -17591495, -12899438, 3480665, -15182815}, + FieldElement{-32361549, 5457597, 28548107, 7833186, 7303070, -11953545, -24363064, -15921875, -33374054, 2771025}, + FieldElement{-21389266, 421932, 26597266, 6860826, 22486084, -6737172, -17137485, -4210226, -24552282, 15673397}, + }, + { + FieldElement{-20184622, 2338216, 19788685, -9620956, -4001265, -8740893, -20271184, 4733254, 3727144, -12934448}, + FieldElement{6120119, 814863, -11794402, -622716, 6812205, -15747771, 2019594, 7975683, 31123697, -10958981}, + FieldElement{30069250, -11435332, 30434654, 2958439, 18399564, -976289, 12296869, 9204260, -16432438, 9648165}, + }, + { + FieldElement{32705432, -1550977, 30705658, 7451065, -11805606, 9631813, 3305266, 5248604, -26008332, -11377501}, + FieldElement{17219865, 2375039, -31570947, -5575615, -19459679, 9219903, 294711, 15298639, 2662509, -16297073}, + FieldElement{-1172927, -7558695, -4366770, -4287744, -21346413, -8434326, 32087529, -1222777, 32247248, -14389861}, + }, + { + FieldElement{14312628, 1221556, 17395390, -8700143, -4945741, -8684635, -28197744, -9637817, -16027623, -13378845}, + FieldElement{-1428825, -9678990, -9235681, 6549687, -7383069, -468664, 23046502, 9803137, 17597934, 2346211}, + FieldElement{18510800, 15337574, 26171504, 981392, -22241552, 7827556, -23491134, -11323352, 3059833, -11782870}, + }, + { + 
FieldElement{10141598, 6082907, 17829293, -1947643, 9830092, 13613136, -25556636, -5544586, -33502212, 3592096}, + FieldElement{33114168, -15889352, -26525686, -13343397, 33076705, 8716171, 1151462, 1521897, -982665, -6837803}, + FieldElement{-32939165, -4255815, 23947181, -324178, -33072974, -12305637, -16637686, 3891704, 26353178, 693168}, + }, + { + FieldElement{30374239, 1595580, -16884039, 13186931, 4600344, 406904, 9585294, -400668, 31375464, 14369965}, + FieldElement{-14370654, -7772529, 1510301, 6434173, -18784789, -6262728, 32732230, -13108839, 17901441, 16011505}, + FieldElement{18171223, -11934626, -12500402, 15197122, -11038147, -15230035, -19172240, -16046376, 8764035, 12309598}, + }, + }, + { + { + FieldElement{5975908, -5243188, -19459362, -9681747, -11541277, 14015782, -23665757, 1228319, 17544096, -10593782}, + FieldElement{5811932, -1715293, 3442887, -2269310, -18367348, -8359541, -18044043, -15410127, -5565381, 12348900}, + FieldElement{-31399660, 11407555, 25755363, 6891399, -3256938, 14872274, -24849353, 8141295, -10632534, -585479}, + }, + { + FieldElement{-12675304, 694026, -5076145, 13300344, 14015258, -14451394, -9698672, -11329050, 30944593, 1130208}, + FieldElement{8247766, -6710942, -26562381, -7709309, -14401939, -14648910, 4652152, 2488540, 23550156, -271232}, + FieldElement{17294316, -3788438, 7026748, 15626851, 22990044, 113481, 2267737, -5908146, -408818, -137719}, + }, + { + FieldElement{16091085, -16253926, 18599252, 7340678, 2137637, -1221657, -3364161, 14550936, 3260525, -7166271}, + FieldElement{-4910104, -13332887, 18550887, 10864893, -16459325, -7291596, -23028869, -13204905, -12748722, 2701326}, + FieldElement{-8574695, 16099415, 4629974, -16340524, -20786213, -6005432, -10018363, 9276971, 11329923, 1862132}, + }, + { + FieldElement{14763076, -15903608, -30918270, 3689867, 3511892, 10313526, -21951088, 12219231, -9037963, -940300}, + FieldElement{8894987, -3446094, 6150753, 3013931, 301220, 15693451, -31981216, -2909717, -15438168, 11595570}, + FieldElement{15214962, 3537601, -26238722, -14058872, 4418657, -15230761, 13947276, 10730794, -13489462, -4363670}, + }, + { + FieldElement{-2538306, 7682793, 32759013, 263109, -29984731, -7955452, -22332124, -10188635, 977108, 699994}, + FieldElement{-12466472, 4195084, -9211532, 550904, -15565337, 12917920, 19118110, -439841, -30534533, -14337913}, + FieldElement{31788461, -14507657, 4799989, 7372237, 8808585, -14747943, 9408237, -10051775, 12493932, -5409317}, + }, + { + FieldElement{-25680606, 5260744, -19235809, -6284470, -3695942, 16566087, 27218280, 2607121, 29375955, 6024730}, + FieldElement{842132, -2794693, -4763381, -8722815, 26332018, -12405641, 11831880, 6985184, -9940361, 2854096}, + FieldElement{-4847262, -7969331, 2516242, -5847713, 9695691, -7221186, 16512645, 960770, 12121869, 16648078}, + }, + { + FieldElement{-15218652, 14667096, -13336229, 2013717, 30598287, -464137, -31504922, -7882064, 20237806, 2838411}, + FieldElement{-19288047, 4453152, 15298546, -16178388, 22115043, -15972604, 12544294, -13470457, 1068881, -12499905}, + FieldElement{-9558883, -16518835, 33238498, 13506958, 30505848, -1114596, -8486907, -2630053, 12521378, 4845654}, + }, + { + FieldElement{-28198521, 10744108, -2958380, 10199664, 7759311, -13088600, 3409348, -873400, -6482306, -12885870}, + FieldElement{-23561822, 6230156, -20382013, 10655314, -24040585, -11621172, 10477734, -1240216, -3113227, 13974498}, + FieldElement{12966261, 15550616, -32038948, -1615346, 21025980, -629444, 5642325, 7188737, 18895762, 12629579}, 
+ }, + }, + { + { + FieldElement{14741879, -14946887, 22177208, -11721237, 1279741, 8058600, 11758140, 789443, 32195181, 3895677}, + FieldElement{10758205, 15755439, -4509950, 9243698, -4879422, 6879879, -2204575, -3566119, -8982069, 4429647}, + FieldElement{-2453894, 15725973, -20436342, -10410672, -5803908, -11040220, -7135870, -11642895, 18047436, -15281743}, + }, + { + FieldElement{-25173001, -11307165, 29759956, 11776784, -22262383, -15820455, 10993114, -12850837, -17620701, -9408468}, + FieldElement{21987233, 700364, -24505048, 14972008, -7774265, -5718395, 32155026, 2581431, -29958985, 8773375}, + FieldElement{-25568350, 454463, -13211935, 16126715, 25240068, 8594567, 20656846, 12017935, -7874389, -13920155}, + }, + { + FieldElement{6028182, 6263078, -31011806, -11301710, -818919, 2461772, -31841174, -5468042, -1721788, -2776725}, + FieldElement{-12278994, 16624277, 987579, -5922598, 32908203, 1248608, 7719845, -4166698, 28408820, 6816612}, + FieldElement{-10358094, -8237829, 19549651, -12169222, 22082623, 16147817, 20613181, 13982702, -10339570, 5067943}, + }, + { + FieldElement{-30505967, -3821767, 12074681, 13582412, -19877972, 2443951, -19719286, 12746132, 5331210, -10105944}, + FieldElement{30528811, 3601899, -1957090, 4619785, -27361822, -15436388, 24180793, -12570394, 27679908, -1648928}, + FieldElement{9402404, -13957065, 32834043, 10838634, -26580150, -13237195, 26653274, -8685565, 22611444, -12715406}, + }, + { + FieldElement{22190590, 1118029, 22736441, 15130463, -30460692, -5991321, 19189625, -4648942, 4854859, 6622139}, + FieldElement{-8310738, -2953450, -8262579, -3388049, -10401731, -271929, 13424426, -3567227, 26404409, 13001963}, + FieldElement{-31241838, -15415700, -2994250, 8939346, 11562230, -12840670, -26064365, -11621720, -15405155, 11020693}, + }, + { + FieldElement{1866042, -7949489, -7898649, -10301010, 12483315, 13477547, 3175636, -12424163, 28761762, 1406734}, + FieldElement{-448555, -1777666, 13018551, 3194501, -9580420, -11161737, 24760585, -4347088, 25577411, -13378680}, + FieldElement{-24290378, 4759345, -690653, -1852816, 2066747, 10693769, -29595790, 9884936, -9368926, 4745410}, + }, + { + FieldElement{-9141284, 6049714, -19531061, -4341411, -31260798, 9944276, -15462008, -11311852, 10931924, -11931931}, + FieldElement{-16561513, 14112680, -8012645, 4817318, -8040464, -11414606, -22853429, 10856641, -20470770, 13434654}, + FieldElement{22759489, -10073434, -16766264, -1871422, 13637442, -10168091, 1765144, -12654326, 28445307, -5364710}, + }, + { + FieldElement{29875063, 12493613, 2795536, -3786330, 1710620, 15181182, -10195717, -8788675, 9074234, 1167180}, + FieldElement{-26205683, 11014233, -9842651, -2635485, -26908120, 7532294, -18716888, -9535498, 3843903, 9367684}, + FieldElement{-10969595, -6403711, 9591134, 9582310, 11349256, 108879, 16235123, 8601684, -139197, 4242895}, + }, + }, + { + { + FieldElement{22092954, -13191123, -2042793, -11968512, 32186753, -11517388, -6574341, 2470660, -27417366, 16625501}, + FieldElement{-11057722, 3042016, 13770083, -9257922, 584236, -544855, -7770857, 2602725, -27351616, 14247413}, + FieldElement{6314175, -10264892, -32772502, 15957557, -10157730, 168750, -8618807, 14290061, 27108877, -1180880}, + }, + { + FieldElement{-8586597, -7170966, 13241782, 10960156, -32991015, -13794596, 33547976, -11058889, -27148451, 981874}, + FieldElement{22833440, 9293594, -32649448, -13618667, -9136966, 14756819, -22928859, -13970780, -10479804, -16197962}, + FieldElement{-7768587, 3326786, -28111797, 10783824, 19178761, 
14905060, 22680049, 13906969, -15933690, 3797899}, + }, + { + FieldElement{21721356, -4212746, -12206123, 9310182, -3882239, -13653110, 23740224, -2709232, 20491983, -8042152}, + FieldElement{9209270, -15135055, -13256557, -6167798, -731016, 15289673, 25947805, 15286587, 30997318, -6703063}, + FieldElement{7392032, 16618386, 23946583, -8039892, -13265164, -1533858, -14197445, -2321576, 17649998, -250080}, + }, + { + FieldElement{-9301088, -14193827, 30609526, -3049543, -25175069, -1283752, -15241566, -9525724, -2233253, 7662146}, + FieldElement{-17558673, 1763594, -33114336, 15908610, -30040870, -12174295, 7335080, -8472199, -3174674, 3440183}, + FieldElement{-19889700, -5977008, -24111293, -9688870, 10799743, -16571957, 40450, -4431835, 4862400, 1133}, + }, + { + FieldElement{-32856209, -7873957, -5422389, 14860950, -16319031, 7956142, 7258061, 311861, -30594991, -7379421}, + FieldElement{-3773428, -1565936, 28985340, 7499440, 24445838, 9325937, 29727763, 16527196, 18278453, 15405622}, + FieldElement{-4381906, 8508652, -19898366, -3674424, -5984453, 15149970, -13313598, 843523, -21875062, 13626197}, + }, + { + FieldElement{2281448, -13487055, -10915418, -2609910, 1879358, 16164207, -10783882, 3953792, 13340839, 15928663}, + FieldElement{31727126, -7179855, -18437503, -8283652, 2875793, -16390330, -25269894, -7014826, -23452306, 5964753}, + FieldElement{4100420, -5959452, -17179337, 6017714, -18705837, 12227141, -26684835, 11344144, 2538215, -7570755}, + }, + { + FieldElement{-9433605, 6123113, 11159803, -2156608, 30016280, 14966241, -20474983, 1485421, -629256, -15958862}, + FieldElement{-26804558, 4260919, 11851389, 9658551, -32017107, 16367492, -20205425, -13191288, 11659922, -11115118}, + FieldElement{26180396, 10015009, -30844224, -8581293, 5418197, 9480663, 2231568, -10170080, 33100372, -1306171}, + }, + { + FieldElement{15121113, -5201871, -10389905, 15427821, -27509937, -15992507, 21670947, 4486675, -5931810, -14466380}, + FieldElement{16166486, -9483733, -11104130, 6023908, -31926798, -1364923, 2340060, -16254968, -10735770, -10039824}, + FieldElement{28042865, -3557089, -12126526, 12259706, -3717498, -6945899, 6766453, -8689599, 18036436, 5803270}, + }, + }, + { + { + FieldElement{-817581, 6763912, 11803561, 1585585, 10958447, -2671165, 23855391, 4598332, -6159431, -14117438}, + FieldElement{-31031306, -14256194, 17332029, -2383520, 31312682, -5967183, 696309, 50292, -20095739, 11763584}, + FieldElement{-594563, -2514283, -32234153, 12643980, 12650761, 14811489, 665117, -12613632, -19773211, -10713562}, + }, + { + FieldElement{30464590, -11262872, -4127476, -12734478, 19835327, -7105613, -24396175, 2075773, -17020157, 992471}, + FieldElement{18357185, -6994433, 7766382, 16342475, -29324918, 411174, 14578841, 8080033, -11574335, -10601610}, + FieldElement{19598397, 10334610, 12555054, 2555664, 18821899, -10339780, 21873263, 16014234, 26224780, 16452269}, + }, + { + FieldElement{-30223925, 5145196, 5944548, 16385966, 3976735, 2009897, -11377804, -7618186, -20533829, 3698650}, + FieldElement{14187449, 3448569, -10636236, -10810935, -22663880, -3433596, 7268410, -10890444, 27394301, 12015369}, + FieldElement{19695761, 16087646, 28032085, 12999827, 6817792, 11427614, 20244189, -1312777, -13259127, -3402461}, + }, + { + FieldElement{30860103, 12735208, -1888245, -4699734, -16974906, 2256940, -8166013, 12298312, -8550524, -10393462}, + FieldElement{-5719826, -11245325, -1910649, 15569035, 26642876, -7587760, -5789354, -15118654, -4976164, 12651793}, + FieldElement{-2848395, 9953421, 
11531313, -5282879, 26895123, -12697089, -13118820, -16517902, 9768698, -2533218}, + }, + { + FieldElement{-24719459, 1894651, -287698, -4704085, 15348719, -8156530, 32767513, 12765450, 4940095, 10678226}, + FieldElement{18860224, 15980149, -18987240, -1562570, -26233012, -11071856, -7843882, 13944024, -24372348, 16582019}, + FieldElement{-15504260, 4970268, -29893044, 4175593, -20993212, -2199756, -11704054, 15444560, -11003761, 7989037}, + }, + { + FieldElement{31490452, 5568061, -2412803, 2182383, -32336847, 4531686, -32078269, 6200206, -19686113, -14800171}, + FieldElement{-17308668, -15879940, -31522777, -2831, -32887382, 16375549, 8680158, -16371713, 28550068, -6857132}, + FieldElement{-28126887, -5688091, 16837845, -1820458, -6850681, 12700016, -30039981, 4364038, 1155602, 5988841}, + }, + { + FieldElement{21890435, -13272907, -12624011, 12154349, -7831873, 15300496, 23148983, -4470481, 24618407, 8283181}, + FieldElement{-33136107, -10512751, 9975416, 6841041, -31559793, 16356536, 3070187, -7025928, 1466169, 10740210}, + FieldElement{-1509399, -15488185, -13503385, -10655916, 32799044, 909394, -13938903, -5779719, -32164649, -15327040}, + }, + { + FieldElement{3960823, -14267803, -28026090, -15918051, -19404858, 13146868, 15567327, 951507, -3260321, -573935}, + FieldElement{24740841, 5052253, -30094131, 8961361, 25877428, 6165135, -24368180, 14397372, -7380369, -6144105}, + FieldElement{-28888365, 3510803, -28103278, -1158478, -11238128, -10631454, -15441463, -14453128, -1625486, -6494814}, + }, + }, + { + { + FieldElement{793299, -9230478, 8836302, -6235707, -27360908, -2369593, 33152843, -4885251, -9906200, -621852}, + FieldElement{5666233, 525582, 20782575, -8038419, -24538499, 14657740, 16099374, 1468826, -6171428, -15186581}, + FieldElement{-4859255, -3779343, -2917758, -6748019, 7778750, 11688288, -30404353, -9871238, -1558923, -9863646}, + }, + { + FieldElement{10896332, -7719704, 824275, 472601, -19460308, 3009587, 25248958, 14783338, -30581476, -15757844}, + FieldElement{10566929, 12612572, -31944212, 11118703, -12633376, 12362879, 21752402, 8822496, 24003793, 14264025}, + FieldElement{27713862, -7355973, -11008240, 9227530, 27050101, 2504721, 23886875, -13117525, 13958495, -5732453}, + }, + { + FieldElement{-23481610, 4867226, -27247128, 3900521, 29838369, -8212291, -31889399, -10041781, 7340521, -15410068}, + FieldElement{4646514, -8011124, -22766023, -11532654, 23184553, 8566613, 31366726, -1381061, -15066784, -10375192}, + FieldElement{-17270517, 12723032, -16993061, 14878794, 21619651, -6197576, 27584817, 3093888, -8843694, 3849921}, + }, + { + FieldElement{-9064912, 2103172, 25561640, -15125738, -5239824, 9582958, 32477045, -9017955, 5002294, -15550259}, + FieldElement{-12057553, -11177906, 21115585, -13365155, 8808712, -12030708, 16489530, 13378448, -25845716, 12741426}, + FieldElement{-5946367, 10645103, -30911586, 15390284, -3286982, -7118677, 24306472, 15852464, 28834118, -7646072}, + }, + { + FieldElement{-17335748, -9107057, -24531279, 9434953, -8472084, -583362, -13090771, 455841, 20461858, 5491305}, + FieldElement{13669248, -16095482, -12481974, -10203039, -14569770, -11893198, -24995986, 11293807, -28588204, -9421832}, + FieldElement{28497928, 6272777, -33022994, 14470570, 8906179, -1225630, 18504674, -14165166, 29867745, -8795943}, + }, + { + FieldElement{-16207023, 13517196, -27799630, -13697798, 24009064, -6373891, -6367600, -13175392, 22853429, -4012011}, + FieldElement{24191378, 16712145, -13931797, 15217831, 14542237, 1646131, 18603514, -11037887, 
12876623, -2112447}, + FieldElement{17902668, 4518229, -411702, -2829247, 26878217, 5258055, -12860753, 608397, 16031844, 3723494}, + }, + { + FieldElement{-28632773, 12763728, -20446446, 7577504, 33001348, -13017745, 17558842, -7872890, 23896954, -4314245}, + FieldElement{-20005381, -12011952, 31520464, 605201, 2543521, 5991821, -2945064, 7229064, -9919646, -8826859}, + FieldElement{28816045, 298879, -28165016, -15920938, 19000928, -1665890, -12680833, -2949325, -18051778, -2082915}, + }, + { + FieldElement{16000882, -344896, 3493092, -11447198, -29504595, -13159789, 12577740, 16041268, -19715240, 7847707}, + FieldElement{10151868, 10572098, 27312476, 7922682, 14825339, 4723128, -32855931, -6519018, -10020567, 3852848}, + FieldElement{-11430470, 15697596, -21121557, -4420647, 5386314, 15063598, 16514493, -15932110, 29330899, -15076224}, + }, + }, + { + { + FieldElement{-25499735, -4378794, -15222908, -6901211, 16615731, 2051784, 3303702, 15490, -27548796, 12314391}, + FieldElement{15683520, -6003043, 18109120, -9980648, 15337968, -5997823, -16717435, 15921866, 16103996, -3731215}, + FieldElement{-23169824, -10781249, 13588192, -1628807, -3798557, -1074929, -19273607, 5402699, -29815713, -9841101}, + }, + { + FieldElement{23190676, 2384583, -32714340, 3462154, -29903655, -1529132, -11266856, 8911517, -25205859, 2739713}, + FieldElement{21374101, -3554250, -33524649, 9874411, 15377179, 11831242, -33529904, 6134907, 4931255, 11987849}, + FieldElement{-7732, -2978858, -16223486, 7277597, 105524, -322051, -31480539, 13861388, -30076310, 10117930}, + }, + { + FieldElement{-29501170, -10744872, -26163768, 13051539, -25625564, 5089643, -6325503, 6704079, 12890019, 15728940}, + FieldElement{-21972360, -11771379, -951059, -4418840, 14704840, 2695116, 903376, -10428139, 12885167, 8311031}, + FieldElement{-17516482, 5352194, 10384213, -13811658, 7506451, 13453191, 26423267, 4384730, 1888765, -5435404}, + }, + { + FieldElement{-25817338, -3107312, -13494599, -3182506, 30896459, -13921729, -32251644, -12707869, -19464434, -3340243}, + FieldElement{-23607977, -2665774, -526091, 4651136, 5765089, 4618330, 6092245, 14845197, 17151279, -9854116}, + FieldElement{-24830458, -12733720, -15165978, 10367250, -29530908, -265356, 22825805, -7087279, -16866484, 16176525}, + }, + { + FieldElement{-23583256, 6564961, 20063689, 3798228, -4740178, 7359225, 2006182, -10363426, -28746253, -10197509}, + FieldElement{-10626600, -4486402, -13320562, -5125317, 3432136, -6393229, 23632037, -1940610, 32808310, 1099883}, + FieldElement{15030977, 5768825, -27451236, -2887299, -6427378, -15361371, -15277896, -6809350, 2051441, -15225865}, + }, + { + FieldElement{-3362323, -7239372, 7517890, 9824992, 23555850, 295369, 5148398, -14154188, -22686354, 16633660}, + FieldElement{4577086, -16752288, 13249841, -15304328, 19958763, -14537274, 18559670, -10759549, 8402478, -9864273}, + FieldElement{-28406330, -1051581, -26790155, -907698, -17212414, -11030789, 9453451, -14980072, 17983010, 9967138}, + }, + { + FieldElement{-25762494, 6524722, 26585488, 9969270, 24709298, 1220360, -1677990, 7806337, 17507396, 3651560}, + FieldElement{-10420457, -4118111, 14584639, 15971087, -15768321, 8861010, 26556809, -5574557, -18553322, -11357135}, + FieldElement{2839101, 14284142, 4029895, 3472686, 14402957, 12689363, -26642121, 8459447, -5605463, -7621941}, + }, + { + FieldElement{-4839289, -3535444, 9744961, 2871048, 25113978, 3187018, -25110813, -849066, 17258084, -7977739}, + FieldElement{18164541, -10595176, -17154882, -1542417, 19237078, 
-9745295, 23357533, -15217008, 26908270, 12150756}, + FieldElement{-30264870, -7647865, 5112249, -7036672, -1499807, -6974257, 43168, -5537701, -32302074, 16215819}, + }, + }, + { + { + FieldElement{-6898905, 9824394, -12304779, -4401089, -31397141, -6276835, 32574489, 12532905, -7503072, -8675347}, + FieldElement{-27343522, -16515468, -27151524, -10722951, 946346, 16291093, 254968, 7168080, 21676107, -1943028}, + FieldElement{21260961, -8424752, -16831886, -11920822, -23677961, 3968121, -3651949, -6215466, -3556191, -7913075}, + }, + { + FieldElement{16544754, 13250366, -16804428, 15546242, -4583003, 12757258, -2462308, -8680336, -18907032, -9662799}, + FieldElement{-2415239, -15577728, 18312303, 4964443, -15272530, -12653564, 26820651, 16690659, 25459437, -4564609}, + FieldElement{-25144690, 11425020, 28423002, -11020557, -6144921, -15826224, 9142795, -2391602, -6432418, -1644817}, + }, + { + FieldElement{-23104652, 6253476, 16964147, -3768872, -25113972, -12296437, -27457225, -16344658, 6335692, 7249989}, + FieldElement{-30333227, 13979675, 7503222, -12368314, -11956721, -4621693, -30272269, 2682242, 25993170, -12478523}, + FieldElement{4364628, 5930691, 32304656, -10044554, -8054781, 15091131, 22857016, -10598955, 31820368, 15075278}, + }, + { + FieldElement{31879134, -8918693, 17258761, 90626, -8041836, -4917709, 24162788, -9650886, -17970238, 12833045}, + FieldElement{19073683, 14851414, -24403169, -11860168, 7625278, 11091125, -19619190, 2074449, -9413939, 14905377}, + FieldElement{24483667, -11935567, -2518866, -11547418, -1553130, 15355506, -25282080, 9253129, 27628530, -7555480}, + }, + { + FieldElement{17597607, 8340603, 19355617, 552187, 26198470, -3176583, 4593324, -9157582, -14110875, 15297016}, + FieldElement{510886, 14337390, -31785257, 16638632, 6328095, 2713355, -20217417, -11864220, 8683221, 2921426}, + FieldElement{18606791, 11874196, 27155355, -5281482, -24031742, 6265446, -25178240, -1278924, 4674690, 13890525}, + }, + { + FieldElement{13609624, 13069022, -27372361, -13055908, 24360586, 9592974, 14977157, 9835105, 4389687, 288396}, + FieldElement{9922506, -519394, 13613107, 5883594, -18758345, -434263, -12304062, 8317628, 23388070, 16052080}, + FieldElement{12720016, 11937594, -31970060, -5028689, 26900120, 8561328, -20155687, -11632979, -14754271, -10812892}, + }, + { + FieldElement{15961858, 14150409, 26716931, -665832, -22794328, 13603569, 11829573, 7467844, -28822128, 929275}, + FieldElement{11038231, -11582396, -27310482, -7316562, -10498527, -16307831, -23479533, -9371869, -21393143, 2465074}, + FieldElement{20017163, -4323226, 27915242, 1529148, 12396362, 15675764, 13817261, -9658066, 2463391, -4622140}, + }, + { + FieldElement{-16358878, -12663911, -12065183, 4996454, -1256422, 1073572, 9583558, 12851107, 4003896, 12673717}, + FieldElement{-1731589, -15155870, -3262930, 16143082, 19294135, 13385325, 14741514, -9103726, 7903886, 2348101}, + FieldElement{24536016, -16515207, 12715592, -3862155, 1511293, 10047386, -3842346, -7129159, -28377538, 10048127}, + }, + }, + { + { + FieldElement{-12622226, -6204820, 30718825, 2591312, -10617028, 12192840, 18873298, -7297090, -32297756, 15221632}, + FieldElement{-26478122, -11103864, 11546244, -1852483, 9180880, 7656409, -21343950, 2095755, 29769758, 6593415}, + FieldElement{-31994208, -2907461, 4176912, 3264766, 12538965, -868111, 26312345, -6118678, 30958054, 8292160}, + }, + { + FieldElement{31429822, -13959116, 29173532, 15632448, 12174511, -2760094, 32808831, 3977186, 26143136, -3148876}, + FieldElement{22648901, 
1402143, -22799984, 13746059, 7936347, 365344, -8668633, -1674433, -3758243, -2304625}, + FieldElement{-15491917, 8012313, -2514730, -12702462, -23965846, -10254029, -1612713, -1535569, -16664475, 8194478}, + }, + { + FieldElement{27338066, -7507420, -7414224, 10140405, -19026427, -6589889, 27277191, 8855376, 28572286, 3005164}, + FieldElement{26287124, 4821776, 25476601, -4145903, -3764513, -15788984, -18008582, 1182479, -26094821, -13079595}, + FieldElement{-7171154, 3178080, 23970071, 6201893, -17195577, -4489192, -21876275, -13982627, 32208683, -1198248}, + }, + { + FieldElement{-16657702, 2817643, -10286362, 14811298, 6024667, 13349505, -27315504, -10497842, -27672585, -11539858}, + FieldElement{15941029, -9405932, -21367050, 8062055, 31876073, -238629, -15278393, -1444429, 15397331, -4130193}, + FieldElement{8934485, -13485467, -23286397, -13423241, -32446090, 14047986, 31170398, -1441021, -27505566, 15087184}, + }, + { + FieldElement{-18357243, -2156491, 24524913, -16677868, 15520427, -6360776, -15502406, 11461896, 16788528, -5868942}, + FieldElement{-1947386, 16013773, 21750665, 3714552, -17401782, -16055433, -3770287, -10323320, 31322514, -11615635}, + FieldElement{21426655, -5650218, -13648287, -5347537, -28812189, -4920970, -18275391, -14621414, 13040862, -12112948}, + }, + { + FieldElement{11293895, 12478086, -27136401, 15083750, -29307421, 14748872, 14555558, -13417103, 1613711, 4896935}, + FieldElement{-25894883, 15323294, -8489791, -8057900, 25967126, -13425460, 2825960, -4897045, -23971776, -11267415}, + FieldElement{-15924766, -5229880, -17443532, 6410664, 3622847, 10243618, 20615400, 12405433, -23753030, -8436416}, + }, + { + FieldElement{-7091295, 12556208, -20191352, 9025187, -17072479, 4333801, 4378436, 2432030, 23097949, -566018}, + FieldElement{4565804, -16025654, 20084412, -7842817, 1724999, 189254, 24767264, 10103221, -18512313, 2424778}, + FieldElement{366633, -11976806, 8173090, -6890119, 30788634, 5745705, -7168678, 1344109, -3642553, 12412659}, + }, + { + FieldElement{-24001791, 7690286, 14929416, -168257, -32210835, -13412986, 24162697, -15326504, -3141501, 11179385}, + FieldElement{18289522, -14724954, 8056945, 16430056, -21729724, 7842514, -6001441, -1486897, -18684645, -11443503}, + FieldElement{476239, 6601091, -6152790, -9723375, 17503545, -4863900, 27672959, 13403813, 11052904, 5219329}, + }, + }, + { + { + FieldElement{20678546, -8375738, -32671898, 8849123, -5009758, 14574752, 31186971, -3973730, 9014762, -8579056}, + FieldElement{-13644050, -10350239, -15962508, 5075808, -1514661, -11534600, -33102500, 9160280, 8473550, -3256838}, + FieldElement{24900749, 14435722, 17209120, -15292541, -22592275, 9878983, -7689309, -16335821, -24568481, 11788948}, + }, + { + FieldElement{-3118155, -11395194, -13802089, 14797441, 9652448, -6845904, -20037437, 10410733, -24568470, -1458691}, + FieldElement{-15659161, 16736706, -22467150, 10215878, -9097177, 7563911, 11871841, -12505194, -18513325, 8464118}, + FieldElement{-23400612, 8348507, -14585951, -861714, -3950205, -6373419, 14325289, 8628612, 33313881, -8370517}, + }, + { + FieldElement{-20186973, -4967935, 22367356, 5271547, -1097117, -4788838, -24805667, -10236854, -8940735, -5818269}, + FieldElement{-6948785, -1795212, -32625683, -16021179, 32635414, -7374245, 15989197, -12838188, 28358192, -4253904}, + FieldElement{-23561781, -2799059, -32351682, -1661963, -9147719, 10429267, -16637684, 4072016, -5351664, 5596589}, + }, + { + FieldElement{-28236598, -3390048, 12312896, 6213178, 3117142, 16078565, 29266239, 
2557221, 1768301, 15373193}, + FieldElement{-7243358, -3246960, -4593467, -7553353, -127927, -912245, -1090902, -4504991, -24660491, 3442910}, + FieldElement{-30210571, 5124043, 14181784, 8197961, 18964734, -11939093, 22597931, 7176455, -18585478, 13365930}, + }, + { + FieldElement{-7877390, -1499958, 8324673, 4690079, 6261860, 890446, 24538107, -8570186, -9689599, -3031667}, + FieldElement{25008904, -10771599, -4305031, -9638010, 16265036, 15721635, 683793, -11823784, 15723479, -15163481}, + FieldElement{-9660625, 12374379, -27006999, -7026148, -7724114, -12314514, 11879682, 5400171, 519526, -1235876}, + }, + { + FieldElement{22258397, -16332233, -7869817, 14613016, -22520255, -2950923, -20353881, 7315967, 16648397, 7605640}, + FieldElement{-8081308, -8464597, -8223311, 9719710, 19259459, -15348212, 23994942, -5281555, -9468848, 4763278}, + FieldElement{-21699244, 9220969, -15730624, 1084137, -25476107, -2852390, 31088447, -7764523, -11356529, 728112}, + }, + { + FieldElement{26047220, -11751471, -6900323, -16521798, 24092068, 9158119, -4273545, -12555558, -29365436, -5498272}, + FieldElement{17510331, -322857, 5854289, 8403524, 17133918, -3112612, -28111007, 12327945, 10750447, 10014012}, + FieldElement{-10312768, 3936952, 9156313, -8897683, 16498692, -994647, -27481051, -666732, 3424691, 7540221}, + }, + { + FieldElement{30322361, -6964110, 11361005, -4143317, 7433304, 4989748, -7071422, -16317219, -9244265, 15258046}, + FieldElement{13054562, -2779497, 19155474, 469045, -12482797, 4566042, 5631406, 2711395, 1062915, -5136345}, + FieldElement{-19240248, -11254599, -29509029, -7499965, -5835763, 13005411, -6066489, 12194497, 32960380, 1459310}, + }, + }, + { + { + FieldElement{19852034, 7027924, 23669353, 10020366, 8586503, -6657907, 394197, -6101885, 18638003, -11174937}, + FieldElement{31395534, 15098109, 26581030, 8030562, -16527914, -5007134, 9012486, -7584354, -6643087, -5442636}, + FieldElement{-9192165, -2347377, -1997099, 4529534, 25766844, 607986, -13222, 9677543, -32294889, -6456008}, + }, + { + FieldElement{-2444496, -149937, 29348902, 8186665, 1873760, 12489863, -30934579, -7839692, -7852844, -8138429}, + FieldElement{-15236356, -15433509, 7766470, 746860, 26346930, -10221762, -27333451, 10754588, -9431476, 5203576}, + FieldElement{31834314, 14135496, -770007, 5159118, 20917671, -16768096, -7467973, -7337524, 31809243, 7347066}, + }, + { + FieldElement{-9606723, -11874240, 20414459, 13033986, 13716524, -11691881, 19797970, -12211255, 15192876, -2087490}, + FieldElement{-12663563, -2181719, 1168162, -3804809, 26747877, -14138091, 10609330, 12694420, 33473243, -13382104}, + FieldElement{33184999, 11180355, 15832085, -11385430, -1633671, 225884, 15089336, -11023903, -6135662, 14480053}, + }, + { + FieldElement{31308717, -5619998, 31030840, -1897099, 15674547, -6582883, 5496208, 13685227, 27595050, 8737275}, + FieldElement{-20318852, -15150239, 10933843, -16178022, 8335352, -7546022, -31008351, -12610604, 26498114, 66511}, + FieldElement{22644454, -8761729, -16671776, 4884562, -3105614, -13559366, 30540766, -4286747, -13327787, -7515095}, + }, + { + FieldElement{-28017847, 9834845, 18617207, -2681312, -3401956, -13307506, 8205540, 13585437, -17127465, 15115439}, + FieldElement{23711543, -672915, 31206561, -8362711, 6164647, -9709987, -33535882, -1426096, 8236921, 16492939}, + FieldElement{-23910559, -13515526, -26299483, -4503841, 25005590, -7687270, 19574902, 10071562, 6708380, -6222424}, + }, + { + FieldElement{2101391, -4930054, 19702731, 2367575, -15427167, 1047675, 5301017, 
9328700, 29955601, -11678310}, + FieldElement{3096359, 9271816, -21620864, -15521844, -14847996, -7592937, -25892142, -12635595, -9917575, 6216608}, + FieldElement{-32615849, 338663, -25195611, 2510422, -29213566, -13820213, 24822830, -6146567, -26767480, 7525079}, + }, + { + FieldElement{-23066649, -13985623, 16133487, -7896178, -3389565, 778788, -910336, -2782495, -19386633, 11994101}, + FieldElement{21691500, -13624626, -641331, -14367021, 3285881, -3483596, -25064666, 9718258, -7477437, 13381418}, + FieldElement{18445390, -4202236, 14979846, 11622458, -1727110, -3582980, 23111648, -6375247, 28535282, 15779576}, + }, + { + FieldElement{30098053, 3089662, -9234387, 16662135, -21306940, 11308411, -14068454, 12021730, 9955285, -16303356}, + FieldElement{9734894, -14576830, -7473633, -9138735, 2060392, 11313496, -18426029, 9924399, 20194861, 13380996}, + FieldElement{-26378102, -7965207, -22167821, 15789297, -18055342, -6168792, -1984914, 15707771, 26342023, 10146099}, + }, + }, + { + { + FieldElement{-26016874, -219943, 21339191, -41388, 19745256, -2878700, -29637280, 2227040, 21612326, -545728}, + FieldElement{-13077387, 1184228, 23562814, -5970442, -20351244, -6348714, 25764461, 12243797, -20856566, 11649658}, + FieldElement{-10031494, 11262626, 27384172, 2271902, 26947504, -15997771, 39944, 6114064, 33514190, 2333242}, + }, + { + FieldElement{-21433588, -12421821, 8119782, 7219913, -21830522, -9016134, -6679750, -12670638, 24350578, -13450001}, + FieldElement{-4116307, -11271533, -23886186, 4843615, -30088339, 690623, -31536088, -10406836, 8317860, 12352766}, + FieldElement{18200138, -14475911, -33087759, -2696619, -23702521, -9102511, -23552096, -2287550, 20712163, 6719373}, + }, + { + FieldElement{26656208, 6075253, -7858556, 1886072, -28344043, 4262326, 11117530, -3763210, 26224235, -3297458}, + FieldElement{-17168938, -14854097, -3395676, -16369877, -19954045, 14050420, 21728352, 9493610, 18620611, -16428628}, + FieldElement{-13323321, 13325349, 11432106, 5964811, 18609221, 6062965, -5269471, -9725556, -30701573, -16479657}, + }, + { + FieldElement{-23860538, -11233159, 26961357, 1640861, -32413112, -16737940, 12248509, -5240639, 13735342, 1934062}, + FieldElement{25089769, 6742589, 17081145, -13406266, 21909293, -16067981, -15136294, -3765346, -21277997, 5473616}, + FieldElement{31883677, -7961101, 1083432, -11572403, 22828471, 13290673, -7125085, 12469656, 29111212, -5451014}, + }, + { + FieldElement{24244947, -15050407, -26262976, 2791540, -14997599, 16666678, 24367466, 6388839, -10295587, 452383}, + FieldElement{-25640782, -3417841, 5217916, 16224624, 19987036, -4082269, -24236251, -5915248, 15766062, 8407814}, + FieldElement{-20406999, 13990231, 15495425, 16395525, 5377168, 15166495, -8917023, -4388953, -8067909, 2276718}, + }, + { + FieldElement{30157918, 12924066, -17712050, 9245753, 19895028, 3368142, -23827587, 5096219, 22740376, -7303417}, + FieldElement{2041139, -14256350, 7783687, 13876377, -25946985, -13352459, 24051124, 13742383, -15637599, 13295222}, + FieldElement{33338237, -8505733, 12532113, 7977527, 9106186, -1715251, -17720195, -4612972, -4451357, -14669444}, + }, + { + FieldElement{-20045281, 5454097, -14346548, 6447146, 28862071, 1883651, -2469266, -4141880, 7770569, 9620597}, + FieldElement{23208068, 7979712, 33071466, 8149229, 1758231, -10834995, 30945528, -1694323, -33502340, -14767970}, + FieldElement{1439958, -16270480, -1079989, -793782, 4625402, 10647766, -5043801, 1220118, 30494170, -11440799}, + }, + { + FieldElement{-5037580, -13028295, -2970559, 
-3061767, 15640974, -6701666, -26739026, 926050, -1684339, -13333647}, + FieldElement{13908495, -3549272, 30919928, -6273825, -21521863, 7989039, 9021034, 9078865, 3353509, 4033511}, + FieldElement{-29663431, -15113610, 32259991, -344482, 24295849, -12912123, 23161163, 8839127, 27485041, 7356032}, + }, + }, + { + { + FieldElement{9661027, 705443, 11980065, -5370154, -1628543, 14661173, -6346142, 2625015, 28431036, -16771834}, + FieldElement{-23839233, -8311415, -25945511, 7480958, -17681669, -8354183, -22545972, 14150565, 15970762, 4099461}, + FieldElement{29262576, 16756590, 26350592, -8793563, 8529671, -11208050, 13617293, -9937143, 11465739, 8317062}, + }, + { + FieldElement{-25493081, -6962928, 32500200, -9419051, -23038724, -2302222, 14898637, 3848455, 20969334, -5157516}, + FieldElement{-20384450, -14347713, -18336405, 13884722, -33039454, 2842114, -21610826, -3649888, 11177095, 14989547}, + FieldElement{-24496721, -11716016, 16959896, 2278463, 12066309, 10137771, 13515641, 2581286, -28487508, 9930240}, + }, + { + FieldElement{-17751622, -2097826, 16544300, -13009300, -15914807, -14949081, 18345767, -13403753, 16291481, -5314038}, + FieldElement{-33229194, 2553288, 32678213, 9875984, 8534129, 6889387, -9676774, 6957617, 4368891, 9788741}, + FieldElement{16660756, 7281060, -10830758, 12911820, 20108584, -8101676, -21722536, -8613148, 16250552, -11111103}, + }, + { + FieldElement{-19765507, 2390526, -16551031, 14161980, 1905286, 6414907, 4689584, 10604807, -30190403, 4782747}, + FieldElement{-1354539, 14736941, -7367442, -13292886, 7710542, -14155590, -9981571, 4383045, 22546403, 437323}, + FieldElement{31665577, -12180464, -16186830, 1491339, -18368625, 3294682, 27343084, 2786261, -30633590, -14097016}, + }, + { + FieldElement{-14467279, -683715, -33374107, 7448552, 19294360, 14334329, -19690631, 2355319, -19284671, -6114373}, + FieldElement{15121312, -15796162, 6377020, -6031361, -10798111, -12957845, 18952177, 15496498, -29380133, 11754228}, + FieldElement{-2637277, -13483075, 8488727, -14303896, 12728761, -1622493, 7141596, 11724556, 22761615, -10134141}, + }, + { + FieldElement{16918416, 11729663, -18083579, 3022987, -31015732, -13339659, -28741185, -12227393, 32851222, 11717399}, + FieldElement{11166634, 7338049, -6722523, 4531520, -29468672, -7302055, 31474879, 3483633, -1193175, -4030831}, + FieldElement{-185635, 9921305, 31456609, -13536438, -12013818, 13348923, 33142652, 6546660, -19985279, -3948376}, + }, + { + FieldElement{-32460596, 11266712, -11197107, -7899103, 31703694, 3855903, -8537131, -12833048, -30772034, -15486313}, + FieldElement{-18006477, 12709068, 3991746, -6479188, -21491523, -10550425, -31135347, -16049879, 10928917, 3011958}, + FieldElement{-6957757, -15594337, 31696059, 334240, 29576716, 14796075, -30831056, -12805180, 18008031, 10258577}, + }, + { + FieldElement{-22448644, 15655569, 7018479, -4410003, -30314266, -1201591, -1853465, 1367120, 25127874, 6671743}, + FieldElement{29701166, -14373934, -10878120, 9279288, -17568, 13127210, 21382910, 11042292, 25838796, 4642684}, + FieldElement{-20430234, 14955537, -24126347, 8124619, -5369288, -5990470, 30468147, -13900640, 18423289, 4177476}, + }, + }, +} diff --git a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go new file mode 100644 index 0000000000..fd03c252af --- /dev/null +++ b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go @@ -0,0 +1,1793 @@ +// Copyright 2016 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package edwards25519 + +import "encoding/binary" + +// This code is a port of the public domain, "ref10" implementation of ed25519 +// from SUPERCOP. + +// FieldElement represents an element of the field GF(2^255 - 19). An element +// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77 +// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on +// context. +type FieldElement [10]int32 + +var zero FieldElement + +func FeZero(fe *FieldElement) { + copy(fe[:], zero[:]) +} + +func FeOne(fe *FieldElement) { + FeZero(fe) + fe[0] = 1 +} + +func FeAdd(dst, a, b *FieldElement) { + dst[0] = a[0] + b[0] + dst[1] = a[1] + b[1] + dst[2] = a[2] + b[2] + dst[3] = a[3] + b[3] + dst[4] = a[4] + b[4] + dst[5] = a[5] + b[5] + dst[6] = a[6] + b[6] + dst[7] = a[7] + b[7] + dst[8] = a[8] + b[8] + dst[9] = a[9] + b[9] +} + +func FeSub(dst, a, b *FieldElement) { + dst[0] = a[0] - b[0] + dst[1] = a[1] - b[1] + dst[2] = a[2] - b[2] + dst[3] = a[3] - b[3] + dst[4] = a[4] - b[4] + dst[5] = a[5] - b[5] + dst[6] = a[6] - b[6] + dst[7] = a[7] - b[7] + dst[8] = a[8] - b[8] + dst[9] = a[9] - b[9] +} + +func FeCopy(dst, src *FieldElement) { + copy(dst[:], src[:]) +} + +// Replace (f,g) with (g,g) if b == 1; +// replace (f,g) with (f,g) if b == 0. +// +// Preconditions: b in {0,1}. +func FeCMove(f, g *FieldElement, b int32) { + b = -b + f[0] ^= b & (f[0] ^ g[0]) + f[1] ^= b & (f[1] ^ g[1]) + f[2] ^= b & (f[2] ^ g[2]) + f[3] ^= b & (f[3] ^ g[3]) + f[4] ^= b & (f[4] ^ g[4]) + f[5] ^= b & (f[5] ^ g[5]) + f[6] ^= b & (f[6] ^ g[6]) + f[7] ^= b & (f[7] ^ g[7]) + f[8] ^= b & (f[8] ^ g[8]) + f[9] ^= b & (f[9] ^ g[9]) +} + +func load3(in []byte) int64 { + var r int64 + r = int64(in[0]) + r |= int64(in[1]) << 8 + r |= int64(in[2]) << 16 + return r +} + +func load4(in []byte) int64 { + var r int64 + r = int64(in[0]) + r |= int64(in[1]) << 8 + r |= int64(in[2]) << 16 + r |= int64(in[3]) << 24 + return r +} + +func FeFromBytes(dst *FieldElement, src *[32]byte) { + h0 := load4(src[:]) + h1 := load3(src[4:]) << 6 + h2 := load3(src[7:]) << 5 + h3 := load3(src[10:]) << 3 + h4 := load3(src[13:]) << 2 + h5 := load4(src[16:]) + h6 := load3(src[20:]) << 7 + h7 := load3(src[23:]) << 5 + h8 := load3(src[26:]) << 4 + h9 := (load3(src[29:]) & 8388607) << 2 + + FeCombine(dst, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) +} + +// FeToBytes marshals h to s. +// Preconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Write p=2^255-19; q=floor(h/p). +// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))). +// +// Proof: +// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4. +// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4. +// +// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9). +// Then 0<y<1. +// +// Write r=h-pq. +// Have 0<=r<=p-1=2^255-20. +// Thus 0<=r+19(2^-255)r<r+19(2^-255)2^255<=2^255-1. +// +// Write x=r+19(2^-255)r+y. +// Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q. +// +// Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1)) +// so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q. +func FeToBytes(s *[32]byte, h *FieldElement) { + var carry [10]int32 + + q := (19*h[9] + (1 << 24)) >> 25 + q = (h[0] + q) >> 26 + q = (h[1] + q) >> 25 + q = (h[2] + q) >> 26 + q = (h[3] + q) >> 25 + q = (h[4] + q) >> 26 + q = (h[5] + q) >> 25 + q = (h[6] + q) >> 26 + q = (h[7] + q) >> 25 + q = (h[8] + q) >> 26 + q = (h[9] + q) >> 25 + + // Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20. + h[0] += 19 * q + // Goal: Output h-2^255 q, which is between 0 and 2^255-20.
+ + carry[0] = h[0] >> 26 + h[1] += carry[0] + h[0] -= carry[0] << 26 + carry[1] = h[1] >> 25 + h[2] += carry[1] + h[1] -= carry[1] << 25 + carry[2] = h[2] >> 26 + h[3] += carry[2] + h[2] -= carry[2] << 26 + carry[3] = h[3] >> 25 + h[4] += carry[3] + h[3] -= carry[3] << 25 + carry[4] = h[4] >> 26 + h[5] += carry[4] + h[4] -= carry[4] << 26 + carry[5] = h[5] >> 25 + h[6] += carry[5] + h[5] -= carry[5] << 25 + carry[6] = h[6] >> 26 + h[7] += carry[6] + h[6] -= carry[6] << 26 + carry[7] = h[7] >> 25 + h[8] += carry[7] + h[7] -= carry[7] << 25 + carry[8] = h[8] >> 26 + h[9] += carry[8] + h[8] -= carry[8] << 26 + carry[9] = h[9] >> 25 + h[9] -= carry[9] << 25 + // h10 = carry9 + + // Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20. + // Have h[0]+...+2^230 h[9] between 0 and 2^255-1; + // evidently 2^255 h10-2^255 q = 0. + // Goal: Output h[0]+...+2^230 h[9]. + + s[0] = byte(h[0] >> 0) + s[1] = byte(h[0] >> 8) + s[2] = byte(h[0] >> 16) + s[3] = byte((h[0] >> 24) | (h[1] << 2)) + s[4] = byte(h[1] >> 6) + s[5] = byte(h[1] >> 14) + s[6] = byte((h[1] >> 22) | (h[2] << 3)) + s[7] = byte(h[2] >> 5) + s[8] = byte(h[2] >> 13) + s[9] = byte((h[2] >> 21) | (h[3] << 5)) + s[10] = byte(h[3] >> 3) + s[11] = byte(h[3] >> 11) + s[12] = byte((h[3] >> 19) | (h[4] << 6)) + s[13] = byte(h[4] >> 2) + s[14] = byte(h[4] >> 10) + s[15] = byte(h[4] >> 18) + s[16] = byte(h[5] >> 0) + s[17] = byte(h[5] >> 8) + s[18] = byte(h[5] >> 16) + s[19] = byte((h[5] >> 24) | (h[6] << 1)) + s[20] = byte(h[6] >> 7) + s[21] = byte(h[6] >> 15) + s[22] = byte((h[6] >> 23) | (h[7] << 3)) + s[23] = byte(h[7] >> 5) + s[24] = byte(h[7] >> 13) + s[25] = byte((h[7] >> 21) | (h[8] << 4)) + s[26] = byte(h[8] >> 4) + s[27] = byte(h[8] >> 12) + s[28] = byte((h[8] >> 20) | (h[9] << 6)) + s[29] = byte(h[9] >> 2) + s[30] = byte(h[9] >> 10) + s[31] = byte(h[9] >> 18) +} + +func FeIsNegative(f *FieldElement) byte { + var s [32]byte + FeToBytes(&s, f) + return s[0] & 1 +} + +func FeIsNonZero(f *FieldElement) int32 { + var s [32]byte + FeToBytes(&s, f) + var x uint8 + for _, b := range s { + x |= b + } + x |= x >> 4 + x |= x >> 2 + x |= x >> 1 + return int32(x & 1) +} + +// FeNeg sets h = -f +// +// Preconditions: +// |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +func FeNeg(h, f *FieldElement) { + h[0] = -f[0] + h[1] = -f[1] + h[2] = -f[2] + h[3] = -f[3] + h[4] = -f[4] + h[5] = -f[5] + h[6] = -f[6] + h[7] = -f[7] + h[8] = -f[8] + h[9] = -f[9] +} + +func FeCombine(h *FieldElement, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 int64) { + var c0, c1, c2, c3, c4, c5, c6, c7, c8, c9 int64 + + /* + |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38)) + i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8 + |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19)) + i.e. 
|h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9 + */ + + c0 = (h0 + (1 << 25)) >> 26 + h1 += c0 + h0 -= c0 << 26 + c4 = (h4 + (1 << 25)) >> 26 + h5 += c4 + h4 -= c4 << 26 + /* |h0| <= 2^25 */ + /* |h4| <= 2^25 */ + /* |h1| <= 1.51*2^58 */ + /* |h5| <= 1.51*2^58 */ + + c1 = (h1 + (1 << 24)) >> 25 + h2 += c1 + h1 -= c1 << 25 + c5 = (h5 + (1 << 24)) >> 25 + h6 += c5 + h5 -= c5 << 25 + /* |h1| <= 2^24; from now on fits into int32 */ + /* |h5| <= 2^24; from now on fits into int32 */ + /* |h2| <= 1.21*2^59 */ + /* |h6| <= 1.21*2^59 */ + + c2 = (h2 + (1 << 25)) >> 26 + h3 += c2 + h2 -= c2 << 26 + c6 = (h6 + (1 << 25)) >> 26 + h7 += c6 + h6 -= c6 << 26 + /* |h2| <= 2^25; from now on fits into int32 unchanged */ + /* |h6| <= 2^25; from now on fits into int32 unchanged */ + /* |h3| <= 1.51*2^58 */ + /* |h7| <= 1.51*2^58 */ + + c3 = (h3 + (1 << 24)) >> 25 + h4 += c3 + h3 -= c3 << 25 + c7 = (h7 + (1 << 24)) >> 25 + h8 += c7 + h7 -= c7 << 25 + /* |h3| <= 2^24; from now on fits into int32 unchanged */ + /* |h7| <= 2^24; from now on fits into int32 unchanged */ + /* |h4| <= 1.52*2^33 */ + /* |h8| <= 1.52*2^33 */ + + c4 = (h4 + (1 << 25)) >> 26 + h5 += c4 + h4 -= c4 << 26 + c8 = (h8 + (1 << 25)) >> 26 + h9 += c8 + h8 -= c8 << 26 + /* |h4| <= 2^25; from now on fits into int32 unchanged */ + /* |h8| <= 2^25; from now on fits into int32 unchanged */ + /* |h5| <= 1.01*2^24 */ + /* |h9| <= 1.51*2^58 */ + + c9 = (h9 + (1 << 24)) >> 25 + h0 += c9 * 19 + h9 -= c9 << 25 + /* |h9| <= 2^24; from now on fits into int32 unchanged */ + /* |h0| <= 1.8*2^37 */ + + c0 = (h0 + (1 << 25)) >> 26 + h1 += c0 + h0 -= c0 << 26 + /* |h0| <= 2^25; from now on fits into int32 unchanged */ + /* |h1| <= 1.01*2^24 */ + + h[0] = int32(h0) + h[1] = int32(h1) + h[2] = int32(h2) + h[3] = int32(h3) + h[4] = int32(h4) + h[5] = int32(h5) + h[6] = int32(h6) + h[7] = int32(h7) + h[8] = int32(h8) + h[9] = int32(h9) +} + +// FeMul calculates h = f * g +// Can overlap h with f or g. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Notes on implementation strategy: +// +// Using schoolbook multiplication. +// Karatsuba would save a little in some cost models. +// +// Most multiplications by 2 and 19 are 32-bit precomputations; +// cheaper than 64-bit postcomputations. +// +// There is one remaining multiplication by 19 in the carry chain; +// one *19 precomputation can be merged into this, +// but the resulting data flow is considerably less clean. +// +// There are 12 carries below. +// 10 of them are 2-way parallelizable and vectorizable. +// Can get away with 11 carries, but then data flow is much deeper. +// +// With tighter constraints on inputs, can squeeze carries into int32. 
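+//
+// Informally: the limbs alternate 26 and 25 bits (radix 2^25.5), so a product of
+// two odd-indexed limbs picks up an extra factor of 2 (the f1_2, f3_2, ...
+// precomputations below), and any product that spills past limb 9 is folded back
+// with a factor of 19 (the g1_19 ... g9_19 precomputations), since 2^255 reduces
+// to 19 in GF(2^255-19).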
+func FeMul(h, f, g *FieldElement) { + f0 := int64(f[0]) + f1 := int64(f[1]) + f2 := int64(f[2]) + f3 := int64(f[3]) + f4 := int64(f[4]) + f5 := int64(f[5]) + f6 := int64(f[6]) + f7 := int64(f[7]) + f8 := int64(f[8]) + f9 := int64(f[9]) + + f1_2 := int64(2 * f[1]) + f3_2 := int64(2 * f[3]) + f5_2 := int64(2 * f[5]) + f7_2 := int64(2 * f[7]) + f9_2 := int64(2 * f[9]) + + g0 := int64(g[0]) + g1 := int64(g[1]) + g2 := int64(g[2]) + g3 := int64(g[3]) + g4 := int64(g[4]) + g5 := int64(g[5]) + g6 := int64(g[6]) + g7 := int64(g[7]) + g8 := int64(g[8]) + g9 := int64(g[9]) + + g1_19 := int64(19 * g[1]) /* 1.4*2^29 */ + g2_19 := int64(19 * g[2]) /* 1.4*2^30; still ok */ + g3_19 := int64(19 * g[3]) + g4_19 := int64(19 * g[4]) + g5_19 := int64(19 * g[5]) + g6_19 := int64(19 * g[6]) + g7_19 := int64(19 * g[7]) + g8_19 := int64(19 * g[8]) + g9_19 := int64(19 * g[9]) + + h0 := f0*g0 + f1_2*g9_19 + f2*g8_19 + f3_2*g7_19 + f4*g6_19 + f5_2*g5_19 + f6*g4_19 + f7_2*g3_19 + f8*g2_19 + f9_2*g1_19 + h1 := f0*g1 + f1*g0 + f2*g9_19 + f3*g8_19 + f4*g7_19 + f5*g6_19 + f6*g5_19 + f7*g4_19 + f8*g3_19 + f9*g2_19 + h2 := f0*g2 + f1_2*g1 + f2*g0 + f3_2*g9_19 + f4*g8_19 + f5_2*g7_19 + f6*g6_19 + f7_2*g5_19 + f8*g4_19 + f9_2*g3_19 + h3 := f0*g3 + f1*g2 + f2*g1 + f3*g0 + f4*g9_19 + f5*g8_19 + f6*g7_19 + f7*g6_19 + f8*g5_19 + f9*g4_19 + h4 := f0*g4 + f1_2*g3 + f2*g2 + f3_2*g1 + f4*g0 + f5_2*g9_19 + f6*g8_19 + f7_2*g7_19 + f8*g6_19 + f9_2*g5_19 + h5 := f0*g5 + f1*g4 + f2*g3 + f3*g2 + f4*g1 + f5*g0 + f6*g9_19 + f7*g8_19 + f8*g7_19 + f9*g6_19 + h6 := f0*g6 + f1_2*g5 + f2*g4 + f3_2*g3 + f4*g2 + f5_2*g1 + f6*g0 + f7_2*g9_19 + f8*g8_19 + f9_2*g7_19 + h7 := f0*g7 + f1*g6 + f2*g5 + f3*g4 + f4*g3 + f5*g2 + f6*g1 + f7*g0 + f8*g9_19 + f9*g8_19 + h8 := f0*g8 + f1_2*g7 + f2*g6 + f3_2*g5 + f4*g4 + f5_2*g3 + f6*g2 + f7_2*g1 + f8*g0 + f9_2*g9_19 + h9 := f0*g9 + f1*g8 + f2*g7 + f3*g6 + f4*g5 + f5*g4 + f6*g3 + f7*g2 + f8*g1 + f9*g0 + + FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) +} + +func feSquare(f *FieldElement) (h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 int64) { + f0 := int64(f[0]) + f1 := int64(f[1]) + f2 := int64(f[2]) + f3 := int64(f[3]) + f4 := int64(f[4]) + f5 := int64(f[5]) + f6 := int64(f[6]) + f7 := int64(f[7]) + f8 := int64(f[8]) + f9 := int64(f[9]) + f0_2 := int64(2 * f[0]) + f1_2 := int64(2 * f[1]) + f2_2 := int64(2 * f[2]) + f3_2 := int64(2 * f[3]) + f4_2 := int64(2 * f[4]) + f5_2 := int64(2 * f[5]) + f6_2 := int64(2 * f[6]) + f7_2 := int64(2 * f[7]) + f5_38 := 38 * f5 // 1.31*2^30 + f6_19 := 19 * f6 // 1.31*2^30 + f7_38 := 38 * f7 // 1.31*2^30 + f8_19 := 19 * f8 // 1.31*2^30 + f9_38 := 38 * f9 // 1.31*2^30 + + h0 = f0*f0 + f1_2*f9_38 + f2_2*f8_19 + f3_2*f7_38 + f4_2*f6_19 + f5*f5_38 + h1 = f0_2*f1 + f2*f9_38 + f3_2*f8_19 + f4*f7_38 + f5_2*f6_19 + h2 = f0_2*f2 + f1_2*f1 + f3_2*f9_38 + f4_2*f8_19 + f5_2*f7_38 + f6*f6_19 + h3 = f0_2*f3 + f1_2*f2 + f4*f9_38 + f5_2*f8_19 + f6*f7_38 + h4 = f0_2*f4 + f1_2*f3_2 + f2*f2 + f5_2*f9_38 + f6_2*f8_19 + f7*f7_38 + h5 = f0_2*f5 + f1_2*f4 + f2_2*f3 + f6*f9_38 + f7_2*f8_19 + h6 = f0_2*f6 + f1_2*f5_2 + f2_2*f4 + f3_2*f3 + f7_2*f9_38 + f8*f8_19 + h7 = f0_2*f7 + f1_2*f6 + f2_2*f5 + f3_2*f4 + f8*f9_38 + h8 = f0_2*f8 + f1_2*f7_2 + f2_2*f6 + f3_2*f5_2 + f4*f4 + f9*f9_38 + h9 = f0_2*f9 + f1_2*f8 + f2_2*f7 + f3_2*f6 + f4_2*f5 + + return +} + +// FeSquare calculates h = f*f. Can overlap h with f. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. 
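+//
+// Note: FeSquare and FeSquare2 below both reuse the feSquare helper above, which
+// returns the ten unreduced int64 limb sums; FeSquare2 simply doubles each limb
+// first, and in both cases FeCombine performs the carry chain that restores the
+// bounds stated here.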
+func FeSquare(h, f *FieldElement) { + h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 := feSquare(f) + FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) +} + +// FeSquare2 sets h = 2 * f * f +// +// Can overlap h with f. +// +// Preconditions: +// |f| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.01*2^25,1.01*2^24,1.01*2^25,1.01*2^24,etc. +// See fe_mul.c for discussion of implementation strategy. +func FeSquare2(h, f *FieldElement) { + h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 := feSquare(f) + + h0 += h0 + h1 += h1 + h2 += h2 + h3 += h3 + h4 += h4 + h5 += h5 + h6 += h6 + h7 += h7 + h8 += h8 + h9 += h9 + + FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) +} + +func FeInvert(out, z *FieldElement) { + var t0, t1, t2, t3 FieldElement + var i int + + FeSquare(&t0, z) // 2^1 + FeSquare(&t1, &t0) // 2^2 + for i = 1; i < 2; i++ { // 2^3 + FeSquare(&t1, &t1) + } + FeMul(&t1, z, &t1) // 2^3 + 2^0 + FeMul(&t0, &t0, &t1) // 2^3 + 2^1 + 2^0 + FeSquare(&t2, &t0) // 2^4 + 2^2 + 2^1 + FeMul(&t1, &t1, &t2) // 2^4 + 2^3 + 2^2 + 2^1 + 2^0 + FeSquare(&t2, &t1) // 5,4,3,2,1 + for i = 1; i < 5; i++ { // 9,8,7,6,5 + FeSquare(&t2, &t2) + } + FeMul(&t1, &t2, &t1) // 9,8,7,6,5,4,3,2,1,0 + FeSquare(&t2, &t1) // 10..1 + for i = 1; i < 10; i++ { // 19..10 + FeSquare(&t2, &t2) + } + FeMul(&t2, &t2, &t1) // 19..0 + FeSquare(&t3, &t2) // 20..1 + for i = 1; i < 20; i++ { // 39..20 + FeSquare(&t3, &t3) + } + FeMul(&t2, &t3, &t2) // 39..0 + FeSquare(&t2, &t2) // 40..1 + for i = 1; i < 10; i++ { // 49..10 + FeSquare(&t2, &t2) + } + FeMul(&t1, &t2, &t1) // 49..0 + FeSquare(&t2, &t1) // 50..1 + for i = 1; i < 50; i++ { // 99..50 + FeSquare(&t2, &t2) + } + FeMul(&t2, &t2, &t1) // 99..0 + FeSquare(&t3, &t2) // 100..1 + for i = 1; i < 100; i++ { // 199..100 + FeSquare(&t3, &t3) + } + FeMul(&t2, &t3, &t2) // 199..0 + FeSquare(&t2, &t2) // 200..1 + for i = 1; i < 50; i++ { // 249..50 + FeSquare(&t2, &t2) + } + FeMul(&t1, &t2, &t1) // 249..0 + FeSquare(&t1, &t1) // 250..1 + for i = 1; i < 5; i++ { // 254..5 + FeSquare(&t1, &t1) + } + FeMul(out, &t1, &t0) // 254..5,3,1,0 +} + +func fePow22523(out, z *FieldElement) { + var t0, t1, t2 FieldElement + var i int + + FeSquare(&t0, z) + for i = 1; i < 1; i++ { + FeSquare(&t0, &t0) + } + FeSquare(&t1, &t0) + for i = 1; i < 2; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t1, z, &t1) + FeMul(&t0, &t0, &t1) + FeSquare(&t0, &t0) + for i = 1; i < 1; i++ { + FeSquare(&t0, &t0) + } + FeMul(&t0, &t1, &t0) + FeSquare(&t1, &t0) + for i = 1; i < 5; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t0, &t1, &t0) + FeSquare(&t1, &t0) + for i = 1; i < 10; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t1, &t1, &t0) + FeSquare(&t2, &t1) + for i = 1; i < 20; i++ { + FeSquare(&t2, &t2) + } + FeMul(&t1, &t2, &t1) + FeSquare(&t1, &t1) + for i = 1; i < 10; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t0, &t1, &t0) + FeSquare(&t1, &t0) + for i = 1; i < 50; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t1, &t1, &t0) + FeSquare(&t2, &t1) + for i = 1; i < 100; i++ { + FeSquare(&t2, &t2) + } + FeMul(&t1, &t2, &t1) + FeSquare(&t1, &t1) + for i = 1; i < 50; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t0, &t1, &t0) + FeSquare(&t0, &t0) + for i = 1; i < 2; i++ { + FeSquare(&t0, &t0) + } + FeMul(out, &t0, z) +} + +// Group elements are members of the elliptic curve -x^2 + y^2 = 1 + d * x^2 * +// y^2 where d = -121665/121666. 
+// +// Several representations are used: +// ProjectiveGroupElement: (X:Y:Z) satisfying x=X/Z, y=Y/Z +// ExtendedGroupElement: (X:Y:Z:T) satisfying x=X/Z, y=Y/Z, XY=ZT +// CompletedGroupElement: ((X:Z),(Y:T)) satisfying x=X/Z, y=Y/T +// PreComputedGroupElement: (y+x,y-x,2dxy) + +type ProjectiveGroupElement struct { + X, Y, Z FieldElement +} + +type ExtendedGroupElement struct { + X, Y, Z, T FieldElement +} + +type CompletedGroupElement struct { + X, Y, Z, T FieldElement +} + +type PreComputedGroupElement struct { + yPlusX, yMinusX, xy2d FieldElement +} + +type CachedGroupElement struct { + yPlusX, yMinusX, Z, T2d FieldElement +} + +func (p *ProjectiveGroupElement) Zero() { + FeZero(&p.X) + FeOne(&p.Y) + FeOne(&p.Z) +} + +func (p *ProjectiveGroupElement) Double(r *CompletedGroupElement) { + var t0 FieldElement + + FeSquare(&r.X, &p.X) + FeSquare(&r.Z, &p.Y) + FeSquare2(&r.T, &p.Z) + FeAdd(&r.Y, &p.X, &p.Y) + FeSquare(&t0, &r.Y) + FeAdd(&r.Y, &r.Z, &r.X) + FeSub(&r.Z, &r.Z, &r.X) + FeSub(&r.X, &t0, &r.Y) + FeSub(&r.T, &r.T, &r.Z) +} + +func (p *ProjectiveGroupElement) ToBytes(s *[32]byte) { + var recip, x, y FieldElement + + FeInvert(&recip, &p.Z) + FeMul(&x, &p.X, &recip) + FeMul(&y, &p.Y, &recip) + FeToBytes(s, &y) + s[31] ^= FeIsNegative(&x) << 7 +} + +func (p *ExtendedGroupElement) Zero() { + FeZero(&p.X) + FeOne(&p.Y) + FeOne(&p.Z) + FeZero(&p.T) +} + +func (p *ExtendedGroupElement) Double(r *CompletedGroupElement) { + var q ProjectiveGroupElement + p.ToProjective(&q) + q.Double(r) +} + +func (p *ExtendedGroupElement) ToCached(r *CachedGroupElement) { + FeAdd(&r.yPlusX, &p.Y, &p.X) + FeSub(&r.yMinusX, &p.Y, &p.X) + FeCopy(&r.Z, &p.Z) + FeMul(&r.T2d, &p.T, &d2) +} + +func (p *ExtendedGroupElement) ToProjective(r *ProjectiveGroupElement) { + FeCopy(&r.X, &p.X) + FeCopy(&r.Y, &p.Y) + FeCopy(&r.Z, &p.Z) +} + +func (p *ExtendedGroupElement) ToBytes(s *[32]byte) { + var recip, x, y FieldElement + + FeInvert(&recip, &p.Z) + FeMul(&x, &p.X, &recip) + FeMul(&y, &p.Y, &recip) + FeToBytes(s, &y) + s[31] ^= FeIsNegative(&x) << 7 +} + +func (p *ExtendedGroupElement) FromBytes(s *[32]byte) bool { + var u, v, v3, vxx, check FieldElement + + FeFromBytes(&p.Y, s) + FeOne(&p.Z) + FeSquare(&u, &p.Y) + FeMul(&v, &u, &d) + FeSub(&u, &u, &p.Z) // y = y^2-1 + FeAdd(&v, &v, &p.Z) // v = dy^2+1 + + FeSquare(&v3, &v) + FeMul(&v3, &v3, &v) // v3 = v^3 + FeSquare(&p.X, &v3) + FeMul(&p.X, &p.X, &v) + FeMul(&p.X, &p.X, &u) // x = uv^7 + + fePow22523(&p.X, &p.X) // x = (uv^7)^((q-5)/8) + FeMul(&p.X, &p.X, &v3) + FeMul(&p.X, &p.X, &u) // x = uv^3(uv^7)^((q-5)/8) + + var tmpX, tmp2 [32]byte + + FeSquare(&vxx, &p.X) + FeMul(&vxx, &vxx, &v) + FeSub(&check, &vxx, &u) // vx^2-u + if FeIsNonZero(&check) == 1 { + FeAdd(&check, &vxx, &u) // vx^2+u + if FeIsNonZero(&check) == 1 { + return false + } + FeMul(&p.X, &p.X, &SqrtM1) + + FeToBytes(&tmpX, &p.X) + for i, v := range tmpX { + tmp2[31-i] = v + } + } + + if FeIsNegative(&p.X) != (s[31] >> 7) { + FeNeg(&p.X, &p.X) + } + + FeMul(&p.T, &p.X, &p.Y) + return true +} + +func (p *CompletedGroupElement) ToProjective(r *ProjectiveGroupElement) { + FeMul(&r.X, &p.X, &p.T) + FeMul(&r.Y, &p.Y, &p.Z) + FeMul(&r.Z, &p.Z, &p.T) +} + +func (p *CompletedGroupElement) ToExtended(r *ExtendedGroupElement) { + FeMul(&r.X, &p.X, &p.T) + FeMul(&r.Y, &p.Y, &p.Z) + FeMul(&r.Z, &p.Z, &p.T) + FeMul(&r.T, &p.X, &p.Y) +} + +func (p *PreComputedGroupElement) Zero() { + FeOne(&p.yPlusX) + FeOne(&p.yMinusX) + FeZero(&p.xy2d) +} + +func geAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q 
*CachedGroupElement) { + var t0 FieldElement + + FeAdd(&r.X, &p.Y, &p.X) + FeSub(&r.Y, &p.Y, &p.X) + FeMul(&r.Z, &r.X, &q.yPlusX) + FeMul(&r.Y, &r.Y, &q.yMinusX) + FeMul(&r.T, &q.T2d, &p.T) + FeMul(&r.X, &p.Z, &q.Z) + FeAdd(&t0, &r.X, &r.X) + FeSub(&r.X, &r.Z, &r.Y) + FeAdd(&r.Y, &r.Z, &r.Y) + FeAdd(&r.Z, &t0, &r.T) + FeSub(&r.T, &t0, &r.T) +} + +func geSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *CachedGroupElement) { + var t0 FieldElement + + FeAdd(&r.X, &p.Y, &p.X) + FeSub(&r.Y, &p.Y, &p.X) + FeMul(&r.Z, &r.X, &q.yMinusX) + FeMul(&r.Y, &r.Y, &q.yPlusX) + FeMul(&r.T, &q.T2d, &p.T) + FeMul(&r.X, &p.Z, &q.Z) + FeAdd(&t0, &r.X, &r.X) + FeSub(&r.X, &r.Z, &r.Y) + FeAdd(&r.Y, &r.Z, &r.Y) + FeSub(&r.Z, &t0, &r.T) + FeAdd(&r.T, &t0, &r.T) +} + +func geMixedAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) { + var t0 FieldElement + + FeAdd(&r.X, &p.Y, &p.X) + FeSub(&r.Y, &p.Y, &p.X) + FeMul(&r.Z, &r.X, &q.yPlusX) + FeMul(&r.Y, &r.Y, &q.yMinusX) + FeMul(&r.T, &q.xy2d, &p.T) + FeAdd(&t0, &p.Z, &p.Z) + FeSub(&r.X, &r.Z, &r.Y) + FeAdd(&r.Y, &r.Z, &r.Y) + FeAdd(&r.Z, &t0, &r.T) + FeSub(&r.T, &t0, &r.T) +} + +func geMixedSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) { + var t0 FieldElement + + FeAdd(&r.X, &p.Y, &p.X) + FeSub(&r.Y, &p.Y, &p.X) + FeMul(&r.Z, &r.X, &q.yMinusX) + FeMul(&r.Y, &r.Y, &q.yPlusX) + FeMul(&r.T, &q.xy2d, &p.T) + FeAdd(&t0, &p.Z, &p.Z) + FeSub(&r.X, &r.Z, &r.Y) + FeAdd(&r.Y, &r.Z, &r.Y) + FeSub(&r.Z, &t0, &r.T) + FeAdd(&r.T, &t0, &r.T) +} + +func slide(r *[256]int8, a *[32]byte) { + for i := range r { + r[i] = int8(1 & (a[i>>3] >> uint(i&7))) + } + + for i := range r { + if r[i] != 0 { + for b := 1; b <= 6 && i+b < 256; b++ { + if r[i+b] != 0 { + if r[i]+(r[i+b]<<uint(b)) <= 15 { + r[i] += r[i+b] << uint(b) + r[i+b] = 0 + } else if r[i]-(r[i+b]<<uint(b)) >= -15 { + r[i] -= r[i+b] << uint(b) + for k := i + b; k < 256; k++ { + if r[k] == 0 { + r[k] = 1 + break + } + r[k] = 0 + } + } else { + break + } + } + } + } + } +} + +// GeDoubleScalarMultVartime sets r = a*A + b*B +// where a = a[0]+256*a[1]+...+256^31 a[31]. +// and b = b[0]+256*b[1]+...+256^31 b[31]. +// B is the Ed25519 base point (x,4/5) with x positive. +func GeDoubleScalarMultVartime(r *ProjectiveGroupElement, a *[32]byte, A *ExtendedGroupElement, b *[32]byte) { + var aSlide, bSlide [256]int8 + var Ai [8]CachedGroupElement // A,3A,5A,7A,9A,11A,13A,15A + var t CompletedGroupElement + var u, A2 ExtendedGroupElement + var i int + + slide(&aSlide, a) + slide(&bSlide, b) + + A.ToCached(&Ai[0]) + A.Double(&t) + t.ToExtended(&A2) + + for i := 0; i < 7; i++ { + geAdd(&t, &A2, &Ai[i]) + t.ToExtended(&u) + u.ToCached(&Ai[i+1]) + } + + r.Zero() + + for i = 255; i >= 0; i-- { + if aSlide[i] != 0 || bSlide[i] != 0 { + break + } + } + + for ; i >= 0; i-- { + r.Double(&t) + + if aSlide[i] > 0 { + t.ToExtended(&u) + geAdd(&t, &u, &Ai[aSlide[i]/2]) + } else if aSlide[i] < 0 { + t.ToExtended(&u) + geSub(&t, &u, &Ai[(-aSlide[i])/2]) + } + + if bSlide[i] > 0 { + t.ToExtended(&u) + geMixedAdd(&t, &u, &bi[bSlide[i]/2]) + } else if bSlide[i] < 0 { + t.ToExtended(&u) + geMixedSub(&t, &u, &bi[(-bSlide[i])/2]) + } + + t.ToProjective(r) + } +} + +// equal returns 1 if b == c and 0 otherwise, assuming that b and c are +// non-negative. +func equal(b, c int32) int32 { + x := uint32(b ^ c) + x-- + return int32(x >> 31) +} + +// negative returns 1 if b < 0 and 0 otherwise.
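+//
+// Like equal above, negative is deliberately branch-free: selectPoint below uses
+// the two together with the CMove helpers to visit every entry of the precomputed
+// table, so the lookup takes the same time whichever (secret) index is selected.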
+func negative(b int32) int32 { + return (b >> 31) & 1 +} + +func PreComputedGroupElementCMove(t, u *PreComputedGroupElement, b int32) { + FeCMove(&t.yPlusX, &u.yPlusX, b) + FeCMove(&t.yMinusX, &u.yMinusX, b) + FeCMove(&t.xy2d, &u.xy2d, b) +} + +func selectPoint(t *PreComputedGroupElement, pos int32, b int32) { + var minusT PreComputedGroupElement + bNegative := negative(b) + bAbs := b - (((-bNegative) & b) << 1) + + t.Zero() + for i := int32(0); i < 8; i++ { + PreComputedGroupElementCMove(t, &base[pos][i], equal(bAbs, i+1)) + } + FeCopy(&minusT.yPlusX, &t.yMinusX) + FeCopy(&minusT.yMinusX, &t.yPlusX) + FeNeg(&minusT.xy2d, &t.xy2d) + PreComputedGroupElementCMove(t, &minusT, bNegative) +} + +// GeScalarMultBase computes h = a*B, where +// a = a[0]+256*a[1]+...+256^31 a[31] +// B is the Ed25519 base point (x,4/5) with x positive. +// +// Preconditions: +// a[31] <= 127 +func GeScalarMultBase(h *ExtendedGroupElement, a *[32]byte) { + var e [64]int8 + + for i, v := range a { + e[2*i] = int8(v & 15) + e[2*i+1] = int8((v >> 4) & 15) + } + + // each e[i] is between 0 and 15 and e[63] is between 0 and 7. + + carry := int8(0) + for i := 0; i < 63; i++ { + e[i] += carry + carry = (e[i] + 8) >> 4 + e[i] -= carry << 4 + } + e[63] += carry + // each e[i] is between -8 and 8. + + h.Zero() + var t PreComputedGroupElement + var r CompletedGroupElement + for i := int32(1); i < 64; i += 2 { + selectPoint(&t, i/2, int32(e[i])) + geMixedAdd(&r, h, &t) + r.ToExtended(h) + } + + var s ProjectiveGroupElement + + h.Double(&r) + r.ToProjective(&s) + s.Double(&r) + r.ToProjective(&s) + s.Double(&r) + r.ToProjective(&s) + s.Double(&r) + r.ToExtended(h) + + for i := int32(0); i < 64; i += 2 { + selectPoint(&t, i/2, int32(e[i])) + geMixedAdd(&r, h, &t) + r.ToExtended(h) + } +} + +// The scalars are GF(2^252 + 27742317777372353535851937790883648493). + +// Input: +// a[0]+256*a[1]+...+256^31*a[31] = a +// b[0]+256*b[1]+...+256^31*b[31] = b +// c[0]+256*c[1]+...+256^31*c[31] = c +// +// Output: +// s[0]+256*s[1]+...+256^31*s[31] = (ab+c) mod l +// where l = 2^252 + 27742317777372353535851937790883648493. 
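+//
+// Informally: each 32-byte input is unpacked below into twelve 21-bit limbs
+// (mask 2097151 = 2^21-1), the schoolbook product is accumulated into s0..s23,
+// and limbs at or above 2^252 are folded back in via
+// 2^252 = -27742317777372353535851937790883648493 (mod l), which is where the
+// small constants 666643, 470296, 654183, 997805, 136657 and 683901 come from.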
+func ScMulAdd(s, a, b, c *[32]byte) { + a0 := 2097151 & load3(a[:]) + a1 := 2097151 & (load4(a[2:]) >> 5) + a2 := 2097151 & (load3(a[5:]) >> 2) + a3 := 2097151 & (load4(a[7:]) >> 7) + a4 := 2097151 & (load4(a[10:]) >> 4) + a5 := 2097151 & (load3(a[13:]) >> 1) + a6 := 2097151 & (load4(a[15:]) >> 6) + a7 := 2097151 & (load3(a[18:]) >> 3) + a8 := 2097151 & load3(a[21:]) + a9 := 2097151 & (load4(a[23:]) >> 5) + a10 := 2097151 & (load3(a[26:]) >> 2) + a11 := (load4(a[28:]) >> 7) + b0 := 2097151 & load3(b[:]) + b1 := 2097151 & (load4(b[2:]) >> 5) + b2 := 2097151 & (load3(b[5:]) >> 2) + b3 := 2097151 & (load4(b[7:]) >> 7) + b4 := 2097151 & (load4(b[10:]) >> 4) + b5 := 2097151 & (load3(b[13:]) >> 1) + b6 := 2097151 & (load4(b[15:]) >> 6) + b7 := 2097151 & (load3(b[18:]) >> 3) + b8 := 2097151 & load3(b[21:]) + b9 := 2097151 & (load4(b[23:]) >> 5) + b10 := 2097151 & (load3(b[26:]) >> 2) + b11 := (load4(b[28:]) >> 7) + c0 := 2097151 & load3(c[:]) + c1 := 2097151 & (load4(c[2:]) >> 5) + c2 := 2097151 & (load3(c[5:]) >> 2) + c3 := 2097151 & (load4(c[7:]) >> 7) + c4 := 2097151 & (load4(c[10:]) >> 4) + c5 := 2097151 & (load3(c[13:]) >> 1) + c6 := 2097151 & (load4(c[15:]) >> 6) + c7 := 2097151 & (load3(c[18:]) >> 3) + c8 := 2097151 & load3(c[21:]) + c9 := 2097151 & (load4(c[23:]) >> 5) + c10 := 2097151 & (load3(c[26:]) >> 2) + c11 := (load4(c[28:]) >> 7) + var carry [23]int64 + + s0 := c0 + a0*b0 + s1 := c1 + a0*b1 + a1*b0 + s2 := c2 + a0*b2 + a1*b1 + a2*b0 + s3 := c3 + a0*b3 + a1*b2 + a2*b1 + a3*b0 + s4 := c4 + a0*b4 + a1*b3 + a2*b2 + a3*b1 + a4*b0 + s5 := c5 + a0*b5 + a1*b4 + a2*b3 + a3*b2 + a4*b1 + a5*b0 + s6 := c6 + a0*b6 + a1*b5 + a2*b4 + a3*b3 + a4*b2 + a5*b1 + a6*b0 + s7 := c7 + a0*b7 + a1*b6 + a2*b5 + a3*b4 + a4*b3 + a5*b2 + a6*b1 + a7*b0 + s8 := c8 + a0*b8 + a1*b7 + a2*b6 + a3*b5 + a4*b4 + a5*b3 + a6*b2 + a7*b1 + a8*b0 + s9 := c9 + a0*b9 + a1*b8 + a2*b7 + a3*b6 + a4*b5 + a5*b4 + a6*b3 + a7*b2 + a8*b1 + a9*b0 + s10 := c10 + a0*b10 + a1*b9 + a2*b8 + a3*b7 + a4*b6 + a5*b5 + a6*b4 + a7*b3 + a8*b2 + a9*b1 + a10*b0 + s11 := c11 + a0*b11 + a1*b10 + a2*b9 + a3*b8 + a4*b7 + a5*b6 + a6*b5 + a7*b4 + a8*b3 + a9*b2 + a10*b1 + a11*b0 + s12 := a1*b11 + a2*b10 + a3*b9 + a4*b8 + a5*b7 + a6*b6 + a7*b5 + a8*b4 + a9*b3 + a10*b2 + a11*b1 + s13 := a2*b11 + a3*b10 + a4*b9 + a5*b8 + a6*b7 + a7*b6 + a8*b5 + a9*b4 + a10*b3 + a11*b2 + s14 := a3*b11 + a4*b10 + a5*b9 + a6*b8 + a7*b7 + a8*b6 + a9*b5 + a10*b4 + a11*b3 + s15 := a4*b11 + a5*b10 + a6*b9 + a7*b8 + a8*b7 + a9*b6 + a10*b5 + a11*b4 + s16 := a5*b11 + a6*b10 + a7*b9 + a8*b8 + a9*b7 + a10*b6 + a11*b5 + s17 := a6*b11 + a7*b10 + a8*b9 + a9*b8 + a10*b7 + a11*b6 + s18 := a7*b11 + a8*b10 + a9*b9 + a10*b8 + a11*b7 + s19 := a8*b11 + a9*b10 + a10*b9 + a11*b8 + s20 := a9*b11 + a10*b10 + a11*b9 + s21 := a10*b11 + a11*b10 + s22 := a11 * b11 + s23 := int64(0) + + carry[0] = (s0 + (1 << 20)) >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[2] = (s2 + (1 << 20)) >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[4] = (s4 + (1 << 20)) >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[12] = (s12 + (1 << 20)) >> 21 + s13 += carry[12] + s12 -= carry[12] << 21 + carry[14] = (s14 + (1 << 20)) >> 21 + s15 += carry[14] + s14 -= carry[14] << 21 + carry[16] = (s16 + (1 << 20)) >> 21 + s17 += carry[16] + s16 -= carry[16] << 21 + carry[18] = (s18 + (1 
<< 20)) >> 21 + s19 += carry[18] + s18 -= carry[18] << 21 + carry[20] = (s20 + (1 << 20)) >> 21 + s21 += carry[20] + s20 -= carry[20] << 21 + carry[22] = (s22 + (1 << 20)) >> 21 + s23 += carry[22] + s22 -= carry[22] << 21 + + carry[1] = (s1 + (1 << 20)) >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[3] = (s3 + (1 << 20)) >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[5] = (s5 + (1 << 20)) >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + carry[13] = (s13 + (1 << 20)) >> 21 + s14 += carry[13] + s13 -= carry[13] << 21 + carry[15] = (s15 + (1 << 20)) >> 21 + s16 += carry[15] + s15 -= carry[15] << 21 + carry[17] = (s17 + (1 << 20)) >> 21 + s18 += carry[17] + s17 -= carry[17] << 21 + carry[19] = (s19 + (1 << 20)) >> 21 + s20 += carry[19] + s19 -= carry[19] << 21 + carry[21] = (s21 + (1 << 20)) >> 21 + s22 += carry[21] + s21 -= carry[21] << 21 + + s11 += s23 * 666643 + s12 += s23 * 470296 + s13 += s23 * 654183 + s14 -= s23 * 997805 + s15 += s23 * 136657 + s16 -= s23 * 683901 + s23 = 0 + + s10 += s22 * 666643 + s11 += s22 * 470296 + s12 += s22 * 654183 + s13 -= s22 * 997805 + s14 += s22 * 136657 + s15 -= s22 * 683901 + s22 = 0 + + s9 += s21 * 666643 + s10 += s21 * 470296 + s11 += s21 * 654183 + s12 -= s21 * 997805 + s13 += s21 * 136657 + s14 -= s21 * 683901 + s21 = 0 + + s8 += s20 * 666643 + s9 += s20 * 470296 + s10 += s20 * 654183 + s11 -= s20 * 997805 + s12 += s20 * 136657 + s13 -= s20 * 683901 + s20 = 0 + + s7 += s19 * 666643 + s8 += s19 * 470296 + s9 += s19 * 654183 + s10 -= s19 * 997805 + s11 += s19 * 136657 + s12 -= s19 * 683901 + s19 = 0 + + s6 += s18 * 666643 + s7 += s18 * 470296 + s8 += s18 * 654183 + s9 -= s18 * 997805 + s10 += s18 * 136657 + s11 -= s18 * 683901 + s18 = 0 + + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[12] = (s12 + (1 << 20)) >> 21 + s13 += carry[12] + s12 -= carry[12] << 21 + carry[14] = (s14 + (1 << 20)) >> 21 + s15 += carry[14] + s14 -= carry[14] << 21 + carry[16] = (s16 + (1 << 20)) >> 21 + s17 += carry[16] + s16 -= carry[16] << 21 + + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + carry[13] = (s13 + (1 << 20)) >> 21 + s14 += carry[13] + s13 -= carry[13] << 21 + carry[15] = (s15 + (1 << 20)) >> 21 + s16 += carry[15] + s15 -= carry[15] << 21 + + s5 += s17 * 666643 + s6 += s17 * 470296 + s7 += s17 * 654183 + s8 -= s17 * 997805 + s9 += s17 * 136657 + s10 -= s17 * 683901 + s17 = 0 + + s4 += s16 * 666643 + s5 += s16 * 470296 + s6 += s16 * 654183 + s7 -= s16 * 997805 + s8 += s16 * 136657 + s9 -= s16 * 683901 + s16 = 0 + + s3 += s15 * 666643 + s4 += s15 * 470296 + s5 += s15 * 654183 + s6 -= s15 * 997805 + s7 += s15 * 136657 + s8 -= s15 * 683901 + s15 = 0 + + s2 += s14 * 666643 + s3 += s14 * 470296 + s4 += s14 * 654183 + s5 -= s14 * 997805 + s6 += s14 * 136657 + s7 -= s14 * 683901 + s14 = 0 + + s1 += s13 * 666643 + s2 += s13 * 470296 + s3 += s13 * 654183 + s4 -= s13 * 997805 + s5 += s13 * 136657 + s6 -= s13 * 683901 + s13 = 0 + + s0 += s12 * 666643 + s1 
+= s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = (s0 + (1 << 20)) >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[2] = (s2 + (1 << 20)) >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[4] = (s4 + (1 << 20)) >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + + carry[1] = (s1 + (1 << 20)) >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[3] = (s3 + (1 << 20)) >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[5] = (s5 + (1 << 20)) >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = s0 >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[1] = s1 >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[2] = s2 >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[3] = s3 >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[4] = s4 >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[5] = s5 >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[6] = s6 >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[7] = s7 >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[8] = s8 >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[9] = s9 >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[10] = s10 >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[11] = s11 >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = s0 >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[1] = s1 >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[2] = s2 >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[3] = s3 >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[4] = s4 >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[5] = s5 >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[6] = s6 >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[7] = s7 >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[8] = s8 >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[9] = s9 >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[10] = s10 >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + + s[0] = byte(s0 >> 0) + s[1] = byte(s0 >> 8) + s[2] = byte((s0 >> 16) | (s1 << 5)) + s[3] = byte(s1 >> 3) + s[4] = byte(s1 >> 11) + s[5] = byte((s1 >> 19) | (s2 << 2)) + s[6] = byte(s2 >> 6) + s[7] = byte((s2 >> 14) | (s3 << 7)) + s[8] = byte(s3 >> 1) + s[9] = byte(s3 >> 9) + s[10] = byte((s3 >> 17) | (s4 << 4)) + s[11] = byte(s4 >> 4) + s[12] = byte(s4 >> 12) + s[13] = byte((s4 >> 20) | (s5 << 1)) + s[14] = byte(s5 >> 7) + s[15] = byte((s5 >> 15) | (s6 << 6)) + s[16] = byte(s6 >> 2) + s[17] = byte(s6 >> 10) + s[18] = byte((s6 >> 18) | (s7 << 3)) + s[19] = byte(s7 >> 5) + s[20] = byte(s7 >> 13) + s[21] = byte(s8 >> 0) + s[22] = byte(s8 >> 8) + s[23] = byte((s8 >> 16) | (s9 << 5)) + s[24] = byte(s9 >> 3) + s[25] = byte(s9 >> 11) + s[26] = byte((s9 >> 19) | (s10 << 2)) + s[27] 
= byte(s10 >> 6) + s[28] = byte((s10 >> 14) | (s11 << 7)) + s[29] = byte(s11 >> 1) + s[30] = byte(s11 >> 9) + s[31] = byte(s11 >> 17) +} + +// Input: +// s[0]+256*s[1]+...+256^63*s[63] = s +// +// Output: +// s[0]+256*s[1]+...+256^31*s[31] = s mod l +// where l = 2^252 + 27742317777372353535851937790883648493. +func ScReduce(out *[32]byte, s *[64]byte) { + s0 := 2097151 & load3(s[:]) + s1 := 2097151 & (load4(s[2:]) >> 5) + s2 := 2097151 & (load3(s[5:]) >> 2) + s3 := 2097151 & (load4(s[7:]) >> 7) + s4 := 2097151 & (load4(s[10:]) >> 4) + s5 := 2097151 & (load3(s[13:]) >> 1) + s6 := 2097151 & (load4(s[15:]) >> 6) + s7 := 2097151 & (load3(s[18:]) >> 3) + s8 := 2097151 & load3(s[21:]) + s9 := 2097151 & (load4(s[23:]) >> 5) + s10 := 2097151 & (load3(s[26:]) >> 2) + s11 := 2097151 & (load4(s[28:]) >> 7) + s12 := 2097151 & (load4(s[31:]) >> 4) + s13 := 2097151 & (load3(s[34:]) >> 1) + s14 := 2097151 & (load4(s[36:]) >> 6) + s15 := 2097151 & (load3(s[39:]) >> 3) + s16 := 2097151 & load3(s[42:]) + s17 := 2097151 & (load4(s[44:]) >> 5) + s18 := 2097151 & (load3(s[47:]) >> 2) + s19 := 2097151 & (load4(s[49:]) >> 7) + s20 := 2097151 & (load4(s[52:]) >> 4) + s21 := 2097151 & (load3(s[55:]) >> 1) + s22 := 2097151 & (load4(s[57:]) >> 6) + s23 := (load4(s[60:]) >> 3) + + s11 += s23 * 666643 + s12 += s23 * 470296 + s13 += s23 * 654183 + s14 -= s23 * 997805 + s15 += s23 * 136657 + s16 -= s23 * 683901 + s23 = 0 + + s10 += s22 * 666643 + s11 += s22 * 470296 + s12 += s22 * 654183 + s13 -= s22 * 997805 + s14 += s22 * 136657 + s15 -= s22 * 683901 + s22 = 0 + + s9 += s21 * 666643 + s10 += s21 * 470296 + s11 += s21 * 654183 + s12 -= s21 * 997805 + s13 += s21 * 136657 + s14 -= s21 * 683901 + s21 = 0 + + s8 += s20 * 666643 + s9 += s20 * 470296 + s10 += s20 * 654183 + s11 -= s20 * 997805 + s12 += s20 * 136657 + s13 -= s20 * 683901 + s20 = 0 + + s7 += s19 * 666643 + s8 += s19 * 470296 + s9 += s19 * 654183 + s10 -= s19 * 997805 + s11 += s19 * 136657 + s12 -= s19 * 683901 + s19 = 0 + + s6 += s18 * 666643 + s7 += s18 * 470296 + s8 += s18 * 654183 + s9 -= s18 * 997805 + s10 += s18 * 136657 + s11 -= s18 * 683901 + s18 = 0 + + var carry [17]int64 + + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[12] = (s12 + (1 << 20)) >> 21 + s13 += carry[12] + s12 -= carry[12] << 21 + carry[14] = (s14 + (1 << 20)) >> 21 + s15 += carry[14] + s14 -= carry[14] << 21 + carry[16] = (s16 + (1 << 20)) >> 21 + s17 += carry[16] + s16 -= carry[16] << 21 + + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + carry[13] = (s13 + (1 << 20)) >> 21 + s14 += carry[13] + s13 -= carry[13] << 21 + carry[15] = (s15 + (1 << 20)) >> 21 + s16 += carry[15] + s15 -= carry[15] << 21 + + s5 += s17 * 666643 + s6 += s17 * 470296 + s7 += s17 * 654183 + s8 -= s17 * 997805 + s9 += s17 * 136657 + s10 -= s17 * 683901 + s17 = 0 + + s4 += s16 * 666643 + s5 += s16 * 470296 + s6 += s16 * 654183 + s7 -= s16 * 997805 + s8 += s16 * 136657 + s9 -= s16 * 683901 + s16 = 0 + + s3 += s15 * 666643 + s4 += s15 * 470296 + s5 += s15 * 654183 + s6 -= s15 * 997805 + s7 += s15 * 136657 + s8 -= s15 * 683901 + s15 = 0 + + s2 += s14 * 666643 + s3 += s14 * 470296 + s4 += s14 * 654183 + s5 -= s14 * 997805 + s6 += s14 * 136657 
+ s7 -= s14 * 683901 + s14 = 0 + + s1 += s13 * 666643 + s2 += s13 * 470296 + s3 += s13 * 654183 + s4 -= s13 * 997805 + s5 += s13 * 136657 + s6 -= s13 * 683901 + s13 = 0 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = (s0 + (1 << 20)) >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[2] = (s2 + (1 << 20)) >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[4] = (s4 + (1 << 20)) >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + + carry[1] = (s1 + (1 << 20)) >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[3] = (s3 + (1 << 20)) >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[5] = (s5 + (1 << 20)) >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = s0 >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[1] = s1 >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[2] = s2 >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[3] = s3 >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[4] = s4 >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[5] = s5 >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[6] = s6 >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[7] = s7 >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[8] = s8 >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[9] = s9 >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[10] = s10 >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[11] = s11 >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = s0 >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[1] = s1 >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[2] = s2 >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[3] = s3 >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[4] = s4 >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[5] = s5 >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[6] = s6 >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[7] = s7 >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[8] = s8 >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[9] = s9 >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[10] = s10 >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + + out[0] = byte(s0 >> 0) + out[1] = byte(s0 >> 8) + out[2] = byte((s0 >> 16) | (s1 << 5)) + out[3] = byte(s1 >> 3) + out[4] = byte(s1 >> 11) + out[5] = byte((s1 >> 19) | (s2 << 2)) + out[6] = byte(s2 >> 6) + out[7] = byte((s2 >> 14) | (s3 << 7)) + out[8] = byte(s3 >> 1) + out[9] = byte(s3 >> 9) + out[10] = byte((s3 >> 17) | (s4 << 4)) + out[11] = byte(s4 >> 4) + out[12] = byte(s4 >> 12) + out[13] = byte((s4 >> 20) | (s5 << 1)) + out[14] = byte(s5 >> 7) + out[15] = byte((s5 >> 15) | (s6 << 6)) + out[16] = byte(s6 >> 2) + out[17] = byte(s6 >> 10) + out[18] = byte((s6 >> 18) | (s7 << 
3)) + out[19] = byte(s7 >> 5) + out[20] = byte(s7 >> 13) + out[21] = byte(s8 >> 0) + out[22] = byte(s8 >> 8) + out[23] = byte((s8 >> 16) | (s9 << 5)) + out[24] = byte(s9 >> 3) + out[25] = byte(s9 >> 11) + out[26] = byte((s9 >> 19) | (s10 << 2)) + out[27] = byte(s10 >> 6) + out[28] = byte((s10 >> 14) | (s11 << 7)) + out[29] = byte(s11 >> 1) + out[30] = byte(s11 >> 9) + out[31] = byte(s11 >> 17) +} + +// order is the order of Curve25519 in little-endian form. +var order = [4]uint64{0x5812631a5cf5d3ed, 0x14def9dea2f79cd6, 0, 0x1000000000000000} + +// ScMinimal returns true if the given scalar is less than the order of the +// curve. +func ScMinimal(scalar *[32]byte) bool { + for i := 3; ; i-- { + v := binary.LittleEndian.Uint64(scalar[i*8:]) + if v > order[i] { + return false + } else if v < order[i] { + break + } else if i == 0 { + return false + } + } + + return true +} diff --git a/vendor/golang.org/x/crypto/internal/subtle/aliasing.go b/vendor/golang.org/x/crypto/internal/subtle/aliasing.go index 281c27ef02..4fad24f8dc 100644 --- a/vendor/golang.org/x/crypto/internal/subtle/aliasing.go +++ b/vendor/golang.org/x/crypto/internal/subtle/aliasing.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !purego // +build !purego // Package subtle implements functions that are often useful in cryptographic diff --git a/vendor/golang.org/x/crypto/internal/subtle/aliasing_purego.go b/vendor/golang.org/x/crypto/internal/subtle/aliasing_purego.go index e20a296592..80ccbed2c0 100644 --- a/vendor/golang.org/x/crypto/internal/subtle/aliasing_purego.go +++ b/vendor/golang.org/x/crypto/internal/subtle/aliasing_purego.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build purego // +build purego // Package subtle implements functions that are often useful in cryptographic diff --git a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go new file mode 100644 index 0000000000..593f653008 --- /dev/null +++ b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go @@ -0,0 +1,77 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package pbkdf2 implements the key derivation function PBKDF2 as defined in RFC +2898 / PKCS #5 v2.0. + +A key derivation function is useful when encrypting data based on a password +or any other not-fully-random data. It uses a pseudorandom function to derive +a secure encryption key based on the password. + +While v2.0 of the standard defines only one pseudorandom function to use, +HMAC-SHA1, the drafted v2.1 specification allows use of all five FIPS Approved +Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To +choose, you can pass the `New` functions from the different SHA packages to +pbkdf2.Key. +*/ +package pbkdf2 // import "golang.org/x/crypto/pbkdf2" + +import ( + "crypto/hmac" + "hash" +) + +// Key derives a key from the password, salt and iteration count, returning a +// []byte of length keylen that can be used as cryptographic key. The key is +// derived based on the method described as PBKDF2 with the HMAC variant using +// the supplied hash function. +// +// For example, to use a HMAC-SHA-1 based PBKDF2 key derivation function, you +// can get a derived key for e.g. 
AES-256 (which needs a 32-byte key) by +// doing: +// +// dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New) +// +// Remember to get a good random salt. At least 8 bytes is recommended by the +// RFC. +// +// Using a higher iteration count will increase the cost of an exhaustive +// search but will also make derivation proportionally slower. +func Key(password, salt []byte, iter, keyLen int, h func() hash.Hash) []byte { + prf := hmac.New(h, password) + hashLen := prf.Size() + numBlocks := (keyLen + hashLen - 1) / hashLen + + var buf [4]byte + dk := make([]byte, 0, numBlocks*hashLen) + U := make([]byte, hashLen) + for block := 1; block <= numBlocks; block++ { + // N.B.: || means concatenation, ^ means XOR + // for each block T_i = U_1 ^ U_2 ^ ... ^ U_iter + // U_1 = PRF(password, salt || uint(i)) + prf.Reset() + prf.Write(salt) + buf[0] = byte(block >> 24) + buf[1] = byte(block >> 16) + buf[2] = byte(block >> 8) + buf[3] = byte(block) + prf.Write(buf[:4]) + dk = prf.Sum(dk) + T := dk[len(dk)-hashLen:] + copy(U, T) + + // U_n = PRF(password, U_(n-1)) + for n := 2; n <= iter; n++ { + prf.Reset() + prf.Write(U) + U = U[:0] + U = prf.Sum(U) + for x := range U { + T[x] ^= U[x] + } + } + } + return dk[:keyLen] +} diff --git a/vendor/golang.org/x/crypto/poly1305/bits_compat.go b/vendor/golang.org/x/crypto/poly1305/bits_compat.go index 157a69f61b..45b5c966b2 100644 --- a/vendor/golang.org/x/crypto/poly1305/bits_compat.go +++ b/vendor/golang.org/x/crypto/poly1305/bits_compat.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !go1.13 // +build !go1.13 package poly1305 diff --git a/vendor/golang.org/x/crypto/poly1305/bits_go1.13.go b/vendor/golang.org/x/crypto/poly1305/bits_go1.13.go index a0a185f0fc..ed52b3418a 100644 --- a/vendor/golang.org/x/crypto/poly1305/bits_go1.13.go +++ b/vendor/golang.org/x/crypto/poly1305/bits_go1.13.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build go1.13 // +build go1.13 package poly1305 diff --git a/vendor/golang.org/x/crypto/poly1305/mac_noasm.go b/vendor/golang.org/x/crypto/poly1305/mac_noasm.go index af6c94f921..f184b67d98 100644 --- a/vendor/golang.org/x/crypto/poly1305/mac_noasm.go +++ b/vendor/golang.org/x/crypto/poly1305/mac_noasm.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build (!amd64 && !ppc64le && !s390x) || !gc || purego // +build !amd64,!ppc64le,!s390x !gc purego package poly1305 diff --git a/vendor/golang.org/x/crypto/poly1305/sum_amd64.go b/vendor/golang.org/x/crypto/poly1305/sum_amd64.go index cf3a69ed3b..6d522333f2 100644 --- a/vendor/golang.org/x/crypto/poly1305/sum_amd64.go +++ b/vendor/golang.org/x/crypto/poly1305/sum_amd64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build gc && !purego // +build gc,!purego package poly1305 diff --git a/vendor/golang.org/x/crypto/poly1305/sum_amd64.s b/vendor/golang.org/x/crypto/poly1305/sum_amd64.s index 2cb0373140..1d74f0f881 100644 --- a/vendor/golang.org/x/crypto/poly1305/sum_amd64.s +++ b/vendor/golang.org/x/crypto/poly1305/sum_amd64.s @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build gc && !purego // +build gc,!purego #include "textflag.h" diff --git a/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go b/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go index cb4b7185dc..4a069941a6 100644 --- a/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go +++ b/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build gc && !purego // +build gc,!purego package poly1305 diff --git a/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s b/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s index 5cd7494b21..58422aad23 100644 --- a/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s +++ b/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build gc && !purego // +build gc,!purego #include "textflag.h" @@ -82,7 +83,7 @@ multiply: BGE loop bytes_between_0_and_15: - CMP $0, R5 + CMP R5, $0 BEQ done MOVD $0, R16 // h0 MOVD $0, R17 // h1 @@ -122,7 +123,7 @@ just1: // Exactly 8 MOVD (R4), R16 - CMP $0, R17 + CMP R17, $0 // Check if we've already set R17; if not // set 1 to indicate end of msg. @@ -151,7 +152,7 @@ less4: ADD $2, R4 less2: - CMP $0, R5 + CMP R5, $0 BEQ insert1 MOVBZ (R4), R21 SLD R22, R21, R21 @@ -166,12 +167,12 @@ insert1: carry: // Add new values to h0, h1, h2 - ADDC R16, R8 - ADDE R17, R9 - ADDE $0, R10 - MOVD $16, R5 - ADD R5, R4 - BR multiply + ADDC R16, R8 + ADDE R17, R9 + ADDZE R10, R10 + MOVD $16, R5 + ADD R5, R4 + BR multiply done: // Save h0, h1, h2 in state diff --git a/vendor/golang.org/x/crypto/poly1305/sum_s390x.go b/vendor/golang.org/x/crypto/poly1305/sum_s390x.go index 188a665e12..62cc9f8470 100644 --- a/vendor/golang.org/x/crypto/poly1305/sum_s390x.go +++ b/vendor/golang.org/x/crypto/poly1305/sum_s390x.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build gc && !purego // +build gc,!purego package poly1305 diff --git a/vendor/golang.org/x/crypto/poly1305/sum_s390x.s b/vendor/golang.org/x/crypto/poly1305/sum_s390x.s index bdd882c606..69c64f8421 100644 --- a/vendor/golang.org/x/crypto/poly1305/sum_s390x.s +++ b/vendor/golang.org/x/crypto/poly1305/sum_s390x.s @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build gc && !purego // +build gc,!purego #include "textflag.h" diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go index 5cf1d6d839..c400dfcf7b 100644 --- a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go +++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build amd64 && !purego && gc // +build amd64,!purego,gc package salsa diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s index 9c84012466..c089277204 100644 --- a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s +++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s @@ -2,13 +2,14 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build amd64 && !purego && gc // +build amd64,!purego,gc // This code was translated into a form compatible with 6a from the public // domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html // func salsa2020XORKeyStream(out, in *byte, n uint64, nonce, key *byte) -// This needs up to 64 bytes at 360(SP); hence the non-obvious frame size. +// This needs up to 64 bytes at 360(R12); hence the non-obvious frame size. TEXT ·salsa2020XORKeyStream(SB),0,$456-40 // frame = 424 + 32 byte alignment MOVQ out+0(FP),DI MOVQ in+8(FP),SI @@ -17,10 +18,8 @@ TEXT ·salsa2020XORKeyStream(SB),0,$456-40 // frame = 424 + 32 byte alignment MOVQ key+32(FP),R8 MOVQ SP,R12 - MOVQ SP,R9 - ADDQ $31, R9 - ANDQ $~31, R9 - MOVQ R9, SP + ADDQ $31, R12 + ANDQ $~31, R12 MOVQ DX,R9 MOVQ CX,DX @@ -32,116 +31,116 @@ TEXT ·salsa2020XORKeyStream(SB),0,$456-40 // frame = 424 + 32 byte alignment MOVL 0(R10),R8 MOVL 0(DX),AX MOVL 16(R10),R11 - MOVL CX,0(SP) - MOVL R8, 4 (SP) - MOVL AX, 8 (SP) - MOVL R11, 12 (SP) + MOVL CX,0(R12) + MOVL R8, 4 (R12) + MOVL AX, 8 (R12) + MOVL R11, 12 (R12) MOVL 8(DX),CX MOVL 24(R10),R8 MOVL 4(R10),AX MOVL 4(DX),R11 - MOVL CX,16(SP) - MOVL R8, 20 (SP) - MOVL AX, 24 (SP) - MOVL R11, 28 (SP) + MOVL CX,16(R12) + MOVL R8, 20 (R12) + MOVL AX, 24 (R12) + MOVL R11, 28 (R12) MOVL 12(DX),CX MOVL 12(R10),DX MOVL 28(R10),R8 MOVL 8(R10),AX - MOVL DX,32(SP) - MOVL CX, 36 (SP) - MOVL R8, 40 (SP) - MOVL AX, 44 (SP) + MOVL DX,32(R12) + MOVL CX, 36 (R12) + MOVL R8, 40 (R12) + MOVL AX, 44 (R12) MOVQ $1634760805,DX MOVQ $857760878,CX MOVQ $2036477234,R8 MOVQ $1797285236,AX - MOVL DX,48(SP) - MOVL CX, 52 (SP) - MOVL R8, 56 (SP) - MOVL AX, 60 (SP) + MOVL DX,48(R12) + MOVL CX, 52 (R12) + MOVL R8, 56 (R12) + MOVL AX, 60 (R12) CMPQ R9,$256 JB BYTESBETWEEN1AND255 - MOVOA 48(SP),X0 + MOVOA 48(R12),X0 PSHUFL $0X55,X0,X1 PSHUFL $0XAA,X0,X2 PSHUFL $0XFF,X0,X3 PSHUFL $0X00,X0,X0 - MOVOA X1,64(SP) - MOVOA X2,80(SP) - MOVOA X3,96(SP) - MOVOA X0,112(SP) - MOVOA 0(SP),X0 + MOVOA X1,64(R12) + MOVOA X2,80(R12) + MOVOA X3,96(R12) + MOVOA X0,112(R12) + MOVOA 0(R12),X0 PSHUFL $0XAA,X0,X1 PSHUFL $0XFF,X0,X2 PSHUFL $0X00,X0,X3 PSHUFL $0X55,X0,X0 - MOVOA X1,128(SP) - MOVOA X2,144(SP) - MOVOA X3,160(SP) - MOVOA X0,176(SP) - MOVOA 16(SP),X0 + MOVOA X1,128(R12) + MOVOA X2,144(R12) + MOVOA X3,160(R12) + MOVOA X0,176(R12) + MOVOA 16(R12),X0 PSHUFL $0XFF,X0,X1 PSHUFL $0X55,X0,X2 PSHUFL $0XAA,X0,X0 - MOVOA X1,192(SP) - MOVOA X2,208(SP) - MOVOA X0,224(SP) - MOVOA 32(SP),X0 + MOVOA X1,192(R12) + MOVOA X2,208(R12) + MOVOA X0,224(R12) + MOVOA 32(R12),X0 PSHUFL $0X00,X0,X1 PSHUFL $0XAA,X0,X2 PSHUFL $0XFF,X0,X0 - MOVOA X1,240(SP) - MOVOA X2,256(SP) - MOVOA X0,272(SP) + MOVOA X1,240(R12) + MOVOA X2,256(R12) + MOVOA X0,272(R12) BYTESATLEAST256: - MOVL 16(SP),DX - MOVL 36 (SP),CX - MOVL DX,288(SP) - MOVL CX,304(SP) + MOVL 16(R12),DX + MOVL 36 (R12),CX + MOVL DX,288(R12) + MOVL CX,304(R12) SHLQ $32,CX ADDQ CX,DX ADDQ $1,DX MOVQ DX,CX SHRQ $32,CX - MOVL DX, 292 (SP) - MOVL CX, 308 (SP) + MOVL DX, 292 (R12) + MOVL CX, 308 (R12) ADDQ $1,DX MOVQ DX,CX SHRQ $32,CX - MOVL DX, 296 (SP) - MOVL CX, 312 (SP) + MOVL DX, 296 (R12) + MOVL CX, 312 (R12) ADDQ $1,DX MOVQ DX,CX SHRQ $32,CX - MOVL DX, 300 (SP) - MOVL CX, 316 (SP) + MOVL DX, 300 (R12) + MOVL CX, 316 (R12) ADDQ $1,DX MOVQ DX,CX SHRQ $32,CX - MOVL DX,16(SP) - MOVL CX, 36 (SP) - MOVQ R9,352(SP) + MOVL DX,16(R12) + MOVL CX, 36 (R12) + MOVQ R9,352(R12) MOVQ $20,DX - MOVOA 64(SP),X0 - MOVOA 80(SP),X1 - MOVOA 96(SP),X2 - MOVOA 256(SP),X3 - MOVOA 272(SP),X4 - MOVOA 128(SP),X5 - MOVOA 144(SP),X6 - MOVOA 
176(SP),X7 - MOVOA 192(SP),X8 - MOVOA 208(SP),X9 - MOVOA 224(SP),X10 - MOVOA 304(SP),X11 - MOVOA 112(SP),X12 - MOVOA 160(SP),X13 - MOVOA 240(SP),X14 - MOVOA 288(SP),X15 + MOVOA 64(R12),X0 + MOVOA 80(R12),X1 + MOVOA 96(R12),X2 + MOVOA 256(R12),X3 + MOVOA 272(R12),X4 + MOVOA 128(R12),X5 + MOVOA 144(R12),X6 + MOVOA 176(R12),X7 + MOVOA 192(R12),X8 + MOVOA 208(R12),X9 + MOVOA 224(R12),X10 + MOVOA 304(R12),X11 + MOVOA 112(R12),X12 + MOVOA 160(R12),X13 + MOVOA 240(R12),X14 + MOVOA 288(R12),X15 MAINLOOP1: - MOVOA X1,320(SP) - MOVOA X2,336(SP) + MOVOA X1,320(R12) + MOVOA X2,336(R12) MOVOA X13,X1 PADDL X12,X1 MOVOA X1,X2 @@ -191,8 +190,8 @@ TEXT ·salsa2020XORKeyStream(SB),0,$456-40 // frame = 424 + 32 byte alignment PXOR X1,X12 PSRLL $14,X2 PXOR X2,X12 - MOVOA 320(SP),X1 - MOVOA X12,320(SP) + MOVOA 320(R12),X1 + MOVOA X12,320(R12) MOVOA X9,X2 PADDL X7,X2 MOVOA X2,X12 @@ -207,8 +206,8 @@ TEXT ·salsa2020XORKeyStream(SB),0,$456-40 // frame = 424 + 32 byte alignment PXOR X2,X3 PSRLL $25,X12 PXOR X12,X3 - MOVOA 336(SP),X2 - MOVOA X0,336(SP) + MOVOA 336(R12),X2 + MOVOA X0,336(R12) MOVOA X6,X0 PADDL X2,X0 MOVOA X0,X12 @@ -251,8 +250,8 @@ TEXT ·salsa2020XORKeyStream(SB),0,$456-40 // frame = 424 + 32 byte alignment PXOR X0,X1 PSRLL $14,X12 PXOR X12,X1 - MOVOA 320(SP),X0 - MOVOA X1,320(SP) + MOVOA 320(R12),X0 + MOVOA X1,320(R12) MOVOA X4,X1 PADDL X0,X1 MOVOA X1,X12 @@ -267,8 +266,8 @@ TEXT ·salsa2020XORKeyStream(SB),0,$456-40 // frame = 424 + 32 byte alignment PXOR X1,X2 PSRLL $14,X12 PXOR X12,X2 - MOVOA 336(SP),X12 - MOVOA X2,336(SP) + MOVOA 336(R12),X12 + MOVOA X2,336(R12) MOVOA X14,X1 PADDL X12,X1 MOVOA X1,X2 @@ -311,8 +310,8 @@ TEXT ·salsa2020XORKeyStream(SB),0,$456-40 // frame = 424 + 32 byte alignment PXOR X1,X0 PSRLL $14,X2 PXOR X2,X0 - MOVOA 320(SP),X1 - MOVOA X0,320(SP) + MOVOA 320(R12),X1 + MOVOA X0,320(R12) MOVOA X8,X0 PADDL X14,X0 MOVOA X0,X2 @@ -327,8 +326,8 @@ TEXT ·salsa2020XORKeyStream(SB),0,$456-40 // frame = 424 + 32 byte alignment PXOR X0,X6 PSRLL $25,X2 PXOR X2,X6 - MOVOA 336(SP),X2 - MOVOA X12,336(SP) + MOVOA 336(R12),X2 + MOVOA X12,336(R12) MOVOA X3,X0 PADDL X2,X0 MOVOA X0,X12 @@ -378,14 +377,14 @@ TEXT ·salsa2020XORKeyStream(SB),0,$456-40 // frame = 424 + 32 byte alignment PXOR X0,X2 PSRLL $14,X12 PXOR X12,X2 - MOVOA 320(SP),X12 - MOVOA 336(SP),X0 + MOVOA 320(R12),X12 + MOVOA 336(R12),X0 SUBQ $2,DX JA MAINLOOP1 - PADDL 112(SP),X12 - PADDL 176(SP),X7 - PADDL 224(SP),X10 - PADDL 272(SP),X4 + PADDL 112(R12),X12 + PADDL 176(R12),X7 + PADDL 224(R12),X10 + PADDL 272(R12),X4 MOVD X12,DX MOVD X7,CX MOVD X10,R8 @@ -446,10 +445,10 @@ TEXT ·salsa2020XORKeyStream(SB),0,$456-40 // frame = 424 + 32 byte alignment MOVL CX,196(DI) MOVL R8,200(DI) MOVL R9,204(DI) - PADDL 240(SP),X14 - PADDL 64(SP),X0 - PADDL 128(SP),X5 - PADDL 192(SP),X8 + PADDL 240(R12),X14 + PADDL 64(R12),X0 + PADDL 128(R12),X5 + PADDL 192(R12),X8 MOVD X14,DX MOVD X0,CX MOVD X5,R8 @@ -510,10 +509,10 @@ TEXT ·salsa2020XORKeyStream(SB),0,$456-40 // frame = 424 + 32 byte alignment MOVL CX,212(DI) MOVL R8,216(DI) MOVL R9,220(DI) - PADDL 288(SP),X15 - PADDL 304(SP),X11 - PADDL 80(SP),X1 - PADDL 144(SP),X6 + PADDL 288(R12),X15 + PADDL 304(R12),X11 + PADDL 80(R12),X1 + PADDL 144(R12),X6 MOVD X15,DX MOVD X11,CX MOVD X1,R8 @@ -574,10 +573,10 @@ TEXT ·salsa2020XORKeyStream(SB),0,$456-40 // frame = 424 + 32 byte alignment MOVL CX,228(DI) MOVL R8,232(DI) MOVL R9,236(DI) - PADDL 160(SP),X13 - PADDL 208(SP),X9 - PADDL 256(SP),X3 - PADDL 96(SP),X2 + PADDL 160(R12),X13 + PADDL 208(R12),X9 + PADDL 256(R12),X3 + PADDL 96(R12),X2 MOVD X13,DX MOVD X9,CX 
MOVD X3,R8 @@ -638,7 +637,7 @@ TEXT ·salsa2020XORKeyStream(SB),0,$456-40 // frame = 424 + 32 byte alignment MOVL CX,244(DI) MOVL R8,248(DI) MOVL R9,252(DI) - MOVQ 352(SP),R9 + MOVQ 352(R12),R9 SUBQ $256,R9 ADDQ $256,SI ADDQ $256,DI @@ -650,17 +649,17 @@ TEXT ·salsa2020XORKeyStream(SB),0,$456-40 // frame = 424 + 32 byte alignment CMPQ R9,$64 JAE NOCOPY MOVQ DI,DX - LEAQ 360(SP),DI + LEAQ 360(R12),DI MOVQ R9,CX REP; MOVSB - LEAQ 360(SP),DI - LEAQ 360(SP),SI + LEAQ 360(R12),DI + LEAQ 360(R12),SI NOCOPY: - MOVQ R9,352(SP) - MOVOA 48(SP),X0 - MOVOA 0(SP),X1 - MOVOA 16(SP),X2 - MOVOA 32(SP),X3 + MOVQ R9,352(R12) + MOVOA 48(R12),X0 + MOVOA 0(R12),X1 + MOVOA 16(R12),X2 + MOVOA 32(R12),X3 MOVOA X1,X4 MOVQ $20,CX MAINLOOP2: @@ -791,10 +790,10 @@ TEXT ·salsa2020XORKeyStream(SB),0,$456-40 // frame = 424 + 32 byte alignment PSHUFL $0X39,X3,X3 PXOR X6,X0 JA MAINLOOP2 - PADDL 48(SP),X0 - PADDL 0(SP),X1 - PADDL 16(SP),X2 - PADDL 32(SP),X3 + PADDL 48(R12),X0 + PADDL 0(R12),X1 + PADDL 16(R12),X2 + PADDL 32(R12),X3 MOVD X0,CX MOVD X1,R8 MOVD X2,R9 @@ -855,16 +854,16 @@ TEXT ·salsa2020XORKeyStream(SB),0,$456-40 // frame = 424 + 32 byte alignment MOVL R8,44(DI) MOVL R9,28(DI) MOVL AX,12(DI) - MOVQ 352(SP),R9 - MOVL 16(SP),CX - MOVL 36 (SP),R8 + MOVQ 352(R12),R9 + MOVL 16(R12),CX + MOVL 36 (R12),R8 ADDQ $1,CX SHLQ $32,R8 ADDQ R8,CX MOVQ CX,R8 SHRQ $32,R8 - MOVL CX,16(SP) - MOVL R8, 36 (SP) + MOVL CX,16(R12) + MOVL R8, 36 (R12) CMPQ R9,$64 JA BYTESATLEAST65 JAE BYTESATLEAST64 @@ -874,7 +873,6 @@ TEXT ·salsa2020XORKeyStream(SB),0,$456-40 // frame = 424 + 32 byte alignment REP; MOVSB BYTESATLEAST64: DONE: - MOVQ R12,SP RET BYTESATLEAST65: SUBQ $64,R9 diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go index 24f03c146b..4392cc1ac7 100644 --- a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go +++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !amd64 || purego || !gc // +build !amd64 purego !gc package salsa diff --git a/vendor/golang.org/x/crypto/sha3/hashes_generic.go b/vendor/golang.org/x/crypto/sha3/hashes_generic.go index fac0c0e2c9..c74fc20fcb 100644 --- a/vendor/golang.org/x/crypto/sha3/hashes_generic.go +++ b/vendor/golang.org/x/crypto/sha3/hashes_generic.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !gc || purego || !s390x // +build !gc purego !s390x package sha3 diff --git a/vendor/golang.org/x/crypto/sha3/keccakf.go b/vendor/golang.org/x/crypto/sha3/keccakf.go index 92b63a3cf0..0f4ae8bacf 100644 --- a/vendor/golang.org/x/crypto/sha3/keccakf.go +++ b/vendor/golang.org/x/crypto/sha3/keccakf.go @@ -2,7 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !amd64 purego !gc +//go:build !amd64 || purego || !gc +// +build !amd64 purego !gc package sha3 diff --git a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go index 3e3e7600f9..248a38241f 100644 --- a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go +++ b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build amd64 && !purego && gc // +build amd64,!purego,gc package sha3 diff --git a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s index 8f4d1877ce..4cfa54383b 100644 --- a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s +++ b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build amd64 && !purego && gc // +build amd64,!purego,gc // This code was translated into a form compatible with 6a from the public diff --git a/vendor/golang.org/x/crypto/sha3/register.go b/vendor/golang.org/x/crypto/sha3/register.go index 3cf6a22e09..8b4453aac3 100644 --- a/vendor/golang.org/x/crypto/sha3/register.go +++ b/vendor/golang.org/x/crypto/sha3/register.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build go1.4 // +build go1.4 package sha3 diff --git a/vendor/golang.org/x/crypto/sha3/sha3_s390x.go b/vendor/golang.org/x/crypto/sha3/sha3_s390x.go index 485e2d5fa8..4fcfc924ef 100644 --- a/vendor/golang.org/x/crypto/sha3/sha3_s390x.go +++ b/vendor/golang.org/x/crypto/sha3/sha3_s390x.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build gc && !purego // +build gc,!purego package sha3 diff --git a/vendor/golang.org/x/crypto/sha3/sha3_s390x.s b/vendor/golang.org/x/crypto/sha3/sha3_s390x.s index e2df6412e7..a0e051b045 100644 --- a/vendor/golang.org/x/crypto/sha3/sha3_s390x.s +++ b/vendor/golang.org/x/crypto/sha3/sha3_s390x.s @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build gc && !purego // +build gc,!purego #include "textflag.h" diff --git a/vendor/golang.org/x/crypto/sha3/shake_generic.go b/vendor/golang.org/x/crypto/sha3/shake_generic.go index 68148f2643..5c0710ef98 100644 --- a/vendor/golang.org/x/crypto/sha3/shake_generic.go +++ b/vendor/golang.org/x/crypto/sha3/shake_generic.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !gc || purego || !s390x // +build !gc purego !s390x package sha3 diff --git a/vendor/golang.org/x/crypto/sha3/xor.go b/vendor/golang.org/x/crypto/sha3/xor.go index ddafa826c9..59c8eb9418 100644 --- a/vendor/golang.org/x/crypto/sha3/xor.go +++ b/vendor/golang.org/x/crypto/sha3/xor.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build (!amd64 && !386 && !ppc64le) || purego // +build !amd64,!386,!ppc64le purego package sha3 diff --git a/vendor/golang.org/x/crypto/sha3/xor_generic.go b/vendor/golang.org/x/crypto/sha3/xor_generic.go index fd35f02ef6..8d94771127 100644 --- a/vendor/golang.org/x/crypto/sha3/xor_generic.go +++ b/vendor/golang.org/x/crypto/sha3/xor_generic.go @@ -19,7 +19,7 @@ func xorInGeneric(d *state, buf []byte) { } } -// copyOutGeneric copies ulint64s to a byte buffer. +// copyOutGeneric copies uint64s to a byte buffer. 
func copyOutGeneric(d *state, b []byte) { for i := 0; len(b) >= 8; i++ { binary.LittleEndian.PutUint64(b, d.a[i]) diff --git a/vendor/golang.org/x/crypto/sha3/xor_unaligned.go b/vendor/golang.org/x/crypto/sha3/xor_unaligned.go index 6249ad85ca..1ce606246d 100644 --- a/vendor/golang.org/x/crypto/sha3/xor_unaligned.go +++ b/vendor/golang.org/x/crypto/sha3/xor_unaligned.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build (amd64 || 386 || ppc64le) && !purego // +build amd64 386 ppc64le // +build !purego diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go new file mode 100644 index 0000000000..a3c021d3f8 --- /dev/null +++ b/vendor/golang.org/x/net/context/context.go @@ -0,0 +1,56 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package context defines the Context type, which carries deadlines, +// cancelation signals, and other request-scoped values across API boundaries +// and between processes. +// As of Go 1.7 this package is available in the standard library under the +// name context. https://golang.org/pkg/context. +// +// Incoming requests to a server should create a Context, and outgoing calls to +// servers should accept a Context. The chain of function calls between must +// propagate the Context, optionally replacing it with a modified copy created +// using WithDeadline, WithTimeout, WithCancel, or WithValue. +// +// Programs that use Contexts should follow these rules to keep interfaces +// consistent across packages and enable static analysis tools to check context +// propagation: +// +// Do not store Contexts inside a struct type; instead, pass a Context +// explicitly to each function that needs it. The Context should be the first +// parameter, typically named ctx: +// +// func DoSomething(ctx context.Context, arg Arg) error { +// // ... use ctx ... +// } +// +// Do not pass a nil Context, even if a function permits it. Pass context.TODO +// if you are unsure about which Context to use. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +// +// The same Context may be passed to functions running in different goroutines; +// Contexts are safe for simultaneous use by multiple goroutines. +// +// See http://blog.golang.org/context for example code for a server that uses +// Contexts. +package context // import "golang.org/x/net/context" + +// Background returns a non-nil, empty Context. It is never canceled, has no +// values, and has no deadline. It is typically used by the main function, +// initialization, and tests, and as the top-level Context for incoming +// requests. +func Background() Context { + return background +} + +// TODO returns a non-nil, empty Context. Code should use context.TODO when +// it's unclear which Context to use or it is not yet available (because the +// surrounding function has not yet been extended to accept a Context +// parameter). TODO is recognized by static analysis tools that determine +// whether Contexts are propagated correctly in a program. 
+func TODO() Context { + return todo +} diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go new file mode 100644 index 0000000000..344bd14334 --- /dev/null +++ b/vendor/golang.org/x/net/context/go17.go @@ -0,0 +1,73 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.7 +// +build go1.7 + +package context + +import ( + "context" // standard library's context, as of Go 1.7 + "time" +) + +var ( + todo = context.TODO() + background = context.Background() +) + +// Canceled is the error returned by Context.Err when the context is canceled. +var Canceled = context.Canceled + +// DeadlineExceeded is the error returned by Context.Err when the context's +// deadline passes. +var DeadlineExceeded = context.DeadlineExceeded + +// WithCancel returns a copy of parent with a new Done channel. The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + ctx, f := context.WithCancel(parent) + return ctx, CancelFunc(f) +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + ctx, f := context.WithDeadline(parent, deadline) + return ctx, CancelFunc(f) +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key interface{}, val interface{}) Context { + return context.WithValue(parent, key, val) +} diff --git a/vendor/golang.org/x/net/context/go19.go b/vendor/golang.org/x/net/context/go19.go new file mode 100644 index 0000000000..64d31ecc3e --- /dev/null +++ b/vendor/golang.org/x/net/context/go19.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.9 +// +build go1.9 + +package context + +import "context" // standard library's context, as of Go 1.7 + +// A Context carries a deadline, a cancelation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. +type Context = context.Context + +// A CancelFunc tells an operation to abandon its work. +// A CancelFunc does not wait for the work to stop. +// After the first call, subsequent calls to a CancelFunc do nothing. +type CancelFunc = context.CancelFunc diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go new file mode 100644 index 0000000000..5270db5db7 --- /dev/null +++ b/vendor/golang.org/x/net/context/pre_go17.go @@ -0,0 +1,301 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.7 +// +build !go1.7 + +package context + +import ( + "errors" + "fmt" + "sync" + "time" +) + +// An emptyCtx is never canceled, has no values, and has no deadline. It is not +// struct{}, since vars of this type must have distinct addresses. +type emptyCtx int + +func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +func (*emptyCtx) Done() <-chan struct{} { + return nil +} + +func (*emptyCtx) Err() error { + return nil +} + +func (*emptyCtx) Value(key interface{}) interface{} { + return nil +} + +func (e *emptyCtx) String() string { + switch e { + case background: + return "context.Background" + case todo: + return "context.TODO" + } + return "unknown empty Context" +} + +var ( + background = new(emptyCtx) + todo = new(emptyCtx) +) + +// Canceled is the error returned by Context.Err when the context is canceled. +var Canceled = errors.New("context canceled") + +// DeadlineExceeded is the error returned by Context.Err when the context's +// deadline passes. +var DeadlineExceeded = errors.New("context deadline exceeded") + +// WithCancel returns a copy of parent with a new Done channel. The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + c := newCancelCtx(parent) + propagateCancel(parent, c) + return c, func() { c.cancel(true, Canceled) } +} + +// newCancelCtx returns an initialized cancelCtx. +func newCancelCtx(parent Context) *cancelCtx { + return &cancelCtx{ + Context: parent, + done: make(chan struct{}), + } +} + +// propagateCancel arranges for child to be canceled when parent is. 
+func propagateCancel(parent Context, child canceler) { + if parent.Done() == nil { + return // parent is never canceled + } + if p, ok := parentCancelCtx(parent); ok { + p.mu.Lock() + if p.err != nil { + // parent has already been canceled + child.cancel(false, p.err) + } else { + if p.children == nil { + p.children = make(map[canceler]bool) + } + p.children[child] = true + } + p.mu.Unlock() + } else { + go func() { + select { + case <-parent.Done(): + child.cancel(false, parent.Err()) + case <-child.Done(): + } + }() + } +} + +// parentCancelCtx follows a chain of parent references until it finds a +// *cancelCtx. This function understands how each of the concrete types in this +// package represents its parent. +func parentCancelCtx(parent Context) (*cancelCtx, bool) { + for { + switch c := parent.(type) { + case *cancelCtx: + return c, true + case *timerCtx: + return c.cancelCtx, true + case *valueCtx: + parent = c.Context + default: + return nil, false + } + } +} + +// removeChild removes a context from its parent. +func removeChild(parent Context, child canceler) { + p, ok := parentCancelCtx(parent) + if !ok { + return + } + p.mu.Lock() + if p.children != nil { + delete(p.children, child) + } + p.mu.Unlock() +} + +// A canceler is a context type that can be canceled directly. The +// implementations are *cancelCtx and *timerCtx. +type canceler interface { + cancel(removeFromParent bool, err error) + Done() <-chan struct{} +} + +// A cancelCtx can be canceled. When canceled, it also cancels any children +// that implement canceler. +type cancelCtx struct { + Context + + done chan struct{} // closed by the first cancel call. + + mu sync.Mutex + children map[canceler]bool // set to nil by the first cancel call + err error // set to non-nil by the first cancel call +} + +func (c *cancelCtx) Done() <-chan struct{} { + return c.done +} + +func (c *cancelCtx) Err() error { + c.mu.Lock() + defer c.mu.Unlock() + return c.err +} + +func (c *cancelCtx) String() string { + return fmt.Sprintf("%v.WithCancel", c.Context) +} + +// cancel closes c.done, cancels each of c's children, and, if +// removeFromParent is true, removes c from its parent's children. +func (c *cancelCtx) cancel(removeFromParent bool, err error) { + if err == nil { + panic("context: internal error: missing cancel error") + } + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return // already canceled + } + c.err = err + close(c.done) + for child := range c.children { + // NOTE: acquiring the child's lock while holding parent's lock. + child.cancel(false, err) + } + c.children = nil + c.mu.Unlock() + + if removeFromParent { + removeChild(c.Context, c) + } +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { + // The current deadline is already sooner than the new one. 
+ return WithCancel(parent) + } + c := &timerCtx{ + cancelCtx: newCancelCtx(parent), + deadline: deadline, + } + propagateCancel(parent, c) + d := deadline.Sub(time.Now()) + if d <= 0 { + c.cancel(true, DeadlineExceeded) // deadline has already passed + return c, func() { c.cancel(true, Canceled) } + } + c.mu.Lock() + defer c.mu.Unlock() + if c.err == nil { + c.timer = time.AfterFunc(d, func() { + c.cancel(true, DeadlineExceeded) + }) + } + return c, func() { c.cancel(true, Canceled) } +} + +// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to +// implement Done and Err. It implements cancel by stopping its timer then +// delegating to cancelCtx.cancel. +type timerCtx struct { + *cancelCtx + timer *time.Timer // Under cancelCtx.mu. + + deadline time.Time +} + +func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { + return c.deadline, true +} + +func (c *timerCtx) String() string { + return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) +} + +func (c *timerCtx) cancel(removeFromParent bool, err error) { + c.cancelCtx.cancel(false, err) + if removeFromParent { + // Remove this timerCtx from its parent cancelCtx's children. + removeChild(c.cancelCtx.Context, c) + } + c.mu.Lock() + if c.timer != nil { + c.timer.Stop() + c.timer = nil + } + c.mu.Unlock() +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key interface{}, val interface{}) Context { + return &valueCtx{parent, key, val} +} + +// A valueCtx carries a key-value pair. It implements Value for that key and +// delegates all other calls to the embedded Context. +type valueCtx struct { + Context + key, val interface{} +} + +func (c *valueCtx) String() string { + return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) +} + +func (c *valueCtx) Value(key interface{}) interface{} { + if c.key == key { + return c.val + } + return c.Context.Value(key) +} diff --git a/vendor/golang.org/x/net/context/pre_go19.go b/vendor/golang.org/x/net/context/pre_go19.go new file mode 100644 index 0000000000..1f9715341f --- /dev/null +++ b/vendor/golang.org/x/net/context/pre_go19.go @@ -0,0 +1,110 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.9 +// +build !go1.9 + +package context + +import "time" + +// A Context carries a deadline, a cancelation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. 
+type Context interface { + // Deadline returns the time when work done on behalf of this context + // should be canceled. Deadline returns ok==false when no deadline is + // set. Successive calls to Deadline return the same results. + Deadline() (deadline time.Time, ok bool) + + // Done returns a channel that's closed when work done on behalf of this + // context should be canceled. Done may return nil if this context can + // never be canceled. Successive calls to Done return the same value. + // + // WithCancel arranges for Done to be closed when cancel is called; + // WithDeadline arranges for Done to be closed when the deadline + // expires; WithTimeout arranges for Done to be closed when the timeout + // elapses. + // + // Done is provided for use in select statements: + // + // // Stream generates values with DoSomething and sends them to out + // // until DoSomething returns an error or ctx.Done is closed. + // func Stream(ctx context.Context, out chan<- Value) error { + // for { + // v, err := DoSomething(ctx) + // if err != nil { + // return err + // } + // select { + // case <-ctx.Done(): + // return ctx.Err() + // case out <- v: + // } + // } + // } + // + // See http://blog.golang.org/pipelines for more examples of how to use + // a Done channel for cancelation. + Done() <-chan struct{} + + // Err returns a non-nil error value after Done is closed. Err returns + // Canceled if the context was canceled or DeadlineExceeded if the + // context's deadline passed. No other values for Err are defined. + // After Done is closed, successive calls to Err return the same value. + Err() error + + // Value returns the value associated with this context for key, or nil + // if no value is associated with key. Successive calls to Value with + // the same key returns the same result. + // + // Use context values only for request-scoped data that transits + // processes and API boundaries, not for passing optional parameters to + // functions. + // + // A key identifies a specific value in a Context. Functions that wish + // to store values in Context typically allocate a key in a global + // variable then use that key as the argument to context.WithValue and + // Context.Value. A key can be any type that supports equality; + // packages should define keys as an unexported type to avoid + // collisions. + // + // Packages that define a Context key should provide type-safe accessors + // for the values stores using that key: + // + // // Package user defines a User type that's stored in Contexts. + // package user + // + // import "golang.org/x/net/context" + // + // // User is the type of value stored in the Contexts. + // type User struct {...} + // + // // key is an unexported type for keys defined in this package. + // // This prevents collisions with keys defined in other packages. + // type key int + // + // // userKey is the key for user.User values in Contexts. It is + // // unexported; clients use user.NewContext and user.FromContext + // // instead of using this key directly. + // var userKey key = 0 + // + // // NewContext returns a new Context that carries value u. + // func NewContext(ctx context.Context, u *User) context.Context { + // return context.WithValue(ctx, userKey, u) + // } + // + // // FromContext returns the User value stored in ctx, if any. + // func FromContext(ctx context.Context) (*User, bool) { + // u, ok := ctx.Value(userKey).(*User) + // return u, ok + // } + Value(key interface{}) interface{} +} + +// A CancelFunc tells an operation to abandon its work. 
+// A CancelFunc does not wait for the work to stop. +// After the first call, subsequent calls to a CancelFunc do nothing. +type CancelFunc func() diff --git a/vendor/golang.org/x/net/html/const.go b/vendor/golang.org/x/net/html/const.go index 73804d3472..ff7acf2d5b 100644 --- a/vendor/golang.org/x/net/html/const.go +++ b/vendor/golang.org/x/net/html/const.go @@ -52,7 +52,7 @@ var isSpecialElementMap = map[string]bool{ "iframe": true, "img": true, "input": true, - "keygen": true, + "keygen": true, // "keygen" has been removed from the spec, but are kept here for backwards compatibility. "li": true, "link": true, "listing": true, diff --git a/vendor/golang.org/x/net/html/foreign.go b/vendor/golang.org/x/net/html/foreign.go index 74774c458a..9da9e9dc42 100644 --- a/vendor/golang.org/x/net/html/foreign.go +++ b/vendor/golang.org/x/net/html/foreign.go @@ -161,65 +161,62 @@ var mathMLAttributeAdjustments = map[string]string{ } var svgAttributeAdjustments = map[string]string{ - "attributename": "attributeName", - "attributetype": "attributeType", - "basefrequency": "baseFrequency", - "baseprofile": "baseProfile", - "calcmode": "calcMode", - "clippathunits": "clipPathUnits", - "contentscripttype": "contentScriptType", - "contentstyletype": "contentStyleType", - "diffuseconstant": "diffuseConstant", - "edgemode": "edgeMode", - "externalresourcesrequired": "externalResourcesRequired", - "filterunits": "filterUnits", - "glyphref": "glyphRef", - "gradienttransform": "gradientTransform", - "gradientunits": "gradientUnits", - "kernelmatrix": "kernelMatrix", - "kernelunitlength": "kernelUnitLength", - "keypoints": "keyPoints", - "keysplines": "keySplines", - "keytimes": "keyTimes", - "lengthadjust": "lengthAdjust", - "limitingconeangle": "limitingConeAngle", - "markerheight": "markerHeight", - "markerunits": "markerUnits", - "markerwidth": "markerWidth", - "maskcontentunits": "maskContentUnits", - "maskunits": "maskUnits", - "numoctaves": "numOctaves", - "pathlength": "pathLength", - "patterncontentunits": "patternContentUnits", - "patterntransform": "patternTransform", - "patternunits": "patternUnits", - "pointsatx": "pointsAtX", - "pointsaty": "pointsAtY", - "pointsatz": "pointsAtZ", - "preservealpha": "preserveAlpha", - "preserveaspectratio": "preserveAspectRatio", - "primitiveunits": "primitiveUnits", - "refx": "refX", - "refy": "refY", - "repeatcount": "repeatCount", - "repeatdur": "repeatDur", - "requiredextensions": "requiredExtensions", - "requiredfeatures": "requiredFeatures", - "specularconstant": "specularConstant", - "specularexponent": "specularExponent", - "spreadmethod": "spreadMethod", - "startoffset": "startOffset", - "stddeviation": "stdDeviation", - "stitchtiles": "stitchTiles", - "surfacescale": "surfaceScale", - "systemlanguage": "systemLanguage", - "tablevalues": "tableValues", - "targetx": "targetX", - "targety": "targetY", - "textlength": "textLength", - "viewbox": "viewBox", - "viewtarget": "viewTarget", - "xchannelselector": "xChannelSelector", - "ychannelselector": "yChannelSelector", - "zoomandpan": "zoomAndPan", + "attributename": "attributeName", + "attributetype": "attributeType", + "basefrequency": "baseFrequency", + "baseprofile": "baseProfile", + "calcmode": "calcMode", + "clippathunits": "clipPathUnits", + "diffuseconstant": "diffuseConstant", + "edgemode": "edgeMode", + "filterunits": "filterUnits", + "glyphref": "glyphRef", + "gradienttransform": "gradientTransform", + "gradientunits": "gradientUnits", + "kernelmatrix": "kernelMatrix", + "kernelunitlength": 
"kernelUnitLength", + "keypoints": "keyPoints", + "keysplines": "keySplines", + "keytimes": "keyTimes", + "lengthadjust": "lengthAdjust", + "limitingconeangle": "limitingConeAngle", + "markerheight": "markerHeight", + "markerunits": "markerUnits", + "markerwidth": "markerWidth", + "maskcontentunits": "maskContentUnits", + "maskunits": "maskUnits", + "numoctaves": "numOctaves", + "pathlength": "pathLength", + "patterncontentunits": "patternContentUnits", + "patterntransform": "patternTransform", + "patternunits": "patternUnits", + "pointsatx": "pointsAtX", + "pointsaty": "pointsAtY", + "pointsatz": "pointsAtZ", + "preservealpha": "preserveAlpha", + "preserveaspectratio": "preserveAspectRatio", + "primitiveunits": "primitiveUnits", + "refx": "refX", + "refy": "refY", + "repeatcount": "repeatCount", + "repeatdur": "repeatDur", + "requiredextensions": "requiredExtensions", + "requiredfeatures": "requiredFeatures", + "specularconstant": "specularConstant", + "specularexponent": "specularExponent", + "spreadmethod": "spreadMethod", + "startoffset": "startOffset", + "stddeviation": "stdDeviation", + "stitchtiles": "stitchTiles", + "surfacescale": "surfaceScale", + "systemlanguage": "systemLanguage", + "tablevalues": "tableValues", + "targetx": "targetX", + "targety": "targetY", + "textlength": "textLength", + "viewbox": "viewBox", + "viewtarget": "viewTarget", + "xchannelselector": "xChannelSelector", + "ychannelselector": "yChannelSelector", + "zoomandpan": "zoomAndPan", } diff --git a/vendor/golang.org/x/net/html/parse.go b/vendor/golang.org/x/net/html/parse.go index 2cd12fc816..f91466f7cd 100644 --- a/vendor/golang.org/x/net/html/parse.go +++ b/vendor/golang.org/x/net/html/parse.go @@ -728,7 +728,13 @@ func inHeadNoscriptIM(p *parser) bool { return inBodyIM(p) case a.Basefont, a.Bgsound, a.Link, a.Meta, a.Noframes, a.Style: return inHeadIM(p) - case a.Head, a.Noscript: + case a.Head: + // Ignore the token. + return true + case a.Noscript: + // Don't let the tokenizer go into raw text mode even when a