aboutsummaryrefslogtreecommitdiffstats
path: root/vendor/github.com/Azure/azure-storage-go
diff options
context:
space:
mode:
Diffstat (limited to 'vendor/github.com/Azure/azure-storage-go')
-rw-r--r--vendor/github.com/Azure/azure-storage-go/LICENSE21
-rw-r--r--vendor/github.com/Azure/azure-storage-go/README.md10
-rw-r--r--vendor/github.com/Azure/azure-storage-go/authorization.go223
-rw-r--r--vendor/github.com/Azure/azure-storage-go/blob.go1130
-rw-r--r--vendor/github.com/Azure/azure-storage-go/blobserviceclient.go92
-rw-r--r--vendor/github.com/Azure/azure-storage-go/client.go479
-rw-r--r--vendor/github.com/Azure/azure-storage-go/container.go376
-rw-r--r--vendor/github.com/Azure/azure-storage-go/directory.go217
-rw-r--r--vendor/github.com/Azure/azure-storage-go/file.go412
-rw-r--r--vendor/github.com/Azure/azure-storage-go/fileserviceclient.go375
-rw-r--r--vendor/github.com/Azure/azure-storage-go/glide.lock14
-rw-r--r--vendor/github.com/Azure/azure-storage-go/glide.yaml4
-rw-r--r--vendor/github.com/Azure/azure-storage-go/queue.go339
-rw-r--r--vendor/github.com/Azure/azure-storage-go/queueserviceclient.go20
-rw-r--r--vendor/github.com/Azure/azure-storage-go/share.go186
-rw-r--r--vendor/github.com/Azure/azure-storage-go/storagepolicy.go47
-rw-r--r--vendor/github.com/Azure/azure-storage-go/storageservice.go118
-rw-r--r--vendor/github.com/Azure/azure-storage-go/table.go254
-rw-r--r--vendor/github.com/Azure/azure-storage-go/table_entities.go354
-rw-r--r--vendor/github.com/Azure/azure-storage-go/tableserviceclient.go20
-rw-r--r--vendor/github.com/Azure/azure-storage-go/util.go85
-rw-r--r--vendor/github.com/Azure/azure-storage-go/version.go5
22 files changed, 4781 insertions, 0 deletions
diff --git a/vendor/github.com/Azure/azure-storage-go/LICENSE b/vendor/github.com/Azure/azure-storage-go/LICENSE
new file mode 100644
index 000000000..21071075c
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-go/LICENSE
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) Microsoft Corporation. All rights reserved.
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE
diff --git a/vendor/github.com/Azure/azure-storage-go/README.md b/vendor/github.com/Azure/azure-storage-go/README.md
new file mode 100644
index 000000000..f4af7bf39
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-go/README.md
@@ -0,0 +1,10 @@
+# Azure Storage SDK for Go
+[![GoDoc](https://godoc.org/github.com/Azure/azure-storage-go?status.svg)](https://godoc.org/github.com/Azure/azure-storage-go) [![Build Status](https://travis-ci.org/Azure/azure-storage-go.svg?branch=master)](https://travis-ci.org/Azure/azure-storage-go) [![Go Report Card](https://goreportcard.com/badge/github.com/Azure/azure-storage-go)](https://goreportcard.com/report/github.com/Azure/azure-storage-go)
+
+The `github.com/Azure/azure-sdk-for-go/storage` package is used to perform operations in Azure Storage Service. To manage your storage accounts (Azure Resource Manager / ARM), use the [github.com/Azure/azure-sdk-for-go/arm/storage](../arm/storage) package. For your classic storage accounts (Azure Service Management / ASM), use [github.com/Azure/azure-sdk-for-go/management/storageservice](../management/storageservice) package.
+
+This package includes support for [Azure Storage Emulator](https://azure.microsoft.com/documentation/articles/storage-use-emulator/)
+
+# Contributing
+
+This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
diff --git a/vendor/github.com/Azure/azure-storage-go/authorization.go b/vendor/github.com/Azure/azure-storage-go/authorization.go
new file mode 100644
index 000000000..89a0d0b3c
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-go/authorization.go
@@ -0,0 +1,223 @@
+// Package storage provides clients for Microsoft Azure Storage Services.
+package storage
+
+import (
+ "bytes"
+ "fmt"
+ "net/url"
+ "sort"
+ "strings"
+)
+
+// See: https://docs.microsoft.com/rest/api/storageservices/fileservices/authentication-for-the-azure-storage-services
+
+type authentication string
+
+const (
+ sharedKey authentication = "sharedKey"
+ sharedKeyForTable authentication = "sharedKeyTable"
+ sharedKeyLite authentication = "sharedKeyLite"
+ sharedKeyLiteForTable authentication = "sharedKeyLiteTable"
+
+ // headers
+ headerAuthorization = "Authorization"
+ headerContentLength = "Content-Length"
+ headerDate = "Date"
+ headerXmsDate = "x-ms-date"
+ headerXmsVersion = "x-ms-version"
+ headerContentEncoding = "Content-Encoding"
+ headerContentLanguage = "Content-Language"
+ headerContentType = "Content-Type"
+ headerContentMD5 = "Content-MD5"
+ headerIfModifiedSince = "If-Modified-Since"
+ headerIfMatch = "If-Match"
+ headerIfNoneMatch = "If-None-Match"
+ headerIfUnmodifiedSince = "If-Unmodified-Since"
+ headerRange = "Range"
+)
+
+func (c *Client) addAuthorizationHeader(verb, url string, headers map[string]string, auth authentication) (map[string]string, error) {
+ authHeader, err := c.getSharedKey(verb, url, headers, auth)
+ if err != nil {
+ return nil, err
+ }
+ headers[headerAuthorization] = authHeader
+ return headers, nil
+}
+
+func (c *Client) getSharedKey(verb, url string, headers map[string]string, auth authentication) (string, error) {
+ canRes, err := c.buildCanonicalizedResource(url, auth)
+ if err != nil {
+ return "", err
+ }
+
+ canString, err := buildCanonicalizedString(verb, headers, canRes, auth)
+ if err != nil {
+ return "", err
+ }
+ return c.createAuthorizationHeader(canString, auth), nil
+}
+
+func (c *Client) buildCanonicalizedResource(uri string, auth authentication) (string, error) {
+ errMsg := "buildCanonicalizedResource error: %s"
+ u, err := url.Parse(uri)
+ if err != nil {
+ return "", fmt.Errorf(errMsg, err.Error())
+ }
+
+ cr := bytes.NewBufferString("/")
+ cr.WriteString(c.getCanonicalizedAccountName())
+
+ if len(u.Path) > 0 {
+ // Any portion of the CanonicalizedResource string that is derived from
+ // the resource's URI should be encoded exactly as it is in the URI.
+ // -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx
+ cr.WriteString(u.EscapedPath())
+ }
+
+ params, err := url.ParseQuery(u.RawQuery)
+ if err != nil {
+ return "", fmt.Errorf(errMsg, err.Error())
+ }
+
+ // See https://github.com/Azure/azure-storage-net/blob/master/Lib/Common/Core/Util/AuthenticationUtility.cs#L277
+ if auth == sharedKey {
+ if len(params) > 0 {
+ cr.WriteString("\n")
+
+ keys := []string{}
+ for key := range params {
+ keys = append(keys, key)
+ }
+ sort.Strings(keys)
+
+ completeParams := []string{}
+ for _, key := range keys {
+ if len(params[key]) > 1 {
+ sort.Strings(params[key])
+ }
+
+ completeParams = append(completeParams, fmt.Sprintf("%s:%s", key, strings.Join(params[key], ",")))
+ }
+ cr.WriteString(strings.Join(completeParams, "\n"))
+ }
+ } else {
+ // search for "comp" parameter, if exists then add it to canonicalizedresource
+ if v, ok := params["comp"]; ok {
+ cr.WriteString("?comp=" + v[0])
+ }
+ }
+
+ return string(cr.Bytes()), nil
+}
+
+func (c *Client) getCanonicalizedAccountName() string {
+ // since we may be trying to access a secondary storage account, we need to
+ // remove the -secondary part of the storage name
+ return strings.TrimSuffix(c.accountName, "-secondary")
+}
+
+func buildCanonicalizedString(verb string, headers map[string]string, canonicalizedResource string, auth authentication) (string, error) {
+ contentLength := headers[headerContentLength]
+ if contentLength == "0" {
+ contentLength = ""
+ }
+ date := headers[headerDate]
+ if v, ok := headers[headerXmsDate]; ok {
+ if auth == sharedKey || auth == sharedKeyLite {
+ date = ""
+ } else {
+ date = v
+ }
+ }
+ var canString string
+ switch auth {
+ case sharedKey:
+ canString = strings.Join([]string{
+ verb,
+ headers[headerContentEncoding],
+ headers[headerContentLanguage],
+ contentLength,
+ headers[headerContentMD5],
+ headers[headerContentType],
+ date,
+ headers[headerIfModifiedSince],
+ headers[headerIfMatch],
+ headers[headerIfNoneMatch],
+ headers[headerIfUnmodifiedSince],
+ headers[headerRange],
+ buildCanonicalizedHeader(headers),
+ canonicalizedResource,
+ }, "\n")
+ case sharedKeyForTable:
+ canString = strings.Join([]string{
+ verb,
+ headers[headerContentMD5],
+ headers[headerContentType],
+ date,
+ canonicalizedResource,
+ }, "\n")
+ case sharedKeyLite:
+ canString = strings.Join([]string{
+ verb,
+ headers[headerContentMD5],
+ headers[headerContentType],
+ date,
+ buildCanonicalizedHeader(headers),
+ canonicalizedResource,
+ }, "\n")
+ case sharedKeyLiteForTable:
+ canString = strings.Join([]string{
+ date,
+ canonicalizedResource,
+ }, "\n")
+ default:
+ return "", fmt.Errorf("%s authentication is not supported yet", auth)
+ }
+ return canString, nil
+}
+
+func buildCanonicalizedHeader(headers map[string]string) string {
+ cm := make(map[string]string)
+
+ for k, v := range headers {
+ headerName := strings.TrimSpace(strings.ToLower(k))
+ if strings.HasPrefix(headerName, "x-ms-") {
+ cm[headerName] = v
+ }
+ }
+
+ if len(cm) == 0 {
+ return ""
+ }
+
+ keys := []string{}
+ for key := range cm {
+ keys = append(keys, key)
+ }
+
+ sort.Strings(keys)
+
+ ch := bytes.NewBufferString("")
+
+ for _, key := range keys {
+ ch.WriteString(key)
+ ch.WriteRune(':')
+ ch.WriteString(cm[key])
+ ch.WriteRune('\n')
+ }
+
+ return strings.TrimSuffix(string(ch.Bytes()), "\n")
+}
+
+func (c *Client) createAuthorizationHeader(canonicalizedString string, auth authentication) string {
+ signature := c.computeHmac256(canonicalizedString)
+ var key string
+ switch auth {
+ case sharedKey, sharedKeyForTable:
+ key = "SharedKey"
+ case sharedKeyLite, sharedKeyLiteForTable:
+ key = "SharedKeyLite"
+ }
+ return fmt.Sprintf("%s %s:%s", key, c.getCanonicalizedAccountName(), signature)
+}
diff --git a/vendor/github.com/Azure/azure-storage-go/blob.go b/vendor/github.com/Azure/azure-storage-go/blob.go
new file mode 100644
index 000000000..636efc662
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-go/blob.go
@@ -0,0 +1,1130 @@
+package storage
+
+import (
+ "bytes"
+ "encoding/xml"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// A Blob is an entry in BlobListResponse.
+type Blob struct {
+ Name string `xml:"Name"`
+ Properties BlobProperties `xml:"Properties"`
+ Metadata BlobMetadata `xml:"Metadata"`
+}
+
+// BlobMetadata is a set of custom name/value pairs.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179404.aspx
+type BlobMetadata map[string]string
+
+type blobMetadataEntries struct {
+ Entries []blobMetadataEntry `xml:",any"`
+}
+type blobMetadataEntry struct {
+ XMLName xml.Name
+ Value string `xml:",chardata"`
+}
+
+// UnmarshalXML converts the xml:Metadata into Metadata map
+func (bm *BlobMetadata) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ var entries blobMetadataEntries
+ if err := d.DecodeElement(&entries, &start); err != nil {
+ return err
+ }
+ for _, entry := range entries.Entries {
+ if *bm == nil {
+ *bm = make(BlobMetadata)
+ }
+ (*bm)[strings.ToLower(entry.XMLName.Local)] = entry.Value
+ }
+ return nil
+}
+
+// MarshalXML implements the xml.Marshaler interface. It encodes
+// metadata name/value pairs as they would appear in an Azure
+// ListBlobs response.
+func (bm BlobMetadata) MarshalXML(enc *xml.Encoder, start xml.StartElement) error {
+ entries := make([]blobMetadataEntry, 0, len(bm))
+ for k, v := range bm {
+ entries = append(entries, blobMetadataEntry{
+ XMLName: xml.Name{Local: http.CanonicalHeaderKey(k)},
+ Value: v,
+ })
+ }
+ return enc.EncodeElement(blobMetadataEntries{
+ Entries: entries,
+ }, start)
+}
+
+// BlobProperties contains various properties of a blob
+// returned in various endpoints like ListBlobs or GetBlobProperties.
+type BlobProperties struct {
+ LastModified string `xml:"Last-Modified"`
+ Etag string `xml:"Etag"`
+ ContentMD5 string `xml:"Content-MD5"`
+ ContentLength int64 `xml:"Content-Length"`
+ ContentType string `xml:"Content-Type"`
+ ContentEncoding string `xml:"Content-Encoding"`
+ CacheControl string `xml:"Cache-Control"`
+ ContentLanguage string `xml:"Cache-Language"`
+ BlobType BlobType `xml:"x-ms-blob-blob-type"`
+ SequenceNumber int64 `xml:"x-ms-blob-sequence-number"`
+ CopyID string `xml:"CopyId"`
+ CopyStatus string `xml:"CopyStatus"`
+ CopySource string `xml:"CopySource"`
+ CopyProgress string `xml:"CopyProgress"`
+ CopyCompletionTime string `xml:"CopyCompletionTime"`
+ CopyStatusDescription string `xml:"CopyStatusDescription"`
+ LeaseStatus string `xml:"LeaseStatus"`
+ LeaseState string `xml:"LeaseState"`
+}
+
+// BlobHeaders contains various properties of a blob and is an entry
+// in SetBlobProperties
+type BlobHeaders struct {
+ ContentMD5 string `header:"x-ms-blob-content-md5"`
+ ContentLanguage string `header:"x-ms-blob-content-language"`
+ ContentEncoding string `header:"x-ms-blob-content-encoding"`
+ ContentType string `header:"x-ms-blob-content-type"`
+ CacheControl string `header:"x-ms-blob-cache-control"`
+}
+
+// BlobType defines the type of the Azure Blob.
+type BlobType string
+
+// Types of page blobs
+const (
+ BlobTypeBlock BlobType = "BlockBlob"
+ BlobTypePage BlobType = "PageBlob"
+ BlobTypeAppend BlobType = "AppendBlob"
+)
+
+// PageWriteType defines the type updates that are going to be
+// done on the page blob.
+type PageWriteType string
+
+// Types of operations on page blobs
+const (
+ PageWriteTypeUpdate PageWriteType = "update"
+ PageWriteTypeClear PageWriteType = "clear"
+)
+
+const (
+ blobCopyStatusPending = "pending"
+ blobCopyStatusSuccess = "success"
+ blobCopyStatusAborted = "aborted"
+ blobCopyStatusFailed = "failed"
+)
+
+// lease constants.
+const (
+ leaseHeaderPrefix = "x-ms-lease-"
+ headerLeaseID = "x-ms-lease-id"
+ leaseAction = "x-ms-lease-action"
+ leaseBreakPeriod = "x-ms-lease-break-period"
+ leaseDuration = "x-ms-lease-duration"
+ leaseProposedID = "x-ms-proposed-lease-id"
+ leaseTime = "x-ms-lease-time"
+
+ acquireLease = "acquire"
+ renewLease = "renew"
+ changeLease = "change"
+ releaseLease = "release"
+ breakLease = "break"
+)
+
+// BlockListType is used to filter out types of blocks in a Get Blocks List call
+// for a block blob.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx for all
+// block types.
+type BlockListType string
+
+// Filters for listing blocks in block blobs
+const (
+ BlockListTypeAll BlockListType = "all"
+ BlockListTypeCommitted BlockListType = "committed"
+ BlockListTypeUncommitted BlockListType = "uncommitted"
+)
+
+// Maximum sizes (per REST API) for various concepts
+const (
+ MaxBlobBlockSize = 100 * 1024 * 1024
+ MaxBlobPageSize = 4 * 1024 * 1024
+)
+
+// BlockStatus defines states a block for a block blob can
+// be in.
+type BlockStatus string
+
+// List of statuses that can be used to refer to a block in a block list
+const (
+ BlockStatusUncommitted BlockStatus = "Uncommitted"
+ BlockStatusCommitted BlockStatus = "Committed"
+ BlockStatusLatest BlockStatus = "Latest"
+)
+
+// Block is used to create Block entities for Put Block List
+// call.
+type Block struct {
+ ID string
+ Status BlockStatus
+}
+
+// BlockListResponse contains the response fields from Get Block List call.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx
+type BlockListResponse struct {
+ XMLName xml.Name `xml:"BlockList"`
+ CommittedBlocks []BlockResponse `xml:"CommittedBlocks>Block"`
+ UncommittedBlocks []BlockResponse `xml:"UncommittedBlocks>Block"`
+}
+
+// BlockResponse contains the block information returned
+// in the GetBlockListCall.
+type BlockResponse struct {
+ Name string `xml:"Name"`
+ Size int64 `xml:"Size"`
+}
+
+// GetPageRangesResponse contains the response fields from
+// Get Page Ranges call.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx
+type GetPageRangesResponse struct {
+ XMLName xml.Name `xml:"PageList"`
+ PageList []PageRange `xml:"PageRange"`
+}
+
+// PageRange contains information about a page of a page blob from
+// Get Pages Range call.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx
+type PageRange struct {
+ Start int64 `xml:"Start"`
+ End int64 `xml:"End"`
+}
+
+var (
+ errBlobCopyAborted = errors.New("storage: blob copy is aborted")
+ errBlobCopyIDMismatch = errors.New("storage: blob copy id is a mismatch")
+)
+
+// BlobExists returns true if a blob with given name exists on the specified
+// container of the storage account.
+func (b BlobStorageClient) BlobExists(container, name string) (bool, error) {
+ uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{})
+ headers := b.client.getStandardHeaders()
+ resp, err := b.client.exec(http.MethodHead, uri, headers, nil, b.auth)
+ if resp != nil {
+ defer readAndCloseBody(resp.body)
+ if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound {
+ return resp.statusCode == http.StatusOK, nil
+ }
+ }
+ return false, err
+}
+
+// GetBlobURL gets the canonical URL to the blob with the specified name in the
+// specified container. If name is not specified, the canonical URL for the entire
+// container is obtained.
+// This method does not create a publicly accessible URL if the blob or container
+// is private and this method does not check if the blob exists.
+func (b BlobStorageClient) GetBlobURL(container, name string) string {
+ if container == "" {
+ container = "$root"
+ }
+ return b.client.getEndpoint(blobServiceName, pathForResource(container, name), url.Values{})
+}
+
+// GetBlob returns a stream to read the blob. Caller must call Close() the
+// reader to close on the underlying connection.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179440.aspx
+func (b BlobStorageClient) GetBlob(container, name string) (io.ReadCloser, error) {
+ resp, err := b.getBlobRange(container, name, "", nil)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
+ return nil, err
+ }
+ return resp.body, nil
+}
+
+// GetBlobRange reads the specified range of a blob to a stream. The bytesRange
+// string must be in a format like "0-", "10-100" as defined in HTTP 1.1 spec.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179440.aspx
+func (b BlobStorageClient) GetBlobRange(container, name, bytesRange string, extraHeaders map[string]string) (io.ReadCloser, error) {
+ resp, err := b.getBlobRange(container, name, bytesRange, extraHeaders)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := checkRespCode(resp.statusCode, []int{http.StatusPartialContent}); err != nil {
+ return nil, err
+ }
+ return resp.body, nil
+}
+
+func (b BlobStorageClient) getBlobRange(container, name, bytesRange string, extraHeaders map[string]string) (*storageResponse, error) {
+ uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{})
+
+ extraHeaders = b.client.protectUserAgent(extraHeaders)
+ headers := b.client.getStandardHeaders()
+ if bytesRange != "" {
+ headers["Range"] = fmt.Sprintf("bytes=%s", bytesRange)
+ }
+
+ for k, v := range extraHeaders {
+ headers[k] = v
+ }
+
+ resp, err := b.client.exec(http.MethodGet, uri, headers, nil, b.auth)
+ if err != nil {
+ return nil, err
+ }
+ return resp, err
+}
+
+// leasePut is common PUT code for the various acquire/release/break etc functions.
+func (b BlobStorageClient) leaseCommonPut(container string, name string, headers map[string]string, expectedStatus int) (http.Header, error) {
+ params := url.Values{"comp": {"lease"}}
+ uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params)
+
+ resp, err := b.client.exec(http.MethodPut, uri, headers, nil, b.auth)
+ if err != nil {
+ return nil, err
+ }
+ defer readAndCloseBody(resp.body)
+
+ if err := checkRespCode(resp.statusCode, []int{expectedStatus}); err != nil {
+ return nil, err
+ }
+
+ return resp.headers, nil
+}
+
+// SnapshotBlob creates a snapshot for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691971.aspx
+func (b BlobStorageClient) SnapshotBlob(container string, name string, timeout int, extraHeaders map[string]string) (snapshotTimestamp *time.Time, err error) {
+ extraHeaders = b.client.protectUserAgent(extraHeaders)
+ headers := b.client.getStandardHeaders()
+ params := url.Values{"comp": {"snapshot"}}
+
+ if timeout > 0 {
+ params.Add("timeout", strconv.Itoa(timeout))
+ }
+
+ for k, v := range extraHeaders {
+ headers[k] = v
+ }
+
+ uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params)
+ resp, err := b.client.exec(http.MethodPut, uri, headers, nil, b.auth)
+ if err != nil || resp == nil {
+ return nil, err
+ }
+
+ defer readAndCloseBody(resp.body)
+
+ if err := checkRespCode(resp.statusCode, []int{http.StatusCreated}); err != nil {
+ return nil, err
+ }
+
+ snapshotResponse := resp.headers.Get(http.CanonicalHeaderKey("x-ms-snapshot"))
+ if snapshotResponse != "" {
+ snapshotTimestamp, err := time.Parse(time.RFC3339, snapshotResponse)
+ if err != nil {
+ return nil, err
+ }
+
+ return &snapshotTimestamp, nil
+ }
+
+ return nil, errors.New("Snapshot not created")
+}
+
+// AcquireLease creates a lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx
+// returns leaseID acquired
+// In API Versions starting on 2012-02-12, the minimum leaseTimeInSeconds is 15, the maximum
+// non-infinite leaseTimeInSeconds is 60. To specify an infinite lease, provide the value -1.
+func (b BlobStorageClient) AcquireLease(container string, name string, leaseTimeInSeconds int, proposedLeaseID string) (returnedLeaseID string, err error) {
+ headers := b.client.getStandardHeaders()
+ headers[leaseAction] = acquireLease
+
+ if leaseTimeInSeconds == -1 {
+ // Do nothing, but don't trigger the following clauses.
+ } else if leaseTimeInSeconds > 60 || b.client.apiVersion < "2012-02-12" {
+ leaseTimeInSeconds = 60
+ } else if leaseTimeInSeconds < 15 {
+ leaseTimeInSeconds = 15
+ }
+
+ headers[leaseDuration] = strconv.Itoa(leaseTimeInSeconds)
+
+ if proposedLeaseID != "" {
+ headers[leaseProposedID] = proposedLeaseID
+ }
+
+ respHeaders, err := b.leaseCommonPut(container, name, headers, http.StatusCreated)
+ if err != nil {
+ return "", err
+ }
+
+ returnedLeaseID = respHeaders.Get(http.CanonicalHeaderKey(headerLeaseID))
+
+ if returnedLeaseID != "" {
+ return returnedLeaseID, nil
+ }
+
+ return "", errors.New("LeaseID not returned")
+}
+
+// BreakLease breaks the lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx
+// Returns the timeout remaining in the lease in seconds
+func (b BlobStorageClient) BreakLease(container string, name string) (breakTimeout int, err error) {
+ headers := b.client.getStandardHeaders()
+ headers[leaseAction] = breakLease
+ return b.breakLeaseCommon(container, name, headers)
+}
+
+// BreakLeaseWithBreakPeriod breaks the lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx
+// breakPeriodInSeconds is used to determine how long until new lease can be created.
+// Returns the timeout remaining in the lease in seconds
+func (b BlobStorageClient) BreakLeaseWithBreakPeriod(container string, name string, breakPeriodInSeconds int) (breakTimeout int, err error) {
+ headers := b.client.getStandardHeaders()
+ headers[leaseAction] = breakLease
+ headers[leaseBreakPeriod] = strconv.Itoa(breakPeriodInSeconds)
+ return b.breakLeaseCommon(container, name, headers)
+}
+
+// breakLeaseCommon is common code for both version of BreakLease (with and without break period)
+func (b BlobStorageClient) breakLeaseCommon(container string, name string, headers map[string]string) (breakTimeout int, err error) {
+
+ respHeaders, err := b.leaseCommonPut(container, name, headers, http.StatusAccepted)
+ if err != nil {
+ return 0, err
+ }
+
+ breakTimeoutStr := respHeaders.Get(http.CanonicalHeaderKey(leaseTime))
+ if breakTimeoutStr != "" {
+ breakTimeout, err = strconv.Atoi(breakTimeoutStr)
+ if err != nil {
+ return 0, err
+ }
+ }
+
+ return breakTimeout, nil
+}
+
+// ChangeLease changes a lease ID for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx
+// Returns the new LeaseID acquired
+func (b BlobStorageClient) ChangeLease(container string, name string, currentLeaseID string, proposedLeaseID string) (newLeaseID string, err error) {
+ headers := b.client.getStandardHeaders()
+ headers[leaseAction] = changeLease
+ headers[headerLeaseID] = currentLeaseID
+ headers[leaseProposedID] = proposedLeaseID
+
+ respHeaders, err := b.leaseCommonPut(container, name, headers, http.StatusOK)
+ if err != nil {
+ return "", err
+ }
+
+ newLeaseID = respHeaders.Get(http.CanonicalHeaderKey(headerLeaseID))
+ if newLeaseID != "" {
+ return newLeaseID, nil
+ }
+
+ return "", errors.New("LeaseID not returned")
+}
+
+// ReleaseLease releases the lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx
+func (b BlobStorageClient) ReleaseLease(container string, name string, currentLeaseID string) error {
+ headers := b.client.getStandardHeaders()
+ headers[leaseAction] = releaseLease
+ headers[headerLeaseID] = currentLeaseID
+
+ _, err := b.leaseCommonPut(container, name, headers, http.StatusOK)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// RenewLease renews the lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx
+func (b BlobStorageClient) RenewLease(container string, name string, currentLeaseID string) error {
+ headers := b.client.getStandardHeaders()
+ headers[leaseAction] = renewLease
+ headers[headerLeaseID] = currentLeaseID
+
+ _, err := b.leaseCommonPut(container, name, headers, http.StatusOK)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// GetBlobProperties provides various information about the specified
+// blob. See https://msdn.microsoft.com/en-us/library/azure/dd179394.aspx
+func (b BlobStorageClient) GetBlobProperties(container, name string) (*BlobProperties, error) {
+ uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{})
+
+ headers := b.client.getStandardHeaders()
+ resp, err := b.client.exec(http.MethodHead, uri, headers, nil, b.auth)
+ if err != nil {
+ return nil, err
+ }
+ defer readAndCloseBody(resp.body)
+
+ if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
+ return nil, err
+ }
+
+ var contentLength int64
+ contentLengthStr := resp.headers.Get("Content-Length")
+ if contentLengthStr != "" {
+ contentLength, err = strconv.ParseInt(contentLengthStr, 0, 64)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ var sequenceNum int64
+ sequenceNumStr := resp.headers.Get("x-ms-blob-sequence-number")
+ if sequenceNumStr != "" {
+ sequenceNum, err = strconv.ParseInt(sequenceNumStr, 0, 64)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return &BlobProperties{
+ LastModified: resp.headers.Get("Last-Modified"),
+ Etag: resp.headers.Get("Etag"),
+ ContentMD5: resp.headers.Get("Content-MD5"),
+ ContentLength: contentLength,
+ ContentEncoding: resp.headers.Get("Content-Encoding"),
+ ContentType: resp.headers.Get("Content-Type"),
+ CacheControl: resp.headers.Get("Cache-Control"),
+ ContentLanguage: resp.headers.Get("Content-Language"),
+ SequenceNumber: sequenceNum,
+ CopyCompletionTime: resp.headers.Get("x-ms-copy-completion-time"),
+ CopyStatusDescription: resp.headers.Get("x-ms-copy-status-description"),
+ CopyID: resp.headers.Get("x-ms-copy-id"),
+ CopyProgress: resp.headers.Get("x-ms-copy-progress"),
+ CopySource: resp.headers.Get("x-ms-copy-source"),
+ CopyStatus: resp.headers.Get("x-ms-copy-status"),
+ BlobType: BlobType(resp.headers.Get("x-ms-blob-type")),
+ LeaseStatus: resp.headers.Get("x-ms-lease-status"),
+ LeaseState: resp.headers.Get("x-ms-lease-state"),
+ }, nil
+}
+
+// SetBlobProperties replaces the BlobHeaders for the specified blob.
+//
+// Some keys may be converted to Camel-Case before sending. All keys
+// are returned in lower case by GetBlobProperties. HTTP header names
+// are case-insensitive so case munging should not matter to other
+// applications either.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/ee691966.aspx
+func (b BlobStorageClient) SetBlobProperties(container, name string, blobHeaders BlobHeaders) error {
+ params := url.Values{"comp": {"properties"}}
+ uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params)
+ headers := b.client.getStandardHeaders()
+
+ extraHeaders := headersFromStruct(blobHeaders)
+
+ for k, v := range extraHeaders {
+ headers[k] = v
+ }
+
+ resp, err := b.client.exec(http.MethodPut, uri, headers, nil, b.auth)
+ if err != nil {
+ return err
+ }
+ defer readAndCloseBody(resp.body)
+
+ return checkRespCode(resp.statusCode, []int{http.StatusOK})
+}
+
+// SetBlobMetadata replaces the metadata for the specified blob.
+//
+// Some keys may be converted to Camel-Case before sending. All keys
+// are returned in lower case by GetBlobMetadata. HTTP header names
+// are case-insensitive so case munging should not matter to other
+// applications either.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
+func (b BlobStorageClient) SetBlobMetadata(container, name string, metadata map[string]string, extraHeaders map[string]string) error {
+ params := url.Values{"comp": {"metadata"}}
+ uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params)
+ metadata = b.client.protectUserAgent(metadata)
+ extraHeaders = b.client.protectUserAgent(extraHeaders)
+ headers := b.client.getStandardHeaders()
+ for k, v := range metadata {
+ headers[userDefinedMetadataHeaderPrefix+k] = v
+ }
+
+ for k, v := range extraHeaders {
+ headers[k] = v
+ }
+
+ resp, err := b.client.exec(http.MethodPut, uri, headers, nil, b.auth)
+ if err != nil {
+ return err
+ }
+ defer readAndCloseBody(resp.body)
+
+ return checkRespCode(resp.statusCode, []int{http.StatusOK})
+}
+
// GetBlobMetadata returns all user-defined metadata for the specified blob.
//
// All metadata keys will be returned in lower case. (HTTP header
// names are case-insensitive.)
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
func (b BlobStorageClient) GetBlobMetadata(container, name string) (map[string]string, error) {
	params := url.Values{"comp": {"metadata"}}
	uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params)
	headers := b.client.getStandardHeaders()

	resp, err := b.client.exec(http.MethodGet, uri, headers, nil, b.auth)
	if err != nil {
		return nil, err
	}
	defer readAndCloseBody(resp.body)

	if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
		return nil, err
	}

	// Collect only the x-ms-meta-* response headers, with keys normalized
	// to lower case and the prefix stripped.
	metadata := make(map[string]string)
	for k, v := range resp.headers {
		// Can't trust CanonicalHeaderKey() to munge case
		// reliably. "_" is allowed in identifiers:
		// https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
		// https://msdn.microsoft.com/library/aa664670(VS.71).aspx
		// http://tools.ietf.org/html/rfc7230#section-3.2
		// ...but "_" is considered invalid by
		// CanonicalMIMEHeaderKey in
		// https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542
		// so k can be "X-Ms-Meta-Foo" or "x-ms-meta-foo_bar".
		k = strings.ToLower(k)
		if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) {
			continue
		}
		// metadata["foo"] = content of the last X-Ms-Meta-Foo header
		k = k[len(userDefinedMetadataHeaderPrefix):]
		metadata[k] = v[len(v)-1]
	}
	return metadata, nil
}
+
+// CreateBlockBlob initializes an empty block blob with no blocks.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx
+func (b BlobStorageClient) CreateBlockBlob(container, name string) error {
+ return b.CreateBlockBlobFromReader(container, name, 0, nil, nil)
+}
+
+// CreateBlockBlobFromReader initializes a block blob using data from
+// reader. Size must be the number of bytes read from reader. To
+// create an empty blob, use size==0 and reader==nil.
+//
+// The API rejects requests with size > 256 MiB (but this limit is not
+// checked by the SDK). To write a larger blob, use CreateBlockBlob,
+// PutBlock, and PutBlockList.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx
+func (b BlobStorageClient) CreateBlockBlobFromReader(container, name string, size uint64, blob io.Reader, extraHeaders map[string]string) error {
+ path := fmt.Sprintf("%s/%s", container, name)
+ uri := b.client.getEndpoint(blobServiceName, path, url.Values{})
+ extraHeaders = b.client.protectUserAgent(extraHeaders)
+ headers := b.client.getStandardHeaders()
+ headers["x-ms-blob-type"] = string(BlobTypeBlock)
+ headers["Content-Length"] = fmt.Sprintf("%d", size)
+
+ for k, v := range extraHeaders {
+ headers[k] = v
+ }
+
+ resp, err := b.client.exec(http.MethodPut, uri, headers, blob, b.auth)
+ if err != nil {
+ return err
+ }
+ defer readAndCloseBody(resp.body)
+ return checkRespCode(resp.statusCode, []int{http.StatusCreated})
+}
+
+// PutBlock saves the given data chunk to the specified block blob with
+// given ID.
+//
+// The API rejects chunks larger than 100 MB (but this limit is not
+// checked by the SDK).
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd135726.aspx
+func (b BlobStorageClient) PutBlock(container, name, blockID string, chunk []byte) error {
+ return b.PutBlockWithLength(container, name, blockID, uint64(len(chunk)), bytes.NewReader(chunk), nil)
+}
+
// PutBlockWithLength saves the given data stream of exactly specified size to
// the block blob with given ID. It is an alternative to PutBlocks where data
// comes as stream but the length is known in advance.
//
// The API rejects requests with size > 100 MB (but this limit is not
// checked by the SDK).
//
// See https://msdn.microsoft.com/en-us/library/azure/dd135726.aspx
func (b BlobStorageClient) PutBlockWithLength(container, name, blockID string, size uint64, blob io.Reader, extraHeaders map[string]string) error {
	uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{"comp": {"block"}, "blockid": {blockID}})
	extraHeaders = b.client.protectUserAgent(extraHeaders)
	headers := b.client.getStandardHeaders()
	headers["x-ms-blob-type"] = string(BlobTypeBlock)
	// Content-Length must match size; exec copies it into the request's
	// ContentLength field so the transport sends it correctly.
	headers["Content-Length"] = fmt.Sprintf("%v", size)

	// Caller-supplied headers may override the standard ones.
	for k, v := range extraHeaders {
		headers[k] = v
	}

	resp, err := b.client.exec(http.MethodPut, uri, headers, blob, b.auth)
	if err != nil {
		return err
	}

	defer readAndCloseBody(resp.body)
	return checkRespCode(resp.statusCode, []int{http.StatusCreated})
}

// PutBlockList saves list of blocks to the specified block blob.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179467.aspx
func (b BlobStorageClient) PutBlockList(container, name string, blocks []Block) error {
	// XML request body enumerating the blocks to commit.
	blockListXML := prepareBlockListRequest(blocks)

	uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{"comp": {"blocklist"}})
	headers := b.client.getStandardHeaders()
	headers["Content-Length"] = fmt.Sprintf("%v", len(blockListXML))

	resp, err := b.client.exec(http.MethodPut, uri, headers, strings.NewReader(blockListXML), b.auth)
	if err != nil {
		return err
	}
	defer readAndCloseBody(resp.body)
	return checkRespCode(resp.statusCode, []int{http.StatusCreated})
}
+
+// GetBlockList retrieves list of blocks in the specified block blob.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx
+func (b BlobStorageClient) GetBlockList(container, name string, blockType BlockListType) (BlockListResponse, error) {
+ params := url.Values{"comp": {"blocklist"}, "blocklisttype": {string(blockType)}}
+ uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params)
+ headers := b.client.getStandardHeaders()
+
+ var out BlockListResponse
+ resp, err := b.client.exec(http.MethodGet, uri, headers, nil, b.auth)
+ if err != nil {
+ return out, err
+ }
+ defer resp.body.Close()
+
+ err = xmlUnmarshal(resp.body, &out)
+ return out, err
+}
+
// PutPageBlob initializes an empty page blob with specified name and maximum
// size in bytes (size must be aligned to a 512-byte boundary). A page blob must
// be created using this method before writing pages.
//
// NOTE(review): the 512-byte alignment of size is not validated here —
// an unaligned value is passed through to the service as-is.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx
func (b BlobStorageClient) PutPageBlob(container, name string, size int64, extraHeaders map[string]string) error {
	path := fmt.Sprintf("%s/%s", container, name)
	uri := b.client.getEndpoint(blobServiceName, path, url.Values{})
	extraHeaders = b.client.protectUserAgent(extraHeaders)
	headers := b.client.getStandardHeaders()
	headers["x-ms-blob-type"] = string(BlobTypePage)
	headers["x-ms-blob-content-length"] = fmt.Sprintf("%v", size)

	// Caller-supplied headers may override the standard ones.
	for k, v := range extraHeaders {
		headers[k] = v
	}

	resp, err := b.client.exec(http.MethodPut, uri, headers, nil, b.auth)
	if err != nil {
		return err
	}
	defer readAndCloseBody(resp.body)

	return checkRespCode(resp.statusCode, []int{http.StatusCreated})
}

// PutPage writes a range of pages to a page blob or clears the given range.
// In case of 'clear' writes, given chunk is discarded. Ranges must be aligned
// with 512-byte boundaries and chunk must be of size multiplies by 512.
//
// See https://msdn.microsoft.com/en-us/library/ee691975.aspx
func (b BlobStorageClient) PutPage(container, name string, startByte, endByte int64, writeType PageWriteType, chunk []byte, extraHeaders map[string]string) error {
	path := fmt.Sprintf("%s/%s", container, name)
	uri := b.client.getEndpoint(blobServiceName, path, url.Values{"comp": {"page"}})
	extraHeaders = b.client.protectUserAgent(extraHeaders)
	headers := b.client.getStandardHeaders()
	headers["x-ms-blob-type"] = string(BlobTypePage)
	headers["x-ms-page-write"] = string(writeType)
	// The byte range is inclusive on both ends.
	headers["x-ms-range"] = fmt.Sprintf("bytes=%v-%v", startByte, endByte)
	for k, v := range extraHeaders {
		headers[k] = v
	}
	var contentLength int64
	var data io.Reader
	if writeType == PageWriteTypeClear {
		// 'clear' requests ignore chunk entirely and send an empty body.
		contentLength = 0
		data = bytes.NewReader([]byte{})
	} else {
		contentLength = int64(len(chunk))
		data = bytes.NewReader(chunk)
	}
	headers["Content-Length"] = fmt.Sprintf("%v", contentLength)

	resp, err := b.client.exec(http.MethodPut, uri, headers, data, b.auth)
	if err != nil {
		return err
	}
	defer readAndCloseBody(resp.body)

	return checkRespCode(resp.statusCode, []int{http.StatusCreated})
}
+
+// GetPageRanges returns the list of valid page ranges for a page blob.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx
+func (b BlobStorageClient) GetPageRanges(container, name string) (GetPageRangesResponse, error) {
+ path := fmt.Sprintf("%s/%s", container, name)
+ uri := b.client.getEndpoint(blobServiceName, path, url.Values{"comp": {"pagelist"}})
+ headers := b.client.getStandardHeaders()
+
+ var out GetPageRangesResponse
+ resp, err := b.client.exec(http.MethodGet, uri, headers, nil, b.auth)
+ if err != nil {
+ return out, err
+ }
+ defer resp.body.Close()
+
+ if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
+ return out, err
+ }
+ err = xmlUnmarshal(resp.body, &out)
+ return out, err
+}
+
+// PutAppendBlob initializes an empty append blob with specified name. An
+// append blob must be created using this method before appending blocks.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx
+func (b BlobStorageClient) PutAppendBlob(container, name string, extraHeaders map[string]string) error {
+ path := fmt.Sprintf("%s/%s", container, name)
+ uri := b.client.getEndpoint(blobServiceName, path, url.Values{})
+ extraHeaders = b.client.protectUserAgent(extraHeaders)
+ headers := b.client.getStandardHeaders()
+ headers["x-ms-blob-type"] = string(BlobTypeAppend)
+
+ for k, v := range extraHeaders {
+ headers[k] = v
+ }
+
+ resp, err := b.client.exec(http.MethodPut, uri, headers, nil, b.auth)
+ if err != nil {
+ return err
+ }
+ defer readAndCloseBody(resp.body)
+
+ return checkRespCode(resp.statusCode, []int{http.StatusCreated})
+}
+
// AppendBlock appends a block to an append blob.
//
// See https://msdn.microsoft.com/en-us/library/azure/mt427365.aspx
func (b BlobStorageClient) AppendBlock(container, name string, chunk []byte, extraHeaders map[string]string) error {
	path := fmt.Sprintf("%s/%s", container, name)
	uri := b.client.getEndpoint(blobServiceName, path, url.Values{"comp": {"appendblock"}})
	extraHeaders = b.client.protectUserAgent(extraHeaders)
	headers := b.client.getStandardHeaders()
	headers["x-ms-blob-type"] = string(BlobTypeAppend)
	headers["Content-Length"] = fmt.Sprintf("%v", len(chunk))

	// Caller-supplied headers may override the standard ones.
	for k, v := range extraHeaders {
		headers[k] = v
	}

	resp, err := b.client.exec(http.MethodPut, uri, headers, bytes.NewReader(chunk), b.auth)
	if err != nil {
		return err
	}
	defer readAndCloseBody(resp.body)

	return checkRespCode(resp.statusCode, []int{http.StatusCreated})
}
+
+// CopyBlob starts a blob copy operation and waits for the operation to
+// complete. sourceBlob parameter must be a canonical URL to the blob (can be
+// obtained using GetBlobURL method.) There is no SLA on blob copy and therefore
+// this helper method works faster on smaller files.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd894037.aspx
+func (b BlobStorageClient) CopyBlob(container, name, sourceBlob string) error {
+ copyID, err := b.StartBlobCopy(container, name, sourceBlob)
+ if err != nil {
+ return err
+ }
+
+ return b.WaitForBlobCopy(container, name, copyID)
+}
+
// StartBlobCopy starts a blob copy operation.
// sourceBlob parameter must be a canonical URL to the blob (can be
// obtained using GetBlobURL method.)
//
// Returns the copy ID identifying the operation, usable with
// WaitForBlobCopy and AbortBlobCopy.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd894037.aspx
func (b BlobStorageClient) StartBlobCopy(container, name, sourceBlob string) (string, error) {
	uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{})

	headers := b.client.getStandardHeaders()
	headers["x-ms-copy-source"] = sourceBlob

	resp, err := b.client.exec(http.MethodPut, uri, headers, nil, b.auth)
	if err != nil {
		return "", err
	}
	defer readAndCloseBody(resp.body)

	// The service may answer 202 Accepted (copy pending) or 201 Created.
	if err := checkRespCode(resp.statusCode, []int{http.StatusAccepted, http.StatusCreated}); err != nil {
		return "", err
	}

	copyID := resp.headers.Get("x-ms-copy-id")
	if copyID == "" {
		return "", errors.New("Got empty copy id header")
	}
	return copyID, nil
}
+
+// AbortBlobCopy aborts a BlobCopy which has already been triggered by the StartBlobCopy function.
+// copyID is generated from StartBlobCopy function.
+// currentLeaseID is required IF the destination blob has an active lease on it.
+// As defined in https://msdn.microsoft.com/en-us/library/azure/jj159098.aspx
+func (b BlobStorageClient) AbortBlobCopy(container, name, copyID, currentLeaseID string, timeout int) error {
+ params := url.Values{"comp": {"copy"}, "copyid": {copyID}}
+ if timeout > 0 {
+ params.Add("timeout", strconv.Itoa(timeout))
+ }
+
+ uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params)
+ headers := b.client.getStandardHeaders()
+ headers["x-ms-copy-action"] = "abort"
+
+ if currentLeaseID != "" {
+ headers[headerLeaseID] = currentLeaseID
+ }
+
+ resp, err := b.client.exec(http.MethodPut, uri, headers, nil, b.auth)
+ if err != nil {
+ return err
+ }
+ defer readAndCloseBody(resp.body)
+
+ if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// WaitForBlobCopy loops until a BlobCopy operation is completed (or fails with error)
+func (b BlobStorageClient) WaitForBlobCopy(container, name, copyID string) error {
+ for {
+ props, err := b.GetBlobProperties(container, name)
+ if err != nil {
+ return err
+ }
+
+ if props.CopyID != copyID {
+ return errBlobCopyIDMismatch
+ }
+
+ switch props.CopyStatus {
+ case blobCopyStatusSuccess:
+ return nil
+ case blobCopyStatusPending:
+ continue
+ case blobCopyStatusAborted:
+ return errBlobCopyAborted
+ case blobCopyStatusFailed:
+ return fmt.Errorf("storage: blob copy failed. Id=%s Description=%s", props.CopyID, props.CopyStatusDescription)
+ default:
+ return fmt.Errorf("storage: unhandled blob copy status: '%s'", props.CopyStatus)
+ }
+ }
+}
+
// DeleteBlob deletes the given blob from the specified container.
// If the blob does not exists at the time of the Delete Blob operation, it
// returns error. See https://msdn.microsoft.com/en-us/library/azure/dd179413.aspx
func (b BlobStorageClient) DeleteBlob(container, name string, extraHeaders map[string]string) error {
	resp, err := b.deleteBlob(container, name, extraHeaders)
	if err != nil {
		return err
	}
	defer readAndCloseBody(resp.body)
	return checkRespCode(resp.statusCode, []int{http.StatusAccepted})
}

// DeleteBlobIfExists deletes the given blob from the specified container If the
// blob is deleted with this call, returns true. Otherwise returns false.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179413.aspx
func (b BlobStorageClient) DeleteBlobIfExists(container, name string, extraHeaders map[string]string) (bool, error) {
	resp, err := b.deleteBlob(container, name, extraHeaders)
	if resp != nil {
		defer readAndCloseBody(resp.body)
		// 202 => deleted now (true, nil); 404 => already absent, which is
		// deliberately not an error for this variant (false, nil).
		if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound {
			return resp.statusCode == http.StatusAccepted, nil
		}
	}
	return false, err
}
+
+func (b BlobStorageClient) deleteBlob(container, name string, extraHeaders map[string]string) (*storageResponse, error) {
+ uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{})
+ extraHeaders = b.client.protectUserAgent(extraHeaders)
+ headers := b.client.getStandardHeaders()
+ for k, v := range extraHeaders {
+ headers[k] = v
+ }
+
+ return b.client.exec(http.MethodDelete, uri, headers, nil, b.auth)
+}
+
// pathForBlob builds the request path for a blob within a container.
func pathForBlob(container, name string) string {
	return "/" + container + "/" + name
}
+
// pathForResource builds the request path for a container (empty name)
// or for a blob inside it.
func pathForResource(container, name string) string {
	if name == "" {
		return "/" + container
	}
	return "/" + container + "/" + name
}
+
// GetBlobSASURIWithSignedIPAndProtocol creates an URL to the specified blob which contains the Shared
// Access Signature with specified permissions and expiration time. Also includes signedIPRange and allowed protocols.
// If old API version is used but no signedIP is passed (ie empty string) then this should still work.
// We only populate the signedIP when it non-empty.
//
// See https://msdn.microsoft.com/en-us/library/azure/ee395415.aspx
func (b BlobStorageClient) GetBlobSASURIWithSignedIPAndProtocol(container, name string, expiry time.Time, permissions string, signedIPRange string, HTTPSOnly bool) (string, error) {
	var (
		signedPermissions = permissions
		blobURL = b.GetBlobURL(container, name)
	)
	canonicalizedResource, err := b.client.buildCanonicalizedResource(blobURL, b.auth)
	if err != nil {
		return "", err
	}

	// "The canonicalizedresouce portion of the string is a canonical path to the signed resource.
	// It must include the service name (blob, table, queue or file) for version 2015-02-21 or
	// later, the storage account name, and the resource name, and must be URL-decoded.
	// -- https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx

	// We need to replace + with %2b first to avoid being treated as a space (which is correct for query strings, but not the path component).
	canonicalizedResource = strings.Replace(canonicalizedResource, "+", "%2b", -1)
	canonicalizedResource, err = url.QueryUnescape(canonicalizedResource)
	if err != nil {
		return "", err
	}

	signedExpiry := expiry.UTC().Format(time.RFC3339)

	//If blob name is missing, resource is a container
	signedResource := "c"
	if len(name) > 0 {
		signedResource = "b"
	}

	protocols := "https,http"
	if HTTPSOnly {
		protocols = "https"
	}
	stringToSign, err := blobSASStringToSign(b.client.apiVersion, canonicalizedResource, signedExpiry, signedPermissions, signedIPRange, protocols)
	if err != nil {
		return "", err
	}

	sig := b.client.computeHmac256(stringToSign)
	sasParams := url.Values{
		"sv":  {b.client.apiVersion},
		"se":  {signedExpiry},
		"sr":  {signedResource},
		"sp":  {signedPermissions},
		"sig": {sig},
	}

	// The "spr" (protocols) and "sip" (IP range) parameters only exist in
	// service version 2015-04-05 and later, so omit them on older versions.
	if b.client.apiVersion >= "2015-04-05" {
		sasParams.Add("spr", protocols)
		if signedIPRange != "" {
			sasParams.Add("sip", signedIPRange)
		}
	}

	sasURL, err := url.Parse(blobURL)
	if err != nil {
		return "", err
	}
	sasURL.RawQuery = sasParams.Encode()
	return sasURL.String(), nil
}
+
+// GetBlobSASURI creates an URL to the specified blob which contains the Shared
+// Access Signature with specified permissions and expiration time.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/ee395415.aspx
+func (b BlobStorageClient) GetBlobSASURI(container, name string, expiry time.Time, permissions string) (string, error) {
+ url, err := b.GetBlobSASURIWithSignedIPAndProtocol(container, name, expiry, permissions, "", false)
+ return url, err
+}
+
// blobSASStringToSign assembles the newline-delimited string-to-sign for a
// blob service SAS, choosing the field layout by service version.
func blobSASStringToSign(signedVersion, canonicalizedResource, signedExpiry, signedPermissions string, signedIP string, protocols string) (string, error) {
	// Fields this SDK never populates are signed as empty strings.
	var signedStart, signedIdentifier, rscc, rscd, rsce, rscl, rsct string

	if signedVersion >= "2015-02-21" {
		canonicalizedResource = "/blob" + canonicalizedResource
	}

	switch {
	case signedVersion >= "2015-04-05":
		// https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx#Anchor_12
		fields := []string{signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedIP, protocols, signedVersion, rscc, rscd, rsce, rscl, rsct}
		return strings.Join(fields, "\n"), nil
	case signedVersion >= "2013-08-15":
		// reference: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
		fields := []string{signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedVersion, rscc, rscd, rsce, rscl, rsct}
		return strings.Join(fields, "\n"), nil
	}

	return "", errors.New("storage: not implemented SAS for versions earlier than 2013-08-15")
}
diff --git a/vendor/github.com/Azure/azure-storage-go/blobserviceclient.go b/vendor/github.com/Azure/azure-storage-go/blobserviceclient.go
new file mode 100644
index 000000000..e5911ac81
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-go/blobserviceclient.go
@@ -0,0 +1,92 @@
+package storage
+
+import (
+ "fmt"
+ "net/http"
+ "net/url"
+)
+
// BlobStorageClient contains operations for Microsoft Azure Blob Storage
// Service.
type BlobStorageClient struct {
	client Client         // underlying account client used for all requests
	auth   authentication // authorization scheme chosen by Client.GetBlobService
}

// GetServiceProperties gets the properties of your storage account's blob service.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-blob-service-properties
func (b *BlobStorageClient) GetServiceProperties() (*ServiceProperties, error) {
	return b.client.getServiceProperties(blobServiceName, b.auth)
}

// SetServiceProperties sets the properties of your storage account's blob service.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-blob-service-properties
func (b *BlobStorageClient) SetServiceProperties(props ServiceProperties) error {
	return b.client.setServiceProperties(props, blobServiceName, b.auth)
}

// ListContainersParameters defines the set of customizable parameters to make a
// List Containers call. Zero-valued fields are omitted from the request.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
type ListContainersParameters struct {
	Prefix     string
	Marker     string
	Include    string
	MaxResults uint
	Timeout    uint
}
+
// GetContainerReference returns a Container object for the specified container name.
//
// NOTE(review): the container holds a pointer to a copy of this client
// (value receiver), not to the caller's original BlobStorageClient.
func (b BlobStorageClient) GetContainerReference(name string) Container {
	return Container{
		bsc:  &b,
		Name: name,
	}
}

// ListContainers returns the list of containers in a storage account along with
// pagination token and other response details.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
func (b BlobStorageClient) ListContainers(params ListContainersParameters) (*ContainerListResponse, error) {
	q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}})
	uri := b.client.getEndpoint(blobServiceName, "", q)
	headers := b.client.getStandardHeaders()

	var out ContainerListResponse
	resp, err := b.client.exec(http.MethodGet, uri, headers, nil, b.auth)
	if err != nil {
		return nil, err
	}
	defer resp.body.Close()
	err = xmlUnmarshal(resp.body, &out)

	// assign our client to the newly created Container objects
	for i := range out.Containers {
		out.Containers[i].bsc = &b
	}
	return &out, err
}
+
+func (p ListContainersParameters) getParameters() url.Values {
+ out := url.Values{}
+
+ if p.Prefix != "" {
+ out.Set("prefix", p.Prefix)
+ }
+ if p.Marker != "" {
+ out.Set("marker", p.Marker)
+ }
+ if p.Include != "" {
+ out.Set("include", p.Include)
+ }
+ if p.MaxResults != 0 {
+ out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults))
+ }
+ if p.Timeout != 0 {
+ out.Set("timeout", fmt.Sprintf("%v", p.Timeout))
+ }
+
+ return out
+}
diff --git a/vendor/github.com/Azure/azure-storage-go/client.go b/vendor/github.com/Azure/azure-storage-go/client.go
new file mode 100644
index 000000000..9ddbf08ae
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-go/client.go
@@ -0,0 +1,479 @@
+// Package storage provides clients for Microsoft Azure Storage Services.
+package storage
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/json"
+ "encoding/xml"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "runtime"
+ "strconv"
+ "strings"
+
+ "github.com/Azure/go-autorest/autorest/azure"
+)
+
const (
	// DefaultBaseURL is the domain name used for storage requests in the
	// public cloud when a default client is created.
	DefaultBaseURL = "core.windows.net"

	// DefaultAPIVersion is the Azure Storage API version string used when a
	// basic client is created.
	DefaultAPIVersion = "2016-05-31"

	defaultUseHTTPS = true

	// StorageEmulatorAccountName is the fixed storage account used by Azure Storage Emulator
	StorageEmulatorAccountName = "devstoreaccount1"

	// StorageEmulatorAccountKey is the fixed storage account key used by Azure Storage Emulator
	StorageEmulatorAccountKey = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=="

	// Service names, used to build "<account>.<service>.<baseURL>" hosts.
	blobServiceName  = "blob"
	tableServiceName = "table"
	queueServiceName = "queue"
	fileServiceName  = "file"

	// Well-known local endpoints of the Azure Storage Emulator.
	storageEmulatorBlob  = "127.0.0.1:10000"
	storageEmulatorTable = "127.0.0.1:10002"
	storageEmulatorQueue = "127.0.0.1:10001"

	userAgentHeader = "User-Agent"
)
+
// Client is the object that needs to be constructed to perform
// operations on the storage account.
type Client struct {
	// HTTPClient is the http.Client used to initiate API
	// requests. If it is nil, http.DefaultClient is used.
	HTTPClient *http.Client

	accountName string
	accountKey  []byte // base64-decoded account key
	useHTTPS    bool
	// UseSharedKeyLite selects SharedKeyLite authorization in the
	// service-client getters instead of full SharedKey.
	UseSharedKeyLite bool
	baseURL          string
	apiVersion       string
	userAgent        string
}
+
// storageResponse is the internal, minimal view of an HTTP response
// passed around by exec and its callers.
type storageResponse struct {
	statusCode int
	headers    http.Header
	body       io.ReadCloser
}

// odataResponse augments storageResponse with a parsed OData error payload.
type odataResponse struct {
	storageResponse
	odata odataErrorMessage
}

// AzureStorageServiceError contains fields of the error response from
// Azure Storage Service REST API. See https://msdn.microsoft.com/en-us/library/azure/dd179382.aspx
// Some fields might be specific to certain calls.
type AzureStorageServiceError struct {
	Code                      string `xml:"Code"`
	Message                   string `xml:"Message"`
	AuthenticationErrorDetail string `xml:"AuthenticationErrorDetail"`
	QueryParameterName        string `xml:"QueryParameterName"`
	QueryParameterValue       string `xml:"QueryParameterValue"`
	Reason                    string `xml:"Reason"`
	StatusCode                int
	RequestID                 string
}

// odataErrorMessageMessage is the localized message inside an OData error.
type odataErrorMessageMessage struct {
	Lang  string `json:"lang"`
	Value string `json:"value"`
}

// odataErrorMessageInternal is the code/message pair inside an OData error.
type odataErrorMessageInternal struct {
	Code    string                   `json:"code"`
	Message odataErrorMessageMessage `json:"message"`
}

// odataErrorMessage is the top-level JSON envelope ("odata.error").
type odataErrorMessage struct {
	Err odataErrorMessageInternal `json:"odata.error"`
}
+
// UnexpectedStatusCodeError is returned when a storage service responds with neither an error
// nor with an HTTP status code indicating success.
type UnexpectedStatusCodeError struct {
	allowed []int
	got     int
}

// Error renders both the received status and the set of acceptable ones.
func (e UnexpectedStatusCodeError) Error() string {
	describe := func(code int) string {
		return fmt.Sprintf("%d %s", code, http.StatusText(code))
	}

	want := make([]string, 0, len(e.allowed))
	for _, code := range e.allowed {
		want = append(want, describe(code))
	}
	return fmt.Sprintf("storage: status code from service response is %s; was expecting %s", describe(e.got), strings.Join(want, " or "))
}

// Got is the actual status code returned by Azure.
func (e UnexpectedStatusCodeError) Got() int {
	return e.got
}
+
// NewBasicClient constructs a Client with given storage service name and
// key.
func NewBasicClient(accountName, accountKey string) (Client, error) {
	// The emulator account name routes to the local emulator endpoints.
	if accountName == StorageEmulatorAccountName {
		return NewEmulatorClient()
	}
	return NewClient(accountName, accountKey, DefaultBaseURL, DefaultAPIVersion, defaultUseHTTPS)
}

// NewBasicClientOnSovereignCloud constructs a Client with given storage service name and
// key in the referenced cloud.
func NewBasicClientOnSovereignCloud(accountName, accountKey string, env azure.Environment) (Client, error) {
	if accountName == StorageEmulatorAccountName {
		return NewEmulatorClient()
	}
	return NewClient(accountName, accountKey, env.StorageEndpointSuffix, DefaultAPIVersion, defaultUseHTTPS)
}

//NewEmulatorClient constructs a Client intended to only work with Azure
//Storage Emulator
func NewEmulatorClient() (Client, error) {
	// The emulator serves plain HTTP only, hence useHTTPS=false.
	return NewClient(StorageEmulatorAccountName, StorageEmulatorAccountKey, DefaultBaseURL, DefaultAPIVersion, false)
}
+
// NewClient constructs a Client. This should be used if the caller wants
// to specify whether to use HTTPS, a specific REST API version or a custom
// storage endpoint than Azure Public Cloud.
//
// accountKey must be base64-encoded; it is decoded here and stored raw.
func NewClient(accountName, accountKey, blobServiceBaseURL, apiVersion string, useHTTPS bool) (Client, error) {
	var c Client
	if accountName == "" {
		return c, fmt.Errorf("azure: account name required")
	} else if accountKey == "" {
		return c, fmt.Errorf("azure: account key required")
	} else if blobServiceBaseURL == "" {
		return c, fmt.Errorf("azure: base storage service url required")
	}

	key, err := base64.StdEncoding.DecodeString(accountKey)
	if err != nil {
		return c, fmt.Errorf("azure: malformed storage account key: %v", err)
	}

	c = Client{
		accountName: accountName,
		accountKey: key,
		useHTTPS: useHTTPS,
		baseURL: blobServiceBaseURL,
		apiVersion: apiVersion,
		UseSharedKeyLite: false,
	}
	c.userAgent = c.getDefaultUserAgent()
	return c, nil
}
+
// getDefaultUserAgent builds the base User-Agent string from the Go
// runtime version, platform (GOARCH-GOOS), SDK version and API version.
func (c Client) getDefaultUserAgent() string {
	return fmt.Sprintf("Go/%s (%s-%s) Azure-SDK-For-Go/%s storage-dataplane/%s",
		runtime.Version(),
		runtime.GOARCH,
		runtime.GOOS,
		sdkVersion,
		c.apiVersion,
	)
}

// AddToUserAgent adds an extension to the current user agent
func (c *Client) AddToUserAgent(extension string) error {
	if extension != "" {
		c.userAgent = fmt.Sprintf("%s %s", c.userAgent, extension)
		return nil
	}
	return fmt.Errorf("Extension was empty, User Agent stayed as %s", c.userAgent)
}

// protectUserAgent is used in funcs that include extraheaders as a parameter.
// It prevents the User-Agent header to be overwritten, instead if it happens to
// be present, it gets added to the current User-Agent. Use it before getStandardHeaders
func (c *Client) protectUserAgent(extraheaders map[string]string) map[string]string {
	if v, ok := extraheaders[userAgentHeader]; ok {
		// AddToUserAgent only errors on an empty extension, in which case
		// dropping the header silently is the intended behavior.
		c.AddToUserAgent(v)
		delete(extraheaders, userAgentHeader)
	}
	return extraheaders
}
+
// getBaseURL returns the scheme://host root for the given service,
// pointing at the emulator's fixed local endpoints when the emulator
// account is in use.
func (c Client) getBaseURL(service string) string {
	scheme := "http"
	if c.useHTTPS {
		scheme = "https"
	}
	host := ""
	if c.accountName == StorageEmulatorAccountName {
		// NOTE(review): there is no emulator endpoint for the file
		// service, so host stays empty for fileServiceName here.
		switch service {
		case blobServiceName:
			host = storageEmulatorBlob
		case tableServiceName:
			host = storageEmulatorTable
		case queueServiceName:
			host = storageEmulatorQueue
		}
	} else {
		host = fmt.Sprintf("%s.%s.%s", c.accountName, service, c.baseURL)
	}

	u := &url.URL{
		Scheme: scheme,
		Host: host}
	return u.String()
}

// getEndpoint builds the full request URL for a service, resource path
// and query parameters.
func (c Client) getEndpoint(service, path string, params url.Values) string {
	u, err := url.Parse(c.getBaseURL(service))
	if err != nil {
		// really should not be happening
		panic(err)
	}

	// API doesn't accept path segments not starting with '/'
	if !strings.HasPrefix(path, "/") {
		path = fmt.Sprintf("/%v", path)
	}

	// The emulator expects the account name as the first path segment.
	if c.accountName == StorageEmulatorAccountName {
		path = fmt.Sprintf("/%v%v", StorageEmulatorAccountName, path)
	}

	u.Path = path
	u.RawQuery = params.Encode()
	return u.String()
}
+
// GetBlobService returns a BlobStorageClient which can operate on the blob
// service of the storage account.
func (c Client) GetBlobService() BlobStorageClient {
	b := BlobStorageClient{
		client: c,
	}
	// Tag the copied client's User-Agent with the service name.
	b.client.AddToUserAgent(blobServiceName)
	b.auth = sharedKey
	if c.UseSharedKeyLite {
		b.auth = sharedKeyLite
	}
	return b
}

// GetQueueService returns a QueueServiceClient which can operate on the queue
// service of the storage account.
func (c Client) GetQueueService() QueueServiceClient {
	q := QueueServiceClient{
		client: c,
	}
	q.client.AddToUserAgent(queueServiceName)
	q.auth = sharedKey
	if c.UseSharedKeyLite {
		q.auth = sharedKeyLite
	}
	return q
}

// GetTableService returns a TableServiceClient which can operate on the table
// service of the storage account.
func (c Client) GetTableService() TableServiceClient {
	t := TableServiceClient{
		client: c,
	}
	t.client.AddToUserAgent(tableServiceName)
	// The table service uses its own shared-key variants.
	t.auth = sharedKeyForTable
	if c.UseSharedKeyLite {
		t.auth = sharedKeyLiteForTable
	}
	return t
}

// GetFileService returns a FileServiceClient which can operate on the file
// service of the storage account.
func (c Client) GetFileService() FileServiceClient {
	f := FileServiceClient{
		client: c,
	}
	f.client.AddToUserAgent(fileServiceName)
	f.auth = sharedKey
	if c.UseSharedKeyLite {
		f.auth = sharedKeyLite
	}
	return f
}
+
+func (c Client) getStandardHeaders() map[string]string {
+ return map[string]string{
+ userAgentHeader: c.userAgent,
+ "x-ms-version": c.apiVersion,
+ "x-ms-date": currentTimeRfc1123Formatted(),
+ }
+}
+
+func (c Client) exec(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*storageResponse, error) {
+ headers, err := c.addAuthorizationHeader(verb, url, headers, auth)
+ if err != nil {
+ return nil, err
+ }
+
+ req, err := http.NewRequest(verb, url, body)
+ if err != nil {
+ return nil, errors.New("azure/storage: error creating request: " + err.Error())
+ }
+
+ if clstr, ok := headers["Content-Length"]; ok {
+ // content length header is being signed, but completely ignored by golang.
+ // instead we have to use the ContentLength property on the request struct
+ // (see https://golang.org/src/net/http/request.go?s=18140:18370#L536 and
+ // https://golang.org/src/net/http/transfer.go?s=1739:2467#L49)
+ req.ContentLength, err = strconv.ParseInt(clstr, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ }
+ for k, v := range headers {
+ req.Header.Add(k, v)
+ }
+
+ httpClient := c.HTTPClient
+ if httpClient == nil {
+ httpClient = http.DefaultClient
+ }
+ resp, err := httpClient.Do(req)
+ if err != nil {
+ return nil, err
+ }
+
+ statusCode := resp.StatusCode
+ if statusCode >= 400 && statusCode <= 505 {
+ var respBody []byte
+ respBody, err = readAndCloseBody(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+
+ requestID := resp.Header.Get("x-ms-request-id")
+ if len(respBody) == 0 {
+ // no error in response body, might happen in HEAD requests
+ err = serviceErrFromStatusCode(resp.StatusCode, resp.Status, requestID)
+ } else {
+ // response contains storage service error object, unmarshal
+ storageErr, errIn := serviceErrFromXML(respBody, resp.StatusCode, requestID)
+ if err != nil { // error unmarshaling the error response
+ err = errIn
+ }
+ err = storageErr
+ }
+ return &storageResponse{
+ statusCode: resp.StatusCode,
+ headers: resp.Header,
+ body: ioutil.NopCloser(bytes.NewReader(respBody)), /* restore the body */
+ }, err
+ }
+
+ return &storageResponse{
+ statusCode: resp.StatusCode,
+ headers: resp.Header,
+ body: resp.Body}, nil
+}
+
+func (c Client) execInternalJSON(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, error) {
+ headers, err := c.addAuthorizationHeader(verb, url, headers, auth)
+ if err != nil {
+ return nil, err
+ }
+
+ req, err := http.NewRequest(verb, url, body)
+ for k, v := range headers {
+ req.Header.Add(k, v)
+ }
+
+ httpClient := c.HTTPClient
+ if httpClient == nil {
+ httpClient = http.DefaultClient
+ }
+
+ resp, err := httpClient.Do(req)
+ if err != nil {
+ return nil, err
+ }
+
+ respToRet := &odataResponse{}
+ respToRet.body = resp.Body
+ respToRet.statusCode = resp.StatusCode
+ respToRet.headers = resp.Header
+
+ statusCode := resp.StatusCode
+ if statusCode >= 400 && statusCode <= 505 {
+ var respBody []byte
+ respBody, err = readAndCloseBody(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(respBody) == 0 {
+ // no error in response body, might happen in HEAD requests
+ err = serviceErrFromStatusCode(resp.StatusCode, resp.Status, resp.Header.Get("x-ms-request-id"))
+ return respToRet, err
+ }
+ // try unmarshal as odata.error json
+ err = json.Unmarshal(respBody, &respToRet.odata)
+ return respToRet, err
+ }
+
+ return respToRet, nil
+}
+
// readAndCloseBody drains body to completion, closes it, and returns the
// bytes read. A bare io.EOF is treated as success, not an error.
func readAndCloseBody(body io.ReadCloser) ([]byte, error) {
	defer body.Close()
	data, err := ioutil.ReadAll(body)
	if err == io.EOF {
		// a fully-drained body is not an error condition
		err = nil
	}
	return data, err
}
+
+func serviceErrFromXML(body []byte, statusCode int, requestID string) (AzureStorageServiceError, error) {
+ var storageErr AzureStorageServiceError
+ if err := xml.Unmarshal(body, &storageErr); err != nil {
+ return storageErr, err
+ }
+ storageErr.StatusCode = statusCode
+ storageErr.RequestID = requestID
+ return storageErr, nil
+}
+
+func serviceErrFromStatusCode(code int, status string, requestID string) AzureStorageServiceError {
+ return AzureStorageServiceError{
+ StatusCode: code,
+ Code: status,
+ RequestID: requestID,
+ Message: "no response body was available for error status code",
+ }
+}
+
+func (e AzureStorageServiceError) Error() string {
+ return fmt.Sprintf("storage: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=%s, RequestId=%s, QueryParameterName=%s, QueryParameterValue=%s",
+ e.StatusCode, e.Code, e.Message, e.RequestID, e.QueryParameterName, e.QueryParameterValue)
+}
+
+// checkRespCode returns UnexpectedStatusError if the given response code is not
+// one of the allowed status codes; otherwise nil.
+func checkRespCode(respCode int, allowed []int) error {
+ for _, v := range allowed {
+ if respCode == v {
+ return nil
+ }
+ }
+ return UnexpectedStatusCodeError{allowed, respCode}
+}
diff --git a/vendor/github.com/Azure/azure-storage-go/container.go b/vendor/github.com/Azure/azure-storage-go/container.go
new file mode 100644
index 000000000..f06423967
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-go/container.go
@@ -0,0 +1,376 @@
+package storage
+
+import (
+ "encoding/xml"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "strconv"
+ "time"
+)
+
// Container represents an Azure container.
type Container struct {
	bsc        *BlobStorageClient  // client used to issue requests for this container
	Name       string              `xml:"Name"`       // container name, also the URL path segment
	Properties ContainerProperties `xml:"Properties"` // service-reported properties
}
+
+func (c *Container) buildPath() string {
+ return fmt.Sprintf("/%s", c.Name)
+}
+
// ContainerProperties contains various properties of a container returned from
// various endpoints like ListContainers.
type ContainerProperties struct {
	LastModified  string `xml:"Last-Modified"` // RFC1123 timestamp of the last modification
	Etag          string `xml:"Etag"`          // entity tag for optimistic concurrency
	LeaseStatus   string `xml:"LeaseStatus"`   // lease status as reported by the service
	LeaseState    string `xml:"LeaseState"`    // lease state as reported by the service
	LeaseDuration string `xml:"LeaseDuration"` // lease duration as reported by the service
}
+
// ContainerListResponse contains the response fields from
// ListContainers call.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
type ContainerListResponse struct {
	XMLName    xml.Name    `xml:"EnumerationResults"`
	Xmlns      string      `xml:"xmlns,attr"`
	Prefix     string      `xml:"Prefix"`
	Marker     string      `xml:"Marker"`
	NextMarker string      `xml:"NextMarker"` // pass as Marker in the next call to continue listing
	MaxResults int64       `xml:"MaxResults"`
	Containers []Container `xml:"Containers>Container"`
}
+
// BlobListResponse contains the response fields from ListBlobs call.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx
type BlobListResponse struct {
	XMLName    xml.Name `xml:"EnumerationResults"`
	Xmlns      string   `xml:"xmlns,attr"`
	Prefix     string   `xml:"Prefix"`
	Marker     string   `xml:"Marker"`
	NextMarker string   `xml:"NextMarker"` // pass as Marker in the next call to continue listing
	MaxResults int64    `xml:"MaxResults"`
	Blobs      []Blob   `xml:"Blobs>Blob"`

	// BlobPrefix is used to traverse blobs as if it were a file system.
	// It is returned if ListBlobsParameters.Delimiter is specified.
	// The list here can be thought of as "folders" that may contain
	// other folders or blobs.
	BlobPrefixes []string `xml:"Blobs>BlobPrefix>Name"`

	// Delimiter is used to traverse blobs as if it were a file system.
	// It is returned if ListBlobsParameters.Delimiter is specified.
	Delimiter string `xml:"Delimiter"`
}
+
// ListBlobsParameters defines the set of customizable
// parameters to make a List Blobs call.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx
type ListBlobsParameters struct {
	Prefix     string // only list blobs whose names begin with this prefix
	Delimiter  string // delimiter for hierarchical (folder-like) listing
	Marker     string // continuation token from a previous listing
	Include    string // extra datasets to include (e.g. metadata, snapshots)
	MaxResults uint   // maximum number of blobs to return; 0 means server default
	Timeout    uint   // server-side timeout in seconds; 0 means not sent
}
+
+func (p ListBlobsParameters) getParameters() url.Values {
+ out := url.Values{}
+
+ if p.Prefix != "" {
+ out.Set("prefix", p.Prefix)
+ }
+ if p.Delimiter != "" {
+ out.Set("delimiter", p.Delimiter)
+ }
+ if p.Marker != "" {
+ out.Set("marker", p.Marker)
+ }
+ if p.Include != "" {
+ out.Set("include", p.Include)
+ }
+ if p.MaxResults != 0 {
+ out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults))
+ }
+ if p.Timeout != 0 {
+ out.Set("timeout", fmt.Sprintf("%v", p.Timeout))
+ }
+
+ return out
+}
+
// ContainerAccessType defines the access level to the container from a public
// request.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx and "x-ms-
// blob-public-access" header.
type ContainerAccessType string

// Access options for containers
const (
	ContainerAccessTypePrivate   ContainerAccessType = ""          // no anonymous access
	ContainerAccessTypeBlob      ContainerAccessType = "blob"      // anonymous read access to blobs only
	ContainerAccessTypeContainer ContainerAccessType = "container" // anonymous read access to container and blobs
)
+
// ContainerAccessPolicy represents each access policy in the container ACL.
type ContainerAccessPolicy struct {
	ID         string    // unique identifier of the stored access policy
	StartTime  time.Time // when the policy becomes valid
	ExpiryTime time.Time // when the policy expires
	CanRead    bool      // maps to "r" in the permission string
	CanWrite   bool      // maps to "w" in the permission string
	CanDelete  bool      // maps to "d" in the permission string
}
+
// ContainerPermissions represents the container ACLs.
type ContainerPermissions struct {
	AccessType     ContainerAccessType     // public-access level for the container
	AccessPolicies []ContainerAccessPolicy // stored access policies (signed identifiers)
}
+
// ContainerAccessHeader references header used when setting/getting container ACL
const (
	ContainerAccessHeader string = "x-ms-blob-public-access"
)
+
+// Create creates a blob container within the storage account
+// with given name and access level. Returns error if container already exists.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx
+func (c *Container) Create() error {
+ resp, err := c.create()
+ if err != nil {
+ return err
+ }
+ defer readAndCloseBody(resp.body)
+ return checkRespCode(resp.statusCode, []int{http.StatusCreated})
+}
+
+// CreateIfNotExists creates a blob container if it does not exist. Returns
+// true if container is newly created or false if container already exists.
+func (c *Container) CreateIfNotExists() (bool, error) {
+ resp, err := c.create()
+ if resp != nil {
+ defer readAndCloseBody(resp.body)
+ if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict {
+ return resp.statusCode == http.StatusCreated, nil
+ }
+ }
+ return false, err
+}
+
+func (c *Container) create() (*storageResponse, error) {
+ uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), url.Values{"restype": {"container"}})
+ headers := c.bsc.client.getStandardHeaders()
+ return c.bsc.client.exec(http.MethodPut, uri, headers, nil, c.bsc.auth)
+}
+
+// Exists returns true if a container with given name exists
+// on the storage account, otherwise returns false.
+func (c *Container) Exists() (bool, error) {
+ uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), url.Values{"restype": {"container"}})
+ headers := c.bsc.client.getStandardHeaders()
+
+ resp, err := c.bsc.client.exec(http.MethodHead, uri, headers, nil, c.bsc.auth)
+ if resp != nil {
+ defer readAndCloseBody(resp.body)
+ if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound {
+ return resp.statusCode == http.StatusOK, nil
+ }
+ }
+ return false, err
+}
+
+// SetPermissions sets up container permissions as per https://msdn.microsoft.com/en-us/library/azure/dd179391.aspx
+func (c *Container) SetPermissions(permissions ContainerPermissions, timeout int, leaseID string) error {
+ params := url.Values{
+ "restype": {"container"},
+ "comp": {"acl"},
+ }
+
+ if timeout > 0 {
+ params.Add("timeout", strconv.Itoa(timeout))
+ }
+
+ uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params)
+ headers := c.bsc.client.getStandardHeaders()
+ if permissions.AccessType != "" {
+ headers[ContainerAccessHeader] = string(permissions.AccessType)
+ }
+
+ if leaseID != "" {
+ headers[headerLeaseID] = leaseID
+ }
+
+ body, length, err := generateContainerACLpayload(permissions.AccessPolicies)
+ headers["Content-Length"] = strconv.Itoa(length)
+
+ resp, err := c.bsc.client.exec(http.MethodPut, uri, headers, body, c.bsc.auth)
+ if err != nil {
+ return err
+ }
+ defer readAndCloseBody(resp.body)
+
+ if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
+ return errors.New("Unable to set permissions")
+ }
+
+ return nil
+}
+
+// GetPermissions gets the container permissions as per https://msdn.microsoft.com/en-us/library/azure/dd179469.aspx
+// If timeout is 0 then it will not be passed to Azure
+// leaseID will only be passed to Azure if populated
+func (c *Container) GetPermissions(timeout int, leaseID string) (*ContainerPermissions, error) {
+ params := url.Values{
+ "restype": {"container"},
+ "comp": {"acl"},
+ }
+
+ if timeout > 0 {
+ params.Add("timeout", strconv.Itoa(timeout))
+ }
+
+ uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params)
+ headers := c.bsc.client.getStandardHeaders()
+
+ if leaseID != "" {
+ headers[headerLeaseID] = leaseID
+ }
+
+ resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.body.Close()
+
+ var ap AccessPolicy
+ err = xmlUnmarshal(resp.body, &ap.SignedIdentifiersList)
+ if err != nil {
+ return nil, err
+ }
+ return buildAccessPolicy(ap, &resp.headers), nil
+}
+
+func buildAccessPolicy(ap AccessPolicy, headers *http.Header) *ContainerPermissions {
+ // containerAccess. Blob, Container, empty
+ containerAccess := headers.Get(http.CanonicalHeaderKey(ContainerAccessHeader))
+ permissions := ContainerPermissions{
+ AccessType: ContainerAccessType(containerAccess),
+ AccessPolicies: []ContainerAccessPolicy{},
+ }
+
+ for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers {
+ capd := ContainerAccessPolicy{
+ ID: policy.ID,
+ StartTime: policy.AccessPolicy.StartTime,
+ ExpiryTime: policy.AccessPolicy.ExpiryTime,
+ }
+ capd.CanRead = updatePermissions(policy.AccessPolicy.Permission, "r")
+ capd.CanWrite = updatePermissions(policy.AccessPolicy.Permission, "w")
+ capd.CanDelete = updatePermissions(policy.AccessPolicy.Permission, "d")
+
+ permissions.AccessPolicies = append(permissions.AccessPolicies, capd)
+ }
+ return &permissions
+}
+
+// Delete deletes the container with given name on the storage
+// account. If the container does not exist returns error.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179408.aspx
+func (c *Container) Delete() error {
+ resp, err := c.delete()
+ if err != nil {
+ return err
+ }
+ defer readAndCloseBody(resp.body)
+ return checkRespCode(resp.statusCode, []int{http.StatusAccepted})
+}
+
+// DeleteIfExists deletes the container with given name on the storage
+// account if it exists. Returns true if container is deleted with this call, or
+// false if the container did not exist at the time of the Delete Container
+// operation.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179408.aspx
+func (c *Container) DeleteIfExists() (bool, error) {
+ resp, err := c.delete()
+ if resp != nil {
+ defer readAndCloseBody(resp.body)
+ if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound {
+ return resp.statusCode == http.StatusAccepted, nil
+ }
+ }
+ return false, err
+}
+
+func (c *Container) delete() (*storageResponse, error) {
+ uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), url.Values{"restype": {"container"}})
+ headers := c.bsc.client.getStandardHeaders()
+ return c.bsc.client.exec(http.MethodDelete, uri, headers, nil, c.bsc.auth)
+}
+
+// ListBlobs returns an object that contains list of blobs in the container,
+// pagination token and other information in the response of List Blobs call.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx
+func (c *Container) ListBlobs(params ListBlobsParameters) (BlobListResponse, error) {
+ q := mergeParams(params.getParameters(), url.Values{
+ "restype": {"container"},
+ "comp": {"list"}},
+ )
+ uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), q)
+ headers := c.bsc.client.getStandardHeaders()
+
+ var out BlobListResponse
+ resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth)
+ if err != nil {
+ return out, err
+ }
+ defer resp.body.Close()
+
+ err = xmlUnmarshal(resp.body, &out)
+ return out, err
+}
+
+func generateContainerACLpayload(policies []ContainerAccessPolicy) (io.Reader, int, error) {
+ sil := SignedIdentifiers{
+ SignedIdentifiers: []SignedIdentifier{},
+ }
+ for _, capd := range policies {
+ permission := capd.generateContainerPermissions()
+ signedIdentifier := convertAccessPolicyToXMLStructs(capd.ID, capd.StartTime, capd.ExpiryTime, permission)
+ sil.SignedIdentifiers = append(sil.SignedIdentifiers, signedIdentifier)
+ }
+ return xmlMarshal(sil)
+}
+
+func (capd *ContainerAccessPolicy) generateContainerPermissions() (permissions string) {
+ // generate the permissions string (rwd).
+ // still want the end user API to have bool flags.
+ permissions = ""
+
+ if capd.CanRead {
+ permissions += "r"
+ }
+
+ if capd.CanWrite {
+ permissions += "w"
+ }
+
+ if capd.CanDelete {
+ permissions += "d"
+ }
+
+ return permissions
+}
diff --git a/vendor/github.com/Azure/azure-storage-go/directory.go b/vendor/github.com/Azure/azure-storage-go/directory.go
new file mode 100644
index 000000000..d27e62079
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-go/directory.go
@@ -0,0 +1,217 @@
+package storage
+
+import (
+ "encoding/xml"
+ "net/http"
+ "net/url"
+)
+
// Directory represents a directory on a share.
type Directory struct {
	fsc      *FileServiceClient // client used to issue requests for this directory
	Metadata map[string]string  // user-defined metadata, filled by FetchAttributes
	Name     string             `xml:"Name"` // directory name; empty for the share root
	parent   *Directory         // nil for the root directory of a share
	Properties DirectoryProperties
	share    *Share // the share this directory belongs to
}
+
// DirectoryProperties contains various properties of a directory.
type DirectoryProperties struct {
	LastModified string `xml:"Last-Modified"` // RFC1123 timestamp of the last modification
	Etag         string `xml:"Etag"`          // entity tag for optimistic concurrency
}
+
// ListDirsAndFilesParameters defines the set of customizable parameters to
// make a List Files and Directories call.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn166980.aspx
type ListDirsAndFilesParameters struct {
	Marker     string // continuation token from a previous listing
	MaxResults uint   // maximum number of entries to return; 0 means server default
	Timeout    uint   // server-side timeout in seconds; 0 means not sent
}
+
// DirsAndFilesListResponse contains the response fields from
// a List Files and Directories call.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn166980.aspx
type DirsAndFilesListResponse struct {
	XMLName     xml.Name    `xml:"EnumerationResults"`
	Xmlns       string      `xml:"xmlns,attr"`
	Marker      string      `xml:"Marker"`
	MaxResults  int64       `xml:"MaxResults"`
	Directories []Directory `xml:"Entries>Directory"`
	Files       []File      `xml:"Entries>File"`
	NextMarker  string      `xml:"NextMarker"` // pass as Marker in the next call to continue listing
}
+
+// builds the complete directory path for this directory object.
+func (d *Directory) buildPath() string {
+ path := ""
+ current := d
+ for current.Name != "" {
+ path = "/" + current.Name + path
+ current = current.parent
+ }
+ return d.share.buildPath() + path
+}
+
+// Create this directory in the associated share.
+// If a directory with the same name already exists, the operation fails.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dn166993.aspx
+func (d *Directory) Create() error {
+ // if this is the root directory exit early
+ if d.parent == nil {
+ return nil
+ }
+
+ headers, err := d.fsc.createResource(d.buildPath(), resourceDirectory, nil, mergeMDIntoExtraHeaders(d.Metadata, nil), []int{http.StatusCreated})
+ if err != nil {
+ return err
+ }
+
+ d.updateEtagAndLastModified(headers)
+ return nil
+}
+
+// CreateIfNotExists creates this directory under the associated share if the
+// directory does not exists. Returns true if the directory is newly created or
+// false if the directory already exists.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dn166993.aspx
+func (d *Directory) CreateIfNotExists() (bool, error) {
+ // if this is the root directory exit early
+ if d.parent == nil {
+ return false, nil
+ }
+
+ resp, err := d.fsc.createResourceNoClose(d.buildPath(), resourceDirectory, nil, nil)
+ if resp != nil {
+ defer readAndCloseBody(resp.body)
+ if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict {
+ if resp.statusCode == http.StatusCreated {
+ d.updateEtagAndLastModified(resp.headers)
+ return true, nil
+ }
+
+ return false, d.FetchAttributes()
+ }
+ }
+
+ return false, err
+}
+
+// Delete removes this directory. It must be empty in order to be deleted.
+// If the directory does not exist the operation fails.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dn166969.aspx
+func (d *Directory) Delete() error {
+ return d.fsc.deleteResource(d.buildPath(), resourceDirectory)
+}
+
+// DeleteIfExists removes this directory if it exists.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dn166969.aspx
+func (d *Directory) DeleteIfExists() (bool, error) {
+ resp, err := d.fsc.deleteResourceNoClose(d.buildPath(), resourceDirectory)
+ if resp != nil {
+ defer readAndCloseBody(resp.body)
+ if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound {
+ return resp.statusCode == http.StatusAccepted, nil
+ }
+ }
+ return false, err
+}
+
+// Exists returns true if this directory exists.
+func (d *Directory) Exists() (bool, error) {
+ exists, headers, err := d.fsc.resourceExists(d.buildPath(), resourceDirectory)
+ if exists {
+ d.updateEtagAndLastModified(headers)
+ }
+ return exists, err
+}
+
+// FetchAttributes retrieves metadata for this directory.
+func (d *Directory) FetchAttributes() error {
+ headers, err := d.fsc.getResourceHeaders(d.buildPath(), compNone, resourceDirectory, http.MethodHead)
+ if err != nil {
+ return err
+ }
+
+ d.updateEtagAndLastModified(headers)
+ d.Metadata = getMetadataFromHeaders(headers)
+
+ return nil
+}
+
+// GetDirectoryReference returns a child Directory object for this directory.
+func (d *Directory) GetDirectoryReference(name string) *Directory {
+ return &Directory{
+ fsc: d.fsc,
+ Name: name,
+ parent: d,
+ share: d.share,
+ }
+}
+
+// GetFileReference returns a child File object for this directory.
+func (d *Directory) GetFileReference(name string) *File {
+ return &File{
+ fsc: d.fsc,
+ Name: name,
+ parent: d,
+ share: d.share,
+ }
+}
+
+// ListDirsAndFiles returns a list of files and directories under this directory.
+// It also contains a pagination token and other response details.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dn166980.aspx
+func (d *Directory) ListDirsAndFiles(params ListDirsAndFilesParameters) (*DirsAndFilesListResponse, error) {
+ q := mergeParams(params.getParameters(), getURLInitValues(compList, resourceDirectory))
+
+ resp, err := d.fsc.listContent(d.buildPath(), q, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ defer resp.body.Close()
+ var out DirsAndFilesListResponse
+ err = xmlUnmarshal(resp.body, &out)
+ return &out, err
+}
+
+// SetMetadata replaces the metadata for this directory.
+//
+// Some keys may be converted to Camel-Case before sending. All keys
+// are returned in lower case by GetDirectoryMetadata. HTTP header names
+// are case-insensitive so case munging should not matter to other
+// applications either.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/mt427370.aspx
+func (d *Directory) SetMetadata() error {
+ headers, err := d.fsc.setResourceHeaders(d.buildPath(), compMetadata, resourceDirectory, mergeMDIntoExtraHeaders(d.Metadata, nil))
+ if err != nil {
+ return err
+ }
+
+ d.updateEtagAndLastModified(headers)
+ return nil
+}
+
+// updates Etag and last modified date
+func (d *Directory) updateEtagAndLastModified(headers http.Header) {
+ d.Properties.Etag = headers.Get("Etag")
+ d.Properties.LastModified = headers.Get("Last-Modified")
+}
+
+// URL gets the canonical URL to this directory.
+// This method does not create a publicly accessible URL if the directory
+// is private and this method does not check if the directory exists.
+func (d *Directory) URL() string {
+ return d.fsc.client.getEndpoint(fileServiceName, d.buildPath(), url.Values{})
+}
diff --git a/vendor/github.com/Azure/azure-storage-go/file.go b/vendor/github.com/Azure/azure-storage-go/file.go
new file mode 100644
index 000000000..e4901a114
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-go/file.go
@@ -0,0 +1,412 @@
+package storage
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "strconv"
+)
+
// fourMB is the 4 MiB limit applied to single range operations (see isRangeTooBig).
const fourMB = uint64(4194304)

// oneTB is the 1 TiB maximum file size accepted by File.Create.
const oneTB = uint64(1099511627776)
+
// File represents a file on a share.
type File struct {
	fsc      *FileServiceClient // client used to issue requests for this file
	Metadata map[string]string  // user-defined metadata, filled by FetchAttributes
	Name     string             `xml:"Name"` // file name within its parent directory
	parent   *Directory         // directory containing this file
	Properties FileProperties   `xml:"Properties"`
	share    *Share // the share this file belongs to
	FileCopyProperties FileCopyState // state of the last copy operation, if any
}
+
// FileProperties contains various properties of a file.
// The `header` tags map fields onto request headers via headersFromStruct
// when properties are sent to the service (see SetProperties).
type FileProperties struct {
	CacheControl string `header:"x-ms-cache-control"`
	Disposition  string `header:"x-ms-content-disposition"`
	Encoding     string `header:"x-ms-content-encoding"`
	Etag         string // entity tag for optimistic concurrency; read-only
	Language     string `header:"x-ms-content-language"`
	LastModified string // RFC1123 timestamp; read-only
	Length       uint64 `xml:"Content-Length"` // file size in bytes
	MD5          string `header:"x-ms-content-md5"`
	Type         string `header:"x-ms-content-type"`
}
+
// FileCopyState contains various properties of a file copy operation.
type FileCopyState struct {
	CompletionTime string
	ID             string `header:"x-ms-copy-id"` // identifier of the copy operation
	Progress       string
	Source         string
	Status         string `header:"x-ms-copy-status"` // copy status reported by the service
	StatusDesc     string
}
+
// FileStream contains file data returned from a call to GetFile.
type FileStream struct {
	Body       io.ReadCloser // file content; the caller is responsible for closing it
	ContentMD5 string        // MD5 of the range, populated only when requested
}
+
// FileRequestOptions will be passed to misc file operations.
// Currently just Timeout (in seconds) but will expand.
type FileRequestOptions struct {
	Timeout uint // timeout duration in seconds.
}
+
+// getParameters, construct parameters for FileRequestOptions.
+// currently only timeout, but expecting to grow as functionality fills out.
+func (p FileRequestOptions) getParameters() url.Values {
+ out := url.Values{}
+
+ if p.Timeout != 0 {
+ out.Set("timeout", fmt.Sprintf("%v", p.Timeout))
+ }
+
+ return out
+}
+
// FileRanges contains a list of file range information for a file.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn166984.aspx
type FileRanges struct {
	ContentLength uint64      // total file size, from the x-ms-content-length header
	LastModified  string      // RFC1123 timestamp from the response headers
	ETag          string      // entity tag from the response headers
	FileRanges    []FileRange `xml:"Range"` // valid (allocated) ranges of the file
}
+
// FileRange contains range information for a file.
// Start and End are inclusive byte offsets, so the range covers
// End-Start+1 bytes.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn166984.aspx
type FileRange struct {
	Start uint64 `xml:"Start"`
	End   uint64 `xml:"End"`
}
+
+func (fr FileRange) String() string {
+ return fmt.Sprintf("bytes=%d-%d", fr.Start, fr.End)
+}
+
+// builds the complete file path for this file object
+func (f *File) buildPath() string {
+ return f.parent.buildPath() + "/" + f.Name
+}
+
+// ClearRange releases the specified range of space in a file.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dn194276.aspx
+func (f *File) ClearRange(fileRange FileRange) error {
+ headers, err := f.modifyRange(nil, fileRange, nil)
+ if err != nil {
+ return err
+ }
+
+ f.updateEtagAndLastModified(headers)
+ return nil
+}
+
+// Create creates a new file or replaces an existing one.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dn194271.aspx
+func (f *File) Create(maxSize uint64) error {
+ if maxSize > oneTB {
+ return fmt.Errorf("max file size is 1TB")
+ }
+
+ extraHeaders := map[string]string{
+ "x-ms-content-length": strconv.FormatUint(maxSize, 10),
+ "x-ms-type": "file",
+ }
+
+ headers, err := f.fsc.createResource(f.buildPath(), resourceFile, nil, mergeMDIntoExtraHeaders(f.Metadata, extraHeaders), []int{http.StatusCreated})
+ if err != nil {
+ return err
+ }
+
+ f.Properties.Length = maxSize
+ f.updateEtagAndLastModified(headers)
+ return nil
+}
+
+// CopyFile operation copied a file/blob from the sourceURL to the path provided.
+//
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/copy-file
+func (f *File) CopyFile(sourceURL string, options *FileRequestOptions) error {
+ extraHeaders := map[string]string{
+ "x-ms-type": "file",
+ "x-ms-copy-source": sourceURL,
+ }
+
+ var parameters url.Values
+ if options != nil {
+ parameters = options.getParameters()
+ }
+
+ headers, err := f.fsc.createResource(f.buildPath(), resourceFile, parameters, mergeMDIntoExtraHeaders(f.Metadata, extraHeaders), []int{http.StatusAccepted})
+ if err != nil {
+ return err
+ }
+
+ f.updateEtagLastModifiedAndCopyHeaders(headers)
+ return nil
+}
+
+// Delete immediately removes this file from the storage account.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dn689085.aspx
+func (f *File) Delete() error {
+ return f.fsc.deleteResource(f.buildPath(), resourceFile)
+}
+
+// DeleteIfExists removes this file if it exists.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dn689085.aspx
+func (f *File) DeleteIfExists() (bool, error) {
+ resp, err := f.fsc.deleteResourceNoClose(f.buildPath(), resourceFile)
+ if resp != nil {
+ defer readAndCloseBody(resp.body)
+ if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound {
+ return resp.statusCode == http.StatusAccepted, nil
+ }
+ }
+ return false, err
+}
+
+// DownloadRangeToStream operation downloads the specified range of this file with optional MD5 hash.
+//
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file
+func (f *File) DownloadRangeToStream(fileRange FileRange, getContentMD5 bool) (fs FileStream, err error) {
+ if getContentMD5 && isRangeTooBig(fileRange) {
+ return fs, fmt.Errorf("must specify a range less than or equal to 4MB when getContentMD5 is true")
+ }
+
+ extraHeaders := map[string]string{
+ "Range": fileRange.String(),
+ }
+ if getContentMD5 == true {
+ extraHeaders["x-ms-range-get-content-md5"] = "true"
+ }
+
+ resp, err := f.fsc.getResourceNoClose(f.buildPath(), compNone, resourceFile, http.MethodGet, extraHeaders)
+ if err != nil {
+ return fs, err
+ }
+
+ if err = checkRespCode(resp.statusCode, []int{http.StatusOK, http.StatusPartialContent}); err != nil {
+ resp.body.Close()
+ return fs, err
+ }
+
+ fs.Body = resp.body
+ if getContentMD5 {
+ fs.ContentMD5 = resp.headers.Get("Content-MD5")
+ }
+ return fs, nil
+}
+
+// Exists returns true if this file exists.
+func (f *File) Exists() (bool, error) {
+ exists, headers, err := f.fsc.resourceExists(f.buildPath(), resourceFile)
+ if exists {
+ f.updateEtagAndLastModified(headers)
+ f.updateProperties(headers)
+ }
+ return exists, err
+}
+
+// FetchAttributes updates metadata and properties for this file.
+func (f *File) FetchAttributes() error {
+ headers, err := f.fsc.getResourceHeaders(f.buildPath(), compNone, resourceFile, http.MethodHead)
+ if err != nil {
+ return err
+ }
+
+ f.updateEtagAndLastModified(headers)
+ f.updateProperties(headers)
+ f.Metadata = getMetadataFromHeaders(headers)
+ return nil
+}
+
+// returns true if the range is larger than 4MB
+func isRangeTooBig(fileRange FileRange) bool {
+ if fileRange.End-fileRange.Start > fourMB {
+ return true
+ }
+
+ return false
+}
+
// ListRanges returns the list of valid ranges for this file.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn166984.aspx
func (f *File) ListRanges(listRange *FileRange) (*FileRanges, error) {
	params := url.Values{"comp": {"rangelist"}}

	// add optional range to list
	var headers map[string]string
	if listRange != nil {
		headers = make(map[string]string)
		headers["Range"] = listRange.String()
	}

	resp, err := f.fsc.listContent(f.buildPath(), params, headers)
	if err != nil {
		return nil, err
	}

	defer resp.body.Close()
	var cl uint64
	// the total file size comes from a response header, not the XML body
	cl, err = strconv.ParseUint(resp.headers.Get("x-ms-content-length"), 10, 64)
	if err != nil {
		// drain the body before returning so the connection can be reused
		ioutil.ReadAll(resp.body)
		return nil, err
	}

	// header-derived fields are filled before unmarshaling the XML range list
	var out FileRanges
	out.ContentLength = cl
	out.ETag = resp.headers.Get("ETag")
	out.LastModified = resp.headers.Get("Last-Modified")

	err = xmlUnmarshal(resp.body, &out)
	return &out, err
}
+
// modifyRange modifies a range of bytes in this file. A nil bytes reader
// issues a "clear" (deallocate) for the range; a non-nil reader issues an
// "update" write. Returns the response headers on success. Not supported by
// the storage emulator.
func (f *File) modifyRange(bytes io.Reader, fileRange FileRange, contentMD5 *string) (http.Header, error) {
	if err := f.fsc.checkForStorageEmulator(); err != nil {
		return nil, err
	}
	if fileRange.End < fileRange.Start {
		return nil, errors.New("the value for rangeEnd must be greater than or equal to rangeStart")
	}
	// the 4MB cap only applies to update writes, not clears
	if bytes != nil && isRangeTooBig(fileRange) {
		return nil, errors.New("range cannot exceed 4MB in size")
	}

	uri := f.fsc.client.getEndpoint(fileServiceName, f.buildPath(), url.Values{"comp": {"range"}})

	// default to clear
	write := "clear"
	cl := uint64(0)

	// if bytes is not nil then this is an update operation
	if bytes != nil {
		write = "update"
		// Start/End are inclusive, so the payload length is End-Start+1
		cl = (fileRange.End - fileRange.Start) + 1
	}

	extraHeaders := map[string]string{
		"Content-Length": strconv.FormatUint(cl, 10),
		"Range":          fileRange.String(),
		"x-ms-write":     write,
	}

	// optional MD5 lets the service verify the uploaded range
	if contentMD5 != nil {
		extraHeaders["Content-MD5"] = *contentMD5
	}

	headers := mergeHeaders(f.fsc.client.getStandardHeaders(), extraHeaders)
	resp, err := f.fsc.client.exec(http.MethodPut, uri, headers, bytes, f.fsc.auth)
	if err != nil {
		return nil, err
	}
	defer readAndCloseBody(resp.body)
	return resp.headers, checkRespCode(resp.statusCode, []int{http.StatusCreated})
}
+
+// SetMetadata replaces the metadata for this file.
+//
+// Some keys may be converted to Camel-Case before sending. All keys
+// are returned in lower case by GetFileMetadata. HTTP header names
+// are case-insensitive so case munging should not matter to other
+// applications either.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dn689097.aspx
+func (f *File) SetMetadata() error {
+ headers, err := f.fsc.setResourceHeaders(f.buildPath(), compMetadata, resourceFile, mergeMDIntoExtraHeaders(f.Metadata, nil))
+ if err != nil {
+ return err
+ }
+
+ f.updateEtagAndLastModified(headers)
+ return nil
+}
+
+// SetProperties sets system properties on this file.
+//
+// Some keys may be converted to Camel-Case before sending. All keys
+// are returned in lower case by SetFileProperties. HTTP header names
+// are case-insensitive so case munging should not matter to other
+// applications either.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dn166975.aspx
+func (f *File) SetProperties() error {
+ headers, err := f.fsc.setResourceHeaders(f.buildPath(), compProperties, resourceFile, headersFromStruct(f.Properties))
+ if err != nil {
+ return err
+ }
+
+ f.updateEtagAndLastModified(headers)
+ return nil
+}
+
// updates Etag and last modified date from the given response headers
func (f *File) updateEtagAndLastModified(headers http.Header) {
	f.Properties.Etag = headers.Get("Etag")
	f.Properties.LastModified = headers.Get("Last-Modified")
}
+
// updates Etag, last modified date and the x-ms-copy-id/x-ms-copy-status
// pair reported by Copy File operations
func (f *File) updateEtagLastModifiedAndCopyHeaders(headers http.Header) {
	f.Properties.Etag = headers.Get("Etag")
	f.Properties.LastModified = headers.Get("Last-Modified")
	f.FileCopyProperties.ID = headers.Get("X-Ms-Copy-Id")
	f.FileCopyProperties.Status = headers.Get("X-Ms-Copy-Status")
}
+
// updates file properties from the specified HTTP header
func (f *File) updateProperties(header http.Header) {
	// Length is only refreshed when Content-Length parses cleanly; a
	// missing/malformed header leaves the previous value untouched.
	size, err := strconv.ParseUint(header.Get("Content-Length"), 10, 64)
	if err == nil {
		f.Properties.Length = size
	}

	f.updateEtagAndLastModified(header)
	f.Properties.CacheControl = header.Get("Cache-Control")
	f.Properties.Disposition = header.Get("Content-Disposition")
	f.Properties.Encoding = header.Get("Content-Encoding")
	f.Properties.Language = header.Get("Content-Language")
	f.Properties.MD5 = header.Get("Content-MD5")
	f.Properties.Type = header.Get("Content-Type")
}
+
// URL gets the canonical URL to this file.
// This method does not create a publicly accessible URL if the file
// is private and this method does not check if the file exists.
func (f *File) URL() string {
	// no extra query parameters: the bare endpoint is the canonical URL
	return f.fsc.client.getEndpoint(fileServiceName, f.buildPath(), url.Values{})
}
+
+// WriteRange writes a range of bytes to this file with an optional MD5 hash of the content.
+// Note that the length of bytes must match (rangeEnd - rangeStart) + 1 with a maximum size of 4MB.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dn194276.aspx
+func (f *File) WriteRange(bytes io.Reader, fileRange FileRange, contentMD5 *string) error {
+ if bytes == nil {
+ return errors.New("bytes cannot be nil")
+ }
+
+ headers, err := f.modifyRange(bytes, fileRange, contentMD5)
+ if err != nil {
+ return err
+ }
+
+ f.updateEtagAndLastModified(headers)
+ return nil
+}
diff --git a/vendor/github.com/Azure/azure-storage-go/fileserviceclient.go b/vendor/github.com/Azure/azure-storage-go/fileserviceclient.go
new file mode 100644
index 000000000..d68bd7f64
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-go/fileserviceclient.go
@@ -0,0 +1,375 @@
+package storage
+
+import (
+ "encoding/xml"
+ "fmt"
+ "net/http"
+ "net/url"
+ "strings"
+)
+
// FileServiceClient contains operations for Microsoft Azure File Service.
type FileServiceClient struct {
	client Client // underlying storage client used to build URLs and execute requests
	auth authentication // authentication scheme applied to every request
}
+
// ListSharesParameters defines the set of customizable parameters to make a
// List Shares call. Zero-valued fields are omitted from the request.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn167009.aspx
type ListSharesParameters struct {
	Prefix string // only list shares whose names begin with this prefix
	Marker string // continuation token from a previous ListShares response
	Include string // extra datasets to include in the response
	MaxResults uint // maximum number of shares to return
	Timeout uint // server-side timeout for the request
}
+
// ShareListResponse contains the response fields from
// ListShares call.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn167009.aspx
type ShareListResponse struct {
	XMLName xml.Name `xml:"EnumerationResults"`
	Xmlns string `xml:"xmlns,attr"`
	Prefix string `xml:"Prefix"`
	Marker string `xml:"Marker"`
	// NextMarker is non-empty when more shares remain; pass it back as
	// ListSharesParameters.Marker to continue the listing.
	NextMarker string `xml:"NextMarker"`
	MaxResults int64 `xml:"MaxResults"`
	Shares []Share `xml:"Shares>Share"`
}
+
// compType enumerates the well-known values of the "comp" query parameter
// used by File service requests.
type compType string

const (
	compNone compType = ""
	compList compType = "list"
	compMetadata compType = "metadata"
	compProperties compType = "properties"
	compRangeList compType = "rangelist"
)

// String returns the raw query-parameter value.
func (ct compType) String() string {
	return string(ct)
}
+
// resourceType enumerates the well-known values of the "restype" query
// parameter; resourceFile is the empty string and yields no parameter.
type resourceType string

const (
	resourceDirectory resourceType = "directory"
	resourceFile resourceType = ""
	resourceShare resourceType = "share"
)

// String returns the raw query-parameter value.
func (rt resourceType) String() string {
	return string(rt)
}
+
+func (p ListSharesParameters) getParameters() url.Values {
+ out := url.Values{}
+
+ if p.Prefix != "" {
+ out.Set("prefix", p.Prefix)
+ }
+ if p.Marker != "" {
+ out.Set("marker", p.Marker)
+ }
+ if p.Include != "" {
+ out.Set("include", p.Include)
+ }
+ if p.MaxResults != 0 {
+ out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults))
+ }
+ if p.Timeout != 0 {
+ out.Set("timeout", fmt.Sprintf("%v", p.Timeout))
+ }
+
+ return out
+}
+
+func (p ListDirsAndFilesParameters) getParameters() url.Values {
+ out := url.Values{}
+
+ if p.Marker != "" {
+ out.Set("marker", p.Marker)
+ }
+ if p.MaxResults != 0 {
+ out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults))
+ }
+ if p.Timeout != 0 {
+ out.Set("timeout", fmt.Sprintf("%v", p.Timeout))
+ }
+
+ return out
+}
+
+// returns url.Values for the specified types
+func getURLInitValues(comp compType, res resourceType) url.Values {
+ values := url.Values{}
+ if comp != compNone {
+ values.Set("comp", comp.String())
+ }
+ if res != resourceFile {
+ values.Set("restype", res.String())
+ }
+ return values
+}
+
// GetShareReference returns a Share object for the specified share name.
// The reference is purely local; no request is issued.
func (f FileServiceClient) GetShareReference(name string) Share {
	return Share{
		fsc: &f,
		Name: name,
		Properties: ShareProperties{
			// NOTE(review): -1 appears to act as a "quota not yet
			// fetched" sentinel — confirm against Share usage.
			Quota: -1,
		},
	}
}
+
// ListShares returns the list of shares in a storage account along with
// pagination token and other response details.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
func (f FileServiceClient) ListShares(params ListSharesParameters) (*ShareListResponse, error) {
	q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}})

	var out ShareListResponse
	resp, err := f.listContent("", q, nil)
	if err != nil {
		return nil, err
	}
	defer resp.body.Close()
	err = xmlUnmarshal(resp.body, &out)

	// assign our client to the newly created Share objects
	for i := range out.Shares {
		out.Shares[i].fsc = &f
	}
	// out may be partially populated when unmarshalling failed; err conveys that
	return &out, err
}
+
// GetServiceProperties gets the properties of your storage account's file service.
// File service does not support logging.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file-service-properties
func (f *FileServiceClient) GetServiceProperties() (*ServiceProperties, error) {
	return f.client.getServiceProperties(fileServiceName, f.auth)
}

// SetServiceProperties sets the properties of your storage account's file service.
// File service does not support logging.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-file-service-properties
func (f *FileServiceClient) SetServiceProperties(props ServiceProperties) error {
	return f.client.setServiceProperties(props, fileServiceName, f.auth)
}
+
// retrieves directory or share content; on success the caller owns the
// response and must close resp.body
func (f FileServiceClient) listContent(path string, params url.Values, extraHeaders map[string]string) (*storageResponse, error) {
	if err := f.checkForStorageEmulator(); err != nil {
		return nil, err
	}

	uri := f.client.getEndpoint(fileServiceName, path, params)
	extraHeaders = f.client.protectUserAgent(extraHeaders)
	headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders)

	resp, err := f.client.exec(http.MethodGet, uri, headers, nil, f.auth)
	if err != nil {
		return nil, err
	}

	if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
		// non-200: consume and close the body here since the caller never sees it
		readAndCloseBody(resp.body)
		return nil, err
	}

	return resp, nil
}
+
// returns true if the specified resource exists, along with the response
// headers of the HEAD probe; a 404 is reported as (false, nil error)
func (f FileServiceClient) resourceExists(path string, res resourceType) (bool, http.Header, error) {
	if err := f.checkForStorageEmulator(); err != nil {
		return false, nil, err
	}

	uri := f.client.getEndpoint(fileServiceName, path, getURLInitValues(compNone, res))
	headers := f.client.getStandardHeaders()

	resp, err := f.client.exec(http.MethodHead, uri, headers, nil, f.auth)
	if resp != nil {
		defer readAndCloseBody(resp.body)
		if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound {
			return resp.statusCode == http.StatusOK, resp.headers, nil
		}
	}
	// either transport error or an unexpected status code
	return false, nil, err
}
+
// creates a resource depending on the specified resource type and returns
// the response headers after validating the status code
func (f FileServiceClient) createResource(path string, res resourceType, urlParams url.Values, extraHeaders map[string]string, expectedResponseCodes []int) (http.Header, error) {
	resp, err := f.createResourceNoClose(path, res, urlParams, extraHeaders)
	if err != nil {
		return nil, err
	}
	defer readAndCloseBody(resp.body)
	return resp.headers, checkRespCode(resp.statusCode, expectedResponseCodes)
}
+
// creates a resource depending on the specified resource type, doesn't close
// the response body; the caller is responsible for draining and closing it
func (f FileServiceClient) createResourceNoClose(path string, res resourceType, urlParams url.Values, extraHeaders map[string]string) (*storageResponse, error) {
	if err := f.checkForStorageEmulator(); err != nil {
		return nil, err
	}

	values := getURLInitValues(compNone, res)
	combinedParams := mergeParams(values, urlParams)
	uri := f.client.getEndpoint(fileServiceName, path, combinedParams)
	extraHeaders = f.client.protectUserAgent(extraHeaders)
	headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders)

	return f.client.exec(http.MethodPut, uri, headers, nil, f.auth)
}
+
// returns HTTP header data for the specified directory or share; only a
// 200 response is considered success
func (f FileServiceClient) getResourceHeaders(path string, comp compType, res resourceType, verb string) (http.Header, error) {
	resp, err := f.getResourceNoClose(path, comp, res, verb, nil)
	if err != nil {
		return nil, err
	}
	defer readAndCloseBody(resp.body)

	if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
		return nil, err
	}

	return resp.headers, nil
}
+
// gets the specified resource, doesn't close the response body; the caller
// is responsible for draining and closing it
func (f FileServiceClient) getResourceNoClose(path string, comp compType, res resourceType, verb string, extraHeaders map[string]string) (*storageResponse, error) {
	if err := f.checkForStorageEmulator(); err != nil {
		return nil, err
	}

	params := getURLInitValues(comp, res)
	uri := f.client.getEndpoint(fileServiceName, path, params)
	extraHeaders = f.client.protectUserAgent(extraHeaders)
	headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders)

	return f.client.exec(verb, uri, headers, nil, f.auth)
}
+
// deletes the resource and validates that the service accepted the
// deletion (202 Accepted)
func (f FileServiceClient) deleteResource(path string, res resourceType) error {
	resp, err := f.deleteResourceNoClose(path, res)
	if err != nil {
		return err
	}
	defer readAndCloseBody(resp.body)
	return checkRespCode(resp.statusCode, []int{http.StatusAccepted})
}
+
// deletes the resource and returns the response, doesn't close the response
// body; the caller is responsible for draining and closing it
func (f FileServiceClient) deleteResourceNoClose(path string, res resourceType) (*storageResponse, error) {
	if err := f.checkForStorageEmulator(); err != nil {
		return nil, err
	}

	values := getURLInitValues(compNone, res)
	uri := f.client.getEndpoint(fileServiceName, path, values)
	return f.client.exec(http.MethodDelete, uri, f.client.getStandardHeaders(), nil, f.auth)
}
+
+// merges metadata into extraHeaders and returns extraHeaders
+func mergeMDIntoExtraHeaders(metadata, extraHeaders map[string]string) map[string]string {
+ if metadata == nil && extraHeaders == nil {
+ return nil
+ }
+ if extraHeaders == nil {
+ extraHeaders = make(map[string]string)
+ }
+ for k, v := range metadata {
+ extraHeaders[userDefinedMetadataHeaderPrefix+k] = v
+ }
+ return extraHeaders
+}
+
// mergeHeaders copies every entry of extraHeaders into headers (overwriting
// any duplicate keys) and returns the mutated headers map.
func mergeHeaders(headers, extraHeaders map[string]string) map[string]string {
	for key, value := range extraHeaders {
		headers[key] = value
	}
	return headers
}
+
// sets extra header data for the specified resource via a PUT and returns
// the response headers; only a 200 response is considered success
func (f FileServiceClient) setResourceHeaders(path string, comp compType, res resourceType, extraHeaders map[string]string) (http.Header, error) {
	if err := f.checkForStorageEmulator(); err != nil {
		return nil, err
	}

	params := getURLInitValues(comp, res)
	uri := f.client.getEndpoint(fileServiceName, path, params)
	extraHeaders = f.client.protectUserAgent(extraHeaders)
	headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders)

	resp, err := f.client.exec(http.MethodPut, uri, headers, nil, f.auth)
	if err != nil {
		return nil, err
	}
	defer readAndCloseBody(resp.body)

	return resp.headers, checkRespCode(resp.statusCode, []int{http.StatusOK})
}
+
// gets metadata for the specified resource as a map of user-defined
// key/value pairs (nil when the resource carries no metadata)
func (f FileServiceClient) getMetadata(path string, res resourceType) (map[string]string, error) {
	if err := f.checkForStorageEmulator(); err != nil {
		return nil, err
	}

	headers, err := f.getResourceHeaders(path, compMetadata, res, http.MethodGet)
	if err != nil {
		return nil, err
	}

	return getMetadataFromHeaders(headers), nil
}
+
// returns a map of custom metadata values from the specified HTTP header;
// keys are lower-cased with the x-ms-meta- prefix stripped, and nil is
// returned when no metadata headers are present
func getMetadataFromHeaders(header http.Header) map[string]string {
	metadata := make(map[string]string)
	for k, v := range header {
		// Can't trust CanonicalHeaderKey() to munge case
		// reliably. "_" is allowed in identifiers:
		// https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
		// https://msdn.microsoft.com/library/aa664670(VS.71).aspx
		// http://tools.ietf.org/html/rfc7230#section-3.2
		// ...but "_" is considered invalid by
		// CanonicalMIMEHeaderKey in
		// https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542
		// so k can be "X-Ms-Meta-Foo" or "x-ms-meta-foo_bar".
		k = strings.ToLower(k)
		if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) {
			continue
		}
		// metadata["foo"] = content of the last X-Ms-Meta-Foo header
		k = k[len(userDefinedMetadataHeaderPrefix):]
		metadata[k] = v[len(v)-1]
	}

	// normalize "no metadata" to nil rather than an empty map
	if len(metadata) == 0 {
		return nil
	}

	return metadata
}
+
// checkForStorageEmulator determines if the client is setup for use with
// Azure Storage Emulator, and returns a relevant error since the emulator
// does not implement the File service.
func (f FileServiceClient) checkForStorageEmulator() error {
	if f.client.accountName == StorageEmulatorAccountName {
		return fmt.Errorf("Error: File service is not currently supported by Azure Storage Emulator")
	}
	return nil
}
diff --git a/vendor/github.com/Azure/azure-storage-go/glide.lock b/vendor/github.com/Azure/azure-storage-go/glide.lock
new file mode 100644
index 000000000..5d3ce8361
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-go/glide.lock
@@ -0,0 +1,14 @@
+hash: a97c0c90fe4d23bbd8e5745431f633e75530bb611131b786d76b8e1763bce85e
+updated: 2017-02-23T09:58:57.3701584-08:00
+imports:
+- name: github.com/Azure/go-autorest
+ version: ec5f4903f77ed9927ac95b19ab8e44ada64c1356
+ subpackages:
+ - autorest/azure
+ - autorest
+ - autorest/date
+- name: github.com/dgrijalva/jwt-go
+ version: 2268707a8f0843315e2004ee4f1d021dc08baedf
+testImports:
+- name: gopkg.in/check.v1
+ version: 20d25e2804050c1cd24a7eea1e7a6447dd0e74ec
diff --git a/vendor/github.com/Azure/azure-storage-go/glide.yaml b/vendor/github.com/Azure/azure-storage-go/glide.yaml
new file mode 100644
index 000000000..e6783b774
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-go/glide.yaml
@@ -0,0 +1,4 @@
+package: github.com/Azure/azure-sdk-for-go-storage
+import: []
+testImport:
+- package: gopkg.in/check.v1
diff --git a/vendor/github.com/Azure/azure-storage-go/queue.go b/vendor/github.com/Azure/azure-storage-go/queue.go
new file mode 100644
index 000000000..4031410ae
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-go/queue.go
@@ -0,0 +1,339 @@
+package storage
+
+import (
+ "encoding/xml"
+ "fmt"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+)
+
const (
	// casing is per Golang's http.Header canonicalizing the header names.
	approximateMessagesCountHeader = "X-Ms-Approximate-Messages-Count"
	// prefix shared by all user-defined metadata headers
	userDefinedMetadataHeaderPrefix = "X-Ms-Meta-"
)
+
// pathForQueue returns the URI path addressing the queue itself.
func pathForQueue(queue string) string {
	return fmt.Sprintf("/%s", queue)
}

// pathForQueueMessages returns the URI path of the queue's message collection.
func pathForQueueMessages(queue string) string {
	return fmt.Sprintf("/%s/messages", queue)
}

// pathForMessage returns the URI path of a single message within the queue.
func pathForMessage(queue, name string) string {
	return fmt.Sprintf("/%s/messages/%s", queue, name)
}
+
// putMessageRequest is the XML request body shared by the Put Message and
// Update Message operations.
type putMessageRequest struct {
	XMLName xml.Name `xml:"QueueMessage"`
	MessageText string `xml:"MessageText"`
}
+
// PutMessageParameters is the set of options that can be specified for a
// Put Message operation. A zero struct does not use any preferences for the
// request; zero-valued fields are omitted from the query string.
type PutMessageParameters struct {
	VisibilityTimeout int // omitted when 0
	MessageTTL int // omitted when 0
}
+
+func (p PutMessageParameters) getParameters() url.Values {
+ out := url.Values{}
+ if p.VisibilityTimeout != 0 {
+ out.Set("visibilitytimeout", strconv.Itoa(p.VisibilityTimeout))
+ }
+ if p.MessageTTL != 0 {
+ out.Set("messagettl", strconv.Itoa(p.MessageTTL))
+ }
+ return out
+}
+
// GetMessagesParameters is the set of options that can be specified for a
// Get Messages operation. A zero struct does not use any preferences for the
// request; zero-valued fields are omitted from the query string.
type GetMessagesParameters struct {
	NumOfMessages int // omitted when 0
	VisibilityTimeout int // omitted when 0
}
+
+func (p GetMessagesParameters) getParameters() url.Values {
+ out := url.Values{}
+ if p.NumOfMessages != 0 {
+ out.Set("numofmessages", strconv.Itoa(p.NumOfMessages))
+ }
+ if p.VisibilityTimeout != 0 {
+ out.Set("visibilitytimeout", strconv.Itoa(p.VisibilityTimeout))
+ }
+ return out
+}
+
// PeekMessagesParameters is the set of options that can be specified for a
// Peek Messages operation. A zero struct does not use any preferences for
// the request; a zero NumOfMessages is omitted from the query string.
type PeekMessagesParameters struct {
	NumOfMessages int // omitted when 0
}
+
+func (p PeekMessagesParameters) getParameters() url.Values {
+ out := url.Values{"peekonly": {"true"}} // Required for peek operation
+ if p.NumOfMessages != 0 {
+ out.Set("numofmessages", strconv.Itoa(p.NumOfMessages))
+ }
+ return out
+}
+
// UpdateMessageParameters is the set of options that can be specified for an
// Update Message operation. A zero struct does not use any preferences for
// the request; zero-valued fields are omitted from the query string.
type UpdateMessageParameters struct {
	PopReceipt string // omitted when empty
	VisibilityTimeout int // omitted when 0
}
+
+func (p UpdateMessageParameters) getParameters() url.Values {
+ out := url.Values{}
+ if p.PopReceipt != "" {
+ out.Set("popreceipt", p.PopReceipt)
+ }
+ if p.VisibilityTimeout != 0 {
+ out.Set("visibilitytimeout", strconv.Itoa(p.VisibilityTimeout))
+ }
+ return out
+}
+
// GetMessagesResponse represents a response returned from Get Messages
// operation.
type GetMessagesResponse struct {
	XMLName xml.Name `xml:"QueueMessagesList"`
	QueueMessagesList []GetMessageResponse `xml:"QueueMessage"`
}

// GetMessageResponse represents a QueueMessage object returned from Get
// Messages operation response.
type GetMessageResponse struct {
	MessageID string `xml:"MessageId"`
	InsertionTime string `xml:"InsertionTime"`
	ExpirationTime string `xml:"ExpirationTime"`
	// PopReceipt is required to later delete or update this message.
	PopReceipt string `xml:"PopReceipt"`
	TimeNextVisible string `xml:"TimeNextVisible"`
	DequeueCount int `xml:"DequeueCount"`
	MessageText string `xml:"MessageText"`
}

// PeekMessagesResponse represents a response returned from Peek Messages
// operation.
type PeekMessagesResponse struct {
	XMLName xml.Name `xml:"QueueMessagesList"`
	QueueMessagesList []PeekMessageResponse `xml:"QueueMessage"`
}

// PeekMessageResponse represents a QueueMessage object returned from Peek
// Messages operation response. Unlike GetMessageResponse it carries no
// PopReceipt/TimeNextVisible since peeking does not dequeue the message.
type PeekMessageResponse struct {
	MessageID string `xml:"MessageId"`
	InsertionTime string `xml:"InsertionTime"`
	ExpirationTime string `xml:"ExpirationTime"`
	DequeueCount int `xml:"DequeueCount"`
	MessageText string `xml:"MessageText"`
}

// QueueMetadataResponse represents user defined metadata and queue
// properties on a specific queue.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179384.aspx
type QueueMetadataResponse struct {
	ApproximateMessageCount int
	UserDefinedMetadata map[string]string
}
+
// SetMetadata operation sets user-defined metadata on the specified queue.
// Metadata is associated with the queue as name-value pairs. Note that this
// replaces any existing metadata on the queue.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179348.aspx
func (c QueueServiceClient) SetMetadata(name string, metadata map[string]string) error {
	uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": []string{"metadata"}})
	metadata = c.client.protectUserAgent(metadata)
	headers := c.client.getStandardHeaders()
	// each metadata pair is sent as an x-ms-meta-<name> request header
	for k, v := range metadata {
		headers[userDefinedMetadataHeaderPrefix+k] = v
	}

	resp, err := c.client.exec(http.MethodPut, uri, headers, nil, c.auth)
	if err != nil {
		return err
	}
	defer readAndCloseBody(resp.body)

	return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
}
+
// GetMetadata operation retrieves user-defined metadata and queue
// properties on the specified queue. Metadata is associated with
// the queue as name-values pairs.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179384.aspx
//
// Because the way Golang's http client (and http.Header in particular)
// canonicalize header names, the returned metadata names would always
// be all lower case.
func (c QueueServiceClient) GetMetadata(name string) (QueueMetadataResponse, error) {
	qm := QueueMetadataResponse{}
	qm.UserDefinedMetadata = make(map[string]string)
	uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": []string{"metadata"}})
	headers := c.client.getStandardHeaders()
	resp, err := c.client.exec(http.MethodGet, uri, headers, nil, c.auth)
	if err != nil {
		return qm, err
	}
	defer readAndCloseBody(resp.body)

	for k, v := range resp.headers {
		// every interesting header is expected to carry exactly one value
		if len(v) != 1 {
			return qm, fmt.Errorf("Unexpected number of values (%d) in response header '%s'", len(v), k)
		}

		value := v[0]

		if k == approximateMessagesCountHeader {
			qm.ApproximateMessageCount, err = strconv.Atoi(value)
			if err != nil {
				return qm, fmt.Errorf("Unexpected value in response header '%s': '%s' ", k, value)
			}
		} else if strings.HasPrefix(k, userDefinedMetadataHeaderPrefix) {
			// strip the x-ms-meta- prefix; the remaining name is lower-cased
			name := strings.TrimPrefix(k, userDefinedMetadataHeaderPrefix)
			qm.UserDefinedMetadata[strings.ToLower(name)] = value
		}
	}

	return qm, checkRespCode(resp.statusCode, []int{http.StatusOK})
}
+
// CreateQueue operation creates a queue under the given account.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179342.aspx
func (c QueueServiceClient) CreateQueue(name string) error {
	uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{})
	headers := c.client.getStandardHeaders()
	resp, err := c.client.exec(http.MethodPut, uri, headers, nil, c.auth)
	if err != nil {
		return err
	}
	defer readAndCloseBody(resp.body)
	return checkRespCode(resp.statusCode, []int{http.StatusCreated})
}
+
// DeleteQueue operation permanently deletes the specified queue.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179436.aspx
func (c QueueServiceClient) DeleteQueue(name string) error {
	uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{})
	resp, err := c.client.exec(http.MethodDelete, uri, c.client.getStandardHeaders(), nil, c.auth)
	if err != nil {
		return err
	}
	defer readAndCloseBody(resp.body)
	return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
}
+
+// QueueExists returns true if a queue with given name exists.
+func (c QueueServiceClient) QueueExists(name string) (bool, error) {
+ uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": {"metadata"}})
+ resp, err := c.client.exec(http.MethodGet, uri, c.client.getStandardHeaders(), nil, c.auth)
+ if resp != nil && (resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound) {
+ return resp.statusCode == http.StatusOK, nil
+ }
+
+ return false, err
+}
+
// PutMessage operation adds a new message to the back of the message queue.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179346.aspx
func (c QueueServiceClient) PutMessage(queue string, message string, params PutMessageParameters) error {
	uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters())
	// the message is sent as an XML <QueueMessage> body
	req := putMessageRequest{MessageText: message}
	body, nn, err := xmlMarshal(req)
	if err != nil {
		return err
	}
	headers := c.client.getStandardHeaders()
	headers["Content-Length"] = strconv.Itoa(nn)
	resp, err := c.client.exec(http.MethodPost, uri, headers, body, c.auth)
	if err != nil {
		return err
	}
	defer readAndCloseBody(resp.body)
	return checkRespCode(resp.statusCode, []int{http.StatusCreated})
}
+
// ClearMessages operation deletes all messages from the specified queue.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179454.aspx
func (c QueueServiceClient) ClearMessages(queue string) error {
	uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), url.Values{})
	resp, err := c.client.exec(http.MethodDelete, uri, c.client.getStandardHeaders(), nil, c.auth)
	if err != nil {
		return err
	}
	defer readAndCloseBody(resp.body)
	return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
}
+
// GetMessages operation retrieves one or more messages from the front of the
// queue.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179474.aspx
func (c QueueServiceClient) GetMessages(queue string, params GetMessagesParameters) (GetMessagesResponse, error) {
	var r GetMessagesResponse
	uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters())
	resp, err := c.client.exec(http.MethodGet, uri, c.client.getStandardHeaders(), nil, c.auth)
	if err != nil {
		return r, err
	}
	defer resp.body.Close()
	err = xmlUnmarshal(resp.body, &r)
	return r, err
}
+
// PeekMessages retrieves one or more messages from the front of the queue, but
// does not alter the visibility of the message.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179472.aspx
func (c QueueServiceClient) PeekMessages(queue string, params PeekMessagesParameters) (PeekMessagesResponse, error) {
	var r PeekMessagesResponse
	uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters())
	resp, err := c.client.exec(http.MethodGet, uri, c.client.getStandardHeaders(), nil, c.auth)
	if err != nil {
		return r, err
	}
	defer resp.body.Close()
	err = xmlUnmarshal(resp.body, &r)
	return r, err
}
+
// DeleteMessage operation deletes the specified message. popReceipt must be
// the receipt returned by an earlier Get Messages call for this message.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179347.aspx
func (c QueueServiceClient) DeleteMessage(queue, messageID, popReceipt string) error {
	uri := c.client.getEndpoint(queueServiceName, pathForMessage(queue, messageID), url.Values{
		"popreceipt": {popReceipt}})
	resp, err := c.client.exec(http.MethodDelete, uri, c.client.getStandardHeaders(), nil, c.auth)
	if err != nil {
		return err
	}
	defer readAndCloseBody(resp.body)
	return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
}
+
+// UpdateMessage operation deletes the specified message.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/hh452234.aspx
+func (c QueueServiceClient) UpdateMessage(queue string, messageID string, message string, params UpdateMessageParameters) error {
+ uri := c.client.getEndpoint(queueServiceName, pathForMessage(queue, messageID), params.getParameters())
+ req := putMessageRequest{MessageText: message}
+ body, nn, err := xmlMarshal(req)
+ if err != nil {
+ return err
+ }
+ headers := c.client.getStandardHeaders()
+ headers["Content-Length"] = fmt.Sprintf("%d", nn)
+ resp, err := c.client.exec(http.MethodPut, uri, headers, body, c.auth)
+ if err != nil {
+ return err
+ }
+ defer readAndCloseBody(resp.body)
+ return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
+}
diff --git a/vendor/github.com/Azure/azure-storage-go/queueserviceclient.go b/vendor/github.com/Azure/azure-storage-go/queueserviceclient.go
new file mode 100644
index 000000000..c26141339
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-go/queueserviceclient.go
@@ -0,0 +1,20 @@
+package storage
+
// QueueServiceClient contains operations for Microsoft Azure Queue Storage
// Service.
type QueueServiceClient struct {
	client Client // underlying storage client used to issue requests
	auth authentication // authentication scheme applied to every request
}

// GetServiceProperties gets the properties of your storage account's queue service.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-queue-service-properties
func (c *QueueServiceClient) GetServiceProperties() (*ServiceProperties, error) {
	return c.client.getServiceProperties(queueServiceName, c.auth)
}

// SetServiceProperties sets the properties of your storage account's queue service.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-queue-service-properties
func (c *QueueServiceClient) SetServiceProperties(props ServiceProperties) error {
	return c.client.setServiceProperties(props, queueServiceName, c.auth)
}
diff --git a/vendor/github.com/Azure/azure-storage-go/share.go b/vendor/github.com/Azure/azure-storage-go/share.go
new file mode 100644
index 000000000..e190097ea
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-go/share.go
@@ -0,0 +1,186 @@
+package storage
+
+import (
+ "fmt"
+ "net/http"
+ "net/url"
+ "strconv"
+)
+
// Share represents an Azure file share.
type Share struct {
	// fsc is the file service client this share reference was obtained from.
	fsc        *FileServiceClient
	Name       string          `xml:"Name"`
	Properties ShareProperties `xml:"Properties"`
	// Metadata holds user-defined name/value pairs. It is populated by
	// FetchAttributes and sent to the service by Create and SetMetadata.
	Metadata map[string]string
}

// ShareProperties contains various properties of a share.
type ShareProperties struct {
	LastModified string `xml:"Last-Modified"`
	Etag         string `xml:"Etag"`
	// Quota is the share quota; SetProperties rejects values outside [1, 5120].
	Quota int `xml:"Quota"`
}
+
+// builds the complete path for this share object.
+func (s *Share) buildPath() string {
+ return fmt.Sprintf("/%s", s.Name)
+}
+
+// Create this share under the associated account.
+// If a share with the same name already exists, the operation fails.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dn167008.aspx
+func (s *Share) Create() error {
+ headers, err := s.fsc.createResource(s.buildPath(), resourceShare, nil, mergeMDIntoExtraHeaders(s.Metadata, nil), []int{http.StatusCreated})
+ if err != nil {
+ return err
+ }
+
+ s.updateEtagAndLastModified(headers)
+ return nil
+}
+
+// CreateIfNotExists creates this share under the associated account if
+// it does not exist. Returns true if the share is newly created or false if
+// the share already exists.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dn167008.aspx
+func (s *Share) CreateIfNotExists() (bool, error) {
+ resp, err := s.fsc.createResourceNoClose(s.buildPath(), resourceShare, nil, nil)
+ if resp != nil {
+ defer readAndCloseBody(resp.body)
+ if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict {
+ if resp.statusCode == http.StatusCreated {
+ s.updateEtagAndLastModified(resp.headers)
+ return true, nil
+ }
+ return false, s.FetchAttributes()
+ }
+ }
+
+ return false, err
+}
+
// Delete marks this share for deletion. The share along with any files
// and directories contained within it are later deleted during garbage
// collection. If the share does not exist the operation fails.
// Use DeleteIfExists when the share may legitimately be absent.
//
// See https://msdn.microsoft.com/en-us/library/azure/dn689090.aspx
func (s *Share) Delete() error {
	return s.fsc.deleteResource(s.buildPath(), resourceShare)
}
+
+// DeleteIfExists operation marks this share for deletion if it exists.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dn689090.aspx
+func (s *Share) DeleteIfExists() (bool, error) {
+ resp, err := s.fsc.deleteResourceNoClose(s.buildPath(), resourceShare)
+ if resp != nil {
+ defer readAndCloseBody(resp.body)
+ if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound {
+ return resp.statusCode == http.StatusAccepted, nil
+ }
+ }
+ return false, err
+}
+
+// Exists returns true if this share already exists
+// on the storage account, otherwise returns false.
+func (s *Share) Exists() (bool, error) {
+ exists, headers, err := s.fsc.resourceExists(s.buildPath(), resourceShare)
+ if exists {
+ s.updateEtagAndLastModified(headers)
+ s.updateQuota(headers)
+ }
+ return exists, err
+}
+
+// FetchAttributes retrieves metadata and properties for this share.
+func (s *Share) FetchAttributes() error {
+ headers, err := s.fsc.getResourceHeaders(s.buildPath(), compNone, resourceShare, http.MethodHead)
+ if err != nil {
+ return err
+ }
+
+ s.updateEtagAndLastModified(headers)
+ s.updateQuota(headers)
+ s.Metadata = getMetadataFromHeaders(headers)
+
+ return nil
+}
+
// GetRootDirectoryReference returns a Directory object at the root of this share.
// Only fsc and share are set; every other Directory field keeps its zero value.
func (s *Share) GetRootDirectoryReference() *Directory {
	return &Directory{
		fsc:   s.fsc,
		share: s,
	}
}

// ServiceClient returns the FileServiceClient associated with this share.
func (s *Share) ServiceClient() *FileServiceClient {
	return s.fsc
}
+
+// SetMetadata replaces the metadata for this share.
+//
+// Some keys may be converted to Camel-Case before sending. All keys
+// are returned in lower case by GetShareMetadata. HTTP header names
+// are case-insensitive so case munging should not matter to other
+// applications either.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
+func (s *Share) SetMetadata() error {
+ headers, err := s.fsc.setResourceHeaders(s.buildPath(), compMetadata, resourceShare, mergeMDIntoExtraHeaders(s.Metadata, nil))
+ if err != nil {
+ return err
+ }
+
+ s.updateEtagAndLastModified(headers)
+ return nil
+}
+
+// SetProperties sets system properties for this share.
+//
+// Some keys may be converted to Camel-Case before sending. All keys
+// are returned in lower case by SetShareProperties. HTTP header names
+// are case-insensitive so case munging should not matter to other
+// applications either.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/mt427368.aspx
+func (s *Share) SetProperties() error {
+ if s.Properties.Quota < 1 || s.Properties.Quota > 5120 {
+ return fmt.Errorf("invalid value %v for quota, valid values are [1, 5120]", s.Properties.Quota)
+ }
+
+ headers, err := s.fsc.setResourceHeaders(s.buildPath(), compProperties, resourceShare, map[string]string{
+ "x-ms-share-quota": strconv.Itoa(s.Properties.Quota),
+ })
+ if err != nil {
+ return err
+ }
+
+ s.updateEtagAndLastModified(headers)
+ return nil
+}
+
// updateEtagAndLastModified caches the Etag and Last-Modified response
// headers on the share properties.
func (s *Share) updateEtagAndLastModified(headers http.Header) {
	s.Properties.Etag = headers.Get("Etag")
	s.Properties.LastModified = headers.Get("Last-Modified")
}
+
+// updates quota value
+func (s *Share) updateQuota(headers http.Header) {
+ quota, err := strconv.Atoi(headers.Get("x-ms-share-quota"))
+ if err == nil {
+ s.Properties.Quota = quota
+ }
+}
+
// URL gets the canonical URL to this share. This method does not create a publicly accessible
// URL if the share is private and this method does not check if the share exists.
func (s *Share) URL() string {
	// No query parameters: the bare endpoint plus the share path.
	return s.fsc.client.getEndpoint(fileServiceName, s.buildPath(), url.Values{})
}
diff --git a/vendor/github.com/Azure/azure-storage-go/storagepolicy.go b/vendor/github.com/Azure/azure-storage-go/storagepolicy.go
new file mode 100644
index 000000000..bee1c31ad
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-go/storagepolicy.go
@@ -0,0 +1,47 @@
+package storage
+
+import (
+ "strings"
+ "time"
+)
+
// AccessPolicyDetailsXML has specifics about an access policy
// annotated with XML details.
type AccessPolicyDetailsXML struct {
	StartTime  time.Time `xml:"Start"`
	ExpiryTime time.Time `xml:"Expiry"`
	// Permission is the raw permission-flag string, e.g. "raud" for tables.
	Permission string `xml:"Permission"`
}

// SignedIdentifier is a wrapper for a specific policy
type SignedIdentifier struct {
	ID           string                 `xml:"Id"`
	AccessPolicy AccessPolicyDetailsXML `xml:"AccessPolicy"`
}

// SignedIdentifiers part of the response from GetPermissions call.
type SignedIdentifiers struct {
	SignedIdentifiers []SignedIdentifier `xml:"SignedIdentifier"`
}

// AccessPolicy is the response type from the GetPermissions call.
type AccessPolicy struct {
	SignedIdentifiersList SignedIdentifiers `xml:"SignedIdentifiers"`
}
+
+// convertAccessPolicyToXMLStructs converts between AccessPolicyDetails which is a struct better for API usage to the
+// AccessPolicy struct which will get converted to XML.
+func convertAccessPolicyToXMLStructs(id string, startTime time.Time, expiryTime time.Time, permissions string) SignedIdentifier {
+ return SignedIdentifier{
+ ID: id,
+ AccessPolicy: AccessPolicyDetailsXML{
+ StartTime: startTime.UTC().Round(time.Second),
+ ExpiryTime: expiryTime.UTC().Round(time.Second),
+ Permission: permissions,
+ },
+ }
+}
+
// updatePermissions reports whether the raw permission string contains the
// given permission flag (e.g. "r", "a", "u" or "d").
func updatePermissions(permissions, permission string) bool {
	return strings.Contains(permissions, permission)
}
diff --git a/vendor/github.com/Azure/azure-storage-go/storageservice.go b/vendor/github.com/Azure/azure-storage-go/storageservice.go
new file mode 100644
index 000000000..817560b78
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-go/storageservice.go
@@ -0,0 +1,118 @@
+package storage
+
+import (
+ "fmt"
+ "net/http"
+ "net/url"
+)
+
// ServiceProperties represents the storage account service properties.
// Nil pointer fields are omitted from the generated XML by encoding/xml,
// leaving the corresponding service setting unchanged.
type ServiceProperties struct {
	Logging       *Logging
	HourMetrics   *Metrics
	MinuteMetrics *Metrics
	Cors          *Cors
}

// Logging represents the Azure Analytics Logging settings
type Logging struct {
	Version string
	// Delete/Read/Write toggle logging of the respective request classes.
	Delete          bool
	Read            bool
	Write           bool
	RetentionPolicy *RetentionPolicy
}

// RetentionPolicy indicates if retention is enabled and for how many days
type RetentionPolicy struct {
	Enabled bool
	Days    *int
}

// Metrics provide request statistics.
type Metrics struct {
	Version     string
	Enabled     bool
	IncludeAPIs *bool
	RetentionPolicy *RetentionPolicy
}

// Cors includes all the CORS rules
type Cors struct {
	CorsRule []CorsRule
}

// CorsRule includes all settings for a Cors rule
type CorsRule struct {
	AllowedOrigins  string
	AllowedMethods  string
	MaxAgeInSeconds int
	ExposedHeaders  string
	AllowedHeaders  string
}
+
+func (c Client) getServiceProperties(service string, auth authentication) (*ServiceProperties, error) {
+ query := url.Values{
+ "restype": {"service"},
+ "comp": {"properties"},
+ }
+ uri := c.getEndpoint(service, "", query)
+ headers := c.getStandardHeaders()
+
+ resp, err := c.exec(http.MethodGet, uri, headers, nil, auth)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.body.Close()
+
+ if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
+ return nil, err
+ }
+
+ var out ServiceProperties
+ err = xmlUnmarshal(resp.body, &out)
+ if err != nil {
+ return nil, err
+ }
+
+ return &out, nil
+}
+
+func (c Client) setServiceProperties(props ServiceProperties, service string, auth authentication) error {
+ query := url.Values{
+ "restype": {"service"},
+ "comp": {"properties"},
+ }
+ uri := c.getEndpoint(service, "", query)
+
+ // Ideally, StorageServiceProperties would be the output struct
+ // This is to avoid golint stuttering, while generating the correct XML
+ type StorageServiceProperties struct {
+ Logging *Logging
+ HourMetrics *Metrics
+ MinuteMetrics *Metrics
+ Cors *Cors
+ }
+ input := StorageServiceProperties{
+ Logging: props.Logging,
+ HourMetrics: props.HourMetrics,
+ MinuteMetrics: props.MinuteMetrics,
+ Cors: props.Cors,
+ }
+
+ body, length, err := xmlMarshal(input)
+ if err != nil {
+ return err
+ }
+
+ headers := c.getStandardHeaders()
+ headers["Content-Length"] = fmt.Sprintf("%v", length)
+
+ resp, err := c.exec(http.MethodPut, uri, headers, body, auth)
+ if err != nil {
+ return err
+ }
+ defer readAndCloseBody(resp.body)
+
+ return checkRespCode(resp.statusCode, []int{http.StatusAccepted})
+}
diff --git a/vendor/github.com/Azure/azure-storage-go/table.go b/vendor/github.com/Azure/azure-storage-go/table.go
new file mode 100644
index 000000000..4123746e5
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-go/table.go
@@ -0,0 +1,254 @@
+package storage
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "strconv"
+ "time"
+)
+
// AzureTable is the typedef of the Azure Table name
type AzureTable string

const (
	// tablesURIPath is the path of the collection-level Tables resource.
	tablesURIPath = "/Tables"
)

// createTableRequest is the JSON body sent to the Create Table operation.
type createTableRequest struct {
	TableName string `json:"TableName"`
}

// TableAccessPolicy are used for SETTING table policies
type TableAccessPolicy struct {
	ID         string
	StartTime  time.Time
	ExpiryTime time.Time
	// The Can* flags are folded into the "raud" permission string by
	// generateTablePermissions when the policy is serialized.
	CanRead   bool
	CanAppend bool
	CanUpdate bool
	CanDelete bool
}
+
+func pathForTable(table AzureTable) string { return fmt.Sprintf("%s", table) }
+
// getStandardHeaders returns the base headers sent with every table-service
// request: a fixed API version, the current RFC 1123 date, JSON content
// negotiation (nometadata OData), and the client's user agent.
func (c *TableServiceClient) getStandardHeaders() map[string]string {
	return map[string]string{
		"x-ms-version":   "2015-02-21",
		"x-ms-date":      currentTimeRfc1123Formatted(),
		"Accept":         "application/json;odata=nometadata",
		"Accept-Charset": "UTF-8",
		"Content-Type":   "application/json",
		userAgentHeader:  c.client.userAgent,
	}
}
+
+// QueryTables returns the tables created in the
+// *TableServiceClient storage account.
+func (c *TableServiceClient) QueryTables() ([]AzureTable, error) {
+ uri := c.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{})
+
+ headers := c.getStandardHeaders()
+ headers["Content-Length"] = "0"
+
+ resp, err := c.client.execInternalJSON(http.MethodGet, uri, headers, nil, c.auth)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.body.Close()
+
+ if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
+ ioutil.ReadAll(resp.body)
+ return nil, err
+ }
+
+ buf := new(bytes.Buffer)
+ if _, err := buf.ReadFrom(resp.body); err != nil {
+ return nil, err
+ }
+
+ var respArray queryTablesResponse
+ if err := json.Unmarshal(buf.Bytes(), &respArray); err != nil {
+ return nil, err
+ }
+
+ s := make([]AzureTable, len(respArray.TableName))
+ for i, elem := range respArray.TableName {
+ s[i] = AzureTable(elem.TableName)
+ }
+
+ return s, nil
+}
+
+// CreateTable creates the table given the specific
+// name. This function fails if the name is not compliant
+// with the specification or the tables already exists.
+func (c *TableServiceClient) CreateTable(table AzureTable) error {
+ uri := c.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{})
+
+ headers := c.getStandardHeaders()
+
+ req := createTableRequest{TableName: string(table)}
+ buf := new(bytes.Buffer)
+
+ if err := json.NewEncoder(buf).Encode(req); err != nil {
+ return err
+ }
+
+ headers["Content-Length"] = fmt.Sprintf("%d", buf.Len())
+
+ resp, err := c.client.execInternalJSON(http.MethodPost, uri, headers, buf, c.auth)
+
+ if err != nil {
+ return err
+ }
+ defer readAndCloseBody(resp.body)
+
+ if err := checkRespCode(resp.statusCode, []int{http.StatusCreated}); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// DeleteTable deletes the table given the specific
+// name. This function fails if the table is not present.
+// Be advised: DeleteTable deletes all the entries
+// that may be present.
+func (c *TableServiceClient) DeleteTable(table AzureTable) error {
+ uri := c.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{})
+ uri += fmt.Sprintf("('%s')", string(table))
+
+ headers := c.getStandardHeaders()
+
+ headers["Content-Length"] = "0"
+
+ resp, err := c.client.execInternalJSON(http.MethodDelete, uri, headers, nil, c.auth)
+
+ if err != nil {
+ return err
+ }
+ defer readAndCloseBody(resp.body)
+
+ if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil {
+ return err
+
+ }
+ return nil
+}
+
+// SetTablePermissions sets up table ACL permissions as per REST details https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Table-ACL
+func (c *TableServiceClient) SetTablePermissions(table AzureTable, policies []TableAccessPolicy, timeout uint) (err error) {
+ params := url.Values{"comp": {"acl"}}
+
+ if timeout > 0 {
+ params.Add("timeout", fmt.Sprint(timeout))
+ }
+
+ uri := c.client.getEndpoint(tableServiceName, string(table), params)
+ headers := c.client.getStandardHeaders()
+
+ body, length, err := generateTableACLPayload(policies)
+ if err != nil {
+ return err
+ }
+ headers["Content-Length"] = fmt.Sprintf("%v", length)
+
+ resp, err := c.client.execInternalJSON(http.MethodPut, uri, headers, body, c.auth)
+ if err != nil {
+ return err
+ }
+ defer readAndCloseBody(resp.body)
+
+ if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil {
+ return err
+ }
+ return nil
+}
+
+func generateTableACLPayload(policies []TableAccessPolicy) (io.Reader, int, error) {
+ sil := SignedIdentifiers{
+ SignedIdentifiers: []SignedIdentifier{},
+ }
+ for _, tap := range policies {
+ permission := generateTablePermissions(&tap)
+ signedIdentifier := convertAccessPolicyToXMLStructs(tap.ID, tap.StartTime, tap.ExpiryTime, permission)
+ sil.SignedIdentifiers = append(sil.SignedIdentifiers, signedIdentifier)
+ }
+ return xmlMarshal(sil)
+}
+
+// GetTablePermissions gets the table ACL permissions, as per REST details https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-table-acl
+func (c *TableServiceClient) GetTablePermissions(table AzureTable, timeout int) (permissionResponse []TableAccessPolicy, err error) {
+ params := url.Values{"comp": {"acl"}}
+
+ if timeout > 0 {
+ params.Add("timeout", strconv.Itoa(timeout))
+ }
+
+ uri := c.client.getEndpoint(tableServiceName, string(table), params)
+ headers := c.client.getStandardHeaders()
+ resp, err := c.client.execInternalJSON(http.MethodGet, uri, headers, nil, c.auth)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.body.Close()
+
+ if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
+ ioutil.ReadAll(resp.body)
+ return nil, err
+ }
+
+ var ap AccessPolicy
+ err = xmlUnmarshal(resp.body, &ap.SignedIdentifiersList)
+ if err != nil {
+ return nil, err
+ }
+ out := updateTableAccessPolicy(ap)
+ return out, nil
+}
+
+func updateTableAccessPolicy(ap AccessPolicy) []TableAccessPolicy {
+ out := []TableAccessPolicy{}
+ for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers {
+ tap := TableAccessPolicy{
+ ID: policy.ID,
+ StartTime: policy.AccessPolicy.StartTime,
+ ExpiryTime: policy.AccessPolicy.ExpiryTime,
+ }
+ tap.CanRead = updatePermissions(policy.AccessPolicy.Permission, "r")
+ tap.CanAppend = updatePermissions(policy.AccessPolicy.Permission, "a")
+ tap.CanUpdate = updatePermissions(policy.AccessPolicy.Permission, "u")
+ tap.CanDelete = updatePermissions(policy.AccessPolicy.Permission, "d")
+
+ out = append(out, tap)
+ }
+ return out
+}
+
+func generateTablePermissions(tap *TableAccessPolicy) (permissions string) {
+ // generate the permissions string (raud).
+ // still want the end user API to have bool flags.
+ permissions = ""
+
+ if tap.CanRead {
+ permissions += "r"
+ }
+
+ if tap.CanAppend {
+ permissions += "a"
+ }
+
+ if tap.CanUpdate {
+ permissions += "u"
+ }
+
+ if tap.CanDelete {
+ permissions += "d"
+ }
+ return permissions
+}
diff --git a/vendor/github.com/Azure/azure-storage-go/table_entities.go b/vendor/github.com/Azure/azure-storage-go/table_entities.go
new file mode 100644
index 000000000..36413a0cf
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-go/table_entities.go
@@ -0,0 +1,354 @@
+package storage
+
import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"reflect"
	"strings"
)
+
// Annotating as secure for gas scanning
/* #nosec */
const (
	partitionKeyNode = "PartitionKey"
	rowKeyNode       = "RowKey"
	// tag is the struct tag consulted for field exclusion; tagIgnore marks a
	// field that must be dropped from serialized payloads.
	tag       = "table"
	tagIgnore = "-"
	// Response headers carrying the continuation token for paged queries.
	continuationTokenPartitionKeyHeader = "X-Ms-Continuation-Nextpartitionkey"
	continuationTokenRowHeader          = "X-Ms-Continuation-Nextrowkey"
	// maxTopParameter is the maximum $top value the Azure API accepts.
	maxTopParameter = 1000
)

// queryTablesResponse mirrors the JSON "value" wrapper returned by the
// list-tables operation.
type queryTablesResponse struct {
	TableName []struct {
		TableName string `json:"TableName"`
	} `json:"value"`
}
+
// Entity write-operation kinds. Only the first constant needs an explicit
// iota; the remaining values follow implicitly (0..4), preserving the
// original values.
const (
	tableOperationTypeInsert = iota
	tableOperationTypeUpdate
	tableOperationTypeMerge
	tableOperationTypeInsertOrReplace
	tableOperationTypeInsertOrMerge
)

// tableOperation is the declared type for table write operations.
type tableOperation int
+
// TableEntity interface specifies
// the functions needed to support
// marshaling and unmarshaling into
// Azure Tables. The struct must only contain
// simple types because Azure Tables do not
// support hierarchy.
type TableEntity interface {
	PartitionKey() string
	RowKey() string
	SetPartitionKey(string) error
	SetRowKey(string) error
}

// ContinuationToken is an opaque (ie not useful to inspect)
// struct that Get... methods can return if there are more
// entries to be returned than the ones already
// returned. Just pass it to the same function to continue
// receiving the remaining entries.
type ContinuationToken struct {
	NextPartitionKey string
	NextRowKey       string
}

// getTableEntriesResponse mirrors the JSON "value" wrapper around the
// entities returned by a query; each entity arrives as a generic map.
type getTableEntriesResponse struct {
	Elements []map[string]interface{} `json:"value"`
}
+
// QueryTableEntities queries the specified table and returns the unmarshaled
// entities of type retType.
// top parameter limits the returned entries up to top. Maximum top
// allowed by Azure API is 1000. In case there are more than top entries to be
// returned the function will return a non nil *ContinuationToken. You can call the
// same function again passing the received ContinuationToken as previousContToken
// parameter in order to get the following entries. The query parameter
// is the odata query. To retrieve all the entries pass the empty string.
// The function returns a pointer to a TableEntity slice, the *ContinuationToken
// if there are more entries to be returned and an error in case something went
// wrong.
//
// Example:
// 		entities, cToken, err = tSvc.QueryTableEntities("table", cToken, reflect.TypeOf(entity), 20, "")
func (c *TableServiceClient) QueryTableEntities(tableName AzureTable, previousContToken *ContinuationToken, retType reflect.Type, top int, query string) ([]TableEntity, *ContinuationToken, error) {
	if top > maxTopParameter {
		return nil, nil, fmt.Errorf("top accepts at maximum %d elements. Requested %d instead", maxTopParameter, top)
	}

	// The OData query options are appended to the URI by hand; the filter is
	// percent-escaped, the continuation keys are passed through verbatim as
	// returned by the service.
	uri := c.client.getEndpoint(tableServiceName, pathForTable(tableName), url.Values{})
	uri += fmt.Sprintf("?$top=%d", top)
	if query != "" {
		uri += fmt.Sprintf("&$filter=%s", url.QueryEscape(query))
	}

	if previousContToken != nil {
		uri += fmt.Sprintf("&NextPartitionKey=%s&NextRowKey=%s", previousContToken.NextPartitionKey, previousContToken.NextRowKey)
	}

	headers := c.getStandardHeaders()

	headers["Content-Length"] = "0"

	resp, err := c.client.execInternalJSON(http.MethodGet, uri, headers, nil, c.auth)

	if err != nil {
		return nil, nil, err
	}

	// The continuation token is extracted before the status check so it is
	// returned to the caller even alongside an error.
	contToken := extractContinuationTokenFromHeaders(resp.headers)

	defer resp.body.Close()

	if err = checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
		return nil, contToken, err
	}

	retEntries, err := deserializeEntity(retType, resp.body)
	if err != nil {
		return nil, contToken, err
	}

	return retEntries, contToken, nil
}
+
// InsertEntity inserts an entity in the specified table.
// The function fails if there is an entity with the same
// PartitionKey and RowKey in the table.
func (c *TableServiceClient) InsertEntity(table AzureTable, entity TableEntity) error {
	// POST without keys in the URL; the service replies 201 Created.
	sc, err := c.execTable(table, entity, false, http.MethodPost)
	if err != nil {
		return err
	}

	return checkRespCode(sc, []int{http.StatusCreated})
}
+
+func (c *TableServiceClient) execTable(table AzureTable, entity TableEntity, specifyKeysInURL bool, method string) (int, error) {
+ uri := c.client.getEndpoint(tableServiceName, pathForTable(table), url.Values{})
+ if specifyKeysInURL {
+ uri += fmt.Sprintf("(PartitionKey='%s',RowKey='%s')", url.QueryEscape(entity.PartitionKey()), url.QueryEscape(entity.RowKey()))
+ }
+
+ headers := c.getStandardHeaders()
+
+ var buf bytes.Buffer
+
+ if err := injectPartitionAndRowKeys(entity, &buf); err != nil {
+ return 0, err
+ }
+
+ headers["Content-Length"] = fmt.Sprintf("%d", buf.Len())
+
+ resp, err := c.client.execInternalJSON(method, uri, headers, &buf, c.auth)
+
+ if err != nil {
+ return 0, err
+ }
+
+ defer resp.body.Close()
+
+ return resp.statusCode, nil
+}
+
// UpdateEntity updates the contents of an entity with the
// one passed as parameter. The function fails if there is no entity
// with the same PartitionKey and RowKey in the table.
func (c *TableServiceClient) UpdateEntity(table AzureTable, entity TableEntity) error {
	// PUT with keys in the URL replaces the whole entity; 204 on success.
	sc, err := c.execTable(table, entity, true, http.MethodPut)
	if err != nil {
		return err
	}

	return checkRespCode(sc, []int{http.StatusNoContent})
}

// MergeEntity merges the contents of an entity with the
// one passed as parameter.
// The function fails if there is no entity
// with the same PartitionKey and RowKey in the table.
func (c *TableServiceClient) MergeEntity(table AzureTable, entity TableEntity) error {
	// MERGE is a non-standard HTTP verb used by the table service.
	sc, err := c.execTable(table, entity, true, "MERGE")
	if err != nil {
		return err
	}

	return checkRespCode(sc, []int{http.StatusNoContent})
}

// DeleteEntityWithoutCheck deletes the entity matching by
// PartitionKey and RowKey. There is no check on IfMatch
// parameter so the entity is always deleted.
// The function fails if there is no entity
// with the same PartitionKey and RowKey in the table.
func (c *TableServiceClient) DeleteEntityWithoutCheck(table AzureTable, entity TableEntity) error {
	// The wildcard "*" If-Match matches any entity version.
	return c.DeleteEntity(table, entity, "*")
}
+
+// DeleteEntity deletes the entity matching by
+// PartitionKey, RowKey and ifMatch field.
+// The function fails if there is no entity
+// with the same PartitionKey and RowKey in the table or
+// the ifMatch is different.
+func (c *TableServiceClient) DeleteEntity(table AzureTable, entity TableEntity, ifMatch string) error {
+ uri := c.client.getEndpoint(tableServiceName, pathForTable(table), url.Values{})
+ uri += fmt.Sprintf("(PartitionKey='%s',RowKey='%s')", url.QueryEscape(entity.PartitionKey()), url.QueryEscape(entity.RowKey()))
+
+ headers := c.getStandardHeaders()
+
+ headers["Content-Length"] = "0"
+ headers["If-Match"] = ifMatch
+
+ resp, err := c.client.execInternalJSON(http.MethodDelete, uri, headers, nil, c.auth)
+
+ if err != nil {
+ return err
+ }
+ defer resp.body.Close()
+
+ if err := checkRespCode(resp.statusCode, []int{http.StatusNoContent}); err != nil {
+ return err
+ }
+
+ return nil
+}
+
// InsertOrReplaceEntity inserts an entity in the specified table
// or replaced the existing one.
func (c *TableServiceClient) InsertOrReplaceEntity(table AzureTable, entity TableEntity) error {
	// PUT with keys in the URL performs upsert-replace; 204 on success.
	sc, err := c.execTable(table, entity, true, http.MethodPut)
	if err != nil {
		return err
	}

	return checkRespCode(sc, []int{http.StatusNoContent})
}

// InsertOrMergeEntity inserts an entity in the specified table
// or merges the existing one.
func (c *TableServiceClient) InsertOrMergeEntity(table AzureTable, entity TableEntity) error {
	// MERGE with keys in the URL performs upsert-merge; 204 on success.
	sc, err := c.execTable(table, entity, true, "MERGE")
	if err != nil {
		return err
	}

	return checkRespCode(sc, []int{http.StatusNoContent})
}
+
+func injectPartitionAndRowKeys(entity TableEntity, buf *bytes.Buffer) error {
+ if err := json.NewEncoder(buf).Encode(entity); err != nil {
+ return err
+ }
+
+ dec := make(map[string]interface{})
+ if err := json.NewDecoder(buf).Decode(&dec); err != nil {
+ return err
+ }
+
+ // Inject PartitionKey and RowKey
+ dec[partitionKeyNode] = entity.PartitionKey()
+ dec[rowKeyNode] = entity.RowKey()
+
+ // Remove tagged fields
+ // The tag is defined in the const section
+ // This is useful to avoid storing the PartitionKey and RowKey twice.
+ numFields := reflect.ValueOf(entity).Elem().NumField()
+ for i := 0; i < numFields; i++ {
+ f := reflect.ValueOf(entity).Elem().Type().Field(i)
+
+ if f.Tag.Get(tag) == tagIgnore {
+ // we must look for its JSON name in the dictionary
+ // as the user can rename it using a tag
+ jsonName := f.Name
+ if f.Tag.Get("json") != "" {
+ jsonName = f.Tag.Get("json")
+ }
+ delete(dec, jsonName)
+ }
+ }
+
+ buf.Reset()
+
+ if err := json.NewEncoder(buf).Encode(&dec); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func deserializeEntity(retType reflect.Type, reader io.Reader) ([]TableEntity, error) {
+ buf := new(bytes.Buffer)
+
+ var ret getTableEntriesResponse
+ if err := json.NewDecoder(reader).Decode(&ret); err != nil {
+ return nil, err
+ }
+
+ tEntries := make([]TableEntity, len(ret.Elements))
+
+ for i, entry := range ret.Elements {
+
+ buf.Reset()
+ if err := json.NewEncoder(buf).Encode(entry); err != nil {
+ return nil, err
+ }
+
+ dec := make(map[string]interface{})
+ if err := json.NewDecoder(buf).Decode(&dec); err != nil {
+ return nil, err
+ }
+
+ var pKey, rKey string
+ // strip pk and rk
+ for key, val := range dec {
+ switch key {
+ case partitionKeyNode:
+ pKey = val.(string)
+ case rowKeyNode:
+ rKey = val.(string)
+ }
+ }
+
+ delete(dec, partitionKeyNode)
+ delete(dec, rowKeyNode)
+
+ buf.Reset()
+ if err := json.NewEncoder(buf).Encode(dec); err != nil {
+ return nil, err
+ }
+
+ // Create a empty retType instance
+ tEntries[i] = reflect.New(retType.Elem()).Interface().(TableEntity)
+ // Popolate it with the values
+ if err := json.NewDecoder(buf).Decode(&tEntries[i]); err != nil {
+ return nil, err
+ }
+
+ // Reset PartitionKey and RowKey
+ if err := tEntries[i].SetPartitionKey(pKey); err != nil {
+ return nil, err
+ }
+ if err := tEntries[i].SetRowKey(rKey); err != nil {
+ return nil, err
+ }
+ }
+
+ return tEntries, nil
+}
+
+func extractContinuationTokenFromHeaders(h http.Header) *ContinuationToken {
+ ct := ContinuationToken{h.Get(continuationTokenPartitionKeyHeader), h.Get(continuationTokenRowHeader)}
+
+ if ct.NextPartitionKey != "" && ct.NextRowKey != "" {
+ return &ct
+ }
+ return nil
+}
diff --git a/vendor/github.com/Azure/azure-storage-go/tableserviceclient.go b/vendor/github.com/Azure/azure-storage-go/tableserviceclient.go
new file mode 100644
index 000000000..ee5e0a867
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-go/tableserviceclient.go
@@ -0,0 +1,20 @@
+package storage
+
// TableServiceClient contains operations for Microsoft Azure Table Storage
// Service.
type TableServiceClient struct {
	// client is the underlying storage client used to build endpoints and
	// issue signed requests.
	client Client
	// auth selects the authentication scheme used when signing requests.
	auth authentication
}

// GetServiceProperties gets the properties of your storage account's table service.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-table-service-properties
func (c *TableServiceClient) GetServiceProperties() (*ServiceProperties, error) {
	// Thin wrapper: the shared implementation on Client does the work.
	return c.client.getServiceProperties(tableServiceName, c.auth)
}

// SetServiceProperties sets the properties of your storage account's table service.
// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-table-service-properties
func (c *TableServiceClient) SetServiceProperties(props ServiceProperties) error {
	// Thin wrapper: the shared implementation on Client does the work.
	return c.client.setServiceProperties(props, tableServiceName, c.auth)
}
diff --git a/vendor/github.com/Azure/azure-storage-go/util.go b/vendor/github.com/Azure/azure-storage-go/util.go
new file mode 100644
index 000000000..57ca1b6d9
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-go/util.go
@@ -0,0 +1,85 @@
+package storage
+
+import (
+ "bytes"
+ "crypto/hmac"
+ "crypto/sha256"
+ "encoding/base64"
+ "encoding/xml"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "reflect"
+ "time"
+)
+
// computeHmac256 returns the base64-encoded HMAC-SHA256 of message, keyed
// with the client's account key.
func (c Client) computeHmac256(message string) string {
	h := hmac.New(sha256.New, c.accountKey)
	h.Write([]byte(message))
	return base64.StdEncoding.EncodeToString(h.Sum(nil))
}
+
// currentTimeRfc1123Formatted returns the current UTC time in the HTTP date
// format (RFC 1123 with "GMT", e.g. "Mon, 02 Jan 2006 15:04:05 GMT").
func currentTimeRfc1123Formatted() string {
	return timeRfc1123Formatted(time.Now().UTC())
}

// timeRfc1123Formatted formats t using http.TimeFormat (RFC 1123 with GMT).
func timeRfc1123Formatted(t time.Time) string {
	return t.Format(http.TimeFormat)
}
+
+func mergeParams(v1, v2 url.Values) url.Values {
+ out := url.Values{}
+ for k, v := range v1 {
+ out[k] = v
+ }
+ for k, v := range v2 {
+ vals, ok := out[k]
+ if ok {
+ vals = append(vals, v...)
+ out[k] = vals
+ } else {
+ out[k] = v
+ }
+ }
+ return out
+}
+
+func prepareBlockListRequest(blocks []Block) string {
+ s := `<?xml version="1.0" encoding="utf-8"?><BlockList>`
+ for _, v := range blocks {
+ s += fmt.Sprintf("<%s>%s</%s>", v.Status, v.ID, v.Status)
+ }
+ s += `</BlockList>`
+ return s
+}
+
+func xmlUnmarshal(body io.Reader, v interface{}) error {
+ data, err := ioutil.ReadAll(body)
+ if err != nil {
+ return err
+ }
+ return xml.Unmarshal(data, v)
+}
+
+func xmlMarshal(v interface{}) (io.Reader, int, error) {
+ b, err := xml.Marshal(v)
+ if err != nil {
+ return nil, 0, err
+ }
+ return bytes.NewReader(b), len(b), nil
+}
+
// headersFromStruct collects the non-empty string fields of v that carry a
// `header:"..."` struct tag into a header-name -> value map. Fields without
// the tag, or whose value is empty, are skipped. NOTE(review): Value.String
// only yields real contents for string-kinded fields, so v is expected to
// use string fields for all tagged headers.
func headersFromStruct(v interface{}) map[string]string {
	out := make(map[string]string)
	rv := reflect.ValueOf(v)
	rt := rv.Type()
	for i := 0; i < rt.NumField(); i++ {
		name := rt.Field(i).Tag.Get("header")
		if name == "" {
			continue
		}
		if val := rv.Field(i).String(); val != "" {
			out[name] = val
		}
	}
	return out
}
diff --git a/vendor/github.com/Azure/azure-storage-go/version.go b/vendor/github.com/Azure/azure-storage-go/version.go
new file mode 100644
index 000000000..c25fe3371
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-go/version.go
@@ -0,0 +1,5 @@
+package storage
+
var (
	// sdkVersion identifies the release of this storage package.
	sdkVersion = "0.1.0"
)