diff --git a/.docs/about/release-notes.md b/.docs/about/release-notes.md index b7168b73..232456b7 100644 --- a/.docs/about/release-notes.md +++ b/.docs/about/release-notes.md @@ -3,6 +3,18 @@ Release early, release often --- +## Version 0.3.3 (2016/11/29) +This release includes some minor fixes as well as a new way to query +attachment information about one or more volumes. + +### Enhancements +* Enhanced attachment querying ([#313](https://github.com/codedellemc/libstorage/pull/313), [#316](https://github.com/codedellemc/libstorage/pull/316), [#319](https://github.com/codedellemc/libstorage/pull/319), [#330](https://github.com/codedellemc/libstorage/pull/330), [#331](https://github.com/codedellemc/libstorage/pull/331), [#332](https://github.com/codedellemc/libstorage/pull/332), [#334](https://github.com/codedellemc/libstorage/pull/334), +[#335](https://github.com/codedellemc/libstorage/pull/335), [#336](https://github.com/codedellemc/libstorage/pull/336), [#343](https://github.com/codedellemc/libstorage/pull/343)) + +### Bug Fixes +* AWS Config Support ([#314](https://github.com/codedellemc/libstorage/pull/314)) +* VirtualBox Executor Fix ([#325](https://github.com/codedellemc/libstorage/pull/325)) + ## Version 0.3.2 (2016/10/18) This release updates the project to reflect its new location at github.com/codedellemc. diff --git a/.docs/user-guide/storage-providers.md b/.docs/user-guide/storage-providers.md index 56ddbec4..25ebc166 100644 --- a/.docs/user-guide/storage-providers.md +++ b/.docs/user-guide/storage-providers.md @@ -47,7 +47,7 @@ The following items are configurable specific to this driver. be created and removed. * `nfsHost` is the configurable NFS server hostname or IP (often a SmartConnect name) used when mounting exports - * `dataSubnet` is the subnet the REX-Ray driver is running on. This is used + * `dataSubnet` is the subnet the REX-Ray driver is running on. This is used for the NFS export host ACLs. ### Optional Parameters @@ -429,9 +429,13 @@ example see the `Examples` section. efs: accessKey: XXXXXXXXXX secretKey: XXXXXXXXXX - securityGroups: sg-XXXXXXX,sg-XXXXXX0,sg-XXXXXX1 - region: us-east-1 - tag: test + securityGroups: + - sg-XXXXXXX + - sg-XXXXXX0 + - sg-XXXXXX1 + region: us-east-1 + tag: test + disableSessionCache: false ``` #### Configuration Notes @@ -445,6 +449,9 @@ documentation for list of supported regions. If no security groups are provided the default VPC security group is used. - `tag` is used to partition multiple services within single AWS account and is used as prefix for EFS names in format `[tagprefix]/volumeName`. +- `disableSessionCache` is a flag that can be used to disable the session cache. +If the session cache is disabled then a new AWS connection is established with +every API call. 
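To make the `disableSessionCache` semantics concrete, here is a minimal sketch of the caching scheme the EFS driver's `Login` method implements later in this diff. Names are simplified and `awsService`/`getService` are hypothetical stand-ins for the driver's real types; the real cache key also hashes the endpoint.

```go
package main

import (
	"crypto/md5"
	"fmt"
)

type awsService struct{} // stand-in for the driver's EC2+EFS client pair

var sessions = map[string]*awsService{}

// getService sketches the EFS driver's Login caching: the cache key is an
// MD5 hash over the connection parameters, and disabling the cache means
// every call constructs a fresh AWS connection.
func getService(region, accessKey string, disableSessionCache bool) *awsService {
	key := fmt.Sprintf("%x", md5.Sum([]byte(region+accessKey)))
	if !disableSessionCache {
		if svc, ok := sessions[key]; ok {
			return svc // reuse the cached connection
		}
	}
	svc := &awsService{} // the real code dials EC2/EFS here
	if !disableSessionCache {
		sessions[key] = svc // cache for subsequent API calls
	}
	return svc
}
```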
For information on the equivalent environment variable and CLI flag names please see the section on how non top-level configuration properties are @@ -500,7 +507,10 @@ libstorage: efs: accessKey: XXXXXXXXXX secretKey: XXXXXXXXXX - securityGroups: sg-XXXXXXX,sg-XXXXXX0,sg-XXXXXX1 + securityGroups: + - sg-XXXXXXX + - sg-XXXXXX0 + - sg-XXXXXX1 region: us-east-1 tag: test ``` diff --git a/.gitignore b/.gitignore index 884f14b4..db922810 100644 --- a/.gitignore +++ b/.gitignore @@ -6,6 +6,7 @@ *.a *.d *.out +got libstorage.paw .site/ site/ @@ -22,7 +23,9 @@ cli/semaphores/signal cli/semaphores/unlink api/api_generated.go api/server/executors/executors_generated.go -drivers/storage/ebs/tests/.vagrant +drivers/storage/ebs/tests/.vagrant/ +examples/ec2/vagrant/.vagrant/ + # Created by https://www.gitignore.io diff --git a/.travis.yml b/.travis.yml index 4866fc0b..1281d7ea 100644 --- a/.travis.yml +++ b/.travis.yml @@ -3,7 +3,7 @@ go_import_path: github.com/codedellemc/libstorage language: go go: - 1.6.3 - - 1.7.1 + - 1.7.3 - tip os: diff --git a/README.md b/README.md index 01cc6e28..a9c12702 100644 --- a/README.md +++ b/README.md @@ -100,3 +100,6 @@ a portable format such as JSON. ## Documentation [![Docs](https://readthedocs.org/projects/libstorage/badge/?version=latest)](http://libstorage.readthedocs.org) The `libStorage` documentation is available at [libstorage.rtfd.org](http://libstorage.rtfd.org). + +## License +The `libStorage` project is licensed to you under the Apache License, Version 2.0. Please reference the LICENSE file for additional information. diff --git a/VERSION b/VERSION index d15723fb..1c09c74e 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.3.2 +0.3.3 diff --git a/api/registry/registry_integration.go b/api/registry/registry_integration.go index 1d51ad71..3c1f0647 100644 --- a/api/registry/registry_integration.go +++ b/api/registry/registry_integration.go @@ -162,7 +162,7 @@ func (d *idm) Mount( func (d *idm) Unmount( ctx types.Context, volumeID, volumeName string, - opts types.Store) error { + opts types.Store) (*types.Volume, error) { fields := log.Fields{ "volumeName": volumeName, @@ -180,7 +180,7 @@ func (d *idm) Unmount( } d.decCount(volumeName) - return nil + return nil, nil } func (d *idm) Path( diff --git a/api/server/handlers/handlers_query_params.go b/api/server/handlers/handlers_query_params.go index 19892dac..6bbc610f 100644 --- a/api/server/handlers/handlers_query_params.go +++ b/api/server/handlers/handlers_query_params.go @@ -49,7 +49,9 @@ func (h *queryParamsHandler) Handle( if len(v[0]) == 0 { store.Set(k, true) } else { - if b, err := strconv.ParseBool(v[0]); err == nil { + if i, err := strconv.ParseInt(v[0], 10, 64); err == nil { + store.Set(k, i) + } else if b, err := strconv.ParseBool(v[0]); err == nil { store.Set(k, b) } else { store.Set(k, v[0]) diff --git a/api/server/router/volume/volume_routes.go b/api/server/router/volume/volume_routes.go index 05da1446..99bf2174 100644 --- a/api/server/router/volume/volume_routes.go +++ b/api/server/router/volume/volume_routes.go @@ -5,6 +5,7 @@ import ( "strings" "sync" + log "github.com/Sirupsen/logrus" "github.com/akutz/goof" "github.com/codedellemc/libstorage/api/context" @@ -127,6 +128,98 @@ func (r *router) volumesForService( http.StatusOK) } +func handleVolAttachments( + ctx types.Context, + lf log.Fields, + iid *types.InstanceID, + vol *types.Volume, + attachments types.VolumeAttachmentsTypes) bool { + + if attachments == 0 { + vol.Attachments = nil + return true + } + + if lf == nil { + lf = log.Fields{} 
+	}
+
+	f := func(s types.VolumeAttachmentStates) bool {
+		lf["attachmentState"] = s
+		// if the volume has no attachments and the mask indicates that
+		// only attached volumes should be returned then omit this volume
+		if s == types.VolumeAvailable &&
+			attachments.Attached() &&
+			!attachments.Unattached() {
+			ctx.WithFields(lf).Debug("omitting unattached volume")
+			return false
+		}
+		// if the volume has attachments and the mask indicates that
+		// only unattached volumes should be returned then omit this volume
+		if (s == types.VolumeAttached || s == types.VolumeUnavailable) &&
+			!attachments.Attached() &&
+			attachments.Unattached() {
+			ctx.WithFields(lf).Debug("omitting attached volume")
+			return false
+		}
+		ctx.WithFields(lf).Debug("including volume")
+		return true
+	}
+
+	// if the attachment state has already been set by the driver then
+	// use it to determine whether the volume should be omitted
+	if vol.AttachmentState > 0 {
+		ctx.WithFields(lf).Debug(
+			"deferring to driver-specified attachment state")
+		return f(vol.AttachmentState)
+	}
+
+	ctx.WithFields(lf).Debug("manually calculating attachment state")
+
+	// if only the requesting instance's attachments are requested then
+	// filter the volume's attachments list
+	if attachments.Mine() {
+		atts := []*types.VolumeAttachment{}
+		for _, a := range vol.Attachments {
+			alf := log.Fields{
+				"attDeviceName": a.DeviceName,
+				"attMountPoint": a.MountPoint,
+				"attVolumeID":   a.VolumeID,
+			}
+			if strings.EqualFold(iid.ID, a.InstanceID.ID) {
+				atts = append(atts, a)
+				ctx.WithFields(lf).WithFields(alf).Debug(
+					"including volume attachment")
+			} else {
+				ctx.WithFields(lf).WithFields(alf).Debug(
+					"omitting volume attachment")
+			}
+		}
+		vol.Attachments = atts
+		ctx.WithFields(lf).Debug("included volume attached to instance")
+	}
+
+	// determine a volume's attachment state
+	if len(vol.Attachments) == 0 {
+		vol.AttachmentState = types.VolumeAvailable
+	} else {
+		vol.AttachmentState = types.VolumeUnavailable
+		if iid != nil {
+			for _, a := range vol.Attachments {
+				if a.InstanceID != nil &&
+					strings.EqualFold(iid.ID, a.InstanceID.ID) {
+					vol.AttachmentState = types.VolumeAttached
+					break
+				}
+			}
+		}
+	}
+
+	// use the ascertained attachment state to determine whether or not the
+	// volume should be omitted
+	return f(vol.AttachmentState)
+}
+
 func getFilteredVolumes(
 	ctx types.Context,
 	req *http.Request,
@@ -147,6 +240,8 @@
 		return nil, utils.NewMissingInstanceIDError(storSvc.Name())
 	}
 
+	ctx.WithField("attachments", opts.Attachments).Debug("querying volumes")
+
 	objs, err := storSvc.Driver().Volumes(ctx, opts)
 	if err != nil {
 		return nil, err
 	}
@@ -160,34 +255,26 @@
 
 	for _, obj := range objs {
 
+		lf := log.Fields{
+			"attachments": opts.Attachments,
+			"volumeID":    obj.ID,
+			"volumeName":  obj.Name,
+		}
+
 		if filterOp == types.FilterEqualityMatch && filterLeft == "name" {
+			ctx.WithFields(lf).Debug("checking name filter")
 			if !strings.EqualFold(obj.Name, filterRight) {
+				ctx.WithFields(lf).Debug("omitted volume due to name filter")
 				continue
 			}
 		}
 
-		// if only the requesting instance's attachments are requested then
-		// filter the volume's attachments list
-		if opts.Attachments.Mine() {
-			atts := []*types.VolumeAttachment{}
-			for _, a := range obj.Attachments {
-				if strings.EqualFold(iid.ID, a.InstanceID.ID) {
-					atts = append(atts, a)
-				}
-			}
-			obj.Attachments = atts
-		}
-
-		if opts.Attachments.Attached() && len(obj.Attachments) == 0 {
-			continue
-		}
-
-		if opts.Attachments.Unattached() &&
len(obj.Attachments) > 0 { + if !handleVolAttachments(ctx, lf, iid, obj, opts.Attachments) { continue } if OnVolume != nil { - ctx.Debug("invoking OnVolume handler") + ctx.WithFields(lf).Debug("invoking OnVolume handler") ok, err := OnVolume(ctx, req, store, obj) if err != nil { return nil, err @@ -212,8 +299,8 @@ func (r *router) volumeInspect( attachments := store.GetAttachments() service := context.MustService(ctx) - if _, ok := context.InstanceID(ctx); !ok && - attachments.RequiresInstanceID() { + iid, iidOK := context.InstanceID(ctx) + if !iidOK && attachments.RequiresInstanceID() { return utils.NewMissingInstanceIDError(service.Name()) } @@ -239,10 +326,12 @@ func (r *router) volumeInspect( return nil, err } - volID := strings.ToLower(store.GetString("volumeID")) + volID := store.GetString("volumeID") for _, v := range vols { - if strings.ToLower(v.Name) == volID { - + if strings.EqualFold(v.Name, volID) { + if !handleVolAttachments(ctx, nil, iid, v, attachments) { + return nil, utils.NewNotFoundError(volID) + } if OnVolume != nil { ok, err := OnVolume(ctx, req, store, v) if err != nil { @@ -273,6 +362,10 @@ func (r *router) volumeInspect( return nil, err } + if !handleVolAttachments(ctx, nil, iid, v, attachments) { + return nil, utils.NewNotFoundError(v.ID) + } + if OnVolume != nil { ok, err := OnVolume(ctx, req, store, v) if err != nil { @@ -334,6 +427,9 @@ func (r *router) volumeCreate( } } + if v.AttachmentState == 0 { + v.AttachmentState = types.VolumeAvailable + } return v, nil } @@ -378,6 +474,9 @@ func (r *router) volumeCopy( } } + if v.AttachmentState == 0 { + v.AttachmentState = types.VolumeAvailable + } return v, nil } @@ -456,6 +555,10 @@ func (r *router) volumeAttach( } } + if v.AttachmentState == 0 { + v.AttachmentState = types.VolumeAttached + } + return &types.VolumeAttachResponse{ Volume: v, AttachToken: attTokn, @@ -498,7 +601,11 @@ func (r *router) volumeDetach( return nil, err } - if v != nil && OnVolume != nil { + if v == nil { + return nil, nil + } + + if OnVolume != nil { ok, err := OnVolume(ctx, req, store, v) if err != nil { return nil, err @@ -508,6 +615,10 @@ func (r *router) volumeDetach( } } + if v.AttachmentState == 0 { + v.AttachmentState = types.VolumeAvailable + } + return v, nil } @@ -584,7 +695,11 @@ func (r *router) volumeDetachAll( return nil, err } - if v != nil && OnVolume != nil { + if v == nil { + continue + } + + if OnVolume != nil { ok, err := OnVolume(ctx, req, store, v) if err != nil { return nil, err @@ -594,6 +709,10 @@ func (r *router) volumeDetachAll( } } + if v.AttachmentState == 0 { + v.AttachmentState = types.VolumeAvailable + } + volumeMap[v.ID] = v } @@ -664,7 +783,11 @@ func (r *router) volumeDetachAllForService( return nil, err } - if v != nil && OnVolume != nil { + if v == nil { + continue + } + + if OnVolume != nil { ok, err := OnVolume(ctx, req, store, v) if err != nil { return nil, err @@ -674,6 +797,10 @@ func (r *router) volumeDetachAllForService( } } + if v.AttachmentState == 0 { + v.AttachmentState = types.VolumeAvailable + } + reply[v.ID] = v } diff --git a/api/types/types_drivers_integration.go b/api/types/types_drivers_integration.go index 92ed48f6..09a8590a 100644 --- a/api/types/types_drivers_integration.go +++ b/api/types/types_drivers_integration.go @@ -60,7 +60,7 @@ type IntegrationDriver interface { Unmount( ctx Context, volumeID, volumeName string, - opts Store) error + opts Store) (*Volume, error) // Path will return the mounted path of the volumeName or volumeID. 
 	Path(
diff --git a/api/types/types_drivers_storage.go b/api/types/types_drivers_storage.go
index 8aab8c5e..aa40f2c1 100644
--- a/api/types/types_drivers_storage.go
+++ b/api/types/types_drivers_storage.go
@@ -38,20 +38,90 @@ const (
 )
 
 const (
-	// VolumeAttachmentsNone specifies no attachment information is requested.
-	// This is the default value and the same as omitting this parameter
-	// altogether.
-	VolumeAttachmentsNone VolumeAttachmentsTypes = 0
-
-	// VolumeAttachmentsFalse is an alias for VolumeAttachmentsNone.
-	VolumeAttachmentsFalse = VolumeAttachmentsNone
-
-	// VolumeAttachmentsTrue is a mask of
-	// VolumeAttachmentsRequested | VolumeAttachmentsMine |
-	// VolumeAttachmentsDevices | VolumeAttachmentsAttached
-	VolumeAttachmentsTrue = VolumeAttachmentsRequested |
-		VolumeAttachmentsMine | VolumeAttachmentsDevices |
+	// VolAttNone is the default value. This indicates no attachment
+	// information is requested.
+	VolAttNone VolumeAttachmentsTypes = 0
+
+	// VolAttFalse is an alias for VolAttNone.
+	VolAttFalse = VolAttNone
+
+	// VolAttReq requests attachment information for all retrieved volumes.
+	//
+	// Mask: 1
+	VolAttReq = VolumeAttachmentsRequested
+
+	// VolAttReqForInstance requests attachment information for volumes
+	// attached to the instance provided in the instance ID.
+	//
+	// Mask: 1 | 2
+	VolAttReqForInstance = VolAttReq | VolumeAttachmentsMine
+
+	// VolAttReqWithDevMapForInstance requests attachment information for
+	// volumes attached to the instance provided in the instance ID and
+	// performs device mappings where possible.
+	//
+	// Mask: 1 | 2 | 4
+	VolAttReqWithDevMapForInstance = VolAttReqForInstance |
+		VolumeAttachmentsDevices
+
+	// VolAttReqOnlyAttachedVols requests attachment information for all
+	// retrieved volumes and returns only volumes that are attached to some
+	// instance.
+	//
+	// Mask: 1 | 8
+	VolAttReqOnlyAttachedVols = VolAttReq | VolumeAttachmentsAttached
+
+	// VolAttReqOnlyUnattachedVols requests attachment information for
+	// all retrieved volumes and returns only volumes that are not attached to
+	// any instance.
+	//
+	// Mask: 1 | 16
+	VolAttReqOnlyUnattachedVols = VolAttReq | VolumeAttachmentsUnattached
+
+	// VolAttReqOnlyVolsAttachedToInstance requests attachment
+	// information for all retrieved volumes and returns only volumes that are
+	// attached to the instance provided in the instance ID.
+	//
+	// Mask: 1 | 2 | 8
+	VolAttReqOnlyVolsAttachedToInstance = VolAttReqForInstance | VolumeAttachmentsAttached
+
+	// VolAttReqWithDevMapOnlyVolsAttachedToInstance requests attachment
+	// information for all retrieved volumes and returns only volumes that are
+	// attached to the instance provided in the instance ID and performs device
+	// mappings where possible.
+	//
+	// Mask: 1 | 2 | 4 | 8
+	VolAttReqWithDevMapOnlyVolsAttachedToInstance = VolumeAttachmentsDevices |
+		VolAttReqOnlyVolsAttachedToInstance
+
+	// VolAttReqTrue is an alias for
+	// VolAttReqWithDevMapOnlyVolsAttachedToInstance.
+	VolAttReqTrue = VolAttReqWithDevMapOnlyVolsAttachedToInstance
+
+	// VolumeAttachmentsTrue is an alias for VolAttReqTrue.
+	VolumeAttachmentsTrue = VolAttReqTrue
+
+	// VolAttReqOnlyVolsAttachedToInstanceOrUnattachedVols requests attachment
+	// information for all retrieved volumes and returns only volumes that are
+	// attached to the instance provided in the instance ID or are not attached
+	// to any instance at all. tl;dr - Attached To Me or Available
+	//
+	// Mask: 1 | 2 | 8 | 16
+	VolAttReqOnlyVolsAttachedToInstanceOrUnattachedVols = 0 |
+		VolAttReqOnlyVolsAttachedToInstance |
+		VolumeAttachmentsUnattached
+
+	// VolAttReqWithDevMapOnlyVolsAttachedToInstanceOrUnattachedVols requests
+	// attachment information for all retrieved volumes and returns only
+	// volumes that are attached to the instance provided in the instance ID or
+	// are not attached to any instance at all, and performs device mappings
+	// where possible. tl;dr - Attached To Me With Device Mappings or Available
+	//
+	// Mask: 1 | 2 | 4 | 8 | 16
+	VolAttReqWithDevMapOnlyVolsAttachedToInstanceOrUnattachedVols = 0 |
+		VolumeAttachmentsDevices |
+		VolAttReqOnlyVolsAttachedToInstanceOrUnattachedVols
 )
 
 // ParseVolumeAttachmentTypes parses a value into a VolumeAttachmentsTypes
@@ -60,11 +130,6 @@ func ParseVolumeAttachmentTypes(v interface{}) VolumeAttachmentsTypes {
 	switch tv := v.(type) {
 	case VolumeAttachmentsTypes:
 		return tv
-	case bool:
-		if tv {
-			return VolumeAttachmentsTrue
-		}
-		return VolumeAttachmentsRequested
 	case int:
 		return VolumeAttachmentsTypes(tv)
 	case uint:
@@ -92,8 +157,13 @@ func ParseVolumeAttachmentTypes(v interface{}) VolumeAttachmentsTypes {
 		if b, err := strconv.ParseBool(tv); err == nil {
 			return ParseVolumeAttachmentTypes(b)
 		}
+	case bool:
+		if tv {
+			return VolumeAttachmentsTrue
+		}
+		return VolumeAttachmentsRequested
 	}
-	return VolumeAttachmentsNone
+	return VolAttNone
 }
 
 // RequiresInstanceID returns a flag that indicates whether the attachment
@@ -104,7 +174,7 @@ func (v VolumeAttachmentsTypes) RequiresInstanceID() bool {
 
 // Requested returns a flag that indicates attachment information is requested.
 func (v VolumeAttachmentsTypes) Requested() bool {
-	return v&VolumeAttachmentsRequested > 0
+	return v.bitSet(VolumeAttachmentsRequested)
 }
 
 // Mine returns a flag that indicates attachment information should
@@ -112,7 +182,7 @@ func (v VolumeAttachmentsTypes) Requested() bool {
 // instance ID request header. If this bit is set then the instance ID
 // header is required.
 func (v VolumeAttachmentsTypes) Mine() bool {
-	return v&VolumeAttachmentsMine > 0
+	return v.bitSet(VolumeAttachmentsMine)
 }
 
 // Devices returns a flag that indicates an attempt should made to map devices
@@ -120,19 +190,23 @@ func (v VolumeAttachmentsTypes) Mine() bool {
 // attachment information. If this bit is set then the instance ID and
 // local device headers are required.
 func (v VolumeAttachmentsTypes) Devices() bool {
-	return v&VolumeAttachmentsDevices > 0
+	return v.bitSet(VolumeAttachmentsDevices)
 }
 
 // Attached returns a flag that indicates only volumes that are attached should
 // be returned.
 func (v VolumeAttachmentsTypes) Attached() bool {
-	return v&VolumeAttachmentsAttached > 0
+	return v.bitSet(VolumeAttachmentsAttached)
 }
 
 // Unattached returns a flag that indicates only volumes that are unattached
 // should be returned.
 func (v VolumeAttachmentsTypes) Unattached() bool {
-	return v&VolumeAttachmentsUnattached > 0
+	return v.bitSet(VolumeAttachmentsUnattached)
+}
+
+func (v VolumeAttachmentsTypes) bitSet(b VolumeAttachmentsTypes) bool {
+	return v&b == b
 }
 
 // VolumesOpts are options when inspecting a volume.
diff --git a/api/types/types_model.go b/api/types/types_model.go
index 795474f0..dca3bb08 100644
--- a/api/types/types_model.go
+++ b/api/types/types_model.go
@@ -1,5 +1,7 @@
 package types
 
+import "strconv"
+
 // StorageType is the type of storage a driver provides.
 type StorageType string
 
@@ -124,10 +126,57 @@ type Snapshot struct {
 	Fields map[string]string `json:"fields,omitempty" yaml:",omitempty"`
 }
 
+// VolumeAttachmentStates describes the volume's attachment state possibilities.
+type VolumeAttachmentStates int
+
+const (
+	// VolumeAttachmentStateUnknown indicates the driver has set the state,
+	// but it is explicitly unknown and should not be inferred from the list of
+	// attachments alone.
+	VolumeAttachmentStateUnknown VolumeAttachmentStates = 1
+
+	// VolumeAttached indicates the volume is attached to the instance
+	// specified in the API call that requested the volume information.
+	VolumeAttached VolumeAttachmentStates = 2
+
+	// VolumeAvailable indicates the volume is not attached to any instance.
+	VolumeAvailable VolumeAttachmentStates = 3
+
+	// VolumeUnavailable indicates the volume is attached to some instance
+	// other than the one specified in the API call that requested the
+	// volume information.
+	VolumeUnavailable VolumeAttachmentStates = 4
+)
+
+// String returns the string representation of a VolumeAttachmentStates value.
+func (s VolumeAttachmentStates) String() string {
+	switch s {
+	case 0:
+		return "0"
+	case VolumeAttachmentStateUnknown:
+		return "unknown"
+	case VolumeAttached:
+		return "attached"
+	case VolumeAvailable:
+		return "available"
+	case VolumeUnavailable:
+		return "unavailable"
+	}
+	return strconv.Itoa(int(s))
+}
+
 // Volume provides information about a storage volume.
 type Volume struct {
-	// The volume's attachments.
-	Attachments []*VolumeAttachment `json:"attachments,omitempty" yaml:",omitempty"`
+	// Attachments is information about the instances to which the volume
+	// is attached.
+	Attachments []*VolumeAttachment `json:"attachments,omitempty" yaml:"attachments,omitempty"`
+
+	// AttachmentState indicates whether or not a volume is attached. A client
+	// can surmise the same state stored in this field by inspecting a volume's
+	// Attachments field, but this field provides the server a means of doing
+	// that inspection and storing the result so the client does not have to do
+	// so.
+	AttachmentState VolumeAttachmentStates `json:"attachmentState,omitempty" yaml:"attachmentState,omitempty"`
 
 	// The availability zone for which the volume is available.
 	AvailabilityZone string `json:"availabilityZone,omitempty" yaml:"availabilityZone,omitempty"`
@@ -139,17 +188,17 @@ type Volume struct {
 	IOPS int64 `json:"iops,omitempty" yaml:"iops,omitempty"`
 
 	// The name of the volume.
-	Name string `json:"name"`
+	Name string `json:"name" yaml:"name,omitempty"`
 
 	// NetworkName is the name the device is known by in order to discover
 	// locally.
 	NetworkName string `json:"networkName,omitempty" yaml:"networkName,omitempty"`
 
 	// The size of the volume.
-	Size int64 `json:"size,omitempty" yaml:",omitempty"`
+	Size int64 `json:"size,omitempty" yaml:"size,omitempty"`
 
 	// The volume status.
-	Status string `json:"status,omitempty" yaml:",omitempty"`
+	Status string `json:"status,omitempty" yaml:"status,omitempty"`
 
 	// ID is a piece of information that uniquely identifies the volume on
 	// the storage platform to which the volume belongs. A volume ID is not
@@ -157,10 +206,10 @@ type Volume struct {
 	ID string `json:"id" yaml:"id"`
 
 	// The volume type.
-	Type string `json:"type"`
+	Type string `json:"type" yaml:"type"`
 
 	// Fields are additional properties that can be defined for this type.
- Fields map[string]string `json:"fields,omitempty" yaml:",omitempty"` + Fields map[string]string `json:"fields,omitempty" yaml:"fields,omitempty"` } // VolumeName returns the volume's name. diff --git a/api/utils/schema/schema_generated.go b/api/utils/schema/schema_generated.go index 81c10916..e0a2bed5 100644 --- a/api/utils/schema/schema_generated.go +++ b/api/utils/schema/schema_generated.go @@ -31,6 +31,12 @@ const ( "description": "The volume's attachments.", "items": { "$ref": "#/definitions/volumeAttachment" } }, + "attachmentState": { + "type": "number", + "description": "Indicates the volume's attachment state - 0=none,1=unknown,2=attached,3=available,4=unavailable. A volume is marked as attached if attached to the instance specified in the requesting API call. A volume that is attached but not to the requesting instance is marked as unavailable.", + "minimum": 0, + "maximum": 4 + }, "availabilityZone": { "type": "string", "description": "The zone for which the volume is available." diff --git a/drivers/integration/docker/docker.go b/drivers/integration/docker/docker.go index 9d535e6c..e9b48a9d 100644 --- a/drivers/integration/docker/docker.go +++ b/drivers/integration/docker/docker.go @@ -169,7 +169,9 @@ func (d *driver) Mount( "opts": opts}).Info("mounting volume") vol, err := d.volumeInspectByIDOrName( - ctx, volumeID, volumeName, types.VolumeAttachmentsTrue, opts.Opts) + ctx, volumeID, volumeName, + types.VolAttReqWithDevMapOnlyVolsAttachedToInstanceOrUnattachedVols, + opts.Opts) if isErrNotFound(err) && d.volumeCreateImplicit() { var err error if vol, err = d.Create(ctx, volumeName, &types.VolumeCreateOpts{ @@ -224,7 +226,7 @@ func (d *driver) Mount( } vol, err = d.volumeInspectByIDOrName( - ctx, vol.ID, "", types.VolumeAttachmentsTrue, opts.Opts) + ctx, vol.ID, "", types.VolAttReqTrue, opts.Opts) if err != nil { return "", nil, err } @@ -311,7 +313,7 @@ func (d *driver) Mount( func (d *driver) Unmount( ctx types.Context, volumeID, volumeName string, - opts types.Store) error { + opts types.Store) (*types.Volume, error) { ctx.WithFields(log.Fields{ "volumeName": volumeName, @@ -319,24 +321,25 @@ func (d *driver) Unmount( "opts": opts}).Info("unmounting volume") if volumeName == "" && volumeID == "" { - return goof.New("missing volume name or ID") + return nil, goof.New("missing volume name or ID") } vol, err := d.volumeInspectByIDOrName( - ctx, volumeID, volumeName, types.VolumeAttachmentsTrue, opts) + ctx, volumeID, volumeName, + types.VolAttReqWithDevMapOnlyVolsAttachedToInstance, opts) if err != nil { - return err + return nil, err } if len(vol.Attachments) == 0 { - return nil + return nil, nil } client := context.MustClient(ctx) inst, err := client.Storage().InstanceInspect(ctx, utils.NewStore()) if err != nil { - return goof.New("problem getting instance ID") + return nil, goof.New("problem getting instance ID") } var ma *types.VolumeAttachment for _, att := range vol.Attachments { @@ -347,17 +350,17 @@ func (d *driver) Unmount( } if ma == nil { - return goof.New("no attachment found for instance") + return nil, goof.New("no attachment found for instance") } if ma.DeviceName == "" { - return goof.New("no device name found for attachment") + return nil, goof.New("no device name found for attachment") } mounts, err := client.OS().Mounts( ctx, ma.DeviceName, "", opts) if err != nil { - return err + return nil, err } for _, mount := range mounts { @@ -369,24 +372,24 @@ func (d *driver) Unmount( ctx.WithField("mount", mount).Debug("unmounting mount point") err = 
client.OS().Unmount(ctx, mount.MountPoint, opts) if err != nil { - return err + return nil, err } } } - _, err = client.Storage().VolumeDetach(ctx, vol.ID, + vol, err = client.Storage().VolumeDetach(ctx, vol.ID, &types.VolumeDetachOpts{ Force: opts.GetBool("force"), Opts: utils.NewStore(), }) if err != nil { - return err + return nil, err } ctx.WithFields(log.Fields{ "vol": vol}).Info("unmounted and detached volume") - return nil + return vol, nil } // Path will return the mounted path of the volumeName or volumeID. @@ -401,7 +404,7 @@ func (d *driver) Path( "opts": opts}).Info("getting path to volume") vol, err := d.volumeInspectByIDOrName( - ctx, volumeID, volumeName, types.VolumeAttachmentsTrue, opts) + ctx, volumeID, volumeName, types.VolAttReqTrue, opts) if err != nil { return "", err } else if vol == nil { diff --git a/drivers/integration/docker/docker_utils.go b/drivers/integration/docker/docker_utils.go index 0118d827..d86437a0 100644 --- a/drivers/integration/docker/docker_utils.go +++ b/drivers/integration/docker/docker_utils.go @@ -49,27 +49,21 @@ func (d *driver) volumeInspectByIDOrName( var obj *types.Volume if volumeID != "" { var err error - obj, err = d.volumeInspectByID( - ctx, volumeID, types.VolumeAttachmentsTrue, opts) + obj, err = d.volumeInspectByID(ctx, volumeID, attachments, opts) if err != nil { return nil, err } } else { - objs, err := client.Storage().Volumes(ctx, &types.VolumesOpts{ - Attachments: 0}) + objs, err := client.Storage().Volumes( + ctx, &types.VolumesOpts{Attachments: 0}) if err != nil { return nil, err } for _, o := range objs { if strings.EqualFold(volumeName, o.Name) { - if attachments.Requested() { - obj, err = d.volumeInspectByID( - ctx, o.ID, types.VolumeAttachmentsTrue, opts) - if err != nil { - return nil, err - } - } else { - obj = o + obj, err = d.volumeInspectByID(ctx, o.ID, attachments, opts) + if err != nil { + return nil, err } break } diff --git a/drivers/storage/ebs/storage/ebs_storage.go b/drivers/storage/ebs/storage/ebs_storage.go index 1b5b02ea..8ce3328e 100644 --- a/drivers/storage/ebs/storage/ebs_storage.go +++ b/drivers/storage/ebs/storage/ebs_storage.go @@ -332,7 +332,7 @@ func (d *driver) VolumeCreate(ctx types.Context, volumeName string, } // Return the volume created return d.VolumeInspect(ctx, *vol.VolumeId, &types.VolumeInspectOpts{ - Attachments: types.VolumeAttachmentsTrue, + Attachments: types.VolAttReqTrue, }) } @@ -583,7 +583,7 @@ func (d *driver) VolumeAttach( return nil, "", goof.WithError("error getting volume", err) } volumes, convErr := d.toTypesVolume( - ctx, ec2vols, types.VolumeAttachmentsTrue) + ctx, ec2vols, types.VolAttReqTrue) if convErr != nil { return nil, "", goof.WithError( "error converting to types.Volume", convErr) @@ -635,7 +635,7 @@ func (d *driver) VolumeAttach( // Check if successful attach attachedVol, err := d.VolumeInspect( ctx, volumeID, &types.VolumeInspectOpts{ - Attachments: types.VolumeAttachmentsTrue, + Attachments: types.VolAttReqTrue, Opts: opts.Opts, }) if err != nil { @@ -660,7 +660,7 @@ func (d *driver) VolumeDetach( return nil, goof.WithError("error getting volume", err) } volumes, convErr := d.toTypesVolume( - ctx, ec2vols, types.VolumeAttachmentsTrue) + ctx, ec2vols, types.VolAttReqTrue) if convErr != nil { return nil, goof.WithError("error converting to types.Volume", convErr) } @@ -697,7 +697,7 @@ func (d *driver) VolumeDetach( // check if successful detach detachedVol, err := d.VolumeInspect( ctx, volumeID, &types.VolumeInspectOpts{ - Attachments: types.VolumeAttachmentsTrue, 
+ Attachments: types.VolAttReqTrue, Opts: opts.Opts, }) if err != nil { @@ -923,39 +923,51 @@ func (d *driver) toTypesVolume( ctx types.Context, ec2vols []*awsec2.Volume, attachments types.VolumeAttachmentsTypes) ([]*types.Volume, error) { - // Get local devices map from context - ld, ldOK := context.LocalDevices(ctx) - if !ldOK { - return nil, errGetLocDevs + + var ( + ld *types.LocalDevices + ldOK bool + ) + + if attachments.Devices() { + // Get local devices map from context + if ld, ldOK = context.LocalDevices(ctx); !ldOK { + return nil, errGetLocDevs + } } var volumesSD []*types.Volume for _, volume := range ec2vols { + var attachmentsSD []*types.VolumeAttachment - // Leave attachment's device name blank if attachments is false - for _, attachment := range volume.Attachments { - deviceName := "" - if attachments.Devices() { - // Compensate for kernel volume mapping i.e. change "/dev/sda" - // to "/dev/xvda" - deviceName = strings.Replace( - *attachment.Device, "sd", ebsUtils.NextDeviceInfo.Prefix, 1) - // Keep device name if it is found in local devices - if _, ok := ld.DeviceMap[deviceName]; !ok { - deviceName = "" + if attachments.Requested() { + // Leave attachment's device name blank if attachments is false + for _, attachment := range volume.Attachments { + deviceName := "" + if attachments.Devices() { + // Compensate for kernel volume mapping i.e. change + // "/dev/sda" to "/dev/xvda" + deviceName = strings.Replace( + *attachment.Device, "sd", + ebsUtils.NextDeviceInfo.Prefix, 1) + // Keep device name if it is found in local devices + if _, ok := ld.DeviceMap[deviceName]; !ok { + deviceName = "" + } } + attachmentSD := &types.VolumeAttachment{ + VolumeID: *attachment.VolumeId, + InstanceID: &types.InstanceID{ + ID: *attachment.InstanceId, + Driver: d.Name(), + }, + DeviceName: deviceName, + Status: *attachment.State, + } + attachmentsSD = append(attachmentsSD, attachmentSD) } - attachmentSD := &types.VolumeAttachment{ - VolumeID: *attachment.VolumeId, - InstanceID: &types.InstanceID{ - ID: *attachment.InstanceId, - Driver: d.Name(), - }, - DeviceName: deviceName, - Status: *attachment.State, - } - attachmentsSD = append(attachmentsSD, attachmentSD) } + name := d.getName(volume.Tags) volumeSD := &types.Volume{ Name: name, @@ -967,6 +979,7 @@ func (d *driver) toTypesVolume( Size: *volume.Size, Attachments: attachmentsSD, } + // Some volume types have no IOPS, so we get nil in volume.Iops if volume.Iops != nil { volumeSD.IOPS = *volume.Iops diff --git a/drivers/storage/ebs/tests/ebs_test.go b/drivers/storage/ebs/tests/ebs_test.go index 15f2f9ed..a527b3fe 100644 --- a/drivers/storage/ebs/tests/ebs_test.go +++ b/drivers/storage/ebs/tests/ebs_test.go @@ -465,7 +465,7 @@ func volumeInspectAttached( log.WithField("volumeID", volumeID).Info("inspecting volume") reply, err := client.API().VolumeInspect( nil, ebs.Name, volumeID, - types.VolumeAttachmentsTrue) + types.VolAttReqTrue) assert.NoError(t, err) if err != nil { @@ -483,7 +483,7 @@ func volumeInspectDetached( log.WithField("volumeID", volumeID).Info("inspecting volume") reply, err := client.API().VolumeInspect( nil, ebs.Name, volumeID, - types.VolumeAttachmentsTrue) + types.VolAttReqTrue) assert.NoError(t, err) if err != nil { diff --git a/drivers/storage/efs/efs.go b/drivers/storage/efs/efs.go index ee1eddb3..deaecae5 100644 --- a/drivers/storage/efs/efs.go +++ b/drivers/storage/efs/efs.go @@ -9,6 +9,12 @@ const ( // Name is the provider's name. 
Name = "efs" + // TagDelimiter separates tags from volume or snapshot names + TagDelimiter = "/" + + // DefaultMaxRetries is the max number of times to retry failed operations + DefaultMaxRetries = 10 + // InstanceIDFieldRegion is the key to retrieve the region value from the // InstanceID Field map. InstanceIDFieldRegion = "region" @@ -16,15 +22,97 @@ const ( // InstanceIDFieldAvailabilityZone is the key to retrieve the availability // zone value from the InstanceID Field map. InstanceIDFieldAvailabilityZone = "availabilityZone" + + // InstanceIDFieldSecurityGroups is the key to retrieve the default security + // group value from the InstanceID Field map. + InstanceIDFieldSecurityGroups = "securityGroups" + + // AccessKey is a key constant. + AccessKey = "accessKey" + + // SecretKey is a key constant. + SecretKey = "secretKey" + + // Region is a key constant. + Region = "region" + + // SecurityGroups is a key constant. + SecurityGroups = "securityGroups" + + // Endpoint is a key constant. + Endpoint = "endpoint" + + // EndpointEC2 is a key constant. + EndpointEC2 = "endpointEC2" + + // EndpointFormat is a key constant. + EndpointFormat = "endpointFormat" + + // EndpointEC2Format is a key constant. + EndpointEC2Format = "endpointEC2Format" + + // MaxRetries is a key constant. + MaxRetries = "maxRetries" + + // Tag is a key constant. + Tag = "tag" + + // DisableSessionCache is a key constant. + DisableSessionCache = "disableSessionCache" +) + +const ( + // ConfigEFS is a config key. + ConfigEFS = Name + + // ConfigEFSAccessKey is a config key. + ConfigEFSAccessKey = ConfigEFS + "." + AccessKey + + // ConfigEFSSecretKey is a config key. + ConfigEFSSecretKey = ConfigEFS + "." + SecretKey + + // ConfigEFSRegion is a config key. + ConfigEFSRegion = ConfigEFS + "." + Region + + // ConfigEFSSecGroups is a config key. + ConfigEFSSecGroups = ConfigEFS + "." + SecurityGroups + + // ConfigEFSEndpoint is a config key. + ConfigEFSEndpoint = ConfigEFS + "." + Endpoint + + // ConfigEFSEndpointEC2 is a config key. + ConfigEFSEndpointEC2 = ConfigEFS + "." + EndpointEC2 + + // ConfigEFSEndpointFormat is a config key. + ConfigEFSEndpointFormat = ConfigEFS + "." + EndpointFormat + + // ConfigEFSEndpointEC2Format is a config key. + ConfigEFSEndpointEC2Format = ConfigEFS + "." + EndpointEC2Format + + // ConfigEFSMaxRetries is a config key. + ConfigEFSMaxRetries = ConfigEFS + "." + MaxRetries + + // ConfigEFSTag is a config key. + ConfigEFSTag = ConfigEFS + "." + Tag + + // ConfigEFSDisableSessionCache is a config key. + ConfigEFSDisableSessionCache = ConfigEFS + "." 
+ DisableSessionCache ) func init() { r := gofigCore.NewRegistration("EFS") - r.Key(gofig.String, "", "", "", "efs.accessKey") - r.Key(gofig.String, "", "", "", "efs.secretKey") - r.Key(gofig.String, "", "", - "Comma separated security group ids", "efs.securityGroups") - r.Key(gofig.String, "", "", "AWS region", "efs.region") - r.Key(gofig.String, "", "", "Tag prefix for EFS naming", "efs.tag") + r.Key(gofig.String, "", "", "AWS access key", ConfigEFSAccessKey) + r.Key(gofig.String, "", "", "AWS secret key", ConfigEFSSecretKey) + r.Key(gofig.String, "", "", "List of security groups", ConfigEFSSecGroups) + r.Key(gofig.String, "", "", "AWS region", ConfigEFSRegion) + r.Key(gofig.String, "", "", "AWS EFS endpoint", ConfigEFSEndpoint) + r.Key(gofig.String, "", "", "AWS EC2 endpoint", ConfigEFSEndpointEC2) + r.Key(gofig.String, "", `elasticfilesystem.%s.amazonaws.com`, + "AWS EFS endpoint format", ConfigEFSEndpointFormat) + r.Key(gofig.String, "", `ec2.%s.amazonaws.com`, + "AWS EC2 endpoint format", ConfigEFSEndpointEC2Format) + r.Key(gofig.String, "", "", "Tag prefix for EFS naming", ConfigEFSTag) + r.Key(gofig.Bool, "", false, + "A flag that disables the session cache", ConfigEFSDisableSessionCache) gofigCore.Register(r) } diff --git a/drivers/storage/efs/executor/efs_executor.go b/drivers/storage/efs/executor/efs_executor.go index 9ea49ee0..c36ac2d0 100644 --- a/drivers/storage/efs/executor/efs_executor.go +++ b/drivers/storage/efs/executor/efs_executor.go @@ -4,13 +4,10 @@ import ( "bufio" "fmt" "io" - "io/ioutil" - "net/http" "os" "strings" gofig "github.com/akutz/gofig/types" - "github.com/akutz/goof" "github.com/codedellemc/libstorage/api/registry" "github.com/codedellemc/libstorage/api/types" @@ -20,8 +17,7 @@ import ( // driver is the storage executor for the efs storage driver. type driver struct { - config gofig.Config - subnetResolver SubnetResolver + config gofig.Config } const ( @@ -34,9 +30,7 @@ func init() { } func newDriver() types.StorageExecutor { - return &driver{ - subnetResolver: NewAwsVpcSubnetResolver(), - } + return &driver{} } func (d *driver) Init(ctx types.Context, config gofig.Config) error { @@ -67,18 +61,7 @@ func InstanceID() (*types.InstanceID, error) { func (d *driver) InstanceID( ctx types.Context, opts types.Store) (*types.InstanceID, error) { - - subnetID, err := d.subnetResolver.ResolveSubnet() - if err != nil { - return nil, goof.WithError("no ec2metadata subnet id", err) - } - - iid := &types.InstanceID{Driver: efs.Name} - if err := iid.MarshalMetadata(subnetID); err != nil { - return nil, err - } - - return iid, nil + return efsUtils.InstanceID(ctx) } func (d *driver) NextDevice( @@ -157,51 +140,3 @@ func parseInfoFile(r io.Reader) ([]*types.MountInfo, error) { } return out, nil } - -// SubnetResolver defines interface that can resolve subnet from environment -type SubnetResolver interface { - ResolveSubnet() (string, error) -} - -// AwsVpcSubnetResolver is thin interface that resolves instance subnet from -// ec2metadata service. This helper is used instead of bringing AWS SDK to -// executor on purpose to keep executor dependencies minimal. 
-type AwsVpcSubnetResolver struct { - ec2MetadataIPAddress string -} - -// ResolveSubnet determines VPC subnet id on running AWS instance -func (r *AwsVpcSubnetResolver) ResolveSubnet() (string, error) { - resp, err := http.Get(r.getURL("mac")) - if err != nil { - return "", err - } - mac, err := ioutil.ReadAll(resp.Body) - resp.Body.Close() - if err != nil { - return "", err - } - - resp, err = http.Get(r.getURL(fmt.Sprintf("network/interfaces/macs/%s/subnet-id", mac))) - if err != nil { - return "", err - } - subnetID, err := ioutil.ReadAll(resp.Body) - resp.Body.Close() - if err != nil { - return "", err - } - - return string(subnetID), nil -} - -func (r *AwsVpcSubnetResolver) getURL(path string) string { - return fmt.Sprintf("http://%s/latest/meta-data/%s", r.ec2MetadataIPAddress, path) -} - -// NewAwsVpcSubnetResolver creates AwsVpcSubnetResolver for default AWS endpoint -func NewAwsVpcSubnetResolver() *AwsVpcSubnetResolver { - return &AwsVpcSubnetResolver{ - ec2MetadataIPAddress: "169.254.169.254", - } -} diff --git a/drivers/storage/efs/storage/efs_storage.go b/drivers/storage/efs/storage/efs_storage.go index 1655f275..321cfc7f 100644 --- a/drivers/storage/efs/storage/efs_storage.go +++ b/drivers/storage/efs/storage/efs_storage.go @@ -3,7 +3,9 @@ package storage import ( "crypto/md5" "fmt" + "hash" "strings" + "sync" "time" log "github.com/Sirupsen/logrus" @@ -16,6 +18,7 @@ import ( "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" "github.com/aws/aws-sdk-go/aws/ec2metadata" "github.com/aws/aws-sdk-go/aws/session" + awsec2 "github.com/aws/aws-sdk-go/service/ec2" awsefs "github.com/aws/aws-sdk-go/service/efs" "github.com/codedellemc/libstorage/api/context" @@ -30,8 +33,17 @@ const ( // Driver represents a EFS driver implementation of StorageDriver type driver struct { - config gofig.Config - awsCreds *credentials.Credentials + config gofig.Config + region *string + endpoint *string + endpointFormat string + endpointEC2 *string + endpointEC2Format string + maxRetries *int + tag string + accessKey string + secGroups []string + disableSessionCache bool } func init() { @@ -51,36 +63,244 @@ func (d *driver) Name() string { func (d *driver) Init(ctx types.Context, config gofig.Config) error { d.config = config - fields := log.Fields{ - "accessKey": d.accessKey(), - "secretKey": d.secretKey(), - "region": d.region(), - "tag": d.tag(), + fields := log.Fields{} + + d.accessKey = d.getAccessKey() + fields["accessKey"] = d.accessKey + + d.tag = d.getTag() + fields["tag"] = d.tag + + d.secGroups = d.getSecurityGroups() + fields["securityGroups"] = d.secGroups + + d.disableSessionCache = d.getDisableSessionCache() + fields["disableSessionCache"] = d.disableSessionCache + + if v := d.getRegion(); v != "" { + d.region = &v + fields["region"] = v + } + if v := d.getEndpoint(); v != "" { + d.endpoint = &v + fields["endpoint"] = v + } + d.endpointFormat = d.getEndpointFormat() + fields["endpointFormat"] = d.endpointFormat + if v := d.getEndpointEC2(); v != "" { + d.endpointEC2 = &v + fields["endpointEC2"] = v + } + d.endpointEC2Format = d.getEndpointEC2Format() + fields["endpointEC2Format"] = d.endpointEC2Format + maxRetries := d.getMaxRetries() + d.maxRetries = &maxRetries + fields["maxRetries"] = maxRetries + + log.WithFields(fields).Info("storage driver initialized") + return nil +} + +const cacheKeyC = "cacheKey" + +var ( + sessions = map[string]*awsService{} + sessionsL = &sync.Mutex{} +) + +func writeHkey(h hash.Hash, ps *string) { + if ps == nil { + return + } + h.Write([]byte(*ps)) +} + 
+func (d *driver) Login(ctx types.Context) (interface{}, error) {
+	sessionsL.Lock()
+	defer sessionsL.Unlock()
+
+	var (
+		endpoint    *string
+		endpointEC2 *string
+		ckey        string
+		hkey        = md5.New()
+		akey        = d.accessKey
+		region      = d.mustRegion(ctx)
+	)
+
+	if region != nil && d.endpointFormat != "" {
+		szEndpoint := fmt.Sprintf(d.endpointFormat, *region)
+		endpoint = &szEndpoint
+	} else {
+		endpoint = d.endpoint
 	}
 
-	if d.accessKey() == "" {
-		fields["accessKey"] = ""
+	if region != nil && d.endpointEC2Format != "" {
+		szEndpoint := fmt.Sprintf(d.endpointEC2Format, *region)
+		endpointEC2 = &szEndpoint
 	} else {
-		fields["accessKey"] = "******"
+		endpointEC2 = d.endpointEC2
+	}
+
+	if !d.disableSessionCache {
+		writeHkey(hkey, region)
+		writeHkey(hkey, endpoint)
+		writeHkey(hkey, &akey)
+		ckey = fmt.Sprintf("%x", hkey.Sum(nil))
+
+		// if the session is cached then return it
+		if svc, ok := sessions[ckey]; ok {
+			ctx.WithField(cacheKeyC, ckey).Debug("using cached efs service")
+			return svc, nil
+		}
 	}
 
-	if d.secretKey() == "" {
-		fields["secretKey"] = ""
+	var (
+		skey   = d.getSecretKey()
+		fields = map[string]interface{}{
+			efs.AccessKey: akey,
+			efs.Tag:       d.tag,
+			cacheKeyC:     ckey,
+		}
+	)
+
+	if skey == "" {
+		fields[efs.SecretKey] = ""
 	} else {
-		fields["secretKey"] = "******"
+		fields[efs.SecretKey] = "******"
+	}
+	if region != nil {
+		fields[efs.Region] = *region
+	}
+	if endpoint != nil {
+		fields[efs.Endpoint] = *endpoint
+	}
+	if endpointEC2 != nil {
+		fields[efs.EndpointEC2] = *endpointEC2
 	}
 
-	d.awsCreds = credentials.NewChainCredentials(
-		[]credentials.Provider{
-			&credentials.StaticProvider{Value: credentials.Value{AccessKeyID: d.accessKey(), SecretAccessKey: d.secretKey()}},
-			&credentials.EnvProvider{},
-			&credentials.SharedCredentialsProvider{},
-			&ec2rolecreds.EC2RoleProvider{
-				Client: ec2metadata.New(session.New()),
+	ctx.WithFields(fields).Debug("efs service connection attempt")
+	sess := session.New()
+
+	var (
+		awsLogger   = &awsLogger{ctx: ctx}
+		awsLogLevel = aws.LogOff
+	)
+	if ll, ok := context.GetLogLevel(ctx); ok {
+		switch ll {
+		case log.DebugLevel:
+			awsLogger.lvl = log.DebugLevel
+			awsLogLevel = aws.LogDebugWithHTTPBody
+		case log.InfoLevel:
+			awsLogger.lvl = log.InfoLevel
+			awsLogLevel = aws.LogDebug
+		}
+	}
+
+	configEFS := &aws.Config{
+		Region:     region,
+		Endpoint:   endpoint,
+		MaxRetries: d.maxRetries,
+		Credentials: credentials.NewChainCredentials(
+			[]credentials.Provider{
+				&credentials.StaticProvider{
+					Value: credentials.Value{
+						AccessKeyID:     akey,
+						SecretAccessKey: skey,
+					},
+				},
+				&credentials.EnvProvider{},
+				&credentials.SharedCredentialsProvider{},
+				&ec2rolecreds.EC2RoleProvider{
+					Client: ec2metadata.New(sess),
+				},
 			},
-		})
+		),
+		Logger:   awsLogger,
+		LogLevel: aws.LogLevel(awsLogLevel),
+	}
+
+	configEC2 := &aws.Config{
+		Region:     region,
+		Endpoint:   endpointEC2,
+		MaxRetries: d.maxRetries,
+		Credentials: credentials.NewChainCredentials(
+			[]credentials.Provider{
+				&credentials.StaticProvider{
+					Value: credentials.Value{
+						AccessKeyID:     akey,
+						SecretAccessKey: skey,
+					},
+				},
+				&credentials.EnvProvider{},
+				&credentials.SharedCredentialsProvider{},
+				&ec2rolecreds.EC2RoleProvider{
+					Client: ec2metadata.New(sess),
+				},
+			},
+		),
+		Logger:   awsLogger,
+		LogLevel: aws.LogLevel(awsLogLevel),
+	}
 
-	ctx.WithFields(fields).Info("storage driver initialized")
+	svc := &awsService{
+		awsec2.New(sess, configEC2),
+		awsefs.New(sess, configEFS),
+	}
+	ctx.WithFields(fields).Info("efs service connection created")
+
+	if !d.disableSessionCache {
+		sessions[ckey] = svc
+ ctx.WithFields(fields).Info("efs service connection cached") + } + + return svc, nil +} + +type awsService struct { + ec2 *awsec2.EC2 + efs *awsefs.EFS +} + +type awsLogger struct { + lvl log.Level + ctx types.Context +} + +func (a *awsLogger) Log(args ...interface{}) { + switch a.lvl { + case log.DebugLevel: + a.ctx.Debugln(args...) + case log.InfoLevel: + a.ctx.Infoln(args...) + } +} + +func mustSession(ctx types.Context) *awsService { + return context.MustSession(ctx).(*awsService) +} + +func mustInstanceIDID(ctx types.Context) *string { + return &context.MustInstanceID(ctx).ID +} + +func (d *driver) mustRegion(ctx types.Context) *string { + if iid, ok := context.InstanceID(ctx); ok { + if v, ok := iid.Fields[efs.InstanceIDFieldRegion]; ok && v != "" { + return &v + } + } + return d.region +} + +func (d *driver) mustAvailabilityZone(ctx types.Context) *string { + if iid, ok := context.InstanceID(ctx); ok { + if v, ok := iid.Fields[efs.InstanceIDFieldAvailabilityZone]; ok { + if v != "" { + return &v + } + } + } return nil } @@ -90,17 +310,12 @@ func (d *driver) InstanceInspect( opts types.Store) (*types.Instance, error) { iid := context.MustInstanceID(ctx) - if iid.ID != "" { - return &types.Instance{InstanceID: iid}, nil - } - - var awsSubnetID string - if err := iid.UnmarshalMetadata(&awsSubnetID); err != nil { - return nil, err - } - instanceID := &types.InstanceID{ID: awsSubnetID, Driver: d.Name()} - - return &types.Instance{InstanceID: instanceID}, nil + return &types.Instance{ + Name: iid.ID, + Region: iid.Fields[efs.InstanceIDFieldRegion], + InstanceID: iid, + ProviderName: iid.Driver, + }, nil } // Type returns the type of storage a driver provides @@ -120,7 +335,9 @@ func (d *driver) Volumes( ctx types.Context, opts *types.VolumesOpts) ([]*types.Volume, error) { - fileSystems, err := d.getAllFileSystems() + svc := mustSession(ctx) + + fileSystems, err := d.getAllFileSystems(svc) if err != nil { return nil, err } @@ -136,7 +353,7 @@ func (d *driver) Volumes( } // Only volumes with partition prefix - if !strings.HasPrefix(*fileSystem.Name, d.tag()+tagDelimiter) { + if !strings.HasPrefix(*fileSystem.Name, d.tag+tagDelimiter) { continue } @@ -154,7 +371,8 @@ func (d *driver) Volumes( var atts []*types.VolumeAttachment if opts.Attachments.Requested() { - atts, err = d.getVolumeAttachments(ctx, *fileSystem.FileSystemId) + atts, err = d.getVolumeAttachments( + ctx, *fileSystem.FileSystemId, opts.Attachments) if err != nil { return nil, err } @@ -174,52 +392,53 @@ func (d *driver) VolumeInspect( volumeID string, opts *types.VolumeInspectOpts) (*types.Volume, error) { - resp, err := d.efsClient().DescribeFileSystems(&awsefs.DescribeFileSystemsInput{ - FileSystemId: aws.String(volumeID), - }) + resp, err := mustSession(ctx).efs.DescribeFileSystems( + &awsefs.DescribeFileSystemsInput{FileSystemId: aws.String(volumeID)}) if err != nil { return nil, err } - if len(resp.FileSystems) > 0 { - fileSystem := resp.FileSystems[0] - // Only volumes in "available" state - if *fileSystem.LifeCycleState != awsefs.LifeCycleStateAvailable { - return nil, nil - } + if len(resp.FileSystems) == 0 { + return nil, types.ErrNotFound{} + } - // Name is optional via tag so make sure it exists - var fileSystemName string - if fileSystem.Name != nil { - fileSystemName = *fileSystem.Name - } else { - ctx.WithFields(log.Fields{ - "filesystemid": *fileSystem.FileSystemId, - }).Warn("missing EFS filesystem name") - } + fileSystem := resp.FileSystems[0] - volume := &types.Volume{ - Name: 
d.getPrintableName(fileSystemName), - ID: *fileSystem.FileSystemId, - Size: *fileSystem.SizeInBytes.Value, - Attachments: nil, - } + // Only volumes in "available" state + if *fileSystem.LifeCycleState != awsefs.LifeCycleStateAvailable { + return nil, nil + } - var atts []*types.VolumeAttachment + // Name is optional via tag so make sure it exists + var fileSystemName string + if fileSystem.Name != nil { + fileSystemName = *fileSystem.Name + } else { + ctx.WithFields(log.Fields{ + "filesystemid": *fileSystem.FileSystemId, + }).Warn("missing EFS filesystem name") + } - if opts.Attachments.Requested() { - atts, err = d.getVolumeAttachments(ctx, *fileSystem.FileSystemId) - if err != nil { - return nil, err - } - } - if len(atts) > 0 { - volume.Attachments = atts - } - return volume, nil + volume := &types.Volume{ + Name: d.getPrintableName(fileSystemName), + ID: *fileSystem.FileSystemId, + Size: *fileSystem.SizeInBytes.Value, + Attachments: nil, } - return nil, types.ErrNotFound{} + var atts []*types.VolumeAttachment + + if opts.Attachments.Requested() { + atts, err = d.getVolumeAttachments( + ctx, *fileSystem.FileSystemId, opts.Attachments) + if err != nil { + return nil, err + } + } + if len(atts) > 0 { + volume.Attachments = atts + } + return volume, nil } // VolumeCreate creates a new volume. @@ -238,13 +457,15 @@ func (d *driver) VolumeCreate( if opts.Type != nil && strings.ToLower(*opts.Type) == "maxio" { request.PerformanceMode = aws.String(awsefs.PerformanceModeMaxIo) } - fileSystem, err := d.efsClient().CreateFileSystem(request) + + svc := mustSession(ctx) + fileSystem, err := svc.efs.CreateFileSystem(request) if err != nil { return nil, err } - _, err = d.efsClient().CreateTags(&awsefs.CreateTagsInput{ + _, err = svc.efs.CreateTags(&awsefs.CreateTagsInput{ FileSystemId: fileSystem.FileSystemId, Tags: []*awsefs.Tag{ { @@ -257,7 +478,7 @@ func (d *driver) VolumeCreate( if err != nil { // To not leak the EFS instances remove the filesystem that couldn't // be tagged with correct name before returning error response. - _, deleteErr := d.efsClient().DeleteFileSystem( + _, deleteErr := svc.efs.DeleteFileSystem( &awsefs.DeleteFileSystemInput{ FileSystemId: fileSystem.FileSystemId, }) @@ -272,7 +493,8 @@ func (d *driver) VolumeCreate( // Wait until FS is in "available" state for { - state, err := d.getFileSystemLifeCycleState(*fileSystem.FileSystemId) + state, err := d.getFileSystemLifeCycleState( + svc, *fileSystem.FileSystemId) if err == nil { if state != awsefs.LifeCycleStateCreating { break @@ -301,8 +523,10 @@ func (d *driver) VolumeRemove( volumeID string, opts types.Store) error { + svc := mustSession(ctx) + // Remove MountTarget(s) - resp, err := d.efsClient().DescribeMountTargets( + resp, err := svc.efs.DescribeMountTargets( &awsefs.DescribeMountTargetsInput{ FileSystemId: aws.String(volumeID), }) @@ -311,7 +535,7 @@ func (d *driver) VolumeRemove( } for _, mountTarget := range resp.MountTargets { - _, err = d.efsClient().DeleteMountTarget( + _, err = svc.efs.DeleteMountTarget( &awsefs.DeleteMountTargetInput{ MountTargetId: aws.String(*mountTarget.MountTargetId), }) @@ -325,7 +549,7 @@ func (d *driver) VolumeRemove( // just in "deleting" life cycle state). Here code will wait until all // mountpoints are deleted. 
for { - resp, err := d.efsClient().DescribeMountTargets( + resp, err := svc.efs.DescribeMountTargets( &awsefs.DescribeMountTargetsInput{ FileSystemId: aws.String(volumeID), }) @@ -346,7 +570,7 @@ func (d *driver) VolumeRemove( } // Remove FileSystem - _, err = d.efsClient().DeleteFileSystem( + _, err = svc.efs.DeleteFileSystem( &awsefs.DeleteFileSystemInput{ FileSystemId: aws.String(volumeID), }) @@ -359,7 +583,7 @@ func (d *driver) VolumeRemove( "filesystemid": volumeID, }).Info("waiting for FileSystem deletion") - _, err := d.efsClient().DescribeFileSystems( + _, err := svc.efs.DescribeFileSystems( &awsefs.DescribeFileSystemsInput{ FileSystemId: aws.String(volumeID), }) @@ -381,6 +605,8 @@ func (d *driver) VolumeRemove( return nil } +var errInvalidSecGroups = goof.New("security groups required") + // VolumeAttach attaches a volume and provides a token clients can use // to validate that device has appeared locally. func (d *driver) VolumeAttach( @@ -388,20 +614,19 @@ func (d *driver) VolumeAttach( volumeID string, opts *types.VolumeAttachOpts) (*types.Volume, string, error) { + svc := mustSession(ctx) + vol, err := d.VolumeInspect(ctx, volumeID, - &types.VolumeInspectOpts{Attachments: types.VolumeAttachmentsTrue}) + &types.VolumeInspectOpts{Attachments: types.VolAttReqTrue}) if err != nil { return nil, "", err } - inst, err := d.InstanceInspect(ctx, nil) - if err != nil { - return nil, "", err - } + iid := context.MustInstanceID(ctx) var ma *types.VolumeAttachment for _, att := range vol.Attachments { - if att.InstanceID.ID == inst.InstanceID.ID { + if att.InstanceID.ID == iid.ID { ma = att break } @@ -409,16 +634,30 @@ func (d *driver) VolumeAttach( // No mount targets were found if ma == nil { - request := &awsefs.CreateMountTargetInput{ - FileSystemId: aws.String(vol.ID), - SubnetId: aws.String(inst.InstanceID.ID), + + secGrpIDs := d.secGroups + if v, ok := iid.Fields[efs.InstanceIDFieldSecurityGroups]; ok { + ctx.WithField("secGrpNames", v).Info("querying security group IDs") + qSecGrpIDs, err := d.querySecGrpIDs(ctx, svc, strings.Split(v, ";")) + if err != nil { + return nil, "", err + } + secGrpIDs = qSecGrpIDs + } + + if len(secGrpIDs) == 0 { + return nil, "", errInvalidSecGroups } - if len(d.securityGroups()) > 0 { - request.SecurityGroups = aws.StringSlice(d.securityGroups()) + + request := &awsefs.CreateMountTargetInput{ + FileSystemId: aws.String(vol.ID), + SubnetId: aws.String(iid.ID), + SecurityGroups: aws.StringSlice(secGrpIDs), } - // TODO(mhrabovcin): Should we block here until MountTarget is in "available" - // LifeCycleState? Otherwise mount could fail until creation is completed. - _, err = d.efsClient().CreateMountTarget(request) + // TODO(mhrabovcin): Should we block here until MountTarget is in + // "available" LifeCycleState? Otherwise mount could fail until creation + // is completed. 
+ _, err = svc.efs.CreateMountTarget(request) // Failed to create mount target if err != nil { return nil, "", err @@ -428,6 +667,32 @@ func (d *driver) VolumeAttach( return vol, "", err } +func (d *driver) querySecGrpIDs( + ctx types.Context, + svc *awsService, + secGrpNames []string) ([]string, error) { + + req := &awsec2.DescribeSecurityGroupsInput{ + Filters: []*awsec2.Filter{ + &awsec2.Filter{ + Name: aws.String("group-name"), + Values: aws.StringSlice(secGrpNames), + }, + }, + } + res, err := svc.ec2.DescribeSecurityGroups(req) + if err != nil { + return nil, err + } + + var secGrpIDs []string + for _, sg := range res.SecurityGroups { + secGrpIDs = append(secGrpIDs, *sg.GroupId) + } + + return secGrpIDs, nil +} + // VolumeDetach detaches a volume. func (d *driver) VolumeDetach( ctx types.Context, @@ -494,15 +759,17 @@ func (d *driver) SnapshotRemove( // Retrieve all filesystems with tags from AWS API. This is very expensive // operation as it issues AWS SDK call per filesystem to retrieve tags. -func (d *driver) getAllFileSystems() (filesystems []*awsefs.FileSystemDescription, err error) { - resp, err := d.efsClient().DescribeFileSystems(&awsefs.DescribeFileSystemsInput{}) +func (d *driver) getAllFileSystems( + svc *awsService) (filesystems []*awsefs.FileSystemDescription, err error) { + + resp, err := svc.efs.DescribeFileSystems(&awsefs.DescribeFileSystemsInput{}) if err != nil { return nil, err } filesystems = append(filesystems, resp.FileSystems...) for resp.NextMarker != nil { - resp, err = d.efsClient().DescribeFileSystems(&awsefs.DescribeFileSystemsInput{ + resp, err = svc.efs.DescribeFileSystems(&awsefs.DescribeFileSystemsInput{ Marker: resp.NextMarker, }) if err != nil { @@ -514,10 +781,13 @@ func (d *driver) getAllFileSystems() (filesystems []*awsefs.FileSystemDescriptio return filesystems, nil } -func (d *driver) getFileSystemLifeCycleState(fileSystemID string) (string, error) { - resp, err := d.efsClient().DescribeFileSystems(&awsefs.DescribeFileSystemsInput{ - FileSystemId: aws.String(fileSystemID), - }) +func (d *driver) getFileSystemLifeCycleState( + svc *awsService, + fileSystemID string) (string, error) { + + resp, err := svc.efs.DescribeFileSystems( + &awsefs.DescribeFileSystemsInput{ + FileSystemId: aws.String(fileSystemID)}) if err != nil { return "", err } @@ -527,20 +797,30 @@ func (d *driver) getFileSystemLifeCycleState(fileSystemID string) (string, error } func (d *driver) getPrintableName(name string) string { - return strings.TrimPrefix(name, d.tag()+tagDelimiter) + return strings.TrimPrefix(name, d.tag+tagDelimiter) } func (d *driver) getFullVolumeName(name string) string { - return d.tag() + tagDelimiter + name + return d.tag + tagDelimiter + name } -func (d *driver) getVolumeAttachments(ctx types.Context, volumeID string) ( +var errGetLocDevs = goof.New("error getting local devices from context") + +func (d *driver) getVolumeAttachments( + ctx types.Context, + volumeID string, + attachments types.VolumeAttachmentsTypes) ( []*types.VolumeAttachment, error) { + if !attachments.Requested() { + return nil, nil + } + if volumeID == "" { return nil, goof.New("missing volume ID") } - resp, err := d.efsClient().DescribeMountTargets( + + resp, err := mustSession(ctx).efs.DescribeMountTargets( &awsefs.DescribeMountTargetsInput{ FileSystemId: aws.String(volumeID), }) @@ -548,12 +828,24 @@ func (d *driver) getVolumeAttachments(ctx types.Context, volumeID string) ( return nil, err } - ld, ldOK := context.LocalDevices(ctx) + var ( + ld *types.LocalDevices + ldOK bool + 
) + + if attachments.Devices() { + // Get local devices map from context + if ld, ldOK = context.LocalDevices(ctx); !ldOK { + return nil, errGetLocDevs + } + } var atts []*types.VolumeAttachment for _, mountTarget := range resp.MountTargets { - var dev string - var status string + var ( + dev string + status string + ) if ldOK { // TODO(kasisnu): Check lifecycle state and build the path better dev = *mountTarget.IpAddress + ":" + "/" @@ -566,8 +858,11 @@ func (d *driver) getVolumeAttachments(ctx types.Context, volumeID string) ( status = "Exported" } attachmentSD := &types.VolumeAttachment{ - VolumeID: *mountTarget.FileSystemId, - InstanceID: &types.InstanceID{ID: *mountTarget.SubnetId, Driver: d.Name()}, + VolumeID: *mountTarget.FileSystemId, + InstanceID: &types.InstanceID{ + ID: *mountTarget.SubnetId, + Driver: d.Name(), + }, DeviceName: dev, Status: status, } @@ -577,51 +872,47 @@ func (d *driver) getVolumeAttachments(ctx types.Context, volumeID string) ( return atts, nil } -func (d *driver) efsClient() *awsefs.EFS { - config := aws.NewConfig(). - WithCredentials(d.awsCreds). - WithRegion(d.region()) +// Retrieve config arguments +func (d *driver) getAccessKey() string { + return d.config.GetString(efs.ConfigEFSAccessKey) +} - if types.Debug { - config = config. - WithLogger(newAwsLogger()). - WithLogLevel(aws.LogDebug) - } +func (d *driver) getSecretKey() string { + return d.config.GetString(efs.ConfigEFSSecretKey) +} - return awsefs.New(session.New(), config) +func (d *driver) getRegion() string { + return d.config.GetString(efs.ConfigEFSRegion) } -func (d *driver) accessKey() string { - return d.config.GetString("efs.accessKey") +func (d *driver) getEndpoint() string { + return d.config.GetString(efs.ConfigEFSEndpoint) } -func (d *driver) secretKey() string { - return d.config.GetString("efs.secretKey") +func (d *driver) getEndpointFormat() string { + return d.config.GetString(efs.ConfigEFSEndpointFormat) } -func (d *driver) securityGroups() []string { - return strings.Split(d.config.GetString("efs.securityGroups"), ",") +func (d *driver) getEndpointEC2() string { + return d.config.GetString(efs.ConfigEFSEndpointEC2) } -func (d *driver) region() string { - return d.config.GetString("efs.region") +func (d *driver) getEndpointEC2Format() string { + return d.config.GetString(efs.ConfigEFSEndpointEC2Format) } -func (d *driver) tag() string { - return d.config.GetString("efs.tag") +func (d *driver) getMaxRetries() int { + return d.config.GetInt(efs.ConfigEFSMaxRetries) } -// Simple logrus adapter for AWS Logger interface -type awsLogger struct { - logger *log.Logger +func (d *driver) getTag() string { + return d.config.GetString(efs.ConfigEFSTag) } -func newAwsLogger() *awsLogger { - return &awsLogger{ - logger: log.StandardLogger(), - } +func (d *driver) getSecurityGroups() []string { + return d.config.GetStringSlice(efs.ConfigEFSSecGroups) } -func (l *awsLogger) Log(args ...interface{}) { - l.logger.Println(args...) 
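+
+// getDisableSessionCache returns the value of the disableSessionCache
+// configuration property, which controls whether AWS sessions may be
+// cached and reused across API calls.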
+func (d *driver) getDisableSessionCache() bool { + return d.config.GetBool(efs.ConfigEFSDisableSessionCache) } diff --git a/drivers/storage/efs/tests/efs_test.go b/drivers/storage/efs/tests/efs_test.go index e89b1870..ff1dc08e 100644 --- a/drivers/storage/efs/tests/efs_test.go +++ b/drivers/storage/efs/tests/efs_test.go @@ -246,7 +246,7 @@ func volumeInspectAttached( log.WithField("volumeID", volumeID).Info("inspecting volume") reply, err := client.API().VolumeInspect( nil, efs.Name, volumeID, - types.VolumeAttachmentsTrue) + types.VolAttReqTrue) assert.NoError(t, err) if err != nil { @@ -265,7 +265,7 @@ func volumeInspectDetached( log.WithField("volumeID", volumeID).Info("inspecting volume") reply, err := client.API().VolumeInspect( nil, efs.Name, volumeID, - types.VolumeAttachmentsTrue) + types.VolAttReqTrue) assert.NoError(t, err) if err != nil { diff --git a/drivers/storage/efs/utils/utils.go b/drivers/storage/efs/utils/utils.go index 6553aa57..f9b28b3d 100644 --- a/drivers/storage/efs/utils/utils.go +++ b/drivers/storage/efs/utils/utils.go @@ -1,9 +1,13 @@ package utils import ( + "bufio" "encoding/json" + "fmt" + "io/ioutil" "net" "net/http" + "strings" "time" "github.com/codedellemc/libstorage/api/types" @@ -14,6 +18,10 @@ const ( raddr = "169.254.169.254" mdtURL = "http://" + raddr + "/latest/meta-data/" iidURL = "http://" + raddr + "/latest/dynamic/instance-identity/document" + macURL = "http://" + raddr + "/latest/meta-data/mac" + subURL = "http://" + raddr + + `/latest/meta-data/network/interfaces/macs/%s/subnet-id` + sgpURL = "http://" + raddr + "/latest/meta-data/security-groups" ) // IsEC2Instance returns a flag indicating whether the executing host is an EC2 @@ -62,12 +70,94 @@ func InstanceID(ctx types.Context) (*types.InstanceID, error) { return nil, err } + subnetID, err := ResolveSubnet(ctx) + if err != nil { + return nil, err + } + + secGroups, err := getSecurityGroups(ctx) + if err != nil { + return nil, err + } + + iidFields := map[string]string{ + efs.InstanceIDFieldRegion: iid.Region, + efs.InstanceIDFieldAvailabilityZone: iid.AvailabilityZone, + } + + if len(secGroups) == 1 { + iidFields[efs.InstanceIDFieldSecurityGroups] = strings.Join( + secGroups, ";") + } + return &types.InstanceID{ - ID: iid.InstanceID, + ID: subnetID, Driver: efs.Name, - Fields: map[string]string{ - efs.InstanceIDFieldRegion: iid.Region, - efs.InstanceIDFieldAvailabilityZone: iid.AvailabilityZone, - }, + Fields: iidFields, }, nil } + +// ResolveSubnet determines the VPC subnet ID on the running AWS instance. 
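+// It reads the instance's MAC address from the EC2 instance metadata service
+// and then queries the network/interfaces/macs/<mac>/subnet-id metadata
+// document for that MAC to obtain the subnet ID.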
+func ResolveSubnet(ctx types.Context) (string, error) { + mac, err := getMAC(ctx) + if err != nil { + return "", err + } + subnetID, err := getSubnetID(ctx, mac) + if err != nil { + return "", err + } + return subnetID, nil +} + +func getMAC(ctx types.Context) (string, error) { + req, err := http.NewRequest(http.MethodGet, macURL, nil) + if err != nil { + return "", err + } + res, err := doRequest(ctx, req) + if err != nil { + return "", err + } + defer res.Body.Close() + buf, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", err + } + return string(buf), nil +} + +func getSubnetID(ctx types.Context, mac string) (string, error) { + req, err := http.NewRequest(http.MethodGet, fmt.Sprintf(subURL, mac), nil) + if err != nil { + return "", err + } + res, err := doRequest(ctx, req) + if err != nil { + return "", err + } + defer res.Body.Close() + buf, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", err + } + return string(buf), nil +} + +func getSecurityGroups(ctx types.Context) ([]string, error) { + req, err := http.NewRequest(http.MethodGet, sgpURL, nil) + if err != nil { + return nil, err + } + res, err := doRequest(ctx, req) + if err != nil { + return nil, err + } + defer res.Body.Close() + var secGroups []string + scanner := bufio.NewScanner(res.Body) + for scanner.Scan() { + secGroups = append(secGroups, scanner.Text()) + } + return secGroups, nil +} diff --git a/drivers/storage/efs/utils/utils_test.go b/drivers/storage/efs/utils/utils_test.go new file mode 100644 index 00000000..dc624bc7 --- /dev/null +++ b/drivers/storage/efs/utils/utils_test.go @@ -0,0 +1,26 @@ +package utils + +import ( + "os" + "strconv" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/codedellemc/libstorage/api/context" +) + +func skipTest(t *testing.T) { + if ok, _ := strconv.ParseBool(os.Getenv("EFS_UTILS_TEST")); !ok { + t.Skip() + } +} + +func TestInstanceID(t *testing.T) { + skipTest(t) + iid, err := InstanceID(context.Background()) + if !assert.NoError(t, err) { + t.FailNow() + } + t.Logf("instanceID=%s", iid.String()) +} diff --git a/drivers/storage/isilon/storage/isilon_storage.go b/drivers/storage/isilon/storage/isilon_storage.go index 9bcc63e9..a89bf3e8 100644 --- a/drivers/storage/isilon/storage/isilon_storage.go +++ b/drivers/storage/isilon/storage/isilon_storage.go @@ -167,23 +167,37 @@ func (d *driver) NextDeviceInfo( return nil, nil } -func (d *driver) getVolumeAttachments(ctx types.Context) ( +func (d *driver) getVolumeAttachments( + ctx types.Context, + attachments types.VolumeAttachmentsTypes) ( []*types.VolumeAttachment, error) { + if !attachments.Requested() { + return nil, nil + } + exports, err := d.getVolumeExports(ctx) if err != nil { return nil, err } iid, iidOK := context.InstanceID(ctx) - ld, ldOK := context.LocalDevices(ctx) + var ( + ld *types.LocalDevices + ldOK bool + ) + if attachments.Devices() { + ld, ldOK = context.LocalDevices(ctx) + } var atts []*types.VolumeAttachment for _, export := range exports { - var dev string - var status string + var ( + dev string + status string + ) for _, c := range export.Clients { - if iidOK && ldOK && c == iid.ID { + if iidOK && ldOK && strings.EqualFold(c, iid.ID) { dev = d.nfsMountPath(export.ExportPath) if _, ok := ld.DeviceMap[dev]; ok { status = "Exported and Mounted" @@ -240,7 +254,7 @@ func (d *driver) VolumeCreate(ctx types.Context, volumeName string, opts *types.VolumeCreateOpts) (*types.Volume, error) { vol, err := d.VolumeInspect(ctx, volumeName, - &types.VolumeInspectOpts{Attachments: 
types.VolumeAttachmentsTrue}) + &types.VolumeInspectOpts{Attachments: types.VolAttReqTrue}) if err != nil { return nil, err } @@ -329,7 +343,7 @@ func (d *driver) VolumeAttach( // ensure the volume exists and is exported vol, err := d.VolumeInspect(ctx, volumeID, - &types.VolumeInspectOpts{Attachments: types.VolumeAttachmentsTrue}) + &types.VolumeInspectOpts{Attachments: types.VolAttReqTrue}) if err != nil { return nil, "", err } @@ -390,7 +404,7 @@ func (d *driver) VolumeAttach( } vol, err = d.VolumeInspect(ctx, volumeID, - &types.VolumeInspectOpts{Attachments: types.VolumeAttachmentsTrue}) + &types.VolumeInspectOpts{Attachments: types.VolAttReqTrue}) if err != nil { return nil, "", err } @@ -452,7 +466,7 @@ func (d *driver) VolumeDetach( } return d.VolumeInspect(ctx, volumeID, &types.VolumeInspectOpts{ - Attachments: types.VolumeAttachmentsTrue, + Attachments: types.VolAttReqTrue, }) } @@ -542,21 +556,23 @@ func (d *driver) getVolume( return nil, nil } - var atts []*types.VolumeAttachment + var ( + err error + atts []*types.VolumeAttachment + attMap map[string][]*types.VolumeAttachment + ) + if attachments.Requested() { - var err error - atts, err = d.getVolumeAttachments(ctx) - if err != nil { + if atts, err = d.getVolumeAttachments(ctx, attachments); err != nil { return nil, err } - } - - attMap := make(map[string][]*types.VolumeAttachment) - for _, att := range atts { - if attMap[att.VolumeID] == nil { - attMap[att.VolumeID] = make([]*types.VolumeAttachment, 0) + attMap = map[string][]*types.VolumeAttachment{} + for _, att := range atts { + if attMap[att.VolumeID] == nil { + attMap[att.VolumeID] = []*types.VolumeAttachment{} + } + attMap[att.VolumeID] = append(attMap[att.VolumeID], att) } - attMap[att.VolumeID] = append(attMap[att.VolumeID], att) } var volumesSD []*types.Volume @@ -565,13 +581,15 @@ func (d *driver) getVolume( if err != nil { return nil, err } - - vatts, _ := attMap[volume.Name] volumeSD := &types.Volume{ - Name: volume.Name, - ID: volume.Name, - Size: volSize, - Attachments: vatts, + Name: volume.Name, + ID: volume.Name, + Size: volSize, + } + if attachments.Requested() { + if vatts, ok := attMap[volume.Name]; ok { + volumeSD.Attachments = vatts + } } volumesSD = append(volumesSD, volumeSD) } diff --git a/drivers/storage/isilon/tests/isilon_test.go b/drivers/storage/isilon/tests/isilon_test.go index 400674d9..86e63810 100644 --- a/drivers/storage/isilon/tests/isilon_test.go +++ b/drivers/storage/isilon/tests/isilon_test.go @@ -230,7 +230,7 @@ func volumeInspectAttached( log.WithField("volumeID", volumeID).Info("inspecting volume") reply, err := client.API().VolumeInspect( - nil, isilon.Name, volumeID, types.VolumeAttachmentsTrue) + nil, isilon.Name, volumeID, types.VolAttReqTrue) assert.NoError(t, err) if err != nil { @@ -248,7 +248,7 @@ func volumeInspectAttachedFail( log.WithField("volumeID", volumeID).Info("inspecting volume") reply, err := client.API().VolumeInspect( - nil, isilon.Name, volumeID, types.VolumeAttachmentsTrue) + nil, isilon.Name, volumeID, types.VolAttReqTrue) assert.NoError(t, err) if err != nil { @@ -265,7 +265,7 @@ func volumeInspectDetached( log.WithField("volumeID", volumeID).Info("inspecting volume") reply, err := client.API().VolumeInspect( - nil, isilon.Name, volumeID, types.VolumeAttachmentsTrue) + nil, isilon.Name, volumeID, types.VolAttReqTrue) assert.NoError(t, err) if err != nil { diff --git a/drivers/storage/rackspace/storage/rackspace_storage.go b/drivers/storage/rackspace/storage/rackspace_storage.go index 05eb0295..1c56294f 100644 
--- a/drivers/storage/rackspace/storage/rackspace_storage.go +++ b/drivers/storage/rackspace/storage/rackspace_storage.go @@ -138,7 +138,7 @@ func (d *driver) Volumes( ctx types.Context, opts *types.VolumesOpts) ([]*types.Volume, error) { // always return attachments to align against other drivers for now - return d.getVolume(ctx, "", "", types.VolumeAttachmentsTrue) + return d.getVolume(ctx, "", "", opts.Attachments) } // // VolumeInspect inspects a single volume. @@ -311,7 +311,7 @@ func (d *driver) VolumeDetach( if volumeID == "" { return nil, goof.WithFields(fields, "volumeId is required for VolumeDetach") } - vols, err := d.getVolume(ctx, volumeID, "", types.VolumeAttachmentsTrue) + vols, err := d.getVolume(ctx, volumeID, "", types.VolAttReqTrue) if err != nil { return nil, err } @@ -586,37 +586,27 @@ func (d *driver) createVolume( "error waiting for volume creation to complete", err) } log.WithFields(fields).Debug("created volume") - return translateVolume(resp, types.VolumeAttachmentsTrue), nil + return translateVolume(resp, types.VolAttReqTrue), nil } //Reformats from volumes.Volume to types.Volume credit to github.com/MatMaul func translateVolume( volume *volumes.Volume, - includeAttachments types.VolumeAttachmentsTypes) *types.Volume { + attachments types.VolumeAttachmentsTypes) *types.Volume { - var attachments []*types.VolumeAttachment - if includeAttachments.Requested() { - for _, attachment := range volume.Attachments { - libstorageAttachment := &types.VolumeAttachment{ - VolumeID: attachment["volume_id"].(string), + var atts []*types.VolumeAttachment + if attachments.Requested() { + for _, att := range volume.Attachments { + lsAtt := &types.VolumeAttachment{ + VolumeID: att["volume_id"].(string), InstanceID: &types.InstanceID{ - ID: attachment["server_id"].(string), + ID: att["server_id"].(string), Driver: rackspace.Name}, - DeviceName: attachment["device"].(string), - Status: "", } - attachments = append(attachments, libstorageAttachment) - } - } else { - for _, attachment := range volume.Attachments { - libstorageAttachment := &types.VolumeAttachment{ - VolumeID: attachment["volume_id"].(string), - InstanceID: &types.InstanceID{ID: attachment["server_id"].(string), Driver: rackspace.Name}, - DeviceName: "", - Status: "", + if attachments.Devices() { + lsAtt.DeviceName = att["device"].(string) } - attachments = append(attachments, libstorageAttachment) - break + atts = append(atts, lsAtt) } } @@ -628,7 +618,7 @@ func translateVolume( Type: volume.VolumeType, IOPS: 0, Size: int64(volume.Size), - Attachments: attachments, + Attachments: atts, } } @@ -660,7 +650,7 @@ func (d *driver) volumeAttached(ctx types.Context, } volume, err := d.VolumeInspect( ctx, volumeID, &types.VolumeInspectOpts{ - Attachments: types.VolumeAttachmentsTrue}) + Attachments: types.VolAttReqTrue}) if err != nil { return true, goof.WithFieldsE(fields, "error getting volume when waiting", err) } @@ -711,7 +701,7 @@ func (d *driver) waitVolumeAttachStatus( for { volume, err := d.VolumeInspect( ctx, volumeID, - &types.VolumeInspectOpts{Attachments: types.VolumeAttachmentsTrue}) + &types.VolumeInspectOpts{Attachments: types.VolAttReqTrue}) if err != nil { return nil, goof.WithFieldsE(fields, "error getting volume when waiting", err) } diff --git a/drivers/storage/rackspace/tests/rackspace_test.go b/drivers/storage/rackspace/tests/rackspace_test.go index 1f8ad35e..032591c2 100644 --- a/drivers/storage/rackspace/tests/rackspace_test.go +++ b/drivers/storage/rackspace/tests/rackspace_test.go @@ -242,7 +242,7 @@ 
func volumeInspectAttached( log.WithField("volumeID", volumeID).Info("inspecting volume") reply, err := client.API().VolumeInspect( - nil, rackspace.Name, volumeID, types.VolumeAttachmentsTrue) + nil, rackspace.Name, volumeID, types.VolAttReqTrue) assert.NoError(t, err) if err != nil { @@ -259,7 +259,7 @@ func volumeInspectAttachedFail( log.WithField("volumeID", volumeID).Info("inspecting volume") reply, err := client.API().VolumeInspect( - nil, rackspace.Name, volumeID, types.VolumeAttachmentsTrue) + nil, rackspace.Name, volumeID, types.VolAttReqTrue) assert.NoError(t, err) if err != nil { @@ -276,7 +276,7 @@ func volumeInspectDetached( log.WithField("volumeID", volumeID).Info("inspecting volume") reply, err := client.API().VolumeInspect( - nil, rackspace.Name, volumeID, types.VolumeAttachmentsTrue) + nil, rackspace.Name, volumeID, types.VolAttReqTrue) assert.NoError(t, err) if err != nil { diff --git a/drivers/storage/scaleio/storage/scaleio_storage.go b/drivers/storage/scaleio/storage/scaleio_storage.go index 0e4527e6..32c9fd05 100644 --- a/drivers/storage/scaleio/storage/scaleio_storage.go +++ b/drivers/storage/scaleio/storage/scaleio_storage.go @@ -164,7 +164,7 @@ func (d *driver) Volumes( opts *types.VolumesOpts) ([]*types.Volume, error) { sdcMappedVolumes := make(map[string]string) - if opts.Attachments.Requested() { + if opts.Attachments.Devices() { if ld, ok := context.LocalDevices(ctx); ok { sdcMappedVolumes = ld.DeviceMap } @@ -195,8 +195,8 @@ func (d *driver) Volumes( return "" } - if protectionDomain, ok := mapProtectionDomainName[pool.ProtectionDomainID]; ok { - return protectionDomain.Name + if pd, ok := mapProtectionDomainName[pool.ProtectionDomainID]; ok { + return pd.Name } return "" } @@ -209,22 +209,22 @@ func (d *driver) Volumes( var volumesSD []*types.Volume for _, volume := range volumes { var attachmentsSD []*types.VolumeAttachment - for _, attachment := range volume.MappedSdcInfo { - var deviceName string - if _, exists := sdcMappedVolumes[volume.ID]; exists { - deviceName = sdcMappedVolumes[volume.ID] - } - instanceID := &types.InstanceID{ - ID: attachment.SdcID, - Driver: d.Name(), + if opts.Attachments.Requested() { + for _, attachment := range volume.MappedSdcInfo { + instanceID := &types.InstanceID{ + ID: attachment.SdcID, + Driver: d.Name(), + } + attachmentSD := &types.VolumeAttachment{ + VolumeID: volume.ID, + InstanceID: instanceID, + Status: "", + } + if devName, ok := sdcMappedVolumes[volume.ID]; ok { + attachmentSD.DeviceName = devName + } + attachmentsSD = append(attachmentsSD, attachmentSD) } - attachmentSD := &types.VolumeAttachment{ - VolumeID: volume.ID, - InstanceID: instanceID, - DeviceName: deviceName, - Status: "", - } - attachmentsSD = append(attachmentsSD, attachmentSD) } var IOPS int64 @@ -375,7 +375,7 @@ func (d *driver) VolumeCreate(ctx types.Context, volumeName string, } return d.VolumeInspect(ctx, vol.ID, &types.VolumeInspectOpts{ - Attachments: types.VolumeAttachmentsTrue, + Attachments: types.VolAttReqTrue, }) } @@ -407,7 +407,7 @@ func (d *driver) VolumeCreateFromSnapshot( } volumeInspectOpts := &types.VolumeInspectOpts{ - Attachments: types.VolumeAttachmentsTrue, + Attachments: types.VolAttReqTrue, Opts: opts.Opts, } @@ -479,7 +479,7 @@ func (d *driver) VolumeAttach( vol, err := d.VolumeInspect( ctx, volumeID, &types.VolumeInspectOpts{ - Attachments: types.VolumeAttachmentsTrue, + Attachments: types.VolAttReqTrue, }) if err != nil { return nil, "", goof.WithError("error getting volume", err) @@ -506,7 +506,7 @@ func (d *driver) 
VolumeAttach( attachedVol, err := d.VolumeInspect( ctx, volumeID, &types.VolumeInspectOpts{ - Attachments: types.VolumeAttachmentsTrue, + Attachments: types.VolAttReqTrue, Opts: opts.Opts, }) if err != nil { @@ -552,7 +552,7 @@ func (d *driver) VolumeDetach( } vol, err := d.VolumeInspect(ctx, volumeID, &types.VolumeInspectOpts{ - Attachments: types.VolumeAttachmentsTrue, + Attachments: types.VolAttReqTrue, }) if err != nil { return nil, err diff --git a/drivers/storage/scaleio/tests/scaleio_test.go b/drivers/storage/scaleio/tests/scaleio_test.go index 54ac69a3..672688b7 100644 --- a/drivers/storage/scaleio/tests/scaleio_test.go +++ b/drivers/storage/scaleio/tests/scaleio_test.go @@ -222,7 +222,7 @@ func volumeInspectAttached( log.WithField("volumeID", volumeID).Info("inspecting volume") reply, err := client.API().VolumeInspect( - nil, sio.Name, volumeID, types.VolumeAttachmentsTrue) + nil, sio.Name, volumeID, types.VolAttReqTrue) assert.NoError(t, err) if err != nil { @@ -239,7 +239,7 @@ func volumeInspectAttachedFail( log.WithField("volumeID", volumeID).Info("inspecting volume") reply, err := client.API().VolumeInspect( - nil, sio.Name, volumeID, types.VolumeAttachmentsTrue) + nil, sio.Name, volumeID, types.VolAttReqTrue) assert.NoError(t, err) if err != nil { @@ -256,7 +256,7 @@ func volumeInspectDetached( log.WithField("volumeID", volumeID).Info("inspecting volume") reply, err := client.API().VolumeInspect( - nil, sio.Name, volumeID, types.VolumeAttachmentsTrue) + nil, sio.Name, volumeID, types.VolAttReqTrue) assert.NoError(t, err) if err != nil { diff --git a/drivers/storage/vbox/executor/vbox_executor.go b/drivers/storage/vbox/executor/vbox_executor.go index 1d2f4807..4f274f8d 100644 --- a/drivers/storage/vbox/executor/vbox_executor.go +++ b/drivers/storage/vbox/executor/vbox_executor.go @@ -2,6 +2,7 @@ package executor import ( "bufio" + "bytes" "fmt" "io/ioutil" "net" @@ -49,7 +50,7 @@ func (d *driver) Supported( opts types.Store) (bool, error) { // Use dmidecode if installed - if gotil.FileExistsInPath("dmidecode") { + if gotil.FileExistsInPath(dmidecodeCmd) { out, err := exec.Command( dmidecodeCmd, "-s", "system-product-name").Output() if err == nil { @@ -69,19 +70,13 @@ func (d *driver) Supported( } // No luck with dmidecode, try dmesg - cmd := exec.Command("dmesg") - cmdReader, err := cmd.StdoutPipe() + out, err := exec.Command("dmesg").Output() if err != nil { return false, nil } - defer cmdReader.Close() - scanner := bufio.NewScanner(cmdReader) - - err = cmd.Run() - if err != nil { - return false, nil - } + rdr := bytes.NewReader(out) + scanner := bufio.NewScanner(rdr) for scanner.Scan() { if strings.Contains(scanner.Text(), "BIOS VirtualBox") { diff --git a/drivers/storage/vbox/storage/vbox_storage.go b/drivers/storage/vbox/storage/vbox_storage.go index 7948ac22..f9b45686 100644 --- a/drivers/storage/vbox/storage/vbox_storage.go +++ b/drivers/storage/vbox/storage/vbox_storage.go @@ -234,7 +234,7 @@ func (d *driver) VolumeCreate(ctx types.Context, volumeName string, size := *opts.Size * 1024 * 1024 * 1024 - vol, err := d.getVolume(ctx, "", volumeName, 0) + vol, err := d.getVolume(ctx, "", volumeName, types.VolAttFalse) if err != nil { return nil, err } @@ -328,7 +328,7 @@ func (d *driver) VolumeAttach( } // review volume with attachments to any host - volumes, err := d.getVolume(ctx, volumeID, "", 0) + volumes, err := d.getVolume(ctx, volumeID, "", types.VolAttReq) if err != nil { return nil, "", err } @@ -357,7 +357,7 @@ func (d *driver) VolumeAttach( ) } - volumes, err = 
d.getVolume(ctx, volumeID, "", types.VolumeAttachmentsTrue)
+	volumes, err = d.getVolume(ctx, volumeID, "", types.VolAttReqTrue)
 	if err != nil {
 		return nil, "", err
 	}
@@ -385,7 +385,7 @@ func (d *driver) VolumeDetach(
 		return nil, goof.New("missing volume id")
 	}
 
-	volumes, err := d.getVolume(ctx, volumeID, "", 0)
+	volumes, err := d.getVolume(ctx, volumeID, "", types.VolAttFalse)
 	if err != nil {
 		return nil, err
 	}
@@ -394,6 +394,8 @@
 		return nil, goof.New("no volume returned")
 	}
 
+	// TODO: Check if len(volumes[0].Attachments) > 0?
+
 	if err := d.detachVolume(ctx, volumeID, ""); err != nil {
 		return nil, goof.WithFieldsE(
 			log.Fields{
@@ -404,7 +406,7 @@
 	ctx.Info("detached volume", volumeID)
 	return d.VolumeInspect(
 		ctx, volumeID, &types.VolumeInspectOpts{
-			Attachments: types.VolumeAttachmentsTrue})
+			Attachments: types.VolAttReqTrue})
 }
 
 func (d *driver) VolumeDetachAll(
@@ -495,7 +497,7 @@ func (d *driver) getVolume(
 	}
 
 	var mapDN map[string]string
-	if attachments.Requested() {
+	if attachments.Devices() {
 		volumeMapping, err := d.getVolumeMapping(ctx)
 		if err != nil {
 			return nil, err
 		}
@@ -510,25 +512,33 @@
 	var volumesSD []*types.Volume
 	for _, v := range volumes {
-		var attachmentsSD []*types.VolumeAttachment
-		for _, mid := range v.MachineIDs {
-			dn, _ := mapDN[v.ID]
-			attachmentSD := &types.VolumeAttachment{
-				VolumeID: v.ID,
-				InstanceID: &types.InstanceID{ID: mid, Driver: vbox.Name},
-				DeviceName: dn,
-				Status: v.Location,
-			}
-			attachmentsSD = append(attachmentsSD, attachmentSD)
+		volumeSD := &types.Volume{
+			Name: v.Name,
+			ID: v.ID,
+			Size: int64(v.LogicalSize / 1024 / 1024 / 1024),
+			Status: v.Location,
+			Type: string(v.DeviceType),
 		}
-		volumeSD := &types.Volume{
-			Name: v.Name,
-			ID: v.ID,
-			Size: int64(v.LogicalSize / 1024 / 1024 / 1024),
-			Status: v.Location,
-			Attachments: attachmentsSD,
+		if attachments.Requested() {
+			var attachmentsSD []*types.VolumeAttachment
+			for _, mid := range v.MachineIDs {
+				attachmentSD := &types.VolumeAttachment{
+					VolumeID: v.ID,
+					InstanceID: &types.InstanceID{
+						ID: mid,
+						Driver: vbox.Name,
+					},
+				}
+				if attachments.Devices() && mapDN != nil {
+					dn, _ := mapDN[v.ID]
+					attachmentSD.DeviceName = dn
+				}
+				attachmentsSD = append(attachmentsSD, attachmentSD)
+			}
+			volumeSD.Attachments = attachmentsSD
 		}
+
+		volumesSD = append(volumesSD, volumeSD)
 	}
diff --git a/drivers/storage/vbox/tests/vbox_test.go b/drivers/storage/vbox/tests/vbox_test.go
index a737b741..c7390754 100644
--- a/drivers/storage/vbox/tests/vbox_test.go
+++ b/drivers/storage/vbox/tests/vbox_test.go
@@ -221,7 +221,7 @@ func volumeInspectAttached(
 
 	log.WithField("volumeID", volumeID).Info("inspecting volume")
 	reply, err := client.API().VolumeInspect(
-		nil, vbox.Name, volumeID, types.VolumeAttachmentsTrue)
+		nil, vbox.Name, volumeID, types.VolAttReqTrue)
 	assert.NoError(t, err)
 
 	if err != nil {
@@ -238,7 +238,7 @@ func volumeInspectAttachedFail(
 
 	log.WithField("volumeID", volumeID).Info("inspecting volume")
 	reply, err := client.API().VolumeInspect(
-		nil, vbox.Name, volumeID, types.VolumeAttachmentsTrue)
+		nil, vbox.Name, volumeID, types.VolAttReqTrue)
 	assert.NoError(t, err)
 
 	if err != nil {
@@ -255,7 +255,7 @@ func volumeInspectDetached(
 
 	log.WithField("volumeID", volumeID).Info("inspecting volume")
 	reply, err := client.API().VolumeInspect(
-		nil, vbox.Name, volumeID, types.VolumeAttachmentsTrue)
+		nil, vbox.Name, volumeID, types.VolAttReqTrue)
 	assert.NoError(t, err)
 
 	if err != nil {
diff --git
a/drivers/storage/vfs/storage/vfs_storage.go b/drivers/storage/vfs/storage/vfs_storage.go index 513034eb..55417b00 100644 --- a/drivers/storage/vfs/storage/vfs_storage.go +++ b/drivers/storage/vfs/storage/vfs_storage.go @@ -150,6 +150,9 @@ func (d *driver) Volumes( if err != nil { return nil, err } + if opts.Attachments > 0 { + v.AttachmentState = 0 + } volumes = append(volumes, v) } @@ -167,6 +170,9 @@ func (d *driver) VolumeInspect( if err != nil { return nil, err } + if opts.Attachments > 0 { + v.AttachmentState = 0 + } return v, nil } diff --git a/drivers/storage/vfs/tests/vfs_test.go b/drivers/storage/vfs/tests/vfs_test.go index 1d6ebc76..43cab2c2 100644 --- a/drivers/storage/vfs/tests/vfs_test.go +++ b/drivers/storage/vfs/tests/vfs_test.go @@ -122,7 +122,7 @@ func TestStorageDriverVolumes(t *testing.T) { context.Background().WithValue( context.ServiceKey, vfs.Name), &types.VolumesOpts{ - Attachments: types.VolumeAttachmentsTrue, + Attachments: types.VolAttReqTrue, Opts: utils.NewStore()}) assert.NoError(t, err) assert.Len(t, vols, 2) @@ -132,11 +132,12 @@ func TestStorageDriverVolumes(t *testing.T) { func TestVolumes(t *testing.T) { tc, _, vols, _ := newTestConfigAll(t) tf := func(config gofig.Config, client types.Client, t *testing.T) { - reply, err := client.API().Volumes(nil, 0) + reply, err := client.API().Volumes(nil, types.VolAttNone) if err != nil { t.Fatal(err) } for volumeID, volume := range vols { + volume.Attachments = nil assert.NotNil(t, reply["vfs"][volumeID]) assert.EqualValues(t, volume, reply["vfs"][volumeID]) } @@ -145,10 +146,10 @@ func TestVolumes(t *testing.T) { apitests.RunWithClientType(t, types.ControllerClient, vfs.Name, tc, tf) } -func TestVolumesWithAttachments(t *testing.T) { +func TestVolumesWithAttachmentsTrue(t *testing.T) { tc, _, vols, _ := newTestConfigAll(t) tf := func(config gofig.Config, client types.Client, t *testing.T) { - reply, err := client.API().Volumes(nil, types.VolumeAttachmentsTrue) + reply, err := client.API().Volumes(nil, types.VolAttReqTrue) if err != nil { t.Fatal(err) } @@ -162,11 +163,149 @@ func TestVolumesWithAttachments(t *testing.T) { apitests.Run(t, vfs.Name, tc, tf) } +func TestVolumesWithAttachmentsRequested(t *testing.T) { + tc, _, vols, _ := newTestConfigAll(t) + tf := func(config gofig.Config, client types.Client, t *testing.T) { + reply, err := client.API().Volumes(nil, types.VolAttReq) + if err != nil { + t.Fatal(err) + } + + assert.NotNil(t, reply["vfs"]["vfs-000"]) + assert.NotNil(t, reply["vfs"]["vfs-001"]) + assert.NotNil(t, reply["vfs"]["vfs-002"]) + assert.EqualValues(t, vols["vfs-000"], reply["vfs"]["vfs-000"]) + assert.EqualValues(t, vols["vfs-001"], reply["vfs"]["vfs-001"]) + assert.Len(t, reply["vfs"]["vfs-002"].Attachments, 0) + } + apitests.Run(t, vfs.Name, tc, tf) +} + +func TestVolumesWithAttachmentsNone(t *testing.T) { + tc, _, _, _ := newTestConfigAll(t) + tf := func(config gofig.Config, client types.Client, t *testing.T) { + reply, err := client.API().Volumes(nil, types.VolAttNone) + if err != nil { + t.Fatal(err) + } + + assert.NotNil(t, reply["vfs"]["vfs-000"]) + assert.NotNil(t, reply["vfs"]["vfs-001"]) + assert.NotNil(t, reply["vfs"]["vfs-002"]) + assert.Len(t, reply["vfs"]["vfs-000"].Attachments, 0) + assert.Len(t, reply["vfs"]["vfs-001"].Attachments, 0) + assert.Len(t, reply["vfs"]["vfs-002"].Attachments, 0) + } + apitests.Run(t, vfs.Name, tc, tf) +} + +func TestVolumesWithAttachmentsAttached(t *testing.T) { + tc, _, vols, _ := newTestConfigAll(t) + tf := func(config gofig.Config, client 
types.Client, t *testing.T) { + reply, err := client.API().Volumes(nil, types.VolAttReqOnlyAttachedVols) + if err != nil { + t.Fatal(err) + } + + assert.NotNil(t, reply["vfs"]["vfs-000"]) + assert.NotNil(t, reply["vfs"]["vfs-001"]) + assert.Nil(t, reply["vfs"]["vfs-002"]) + assert.EqualValues(t, vols["vfs-000"], reply["vfs"]["vfs-000"]) + assert.EqualValues(t, vols["vfs-001"], reply["vfs"]["vfs-001"]) + } + apitests.Run(t, vfs.Name, tc, tf) +} + +func TestVolumesWithAttachmentsUnattached(t *testing.T) { + tc, _, _, _ := newTestConfigAll(t) + tf := func(config gofig.Config, client types.Client, t *testing.T) { + reply, err := client.API().Volumes(nil, + types.VolAttReqOnlyUnattachedVols) + if err != nil { + t.Fatal(err) + } + + assert.Nil(t, reply["vfs"]["vfs-000"]) + assert.Nil(t, reply["vfs"]["vfs-001"]) + assert.NotNil(t, reply["vfs"]["vfs-002"]) + assert.Len(t, reply["vfs"]["vfs-002"].Attachments, 0) + } + apitests.Run(t, vfs.Name, tc, tf) +} + +func TestVolumesWithAttachmentsAttachedAndUnattached(t *testing.T) { + tc, _, vols, _ := newTestConfigAll(t) + tf := func(config gofig.Config, client types.Client, t *testing.T) { + reply, err := client.API().Volumes(nil, + types.VolAttReqOnlyAttachedVols|types.VolAttReqOnlyUnattachedVols) + if err != nil { + t.Fatal(err) + } + + assert.NotNil(t, reply["vfs"]["vfs-000"]) + assert.NotNil(t, reply["vfs"]["vfs-001"]) + assert.NotNil(t, reply["vfs"]["vfs-002"]) + assert.EqualValues(t, vols["vfs-000"], reply["vfs"]["vfs-000"]) + assert.EqualValues(t, vols["vfs-001"], reply["vfs"]["vfs-001"]) + } + apitests.Run(t, vfs.Name, tc, tf) +} + +func TestVolumesWithAttachmentsMineWithNotMyInstanceID( + t *testing.T) { + tc, _, _, _ := newTestConfigAll(t) + tf := func(config gofig.Config, client types.Client, t *testing.T) { + + ctx := context.Background() + iidm := types.InstanceIDMap{ + "vfs": &types.InstanceID{ID: "none", Driver: "vfs"}, + } + ctx = ctx.WithValue(context.AllInstanceIDsKey, iidm) + + reply, err := client.API().Volumes(ctx, types.VolAttReqForInstance) + if err != nil { + t.Fatal(err) + } + + assert.NotNil(t, reply["vfs"]["vfs-000"]) + assert.NotNil(t, reply["vfs"]["vfs-001"]) + assert.NotNil(t, reply["vfs"]["vfs-002"]) + assert.Len(t, reply["vfs"]["vfs-000"].Attachments, 0) + assert.Len(t, reply["vfs"]["vfs-001"].Attachments, 0) + assert.Len(t, reply["vfs"]["vfs-002"].Attachments, 0) + } + apitests.RunWithClientType(t, types.ControllerClient, vfs.Name, tc, tf) +} + +func TestVolumesWithAttachmentsAttachedAndMineWithNotMyInstanceID( + t *testing.T) { + tc, _, _, _ := newTestConfigAll(t) + tf := func(config gofig.Config, client types.Client, t *testing.T) { + + ctx := context.Background() + iidm := types.InstanceIDMap{ + "vfs": &types.InstanceID{ID: "none", Driver: "vfs"}, + } + ctx = ctx.WithValue(context.AllInstanceIDsKey, iidm) + + reply, err := client.API().Volumes(ctx, + types.VolAttReqOnlyVolsAttachedToInstance) + if err != nil { + t.Fatal(err) + } + + assert.Nil(t, reply["vfs"]["vfs-000"]) + assert.Nil(t, reply["vfs"]["vfs-001"]) + assert.Nil(t, reply["vfs"]["vfs-002"]) + } + apitests.RunWithClientType(t, types.ControllerClient, vfs.Name, tc, tf) +} + func TestVolumesWithAttachmentsWithControllerClient(t *testing.T) { tc, _, _, _ := newTestConfigAll(t) tf := func(config gofig.Config, client types.Client, t *testing.T) { - _, err := client.API().Volumes(nil, types.VolumeAttachmentsTrue) + _, err := client.API().Volumes(nil, types.VolAttReqTrue) assert.Error(t, err) assert.Equal(t, "batch processing error", err.Error()) } @@ -182,6 +321,7 
@@ func TestVolumesByService(t *testing.T) { t.Fatal(err) } for volumeID, volume := range vols { + volume.Attachments = nil assert.NotNil(t, reply[volumeID]) assert.EqualValues(t, volume, reply[volumeID]) } @@ -193,7 +333,7 @@ func TestVolumesByServiceWithAttachments(t *testing.T) { tc, _, vols, _ := newTestConfigAll(t) tf := func(config gofig.Config, client types.Client, t *testing.T) { reply, err := client.API().VolumesByService( - nil, "vfs", types.VolumeAttachmentsTrue) + nil, "vfs", types.VolAttReqTrue) if err != nil { t.Fatal(err) } @@ -213,6 +353,7 @@ func TestVolumeInspect(t *testing.T) { if err != nil { t.Fatal(err) } + vols[reply.ID].Attachments = nil assert.NotNil(t, reply) assert.EqualValues(t, vols[reply.ID], reply) } @@ -223,7 +364,7 @@ func TestVolumeInspectWithAttachments(t *testing.T) { tc, _, vols, _ := newTestConfigAll(t) tf := func(config gofig.Config, client types.Client, t *testing.T) { reply, err := client.API().VolumeInspect( - nil, "vfs", "vfs-000", types.VolumeAttachmentsTrue) + nil, "vfs", "vfs-000", types.VolAttReqTrue) if err != nil { t.Fatal(err) } @@ -407,7 +548,7 @@ func TestVolumeCreateFromSnapshot(t *testing.T) { tf := func(config gofig.Config, client types.Client, t *testing.T) { ogVol, err := client.API().VolumeInspect( - nil, "vfs", "vfs-000", types.VolumeAttachmentsTrue) + nil, "vfs", "vfs-000", types.VolAttReqTrue) assert.NoError(t, err) volumeName := "Volume 003" @@ -535,7 +676,7 @@ func TestVolumeDetachAllForService(t *testing.T) { assert.EqualValues(t, vols, reply) reply, err = client.API().VolumesByService( - nil, vfs.Name, types.VolumeAttachmentsTrue) + nil, vfs.Name, types.VolAttReqTrue) assert.NoError(t, err) assert.Equal(t, 0, len(reply)) @@ -573,7 +714,7 @@ func TestVolumeDetachAll(t *testing.T) { assert.Equal(t, 3, len(reply[vfs.Name])) assert.EqualValues(t, vols, reply[vfs.Name]) - reply, err = client.API().Volumes(nil, types.VolumeAttachmentsTrue) + reply, err = client.API().Volumes(nil, types.VolAttReqTrue) assert.NoError(t, err) assert.Equal(t, 1, len(reply)) assert.Equal(t, 0, len(reply[vfs.Name])) @@ -856,6 +997,7 @@ const volJSON = `{ }, "status": "attached" }], + "attachmentState": 2, "fields": { "owner": "root@example.com", "priority": "2" @@ -869,6 +1011,7 @@ const volNoAttachJSON = `{ "size": 10240, "id": "vfs-%03[1]d", "type": "myType", + "attachmentState": 3, "fields": { "owner": "root@example.com", "priority": "2" diff --git a/examples/ec2/vagrant/README.md b/examples/ec2/vagrant/README.md new file mode 100644 index 00000000..e8741acd --- /dev/null +++ b/examples/ec2/vagrant/README.md @@ -0,0 +1,100 @@ +# EC2 Vagrantfile +This directory includes a Vagrantfile that can be used to bring a libStorage +server/client installation online using REX-Ray and EC2 instances. For example: + +``` +vagrant up --provider=aws --no-parallel +``` + +The following sections outline dependencies, settings, and different execution +scenarios that may be required or useful for using the Vagrantfile. 
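+
+As a quick illustration of the settings described below, a complete session
+might look like the following sketch. The access key, secret key, key pair
+name, and key path values here are all placeholders; the meaning of each
+variable is covered in the `Settings` section:
+
+```bash
+# AWS credentials and key pair information (placeholder values).
+export AWS_AKEY=AKIAIOSFODNN7EXAMPLE
+export AWS_SKEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
+export AWS_KPNM=my-key-pair
+export AWS_SSHK=$HOME/.ssh/my-key-pair.pem
+
+# Bring the nodes online -- the server first, then the clients.
+vagrant up --provider=aws --no-parallel
+```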
+
+### Dependencies
+The following dependencies are required in order to bring the EC2 Vagrant
+environment online:
+
+ * [Vagrant](https://www.vagrantup.com/) 1.8.5+
+ * [vagrant-aws](https://github.com/mitchellh/vagrant-aws)
+ * [vagrant-hostmanager](https://github.com/devopsgroup-io/vagrant-hostmanager)
+
+Once Vagrant is installed, the required plug-ins may be installed with the
+following commands:
+
+```bash
+vagrant plugin install vagrant-aws
+vagrant plugin install vagrant-hostmanager
+```
+
+### Settings
+The following environment variables may be used to configure the `Vagrantfile`.
+
+Environment Variable | Description | Required | Default
+---------------------|-------------|:--------:|--------
+`AWS_AKEY` | The AWS access key ID. | ✓ |
+`AWS_SKEY` | The AWS secret key. | ✓ |
+`AWS_KPNM` | The name of the AWS key pair for instances. | ✓ |
+`AWS_AMI` | The AMI to use. The default assumes an image that supports the `centos` SSH user defined in the `Vagrantfile`. | | ami-ef2af28f
+`AWS_REGN` | The AWS region. | | us-west-2
+`AWS_ZONE` | The AWS availability zone. | | a
+`AWS_SSHK` | Local SSH key used to access AWS instances. | ✓ |
+
+### Nodes
+The `Vagrantfile` deploys a libStorage server and two clients onto EC2
+instances named:
+
+ * libstorage-server
+ * libstorage-client0
+ * libstorage-client1
+
+### Boot Order
+The order in which Vagrant brings the nodes online can vary:
+
+```bash
+vagrant up --provider=aws
+```
+
+The above command brings all three nodes -- the server and both clients --
+online in parallel, since the Vagrant AWS plug-in is configured to support
+parallel execution. However, provisioning will fail if the server is not
+brought online first. That is why, when bringing all of the nodes online at
+once, the following command should be used:
+
+```bash
+vagrant up --provider=aws --no-parallel
+```
+
+The above command brings the nodes online in the following order:
+
+ 1. libstorage-server
+ 2. libstorage-client0
+ 3. libstorage-client1
+
+Another option for starting the environment is to bring up the server first
+and then both clients in parallel. Doing so reproduces a scenario in which
+two clients contend for the same storage resources:
+
+```bash
+vagrant up --provider=aws libstorage-server
+vagrant up --provider=aws '/.*client.*/'
+```
+
+### Scripts
+This package includes several test scripts that provide a simple way to
+execute arbitrary logic on a node after it is brought online:
+
+ * `server.sh`
+ * `client0.sh`
+ * `client1.sh`
+
+The above files are copied to their respective instances and executed
+as soon as the instance is online. That means the `server.sh` script is
+executed before the client nodes are even online, since the server node is
+brought online first.
+
+### Cleanup
+It's important to remember to clean up any EC2, EBS, & EFS resources that may
+have been created along the way. To do so, simply execute the following
+command:
+
+```bash
+vagrant destroy -f
+```
diff --git a/examples/ec2/vagrant/Vagrantfile b/examples/ec2/vagrant/Vagrantfile
new file mode 100644
index 00000000..d3d88d31
--- /dev/null
+++ b/examples/ec2/vagrant/Vagrantfile
@@ -0,0 +1,590 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+require 'fileutils'
+require 'shellwords'
+require 'fog'
+
+# general config
+$autostart_clients = true # set to false to prevent clients from auto-starting
+
+# aws config
+$aws_akey = ENV['AWS_AKEY']
+$aws_skey = ENV['AWS_SKEY']
+$aws_kpnm = ENV['AWS_KPNM']
+$aws_ami = ENV['AWS_AMI'] ?
ENV['AWS_AMI'] : "ami-ef2af28f"
+$aws_regn = ENV['AWS_REGN'] ? ENV['AWS_REGN'] : "us-west-2"
+$aws_zone = ENV['AWS_ZONE'] ? ENV['AWS_ZONE'] : "a"
+
+# ssh config
+$ssh_user = "centos"
+$ssh_pkey = ENV['AWS_SSHK'] # path to SSH private key for AWS
+
+# node info
+$node0_name = "libstorage-server"
+$node0_itype = "m4.large"
+$node0_texps = "server.sh"
+
+$node1_name = "libstorage-client0"
+$node1_itype = "m4.large"
+$node1_texps = "client0.sh"
+
+$node2_name = "libstorage-client1"
+$node2_itype = "t2.micro"
+$node2_texps = "client1.sh"
+
+# Golang information
+$goos = "linux"
+$goarch = "amd64"
+$gover = "1.7.1"
+$gotgz = "go#{$gover}.#{$goos}-#{$goarch}.tar.gz"
+$gourl = "https://storage.googleapis.com/golang/#{$gotgz}"
+$gopath = "/opt/go"
+
+# project info
+$srcs = "#{$gopath}/src/github.com/codedellemc/libstorage"
+
+# the script to ensure the copied working source is a git repo
+$validate_copied_sources = <