From 5fb85d36c8ee965df691b8f2dfddeaab8315c77d Mon Sep 17 00:00:00 2001 From: AlinoeDoctari <75314813+AlinoeDoctari@users.noreply.github.com> Date: Fri, 12 Mar 2021 15:27:49 +0100 Subject: [PATCH] refactor: Seclude AWS request util from internals (#8850) --- lib/aws/request.js | 218 ++++++ lib/plugins/aws/provider.js | 233 +----- test/unit/lib/aws/request.test.js | 401 ++++++++++ test/unit/lib/plugins/aws/provider.test.js | 859 ++------------------- 4 files changed, 687 insertions(+), 1024 deletions(-) create mode 100644 lib/aws/request.js create mode 100644 test/unit/lib/aws/request.test.js diff --git a/lib/aws/request.js b/lib/aws/request.js new file mode 100644 index 000000000..7364ccaa6 --- /dev/null +++ b/lib/aws/request.js @@ -0,0 +1,218 @@ +'use strict'; + +const _ = require('lodash'); +const memoize = require('memoizee'); +const PromiseQueue = require('promise-queue'); +const sdk = require('aws-sdk'); +const ServerlessError = require('../../lib/serverless-error'); +const log = require('@serverless/utils/log'); +const HttpsProxyAgent = require('https-proxy-agent'); +const https = require('https'); +const fs = require('fs'); +const deepSortObjectByKey = require('../../lib/utils/deepSortObjectByKey'); +const ensureString = require('type/string/ensure'); +const isObject = require('type/object/is'); +const wait = require('timers-ext/promise/sleep'); +const chalk = require('chalk'); + +// Activate AWS SDK logging +if (process.env.SLS_DEBUG) { + sdk.config.logger = log; +} + +// Use HTTPS Proxy (Optional) +const proxy = + process.env.proxy || + process.env.HTTP_PROXY || + process.env.http_proxy || + process.env.HTTPS_PROXY || + process.env.https_proxy; + +const proxyOptions = {}; +if (proxy) { + Object.assign(proxyOptions, new URL(proxy)); +} + +const ca = process.env.ca || process.env.HTTPS_CA || process.env.https_ca; + +let caCerts = []; + +if (ca) { + // Can be a single certificate or multiple, comma separated. + const caArr = ca.split(','); + // Replace the newline -- https://stackoverflow.com/questions/30400341 + caCerts = caCerts.concat(caArr.map((cert) => cert.replace(/\\n/g, '\n'))); +} + +const cafile = process.env.cafile || process.env.HTTPS_CAFILE || process.env.https_cafile; + +if (cafile) { + // Can be a single certificate file path or multiple paths, comma separated. + const caPathArr = cafile.split(','); + caCerts = caCerts.concat(caPathArr.map((cafilePath) => fs.readFileSync(cafilePath.trim()))); +} + +if (caCerts.length > 0) { + Object.assign(proxyOptions, { + rejectUnauthorized: true, + ca: caCerts, + }); +} + +// Passes also certifications +if (proxy) { + sdk.config.httpOptions.agent = new HttpsProxyAgent(proxyOptions); +} else if (proxyOptions.ca) { + // Update the agent -- http://docs.aws.amazon.com/sdk-for-javascript/v2/developer-guide/node-registering-certs.html + sdk.config.httpOptions.agent = new https.Agent(proxyOptions); +} + +// Configure the AWS Client timeout (Optional). The default is 120000 (2 minutes) +const timeout = process.env.AWS_CLIENT_TIMEOUT || process.env.aws_client_timeout; +if (timeout) { + sdk.config.httpOptions.timeout = parseInt(timeout, 10); +} +PromiseQueue.configure(Promise); +const requestQueue = new PromiseQueue(2, Infinity); + +const MAX_RETRIES = (() => { + const userValue = Number(process.env.SLS_AWS_REQUEST_MAX_RETRIES); + return userValue >= 0 ? 
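+    // A non-negative SLS_AWS_REQUEST_MAX_RETRIES overrides the default (e.g. SLS_AWS_REQUEST_MAX_RETRIES=0 disables retries entirely); any other value falls back to 4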
userValue : 4; +})(); + +const accelerationCompatibleS3Methods = new Set(['upload', 'putObject']); + +const shouldS3Accelerate = (method, params) => { + if ( + accelerationCompatibleS3Methods.has(method) && + params && + params.isS3TransferAccelerationEnabled + ) { + log('Using S3 Transfer Acceleration Endpoint...'); + return true; + } + return false; +}; + +const getServiceInstance = memoize( + (service, method) => { + const Service = _.get(sdk, service.name); + // we translate params to an object for the service creation by selecting keys of interest + const serviceParams = { ...service.params }; + if (service.name === 'S3') { + serviceParams.useAccelerateEndpoint = shouldS3Accelerate(method, service.params); + } + return new Service(serviceParams); + }, + { + normalizer: ([service, method]) => { + return [JSON.stringify(deepSortObjectByKey(service)), method].join('|'); + }, + } +); + +/** Execute request to AWS service + * @param {Object|string} [service] - Description of the service to call + * @prop [service.name] - Name of the service to call, support subclasses + * @prop [service.params] - Parameters to apply when creating the service and doing the request + * @prop [service.params.credentials] - AWS Credentials to use + * @prop [service.params.useCache ] - Wether to reuse result of the same request cached locally + * @prop [service.params.region] - Region in which the call should be made (default to us-east-1) + * @prop [service.params.isS3TransferAccelerationEnabled] - Use s3 acceleration when available for the request + * @param {String} method - Method to call + * @param {Array} args - Argument for the method call + */ +async function awsRequest(service, method, ...args) { + // Checks regarding expectations on service object + if (isObject(service)) { + ensureString(service.name, { name: 'service.name' }); + } else { + ensureString(service, { name: 'service' }); + service = { name: service }; + } + const BASE_BACKOFF = 5000; + const persistentRequest = async (f, numTry = 0) => { + try { + return await f(); + } catch (e) { + const { providerError } = e; + if ( + numTry < MAX_RETRIES && + providerError && + ((providerError.retryable && + providerError.statusCode !== 403 && + providerError.code !== 'CredentialsError') || + providerError.statusCode === 429) + ) { + const nextTryNum = numTry + 1; + const jitter = Math.random() * 3000 - 1000; + // backoff is between 4 and 7 seconds + const backOff = BASE_BACKOFF + jitter; + log( + [ + `Recoverable error occurred (${e.message}), sleeping for ~${Math.round( + backOff / 1000 + )} seconds.`, + `Try ${nextTryNum} of ${MAX_RETRIES}`, + ].join(' ') + ); + await wait(backOff); + return persistentRequest(f, nextTryNum); + } + throw e; + } + }; + const request = await requestQueue.add(() => + persistentRequest(async () => { + const awsService = getServiceInstance(service, method); + const req = awsService[method](...args); + try { + return await req.promise(); + } catch (err) { + let message = err.message != null ? 
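+          // Some AWS SDK errors carry only an error code; fall back to it when no message is present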
err.message : String(err.code); + if (message.startsWith('Missing credentials in config')) { + // Credentials error + // If failed at last resort (EC2 Metadata check) expose a meaningful error + // with link to AWS documentation + // Otherwise, it's likely that user relied on some AWS creds, which appeared not correct + // therefore expose an AWS message directly + let bottomError = err; + while (bottomError.originalError && !bottomError.message.startsWith('EC2 Metadata')) { + bottomError = bottomError.originalError; + } + + const errorMessage = bottomError.message.startsWith('EC2 Metadata') + ? [ + 'AWS provider credentials not found.', + ' Learn how to set up AWS provider credentials', + ` in our docs here: <${chalk.green('http://slss.io/aws-creds-setup')}>.`, + ].join('') + : bottomError.message; + message = errorMessage; + // We do not want to trigger the retry mechanism for credential errors + throw Object.assign(new ServerlessError(errorMessage), { + providerError: Object.assign({}, err, { retryable: false }), + }); + } + throw Object.assign(new ServerlessError(message, err.code), { + providerError: err, + }); + } + }) + ); + return request; +} + +awsRequest.memoized = memoize(awsRequest, { + promise: true, + normalizer: ([service, method, args]) => { + if (!isObject(service)) service = { name: ensureString(service) }; + return [ + JSON.stringify(deepSortObjectByKey(service)), + method, + JSON.stringify(deepSortObjectByKey(args)), + ].join('|'); + }, +}); + +module.exports = awsRequest; diff --git a/lib/plugins/aws/provider.js b/lib/plugins/aws/provider.js index e9f676faa..dd7b4561b 100644 --- a/lib/plugins/aws/provider.js +++ b/lib/plugins/aws/provider.js @@ -2,24 +2,20 @@ const AWS = require('aws-sdk'); const BbPromise = require('bluebird'); -const HttpsProxyAgent = require('https-proxy-agent'); -const url = require('url'); -const chalk = require('chalk'); const _ = require('lodash'); const naming = require('./lib/naming.js'); -const https = require('https'); const fs = require('fs'); -const objectHash = require('object-hash'); -const PromiseQueue = require('promise-queue'); const getS3EndpointForRegion = require('./utils/getS3EndpointForRegion'); +const memoizeeMethods = require('memoizee/methods'); const readline = require('readline'); const reportDeprecatedProperties = require('../../utils/report-deprecated-properties'); const { ALB_LISTENER_REGEXP } = require('./package/compile/events/alb/lib/validate'); -const memoizeeMethods = require('memoizee/methods'); const d = require('d'); const path = require('path'); const spawnExt = require('child-process-ext/spawn'); const ServerlessError = require('../../serverless-error'); +const awsRequest = require('../../aws/request'); +const log = require('@serverless/utils/log'); const isLambdaArn = RegExp.prototype.test.bind(/^arn:[^:]+:lambda:/); const isEcrUri = RegExp.prototype.test.bind( @@ -60,13 +56,6 @@ const apiGatewayUsagePlan = { additionalProperties: false, }; -PromiseQueue.configure(BbPromise.Promise); - -const MAX_RETRIES = (() => { - const userValue = Number(process.env.SLS_AWS_REQUEST_MAX_RETRIES); - return userValue >= 0 ? userValue : 4; -})(); - const impl = { /** * Determine whether the given credentials are valid. 
It turned out that detecting invalid @@ -219,7 +208,6 @@ class AwsProvider { this.options = options; this.provider = this; // only load plugin in an AWS service context this.serverless = serverless; - this.sdk = AWS; this.serverless.setProvider(constants.providerName, this); this.hooks = { initialize: () => { @@ -1301,70 +1289,10 @@ class AwsProvider { }, }); } - this.requestCache = {}; - this.requestQueue = new PromiseQueue(2, Infinity); // Store credentials in this variable to avoid creating them several times (messes up MFA). this.cachedCredentials = null; Object.assign(this.naming, naming); - - // Activate AWS SDK logging - if (process.env.SLS_DEBUG) { - AWS.config.logger = this.serverless.cli; - } - - // Use HTTPS Proxy (Optional) - const proxy = - process.env.proxy || - process.env.HTTP_PROXY || - process.env.http_proxy || - process.env.HTTPS_PROXY || - process.env.https_proxy; - - const proxyOptions = {}; - if (proxy) { - Object.assign(proxyOptions, url.parse(proxy)); - } - - const ca = process.env.ca || process.env.HTTPS_CA || process.env.https_ca; - - let caCerts = []; - - if (ca) { - // Can be a single certificate or multiple, comma separated. - const caArr = ca.split(','); - // Replace the newline -- https://stackoverflow.com/questions/30400341 - caCerts = caCerts.concat(caArr.map((cert) => cert.replace(/\\n/g, '\n'))); - } - - const cafile = process.env.cafile || process.env.HTTPS_CAFILE || process.env.https_cafile; - - if (cafile) { - // Can be a single certificate file path or multiple paths, comma separated. - const caPathArr = cafile.split(','); - caCerts = caCerts.concat(caPathArr.map((cafilePath) => fs.readFileSync(cafilePath.trim()))); - } - - if (caCerts.length > 0) { - Object.assign(proxyOptions, { - rejectUnauthorized: true, - ca: caCerts, - }); - } - - // Passes also certifications - if (proxy) { - AWS.config.httpOptions.agent = new HttpsProxyAgent(proxyOptions); - } else if (proxyOptions.ca) { - // Update the agent -- http://docs.aws.amazon.com/sdk-for-javascript/v2/developer-guide/node-registering-certs.html - AWS.config.httpOptions.agent = new https.Agent(proxyOptions); - } - - // Configure the AWS Client timeout (Optional). The default is 120000 (2 minutes) - const timeout = process.env.AWS_CLIENT_TIMEOUT || process.env.aws_client_timeout; - if (timeout) { - AWS.config.httpOptions.timeout = parseInt(timeout, 10); - } } static getProviderName() { @@ -1381,137 +1309,24 @@ class AwsProvider { * @prop [options.region] - Specify when to request to different region */ async request(service, method, params, options) { - const credentials = Object.assign({}, this.getCredentials()); - credentials.region = this.getRegion(); - // Make sure options is an object (honors wrong calls of request) - const requestOptions = _.isObject(options) ? 
options : {}; - const shouldCache = _.get(requestOptions, 'useCache', false); - const paramsWithRegion = _.merge({}, params, { - region: _.get(options, 'region'), - }); - const paramsHash = objectHash.sha1(paramsWithRegion); - const BASE_BACKOFF = 5000; - const persistentRequest = (f) => - new BbPromise((resolve, reject) => { - const doCall = (numTry) => { - f() - // We're resembling if/else logic, therefore single `then` instead of `then`/`catch` pair - .then(resolve, (e) => { - const { providerError } = e; - if ( - numTry < MAX_RETRIES && - providerError && - ((providerError.retryable && - providerError.statusCode !== 403 && - providerError.code !== 'CredentialsError') || - providerError.statusCode === 429) - ) { - const nextTryNum = numTry + 1; - const jitter = Math.random() * 3000 - 1000; - // backoff is between 4 and 7 seconds - const backOff = BASE_BACKOFF + jitter; - - this.serverless.cli.log( - [ - `Recoverable error occurred (${e.message}), sleeping for ~${Math.round( - backOff / 1000 - )} seconds.`, - `Try ${nextTryNum} of ${MAX_RETRIES}`, - ].join(' ') - ); - setTimeout(doCall, backOff, nextTryNum); - } else { - reject(e); - } - }); - }; - return doCall(0); - }); - // Emit a warning for misuses of the old signature including stage and region // TODO: Determine calling module and log that if (process.env.SLS_DEBUG && options != null && !_.isObject(options)) { - this.serverless.cli.log('WARNING: Inappropriate call of provider.request()'); + log('WARNING: Inappropriate call of provider.request()'); } - - // Support S3 Transfer Acceleration - if (this.canUseS3TransferAcceleration(service, method)) { - this.enableS3TransferAcceleration(credentials); - } - - if (shouldCache) { - const cachedRequest = _.get(this.requestCache, `${service}.${method}.${paramsHash}`); - if (cachedRequest) { - return BbPromise.resolve(cachedRequest); - } - } - - const request = this.requestQueue.add(() => - persistentRequest(() => { - if (options && options.region) { - credentials.region = options.region; - } - const Service = _.get(this.sdk, service); - const awsService = new Service(credentials); - const req = awsService[method](params); - - // TODO: Add listeners, put Debug statements here... - // req.on('send', function (r) {console.log(r)}); - - const promise = req.promise - ? req.promise() - : BbPromise.fromCallback((cb) => { - req.send(cb); - }); - return promise.catch((err) => { - let message = err.message != null ? err.message : String(err.code); - if (message.startsWith('Missing credentials in config')) { - // Credentials error - // If failed at last resort (EC2 Metadata check) expose a meaningful error - // with link to AWS documentation - // Otherwise, it's likely that user relied on some AWS creds, which appeared not correct - // therefore expose an AWS message directly - let bottomError = err; - while (bottomError.originalError && !bottomError.message.startsWith('EC2 Metadata')) { - bottomError = bottomError.originalError; - } - - const errorMessage = bottomError.message.startsWith('EC2 Metadata') - ? 
[ - 'AWS provider credentials not found.', - ' Learn how to set up AWS provider credentials', - ` in our docs here: <${chalk.green('http://slss.io/aws-creds-setup')}>.`, - ].join('') - : bottomError.message; - message = errorMessage; - // We do not want to trigger the retry mechanism for credential errors - return BbPromise.reject( - Object.assign(new ServerlessError(errorMessage), { - providerError: Object.assign({}, err, { retryable: false }), - }) - ); - } - - return BbPromise.reject( - Object.assign(new ServerlessError(message), { - providerError: err, - }) - ); - }); - }).then((data) => { - const result = BbPromise.resolve(data); - if (shouldCache) { - _.set(this.requestCache, `${service}.${method}.${paramsHash}`, result); - } - return result; - }) - ); - - if (shouldCache) { - _.set(this.requestCache, `${service}.${method}.${paramsHash}`, request); - } - - return request; + const requestOptions = _.isObject(options) ? options : {}; + const shouldCache = _.get(requestOptions, 'useCache', false); + // Copy is required as the credentials may be modified during the request + const credentials = Object.assign({}, this.getCredentials()); + const serviceOptions = { + name: service, + params: { + credentials: credentials.credentials, + region: _.get(requestOptions, 'region', this.getRegion()), + isS3TransferAccelerationEnabled: this.isS3TransferAccelerationEnabled(), + }, + }; + return (shouldCache ? awsRequest.memoized : awsRequest)(serviceOptions, method, params); } /** @@ -1561,15 +1376,6 @@ class AwsProvider { return result; } - canUseS3TransferAcceleration(service, method) { - // TODO enable more S3 APIs? - return ( - service === 'S3' && - ['upload', 'putObject'].indexOf(method) !== -1 && - this.isS3TransferAccelerationEnabled() - ); - } - // This function will be used to block the addition of transfer acceleration options // to the cloudformation template for regions where acceleration is not supported (ie, govcloud) isS3TransferAccelerationSupported() { @@ -1591,11 +1397,6 @@ class AwsProvider { delete this.options['aws-s3-accelerate']; } - enableS3TransferAcceleration(credentials) { - this.serverless.cli.log('Using S3 Transfer Acceleration Endpoint...'); - credentials.useAccelerateEndpoint = true; // eslint-disable-line no-param-reassign - } - getValues(source, objectPaths) { return objectPaths.map((objectPath) => ({ path: objectPath, diff --git a/test/unit/lib/aws/request.test.js b/test/unit/lib/aws/request.test.js new file mode 100644 index 000000000..737904163 --- /dev/null +++ b/test/unit/lib/aws/request.test.js @@ -0,0 +1,401 @@ +'use strict'; + +const sinon = require('sinon'); +const chai = require('chai'); +const proxyquire = require('proxyquire'); +const overrideEnv = require('process-utils/override-env'); + +const expect = chai.expect; + +chai.use(require('chai-as-promised')); +chai.use(require('sinon-chai')); + +describe('#request', () => { + it('should enable aws logging when debug log is enabled', () => { + const configStub = sinon.stub(); + overrideEnv(() => { + process.env.SLS_DEBUG = true; + proxyquire('../../../../lib/aws/request', { + 'aws-sdk': { config: configStub }, + }); + expect(typeof configStub.logger).to.equal('function'); + }); + }); + + describe('Credentials support', () => { + // awsRequest supports credentials from two sources: + // - an AWS credentials object passed as part of params in the call + // - environment variable read by the AWS SDK + + // Ensure we control the process env variable so that no credentials + // are available by default + let 
rEnv; + beforeEach(() => { + const { restoreEnv } = overrideEnv(); + rEnv = restoreEnv; + }); + + afterEach(() => { + rEnv(); + }); + + it('should produce a meaningful error when no supported credentials are provided', async () => { + process.env.SLS_DEBUG = true; + const awsRequest = require('../../../../lib/aws/request'); + return expect( + awsRequest( + { + name: 'S3', + }, + 'putObject', + { + Bucket: 'test-bucket', + Key: 'test-key', + } + ) + ).to.be.rejectedWith( + 'AWS provider credentials not found. Learn how to set up AWS provider credentials in our docs here: <\u001b[32mhttp://slss.io/aws-creds-setup\u001b[39m>.' + ); + }); + + it('should support passing params without credentials', async () => { + const awsRequest = require('../../../../lib/aws/request'); + return expect( + awsRequest( + { + name: 'S3', + params: { isS3TransferAccelerationEnabled: true }, + }, + 'putObject', + { + Bucket: 'test-bucket', + Key: 'test-key', + } + ) + ).to.be.rejectedWith( + 'AWS provider credentials not found. Learn how to set up AWS provider credentials in our docs here: <\u001b[32mhttp://slss.io/aws-creds-setup\u001b[39m>.' + ); + }); + }); + + it('should invoke expected AWS SDK methods', async () => { + class FakeS3 { + putObject() { + return { + promise: async () => { + return { called: true }; + }, + }; + } + } + const awsRequest = proxyquire('../../../../lib/aws/request', { + 'aws-sdk': { S3: FakeS3 }, + }); + const res = await awsRequest({ name: 'S3' }, 'putObject'); + expect(res.called).to.equal(true); + }); + + it('should support string for service argument', async () => { + class FakeS3 { + putObject() { + return { + promise: async () => { + return { called: true }; + }, + }; + } + } + const awsRequest = proxyquire('../../../../lib/aws/request', { + 'aws-sdk': { S3: FakeS3 }, + }); + const res = await awsRequest('S3', 'putObject', {}); + return expect(res.called).to.equal(true); + }); + + it('should handle subclasses', async () => { + class DocumentClient { + put() { + return { + promise: () => { + return { called: true }; + }, + }; + } + } + const awsRequest = proxyquire('../../../../lib/aws/request', { + 'aws-sdk': { DynamoDB: { DocumentClient } }, + }); + const res = await awsRequest({ name: 'DynamoDB.DocumentClient' }, 'put', {}); + return expect(res.called).to.equal(true); + }); + + it('should request to the specified region if region in options set', async () => { + class FakeCloudFormation { + constructor(config) { + this.config = config; + } + describeStacks() { + return { + promise: () => + Promise.resolve({ + region: this.config.region, + }), + }; + } + } + const awsRequest = proxyquire('../../../../lib/aws/request', { + 'aws-sdk': { CloudFormation: FakeCloudFormation }, + }); + const res = await awsRequest( + { name: 'CloudFormation', params: { credentials: {}, region: 'ap-northeast-1' } }, + 'describeStacks', + { StackName: 'foo' } + ); + return expect(res).to.eql({ region: 'ap-northeast-1' }); + }); + + describe('Retries', () => { + it('should retry on retryable errors (429)', async () => { + const error = { + statusCode: 429, + retryable: true, + message: 'Testing retry', + }; + const sendFake = { + promise: sinon.stub(), + }; + sendFake.promise.onCall(0).returns(Promise.reject(error)); + sendFake.promise.onCall(1).returns(Promise.resolve({ data: {} })); + class FakeS3 { + error() { + return sendFake; + } + } + const awsRequest = proxyquire('../../../../lib/aws/request', { + 'aws-sdk': { S3: FakeS3 }, + 'timers-ext/promise/sleep': () => Promise.resolve(), + }); + const res 
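+      // 'timers-ext/promise/sleep' is stubbed above to resolve immediately, so the retry path runs without the real ~5s backoff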
= await awsRequest({ name: 'S3' }, 'error'); + expect(sendFake.promise).to.have.been.calledTwice; + expect(res).to.exist; + }); + + it('should retry if error code is 429 and retryable is set to false', async () => { + const error = { + statusCode: 429, + retryable: false, + message: 'Testing retry', + }; + const sendFake = { + promise: sinon.stub(), + }; + sendFake.promise.onCall(0).returns(Promise.reject(error)); + sendFake.promise.onCall(1).returns(Promise.resolve({})); + class FakeS3 { + error() { + return sendFake; + } + } + const awsRequest = proxyquire('../../../../lib/aws/request', { + 'aws-sdk': { S3: FakeS3 }, + 'timers-ext/promise/sleep': () => Promise.resolve(), + }); + const res = await awsRequest({ name: 'S3' }, 'error'); + expect(res).to.exist; + expect(sendFake.promise).to.have.been.calledTwice; + }); + + it('should not retry if error code is 403 and retryable is set to true', async () => { + const error = { + providerError: { + statusCode: 403, + retryable: true, + code: 'retry', + message: 'Testing retry', + }, + }; + const sendFake = { + promise: sinon.stub(), + }; + sendFake.promise.onFirstCall().rejects(error); + sendFake.promise.onSecondCall().resolves({}); + class FakeS3 { + error() { + return sendFake; + } + } + const awsRequest = proxyquire('../../../../lib/aws/request', { + 'aws-sdk': { S3: FakeS3 }, + }); + expect(awsRequest({ name: 'S3' }, 'error')).to.be.rejected; + return expect(sendFake.promise).to.have.been.calledOnce; + }); + + it('should expose non-retryable errors', () => { + const error = { + statusCode: 500, + message: 'Some error message', + }; + class FakeS3 { + error() { + return { + promise: async () => { + throw error; + }, + }; + } + } + const awsRequest = proxyquire('../../../../lib/aws/request', { + 'aws-sdk': { S3: FakeS3 }, + }); + return expect(awsRequest({ name: 'S3' }, 'error')).to.be.rejected; + }); + }); + + it('should expose original error message in thrown error message', () => { + const awsErrorResponse = { + message: 'Something went wrong...', + code: 'Forbidden', + region: null, + time: '2019-01-24T00:29:01.780Z', + requestId: 'DAF12C1111A62C6', + extendedRequestId: '1OnSExiLCOsKrsdjjyds31w=', + statusCode: 403, + retryable: false, + retryDelay: 13.433158364430508, + }; + class FakeS3 { + error() { + return { + promise: () => Promise.reject(awsErrorResponse), + }; + } + } + const awsRequest = proxyquire('../../../../lib/aws/request', { + 'aws-sdk': { S3: FakeS3 }, + }); + return expect(awsRequest({ name: 'S3' }, 'error')).to.be.rejectedWith(awsErrorResponse.message); + }); + + it('should default to error code if error message is non-existent', () => { + const awsErrorResponse = { + message: null, + code: 'Forbidden', + region: null, + time: '2019-01-24T00:29:01.780Z', + requestId: 'DAF12C1111A62C6', + extendedRequestId: '1OnSExiLCOsKrsdjjyds31w=', + statusCode: 403, + retryable: false, + retryDelay: 13.433158364430508, + }; + class FakeS3 { + error() { + return { + promise: () => Promise.reject(awsErrorResponse), + }; + } + } + const awsRequest = proxyquire('../../../../lib/aws/request', { + 'aws-sdk': { S3: FakeS3 }, + }); + return expect(awsRequest({ name: 'S3' }, 'error')).to.be.rejectedWith(awsErrorResponse.code); + }); + + it('should enable S3 acceleration if "--aws-s3-accelerate" CLI option is provided', async () => { + // mocking S3 for testing + class FakeS3 { + constructor(params) { + this.useAccelerateEndpoint = params.useAccelerateEndpoint; + } + putObject() { + return { + promise: () => Promise.resolve(this), + }; + } + } 
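+    // The stub constructor records `useAccelerateEndpoint`, so the assertion below can verify that
+    // `isS3TransferAccelerationEnabled` in the service params is translated into the SDK option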
+ const awsRequest = proxyquire('../../../../lib/aws/request', { + 'aws-sdk': { S3: FakeS3 }, + }); + const service = await awsRequest( + { name: 'S3', params: { isS3TransferAccelerationEnabled: true } }, + 'putObject', + {} + ); + return expect(service.useAccelerateEndpoint).to.be.true; + }); + + describe('Caching through memoize', () => { + it('should reuse the result if arguments are the same', async () => { + // mocking CF for testing + const expectedResult = { called: true }; + const promiseStub = sinon.stub().returns(Promise.resolve({ called: true })); + class FakeCF { + describeStacks() { + return { + promise: promiseStub, + }; + } + } + const awsRequest = proxyquire('../../../../lib/aws/request', { + 'aws-sdk': { CloudFormation: FakeCF }, + }); + const numTests = 100; + const executeRequest = () => + awsRequest.memoized( + { name: 'CloudFormation', params: { credentials: {}, useCache: true } }, + 'describeStacks', + {} + ); + const requests = []; + for (let n = 0; n < numTests; n++) { + requests.push(executeRequest()); + } + return Promise.all(requests).then((results) => { + expect(Object.keys(results).length).to.equal(numTests); + results.forEach((result) => { + expect(result).to.deep.equal(expectedResult); + }); + expect(promiseStub).to.have.been.calledOnce; + }); + }); + + it('should not reuse the result if the region change', async () => { + const expectedResult = { called: true }; + const promiseStub = sinon.stub().returns(Promise.resolve({ called: true })); + class FakeCF { + constructor(credentials) { + this.credentials = credentials; + } + + describeStacks() { + return { + promise: promiseStub, + }; + } + } + + const awsRequest = proxyquire('../../../../lib/aws/request', { + 'aws-sdk': { CloudFormation: FakeCF }, + }); + + const executeRequestWithRegion = (region) => + awsRequest( + { name: 'CloudFormation', params: { region, credentials: {}, useCache: true } }, + 'describeStacks', + { StackName: 'same-stack' } + ); + const requests = []; + requests.push(executeRequestWithRegion('us-east-1')); + requests.push(executeRequestWithRegion('ap-northeast-1')); + + return Promise.all(requests).then((results) => { + expect(Object.keys(results).length).to.equal(2); + results.forEach((result) => { + expect(result).to.deep.equal(expectedResult); + }); + return expect(promiseStub.callCount).to.equal(2); + }); + }); + }); +}); diff --git a/test/unit/lib/plugins/aws/provider.test.js b/test/unit/lib/plugins/aws/provider.test.js index 10bb79563..816f987ab 100644 --- a/test/unit/lib/plugins/aws/provider.test.js +++ b/test/unit/lib/plugins/aws/provider.test.js @@ -3,13 +3,10 @@ /* eslint-disable no-unused-expressions */ const _ = require('lodash'); -const BbPromise = require('bluebird'); const chai = require('chai'); +const path = require('path'); const proxyquire = require('proxyquire'); const sinon = require('sinon'); -const fs = require('fs'); -const os = require('os'); -const path = require('path'); const overrideEnv = require('process-utils/override-env'); const AwsProvider = require('../../../../../lib/plugins/aws/provider'); @@ -51,39 +48,10 @@ describe('AwsProvider', () => { expect(typeof awsProvider.serverless).to.not.equal('undefined'); }); - it('should set AWS instance', () => { - expect(typeof awsProvider.sdk).to.not.equal('undefined'); - }); - it('should set the provider property', () => { expect(awsProvider.provider).to.equal(awsProvider); }); - it('should have no AWS logger', () => { - expect(awsProvider.sdk.config.logger == null).to.be.true; - }); - - it('should set AWS 
logger', () => { - process.env.SLS_DEBUG = 'true'; - const newAwsProvider = new AwsProvider(serverless, options); - - expect(typeof newAwsProvider.sdk.config.logger).to.not.equal('undefined'); - }); - - it('should set AWS proxy', () => { - process.env.proxy = 'http://a.b.c.d:n'; - const newAwsProvider = new AwsProvider(serverless, options); - - expect(typeof newAwsProvider.sdk.config.httpOptions.agent).to.not.equal('undefined'); - }); - - it('should set AWS timeout', () => { - process.env.AWS_CLIENT_TIMEOUT = '120000'; - const newAwsProvider = new AwsProvider(serverless, options); - - expect(typeof newAwsProvider.sdk.config.httpOptions.timeout).to.not.equal('undefined'); - }); - describe('stage name validation', () => { const stages = ['myStage', 'my-stage', 'my_stage', "${opt:stage, 'prod'}"]; stages.forEach((stage) => { @@ -119,74 +87,6 @@ describe('AwsProvider', () => { }); }); - describe('certificate authority - environment variable', () => { - it('should set AWS ca single', () => { - process.env.ca = '-----BEGIN CERTIFICATE-----\n-----END CERTIFICATE-----'; - const newAwsProvider = new AwsProvider(serverless, options); - - expect(typeof newAwsProvider.sdk.config.httpOptions.agent).to.not.equal('undefined'); - }); - - it('should set AWS ca single and proxy', () => { - process.env.ca = '-----BEGIN CERTIFICATE-----\n-----END CERTIFICATE-----'; - process.env.proxy = 'http://a.b.c.d:n'; - - const newAwsProvider = new AwsProvider(serverless, options); - - expect(typeof newAwsProvider.sdk.config.httpOptions.agent).to.not.equal('undefined'); - }); - - it('should set AWS ca multiple', () => { - const certContents = '-----BEGIN CERTIFICATE-----\n-----END CERTIFICATE-----'; - process.env.ca = `${certContents},${certContents}`; - const newAwsProvider = new AwsProvider(serverless, options); - - expect(typeof newAwsProvider.sdk.config.httpOptions.agent).to.not.equal('undefined'); - }); - }); - - describe('certificate authority - file', () => { - const certContents = '-----BEGIN CERTIFICATE-----\n-----END CERTIFICATE-----'; - const tmpdir = os.tmpdir(); - let file1 = null; - let file2 = null; - - beforeEach('Create CA Files and env vars', () => { - file1 = path.join(tmpdir, 'ca1.txt'); - file2 = path.join(tmpdir, 'ca2.txt'); - fs.writeFileSync(file1, certContents); - fs.writeFileSync(file2, certContents); - }); - - afterEach('CA File Cleanup', () => { - // delete files - fs.unlinkSync(file1); - fs.unlinkSync(file2); - }); - - it('should set AWS cafile single', () => { - process.env.cafile = file1; - const newAwsProvider = new AwsProvider(serverless, options); - - expect(typeof newAwsProvider.sdk.config.httpOptions.agent).to.not.equal('undefined'); - }); - - it('should set AWS cafile multiple', () => { - process.env.cafile = `${file1},${file2}`; - const newAwsProvider = new AwsProvider(serverless, options); - - expect(typeof newAwsProvider.sdk.config.httpOptions.agent).to.not.equal('undefined'); - }); - - it('should set AWS ca and cafile', () => { - process.env.ca = certContents; - process.env.cafile = file1; - const newAwsProvider = new AwsProvider(serverless, options); - - expect(typeof newAwsProvider.sdk.config.httpOptions.agent).to.not.equal('undefined'); - }); - }); - describe('deploymentBucket configuration', () => { it('should do nothing if not defined', () => { serverless.service.provider.deploymentBucket = undefined; @@ -208,674 +108,6 @@ describe('AwsProvider', () => { }); }); - describe('#request()', () => { - beforeEach(() => { - const originalSetTimeout = setTimeout; - sinon - 
.stub(global, 'setTimeout') - .callsFake((cb, timeout) => originalSetTimeout(cb, Math.min(timeout || 0, 10))); - }); - - afterEach(() => { - global.setTimeout.restore(); - }); - - it('should call correct aws method', () => { - // mocking S3 for testing - class FakeS3 { - constructor(credentials) { - this.credentials = credentials; - } - - putObject() { - return { - send: (cb) => cb(null, { called: true }), - }; - } - } - awsProvider.sdk = { - S3: FakeS3, - }; - awsProvider.serverless.service.environment = { - vars: {}, - stages: { - dev: { - vars: { - profile: 'default', - }, - regions: {}, - }, - }, - }; - - return awsProvider.request('S3', 'putObject', {}).then((data) => { - expect(data.called).to.equal(true); - }); - }); - - it('should handle subclasses', () => { - class DocumentClient { - constructor(credentials) { - this.credentials = credentials; - } - - put() { - return { - send: (cb) => cb(null, { called: true }), - }; - } - } - - awsProvider.sdk = { - DynamoDB: { - DocumentClient, - }, - }; - awsProvider.serverless.service.environment = { - vars: {}, - stages: { - dev: { - vars: { - profile: 'default', - }, - regions: {}, - }, - }, - }; - - return awsProvider.request('DynamoDB.DocumentClient', 'put', {}).then((data) => { - expect(data.called).to.equal(true); - }); - }); - - it('should call correct aws method with a promise', () => { - // mocking API Gateway for testing - class FakeAPIGateway { - constructor(credentials) { - this.credentials = credentials; - } - - getRestApis() { - return { - promise: () => BbPromise.resolve({ called: true }), - }; - } - } - awsProvider.sdk = { - APIGateway: FakeAPIGateway, - }; - awsProvider.serverless.service.environment = { - vars: {}, - stages: { - dev: { - vars: { - profile: 'default', - }, - regions: {}, - }, - }, - }; - - return awsProvider.request('APIGateway', 'getRestApis', {}).then((data) => { - expect(data.called).to.equal(true); - }); - }); - - it('should request to the specified region if region in options set', () => { - // mocking S3 for testing - class FakeCloudForamtion { - constructor(config) { - this.config = config; - } - - describeStacks() { - return { - send: (cb) => - cb(null, { - region: this.config.region, - }), - }; - } - } - awsProvider.sdk = { - CloudFormation: FakeCloudForamtion, - }; - awsProvider.serverless.service.environment = { - vars: {}, - stages: { - dev: { - vars: { - profile: 'default', - }, - regions: {}, - }, - }, - }; - - return awsProvider - .request( - 'CloudFormation', - 'describeStacks', - { StackName: 'foo' }, - { region: 'ap-northeast-1' } - ) - .then((data) => { - expect(data).to.eql({ region: 'ap-northeast-1' }); - }); - }); - - it('should retry if error code is 429', (done) => { - const error = { - statusCode: 429, - retryable: true, - message: 'Testing retry', - }; - const sendFake = { - send: sinon.stub(), - }; - sendFake.send.onFirstCall().yields(error); - sendFake.send.yields(undefined, {}); - class FakeS3 { - constructor(credentials) { - this.credentials = credentials; - } - - error() { - return sendFake; - } - } - awsProvider.sdk = { - S3: FakeS3, - }; - awsProvider - .request('S3', 'error', {}) - .then((data) => { - expect(data).to.exist; - expect(sendFake.send).to.have.been.calledTwice; - done(); - }) - .catch(done); - }); - - it('should retry if error code is 429 and retryable is set to false', (done) => { - const error = { - statusCode: 429, - retryable: false, - message: 'Testing retry', - }; - const sendFake = { - send: sinon.stub(), - }; - sendFake.send.onFirstCall().yields(error); - 
sendFake.send.yields(undefined, {}); - class FakeS3 { - constructor(credentials) { - this.credentials = credentials; - } - - error() { - return sendFake; - } - } - awsProvider.sdk = { - S3: FakeS3, - }; - awsProvider - .request('S3', 'error', {}) - .then((data) => { - expect(data).to.exist; - expect(sendFake.send).to.have.been.calledTwice; - done(); - }) - .catch(done); - }); - - it('should not retry if error code is 403 and retryable is set to true', (done) => { - const error = { - statusCode: 403, - retryable: true, - message: 'Testing retry', - }; - const sendFake = { - send: sinon.stub(), - }; - sendFake.send.onFirstCall().yields(error); - sendFake.send.yields(undefined, {}); - class FakeS3 { - constructor(credentials) { - this.credentials = credentials; - } - - error() { - return sendFake; - } - } - awsProvider.sdk = { - S3: FakeS3, - }; - awsProvider - .request('S3', 'error', {}) - .then(() => done('Should not succeed')) - .catch(() => { - expect(sendFake.send).to.have.been.calledOnce; - done(); - }); - }); - - it('should reject errors', (done) => { - const error = { - statusCode: 500, - message: 'Some error message', - }; - class FakeS3 { - constructor(credentials) { - this.credentials = credentials; - } - - error() { - return { - send(cb) { - cb(error); - }, - }; - } - } - awsProvider.sdk = { - S3: FakeS3, - }; - awsProvider - .request('S3', 'error', {}) - .then(() => done('Should not succeed')) - .catch(() => done()); - }); - - it('should use error message if it exists', (done) => { - const awsErrorResponse = { - message: 'Something went wrong...', - code: 'Forbidden', - region: null, - time: '2019-01-24T00:29:01.780Z', - requestId: 'DAF12C1111A62C6', - extendedRequestId: '1OnSExiLCOsKrsdjjyds31w=', - statusCode: 403, - retryable: false, - retryDelay: 13.433158364430508, - }; - - class FakeS3 { - constructor(credentials) { - this.credentials = credentials; - } - - error() { - return { - send(cb) { - cb(awsErrorResponse); - }, - }; - } - } - awsProvider.sdk = { - S3: FakeS3, - }; - awsProvider - .request('S3', 'error', {}) - .then(() => done('Should not succeed')) - .catch((err) => { - expect(err.message).to.eql(awsErrorResponse.message); - done(); - }) - .catch(done); - }); - - it('should default to error code if error message is non-existent', (done) => { - const awsErrorResponse = { - message: null, - code: 'Forbidden', - region: null, - time: '2019-01-24T00:29:01.780Z', - requestId: 'DAF12C1111A62C6', - extendedRequestId: '1OnSExiLCOsKrsdjjyds31w=', - statusCode: 403, - retryable: false, - retryDelay: 13.433158364430508, - }; - - class FakeS3 { - constructor(credentials) { - this.credentials = credentials; - } - - error() { - return { - send(cb) { - cb(awsErrorResponse); - }, - }; - } - } - awsProvider.sdk = { - S3: FakeS3, - }; - awsProvider - .request('S3', 'error', {}) - .then(() => done('Should not succeed')) - .catch((err) => { - expect(err.message).to.eql(awsErrorResponse.code); - done(); - }) - .catch(done); - }); - - it('should return ref to docs for missing credentials', (done) => { - const error = { - statusCode: 403, - message: 'Missing credentials in config', - originalError: { message: 'EC2 Metadata roleName request returned error' }, - }; - class FakeS3 { - constructor(credentials) { - this.credentials = credentials; - } - - error() { - return { - send(cb) { - cb(error); - }, - }; - } - } - awsProvider.sdk = { - S3: FakeS3, - }; - awsProvider - .request('S3', 'error', {}) - .then(() => done('Should not succeed')) - .catch((err) => { - 
expect(err.message).to.contain('in our docs here:'); - done(); - }) - .catch(done); - }); - - it('should not retry for missing credentials', (done) => { - const error = { - statusCode: 403, - message: 'Missing credentials in config', - originalError: { message: 'EC2 Metadata roleName request returned error' }, - }; - const sendFake = { - send: sinon.stub().yields(error), - }; - class FakeS3 { - constructor(credentials) { - this.credentials = credentials; - } - - error() { - return sendFake; - } - } - awsProvider.sdk = { - S3: FakeS3, - }; - awsProvider - .request('S3', 'error', {}) - .then(() => done('Should not succeed')) - .catch((err) => { - expect(sendFake.send).to.have.been.calledOnce; - expect(err.message).to.contain('in our docs here:'); - done(); - }) - .catch(done); - }); - - it('should enable S3 acceleration if CLI option is provided', () => { - // mocking S3 for testing - class FakeS3 { - constructor(credentials) { - this.credentials = credentials; - } - - putObject() { - return { - send: (cb) => cb(null, { called: true }), - }; - } - } - awsProvider.sdk = { - S3: FakeS3, - }; - awsProvider.serverless.service.environment = { - vars: {}, - stages: { - dev: { - vars: { - profile: 'default', - }, - regions: {}, - }, - }, - }; - - const enableS3TransferAccelerationStub = sinon - .stub(awsProvider, 'enableS3TransferAcceleration') - .resolves(); - - awsProvider.options['aws-s3-accelerate'] = true; - return awsProvider.request('S3', 'putObject', {}).then(() => { - expect(enableS3TransferAccelerationStub.calledOnce).to.equal(true); - }); - }); - - describe('using the request cache', () => { - it('should call correct aws method', () => { - // mocking CF for testing - class FakeCF { - constructor(credentials) { - this.credentials = credentials; - } - - describeStacks() { - return { - send: (cb) => cb(null, { called: true }), - }; - } - } - awsProvider.sdk = { - CloudFormation: FakeCF, - }; - awsProvider.serverless.service.environment = { - vars: {}, - stages: { - dev: { - vars: { - profile: 'default', - }, - regions: {}, - }, - }, - }; - - return awsProvider - .request('CloudFormation', 'describeStacks', {}, { useCache: true }) - .then((data) => { - expect(data.called).to.equal(true); - }); - }); - - it('should request if same service, method and params but different region in option', () => { - const expectedResult = { called: true }; - const sendStub = sinon.stub().yields(null, { called: true }); - const requestSpy = sinon.spy(awsProvider, 'request'); - class FakeCF { - constructor(credentials) { - this.credentials = credentials; - } - - describeStacks() { - return { - send: sendStub, - }; - } - } - awsProvider.sdk = { - CloudFormation: FakeCF, - }; - const executeRequestWithRegion = (region) => - awsProvider.request( - 'CloudFormation', - 'describeStacks', - { StackName: 'same-stack' }, - { - useCache: true, - region, - } - ); - const requests = []; - requests.push(BbPromise.try(() => executeRequestWithRegion('us-east-1'))); - requests.push(BbPromise.try(() => executeRequestWithRegion('ap-northeast-1'))); - - return BbPromise.all(requests) - .then((results) => { - expect(Object.keys(results).length).to.equal(2); - results.forEach((result) => { - expect(result).to.deep.equal(expectedResult); - }); - return expect(sendStub.callCount).to.equal(2); - }) - .finally(() => { - requestSpy.restore(); - }); - }); - - it('should resolve to the same response with multiple parallel requests', () => { - const expectedResult = { called: true }; - const sendStub = sinon.stub().yields(null, { called: 
true }); - const requestSpy = sinon.spy(awsProvider, 'request'); - class FakeCF { - constructor(credentials) { - this.credentials = credentials; - } - - describeStacks() { - return { - send: sendStub, - }; - } - } - awsProvider.sdk = { - CloudFormation: FakeCF, - }; - - awsProvider.serverless.service.environment = { - vars: {}, - stages: { - dev: { - vars: { - profile: 'default', - }, - regions: {}, - }, - }, - }; - - const numTests = 1000; - const executeRequest = () => - awsProvider.request('CloudFormation', 'describeStacks', {}, { useCache: true }); - const requests = []; - for (let n = 0; n < numTests; n++) { - requests.push(BbPromise.try(() => executeRequest())); - } - - return BbPromise.all(requests) - .then((results) => { - expect(Object.keys(results).length).to.equal(numTests); - results.forEach((result) => { - expect(result).to.deep.equal(expectedResult); - }); - return BbPromise.join( - expect(sendStub).to.have.been.calledOnce, - expect(requestSpy).to.have.callCount(numTests) - ); - }) - .finally(() => { - requestSpy.restore(); - }); - }); - - describe('STS tokens', () => { - let newAwsProvider; - let originalProviderProfile; - let originalEnvironmentVariables; - const relevantEnvironment = { - AWS_SHARED_CREDENTIALS_FILE: getTmpFilePath('credentials'), - }; - - beforeEach(() => { - originalProviderProfile = serverless.service.provider.profile; - originalEnvironmentVariables = replaceEnv(relevantEnvironment); - serverless.utils.writeFileSync( - relevantEnvironment.AWS_SHARED_CREDENTIALS_FILE, - '[default]\n' + - 'aws_access_key_id = 1111\n' + - 'aws_secret_access_key = 22222\n' + - '\n' + - '[async]\n' + - 'role_arn = arn:123\n' + - 'source_profile = default' - ); - newAwsProvider = new AwsProvider(serverless, options); - }); - - afterEach(() => { - replaceEnv(originalEnvironmentVariables); - serverless.service.provider.profile = originalProviderProfile; - }); - - it('should retain reference to STS tokens when updated via SDK', () => { - const expectedToken = '123'; - - serverless.service.provider.profile = 'async'; - const startToken = newAwsProvider.getCredentials().credentials.sessionToken; - expect(startToken).to.not.equal(expectedToken); - - class FakeCloudFormation { - constructor(credentials) { - // Not sure where the the SDK resolves the STS, so for the test it's here - this.credentials = credentials; - this.credentials.credentials.sessionToken = expectedToken; - } - - describeStacks() { - return { - send: (cb) => cb(null, {}), - }; - } - } - - newAwsProvider.sdk = { - CloudFormation: FakeCloudFormation, - }; - - return newAwsProvider - .request( - 'CloudFormation', - 'describeStacks', - { StackName: 'foo' }, - { region: 'ap-northeast-1' } - ) - .then(() => { - // STS token is resolved after SDK call - const actualToken = newAwsProvider.getCredentials().credentials.sessionToken; - expect(expectedToken).to.eql(actualToken); - }); - }); - }); - }); - }); - describe('#getCredentials()', () => { const awsStub = sinon.stub().returns(); const AwsProviderProxyquired = proxyquire('../../../../../lib/plugins/aws/provider.js', { @@ -1248,6 +480,56 @@ describe('AwsProvider', () => { }); }); + describe('#request()', () => { + let awsRequestStub; + let PAwsProvider; + let logStub; + + beforeEach(() => { + logStub = sinon.stub(); + awsRequestStub = sinon.stub().resolves(); + awsRequestStub.memoized = sinon.stub().resolves(); + const AwsProviderProxyquired = proxyquire + .noCallThru() + .load('../../../../../lib/plugins/aws/provider.js', { + '../../aws/request': awsRequestStub, + 
'@serverless/utils/log': logStub, + }); + PAwsProvider = new AwsProviderProxyquired(serverless, options); + }); + + afterEach(() => {}); + + it('should trigger the expected AWS SDK invokation', () => { + return PAwsProvider.request('S3', 'getObject', {}).then(() => { + expect(awsRequestStub).to.have.been.calledOnce; + }); + }); + + it('should use local cache when using {useCache: true}', () => { + return PAwsProvider.request('S3', 'getObject', {}, { useCache: true }) + .then(() => PAwsProvider.request('S3', 'getObject', {}, { useCache: true })) + .then(() => { + expect(awsRequestStub).to.not.have.been.called; + expect(awsRequestStub.memoized).to.have.been.calledTwice; + }); + }); + + it('should detect incompatible legacy use of aws request and print a debug warning', () => { + // Enable debug log + process.env.SLS_DEBUG = true; + return PAwsProvider.request('S3', 'getObject', {}, 'incompatible string option') + .then(() => { + expect(logStub).to.have.been.calledWith( + 'WARNING: Inappropriate call of provider.request()' + ); + }) + .finally(() => { + process.env.SLS_DEBUG = false; + }); + }); + }); + describe('#getProfile()', () => { let newAwsProvider; @@ -1477,45 +759,6 @@ describe('AwsProvider', () => { }); }); - describe('#canUseS3TransferAcceleration()', () => { - it('should return false by default with any input', () => { - awsProvider.options['aws-s3-accelerate'] = undefined; - return expect( - awsProvider.canUseS3TransferAcceleration('lambda', 'updateFunctionCode') - ).to.equal(false); - }); - it('should return false by default with S3.upload too', () => { - awsProvider.options['aws-s3-accelerate'] = undefined; - return expect(awsProvider.canUseS3TransferAcceleration('S3', 'upload')).to.equal(false); - }); - it('should return false by default with S3.putObject too', () => { - awsProvider.options['aws-s3-accelerate'] = undefined; - return expect(awsProvider.canUseS3TransferAcceleration('S3', 'putObject')).to.equal(false); - }); - it('should return false when CLI option is provided but not an S3 upload', () => { - awsProvider.options['aws-s3-accelerate'] = true; - return expect( - awsProvider.canUseS3TransferAcceleration('lambda', 'updateFunctionCode') - ).to.equal(false); - }); - it('should return true when CLI option is provided for S3.upload', () => { - awsProvider.options['aws-s3-accelerate'] = true; - return expect(awsProvider.canUseS3TransferAcceleration('S3', 'upload')).to.equal(true); - }); - it('should return true when CLI option is provided for S3.putObject', () => { - awsProvider.options['aws-s3-accelerate'] = true; - return expect(awsProvider.canUseS3TransferAcceleration('S3', 'putObject')).to.equal(true); - }); - }); - - describe('#enableS3TransferAcceleration()', () => { - it('should update the given credentials object to enable S3 acceleration', () => { - const credentials = {}; - awsProvider.enableS3TransferAcceleration(credentials); - return expect(credentials.useAccelerateEndpoint).to.equal(true); - }); - }); - describe('#disableTransferAccelerationForCurrentDeploy()', () => { it('should remove the corresponding option for the current deploy', () => { awsProvider.options['aws-s3-accelerate'] = true;