Send to zulip

This commit is contained in:
Koper
2023-11-20 21:39:33 +07:00
parent 82f50817f8
commit ba40d28152
3609 changed files with 2311843 additions and 7 deletions

View File

@@ -0,0 +1,29 @@
var AWS = require('../core');

AWS.util.update(AWS.APIGateway.prototype, {
  /**
   * Defaults the Accept header to application/json when the caller has
   * not already supplied one.
   *
   * @api private
   */
  setAcceptHeader: function setAcceptHeader(req) {
    var headers = req.httpRequest.headers;
    if (!headers.Accept) {
      headers.Accept = 'application/json';
    }
  },

  /**
   * Wires up per-request listeners for API Gateway calls.
   *
   * @api private
   */
  setupRequestListeners: function setupRequestListeners(request) {
    request.addListener('build', this.setAcceptHeader);
    var params = request.params || {};
    // Swagger exports arrive as a payload blob; convert it to a string.
    if (request.operation === 'getExport' && params.exportType === 'swagger') {
      request.addListener('extractData', AWS.util.convertPayloadToString);
    }
  }
});

View File

@@ -0,0 +1,5 @@
import {Service} from '../service';
import {Signer} from '../cloudfront/signer';
// Type-level customizations layered onto the generated CloudFront client.
export class CloudFrontCustomizations extends Service {
  /** Static handle to the CloudFront Signer helper class. */
  static Signer: typeof Signer;
}

View File

@@ -0,0 +1,12 @@
var AWS = require('../core');

// Load the CloudFront signer module for its side effects.
require('../cloudfront/signer');

AWS.util.update(AWS.CloudFront.prototype, {
  /**
   * Hoists the response payload member on data extraction.
   *
   * @api private
   */
  setupRequestListeners: function setupRequestListeners(req) {
    req.addListener('extractData', AWS.util.hoistPayloadMember);
  }
});

View File

@@ -0,0 +1,120 @@
var AWS = require('../core');
/**
 * Constructs a service interface object. Each API operation is exposed as a
 * function on service.
 *
 * ### Sending a Request Using CloudSearchDomain
 *
 * ```javascript
 * var csd = new AWS.CloudSearchDomain({endpoint: 'my.host.tld'});
 * csd.search(params, function (err, data) {
 *   if (err) console.log(err, err.stack); // an error occurred
 *   else console.log(data); // successful response
 * });
 * ```
 *
 * ### Locking the API Version
 *
 * In order to ensure that the CloudSearchDomain object uses this specific API,
 * you can construct the object by passing the `apiVersion` option to the
 * constructor:
 *
 * ```javascript
 * var csd = new AWS.CloudSearchDomain({
 *   endpoint: 'my.host.tld',
 *   apiVersion: '2013-01-01'
 * });
 * ```
 *
 * You can also set the API version globally in `AWS.config.apiVersions` using
 * the **cloudsearchdomain** service identifier:
 *
 * ```javascript
 * AWS.config.apiVersions = {
 *   cloudsearchdomain: '2013-01-01',
 *   // other service API versions
 * };
 *
 * var csd = new AWS.CloudSearchDomain({endpoint: 'my.host.tld'});
 * ```
 *
 * @note You *must* provide an `endpoint` configuration parameter when
 *   constructing this service. See {constructor} for more information.
 *
 * @!method constructor(options = {})
 *   Constructs a service object. This object has one method for each
 *   API operation.
 *
 *   @example Constructing a CloudSearchDomain object
 *     var csd = new AWS.CloudSearchDomain({endpoint: 'my.host.tld'});
 *   @note You *must* provide an `endpoint` when constructing this service.
 *   @option (see AWS.Config.constructor)
 *
 * @service cloudsearchdomain
 * @version 2013-01-01
 */
AWS.util.update(AWS.CloudSearchDomain.prototype, {
  /**
   * Rejects configurations without a concrete endpoint (templated
   * endpoints containing '{' are also rejected).
   *
   * @api private
   */
  validateService: function validateService() {
    var endpoint = this.config.endpoint;
    if (!endpoint || endpoint.indexOf('{') >= 0) {
      throw AWS.util.error(new Error(), {
        name: 'InvalidEndpoint',
        message: 'AWS.CloudSearchDomain requires an explicit `endpoint\' configuration option.'
      });
    }
  },

  /**
   * @api private
   */
  setupRequestListeners: function setupRequestListeners(request) {
    // Credentials are optional for this service; swap in the async check.
    request.removeListener('validate',
      AWS.EventListeners.Core.VALIDATE_CREDENTIALS
    );
    request.onAsync('validate', this.validateCredentials);
    request.addListener('validate', this.updateRegion);
    if (request.operation === 'search') {
      request.addListener('build', this.convertGetToPost);
    }
  },

  /**
   * Resolves credentials when the API is signed; on failure, removes the
   * signing listener instead of failing the request.
   *
   * @api private
   */
  validateCredentials: function(request, callback) {
    if (!request.service.api.signatureVersion) {
      return callback(); // unsigned API: nothing to resolve
    }
    request.service.config.getCredentials(function(err) {
      if (err) {
        // Credentials could not be resolved: skip signing entirely.
        request.removeListener('sign', AWS.EventListeners.Core.SIGN);
      }
      callback();
    });
  },

  /**
   * Rewrites a GET search into a form-encoded POST.
   *
   * @api private
   */
  convertGetToPost: function(request) {
    var httpRequest = request.httpRequest;
    // convert queries to POST to avoid length restrictions
    var parts = httpRequest.path.split('?');
    httpRequest.method = 'POST';
    httpRequest.path = parts[0];
    httpRequest.body = parts[1];
    httpRequest.headers['Content-Length'] = httpRequest.body.length;
    httpRequest.headers['Content-Type'] = 'application/x-www-form-urlencoded';
  },

  /**
   * Takes the second dot-separated hostname label as the region when
   * present, otherwise keeps the configured region.
   *
   * @api private
   */
  updateRegion: function updateRegion(request) {
    var labels = request.httpRequest.endpoint.hostname.split('.');
    request.httpRequest.region = labels[1] || request.httpRequest.region;
  }
});

View File

@@ -0,0 +1,25 @@
var AWS = require('../core');
var rdsutil = require('./rdsutil');

/**
 * Operations given cross-region handling via rdsutil.
 * @api private
 */
var crossRegionOperations = ['createDBCluster', 'copyDBClusterSnapshot'];

AWS.util.update(AWS.DocDB.prototype, {
  /**
   * Copies a client-level SourceRegion default onto eligible requests,
   * then delegates listener setup to the shared RDS utility.
   *
   * @api private
   */
  setupRequestListeners: function setupRequestListeners(request) {
    var defaultSourceRegion = this.config.params && this.config.params.SourceRegion;
    if (crossRegionOperations.indexOf(request.operation) !== -1 &&
        defaultSourceRegion &&
        request.params &&
        !request.params.SourceRegion) {
      request.params.SourceRegion = defaultSourceRegion;
    }
    rdsutil.setupRequestListeners(this, request, crossRegionOperations);
  },
});

View File

@@ -0,0 +1,9 @@
import {Service} from '../service';
import {DocumentClient as DDBDocumentClient} from '../dynamodb/document_client';
// Type-level customizations layered onto the generated DynamoDB client.
export class DynamoDBCustomizations extends Service {
  /**
   * The document client simplifies working with items in Amazon DynamoDB by abstracting away the notion of attribute values.
   * This abstraction annotates native JavaScript types supplied as input parameters, as well as converts annotated response data to native JavaScript types.
   */
  static DocumentClient: typeof DDBDocumentClient;
}

View File

@@ -0,0 +1,58 @@
var AWS = require('../core');
require('../dynamodb/document_client');

AWS.util.update(AWS.DynamoDB.prototype, {
  /**
   * Inserts the CRC32 check ahead of JSON data extraction when the
   * `dynamoDbCrc32` client option is enabled.
   *
   * @api private
   */
  setupRequestListeners: function setupRequestListeners(request) {
    if (!request.service.config.dynamoDbCrc32) return;
    // Re-register EXTRACT_DATA after checkCrc32 so the checksum runs first.
    request.removeListener('extractData', AWS.EventListeners.Json.EXTRACT_DATA);
    request.addListener('extractData', this.checkCrc32);
    request.addListener('extractData', AWS.EventListeners.Json.EXTRACT_DATA);
  },

  /**
   * Fails the response with a retryable CRC32CheckFailed error when the
   * body checksum does not match the x-amz-crc32 header.
   *
   * @api private
   */
  checkCrc32: function checkCrc32(resp) {
    if (resp.httpResponse.streaming) return;
    if (resp.request.service.crc32IsValid(resp)) return;
    resp.data = null;
    resp.error = AWS.util.error(new Error(), {
      code: 'CRC32CheckFailed',
      message: 'CRC32 integrity check failed',
      retryable: true
    });
    resp.request.haltHandlersOnError();
    throw resp.error;
  },

  /**
   * @api private
   */
  crc32IsValid: function crc32IsValid(resp) {
    var headerCrc = resp.httpResponse.headers['x-amz-crc32'];
    // A missing header leaves nothing to verify.
    if (!headerCrc) return true;
    return AWS.util.crypto.crc32(resp.httpResponse.body) === parseInt(headerCrc, 10);
  },

  /**
   * @api private
   */
  defaultRetryCount: 10,

  /**
   * Computes the retry backoff, defaulting `base` to 50 for DynamoDB.
   *
   * @api private
   */
  retryDelays: function retryDelays(retryCount, err) {
    var options = AWS.util.copy(this.config.retryDelayOptions);
    if (typeof options.base !== 'number') {
      options.base = 50; // default for dynamodb
    }
    return AWS.util.calculateRetryDelay(retryCount, options, err);
  }
});

View File

@@ -0,0 +1,62 @@
var AWS = require('../core');

AWS.util.update(AWS.EC2.prototype, {
  /**
   * @api private
   */
  setupRequestListeners: function setupRequestListeners(request) {
    // EC2 needs its own, deeper-nesting-aware error extractor.
    request.removeListener('extractError', AWS.EventListeners.Query.EXTRACT_ERROR);
    request.addListener('extractError', this.extractError);
    if (request.operation === 'copySnapshot') {
      request.onAsync('validate', this.buildCopySnapshotPresignedUrl);
    }
  },

  /**
   * Populates params.PresignedUrl for copySnapshot by presigning the
   * same operation against the source region.
   *
   * @api private
   */
  buildCopySnapshotPresignedUrl: function buildCopySnapshotPresignedUrl(req, done) {
    // Skip when a URL was supplied, or when this IS the nested presign request.
    if (req.params.PresignedUrl || req._subRequest) {
      return done();
    }

    req.params = AWS.util.copy(req.params);
    req.params.DestinationRegion = req.service.config.region;

    // Build a sibling client pointed at the source region.
    var config = AWS.util.copy(req.service.config);
    delete config.endpoint;
    config.region = req.params.SourceRegion;

    var subRequest = new req.service.constructor(config)[req.operation](req.params);
    subRequest._subRequest = true;
    subRequest.presign(function(err, url) {
      if (err) return done(err);
      req.params.PresignedUrl = url;
      done();
    });
  },

  /**
   * @api private
   */
  extractError: function extractError(resp) {
    // EC2 nests the error code and message deeper than other AWS Query services.
    var httpResponse = resp.httpResponse;
    var data = new AWS.XML.Parser().parse(httpResponse.body.toString() || '');
    if (data.Errors) {
      resp.error = AWS.util.error(new Error(), {
        code: data.Errors.Error.Code,
        message: data.Errors.Error.Message
      });
    } else {
      // No parseable error body: fall back to the HTTP status code.
      resp.error = AWS.util.error(new Error(), {
        code: httpResponse.statusCode,
        message: null
      });
    }
    resp.error.requestId = data.RequestID || null;
  }
});

View File

@@ -0,0 +1,19 @@
var AWS = require('../core');

AWS.util.update(AWS.EventBridge.prototype, {
  /**
   * Rejects putEvents calls that set EndpointId, which this SDK version
   * does not support.
   *
   * @api private
   */
  setupRequestListeners: function setupRequestListeners(request) {
    if (request.operation !== 'putEvents') return;
    var params = request.params || {};
    if (params.EndpointId === undefined) return;
    throw new AWS.util.error(new Error(), {
      code: 'InvalidParameter',
      message: 'EndpointId is not supported in current SDK.\n' +
        'You should consider switching to V3(https://github.com/aws/aws-sdk-js-v3).'
    });
  },
});

View File

@@ -0,0 +1,16 @@
import {Service} from '../service';
// Type-level customizations layered onto the generated Glacier client.
export class GlacierCustomizations extends Service {
  /**
   * Computes the SHA-256 linear and tree hash checksums for a given
   * block of Buffer data. Pass the tree hash of the computed checksums
   * as the checksum input to the {completeMultipartUpload} when performing
   * a multi-part upload.
   */
  computeChecksums(data: Buffer|string): GlacierComputeChecksumsOutput;
}
/** Hex digests returned by {GlacierCustomizations.computeChecksums}. */
export interface GlacierComputeChecksumsOutput {
  linearHash: string;
  treeHash: string;
}

View File

@@ -0,0 +1,114 @@
var AWS = require('../core');

AWS.util.update(AWS.Glacier.prototype, {
  /**
   * @api private
   */
  setupRequestListeners: function setupRequestListeners(request) {
    // Ensure validateAccountId runs before any existing validate listeners.
    if (Array.isArray(request._events.validate)) {
      request._events.validate.unshift(this.validateAccountId);
    } else {
      request.on('validate', this.validateAccountId);
    }
    // The tree-hash headers below replace the core SHA-256 computation.
    request.removeListener('afterBuild',
      AWS.EventListeners.Core.COMPUTE_SHA256);
    request.on('build', this.addGlacierApiVersion);
    request.on('build', this.addTreeHashHeaders);
  },

  /**
   * Defaults accountId to '-' when the caller did not supply one.
   *
   * @api private
   */
  validateAccountId: function validateAccountId(request) {
    if (request.params.accountId !== undefined) return;
    request.params = AWS.util.copy(request.params);
    request.params.accountId = '-';
  },

  /**
   * Stamps the service API version onto every request.
   *
   * @api private
   */
  addGlacierApiVersion: function addGlacierApiVersion(request) {
    request.httpRequest.headers['x-amz-glacier-version'] =
      request.service.api.apiVersion;
  },

  /**
   * Adds linear and tree SHA-256 hash headers for requests with a body.
   *
   * @api private
   */
  addTreeHashHeaders: function addTreeHashHeaders(request) {
    if (request.params.body === undefined) return;
    var hashes = request.service.computeChecksums(request.params.body);
    request.httpRequest.headers['X-Amz-Content-Sha256'] = hashes.linearHash;
    // Honor a caller-provided tree-hash header if one is already set.
    if (!request.httpRequest.headers['x-amz-sha256-tree-hash']) {
      request.httpRequest.headers['x-amz-sha256-tree-hash'] = hashes.treeHash;
    }
  },

  /**
   * @!group Computing Checksums
   */

  /**
   * Computes the SHA-256 linear and tree hash checksums for a given
   * block of Buffer data. Pass the tree hash of the computed checksums
   * as the checksum input to the {completeMultipartUpload} when performing
   * a multi-part upload.
   *
   * @example Calculate checksum of 5.5MB data chunk
   *   var glacier = new AWS.Glacier();
   *   var data = Buffer.alloc(5.5 * 1024 * 1024);
   *   data.fill('0'); // fill with zeros
   *   var results = glacier.computeChecksums(data);
   *   // Result: { linearHash: '68aff0c5a9...', treeHash: '154e26c78f...' }
   * @param data [Buffer, String] data to calculate the checksum for
   * @return [map<linearHash:String,treeHash:String>] a map containing
   *   the linearHash and treeHash properties representing hex based digests
   *   of the respective checksums.
   * @see completeMultipartUpload
   */
  computeChecksums: function computeChecksums(data) {
    if (!AWS.util.Buffer.isBuffer(data)) data = AWS.util.buffer.toBuffer(data);

    var MB = 1024 * 1024;
    var linear = AWS.util.crypto.createHash('sha256');
    var leaves = [];

    // Hash the payload in 1MB leaf chunks, feeding the same bytes into
    // the running linear hash.
    for (var offset = 0; offset < data.length; offset += MB) {
      var chunk = data.slice(offset, Math.min(offset + MB, data.length));
      linear.update(chunk);
      leaves.push(AWS.util.crypto.sha256(chunk));
    }

    return {
      linearHash: linear.digest('hex'),
      treeHash: this.buildHashTree(leaves)
    };
  },

  /**
   * Reduces leaf digests pairwise into a single tree-hash hex digest.
   *
   * @api private
   */
  buildHashTree: function buildHashTree(hashes) {
    // merge leaf nodes
    while (hashes.length > 1) {
      var parents = [];
      for (var i = 0; i < hashes.length; i += 2) {
        var right = hashes[i + 1];
        if (right) {
          // Hash the concatenation of the two 32-byte child digests.
          var pair = AWS.util.buffer.alloc(64);
          pair.write(hashes[i], 0, 32, 'binary');
          pair.write(right, 32, 32, 'binary');
          parents.push(AWS.util.crypto.sha256(pair));
        } else {
          // An odd trailing node is promoted unchanged.
          parents.push(hashes[i]);
        }
      }
      hashes = parents;
    }
    return AWS.util.crypto.toHex(hashes[0]);
  }
});

View File

@@ -0,0 +1,100 @@
var AWS = require('../core');

/**
 * Operations whose response payload blob is converted to a string.
 * @api private
 */
var blobPayloadOutputOps = [
  'deleteThingShadow',
  'getThingShadow',
  'updateThingShadow'
];

/**
 * Constructs a service interface object. Each API operation is exposed as a
 * function on service.
 *
 * ### Sending a Request Using IotData
 *
 * ```javascript
 * var iotdata = new AWS.IotData({endpoint: 'my.host.tld'});
 * iotdata.getThingShadow(params, function (err, data) {
 *   if (err) console.log(err, err.stack); // an error occurred
 *   else console.log(data); // successful response
 * });
 * ```
 *
 * ### Locking the API Version
 *
 * In order to ensure that the IotData object uses this specific API,
 * you can construct the object by passing the `apiVersion` option to the
 * constructor:
 *
 * ```javascript
 * var iotdata = new AWS.IotData({
 *   endpoint: 'my.host.tld',
 *   apiVersion: '2015-05-28'
 * });
 * ```
 *
 * You can also set the API version globally in `AWS.config.apiVersions` using
 * the **iotdata** service identifier:
 *
 * ```javascript
 * AWS.config.apiVersions = {
 *   iotdata: '2015-05-28',
 *   // other service API versions
 * };
 *
 * var iotdata = new AWS.IotData({endpoint: 'my.host.tld'});
 * ```
 *
 * @note You *must* provide an `endpoint` configuration parameter when
 *   constructing this service. See {constructor} for more information.
 *
 * @!method constructor(options = {})
 *   Constructs a service object. This object has one method for each
 *   API operation.
 *
 *   @example Constructing a IotData object
 *     var iotdata = new AWS.IotData({endpoint: 'my.host.tld'});
 *   @note You *must* provide an `endpoint` when constructing this service.
 *   @option (see AWS.Config.constructor)
 *
 * @service iotdata
 * @version 2015-05-28
 */
AWS.util.update(AWS.IotData.prototype, {
  /**
   * Rejects configurations without a concrete endpoint (templated
   * endpoints containing '{' are also rejected).
   *
   * @api private
   */
  validateService: function validateService() {
    var endpoint = this.config.endpoint;
    if (!endpoint || endpoint.indexOf('{') >= 0) {
      throw AWS.util.error(new Error(), {
        name: 'InvalidEndpoint',
        message: 'AWS.IotData requires an explicit `endpoint\' configuration option.'
      });
    }
  },

  /**
   * @api private
   */
  setupRequestListeners: function setupRequestListeners(request) {
    request.addListener('validateResponse', this.validateResponseBody);
    if (blobPayloadOutputOps.indexOf(request.operation) > -1) {
      request.addListener('extractData', AWS.util.convertPayloadToString);
    }
  },

  /**
   * Blanks out any response body that is not a JSON object payload.
   *
   * @api private
   */
  validateResponseBody: function validateResponseBody(resp) {
    var trimmed = (resp.httpResponse.body.toString() || '{}').trim();
    if (trimmed.charAt(0) !== '{') {
      resp.httpResponse.body = '';
    }
  }
});

View File

@@ -0,0 +1,13 @@
var AWS = require('../core');

AWS.util.update(AWS.Lambda.prototype, {
  /**
   * Converts the invoke response payload blob to a string on extraction.
   *
   * @api private
   */
  setupRequestListeners: function setupRequestListeners(request) {
    if (request.operation !== 'invoke') return;
    request.addListener('extractData', AWS.util.convertPayloadToString);
  }
});

View File

@@ -0,0 +1,24 @@
var AWS = require('../core');

AWS.util.update(AWS.MachineLearning.prototype, {
  /**
   * @api private
   */
  setupRequestListeners: function setupRequestListeners(request) {
    if (request.operation === 'predict') {
      request.addListener('build', this.buildEndpoint);
    }
  },

  /**
   * Redirects the request to the endpoint named by params.PredictEndpoint.
   *
   * @api private
   */
  buildEndpoint: function buildEndpoint(request) {
    var predictEndpoint = request.params.PredictEndpoint;
    if (predictEndpoint) {
      request.httpRequest.endpoint = new AWS.Endpoint(predictEndpoint);
    }
  }
});

View File

@@ -0,0 +1,25 @@
var AWS = require('../core');
var rdsutil = require('./rdsutil');

/**
 * Operations given cross-region handling via rdsutil.
 * @api private
 */
var crossRegionOperations = ['createDBCluster', 'copyDBClusterSnapshot'];

AWS.util.update(AWS.Neptune.prototype, {
  /**
   * Copies a client-level SourceRegion default onto eligible requests,
   * then delegates listener setup to the shared RDS utility.
   *
   * @api private
   */
  setupRequestListeners: function setupRequestListeners(request) {
    var defaultSourceRegion = this.config.params && this.config.params.SourceRegion;
    if (crossRegionOperations.indexOf(request.operation) !== -1 &&
        defaultSourceRegion &&
        request.params &&
        !request.params.SourceRegion) {
      request.params.SourceRegion = defaultSourceRegion;
    }
    rdsutil.setupRequestListeners(this, request, crossRegionOperations);
  },
});

View File

@@ -0,0 +1,5 @@
import {Service} from '../service';
import {Presigner} from '../polly/presigner';
// Type-level customizations layered onto the generated Polly client.
export class PollyCustomizations extends Service {
  /** Static handle to the Polly Presigner helper class. */
  static Presigner: typeof Presigner;
}

View File

@@ -0,0 +1 @@
// Loaded solely for its side effects; the module's export is unused here.
require('../polly/presigner');

View File

@@ -0,0 +1,16 @@
var AWS = require('../core');
var rdsutil = require('./rdsutil');
require('../rds/signer');

/**
 * Operations given cross-region handling via rdsutil.
 * @api private
 */
var crossRegionOperations = ['copyDBSnapshot', 'createDBInstanceReadReplica', 'createDBCluster', 'copyDBClusterSnapshot', 'startDBInstanceAutomatedBackupsReplication'];

AWS.util.update(AWS.RDS.prototype, {
  /**
   * Delegates listener setup to the shared RDS utility.
   *
   * @api private
   */
  setupRequestListeners: function setupRequestListeners(request) {
    rdsutil.setupRequestListeners(this, request, crossRegionOperations);
  },
});

View File

@@ -0,0 +1,19 @@
var AWS = require('../core');

AWS.util.update(AWS.RDSDataService.prototype, {
  /**
   * Treats 400 BadRequestException "Communications link failure" errors
   * as retryable; defers everything else to the base implementation.
   *
   * @return [Boolean] whether the error can be retried
   * @api private
   */
  retryableError: function retryableError(error) {
    var isCommsLinkFailure =
      error.code === 'BadRequestException' &&
      error.statusCode === 400 &&
      !!(error.message && error.message.match(/^Communications link failure/));
    if (isCommsLinkFailure) return true;
    return AWS.Service.prototype.retryableError.call(this, error);
  }
});

View File

@@ -0,0 +1,61 @@
var AWS = require('../core');

var rdsutil = {
  /**
   * Registers cross-region presigning for supported operations when the
   * request carries a SourceRegion parameter.
   *
   * @api private
   */
  setupRequestListeners: function setupRequestListeners(service, request, crossRegionOperations) {
    var needsCrossRegionHandling =
      crossRegionOperations.indexOf(request.operation) !== -1 &&
      request.params.SourceRegion;
    if (!needsCrossRegionHandling) return;

    request.params = AWS.util.copy(request.params);
    if (request.params.PreSignedUrl ||
        request.params.SourceRegion === service.config.region) {
      // Already presigned, or not actually cross-region: drop the hint.
      delete request.params.SourceRegion;
      return;
    }

    var doesParamValidation = !!service.config.paramValidation;
    // Temporarily pull the parameter validator so the presigned URL is
    // built first, then re-add it so validation still runs afterwards.
    if (doesParamValidation) {
      request.removeListener('validate', AWS.EventListeners.Core.VALIDATE_PARAMETERS);
    }
    request.onAsync('validate', rdsutil.buildCrossRegionPresignedUrl);
    if (doesParamValidation) {
      request.addListener('validate', AWS.EventListeners.Core.VALIDATE_PARAMETERS);
    }
  },

  /**
   * Presigns the same operation against the source region and stores the
   * resulting URL in params.PreSignedUrl.
   *
   * @api private
   */
  buildCrossRegionPresignedUrl: function buildCrossRegionPresignedUrl(req, done) {
    var config = AWS.util.copy(req.service.config);
    config.region = req.params.SourceRegion;
    delete req.params.SourceRegion;
    delete config.endpoint;
    // relevant params for the operation will already be in req.params
    delete config.params;
    config.signatureVersion = 'v4';

    var destinationRegion = req.service.config.region;
    var sourceRegionService = new req.service.constructor(config);
    var presignReq = sourceRegionService[req.operation](AWS.util.copy(req.params));
    presignReq.on('build', function addDestinationRegionParam(request) {
      var httpRequest = request.httpRequest;
      httpRequest.params.DestinationRegion = destinationRegion;
      httpRequest.body = AWS.util.queryParamsToString(httpRequest.params);
    });
    presignReq.presign(function(err, url) {
      if (err) return done(err);
      req.params.PreSignedUrl = url;
      done();
    });
  }
};

/**
 * @api private
 */
module.exports = rdsutil;

View File

@@ -0,0 +1,32 @@
var AWS = require('../core');

AWS.util.update(AWS.Route53.prototype, {
  /**
   * @api private
   */
  setupRequestListeners: function setupRequestListeners(request) {
    request.on('build', this.sanitizeUrl);
  },

  /**
   * Replaces the first '/%2F...%2F' sequence in the request path with '/'.
   *
   * @api private
   */
  sanitizeUrl: function sanitizeUrl(request) {
    var httpRequest = request.httpRequest;
    httpRequest.path = httpRequest.path.replace(/\/%2F\w+%2F/, '/');
  },

  /**
   * Treats 400 PriorRequestNotComplete errors as retryable; defers
   * everything else to the base implementation.
   *
   * @return [Boolean] whether the error can be retried
   * @api private
   */
  retryableError: function retryableError(error) {
    if (error.statusCode === 400 && error.code === 'PriorRequestNotComplete') {
      return true;
    }
    return AWS.Service.prototype.retryableError.call(this, error);
  }
});

View File

@@ -0,0 +1,49 @@
import {Service} from '../service';
import {ManagedUpload} from '../s3/managed_upload';
import S3 = require('../../clients/s3');
// Type-level customizations layered onto the generated S3 client.
export class S3Customizations extends Service {
  /**
   * Get a pre-signed URL for a given operation name.
   */
  getSignedUrl(operation: string, params: any, callback: (err: Error, url: string) => void): void;
  /**
   * Get a pre-signed URL for a given operation name.
   */
  getSignedUrl(operation: string, params: any): string;
  /**
   * Returns a 'thenable' promise that will be resolved with a pre-signed URL for a given operation name.
   */
  getSignedUrlPromise(operation: string, params: any): Promise<string>;
  /**
   * Get the form fields and target URL for direct POST uploading.
   */
  createPresignedPost(
    params: S3.PresignedPost.Params,
    callback: (err: Error, data: S3.PresignedPost) => void
  ): void;
  /**
   * Get the form fields and target URL for direct POST uploading.
   */
  createPresignedPost(params: S3.PresignedPost.Params): S3.PresignedPost;
  /**
   * Uploads an arbitrarily sized buffer, blob, or stream, using intelligent
   * concurrent handling of parts if the payload is large enough. You can
   * configure the concurrent queue size by setting `options`. Note that this
   * is the only operation for which the SDK can retry requests with stream
   * bodies.
   */
  upload(params: S3.Types.PutObjectRequest, options?: ManagedUpload.ManagedUploadOptions, callback?: (err: Error, data: ManagedUpload.SendData) => void): ManagedUpload;
  /**
   * Uploads an arbitrarily sized buffer, blob, or stream, using intelligent
   * concurrent handling of parts if the payload is large enough. You can
   * configure the concurrent queue size by setting `options`. Note that this
   * is the only operation for which the SDK can retry requests with stream
   * bodies.
   */
  upload(params: S3.Types.PutObjectRequest, callback?: (err: Error, data: ManagedUpload.SendData) => void): ManagedUpload;
  /** Static handle to the ManagedUpload helper class. */
  static ManagedUpload: typeof ManagedUpload;
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,211 @@
var AWS = require('../core');
var s3util = require('./s3util');
// NOTE(review): regionUtil is not referenced anywhere in this block — verify
// it is needed before removing.
var regionUtil = require('../region_config');
AWS.util.update(AWS.S3Control.prototype, {
  /**
   * Registers host-id extraction, AccountId validation, and — when the
   * Bucket or Name parameter is an ARN — the Outposts ARN validation and
   * endpoint/parameter rewriting listeners.
   *
   * @api private
   */
  setupRequestListeners: function setupRequestListeners(request) {
    request.addListener('extractError', this.extractHostId);
    request.addListener('extractData', this.extractHostId);
    request.addListener('validate', this.validateAccountId);
    var isArnInBucket = s3util.isArnInParam(request, 'Bucket');
    var isArnInName = s3util.isArnInParam(request, 'Name');
    if (isArnInBucket) {
      request._parsedArn = AWS.util.ARN.parse(request.params['Bucket']);
      request.addListener('validate', this.validateOutpostsBucketArn);
      request.addListener('validate', s3util.validateOutpostsArn);
      request.addListener('afterBuild', this.addOutpostIdHeader);
    } else if (isArnInName) {
      request._parsedArn = AWS.util.ARN.parse(request.params['Name']);
      request.addListener('validate', s3util.validateOutpostsAccessPointArn);
      request.addListener('validate', s3util.validateOutpostsArn);
      request.addListener('afterBuild', this.addOutpostIdHeader);
    }
    if (isArnInBucket || isArnInName) {
      request.addListener('validate', this.validateArnRegion);
      // Third argument presumably prepends the listener so it runs before the
      // ones registered above — confirm against the listener implementation.
      request.addListener('validate', this.validateArnAccountWithParams, true);
      request.addListener('validate', s3util.validateArnAccount);
      request.addListener('validate', s3util.validateArnService);
      request.addListener('build', this.populateParamFromArn, true);
      request.addListener('build', this.populateUriFromArn);
      request.addListener('build', s3util.validatePopulateUriFromArn);
    }
    // createBucket/listRegionalBuckets with an explicit OutpostId target the
    // s3-outposts endpoint directly.
    if (request.params.OutpostId &&
      (request.operation === 'createBucket' ||
      request.operation === 'listRegionalBuckets')) {
      request.addListener('build', this.populateEndpointForOutpostId);
    }
  },
  /**
   * Adds outpostId header
   */
  addOutpostIdHeader: function addOutpostIdHeader(req) {
    req.httpRequest.headers['x-amz-outpost-id'] = req._parsedArn.outpostId;
  },
  /**
   * Validate Outposts ARN supplied in Bucket parameter is a valid bucket name
   */
  validateOutpostsBucketArn: function validateOutpostsBucketArn(req) {
    var parsedArn = req._parsedArn;
    //can be ':' or '/'
    var delimiter = parsedArn.resource['outpost'.length];
    // Expect exactly outpost/{outpostId}/bucket/{bucketName} (4 segments).
    if (parsedArn.resource.split(delimiter).length !== 4) {
      throw AWS.util.error(new Error(), {
        code: 'InvalidARN',
        message: 'Bucket ARN should have two resources outpost/{outpostId}/bucket/{accesspointName}'
      });
    }
    var bucket = parsedArn.resource.split(delimiter)[3];
    // Dots are rejected even when otherwise DNS-compatible.
    if (!s3util.dnsCompatibleBucketName(bucket) || bucket.match(/\./)) {
      throw AWS.util.error(new Error(), {
        code: 'InvalidARN',
        message: 'Bucket ARN is not DNS compatible. Got ' + bucket
      });
    }
    //set parsed valid bucket
    req._parsedArn.bucket = bucket;
  },
  /**
   * Replaces the ARN-valued Bucket/Name parameter with the plain name
   * parsed from the ARN.
   *
   * @api private
   */
  populateParamFromArn: function populateParamFromArn(req) {
    var parsedArn = req._parsedArn;
    if (s3util.isArnInParam(req, 'Bucket')) {
      req.params.Bucket = parsedArn.bucket;
    } else if (s3util.isArnInParam(req, 'Name')) {
      req.params.Name = parsedArn.accessPoint;
    }
  },
  /**
   * Populate URI according to the ARN
   */
  populateUriFromArn: function populateUriFromArn(req) {
    var parsedArn = req._parsedArn;
    var endpoint = req.httpRequest.endpoint;
    var useArnRegion = req.service.config.s3UseArnRegion;
    var useFipsEndpoint = req.service.config.useFipsEndpoint;
    // Host is s3-outposts[-fips].{region}.amazonaws.com; the region comes
    // from the ARN when s3UseArnRegion is set, else from client config.
    endpoint.hostname = [
      's3-outposts' + (useFipsEndpoint ? '-fips': ''),
      useArnRegion ? parsedArn.region : req.service.config.region,
      'amazonaws.com'
    ].join('.');
    endpoint.host = endpoint.hostname;
  },
  /**
   * Points the request at the s3-outposts endpoint for the configured
   * client region (used for OutpostId-based operations).
   *
   * @api private
   */
  populateEndpointForOutpostId: function populateEndpointForOutpostId(req) {
    var endpoint = req.httpRequest.endpoint;
    var useFipsEndpoint = req.service.config.useFipsEndpoint;
    endpoint.hostname = [
      's3-outposts' + (useFipsEndpoint ? '-fips': ''),
      req.service.config.region,
      'amazonaws.com'
    ].join('.');
    endpoint.host = endpoint.hostname;
  },
  /**
   * Copies the x-amz-id-2 response header onto the response (and error)
   * as extendedRequestId.
   *
   * @api private
   */
  extractHostId: function(response) {
    var hostId = response.httpResponse.headers ? response.httpResponse.headers['x-amz-id-2'] : null;
    response.extendedRequestId = hostId;
    if (response.error) {
      response.error.extendedRequestId = hostId;
    }
  },
  /**
   * Validates the ARN region, allowing FIPS endpoints for S3 Control.
   *
   * @api private
   */
  validateArnRegion: function validateArnRegion(req) {
    s3util.validateArnRegion(req, { allowFipsEndpoint: true });
  },
  /**
   * Ensures a caller-supplied AccountId matches the ARN's account, or
   * fills AccountId in from the ARN when absent.
   *
   * @api private
   */
  validateArnAccountWithParams: function validateArnAccountWithParams(req) {
    var params = req.params;
    var inputModel = req.service.api.operations[req.operation].input;
    if (inputModel.members.AccountId) {
      var parsedArn = req._parsedArn;
      if (parsedArn.accountId) {
        if (params.AccountId) {
          if (params.AccountId !== parsedArn.accountId) {
            throw AWS.util.error(
              new Error(),
              {code: 'ValidationError', message: 'AccountId in ARN and request params should be same.'}
            );
          }
        } else {
          // Store accountId from ARN in params
          params.AccountId = parsedArn.accountId;
        }
      }
    }
  },
  /**
   * Validates that an explicitly supplied AccountId is a hostname-safe
   * string of 1-63 characters. Missing AccountId is allowed.
   *
   * @api private
   */
  validateAccountId: function(request) {
    var params = request.params;
    if (!Object.prototype.hasOwnProperty.call(params, 'AccountId')) return;
    var accountId = params.AccountId;
    //validate type
    if (typeof accountId !== 'string') {
      throw AWS.util.error(
        new Error(),
        {code: 'ValidationError', message: 'AccountId must be a string.'}
      );
    }
    //validate length
    if (accountId.length < 1 || accountId.length > 63) {
      throw AWS.util.error(
        new Error(),
        {code: 'ValidationError', message: 'AccountId length should be between 1 to 63 characters, inclusive.'}
      );
    }
    //validate pattern
    var hostPattern = /^[a-zA-Z0-9]{1}$|^[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]$/;
    if (!hostPattern.test(accountId)) {
      throw AWS.util.error(new Error(),
        {code: 'ValidationError', message: 'AccountId should be hostname compatible. AccountId: ' + accountId});
    }
  },
  /**
   * Signs with the ARN's service (e.g. from an Outposts ARN) or
   * 's3-outposts' for OutpostId-based bucket operations; otherwise
   * defers to the base implementation.
   *
   * @api private
   */
  getSigningName: function getSigningName(req) {
    var _super = AWS.Service.prototype.getSigningName;
    if (req && req._parsedArn && req._parsedArn.service) {
      return req._parsedArn.service;
    } else if (req.params.OutpostId &&
      (req.operation === 'createBucket' ||
      req.operation === 'listRegionalBuckets')) {
      return 's3-outposts';
    } else {
      return _super.call(this, req);
    }
  },
});

View File

@@ -0,0 +1,283 @@
var AWS = require('../core');
var regionUtil = require('../region_config');
var s3util = {
/**
* @api private
*/
isArnInParam: function isArnInParam(req, paramName) {
var inputShape = (req.service.api.operations[req.operation] || {}).input || {};
var inputMembers = inputShape.members || {};
if (!req.params[paramName] || !inputMembers[paramName]) return false;
return AWS.util.ARN.validate(req.params[paramName]);
},
/**
* Validate service component from ARN supplied in Bucket parameter
*/
validateArnService: function validateArnService(req) {
var parsedArn = req._parsedArn;
if (parsedArn.service !== 's3'
&& parsedArn.service !== 's3-outposts'
&& parsedArn.service !== 's3-object-lambda') {
throw AWS.util.error(new Error(), {
code: 'InvalidARN',
message: 'expect \'s3\' or \'s3-outposts\' or \'s3-object-lambda\' in ARN service component'
});
}
},
/**
* Validate account ID from ARN supplied in Bucket parameter is a valid account
*/
validateArnAccount: function validateArnAccount(req) {
var parsedArn = req._parsedArn;
if (!/[0-9]{12}/.exec(parsedArn.accountId)) {
throw AWS.util.error(new Error(), {
code: 'InvalidARN',
message: 'ARN accountID does not match regex "[0-9]{12}"'
});
}
},
/**
* Validate ARN supplied in Bucket parameter is a valid access point ARN
*/
validateS3AccessPointArn: function validateS3AccessPointArn(req) {
var parsedArn = req._parsedArn;
//can be ':' or '/'
var delimiter = parsedArn.resource['accesspoint'.length];
if (parsedArn.resource.split(delimiter).length !== 2) {
throw AWS.util.error(new Error(), {
code: 'InvalidARN',
message: 'Access Point ARN should have one resource accesspoint/{accesspointName}'
});
}
var accessPoint = parsedArn.resource.split(delimiter)[1];
var accessPointPrefix = accessPoint + '-' + parsedArn.accountId;
if (!s3util.dnsCompatibleBucketName(accessPointPrefix) || accessPointPrefix.match(/\./)) {
throw AWS.util.error(new Error(), {
code: 'InvalidARN',
message: 'Access point resource in ARN is not DNS compatible. Got ' + accessPoint
});
}
//set parsed valid access point
req._parsedArn.accessPoint = accessPoint;
},
/**
* Validate Outposts ARN supplied in Bucket parameter is a valid outposts ARN
*/
validateOutpostsArn: function validateOutpostsArn(req) {
var parsedArn = req._parsedArn;
if (
parsedArn.resource.indexOf('outpost:') !== 0 &&
parsedArn.resource.indexOf('outpost/') !== 0
) {
throw AWS.util.error(new Error(), {
code: 'InvalidARN',
message: 'ARN resource should begin with \'outpost/\''
});
}
//can be ':' or '/'
var delimiter = parsedArn.resource['outpost'.length];
var outpostId = parsedArn.resource.split(delimiter)[1];
var dnsHostRegex = new RegExp(/^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]{0,61}[a-zA-Z0-9])$/);
if (!dnsHostRegex.test(outpostId)) {
throw AWS.util.error(new Error(), {
code: 'InvalidARN',
message: 'Outpost resource in ARN is not DNS compatible. Got ' + outpostId
});
}
req._parsedArn.outpostId = outpostId;
},
/**
* Validate Outposts ARN supplied in Bucket parameter is a valid outposts ARN
*/
validateOutpostsAccessPointArn: function validateOutpostsAccessPointArn(req) {
var parsedArn = req._parsedArn;
//can be ':' or '/'
var delimiter = parsedArn.resource['outpost'.length];
if (parsedArn.resource.split(delimiter).length !== 4) {
throw AWS.util.error(new Error(), {
code: 'InvalidARN',
message: 'Outposts ARN should have two resources outpost/{outpostId}/accesspoint/{accesspointName}'
});
}
var accessPoint = parsedArn.resource.split(delimiter)[3];
var accessPointPrefix = accessPoint + '-' + parsedArn.accountId;
if (!s3util.dnsCompatibleBucketName(accessPointPrefix) || accessPointPrefix.match(/\./)) {
throw AWS.util.error(new Error(), {
code: 'InvalidARN',
message: 'Access point resource in ARN is not DNS compatible. Got ' + accessPoint
});
}
//set parsed valid access point
req._parsedArn.accessPoint = accessPoint;
},
/**
* Validate region field in ARN supplied in Bucket parameter is a valid region
*/
  validateArnRegion: function validateArnRegion(req, options) {
    if (options === undefined) {
      options = {};
    }
    var useArnRegion = s3util.loadUseArnRegionConfig(req);
    var regionFromArn = req._parsedArn.region;
    var clientRegion = req.service.config.region;
    var useFipsEndpoint = req.service.config.useFipsEndpoint;
    var allowFipsEndpoint = options.allowFipsEndpoint || false;
    // A region-less ARN cannot be resolved by this SDK; multi-region ARNs are
    // only supported by SDK v3, hence the extra hint for the plain 's3' service.
    if (!regionFromArn) {
      var message = 'ARN region is empty';
      if (req._parsedArn.service === 's3') {
        message = message + '\nYou may want to use multi-regional ARN. The feature is not supported in current SDK. ' +
        'You should consider switching to V3(https://github.com/aws/aws-sdk-js-v3).';
      }
      throw AWS.util.error(new Error(), {
        code: 'InvalidARN',
        message: message
      });
    }
    // FIPS endpoints are only allowed when the caller explicitly opts in.
    if (useFipsEndpoint && !allowFipsEndpoint) {
      throw AWS.util.error(new Error(), {
        code: 'InvalidConfiguration',
        message: 'ARN endpoint is not compatible with FIPS region'
      });
    }
    // FIPS must be configured on the client, never embedded in the ARN region.
    if (regionFromArn.indexOf('fips') >= 0) {
      throw AWS.util.error(new Error(), {
        code: 'InvalidConfiguration',
        message: 'FIPS region not allowed in ARN'
      });
    }
    // Without the use-ARN-region opt-in, the ARN must match the client region
    // exactly; with it, both regions still need to share a partition suffix.
    if (!useArnRegion && regionFromArn !== clientRegion) {
      throw AWS.util.error(new Error(), {
        code: 'InvalidConfiguration',
        message: 'Configured region conflicts with access point region'
      });
    } else if (
      useArnRegion &&
      regionUtil.getEndpointSuffix(regionFromArn) !== regionUtil.getEndpointSuffix(clientRegion)
    ) {
      throw AWS.util.error(new Error(), {
        code: 'InvalidConfiguration',
        message: 'Configured region and access point region not in same partition'
      });
    }
    // Transfer Acceleration cannot be combined with access point ARNs.
    if (req.service.config.useAccelerateEndpoint) {
      throw AWS.util.error(new Error(), {
        code: 'InvalidConfiguration',
        message: 'useAccelerateEndpoint config is not supported with access point ARN'
      });
    }
    // Outposts endpoints have no dualstack variant.
    if (req._parsedArn.service === 's3-outposts' && req.service.config.useDualstackEndpoint) {
      throw AWS.util.error(new Error(), {
        code: 'InvalidConfiguration',
        message: 'Dualstack is not supported with outposts access point ARN'
      });
    }
  },
loadUseArnRegionConfig: function loadUseArnRegionConfig(req) {
var envName = 'AWS_S3_USE_ARN_REGION';
var configName = 's3_use_arn_region';
var useArnRegion = true;
var originalConfig = req.service._originalConfig || {};
if (req.service.config.s3UseArnRegion !== undefined) {
return req.service.config.s3UseArnRegion;
} else if (originalConfig.s3UseArnRegion !== undefined) {
useArnRegion = originalConfig.s3UseArnRegion === true;
} else if (AWS.util.isNode()) {
//load from environmental variable AWS_USE_ARN_REGION
if (process.env[envName]) {
var value = process.env[envName].trim().toLowerCase();
if (['false', 'true'].indexOf(value) < 0) {
throw AWS.util.error(new Error(), {
code: 'InvalidConfiguration',
message: envName + ' only accepts true or false. Got ' + process.env[envName],
retryable: false
});
}
useArnRegion = value === 'true';
} else { //load from shared config property use_arn_region
var profiles = {};
var profile = {};
try {
profiles = AWS.util.getProfilesFromSharedConfig(AWS.util.iniLoader);
profile = profiles[process.env.AWS_PROFILE || AWS.util.defaultProfile];
} catch (e) {}
if (profile[configName]) {
if (['false', 'true'].indexOf(profile[configName].trim().toLowerCase()) < 0) {
throw AWS.util.error(new Error(), {
code: 'InvalidConfiguration',
message: configName + ' only accepts true or false. Got ' + profile[configName],
retryable: false
});
}
useArnRegion = profile[configName].trim().toLowerCase() === 'true';
}
}
}
req.service.config.s3UseArnRegion = useArnRegion;
return useArnRegion;
},
/**
* Validations before URI can be populated
*/
validatePopulateUriFromArn: function validatePopulateUriFromArn(req) {
if (req.service._originalConfig && req.service._originalConfig.endpoint) {
throw AWS.util.error(new Error(), {
code: 'InvalidConfiguration',
message: 'Custom endpoint is not compatible with access point ARN'
});
}
if (req.service.config.s3ForcePathStyle) {
throw AWS.util.error(new Error(), {
code: 'InvalidConfiguration',
message: 'Cannot construct path-style endpoint with access point'
});
}
},
/**
* Returns true if the bucket name is DNS compatible. Buckets created
* outside of the classic region MUST be DNS compatible.
*
* @api private
*/
dnsCompatibleBucketName: function dnsCompatibleBucketName(bucketName) {
var b = bucketName;
var domain = new RegExp(/^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$/);
var ipAddress = new RegExp(/(\d+\.){3}\d+/);
var dots = new RegExp(/\.\./);
return (b.match(domain) && !b.match(ipAddress) && !b.match(dots)) ? true : false;
},
};
/**
* @api private
*/
module.exports = s3util;

View File

@@ -0,0 +1,131 @@
var AWS = require('../core');
AWS.util.update(AWS.SQS.prototype, {
  /**
   * Registers request hooks: endpoint rewriting from QueueUrl and, when the
   * `computeChecksums` client option is enabled, MD5 verification for the
   * message operations.
   *
   * @api private
   */
  setupRequestListeners: function setupRequestListeners(request) {
    request.addListener('build', this.buildEndpoint);
    if (!request.service.config.computeChecksums) return;
    // Dispatch each checksum-relevant operation to its verifier.
    var verifiers = {
      sendMessage: this.verifySendMessageChecksum,
      sendMessageBatch: this.verifySendMessageBatchChecksum,
      receiveMessage: this.verifyReceiveMessageChecksum
    };
    var verifier = verifiers[request.operation];
    if (verifier) {
      request.addListener('extractData', verifier);
    }
  },

  /**
   * Compares the MD5 reported by SQS for a sent message body against a
   * locally computed digest, flagging the response with a retryable
   * InvalidChecksum error on mismatch.
   *
   * @api private
   */
  verifySendMessageChecksum: function verifySendMessageChecksum(response) {
    if (!response.data) return;
    var reported = response.data.MD5OfMessageBody;
    var expected = this.service.calculateChecksum(this.params.MessageBody);
    if (expected !== reported) {
      var msg = 'Got "' + response.data.MD5OfMessageBody +
        '", expecting "' + expected + '".';
      this.service.throwInvalidChecksumError(response,
        [response.data.MessageId], msg);
    }
  },

  /**
   * Verifies the MD5 of every successfully sent batch entry against its
   * original body, collecting all mismatched entries into one error.
   *
   * @api private
   */
  verifySendMessageBatchChecksum: function verifySendMessageBatchChecksum(response) {
    if (!response.data) return;
    var service = this.service;
    var successfulById = {};
    var failedEntryIds = [];
    var failedMessageIds = [];
    // Index the service's per-entry results by client-supplied entry Id.
    AWS.util.arrayEach(response.data.Successful, function (entry) {
      successfulById[entry.Id] = entry;
    });
    AWS.util.arrayEach(this.params.Entries, function (entry) {
      var result = successfulById[entry.Id];
      if (result && !service.isChecksumValid(result.MD5OfMessageBody, entry.MessageBody)) {
        failedEntryIds.push(entry.Id);
        failedMessageIds.push(result.MessageId);
      }
    });
    if (failedEntryIds.length > 0) {
      service.throwInvalidChecksumError(response, failedMessageIds,
        'Invalid messages: ' + failedEntryIds.join(', '));
    }
  },

  /**
   * Verifies the MD5 of each received message body, collecting all
   * mismatched message ids into one retryable error.
   *
   * @api private
   */
  verifyReceiveMessageChecksum: function verifyReceiveMessageChecksum(response) {
    if (!response.data) return;
    var service = this.service;
    var invalidIds = [];
    AWS.util.arrayEach(response.data.Messages, function(message) {
      if (!service.isChecksumValid(message.MD5OfBody, message.Body)) {
        invalidIds.push(message.MessageId);
      }
    });
    if (invalidIds.length > 0) {
      service.throwInvalidChecksumError(response, invalidIds,
        'Invalid messages: ' + invalidIds.join(', '));
    }
  },

  /**
   * Attaches a retryable InvalidChecksum error (listing the affected
   * message ids) to the response.
   *
   * @api private
   */
  throwInvalidChecksumError: function throwInvalidChecksumError(response, ids, message) {
    response.error = AWS.util.error(new Error(), {
      retryable: true,
      code: 'InvalidChecksum',
      messageIds: ids,
      message: response.request.operation +
        ' returned an invalid MD5 response. ' + message
    });
  },

  /**
   * Returns true when the service-reported checksum matches the locally
   * computed MD5 of `data`.
   *
   * @api private
   */
  isChecksumValid: function isChecksumValid(checksum, data) {
    return this.calculateChecksum(data) === checksum;
  },

  /**
   * Computes the hex-encoded MD5 digest of `data`.
   *
   * @api private
   */
  calculateChecksum: function calculateChecksum(data) {
    return AWS.util.crypto.md5(data, 'hex');
  },

  /**
   * Redirects the request to the endpoint named by the QueueUrl parameter.
   *
   * @api private
   */
  buildEndpoint: function buildEndpoint(request) {
    var queueUrl = request.httpRequest.params.QueueUrl;
    if (queueUrl) {
      request.httpRequest.endpoint = new AWS.Endpoint(queueUrl);
      // signature version 4 requires the region name to be set;
      // sqs queue urls contain the region name as "sqs.<region>."
      var regionMatch = request.httpRequest.endpoint.host.match(/^sqs\.(.+?)\./);
      if (regionMatch) request.httpRequest.region = regionMatch[1];
    }
  }
});

View File

@@ -0,0 +1,86 @@
var AWS = require('../core');
var resolveRegionalEndpointsFlag = require('../config_regional_endpoint');
var ENV_REGIONAL_ENDPOINT_ENABLED = 'AWS_STS_REGIONAL_ENDPOINTS';
var CONFIG_REGIONAL_ENDPOINT_ENABLED = 'sts_regional_endpoints';
AWS.util.update(AWS.STS.prototype, {
  /**
   * @overload credentialsFrom(data, credentials = null)
   *   Creates a credentials object from STS response data containing
   *   credentials information. Useful for quickly setting AWS credentials.
   *
   *   @note This is a low-level utility function. If you want to load temporary
   *     credentials into your process for subsequent requests to AWS resources,
   *     you should use {AWS.TemporaryCredentials} instead.
   *   @param data [map] data retrieved from a call to {getFederatedToken},
   *     {getSessionToken}, {assumeRole}, or {assumeRoleWithWebIdentity}.
   *   @param credentials [AWS.Credentials] an optional credentials object to
   *     fill instead of creating a new object. Useful when modifying an
   *     existing credentials object from a refresh call.
   *   @return [AWS.TemporaryCredentials] the set of temporary credentials
   *     loaded from a raw STS operation response.
   *   @example Using credentialsFrom to load global AWS credentials
   *     var sts = new AWS.STS();
   *     sts.getSessionToken(function (err, data) {
   *       if (err) console.log("Error getting credentials");
   *       else {
   *         AWS.config.credentials = sts.credentialsFrom(data);
   *       }
   *     });
   *   @see AWS.TemporaryCredentials
   */
  credentialsFrom: function credentialsFrom(data, credentials) {
    if (!data) return null;
    // Fill the caller-provided object when given; otherwise build a new one.
    var creds = credentials || new AWS.TemporaryCredentials();
    creds.expired = false;
    creds.accessKeyId = data.Credentials.AccessKeyId;
    creds.secretAccessKey = data.Credentials.SecretAccessKey;
    creds.sessionToken = data.Credentials.SessionToken;
    creds.expireTime = data.Credentials.Expiration;
    return creds;
  },

  // Both assumeRole* web-identity flows are unauthenticated by design:
  // the caller exchanges an external token for AWS credentials.
  assumeRoleWithWebIdentity: function assumeRoleWithWebIdentity(params, callback) {
    return this.makeUnauthenticatedRequest('assumeRoleWithWebIdentity', params, callback);
  },

  assumeRoleWithSAML: function assumeRoleWithSAML(params, callback) {
    return this.makeUnauthenticatedRequest('assumeRoleWithSAML', params, callback);
  },

  /**
   * @api private
   */
  setupRequestListeners: function setupRequestListeners(request) {
    request.addListener('validate', this.optInRegionalEndpoint, true);
  },

  /**
   * Rewrites the request endpoint to the regional STS endpoint when the
   * client opted into regional endpoints but resolved a global endpoint.
   *
   * @api private
   */
  optInRegionalEndpoint: function optInRegionalEndpoint(req) {
    var service = req.service;
    var config = service.config;
    config.stsRegionalEndpoints = resolveRegionalEndpointsFlag(service._originalConfig, {
      env: ENV_REGIONAL_ENDPOINT_ENABLED,
      sharedConfig: CONFIG_REGIONAL_ENDPOINT_ENABLED,
      clientConfig: 'stsRegionalEndpoints'
    });
    if (config.stsRegionalEndpoints !== 'regional' || !service.isGlobalEndpoint) {
      return;
    }
    //client will throw if region is not supplied; request will be signed with specified region
    if (!config.region) {
      throw AWS.util.error(new Error(),
        {code: 'ConfigError', message: 'Missing region in config'});
    }
    // Splice the region into the global hostname, e.g.
    // sts.amazonaws.com -> sts.<region>.amazonaws.com
    var suffixStart = config.endpoint.indexOf('.amazonaws.com');
    var regionalEndpoint = config.endpoint.substring(0, suffixStart) +
      '.' + config.region + config.endpoint.substring(suffixStart);
    req.httpRequest.updateEndpoint(regionalEndpoint);
    req.httpRequest.region = config.region;
  }
});

View File

@@ -0,0 +1,10 @@
var AWS = require('../core');
// Hide the legacy alias from enumeration of the AWS namespace
// (presumably marks the property non-enumerable — confirm in AWS.util).
AWS.util.hideProperties(AWS, ['SimpleWorkflow']);
/**
 * @constant
 * @readonly
 * Backwards compatibility for access to the {AWS.SWF} service class.
 */
AWS.SimpleWorkflow = AWS.SWF;