UnknownEndpoint: Inaccessible host: "Bucket_Name.s3.ap-south-1.amazonaws.com". This service may not be available in the "ap-south-1" region

Problem Description

(Error screenshot)

Even though I have a stable Internet connection, I get this error when performing an S3 multipart upload for files larger than 1 GB. Here is my complete S3 multipart upload code.

const AWS = require('aws-sdk');

AWS.config.update({
    accessKeyId: accessKeyId,
    secretAccessKey: secretAccessKey,
    region: region,
    correctClockSkew: true,
    httpOptions: {
        timeout: 900000
    }
});

const s3 = new AWS.S3(); // S3 client used by the upload functions below

const startTime = new Date();
let partNum = 0;
const partSize = 1024 * 1024 * 5; // 5 MB parts (S3's minimum part size, except for the last part)
const maxUploadTries = 3;         // give each part at most 3 upload attempts
const multipartMap = {
    Parts: []
};

async function s3MultipartUpload(s3Params) {
    try {
        console.log('##### Entered into s3VideoUpload #####');
        console.log('S3VideoUpload Params : ', s3Params);
        console.log('Start Time : ', startTime);
        console.log('Buffer Length : ', s3Params.Body.length);
        global.numPartsLeft = Math.ceil(s3Params.Body.length / partSize);
        const multiPartParams = {
            Bucket: s3Params.Bucket,
            Key: s3Params.Key,
            ContentType: s3Params.contentType
        }
        console.log('PartNum : ', partNum, 'partSize : ', partSize, 'numPartsLeft : ', numPartsLeft);
        console.log('multiPartParams : ', multiPartParams);
        console.log('creating Multipart upload for :', s3Params.Key);
        s3.createMultipartUpload(multiPartParams, function (mpErr, multipart) {
            if (mpErr) {
                throw new Error(mpErr);
            }
            console.log('Got Upload ID : ', multipart.UploadId);

            for (let start = 0; start < s3Params.Body.length; start += partSize) {
                console.log('For Loop numPartsLeft : ', numPartsLeft);
                partNum++;
                const end = Math.min(start + partSize, s3Params.Body.length);
                const partParams = {
                    Body: s3Params.Body.slice(start, end),
                    Bucket: multiPartParams.Bucket,
                    Key: multiPartParams.Key,
                    PartNumber: String(partNum),
                    UploadId: multipart.UploadId,
                };
                console.log('Uploading Part : ##', partParams.PartNumber, ' # Start : ', start);
                uploadPart(s3, multipart, partParams)
                console.log("numPartsLeft For Loop : ", numPartsLeft);
            }
        });
    } catch (error) {
        throw new Error(error);
    }
}

async function uploadPart(s3, multipart, partParams, tryNum) {
    try {
        console.log('####Entered into uploadPart####');
        var tryNum = tryNum || 1;     
        console.log('tryNum >>>>', tryNum);
        s3.uploadPart(partParams, function (multiErr, mData) {
            console.log('#### Started ####');
            if (multiErr) {
                console.log('Upload part error:', multiErr);
                if (tryNum < maxUploadTries) {
                    console.log('Retrying upload of part: #', partParams.PartNumber);
                    uploadPart(s3, multipart, partParams, tryNum + 1);
                }
                else {
                    console.log('Failed uploading part: #', partParams.PartNumber);
                }
                return;
            }

            multipartMap.Parts[this.request.params.PartNumber - 1] = {
                ETag: mData.ETag,
                PartNumber: Number(this.request.params.PartNumber),
            };

            console.log('Completed Part :', this.request.params.PartNumber);
            console.log('mData : ', mData);

            console.log('NumPartLeft : ', numPartsLeft);
            if (--numPartsLeft > 0) return;   // complete only when all parts uploaded

            var doneParams = {
                Bucket: partParams.Bucket,
                Key: partParams.Key,
                MultipartUpload: multipartMap,
                UploadId: multipart.UploadId
            };

            console.log('Completing Upload ....');
            completeMultipartUpload(s3, doneParams);
        })
    }
    catch (error) {
        throw new Error(error);
    }
}

async function completeMultipartUpload(s3, doneParams) {
    try {
        s3.completeMultipartUpload(doneParams, function (err, data) {
            if (err) {
                throw new Error(err);
            }
            else {
                const delta = (new Date() - startTime) / 1000;
                console.log('Completed Upload In : ', delta, 'seconds');
                console.log('Final Upload Data  : ', data);
            }
        });
    } catch (error) {
        throw new Error(error);
    }
}

I have a stable Internet connection, and the code does not retry a chunk whose upload fails on the first attempt. After the error, execution gets stuck.

Execution gets stuck at this point.

Tags: node.js, mongodb, amazon-s3

Solution


I tried to solve your problem with this simple test code, and it works fine with a 785.3 MB file. I hope it helps you resolve your issue with uploading large files. I know it is very basic, but you can enhance the code in your own way.

As I suggested earlier, use file streaming to read the file as a stream and upload it in chunks.

const aws = require("aws-sdk");
const fs = require("fs");

aws.config.update({
  accessKeyId: accessKeyId,
  secretAccessKey: secretAccessKey,
  region: region,
  correctClockSkew: true,
  httpOptions: {
    timeout: 900000,
  },
});

/**
 * @description Build the S3 upload params, reading the file as a stream.
 * @returns {Object} params for s3.upload
 */
function readFile() {
  const fileName = "dump.txt";
  const readStream = fs.createReadStream(fileName);
  return {
    Bucket: bucketName, // bucket name, defined elsewhere
    Key: folderName,    // object key, defined elsewhere
    Body: readStream,
  };
}


/**
 * @description Upload a buffer, blob, or stream, using a configurable
 *              amount of concurrency to upload the file in parts.
 * @param {Object} params
 * @param {Object} options
 * @returns {Promise<Object>}
 */
async function doUpload(params, options) {
  return new Promise((resolve, reject) => {
    const s3 = new aws.S3({ httpOptions: { timeout: 10 * 60 * 1000 } });
    s3.upload(params, options)
      .on("httpUploadProgress", function (event) {
        const progressPercent = ((event.loaded * 100) / event.total).toFixed();
        console.log(`${progressPercent}% file is uploaded`);
      })
      .send(function (err, data) {
        if (err) {
          reject(err);
        } else {
          resolve(data);
        }
      });
  });
}

/**
 * @description initialize uploading
 */
async function init() {
  const params = readFile();
  // Parts of 5 MB, with up to 10 parts uploaded concurrently.
  const options = { partSize: 5 * 1024 * 1024, queueSize: 10 };
  await doUpload(params, options)
    .then((data) => {
      console.log("File has uploaded succesfully");
      console.log(data);
    })
    .catch((err) => {
      console.error("Error occured while uploading");
      console.error(err);
    });
}

init();
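
To tie this back to the question: instead of holding the whole file in memory and passing a buffer as Body, the same doUpload helper can be given a read stream of the large video file. A minimal sketch, where the bucket name, object key, and file path are hypothetical placeholders:

// Hypothetical usage for the > 1 GB upload from the question;
// "my-bucket", "videos/large-video.mp4" and "./large-video.mp4" are placeholders.
async function uploadLargeVideo() {
  const params = {
    Bucket: "my-bucket",
    Key: "videos/large-video.mp4",
    Body: fs.createReadStream("./large-video.mp4"), // stream instead of an in-memory buffer
  };
  // 5 MB parts, at most 10 parts in flight at a time
  const options = { partSize: 5 * 1024 * 1024, queueSize: 10 };
  return doUpload(params, options);
}

The managed s3.upload() call used inside doUpload splits the stream into parts on its own and relies on the SDK's built-in request retries, so none of the hand-rolled createMultipartUpload/uploadPart bookkeeping from the question is needed.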

I hope this sheds some light on your problem and your code.

