Uploading files over HTTP with Node.js - the Nginx version

Performance Tuning

  • Implement a reverse proxy server in front of the Node.js server
  • Offload the file upload requests to the reverse proxy
  • Convert the blocking synchronous code in MergeAll to non-blocking asynchronous code
  • Create an API call for each backend request; currently a single UploadChunk API call is used to manage all of the uploads
  • Remove the checksum calculation from the MergeAll API call; a GetChecksum API call will be created to calculate the checksum of the uploaded file (a sketch of such a call is given just below this list)

The performance tests were run on a CentOS 7 virtual machine running NGINX version 1.9.9.
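The GetChecksum call itself is not shown in this article. The following is only a rough sketch of what such an Express route could look like, assuming the same app, uploadpath and fs (fs-extra) variables and the same query-parameter style (filename, directoryname) used in the rest of the project; the route path and variable names are illustrative, not the project's actual code.

    // Hypothetical GetChecksum route - a sketch only, not the project's actual code.
    // Assumes the app, uploadpath and fs (fs-extra) variables used elsewhere in the project.
    var crypto = require('crypto');

    app.get('*/api/CelerFTFileUpload/GetChecksum*', function (request, response) {

        var filename = uploadpath + request.param('directoryname') + '/' + request.param('filename');

        // Stream the merged file through an MD5 hash so the whole file
        // is never held in memory at once
        var hash = crypto.createHash('md5');
        var rstream = fs.createReadStream(filename);

        rstream.on('data', function (data) {
            hash.update(data);
        });

        rstream.on('end', function () {
            // Return the checksum in upper case, matching the md5results.toUpperCase()
            // form seen in the commented-out MergeAll code further down
            response.status(200).send(hash.digest('hex').toUpperCase());
        });

        rstream.on('error', function (err) {
            response.status(500).send(err);
        });
    });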

Reverse Proxy

The following Nginx configuration is used:

    # redirect CelerFT
    location = /api/CelerFTFileUpload/UploadChunk/XFileName {
        aio on;
        directio 10M;
        client_body_temp_path /tmp/nginx 1;
        client_body_in_file_only on;
        client_body_buffer_size 10M;
        client_max_body_size 60M;
        proxy_pass_request_headers on;
        proxy_set_body off;
        proxy_redirect off;
        proxy_ignore_client_abort on;
        proxy_http_version 1.1;
        proxy_set_header Connection "";
        proxy_set_header Host $host;
        ##proxy_set_header Host $http_host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header X-File-Name $request_body_file;
        proxy_pass http://127.0.0.1:1337; # proxy_redirect default;
        proxy_connect_timeout 600;
        proxy_send_timeout 600;
        proxy_read_timeout 600;
        send_timeout 600;
        access_log off;
        error_log /var/log/nginx/nginx.upload.error.log;
    }

Under systemd, the Nginx private temporary directory gets a different name each time Nginx is restarted (systemd's private /tmp feature creates a directory whose name contains nginx.service plus a random suffix), so we first have to find the name of that directory before we can move the uploaded chunk out of it.

    app.post('*/api/CelerFTFileUpload/UploadChunk/XFileName*', function (request, response) {

        // Check if we are uploading using the x-file-name header.
        // This means that we have offloaded the file upload to the
        // web server (NGINX) and we are sending up the path to the actual
        // file in the header. The file chunk will not be in the body
        // of the request
        if (request.headers['x-file-name']) {

            // Temporary location of our uploaded file.
            // Nginx uses a private file path in /tmp on CentOS,
            // so we need to get the name of that path
            var temp_dir = fs.readdirSync('/tmp');
            var nginx_temp_dir = [];

            for (var i = 0; i < temp_dir.length; i++) {
                if (temp_dir[i].match('nginx.service')) {
                    nginx_temp_dir.push(temp_dir[i]);
                }
            }

            var temp_path = '/tmp/' + nginx_temp_dir[0] + request.headers['x-file-name'];

            // fs is the fs-extra module, which provides fs.move
            fs.move(temp_path, response.locals.localfilepath, {}, function (err) {

                if (err) {
                    response.status(500).send(err);
                    return;
                }

                // Send back a successful response with the file name
                response.status(200).send(response.locals.localfilepath);
                response.end();
            });
        }
    });

Asynchronous MergeAll API

The call to the getfilesWithExtensionName function in the MergeAll API was replaced; this function was used to check whether all of the chunks of the file had been uploaded.

The getfilesWithExtensionName function:

    function getfilesWithExtensionName(dir, ext) {

        var matchingfiles = [];

        if (fs.ensureDirSync(dir)) {
            return matchingfiles;
        }

        var files = fs.readdirSync(dir);

        for (var i = 0; i < files.length; i++) {
            if (path.extname(files[i]) === '.' + ext) {
                matchingfiles.push(files[i]);
            }
        }

        return matchingfiles;
    }
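For comparison, a non-blocking counterpart to this helper could be built on the asynchronous fs.readdir. This is only a hypothetical sketch to illustrate the synchronous-to-asynchronous conversion; the project itself does not use such a helper and instead calls fs.readdir directly inside the MergeAll route, as shown further down.

    // Hypothetical asynchronous version of the helper - a sketch only.
    // Results are delivered through a callback instead of a return value.
    function getfilesWithExtensionNameAsync(dir, ext, callback) {

        fs.readdir(dir, function (error, files) {

            if (error) {
                // Report the error instead of blocking the event loop with *Sync calls
                return callback(error, []);
            }

            var matchingfiles = [];

            for (var i = 0; i < files.length; i++) {
                if (path.extname(files[i]) === '.' + ext) {
                    matchingfiles.push(files[i]);
                }
            }

            callback(null, matchingfiles);
        });
    }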

Once all of the file chunks have been uploaded, we populate a files array with all of the file names, as shown below.

    for (var i = 0; i < fileslist.length; i++) {
        if (path.extname(fileslist[i]) == '.tmp') {
            //console.log(fileslist[i]);
            files.push(fileslist[i]);
        }
    }

The next step is to create the output file using fs.createWriteStream.

    // Create the output file
    var outputFile = fs.createWriteStream(filename);

A recursive mergefiles function is then used to merge the file chunks into the final output file. Inside mergefiles, fs.createReadStream is used to read each file in the files array and write it to the output file. The mergefiles function is first called with the index set to 0, and the index is incremented after each file has been read successfully.

    var index = 0;

    // Recursive function used to merge the files
    // in a sequential manner
    var mergefiles = function (index) {

        // If the index matches the number of items in the array,
        // end the function and finalize the output file
        if (index == files.length) {
            outputFile.end();
            return;
        }

        console.log(files[index]);

        // Use a read stream to read the files and write them to the write stream
        var rstream = fs.createReadStream(localFilePath + '/' + files[index]);

        rstream.on('data', function (data) {
            outputFile.write(data);
        });

        rstream.on('end', function () {
            //fs.removeSync(localFilePath + '/' + files[index]);
            mergefiles(index + 1);
        });

        rstream.on('close', function () {
            fs.removeSync(localFilePath + '/' + files[index]);
            //mergefiles(index + 1);
        });

        rstream.on('error', function (err) {
            console.log('Error in file merge - ' + err);
            response.status(500).send(err);
            return;
        });
    };

    mergefiles(index);
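As a side note, the same sequential merge could also be expressed with stream piping, which applies backpressure instead of ignoring the return value of outputFile.write. The sketch below is an alternative, not the project's implementation; it reuses the files, localFilePath, outputFile and response variables from the snippet above.

    // Alternative sketch using pipe() - not the project's actual implementation.
    // pipe() pauses the read stream whenever the write stream's buffer is full.
    var mergefilesWithPipe = function (index) {

        if (index == files.length) {
            outputFile.end();
            return;
        }

        var rstream = fs.createReadStream(localFilePath + '/' + files[index]);

        // Keep the output file open between chunks by passing end: false
        rstream.pipe(outputFile, { end: false });

        rstream.on('end', function () {
            // Remove the merged chunk and move on to the next one
            fs.removeSync(localFilePath + '/' + files[index]);
            mergefilesWithPipe(index + 1);
        });

        rstream.on('error', function (err) {
            response.status(500).send(err);
        });
    };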

The complete code for the MergeAll API call:

    // Request to merge all of the file chunks into one file
    app.get('*/api/CelerFTFileUpload/MergeAll*', function (request, response) {

        if (request.method == 'GET') {

            // Get the extension from the file name
            var extension = path.extname(request.param('filename'));

            // Get the base file name
            var baseFilename = path.basename(request.param('filename'), extension);

            var localFilePath = uploadpath + request.param('directoryname') + '/' + baseFilename;

            var filename = localFilePath + '/' + baseFilename + extension;

            // Array to hold files to be processed
            var files = [];

            // Use the asynchronous readdir function to process the files.
            // This provides better i/o
            fs.readdir(localFilePath, function (error, fileslist) {

                if (error) {
                    response.status(400).send('Number of file chunks less than total count');
                    //response.end();
                    console.log(error);
                    return;
                }

                //console.log(fileslist.length);
                //console.log(request.param('numberOfChunks'));

                if (fileslist.length != request.param('numberOfChunks')) {
                    response.status(400).send('Number of file chunks less than total count');
                    //response.end();
                    return;
                }

                // Check if all of the file chunks have been uploaded.
                // Note we only want the files with a *.tmp extension
                if (fileslist.length == request.param('numberOfChunks')) {

                    for (var i = 0; i < fileslist.length; i++) {
                        if (path.extname(fileslist[i]) == '.tmp') {
                            //console.log(fileslist[i]);
                            files.push(fileslist[i]);
                        }
                    }

                    if (files.length != request.param('numberOfChunks')) {
                        response.status(400).send('Number of file chunks less than total count');
                        //response.end();
                        return;
                    }

                    // Create the output file
                    var outputFile = fs.createWriteStream(filename);

                    // Done writing the file. Move it to the top level directory
                    outputFile.on('finish', function () {

                        console.log('file has been written ' + filename);
                        //runGC();

                        // New name for the file
                        var newfilename = uploadpath + request.param('directoryname') + '/' + baseFilename + extension;

                        // Check if the file exists at the top level; if it does, delete it.
                        // Use move with the overwrite option
                        fs.move(filename, newfilename, {}, function (err) {

                            if (err) {
                                console.log(err);
                                response.status(500).send(err);
                                //runGC();
                                return;
                            }
                            else {
                                // Delete the temporary directory
                                fs.remove(localFilePath, function (err) {

                                    if (err) {
                                        response.status(500).send(err);
                                        //runGC();
                                        return;
                                    }

                                    // Send back a successful response with the file name
                                    response.status(200).send('Successfully merged file ' + filename);
                                    //response.end();
                                    //runGC();
                                });

                                // Send back a successful response with the file name
                                //response.status(200).send('Successfully merged file ' + filename + "," + md5results.toUpperCase());
                                //response.end();
                            }
                        });
                    });

                    var index = 0;

                    // Recursive function used to merge the files
                    // in a sequential manner
                    var mergefiles = function (index) {

                        // If the index matches the number of items in the array,
                        // end the function and finalize the output file
                        if (index == files.length) {
                            outputFile.end();
                            return;
                        }

                        console.log(files[index]);

                        // Use a read stream to read the files and write them to the write stream
                        var rstream = fs.createReadStream(localFilePath + '/' + files[index]);

                        rstream.on('data', function (data) {
                            outputFile.write(data);
                        });

                        rstream.on('end', function () {
                            //fs.removeSync(localFilePath + '/' + files[index]);
                            mergefiles(index + 1);
                        });

                        rstream.on('close', function () {
                            fs.removeSync(localFilePath + '/' + files[index]);
                            //mergefiles(index + 1);
                        });

                        rstream.on('error', function (err) {
                            console.log('Error in file merge - ' + err);
                            response.status(500).send(err);
                            return;
                        });
                    };

                    mergefiles(index);
                }

                /*else {
                    response.status(400).send('Number of file chunks less than total count');
                    //response.end();
                    return;
                }*/
            });
        }
    });
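For reference, once all of the chunks have been uploaded the MergeAll API can be exercised with a simple GET request. The small client below is only an illustration; the host, port, file name, directory and chunk count are placeholder values, and only the query parameters (filename, directoryname, numberOfChunks) come from the route above.

    // Minimal sketch of a client calling the MergeAll API - all values are placeholders.
    var http = require('http');

    var options = {
        host: 'localhost',
        port: 1337,    // the Node.js backend port used in the Nginx configuration above
        path: '/api/CelerFTFileUpload/MergeAll?filename=backup.zip' +
              '&directoryname=session1&numberOfChunks=12',
        method: 'GET'
    };

    http.get(options, function (res) {
        var body = '';
        res.on('data', function (chunk) { body += chunk; });
        res.on('end', function () {
            // Expect 'Successfully merged file ...' on success,
            // or a 400/500 status with an error message otherwise
            console.log(res.statusCode + ': ' + body);
        });
    }).on('error', function (err) {
        console.log(err);
    });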

The code for this project can be found in the nginxasync branch of the GitHub repository.