// add this to ./routes/middleware.js

var keystone = require('keystone');
var crypto = require('crypto');
var request = require('request');
var path = require('path');
var fs = require('fs');
var s3 = require('s3');

// assumes an Imagecache list with `hash` and `uploaded` fields
var image_cache = keystone.list('Imagecache').model;

// scratch directory for downloads before they are pushed to S3
var temp_dir = path.join(process.cwd(), 'temp/');
if (!fs.existsSync(temp_dir)) {
  fs.mkdirSync(temp_dir);
}

var s3_client = s3.createClient({
  multipartUploadThreshold: 20971520, // this is the default (20 MB)
  multipartUploadSize: 15728640, // this is the default (15 MB)
  s3Options: {
    accessKeyId: 'ACCESS_KEY',
    secretAccessKey: 'SECRET'
  }
});

// if you already have an initLocals, just add the locals.gi function to it
exports.initLocals = function (req, res, next) {
  var locals = res.locals;

  locals.gi = function (img) {
    // hash the source URL so it doubles as the S3 key and the cache lookup
    var hash = crypto.createHash('md5').update(img).digest('hex');

    // deasync blocks until the query calls back, which keeps gi() synchronous
    // so it can be called directly from templates
    function getImage(hash) {
      var response;
      image_cache.where({ hash: hash }).findOne(function (err, data) {
        response = data;
      });
      while (response === undefined) {
        require('deasync').sleep(3);
      }
      return response;
    }

    var db_image = getImage(hash);

    if (!db_image || !db_image.uploaded) {
      if (!db_image) {
        // no cache entry yet: record one, then download the image and
        // upload it to S3 in the background
        image_cache.create({ hash: hash, uploaded: 0 }, function (err, $img) {
          var local_file = path.join(temp_dir, hash + '.jpg');
          request(img).pipe(fs.createWriteStream(local_file)).on('close', function () {
            var uploader = s3_client.uploadFile({
              localFile: local_file,
              s3Params: {
                Bucket: 'YOUR_BUCKET',
                Key: hash + '.jpg',
                ACL: 'public-read',
                ContentType: 'image/jpeg'
              }
            });
            uploader.on('error', function (err) {
              // drop the cache entry so the upload is retried on the next request
              $img.remove();
            });
            uploader.on('end', function () {
              console.log('successful image upload', img);
              $img.uploaded = true;
              $img.save();
            });
          });
        });
      }
      // not on S3 yet, so fall back to the original URL for now
      return img;
    }
    else {
      return req.protocol + '://YOUR_BUCKET.s3.amazonaws.com/' + hash + '.jpg';
    }
  };

  next();
};
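To wire this up, initLocals has to run as pre-route middleware; assuming the default Keystone project layout, it already does, via this line in ./routes/index.js:

var keystone = require('keystone');
var middleware = require('./middleware');

// already present in a generated Keystone project; shown here for context
keystone.pre('routes', middleware.initLocals);

A view can then wrap any remote image URL in gi() — for example img(src=gi(post.image.url)) in a Jade template, where post.image.url is just a stand-in for your Cloudinary URL — and it will switch to the S3 copy once the upload finishes.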
This is a neat solution for the Cloudinary images in particular, but I imagine most sites with bandwidth issues would be better served by a basic CDN setup or a reverse proxy. AWS charges $0.09-$0.25 per GB of outbound transfer (for the first 10 TB each month), while a $20/month plan with CloudFlare gives you unlimited transfers. Something like that is quite transparent and can be set up easily.
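For what it's worth, even the reverse-proxy half of that is only a few lines of Node if you reuse the request package from above; the bucket host and port below are placeholders:

// a minimal reverse-proxy sketch: pipe image requests straight through to
// S3 and let whatever CDN or cache sits in front handle the bandwidth
var http = require('http');
var request = require('request');

http.createServer(function (req, res) {
  request('https://YOUR_BUCKET.s3.amazonaws.com' + req.url).pipe(res);
}).listen(8080);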
On the flip side, if the middleware code above were refactored to use the official AWS SDK package, you could leverage its ability to create signed URLs for S3 objects. Doing so would let you generate a unique, time-limited URL each time an image was referenced, which has some interesting effects; for one, the links expire on their own, so other sites can't hotlink your images.
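A rough sketch with the official aws-sdk package (credentials, bucket, and the 60-second expiry are all placeholder assumptions):

var AWS = require('aws-sdk');

var s3 = new AWS.S3({
  accessKeyId: 'ACCESS_KEY',
  secretAccessKey: 'SECRET'
});

// returns a unique URL that stops working after 60 seconds; this could
// replace the public-read URL that gi() returns above
function signedImageUrl(hash) {
  return s3.getSignedUrl('getObject', {
    Bucket: 'YOUR_BUCKET',
    Key: hash + '.jpg',
    Expires: 60
  });
}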
Just an idea...