// We allocate a file with the size of the downloaded file so we can
// append chunks randomly to different positions as we download the file
function allocateDisk(size, callback){
  fss.cwd.getFile(prompt("Filename", "movie.mp4"), {create: true}, (fileEntry) => {
    fileEntry.createWriter((writer) => {
      // ~100 MB of zeros used as filler
      var blob = new Blob([new ArrayBuffer(1.049e+8)])
      writer.onerror = (err) => {
        console.error("could not write", err)
      }
      // we can't write more than 500 MB at once,
      // so we allocate a little bit at a time
      writer.onwriteend = (e) => {
        if (writer.length === size) return callback(writer);
        if (size - writer.length <= blob.size) {
          // Last write, so slice the blob down to the remaining bytes
          blob = blob.slice(-(size - writer.length));
          writer.write(blob);
          writer.onwriteend = function(){
            callback(writer);
          }
        } else {
          writer.write(blob);
        }
      }
      // kick off the write loop with an empty write
      writer.write(new Blob([""]))
    })
    console.log(fileEntry);
  }, (err) => {
    console.log(err)
  })
}
var url = "https://example.com/movie.mp4"; | |
console.time("download"); | |
getSize(url, function(size){ | |
allocateDisk(size, function(writer){ | |
download(size, url, writer, function(){ | |
console.log("done :)") | |
console.timeEnd("download"); | |
}) | |
}) | |
}) | |
// Just figuring out what the size of the file is.
// Calls back with the Content-Length, or false if the
// server doesn't support byte-range requests
function getSize(url, callback){
  var xhr = new XMLHttpRequest();
  xhr.open("HEAD", url);
  xhr.send();
  xhr.onload = function(){
    callback(xhr.getResponseHeader("Accept-Ranges") === 'bytes' && ~~xhr.getResponseHeader("Content-Length"))
  }
}
function download(size, url, writer, callback){
  var chunkDownload = 5000000
  var queue = 0;
  var ranges = [];
  var pool = [];
  var isWriting = false;
  var loaded = 0;
  var started = Date.now();
  var has = [];

  function write(){
    if (isWriting || !pool.length) return;
    var chunk = pool.shift();
    isWriting = true;
    writer.seek(chunk[1]);
    // chunk[0] is a single ArrayBuffer (xhr path) or an
    // array of Uint8Array chunks (fetch path), so flatten it
    writer.write(new Blob([].concat(chunk[0])));
  }

  writer.onerror = function(){
    console.log("error writing file", writer.error);
  }

  writer.onwriteend = function(){
    queue--;
    console.log(pool.length);
    if (!queue) {
      clearInterval(timecontroller);
      callback();
      return;
    }
    isWriting = false;
    write();
  };

  for (var i = 0; i < size; i += chunkDownload) {
    queue++;
    ranges.push(`bytes=${i}-${i + chunkDownload - 1}`)
  }
  // Classic reusable XHR ajax.
  // Best suited when you can read byte ranges
  // but don't have support for fetch.
  //
  // Uses 6 simultaneous downloads (b/c some websites throttle the download speed)
  /*
  Array(6).fill("").forEach(function(){
    if(!ranges.length) return;
    var pice = ranges.shift();
    var xhr = new XMLHttpRequest();
    var last = 0;
    if(!pice) return;
    xhr.responseType = "arraybuffer";
    xhr.open("GET", url);
    xhr.setRequestHeader("Range", pice)
    xhr.send();
    xhr.onerror = function(){
      // put the range back so another worker can retry it
      ranges.unshift(pice);
    }
    xhr.onprogress = function(evt){
      loaded += evt.loaded - last;
      last = evt.loaded;
    }
    xhr.onload = function(evt){
      // "Content-Range: bytes 0-4999999/12345678" -> start offset
      var range = xhr.getResponseHeader("content-range").split(" ")[1].split("-"),
          start = ~~range[0];
      last = 0;
      pool.push([xhr.response, start]);
      write();
      // reuse the same xhr for the next range
      var newPice = ranges.shift();
      if(!newPice) return;
      xhr.open("GET", url);
      xhr.setRequestHeader("Range", newPice)
      xhr.send();
    }
  });
  */
  var timecontroller = setInterval(function(){
    var percentComplete = loaded / size;
    var timeElapsed = Date.now() - started;
    var downloadSpeed = loaded / (timeElapsed / 1000); // bytes per second
    var est = (size - loaded) / downloadSpeed; // estimated seconds remaining
    // console.log(percentComplete, loaded, downloadSpeed, est);
  }, 1000);
Array(6).fill("").forEach(function(){ | |
if(!ranges.length) return; | |
var last = 0; | |
function context(){ | |
var pice = ranges.shift(); | |
if(!pice) return; | |
fetch(url, {headers: {"Range": pice}}).then(r => { | |
var range = r.headers.get('content-range') | |
start = ~~range.split(" ")[1].split("-")[0]; | |
return pump(start, r.body.getReader(), []).then(function(){ | |
context(); | |
}); | |
}); | |
} | |
context(); | |
}); | |
  // Definitely the most RAM-friendly download
  // you can get in javascript, but also the slowest...
  //
  // if you know node.js it would be like
  // fetch(...).pipe(writer)
  //
  // But it doesn't have any highWaterMark, so it pauses
  // the download every time you write a chunk to the filesystem
  /*
  Array(1).fill("").forEach(function(){
    writer.seek(0);
    function pump(reader){
      return reader.read().then(function(result){
        if(result.done) return;
        return new Promise(function(resolve, reject){
          writer.onwriteend = function(){
            resolve(pump(reader));
          }
          writer.write(new Blob([result.value]));
        });
      });
    }
    fetch(url).then(r => {
      return pump(r.body.getReader())
    });
  });
  */
  // Streaming capability made possible with the fetch api.
  // Pushes a lot of chunks into a pool of chunks.
  // Basically becomes a 2D array with different starting points;
  // each array in the pool has no missing bytes in between
  //
  // pool = [
  //   [0, 20], [21, 40], [...], [...],
  //   [500000, 500020], [500021, 500040]
  // ]
  function pump(start, reader, localPool) {
    return reader.read().then(function (result) {
      // if write() already flushed localPool, start a fresh one
      // at the current offset
      if(!~pool.indexOf(localPool)){
        localPool = [[], start];
        pool.push(localPool);
      }
      if (result.done) return
      queue++;
      // keep track of which byte ranges we have, merging overlaps
      has.push([start, start + result.value.byteLength]);
      has = merge(has);
      localPool[0].push(result.value);
      setTimeout(write, 1000);
      return pump(result.value.byteLength + start, reader, localPool);
    });
  }
}
// download: 113347.075ms with just ajax, 5000000 byte chunks (6 simultaneous downloads)
// download: 153992.638ms used both ajax & fetch, 5000000 byte chunks (12 simultaneous downloads)
// download: 321325.148ms 25mb chunks, both ajax & fetch (12 simultaneous downloads)
// download: 226967.841ms 25mb chunks, only ajax (6 simultaneous downloads)

// Merge arrays with overlapping values
// $ merge([ [10, 20], [19, 40], [40, 60], [70, 80] ])
// $ [ [10, 60], [70, 80] ]
function merge(ranges) {
  var result = []
  // sort numerically by range start (a boolean comparator
  // wouldn't sort reliably)
  ranges.sort((a, b) => {
    return a[0] - b[0];
  });
  for (let range of ranges) {
    if (!result.length || range[0] > result[result.length-1][1] + 1)
      result.push(range);
    else
      result[result.length-1][1] = Math.max(result[result.length-1][1], range[1]);
  }
  return result;
}
Better late than never... fss was something that came from filer.js.
But you don't really need that... here is a way to get it in another way:
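(The original snippet isn't shown in this capture, but a minimal sketch using the raw, Chrome-only and now-deprecated webkitRequestFileSystem API would look like the following; the TEMPORARY storage type and the 100 MB quota are illustrative choices, not requirements.)

// ask the browser for a sandboxed filesystem instead of using filer.js
var requestFs = window.requestFileSystem || window.webkitRequestFileSystem;
requestFs(window.TEMPORARY, 100 * 1024 * 1024, function (fs) {
  // fs.root is a DirectoryEntry usable wherever fss.cwd appears above, e.g.
  // fs.root.getFile("movie.mp4", {create: true}, onFileEntry, onError)
}, function (err) {
  console.error("could not get a filesystem", err);
});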