@hax
Created April 30, 2016 08:46
block scope optimization test
'use strict'
const Benchmark = require('benchmark')

// MASK() and MAX() supply the xor mask and the loop bound used by every variant.
function MASK() {
  return 100
}

function MAX() {
  return 10000
}

// let/const for every binding
function es2015() {
  let sum = 0
  const mask = MASK()
  const n = MAX()
  for (let i = 0; i < n; ++i) {
    let t = i ^ mask
    sum += t
  }
  return sum
}

// var for every binding
function es5() {
  var sum = 0
  var mask = MASK()
  var n = MAX()
  for (var i = 0; i < n; ++i) {
    var t = i ^ mask
    sum += t
  }
  return sum
}

// const for mask and n, var elsewhere
function es5const() {
  var sum = 0
  const mask = MASK()
  const n = MAX()
  for (var i = 0; i < n; ++i) {
    var t = i ^ mask
    sum += t
  }
  return sum
}

// let for the loop-body temporary t, const for mask and n, var elsewhere
function simpleLet() {
  var sum = 0
  const mask = MASK()
  const n = MAX()
  for (var i = 0; i < n; ++i) {
    let t = i ^ mask
    sum += t
  }
  return sum
}

// let for the accumulator sum, const for mask and n, var elsewhere
function compoundLet1() {
  let sum = 0
  const mask = MASK()
  const n = MAX()
  for (var i = 0; i < n; ++i) {
    var t = i ^ mask
    sum += t
  }
  return sum
}

// let for the loop counter i, const for mask and n, var elsewhere
function compoundLet2() {
  var sum = 0
  const mask = MASK()
  const n = MAX()
  for (let i = 0; i < n; ++i) {
    var t = i ^ mask
    sum += t
  }
  return sum
}
// Every variant must produce the same sum.
var result = 49996536
console.assert(es2015() === result)
console.assert(es5() === result)
console.assert(es5const() === result)
console.assert(simpleLet() === result)
console.assert(compoundLet1() === result)
console.assert(compoundLet2() === result)
new Benchmark.Suite()
  .add('es2015', es2015)
  .add('es5', es5)
  .add('es5const', es5const)
  .add('simpleLet', simpleLet)
  .add('compoundLet1', compoundLet1)
  .add('compoundLet2', compoundLet2)
  // add listeners
  .on('cycle', function (event) {
    console.log(String(event.target))
  })
  .on('complete', function () {
    console.log('Fastest is ' + this.filter('fastest').map('name'))
  })
  // run async
  .run({ 'async': true })
hax commented May 31, 2016

I revisited this performance test and found that V8 has a special optimization that utilizes multiple cores for loops in some cases (such as applying SIMD automatically), so the profiling results differ by hardware (how many cores you have). It seems let prevents such optimization.
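Since the results depend on the machine, one small optional addition (not part of the original gist; assumes Node.js) is to print the core count and engine version next to the benchmark output so runs on different hardware can be compared:

// Optional: report hardware and engine details alongside the benchmark results.
const os = require('os')
console.log('cores:', os.cpus().length,
            'node:', process.version,
            'v8:', process.versions.v8)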

Considering that programmers can use Worker, SIMD, and Atomics to explicitly utilize multiple cores, I guess we'd better drop such an optimization.
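As a rough sketch of that explicit approach (not part of the original gist; it assumes a modern Node.js with the worker_threads module, which was not yet available when this gist was written, and in a browser the analogue would be Web Workers), the same masked-sum loop can be split across one worker per core:

// Sketch only: explicit multi-core version of the masked-sum loop.
const { Worker, isMainThread, parentPort, workerData } = require('worker_threads')

if (isMainThread) {
  const os = require('os')
  const n = 10000
  const mask = 100
  const cores = os.cpus().length
  const chunk = Math.ceil(n / cores)
  let pending = cores
  let total = 0
  for (let c = 0; c < cores; ++c) {
    // Each worker sums its own slice of the range [0, n).
    const start = c * chunk
    const end = Math.min(start + chunk, n)
    const w = new Worker(__filename, { workerData: { start, end, mask } })
    w.on('message', function (partial) {
      total += partial
      if (--pending === 0) console.log('sum =', total) // 49996536 for n=10000, mask=100
    })
  }
} else {
  const { start, end, mask } = workerData
  let sum = 0
  for (let i = start; i < end; ++i) sum += i ^ mask
  parentPort.postMessage(sum)
}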
