Skip to content

Instantly share code, notes, and snippets.

@benlesh
Created February 13, 2015 21:15
Show Gist options
  • Save benlesh/65d62a98ce70b3ac9be9 to your computer and use it in GitHub Desktop.
JS Bin // source http://jsbin.com/nunije
<!DOCTYPE html>
<html>
<head>
<script src="http://cdnjs.cloudflare.com/ajax/libs/rxjs/2.3.22/rx.all.js"></script>
<meta charset="utf-8">
<title>JS Bin</title>
</head>
<body>
<h1>Just say something</h1>
<script id="jsbin-javascript">
"use strict";

// Start each run with a clean console.
console.clear();

var root = window;
var Observable = Rx.Observable;

// Pick up whichever (possibly vendor-prefixed) speech-recognition
// constructor this browser exposes, if any.
var SpeechRecognition =
  root.SpeechRecognition ||
  root.webkitSpeechRecognition ||
  root.mozSpeechRecognition ||
  root.msSpeechRecognition ||
  root.oSpeechRecognition;
// Wraps the browser SpeechRecognition API in an Rx Observable.
// Emits the raw SpeechRecognitionEvent for each recognition result,
// errors on recognition errors, and completes when recognition ends.
// options: { continuous, maxAlternatives, lang } — merged over defaults.
function fromSpeechRecognition(options) {
  var config = extend({
    continuous: false,
    maxAlternatives: 5,
    lang: "en-US"
  }, options);
  return Observable.create(function (obs) {
    if (!SpeechRecognition) {
      throw new Error("speech recognition not supported");
    }
    var recognition = new SpeechRecognition();
    recognition.continuous = config.continuous;
    // Fix: maxAlternatives and lang were part of the defaults but were
    // never applied to the recognizer, so those options had no effect.
    recognition.maxAlternatives = config.maxAlternatives;
    recognition.lang = config.lang;
    recognition.onresult = function (e) {
      obs.onNext(e);
    };
    recognition.onerror = function (e) {
      obs.onError(e);
    };
    recognition.onend = function () {
      obs.onCompleted();
    };
    recognition.start();
    // Disposal: stop listening when the subscriber unsubscribes.
    return function () {
      recognition.stop();
    };
  });
}
// Speaks `text` via the SpeechSynthesis API. Emits the utterance object
// once speech finishes, then completes; errors if synthesis fails.
function fromSpeechUtterance(text) {
  return Observable.create(function (obs) {
    var msg = new SpeechSynthesisUtterance(text);
    // Attach handlers before calling speak() so no event can be missed.
    msg.onend = function () {
      obs.onNext(msg);
      obs.onCompleted();
    };
    // Fix: synthesis errors were previously swallowed, leaving this
    // observable (and the flatMapped pipeline) hanging forever.
    msg.onerror = function (e) {
      obs.onError(e);
    };
    speechSynthesis.speak(msg);
  });
}
// Pipeline: listen continuously, pull the transcript out of each result,
// speak it back, and append the spoken text to the page.
var voice = fromSpeechRecognition({ continuous: true })
  .do(function (e) {
    console.log(e.results);
  })
  .map(function (e) {
    return e.results[e.resultIndex][0].transcript;
  })
  .flatMap(function (text) {
    return fromSpeechUtterance(text);
  })
  .do(function (uttered) {
    var line = document.createElement("div");
    line.innerText = uttered.text;
    document.body.appendChild(line);
  });
voice.subscribe(function (x) {
  console.log(x);
}, function (err) {
  console.error("err", err);
});
// Shallow-copies b's own enumerable properties onto a and returns a.
// Tolerates a null/undefined b (returns a unchanged).
function extend(a, b) {
  for (var prop in b) {
    // Call hasOwnProperty via Object.prototype so this also works for
    // objects created with Object.create(null), which lack the method.
    if (Object.prototype.hasOwnProperty.call(b, prop)) {
      a[prop] = b[prop];
    }
  }
  return a;
}
</script>
<script id="jsbin-source-html" type="text/html"><!DOCTYPE html>
<html>
<head>
<script src="//cdnjs.cloudflare.com/ajax/libs/rxjs/2.3.22/rx.all.js"><\/script>
<meta charset="utf-8">
<title>JS Bin</title>
</head>
<body>
<h1>Just say something</h1>
</body>
</html></script>
<script id="jsbin-source-javascript" type="text/javascript">console.clear();
var root = window;
var { Observable } = Rx;
var SpeechRecognition = root.SpeechRecognition ||
root.webkitSpeechRecognition ||
root.mozSpeechRecognition ||
root.msSpeechRecognition ||
root.oSpeechRecognition;
// Wraps the browser SpeechRecognition API in an Rx Observable.
// Emits the raw SpeechRecognitionEvent per result, errors on recognition
// errors, and completes when recognition ends.
// options: { continuous, maxAlternatives, lang } merged over defaults.
function fromSpeechRecognition(options) {
  var config = extend({
    continuous: false,
    maxAlternatives: 5,
    lang: 'en-US'
  }, options);
  return Observable.create((obs) => {
    if (!SpeechRecognition) {
      throw new Error('speech recognition not supported');
    }
    var recognition = new SpeechRecognition();
    recognition.continuous = config.continuous;
    // Fix: maxAlternatives and lang were part of the defaults but were
    // never applied to the recognizer, so those options had no effect.
    recognition.maxAlternatives = config.maxAlternatives;
    recognition.lang = config.lang;
    recognition.onresult = (e) => obs.onNext(e);
    recognition.onerror = (e) => obs.onError(e);
    recognition.onend = () => obs.onCompleted();
    recognition.start();
    // Disposal: stop listening when the subscriber unsubscribes.
    return () => recognition.stop();
  });
}
// Speaks `text` via the SpeechSynthesis API. Emits the utterance object
// once speech finishes, then completes; errors if synthesis fails.
function fromSpeechUtterance(text) {
  return Observable.create((obs) => {
    var msg = new SpeechSynthesisUtterance(text);
    // Attach handlers before calling speak() so no event can be missed.
    msg.onend = () => {
      obs.onNext(msg);
      obs.onCompleted();
    };
    // Fix: synthesis errors were previously swallowed, leaving this
    // observable (and the flatMapped pipeline) hanging forever.
    msg.onerror = (e) => obs.onError(e);
    speechSynthesis.speak(msg);
  });
}
// Pipeline: listen continuously, extract each transcript, speak it back,
// and append the spoken text to the page.
var voice = fromSpeechRecognition({ continuous: true })
  .do((e) => console.log(e.results))
  .map((e) => e.results[e.resultIndex][0].transcript)
  .flatMap((text) => fromSpeechUtterance(text))
  .do((uttered) => {
    var line = document.createElement('div');
    line.innerText = uttered.text;
    document.body.appendChild(line);
  });
// Shallow-copies b's own enumerable properties onto a and returns a.
// Tolerates a null/undefined b (returns a unchanged).
function extend(a, b) {
  for (var prop in b) {
    // Call hasOwnProperty via Object.prototype so this also works for
    // objects created with Object.create(null), which lack the method.
    if (Object.prototype.hasOwnProperty.call(b, prop)) {
      a[prop] = b[prop];
    }
  }
  return a;
}
</script></body>
</html>
"use strict";

// Start each run with a clean console.
console.clear();

var root = window;
var Observable = Rx.Observable;

// Pick up whichever (possibly vendor-prefixed) speech-recognition
// constructor this browser exposes, if any.
var SpeechRecognition =
  root.SpeechRecognition ||
  root.webkitSpeechRecognition ||
  root.mozSpeechRecognition ||
  root.msSpeechRecognition ||
  root.oSpeechRecognition;
// Wraps the browser SpeechRecognition API in an Rx Observable.
// Emits the raw SpeechRecognitionEvent for each recognition result,
// errors on recognition errors, and completes when recognition ends.
// options: { continuous, maxAlternatives, lang } — merged over defaults.
function fromSpeechRecognition(options) {
  var config = extend({
    continuous: false,
    maxAlternatives: 5,
    lang: "en-US"
  }, options);
  return Observable.create(function (obs) {
    if (!SpeechRecognition) {
      throw new Error("speech recognition not supported");
    }
    var recognition = new SpeechRecognition();
    recognition.continuous = config.continuous;
    // Fix: maxAlternatives and lang were part of the defaults but were
    // never applied to the recognizer, so those options had no effect.
    recognition.maxAlternatives = config.maxAlternatives;
    recognition.lang = config.lang;
    recognition.onresult = function (e) {
      obs.onNext(e);
    };
    recognition.onerror = function (e) {
      obs.onError(e);
    };
    recognition.onend = function () {
      obs.onCompleted();
    };
    recognition.start();
    // Disposal: stop listening when the subscriber unsubscribes.
    return function () {
      recognition.stop();
    };
  });
}
// Speaks `text` via the SpeechSynthesis API. Emits the utterance object
// once speech finishes, then completes; errors if synthesis fails.
function fromSpeechUtterance(text) {
  return Observable.create(function (obs) {
    var msg = new SpeechSynthesisUtterance(text);
    // Attach handlers before calling speak() so no event can be missed.
    msg.onend = function () {
      obs.onNext(msg);
      obs.onCompleted();
    };
    // Fix: synthesis errors were previously swallowed, leaving this
    // observable (and the flatMapped pipeline) hanging forever.
    msg.onerror = function (e) {
      obs.onError(e);
    };
    speechSynthesis.speak(msg);
  });
}
// Pipeline: listen continuously, pull the transcript out of each result,
// speak it back, and append the spoken text to the page.
var voice = fromSpeechRecognition({ continuous: true })
  .do(function (e) {
    console.log(e.results);
  })
  .map(function (e) {
    return e.results[e.resultIndex][0].transcript;
  })
  .flatMap(function (text) {
    return fromSpeechUtterance(text);
  })
  .do(function (uttered) {
    var line = document.createElement("div");
    line.innerText = uttered.text;
    document.body.appendChild(line);
  });
voice.subscribe(function (x) {
  console.log(x);
}, function (err) {
  console.error("err", err);
});
// Shallow-copies b's own enumerable properties onto a and returns a.
// Tolerates a null/undefined b (returns a unchanged).
function extend(a, b) {
  for (var prop in b) {
    // Call hasOwnProperty via Object.prototype so this also works for
    // objects created with Object.create(null), which lack the method.
    if (Object.prototype.hasOwnProperty.call(b, prop)) {
      a[prop] = b[prop];
    }
  }
  return a;
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment