Conversational - ChatGPT with text highlighting.
/////////////////////////////////////////////////////////
// Open AI - Conversant GPT                          ////
// --------------------------------------------------////
// FILE NAME: openai-conversational-gpt.js
//
// DESCRIPTION: Snippet that lets a user converse with
// ChatGPT by voice rather than typing for data entry,
// without the need of an API key. Two things: it doesn't
// require an API key, and it is alpha, so... there is that.
// This is a snippet for Chrome. It hasn't been tested in
// Edge yet, but it works, in a very alpha way, as a proof
// of concept. It uses the webkit APIs for both speech
// synthesis and speech recognition.
//
// Instructions:
//
// Prompts:
// - If the microphone isn't on, use the Ctrl+Space key combo
//   to toggle it on, then prompt ChatGPT with your query.
//   When done, press Enter to submit your prompt to ChatGPT.
//
// Reading:
// - After granting the snippet access to your microphone and
//   speakers, the script creates a button next to the ChatGPT
//   input box when launched. At launch the microphone is
//   toggled on, so you can go ahead and speak with ChatGPT;
//   press Enter once to submit your prompt. When ChatGPT is
//   done with its response, press Ctrl+/ to fire up the
//   speech synthesizer. You can toggle the synthesizer off
//   and on at any time by pressing Ctrl+/ again.
//
// Highlighting:
// - Super new; just added it this morning. Still way slow
//   and choppy. Not impressed. A smoother alternative is
//   sketched below toggleReading.
//
// See the bottom of this file for the license.
//
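// A minimal way to load this snippet (an assumption; the original
// gives no loading instructions): open a ChatGPT conversation in
// Chrome, open DevTools (F12), and paste the whole file into the
// console. Alternatively, wrap it in an IIFE and save it as a
// bookmarklet:
//
//   javascript:(() => { /* paste the file contents here */ })();
//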
// Pulse animation for the mic button; applied in toggleMicrophone
// below while the microphone is listening.
const style = document.createElement('style');
style.innerHTML = `
  @keyframes pulse {
    0% { background-color: #ff0000; }
    50% { background-color: #ff7777; }
    100% { background-color: #ff0000; }
  }
`;
document.head.appendChild(style);
if ('SpeechRecognition' in window || 'webkitSpeechRecognition' in window) {
  const recognition = new (window.SpeechRecognition || window.webkitSpeechRecognition)();
  // Keep listening across pauses and surface interim results so
  // dictation feels continuous.
  recognition.continuous = true;
  recognition.interimResults = true;
  const synthesis = window.speechSynthesis;
  const utterance = new SpeechSynthesisUtterance();
  // The ChatGPT prompt box (the selector this snippet targets).
  const inputBox = document.querySelector('textarea[tabindex="0"]');
  let isListening = false;
  let isReading = false;
  let finalTranscript = '';
  // Helper to reset the prompt box and the accumulated transcript,
  // handy after submitting a prompt.
  const clearTextArea = () => {
    inputBox.value = '';
    finalTranscript = '';
    inputBox.focus();
  };
  const toggleMicrophone = () => {
    isListening = !isListening;
    if (isListening) {
      recognition.start();
      console.log('Microphone on');
      // Red, pulsing button while the microphone is live (uses the
      // pulse keyframes defined above).
      micButton.style.backgroundColor = 'red';
      micButton.style.animation = 'pulse 1s infinite';
    } else {
      recognition.stop();
      console.log('Microphone off');
      // Back to black when the microphone is off.
      micButton.style.backgroundColor = 'black';
      micButton.style.animation = '';
      inputBox.value += '\n';
      finalTranscript += '\n';
    }
  };
  const toggleReading = () => {
    isReading = !isReading;
    if (isReading) {
      // Each chat message lives in a .group.w-full container; read the
      // last one (ChatGPT's most recent response).
      const contentDivs = document.querySelectorAll('.group.w-full');
      if (contentDivs.length > 0) {
        const lastContentDiv = contentDivs[contentDivs.length - 1];
        const words = lastContentDiv.innerText.split(' ');
        const fragment = document.createDocumentFragment();
        let currentWordIndex = 0;
        const highlightWord = () => {
          if (currentWordIndex < words.length) {
            const word = words[currentWordIndex];
            const wordElement = document.createElement('span');
            wordElement.style.backgroundColor = 'yellow';
            // Trailing space keeps the rebuilt text from running together.
            wordElement.textContent = word + ' ';
            fragment.appendChild(wordElement);
            // Rebuild the message from plain spans. Note this discards the
            // original markup, which is part of why highlighting is choppy.
            lastContentDiv.innerHTML = '';
            lastContentDiv.appendChild(fragment.cloneNode(true));
            currentWordIndex++;
            setTimeout(() => {
              speakWord();
              // Un-highlight this word; the change shows on the next rebuild.
              wordElement.style.backgroundColor = '';
            }, 500); // Adjust the timeout (in ms) to control the speed of speech.
          } else {
            clearInterval(intervalId);
            console.log('Reading completed');
          }
        };
        const speakWord = () => {
          const word = words[currentWordIndex - 1];
          synthesis.speak(new SpeechSynthesisUtterance(word));
        };
        const intervalId = setInterval(highlightWord, 1000); // Adjust the interval duration as needed.
        highlightWord(); // Start immediately rather than waiting for the first interval tick.
      } else {
        console.log('No elements to read');
      }
    } else {
      synthesis.cancel();
      console.log('Reading stopped');
      inputBox.focus();
    }
  };
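  // A smoother alternative to the timer-driven highlighting above (a
  // sketch, not wired into this snippet): SpeechSynthesisUtterance
  // fires 'boundary' events as the synthesizer reaches each word, so
  // the highlight can follow the voice instead of a fixed 500 ms / 1 s
  // timer. The function name and the console.log placeholder are
  // illustrative assumptions, not part of the original gist.
  const highlightWithBoundaries = (element) => {
    const text = element.innerText;
    const readUtterance = new SpeechSynthesisUtterance(text);
    readUtterance.onboundary = (event) => {
      if (event.name !== 'word') return;
      // charIndex points at the start of the word being spoken.
      const end = text.indexOf(' ', event.charIndex);
      const word = text.slice(event.charIndex, end === -1 ? text.length : end);
      console.log('Speaking:', word); // swap in DOM highlighting here
    };
    synthesis.speak(readUtterance);
  };
  // Example: highlightWithBoundaries([...document.querySelectorAll('.group.w-full')].pop());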
  // Prefer a specific voice when the browser exposes it.
  const setVoice = () => {
    const voices = synthesis.getVoices();
    const selectedVoice = voices.find(voice => voice.name === 'Microsoft Zira - English (United States)');
    if (selectedVoice) utterance.voice = selectedVoice;
  };
  synthesis.onvoiceschanged = setVoice;
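  // Voice names vary by OS and browser; to see what your browser
  // exposes, run this in the console:
  //   speechSynthesis.getVoices().map(v => v.name)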
  // Ctrl+Space toggles the microphone; Ctrl+/ (the Slash key)
  // toggles reading.
  const handleKeyDown = (event) => {
    if (event.ctrlKey) {
      if (event.code === 'Space') {
        event.preventDefault();
        toggleMicrophone();
      } else if (event.code === 'Slash') {
        event.preventDefault();
        toggleReading();
      }
    }
  };
  const handleSpeech = (event) => {
    const latestResult = event.results[event.results.length - 1];
    const currentTranscript = latestResult[0].transcript;
    // Voice command: saying "hey gpt, mic off" toggles the microphone.
    if (currentTranscript.toLowerCase().includes("hey gpt, mic off")) {
      toggleMicrophone();
      return;
    }
    if (isListening && latestResult.isFinal) {
      // Append (rather than overwrite) finalized phrases so
      // multi-sentence prompts accumulate in the prompt box.
      finalTranscript += currentTranscript + ' ';
      inputBox.value = finalTranscript;
      inputBox.scrollTop = inputBox.scrollHeight;
    }
  };
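  // The voice-command check above could be generalized (an illustration,
  // not part of the original gist), e.g.:
  //   const commands = {
  //     'hey gpt, mic off': toggleMicrophone,
  //     'hey gpt, read that': toggleReading,
  //   };
  //   for (const [phrase, action] of Object.entries(commands)) {
  //     if (currentTranscript.toLowerCase().includes(phrase)) action();
  //   }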
  document.addEventListener('keydown', handleKeyDown);
  recognition.addEventListener('result', handleSpeech);
  recognition.onerror = (event) => console.error('Recognition Error:', event.error);
  recognition.onend = () => {
    // webkitSpeechRecognition stops itself after a long pause; restart
    // it if the microphone is still meant to be on.
    if (isListening) recognition.start();
  };
  // Round indicator button inserted next to the prompt box; black when
  // idle, red and pulsing while listening.
  const micButton = document.createElement('div');
  micButton.style.position = 'relative';
  micButton.style.display = 'inline-block';
  micButton.style.width = '30px';
  micButton.style.height = '30px';
  micButton.style.borderRadius = '50%';
  micButton.style.marginLeft = '5px';
  micButton.style.backgroundColor = 'black';
  micButton.addEventListener('click', () => {
    toggleMicrophone();
    toggleReading();
  });
  const container = document.createElement('div');
  container.style.display = 'flex';
  container.style.alignItems = 'center';
  container.appendChild(micButton);
  const inputBoxParent = inputBox.parentNode;
  inputBoxParent.insertBefore(container, inputBox);
  inputBox.focus();
  // Turn the microphone on at launch, as described in the header.
  toggleMicrophone();
} else {
  console.error('Speech recognition or synthesis not supported in this browser');
}
// LICENSE: Use it all you want, but leave this comments
// area untouched. 2023 Creative Commons license.
// AUTHOR:
//   github: @jetstreamin &&
//   chat.openai.com 3.5 w/ a dash of 4
// gist-openai-conversant-gpt.js
// v.0.0.2