Initial setup
content.js (new file, 86 lines)
@@ -0,0 +1,86 @@
let audioContext;
let mediaStream;
let recognition;

chrome.runtime.onMessage.addListener((request, sender, sendResponse) => {
  if (request.action === 'startCapture') {
    startCapture(request.streamId);
    sendResponse({success: true});
  } else if (request.action === 'stopCapture') {
    stopCapture();
    sendResponse({success: true});
  }
  return true; // Keep the message channel open for async responses
});

function startCapture(streamId) {
  // Tab-capture streams are requested through getUserMedia using the
  // Chrome-specific "mandatory" constraint block; passing the
  // chromeMediaSource keys at the top level does not work.
  navigator.mediaDevices.getUserMedia({
    audio: {
      mandatory: {
        chromeMediaSource: 'tab',
        chromeMediaSourceId: streamId
      }
    }
  }).then((stream) => {
    mediaStream = stream;
    audioContext = new AudioContext();
    const source = audioContext.createMediaStreamSource(stream);
    // Route the captured audio back to the speakers: capturing a tab
    // mutes its local playback unless the stream is reconnected.
    source.connect(audioContext.destination);

    // Initialize speech recognition. Note that webkitSpeechRecognition
    // listens on the default microphone input; it cannot be attached to
    // the captured tab stream directly. Chrome may also end a continuous
    // session after prolonged silence.
    recognition = new webkitSpeechRecognition();
    recognition.continuous = true;
    recognition.interimResults = true;

    recognition.onresult = function(event) {
      // Collect only finalized results; interim ones are still changing.
      let finalTranscript = '';
      for (let i = event.resultIndex; i < event.results.length; ++i) {
        if (event.results[i].isFinal) {
          finalTranscript += event.results[i][0].transcript;
        }
      }

      if (finalTranscript.trim() !== '') {
        chrome.runtime.sendMessage({action: 'updateTranscript', transcript: finalTranscript});

        // Check if the transcript contains a question
        if (isQuestion(finalTranscript)) {
          chrome.runtime.sendMessage({action: 'getAIResponse', question: finalTranscript});
        }
      }
    };

    recognition.onerror = function(event) {
      console.error('Speech recognition error:', event.error);
      chrome.runtime.sendMessage({action: 'updateAIResponse', response: `Speech recognition error: ${event.error}. Please try again.`});
    };

    recognition.start();
  }).catch((error) => {
    console.error('Error starting capture:', error);
    let errorMessage = 'Failed to start audio capture. ';
    if (error.name === 'NotAllowedError') {
      errorMessage += 'Permission to capture tab audio was denied.';
    } else if (error.name === 'NotFoundError') {
      errorMessage += 'No audio source was found.';
    } else {
      errorMessage += error.message || 'Unknown error occurred.';
    }
    chrome.runtime.sendMessage({action: 'updateAIResponse', response: errorMessage});
  });
}

function stopCapture() {
  if (mediaStream) {
    mediaStream.getTracks().forEach(track => track.stop());
  }
  if (audioContext) {
    audioContext.close();
  }
  if (recognition) {
    recognition.stop();
  }
}

function isQuestion(text) {
  // Heuristic: treat the text as a question if it contains a common
  // interrogative word or a question mark.
  const questionWords = ['what', 'when', 'where', 'who', 'why', 'how'];
  const lowerText = text.toLowerCase();
  return questionWords.some(word => lowerText.includes(word)) || text.includes('?');
}
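
The listener above expects a streamId that only an extension context can mint; the page that produces it is not part of this commit. A minimal sketch of a companion MV3 background service worker, assuming the tabCapture permission is declared in the manifest (the file name background.js and the action-click trigger are assumptions, not from this commit), might look like:

// background.js (hypothetical companion, not part of this commit).
// Mints a tab-capture stream ID and forwards it to the content script,
// which performs the actual getUserMedia call shown above.
chrome.action.onClicked.addListener((tab) => {
  chrome.tabCapture.getMediaStreamId(
    // consumerTabId lets frames in that tab (where the content script
    // runs) redeem the stream ID.
    { targetTabId: tab.id, consumerTabId: tab.id },
    (streamId) => {
      if (chrome.runtime.lastError) {
        console.error('tabCapture failed:', chrome.runtime.lastError.message);
        return;
      }
      chrome.tabs.sendMessage(tab.id, { action: 'startCapture', streamId });
    }
  );
});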
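
On the other side, the content script emits three message types: updateTranscript, getAIResponse, and updateAIResponse. A receiver for the two UI-facing messages, assuming a popup with #transcript and #response elements (the element IDs and file name are assumptions), could be sketched as:

// popup.js (hypothetical receiver, not part of this commit).
chrome.runtime.onMessage.addListener((request) => {
  if (request.action === 'updateTranscript') {
    // Append each finalized chunk to the running transcript.
    document.getElementById('transcript').textContent += request.transcript + ' ';
  } else if (request.action === 'updateAIResponse') {
    document.getElementById('response').textContent = request.response;
  }
  // 'getAIResponse' would be handled elsewhere, e.g. a background
  // worker that queries an AI backend and then replies by sending an
  // 'updateAIResponse' message.
});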