feat: Enhance audio capture and monitoring features
- Added "audioCapture" permission to manifest for microphone access. - Introduced DeepSeek as a new AI provider option in the side panel. - Implemented a capture mode selection (tab-only, mic-only, mixed) in the side panel. - Added options to enable/disable the extension and auto-open the assistant window. - Integrated a mic monitor feature with live input level visualization. - Included buttons for requesting microphone permission and granting tab access. - Updated styles for new sections and mic level visualization. - Enhanced model fetching logic to support DeepSeek and improved error handling.
background.js
@@ -1,23 +1,27 @@
-let recognition;
-let assistantWindowId = null;
-let currentAIConfig = { provider: 'openai', model: 'gpt-4o-mini' };
+'use strict';
 
-// AI Service configurations
-const aiServices = {
+const DEFAULT_AI_CONFIG = { provider: 'openai', model: 'gpt-4o-mini' };
+const DEFAULT_CAPTURE_MODE = 'tab';
+const LISTENING_PROMPT = 'You are a helpful assistant that answers questions briefly and concisely during interviews. Provide clear, professional responses.';
+
+const AI_SERVICES = {
   openai: {
     baseUrl: 'https://api.openai.com/v1/chat/completions',
     headers: (apiKey) => ({
       'Content-Type': 'application/json',
-      'Authorization': `Bearer ${apiKey}`
+      Authorization: `Bearer ${apiKey}`
     }),
-    formatRequest: (model, question, context = '') => ({
-      model: model,
+    formatRequest: (model, question, context = '', options = {}) => ({
+      model,
       messages: [
-        { role: "system", content: `You are a helpful assistant that answers questions briefly and concisely during interviews. Provide clear, professional responses. ${context ? `\n\nContext Information:\n${context}` : ''}` },
-        { role: "user", content: question }
+        {
+          role: 'system',
+          content: `${LISTENING_PROMPT}${context ? `\n\nContext Information:\n${context}` : ''}`
+        },
+        { role: 'user', content: question }
       ],
-      max_tokens: 200,
-      temperature: 0.7
+      max_tokens: options.maxTokens || 200,
+      temperature: options.temperature ?? 0.7
     }),
     parseResponse: (data) => data.choices[0].message.content.trim()
   },
@@ -28,11 +32,14 @@ const aiServices = {
       'x-api-key': apiKey,
       'anthropic-version': '2023-06-01'
     }),
-    formatRequest: (model, question, context = '') => ({
-      model: model,
-      max_tokens: 200,
+    formatRequest: (model, question, context = '', options = {}) => ({
+      model,
+      max_tokens: options.maxTokens || 200,
       messages: [
-        { role: "user", content: `You are a helpful assistant that answers questions briefly and concisely during interviews. Provide clear, professional responses.${context ? `\n\nContext Information:\n${context}` : ''}\n\nQuestion: ${question}` }
+        {
+          role: 'user',
+          content: `${LISTENING_PROMPT}${context ? `\n\nContext Information:\n${context}` : ''}\n\nQuestion: ${question}`
+        }
       ]
     }),
     parseResponse: (data) => data.content[0].text.trim()
@@ -42,68 +49,79 @@ const aiServices = {
     headers: () => ({
       'Content-Type': 'application/json'
     }),
-    formatRequest: (model, question, context = '') => ({
+    // Use systemInstruction for instructions/context, and user role for the question
+    formatRequest: (model, question, context = '', options = {}) => ({
       systemInstruction: {
         role: 'system',
-        parts: [{
-          text: `You are a helpful assistant that answers questions briefly and concisely during interviews. Provide clear, professional responses.` + (context ? `\n\nContext Information:\n${context}` : '')
-        }]
+        parts: [
+          {
+            text: `${LISTENING_PROMPT}${context ? `\n\nContext Information:\n${context}` : ''}`
+          }
+        ]
       },
-      contents: [{
-        role: 'user',
-        parts: [{ text: `Question: ${question}` }]
-      }],
+      contents: [
+        {
+          role: 'user',
+          parts: [{ text: `Question: ${question}` }]
+        }
+      ],
       generationConfig: {
-        maxOutputTokens: 200,
-        temperature: 0.7
+        maxOutputTokens: options.maxTokens || 200,
+        temperature: options.temperature ?? 0.7
       }
     }),
     parseResponse: (data) => data.candidates[0].content.parts[0].text.trim()
   },
+  deepseek: {
+    baseUrl: 'https://api.deepseek.com/v1/chat/completions',
+    headers: (apiKey) => ({
+      'Content-Type': 'application/json',
+      Authorization: `Bearer ${apiKey}`
+    }),
+    formatRequest: (model, question, context = '', options = {}) => ({
+      model,
+      messages: [
+        {
+          role: 'system',
+          content: `${LISTENING_PROMPT}${context ? `\n\nContext Information:\n${context}` : ''}`
+        },
+        { role: 'user', content: question }
+      ],
+      max_tokens: options.maxTokens || 200,
+      temperature: options.temperature ?? 0.7
+    }),
+    parseResponse: (data) => data.choices[0].message.content.trim()
+  },
   ollama: {
     baseUrl: 'http://localhost:11434/api/generate',
     headers: () => ({
       'Content-Type': 'application/json'
     }),
-    formatRequest: (model, question, context = '') => ({
-      model: model,
-      prompt: `You are a helpful assistant that answers questions briefly and concisely during interviews. Provide clear, professional responses.${context ? `\n\nContext Information:\n${context}` : ''}\n\nQuestion: ${question}\n\nAnswer:`,
+    formatRequest: (model, question, context = '', options = {}) => ({
+      model,
+      prompt: `${LISTENING_PROMPT}${context ? `\n\nContext Information:\n${context}` : ''}\n\nQuestion: ${question}\n\nAnswer:`,
       stream: false,
       options: {
-        temperature: 0.7,
-        num_predict: 200
+        temperature: options.temperature ?? 0.7,
+        num_predict: options.maxTokens || 200
       }
     }),
     parseResponse: (data) => data.response.trim()
   }
 };
 
-// Multi-device server state
-let remoteServer = null;
-let remoteServerPort = null;
-let activeConnections = new Set();
+const state = {
+  recognition: undefined,
+  assistantWindowId: null,
+  currentAIConfig: { ...DEFAULT_AI_CONFIG },
+  currentCaptureMode: DEFAULT_CAPTURE_MODE,
+  remoteServer: null,
+  remoteServerPort: null,
+  activeConnections: new Set(),
+  isActive: true
+};
 
-chrome.runtime.onMessage.addListener(function(request, sender, sendResponse) {
-  if (request.action === 'startListening') {
-    if (request.aiProvider && request.model) {
-      currentAIConfig = { provider: request.aiProvider, model: request.model };
-    }
-    startListening();
-  } else if (request.action === 'stopListening') {
-    stopListening();
-  } else if (request.action === 'getAIResponse') {
-    getAIResponse(request.question);
-  } else if (request.action === 'startRemoteServer') {
-    startRemoteServer(request.sessionId, request.port, sendResponse);
-    return true; // Keep message channel open for async response
-  } else if (request.action === 'stopRemoteServer') {
-    stopRemoteServer(sendResponse);
-    return true;
-  } else if (request.action === 'remoteQuestion') {
-    // Handle questions from remote devices
-    getAIResponse(request.question);
-  }
+chrome.runtime.onMessage.addListener((request, sender, sendResponse) => {
+  return handleMessage(request, sender, sendResponse);
 });
 
 chrome.action.onClicked.addListener((tab) => {
@@ -111,76 +129,185 @@ chrome.action.onClicked.addListener((tab) => {
 });
 
 chrome.windows.onRemoved.addListener((windowId) => {
-  if (windowId === assistantWindowId) {
-    assistantWindowId = null;
+  if (windowId === state.assistantWindowId) {
+    state.assistantWindowId = null;
   }
 });
 
+initializeActiveState();
+
+function handleMessage(request, _sender, sendResponse) {
+  switch (request.action) {
+    case 'startListening':
+      if (!state.isActive) {
+        chrome.runtime.sendMessage({
+          action: 'updateAIResponse',
+          response: 'Extension is inactive. Turn it on in the side panel to start listening.'
+        });
+        return false;
+      }
+      if (request.aiProvider && request.model) {
+        state.currentAIConfig = { provider: request.aiProvider, model: request.model };
+      }
+      if (request.captureMode) {
+        state.currentCaptureMode = request.captureMode;
+      }
+      startListening();
+      return false;
+    case 'stopListening':
+      stopListening();
+      return false;
+    case 'getAIResponse':
+      getAIResponse(request.question);
+      return false;
+    case 'startRemoteServer':
+      startRemoteServer(request.sessionId, request.port, sendResponse);
+      return true;
+    case 'stopRemoteServer':
+      stopRemoteServer(sendResponse);
+      return true;
+    case 'remoteQuestion':
+      getAIResponse(request.question);
+      return false;
+    case 'grantTabAccess':
+      grantTabAccess(sendResponse);
+      return true;
+    case 'openAssistantWindow':
+      openAssistantWindow(sendResponse);
+      return true;
+    case 'setActiveState':
+      setActiveState(Boolean(request.isActive), sendResponse);
+      return true;
+    default:
+      return false;
+  }
+}
+
 function startListening() {
+  if (state.currentCaptureMode === 'mic') {
+    startMicListening();
+    return;
+  }
+  if (state.currentCaptureMode === 'mixed') {
+    startMixedListening();
+    return;
+  }
+
   chrome.tabs.query({ active: true, currentWindow: true }, (tabs) => {
     if (chrome.runtime.lastError) {
       console.error('Error querying tabs:', chrome.runtime.lastError);
       return;
     }
-    if (tabs.length === 0) {
+    if (!tabs.length) {
       console.error('No active tab found');
       return;
     }
-    const activeTabId = tabs[0].id;
-    if (typeof activeTabId === 'undefined') {
-      console.error('Active tab ID is undefined');
-      return;
-    }
 
-    // Check if the current tab is a valid web page (not chrome:// or extension pages)
-
     const tab = tabs[0];
-    if (!tab.url || tab.url.startsWith('chrome://') || tab.url.startsWith('chrome-extension://')) {
+    if (!isValidCaptureTab(tab)) {
+      const message = 'Error: Cannot capture audio from this page. Please navigate to a regular website.';
       console.error('Cannot capture audio from this type of page:', tab.url);
-      chrome.runtime.sendMessage({action: 'updateAIResponse', response: 'Error: Cannot capture audio from this page. Please navigate to a regular website.'});
+      chrome.runtime.sendMessage({ action: 'updateAIResponse', response: message });
       return;
     }
 
-    chrome.tabCapture.getMediaStreamId({ consumerTabId: activeTabId }, (streamId) => {
+    chrome.tabCapture.getMediaStreamId({ consumerTabId: tab.id }, (streamId) => {
       if (chrome.runtime.lastError) {
-        console.error('Error getting media stream ID:', chrome.runtime.lastError);
         const errorMsg = chrome.runtime.lastError.message || 'Unknown error';
-        chrome.runtime.sendMessage({action: 'updateAIResponse', response: `Error: ${errorMsg}. Make sure you've granted microphone permissions.`});
+        const userMessage = buildTabCaptureErrorMessage(errorMsg);
+        console.error('Error getting media stream ID:', chrome.runtime.lastError);
+        chrome.runtime.sendMessage({ action: 'updateAIResponse', response: userMessage });
         return;
       }
       if (!streamId) {
         console.error('No stream ID received');
-        chrome.runtime.sendMessage({action: 'updateAIResponse', response: 'Error: Failed to get media stream. Please try again.'});
+        chrome.runtime.sendMessage({ action: 'updateAIResponse', response: 'Error: Failed to get media stream. Please try again.' });
         return;
       }
-      injectContentScriptAndStartCapture(activeTabId, streamId);
+      injectContentScriptAndStartCapture(tab.id, streamId);
     });
   });
 }
 
+function startMicListening() {
+  chrome.tabs.query({ active: true, currentWindow: true }, (tabs) => {
+    if (chrome.runtime.lastError || tabs.length === 0) {
+      console.error('Error querying tabs:', chrome.runtime.lastError);
+      return;
+    }
+    const tab = tabs[0];
+    if (!isValidCaptureTab(tab)) {
+      chrome.runtime.sendMessage({ action: 'updateAIResponse', response: 'Error: Cannot capture audio from this page. Please navigate to a regular website.' });
+      return;
+    }
+
+    chrome.scripting.executeScript({ target: { tabId: tab.id }, files: ['content.js'] }, () => {
+      if (chrome.runtime.lastError) {
+        chrome.runtime.sendMessage({ action: 'updateAIResponse', response: 'Error: Failed to inject content script. Please refresh the page and try again.' });
+        return;
+      }
+      chrome.tabs.sendMessage(tab.id, { action: 'startMicCapture' }, () => {
+        if (chrome.runtime.lastError) {
+          chrome.runtime.sendMessage({ action: 'updateAIResponse', response: 'Error: Failed to start microphone capture.' });
+        } else {
+          chrome.runtime.sendMessage({ action: 'updateAIResponse', response: 'Listening for audio (mic-only)...' });
+        }
+      });
+    });
+  });
+}
+
+function startMixedListening() {
+  chrome.tabs.query({ active: true, currentWindow: true }, (tabs) => {
+    if (chrome.runtime.lastError || tabs.length === 0) {
+      console.error('Error querying tabs:', chrome.runtime.lastError);
+      return;
+    }
+    const tab = tabs[0];
+    if (!isValidCaptureTab(tab)) {
+      chrome.runtime.sendMessage({ action: 'updateAIResponse', response: 'Error: Cannot capture audio from this page. Please navigate to a regular website.' });
+      return;
+    }
+
+    chrome.scripting.executeScript({ target: { tabId: tab.id }, files: ['content.js'] }, () => {
+      if (chrome.runtime.lastError) {
+        chrome.runtime.sendMessage({ action: 'updateAIResponse', response: 'Error: Failed to inject content script. Please refresh the page and try again.' });
+        return;
+      }
+      chrome.tabs.sendMessage(tab.id, { action: 'startMixedCapture' }, () => {
+        if (chrome.runtime.lastError) {
+          chrome.runtime.sendMessage({ action: 'updateAIResponse', response: 'Error: Failed to start mixed capture.' });
+        } else {
+          chrome.runtime.sendMessage({ action: 'updateAIResponse', response: 'Listening for audio (mixed mode)...' });
+        }
+      });
+    });
+  });
+}
+
 function injectContentScriptAndStartCapture(tabId, streamId) {
-  chrome.scripting.executeScript({
-    target: { tabId: tabId },
-    files: ['content.js']
-  }, (injectionResults) => {
+  chrome.scripting.executeScript({ target: { tabId }, files: ['content.js'] }, () => {
     if (chrome.runtime.lastError) {
       console.error('Error injecting content script:', chrome.runtime.lastError);
-      chrome.runtime.sendMessage({action: 'updateAIResponse', response: 'Error: Failed to inject content script. Please refresh the page and try again.'});
+      chrome.runtime.sendMessage({ action: 'updateAIResponse', response: 'Error: Failed to inject content script. Please refresh the page and try again.' });
      return;
    }
 
    // Wait a bit to ensure the content script is fully loaded
 
    setTimeout(() => {
-      chrome.tabs.sendMessage(tabId, { action: 'startCapture', streamId: streamId }, (response) => {
+      chrome.tabs.sendMessage(tabId, { action: 'startCapture', streamId }, () => {
        if (chrome.runtime.lastError) {
-          console.error('Error starting capture:', chrome.runtime.lastError);
          const errorMsg = chrome.runtime.lastError.message || 'Unknown error';
-          chrome.runtime.sendMessage({action: 'updateAIResponse', response: `Error: ${errorMsg}. Please make sure microphone permissions are granted.`});
+          console.error('Error starting capture:', chrome.runtime.lastError);
+          chrome.runtime.sendMessage({
+            action: 'updateAIResponse',
+            response: `Error: ${errorMsg}. Please make sure microphone permissions are granted.`
+          });
        } else {
          console.log('Capture started successfully');
-          chrome.runtime.sendMessage({action: 'updateAIResponse', response: 'Listening for audio... Speak your questions!'});
+          chrome.runtime.sendMessage({ action: 'updateAIResponse', response: 'Listening for audio... Speak your questions!' });
        }
      });
-    }, 200); // Increased timeout slightly for better reliability
+    }, 200);
  });
}
@@ -190,49 +317,50 @@ function stopListening() {
      console.error('Error querying tabs for stop:', chrome.runtime.lastError);
      return;
    }
 
-    chrome.tabs.sendMessage(tabs[0].id, { action: 'stopCapture' }, (response) => {
+    chrome.tabs.sendMessage(tabs[0].id, { action: 'stopCapture' }, () => {
      if (chrome.runtime.lastError) {
        console.error('Error stopping capture:', chrome.runtime.lastError);
        // Don't show error to user for stop operation, just log it
-      } else {
-        console.log('Capture stopped successfully');
-        chrome.runtime.sendMessage({action: 'updateAIResponse', response: 'Stopped listening.'});
+        return;
      }
+      console.log('Capture stopped successfully');
+      chrome.runtime.sendMessage({ action: 'updateAIResponse', response: 'Stopped listening.' });
    });
  });
}
 
 function isQuestion(text) {
   // Simple check for question words or question mark
   const questionWords = ['what', 'when', 'where', 'who', 'why', 'how'];
   const lowerText = text.toLowerCase();
-  return questionWords.some(word => lowerText.includes(word)) || text.includes('?');
+  return questionWords.some((word) => lowerText.includes(word)) || text.includes('?');
 }
 
 async function getAIResponse(question) {
   try {
-    const { provider, model } = currentAIConfig;
-    const service = aiServices[provider];
+    const storedConfig = await getAIConfigFromStorage();
+    if (storedConfig) {
+      state.currentAIConfig = storedConfig;
+    }
+
+    const { provider, model } = state.currentAIConfig;
+    const service = AI_SERVICES[provider];
+    const speedMode = await getSpeedModeFromStorage();
 
     if (!service) {
       throw new Error(`Unsupported AI provider: ${provider}`);
     }
 
     // Get saved contexts to include in the prompt
     const contextData = await getStoredContexts();
-    const systemContexts = contextData.filter(c => c.type === 'system');
-    const generalContexts = contextData.filter(c => c.type !== 'system');
+    const { systemContexts, generalContexts } = selectContextsForRequest(contextData, speedMode);
 
-    const systemPromptExtra = systemContexts.length > 0
-      ? systemContexts.map(ctx => `${ctx.title}:\n${ctx.content}`).join('\n\n---\n\n')
+    const systemPromptExtra = systemContexts.length
+      ? systemContexts.map((ctx) => `${ctx.title}:\n${ctx.content}`).join('\n\n---\n\n')
       : '';
 
-    const contextString = generalContexts.length > 0
-      ? generalContexts.map(ctx => `${ctx.title}:\n${ctx.content}`).join('\n\n---\n\n')
+    const contextString = generalContexts.length
+      ? generalContexts.map((ctx) => `${ctx.title}:\n${ctx.content}`).join('\n\n---\n\n')
      : '';
 
    // Get API key for the current provider (skip for Ollama)
    let apiKey = null;
    if (provider !== 'ollama') {
      apiKey = await getApiKey(provider);
@@ -243,9 +371,8 @@ async function getAIResponse(question) {
 
     console.log(`Sending request to ${provider} API (${model})...`);
 
-    // Prepare request configuration
-    let url, headers, body;
-
+    let url;
+    let headers;
     if (provider === 'google') {
       url = service.baseUrl(apiKey, model);
       headers = service.headers();
@@ -253,64 +380,191 @@ async function getAIResponse(question) {
      url = service.baseUrl;
      headers = service.headers(apiKey);
    }
 
    // Inject system prompt extras into question or dedicated field depending on provider
    // For consistency we keep a single system message including systemPromptExtra
-    const mergedContext = systemPromptExtra
-      ? `${systemPromptExtra}${contextString ? '\n\n---\n\n' + contextString : ''}`
-      : contextString;
-
-    body = JSON.stringify(service.formatRequest(model, question, mergedContext));
+    const mergedContextRaw = systemPromptExtra
+      ? `${systemPromptExtra}${contextString ? `\n\n---\n\n${contextString}` : ''}`
+      : contextString;
+    const mergedContext = truncateContext(mergedContextRaw, provider, speedMode);
+
+    const requestOptions = buildRequestOptions(speedMode);
+    const body = JSON.stringify(service.formatRequest(model, question, mergedContext, requestOptions));
+
+    const controller = new AbortController();
+    const timeoutId = setTimeout(() => controller.abort(), speedMode ? 20000 : 30000);
 
    const response = await fetch(url, {
      method: 'POST',
-      headers: headers,
-      body: body
+      headers,
+      body,
+      signal: controller.signal
    });
 
+    clearTimeout(timeoutId);
+
    if (!response.ok) {
      const errorText = await response.text();
      let errorMessage;
 
      try {
        const errorData = JSON.parse(errorText);
        errorMessage = errorData.error?.message || errorData.message || errorText;
      } catch {
        errorMessage = errorText;
      }
 
      throw new Error(`Failed to get response from ${provider}: ${response.status} ${response.statusText}\n${errorMessage}`);
    }
 
    const data = await response.json();
    const answer = service.parseResponse(data);
 
-    // Send response to both local UI and remote devices
-    chrome.runtime.sendMessage({action: 'updateAIResponse', response: answer});
-    broadcastToRemoteDevices('aiResponse', { response: answer, question: question });
+    chrome.runtime.sendMessage({ action: 'updateAIResponse', response: answer });
+    broadcastToRemoteDevices('aiResponse', { response: answer, question });
  } catch (error) {
    console.error('Error getting AI response:', error);
 
-    // Provide more specific error messages
    let errorMessage = error.message;
    if (error.message.includes('API key')) {
      errorMessage = `${error.message}. Please check your API key in the settings.`;
    } else if (error.message.includes('Failed to fetch')) {
-      if (currentAIConfig.provider === 'ollama') {
+      if (state.currentAIConfig.provider === 'ollama') {
        errorMessage = 'Failed to connect to Ollama. Make sure Ollama is running locally on port 11434.';
      } else {
        errorMessage = 'Network error. Please check your internet connection.';
      }
+    } else if (error.message.includes('aborted')) {
+      errorMessage = 'Request timed out. Try again or enable speed mode.';
    }
 
-    const fullErrorMessage = 'Error: ' + errorMessage;
-    chrome.runtime.sendMessage({action: 'updateAIResponse', response: fullErrorMessage});
-    broadcastToRemoteDevices('aiResponse', { response: fullErrorMessage, question: question });
+    const fullErrorMessage = `Error: ${errorMessage}`;
+    chrome.runtime.sendMessage({ action: 'updateAIResponse', response: fullErrorMessage });
+    broadcastToRemoteDevices('aiResponse', { response: fullErrorMessage, question });
  }
}
 
-async function getApiKey(provider) {
+function truncateContext(context, provider, speedMode) {
+  if (!context) return '';
+  const maxContextCharsByProvider = {
+    deepseek: speedMode ? 30000 : 60000,
+    openai: speedMode ? 50000 : 120000,
+    anthropic: speedMode ? 50000 : 120000,
+    google: speedMode ? 50000 : 120000,
+    ollama: speedMode ? 50000 : 120000
+  };
+  const maxChars = maxContextCharsByProvider[provider] || 200000;
+  if (context.length <= maxChars) return context;
+  return `${context.slice(0, maxChars)}\n\n[Context truncated to fit model limits.]`;
+}
+
+function selectContextsForRequest(contexts, speedMode) {
+  const sorted = [...contexts].sort((a, b) => (b.createdAt || '').localeCompare(a.createdAt || ''));
+  const systemContexts = sorted.filter((ctx) => ctx.type === 'system');
+  const generalContexts = sorted.filter((ctx) => ctx.type !== 'system');
+
+  const maxGeneralItems = speedMode ? 2 : 4;
+  const maxSystemItems = speedMode ? 1 : 2;
+  const maxItemChars = speedMode ? 4000 : 8000;
+
+  const trimItem = (ctx) => ({
+    ...ctx,
+    content: (ctx.content || '').slice(0, maxItemChars)
+  });
+
+  return {
+    systemContexts: systemContexts.slice(0, maxSystemItems).map(trimItem),
+    generalContexts: generalContexts.slice(0, maxGeneralItems).map(trimItem)
+  };
+}
+
+function buildRequestOptions(speedMode) {
+  if (!speedMode) {
+    return { maxTokens: 200, temperature: 0.7 };
+  }
+  return { maxTokens: 120, temperature: 0.4 };
+}
+
+function getSpeedModeFromStorage() {
+  return new Promise((resolve) => {
+    chrome.storage.sync.get(['speedMode'], (result) => {
+      if (chrome.runtime.lastError) {
+        resolve(false);
+        return;
+      }
+      resolve(Boolean(result.speedMode));
+    });
+  });
+}
+
+function grantTabAccess(sendResponse) {
+  chrome.tabs.query({ active: true, currentWindow: true }, (tabs) => {
+    if (chrome.runtime.lastError || !tabs.length) {
+      sendResponse({ success: false, error: 'No active tab found.' });
+      return;
+    }
+
+    const tabId = tabs[0].id;
+    chrome.sidePanel.open({ tabId }, () => {
+      if (chrome.runtime.lastError) {
+        sendResponse({ success: false, error: 'Click the extension icon on the target tab to grant access.' });
+        return;
+      }
+
+      if (chrome.action && chrome.action.openPopup) {
+        chrome.action.openPopup(() => {
+          sendResponse({ success: true });
+        });
+      } else {
+        sendResponse({ success: true });
+      }
+    });
+  });
+}
+
+function openAssistantWindow(sendResponse) {
+  if (state.assistantWindowId !== null) {
+    chrome.windows.update(state.assistantWindowId, { focused: true }, () => {
+      sendResponse({ success: true });
+    });
+    return;
+  }
+
+  chrome.windows.create(
+    {
+      url: chrome.runtime.getURL('assistant.html'),
+      type: 'popup',
+      width: 420,
+      height: 320
+    },
+    (win) => {
+      if (chrome.runtime.lastError || !win) {
+        sendResponse({ success: false, error: 'Failed to open assistant window.' });
+        return;
+      }
+      state.assistantWindowId = win.id;
+      sendResponse({ success: true });
+    }
+  );
+}
+
+function getAIConfigFromStorage() {
+  return new Promise((resolve) => {
+    chrome.storage.sync.get(['aiProvider', 'selectedModel'], (result) => {
+      if (chrome.runtime.lastError) {
+        resolve(null);
+        return;
+      }
+      const provider = result.aiProvider;
+      const model = result.selectedModel;
+      if (!provider || !model) {
+        resolve(null);
+        return;
+      }
+      resolve({ provider, model });
+    });
+  });
+}
+
+function getApiKey(provider) {
  return new Promise((resolve) => {
    chrome.storage.sync.get('apiKeys', (result) => {
      const apiKeys = result.apiKeys || {};
@@ -319,7 +573,7 @@ async function getApiKey(provider) {
    });
  });
}
 
-async function getStoredContexts() {
+function getStoredContexts() {
  return new Promise((resolve) => {
    chrome.storage.local.get('contexts', (result) => {
      resolve(result.contexts || []);
@@ -327,52 +581,93 @@ async function getStoredContexts() {
    });
  });
}
 
 // Multi-device server functions
-async function startRemoteServer(sessionId, port, sendResponse) {
+function startRemoteServer(sessionId, port, sendResponse) {
  try {
    // Note: Chrome extensions can't directly create HTTP servers
    // This is a simplified implementation that would need a companion app
    // For now, we'll simulate the server functionality
 
-    remoteServerPort = port;
+    state.remoteServerPort = port;
    console.log(`Starting remote server on port ${port} with session ${sessionId}`);
 
    // In a real implementation, you would:
    // 1. Start a local HTTP/WebSocket server
    // 2. Handle incoming connections
    // 3. Route audio data and responses
 
    // For this demo, we'll just track the state
    sendResponse({
      success: true,
      message: 'Remote server started (demo mode)',
      url: `http://localhost:${port}?session=${sessionId}`
    });
 
  } catch (error) {
    console.error('Error starting remote server:', error);
    sendResponse({
      success: false,
      error: error.message
    });
  }
}
 
 function stopRemoteServer(sendResponse) {
-  remoteServer = null;
-  remoteServerPort = null;
-  activeConnections.clear();
+  state.remoteServer = null;
+  state.remoteServerPort = null;
+  state.activeConnections.clear();
 
   console.log('Remote server stopped');
   sendResponse({ success: true });
 }
 
 function broadcastToRemoteDevices(type, data) {
   // In a real implementation, this would send data to all connected WebSocket clients
   console.log('Broadcasting to remote devices:', type, data);
 
   // For demo purposes, we'll just log the broadcast
-  if (activeConnections.size > 0) {
-    console.log(`Broadcasting ${type} to ${activeConnections.size} connected devices`);
+  if (state.activeConnections.size > 0) {
+    console.log(`Broadcasting ${type} to ${state.activeConnections.size} connected devices`);
   }
 }
 
+function isValidCaptureTab(tab) {
+  if (!tab || !tab.url) return false;
+  return !tab.url.startsWith('chrome://') && !tab.url.startsWith('chrome-extension://');
+}
+
+function buildTabCaptureErrorMessage(errorMsg) {
+  let userMessage = `Error: ${errorMsg}.`;
+  if (errorMsg.includes('Extension has not been invoked')) {
+    userMessage += ' Click the extension icon on the tab you want to capture, then press Start Listening.';
+  } else {
+    userMessage += ' Make sure you\'ve granted microphone permissions.';
+  }
+  return userMessage;
+}
+
+function initializeActiveState() {
+  chrome.storage.sync.get(['extensionActive'], (result) => {
+    if (chrome.runtime.lastError) {
+      state.isActive = true;
+      updateActionBadge();
+      return;
+    }
+    state.isActive = result.extensionActive !== false;
+    updateActionBadge();
+  });
+}
+
+function setActiveState(isActive, sendResponse) {
+  state.isActive = isActive;
+  chrome.storage.sync.set({ extensionActive: isActive }, () => {
+    updateActionBadge();
+    if (!isActive) {
+      stopListeningAcrossTabs();
+    }
+    sendResponse({ success: true, isActive });
+  });
+}
+
+function updateActionBadge() {
+  if (!chrome.action || !chrome.action.setBadgeText) return;
+  chrome.action.setBadgeText({ text: state.isActive ? 'ON' : 'OFF' });
+  chrome.action.setBadgeBackgroundColor({ color: state.isActive ? '#2ecc71' : '#e74c3c' });
+}
+
+function stopListeningAcrossTabs() {
+  chrome.tabs.query({}, (tabs) => {
+    if (chrome.runtime.lastError || !tabs.length) return;
+    tabs.forEach((tab) => {
+      if (!tab.id) return;
+      chrome.tabs.sendMessage(tab.id, { action: 'stopCapture' }, () => {
+        // Ignore errors for tabs without the content script.
+      });
+    });
+  });
+}
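The startMixedCapture handler lives in content.js and is not shown in this diff; a plausible sketch of the mixed mode (tab audio plus microphone merged through the Web Audio API; names and structure are assumptions, not the committed implementation):

// Sketch only: combine tab capture (via the streamId from
// chrome.tabCapture.getMediaStreamId) with the microphone.
async function startMixedCapture(streamId) {
  const tabStream = await navigator.mediaDevices.getUserMedia({
    audio: { mandatory: { chromeMediaSource: 'tab', chromeMediaSourceId: streamId } }
  });
  const micStream = await navigator.mediaDevices.getUserMedia({ audio: true });

  const audioCtx = new AudioContext();
  const mixed = audioCtx.createMediaStreamDestination();
  audioCtx.createMediaStreamSource(tabStream).connect(mixed);
  audioCtx.createMediaStreamSource(micStream).connect(mixed);
  // Route tab audio back to the speakers so capture doesn't mute the tab.
  audioCtx.createMediaStreamSource(tabStream).connect(audioCtx.destination);

  return mixed.stream; // One MediaStream carrying both sources.
}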