Initial commit

gsinghpal
2026-02-22 01:22:18 -05:00
commit 5200d5baf0
2394 changed files with 386834 additions and 0 deletions


@@ -0,0 +1,416 @@
/** @odoo-module **/
// Fusion Notes - Chatter Voice Note Integration
// Copyright 2026 Nexa Systems Inc.
// License OPL-1
//
// Patches the Odoo 19 Chatter component to add a voice recording
// microphone button. Records audio via MediaRecorder, transcribes
// with OpenAI Whisper, optionally formats with GPT, and posts the
// result as a "Log note" in the chatter.
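//
// Server contract (inferred from the RPC calls below; the matching Python
// controllers are presumably defined elsewhere in this module and are not
// shown in this hunk):
//   /fusion_notes/get_settings -> { review_mode, ai_format, max_seconds, has_api_key }
//   /fusion_notes/transcribe   -> { text } or { error }   (params: audio_base64, mime_type)
//   /fusion_notes/format       -> { text } or { error }   (params: text)
//   /fusion_notes/post_note    -> { error } on failure    (params: thread_model, thread_id, body)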
import { Chatter } from "@mail/chatter/web_portal/chatter";
import { patch } from "@web/core/utils/patch";
import { onWillUnmount } from "@odoo/owl";
import { useService } from "@web/core/utils/hooks";
import {
AudioRecorder,
SpeechRecognizer,
blobToBase64,
} from "./voice_note_service";
import { rpc } from "@web/core/network/rpc";
// ---------------------------------------------------------------------------
// Module-level settings cache - avoids repeated RPC calls for user preferences
// ---------------------------------------------------------------------------
let _settingsCache = null;
let _settingsCacheTime = 0;
const SETTINGS_CACHE_TTL = 60000; // 1 minute
async function getSettings() {
const now = Date.now();
if (_settingsCache && now - _settingsCacheTime < SETTINGS_CACHE_TTL) {
return _settingsCache;
}
try {
const result = await rpc('/fusion_notes/get_settings');
_settingsCache = result;
_settingsCacheTime = now;
return result;
} catch {
return {
review_mode: true,
ai_format: false,
max_seconds: 300,
has_api_key: false,
};
}
}
// ---------------------------------------------------------------------------
// Patch Chatter to add voice note capabilities
// ---------------------------------------------------------------------------
patch(Chatter.prototype, {
setup() {
super.setup(...arguments);
// Notification service for user feedback
this.notificationService = useService("notification");
// Voice note reactive state
Object.assign(this.state, {
// Status: idle | recording | transcribing | formatting | review
voiceStatus: 'idle',
voiceText: '', // Current display text
voiceRawText: '', // Original transcription
voiceFormattedText: '', // AI-formatted version (cached)
voiceAiFormat: false, // AI formatting toggle
voiceQuickPost: false, // Quick-post mode toggle
voiceDuration: 0, // Recording duration in seconds
voiceError: '', // Last error message
});
// Private instances (not reactive)
this._voiceRecorder = new AudioRecorder();
this._voiceSpeechRecognizer = null;
this._voiceTimer = null;
this._voiceUsingSpeechFallback = false;
this._voiceSpeechPromise = null;
// Cleanup on component destroy
onWillUnmount(() => {
this._voiceCleanup();
});
},
// ===================================================================
// Public methods (called from template)
// ===================================================================
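// Note: these handlers are presumably wired up from this module's QWeb
// template extension of the chatter (mic button, recording timer, review
// panel), which is defined in a separate file not shown in this hunk.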
/**
* Start recording audio from the microphone.
* Uses MediaRecorder if available, falls back to Web Speech API.
*/
async voiceStartRecording() {
// Close the normal composer if open
if (this.state.composerType) {
this.state.composerType = false;
}
// Reset voice state
Object.assign(this.state, {
voiceStatus: 'recording',
voiceText: '',
voiceRawText: '',
voiceFormattedText: '',
voiceError: '',
voiceDuration: 0,
voiceAiFormat: false,
});
// Load user preferences
const settings = await getSettings();
this.state.voiceQuickPost = !settings.review_mode;
this.state.voiceAiFormat = settings.ai_format;
try {
if (this._voiceRecorder.isSupported) {
// Primary: MediaRecorder + Whisper API
await this._voiceRecorder.start();
this._voiceUsingSpeechFallback = false;
} else {
// Fallback: Browser Speech Recognition
this._voiceSpeechRecognizer = new SpeechRecognizer();
if (!this._voiceSpeechRecognizer.isSupported) {
throw new Error(
'Neither audio recording nor speech recognition ' +
'is supported in this browser. Please use Chrome, ' +
'Edge, or Firefox.'
);
}
this._voiceSpeechPromise =
this._voiceSpeechRecognizer.start();
this._voiceUsingSpeechFallback = true;
}
// Start duration timer
this._voiceTimer = setInterval(() => {
this.state.voiceDuration++;
// Auto-stop at max duration
if (
settings.max_seconds &&
this.state.voiceDuration >= settings.max_seconds
) {
this.voiceStopRecording();
}
}, 1000);
} catch (error) {
this.state.voiceStatus = 'idle';
this.state.voiceError =
error.message || 'Failed to start recording.';
this._voiceNotify(this.state.voiceError, 'danger');
}
},
/**
* Stop recording and transcribe the audio.
*/
async voiceStopRecording() {
if (this.state.voiceStatus !== 'recording') {
return;
}
// Stop timer
clearInterval(this._voiceTimer);
this._voiceTimer = null;
this.state.voiceStatus = 'transcribing';
try {
let text = '';
if (this._voiceUsingSpeechFallback) {
// Speech Recognition fallback - text already accumulated
this._voiceSpeechRecognizer.stop();
text = await this._voiceSpeechPromise;
this._voiceSpeechRecognizer = null;
this._voiceSpeechPromise = null;
} else {
// MediaRecorder - send audio to Whisper API
const audioBlob = await this._voiceRecorder.stop();
const audioBase64 = await blobToBase64(audioBlob);
const result = await rpc('/fusion_notes/transcribe', {
audio_base64: audioBase64,
mime_type: this._voiceRecorder.baseMimeType,
});
if (result.error) {
throw new Error(result.error);
}
text = result.text;
}
if (!text || !text.trim()) {
throw new Error('No speech detected. Please try again.');
}
this.state.voiceRawText = text.trim();
this.state.voiceText = this.state.voiceRawText;
// Apply AI formatting when it is enabled in the user's settings
if (this.state.voiceAiFormat) {
await this._voiceFormatText();
}
// Quick post or show for review
if (this.state.voiceQuickPost) {
await this._voicePostNote();
} else {
this.state.voiceStatus = 'review';
}
} catch (error) {
this.state.voiceStatus = 'idle';
this.state.voiceError =
error.message || 'Transcription failed.';
this._voiceNotify(this.state.voiceError, 'danger');
}
},
/**
* Toggle AI formatting on the transcribed text.
*/
async voiceToggleAiFormat() {
if (this.state.voiceStatus !== 'review') {
return;
}
this.state.voiceAiFormat = !this.state.voiceAiFormat;
if (this.state.voiceAiFormat) {
if (this.state.voiceFormattedText) {
// Use cached formatted text
this.state.voiceText = this.state.voiceFormattedText;
} else {
// Fetch from GPT
await this._voiceFormatText();
}
} else {
// Switch back to raw transcription
this.state.voiceText = this.state.voiceRawText;
}
},
/**
* Post the current voice note text to chatter.
*/
async voicePostNote() {
await this._voicePostNote();
},
/**
* Cancel the voice note review and reset to idle.
*/
voiceCancelNote() {
this._voiceCleanup();
Object.assign(this.state, {
voiceStatus: 'idle',
voiceText: '',
voiceRawText: '',
voiceFormattedText: '',
voiceError: '',
voiceDuration: 0,
});
},
/**
* Cancel an active recording without transcribing.
*/
voiceCancelRecording() {
this._voiceRecorder.cancel();
if (this._voiceSpeechRecognizer) {
this._voiceSpeechRecognizer.cancel();
this._voiceSpeechRecognizer = null;
}
clearInterval(this._voiceTimer);
this._voiceTimer = null;
this.state.voiceStatus = 'idle';
this.state.voiceDuration = 0;
},
/**
* Format seconds as M:SS for the recording timer display.
*/
voiceFormatDuration(seconds) {
const mins = Math.floor(seconds / 60);
const secs = seconds % 60;
return `${mins}:${secs.toString().padStart(2, '0')}`;
},
/**
* Handle textarea input for the review panel.
*/
onVoiceTextInput(ev) {
this.state.voiceText = ev.target.value;
},
/**
* Override toggleComposer to close voice note review when the user
* opens the normal Send message / Log note composer.
*/
toggleComposer(mode = false, options = {}) {
if (this.state.voiceStatus === 'review') {
this.voiceCancelNote();
}
return super.toggleComposer(mode, options);
},
// ===================================================================
// Private methods
// ===================================================================
/**
* Send the raw transcription to GPT for professional formatting.
*/
async _voiceFormatText() {
const previousStatus = this.state.voiceStatus;
this.state.voiceStatus = 'formatting';
try {
const result = await rpc('/fusion_notes/format', {
text: this.state.voiceRawText,
});
if (result.error) {
this._voiceNotify(
`Formatting failed: ${result.error}`,
'warning'
);
this.state.voiceAiFormat = false;
this.state.voiceText = this.state.voiceRawText;
} else {
this.state.voiceFormattedText = result.text;
this.state.voiceText = result.text;
}
} catch {
this._voiceNotify(
'AI formatting failed. Showing raw text.',
'warning'
);
this.state.voiceAiFormat = false;
this.state.voiceText = this.state.voiceRawText;
}
// Restore appropriate status
this.state.voiceStatus =
previousStatus === 'transcribing' || previousStatus === 'formatting'
? 'review'
: previousStatus;
},
/**
* Post the voice note to the thread's chatter via RPC.
*/
async _voicePostNote() {
if (!this.state.voiceText || !this.state.voiceText.trim()) {
this._voiceNotify('Cannot post an empty note.', 'warning');
return;
}
const thread = this.state.thread;
if (!thread || !thread.id) {
this._voiceNotify(
'Cannot post: no active record.',
'warning'
);
return;
}
try {
const result = await rpc('/fusion_notes/post_note', {
thread_model: thread.model,
thread_id: thread.id,
body: this.state.voiceText,
});
if (result.error) {
throw new Error(result.error);
}
// Success - reset state and refresh chatter messages
this._voiceNotify('Voice note posted.', 'success');
this.voiceCancelNote();
this.onPostCallback();
} catch (error) {
this._voiceNotify(
`Failed to post note: ${error.message || 'Unknown error'}`,
'danger'
);
}
},
/**
* Show a notification to the user.
*/
_voiceNotify(message, type = 'info') {
if (this.notificationService) {
this.notificationService.add(message, { type });
}
},
/**
* Clean up all voice recording resources.
*/
_voiceCleanup() {
if (this._voiceTimer) {
clearInterval(this._voiceTimer);
this._voiceTimer = null;
}
if (this._voiceRecorder) {
this._voiceRecorder.cancel();
}
if (this._voiceSpeechRecognizer) {
this._voiceSpeechRecognizer.cancel();
this._voiceSpeechRecognizer = null;
}
},
});


@@ -0,0 +1,265 @@
/** @odoo-module **/
// Fusion Notes - Voice Recording Service
// Copyright 2026 Nexa Systems Inc.
// License OPL-1
//
// Provides AudioRecorder (MediaRecorder API) and SpeechRecognizer
// (Web Speech API fallback) for voice-to-text in Odoo chatter.
/**
* AudioRecorder - Records microphone audio using the MediaRecorder API.
*
* Usage:
* const recorder = new AudioRecorder();
* await recorder.start(); // request mic + begin recording
* const blob = await recorder.stop(); // stop and get audio Blob
* recorder.cancel(); // abort without producing output
*/
export class AudioRecorder {
constructor() {
this.mediaRecorder = null;
this.audioChunks = [];
this.stream = null;
this.mimeType = this._getSupportedMimeType();
}
/**
* Pick the best supported MIME type for recording.
* Whisper accepts webm, ogg, mp4, mp3, wav, etc.
*/
_getSupportedMimeType() {
const types = [
'audio/webm;codecs=opus',
'audio/webm',
'audio/ogg;codecs=opus',
'audio/ogg',
'audio/mp4',
];
for (const type of types) {
if (
typeof MediaRecorder !== 'undefined' &&
MediaRecorder.isTypeSupported(type)
) {
return type;
}
}
return 'audio/webm';
}
/** Base MIME type without codec suffix (for Whisper file upload). */
get baseMimeType() {
return this.mimeType.split(';')[0];
}
/** Whether MediaRecorder is available in this browser. */
get isSupported() {
return (
typeof navigator !== 'undefined' &&
navigator.mediaDevices &&
typeof navigator.mediaDevices.getUserMedia === 'function' &&
typeof MediaRecorder !== 'undefined'
);
}
/** Request microphone access and begin recording. */
async start() {
if (!this.isSupported) {
throw new Error(
'Audio recording is not supported in this browser.'
);
}
this.audioChunks = [];
this.stream = await navigator.mediaDevices.getUserMedia({
audio: {
echoCancellation: true,
noiseSuppression: true,
sampleRate: 16000,
},
});
this.mediaRecorder = new MediaRecorder(this.stream, {
mimeType: this.mimeType,
});
this.mediaRecorder.ondataavailable = (event) => {
if (event.data.size > 0) {
this.audioChunks.push(event.data);
}
};
// Emit a data chunk every second so long recordings accumulate
// incrementally instead of arriving as one large buffer at stop()
this.mediaRecorder.start(1000);
}
/** Stop recording and return the audio Blob. */
async stop() {
return new Promise((resolve, reject) => {
if (
!this.mediaRecorder ||
this.mediaRecorder.state === 'inactive'
) {
reject(new Error('No active recording.'));
return;
}
this.mediaRecorder.onstop = () => {
const blob = new Blob(this.audioChunks, {
type: this.baseMimeType,
});
this._cleanup();
resolve(blob);
};
this.mediaRecorder.onerror = (e) => {
this._cleanup();
reject(e);
};
this.mediaRecorder.stop();
});
}
/** Cancel recording and release resources without producing output. */
cancel() {
if (
this.mediaRecorder &&
this.mediaRecorder.state !== 'inactive'
) {
try {
this.mediaRecorder.stop();
} catch {
// Already stopped
}
}
this._cleanup();
}
/** Release microphone stream and reset state. */
_cleanup() {
if (this.stream) {
this.stream.getTracks().forEach((track) => track.stop());
this.stream = null;
}
this.mediaRecorder = null;
this.audioChunks = [];
}
}
/**
* SpeechRecognizer - Fallback using the browser Web Speech API.
*
* Provides real-time transcription without a server round-trip.
* Supported in Chrome, Edge, and some Chromium browsers.
*
* Usage:
* const recognizer = new SpeechRecognizer();
* const promise = recognizer.start(); // returns Promise<string>
* recognizer.stop(); // stop and resolve promise
* const text = await promise;
*/
export class SpeechRecognizer {
constructor() {
const SR =
window.SpeechRecognition || window.webkitSpeechRecognition;
this.isSupported = !!SR;
this.recognition = this.isSupported ? new SR() : null;
this.transcript = '';
this.isListening = false;
this._resolveStop = null;
if (this.recognition) {
this.recognition.continuous = true;
this.recognition.interimResults = true;
this.recognition.lang = 'en-US';
}
}
/**
* Start listening. Returns a promise that resolves with the final
* transcript when stop() is called or recognition ends naturally.
*/
start() {
if (!this.isSupported || !this.recognition) {
throw new Error(
'Speech Recognition is not supported in this browser.'
);
}
this.transcript = '';
this.isListening = true;
let finalTranscript = '';
return new Promise((resolve, reject) => {
this.recognition.onresult = (event) => {
let interimTranscript = '';
for (let i = event.resultIndex; i < event.results.length; i++) {
if (event.results[i].isFinal) {
finalTranscript += event.results[i][0].transcript + ' ';
} else {
interimTranscript += event.results[i][0].transcript;
}
}
this.transcript = (finalTranscript + interimTranscript).trim();
};
this.recognition.onerror = (event) => {
this.isListening = false;
if (event.error === 'no-speech') {
resolve(finalTranscript.trim());
} else {
reject(
new Error(`Speech recognition error: ${event.error}`)
);
}
};
this.recognition.onend = () => {
this.isListening = false;
resolve(finalTranscript.trim());
};
this.recognition.start();
this._resolveStop = () => {
this.recognition.stop();
};
});
}
/** Signal recognition to stop; the start() promise will resolve. */
stop() {
if (this._resolveStop) {
this._resolveStop();
}
}
/** Abort immediately without waiting for results. */
cancel() {
if (this.recognition && this.isListening) {
try {
this.recognition.abort();
} catch {
// Already stopped
}
}
this.isListening = false;
this.transcript = '';
this._resolveStop = null;
}
}
/**
* Convert a Blob to a base64-encoded string (without data-URI prefix).
*/
export function blobToBase64(blob) {
return new Promise((resolve, reject) => {
const reader = new FileReader();
reader.onloadend = () => {
// reader.result is "data:<mime>;base64,<data>" - extract <data>
const base64 = reader.result.split(',')[1];
resolve(base64);
};
reader.onerror = reject;
reader.readAsDataURL(blob);
});
}
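/**
 * Illustrative usage sketch (mirrors how the chatter patch combines these
 * helpers when sending audio for transcription):
 *
 *   const recorder = new AudioRecorder();
 *   await recorder.start();
 *   // ... user finishes speaking ...
 *   const blob = await recorder.stop();
 *   const audioBase64 = await blobToBase64(blob);
 *   // audioBase64 and recorder.baseMimeType are then sent to the
 *   // /fusion_notes/transcribe endpoint.
 */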