Initial commit

gsinghpal
2026-02-22 01:22:18 -05:00
commit 5200d5baf0
2394 changed files with 386834 additions and 0 deletions

fusion_notes/__init__.py Normal file

@@ -0,0 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2026 Nexa Systems Inc.
# License OPL-1 (Odoo Proprietary License v1.0)
from . import controllers
from . import models

fusion_notes/__manifest__.py Normal file

@@ -0,0 +1,47 @@
# -*- coding: utf-8 -*-
# Copyright 2026 Nexa Systems Inc.
# License OPL-1 (Odoo Proprietary License v1.0)
{
'name': 'Fusion Notes',
'version': '19.0.1.0.0',
'category': 'Productivity',
'summary': 'Voice-to-text notes for Odoo chatter with AI-powered transcription',
'description': """
Fusion Notes
============
Voice-to-text note logging for every Odoo chatter window.
Features:
- One-click voice recording from any chatter
- AI transcription via OpenAI Whisper
- Optional AI formatting to polish notes into professional prose
- Browser Speech Recognition fallback
- Review before posting or quick-post mode
Copyright 2026 Nexa Systems Inc.
""",
'author': 'Nexa Systems Inc.',
'website': 'https://www.nexasystems.ca',
'maintainer': 'Nexa Systems Inc.',
'support': 'support@nexasystems.ca',
'license': 'OPL-1',
'depends': ['mail'],
'data': [
'data/ir_config_parameter_data.xml',
'views/res_config_settings_views.xml',
],
'assets': {
'web.assets_backend': [
'fusion_notes/static/src/scss/fusion_notes.scss',
'fusion_notes/static/src/xml/voice_note_button.xml',
'fusion_notes/static/src/js/voice_note_service.js',
'fusion_notes/static/src/js/voice_note_button.js',
],
},
'images': ['static/description/icon.png'],
'installable': True,
'auto_install': False,
'application': True,
}

fusion_notes/controllers/__init__.py Normal file

@@ -0,0 +1,2 @@
# -*- coding: utf-8 -*-
from . import main

fusion_notes/controllers/main.py Normal file

@@ -0,0 +1,237 @@
# -*- coding: utf-8 -*-
# Copyright 2026 Nexa Systems Inc.
# License OPL-1 (Odoo Proprietary License v1.0)
import base64
import logging
import os
import tempfile
from odoo import http
from odoo.http import request
_logger = logging.getLogger(__name__)
class FusionNotesController(http.Controller):
# ------------------------------------------------------------------
# Settings endpoint (for frontend to fetch user preferences)
# ------------------------------------------------------------------
@http.route('/fusion_notes/get_settings', type='jsonrpc', auth='user')
def get_settings(self, **kwargs):
"""Return Fusion Notes settings for the frontend."""
ICP = request.env['ir.config_parameter'].sudo()
return {
'review_mode': ICP.get_param(
'fusion_notes.default_review_mode', 'True') == 'True',
'ai_format': ICP.get_param(
'fusion_notes.default_ai_format', 'False') == 'True',
'max_seconds': int(
ICP.get_param('fusion_notes.max_recording_seconds', '300')),
'has_api_key': bool(
ICP.get_param('fusion_notes.openai_api_key', '')),
}
# ------------------------------------------------------------------
# Whisper transcription
# ------------------------------------------------------------------
@http.route('/fusion_notes/transcribe', type='jsonrpc', auth='user')
def transcribe(self, audio_base64, mime_type='audio/webm', **kwargs):
"""Transcribe audio using OpenAI Whisper API.
Args:
audio_base64: Base64-encoded audio data.
mime_type: MIME type of the audio (default audio/webm).
Returns:
dict with 'text' on success or 'error' on failure.
"""
ICP = request.env['ir.config_parameter'].sudo()
api_key = ICP.get_param('fusion_notes.openai_api_key', '')
if not api_key:
return {
'error': 'OpenAI API key not configured. '
'Go to Settings > Fusion Notes.',
}
try:
import requests as req
except ImportError:
return {'error': 'Python requests library not available.'}
# Decode audio from base64
try:
audio_data = base64.b64decode(audio_base64)
except Exception as e:
_logger.error('Failed to decode audio: %s', e)
return {'error': 'Invalid audio data.'}
if len(audio_data) < 1000:
return {'error': 'Audio too short. Please record a longer message.'}
# Determine file extension from MIME type
ext_map = {
'audio/webm': '.webm',
'audio/ogg': '.ogg',
'audio/wav': '.wav',
'audio/mp4': '.m4a',
'audio/mpeg': '.mp3',
}
suffix = ext_map.get(mime_type, '.webm')
# Write to temp file and send to Whisper
tmp_path = None
try:
with tempfile.NamedTemporaryFile(
suffix=suffix, delete=False
) as tmp:
tmp.write(audio_data)
tmp_path = tmp.name
with open(tmp_path, 'rb') as audio_file:
# Use translations endpoint to always output English
# (auto-translates any spoken language to English)
response = req.post(
'https://api.openai.com/v1/audio/translations',
headers={'Authorization': f'Bearer {api_key}'},
files={
'file': (
f'recording{suffix}', audio_file, mime_type,
),
},
data={'model': 'whisper-1'},
timeout=60,
)
response.raise_for_status()
result = response.json()
text = result.get('text', '').strip()
if not text:
return {'error': 'No speech detected in the recording.'}
return {'text': text}
except req.exceptions.Timeout:
return {
'error': 'Transcription timed out. '
'Please try a shorter recording.',
}
except req.exceptions.HTTPError as e:
body = e.response.text if e.response is not None else ''  # Response objects are falsy on 4xx/5xx
_logger.error('Whisper API error: %s - %s', e, body)
return {'error': f'Transcription failed: {e}'}
except Exception as e:
_logger.error('Transcription error: %s', e)
return {'error': f'Transcription failed: {e}'}
finally:
if tmp_path and os.path.exists(tmp_path):
try:
os.unlink(tmp_path)
except OSError:
pass
# ------------------------------------------------------------------
# GPT formatting
# ------------------------------------------------------------------
@http.route('/fusion_notes/format', type='jsonrpc', auth='user')
def format_note(self, text, **kwargs):
"""Format transcribed text into a professional note using GPT.
Args:
text: Raw transcription text.
Returns:
dict with 'text' on success or 'error' on failure.
"""
ICP = request.env['ir.config_parameter'].sudo()
api_key = ICP.get_param('fusion_notes.openai_api_key', '')
if not api_key:
return {'error': 'OpenAI API key not configured.'}
model = ICP.get_param('fusion_notes.ai_model', 'gpt-4o-mini')
try:
import requests as req
except ImportError:
return {'error': 'Python requests library not available.'}
system_prompt = (
"You are a professional note formatter. Rewrite the following "
"voice transcription as a clean, professional log entry.\n"
"Rules:\n"
"- ALWAYS output in English regardless of input language\n"
"- If the input is in another language, translate it to English\n"
"- Fix grammar and punctuation\n"
"- Remove filler words (um, uh, like, you know, so, basically)\n"
"- Keep ALL facts, names, dates, and details exactly as stated\n"
"- Organize into clear, concise sentences\n"
"- Use professional tone appropriate for clinical/business notes\n"
"- Do NOT add information that was not in the original\n"
"- Do NOT add headers, bullet points, or extra formatting "
"unless the content clearly warrants it\n"
"- Return ONLY the formatted text, no explanations or preamble"
)
try:
response = req.post(
'https://api.openai.com/v1/chat/completions',
headers={
'Authorization': f'Bearer {api_key}',
'Content-Type': 'application/json',
},
json={
'model': model,
'messages': [
{'role': 'system', 'content': system_prompt},
{'role': 'user', 'content': text},
],
'max_tokens': 1000,
'temperature': 0.3,
},
timeout=30,
)
response.raise_for_status()
result = response.json()
formatted = result['choices'][0]['message']['content'].strip()
return {'text': formatted}
except Exception as e:
_logger.error('GPT formatting error: %s', e)
return {'error': f'Formatting failed: {e}'}
# ------------------------------------------------------------------
# Post note to chatter
# ------------------------------------------------------------------
@http.route('/fusion_notes/post_note', type='jsonrpc', auth='user')
def post_note(self, thread_model, thread_id, body, **kwargs):
"""Post a voice note to the specified record's chatter.
Args:
thread_model: The Odoo model name (e.g. 'sale.order').
thread_id: The record ID.
body: The note text to post.
Returns:
dict with 'success' or 'error'.
"""
try:
record = request.env[thread_model].browse(int(thread_id))
if not record.exists():
return {'error': 'Record not found.'}
record.message_post(
body=body,
message_type='comment',
subtype_xmlid='mail.mt_note',
)
return {'success': True}
except Exception as e:
_logger.error('Failed to post voice note: %s', e)
return {'error': f'Failed to post note: {e}'}
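
As a rough illustration only (not part of the module), the JSON-RPC endpoints above can be exercised from a standalone Python script. The sketch below assumes the JSON-RPC 2.0 envelope that Odoo's json routes conventionally accept, an instance at localhost:8069, and placeholder database, credentials, and file name; adjust all of these for a real deployment.

# Hypothetical client for /fusion_notes/transcribe (placeholders throughout).
import base64
import requests

BASE = 'http://localhost:8069'
session = requests.Session()

# Authenticate first so the session cookie satisfies auth='user' on the route.
session.post(f'{BASE}/web/session/authenticate', json={
    'jsonrpc': '2.0',
    'method': 'call',
    'params': {'db': 'mydb', 'login': 'admin', 'password': 'admin'},
})

# Base64-encode a recording and request a transcription.
with open('sample.webm', 'rb') as audio:
    audio_b64 = base64.b64encode(audio.read()).decode()

resp = session.post(f'{BASE}/fusion_notes/transcribe', json={
    'jsonrpc': '2.0',
    'method': 'call',
    'params': {'audio_base64': audio_b64, 'mime_type': 'audio/webm'},
})
print(resp.json().get('result'))  # {'text': ...} on success, {'error': ...} otherwise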

fusion_notes/data/ir_config_parameter_data.xml Normal file

@@ -0,0 +1,24 @@
<?xml version="1.0" encoding="utf-8"?>
<odoo>
<!--
Default configuration parameters for Fusion Notes.
noupdate="1" ensures these are ONLY set on first install,
never overwritten during module upgrades.
-->
<data noupdate="1">
<record id="config_ai_model" model="ir.config_parameter">
<field name="key">fusion_notes.ai_model</field>
<field name="value">gpt-4o-mini</field>
</record>
<record id="config_default_review_mode" model="ir.config_parameter">
<field name="key">fusion_notes.default_review_mode</field>
<field name="value">True</field>
</record>
<record id="config_max_recording_seconds" model="ir.config_parameter">
<field name="key">fusion_notes.max_recording_seconds</field>
<field name="value">300</field>
</record>
</data>
</odoo>
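
A side note on the defaults above (explanatory only, not shipped code): ir.config_parameter stores every value as a string, which is why the XML writes "True" and "300" and why the controller's get_settings coerces them on read. The same pattern from an odoo shell session, as a minimal sketch:

ICP = env['ir.config_parameter'].sudo()
review_mode = ICP.get_param('fusion_notes.default_review_mode', 'True') == 'True'  # string -> bool
max_seconds = int(ICP.get_param('fusion_notes.max_recording_seconds', '300'))      # string -> int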

fusion_notes/models/__init__.py Normal file

@@ -0,0 +1,2 @@
# -*- coding: utf-8 -*-
from . import res_config_settings

fusion_notes/models/res_config_settings.py Normal file

@@ -0,0 +1,72 @@
# -*- coding: utf-8 -*-
# Copyright 2026 Nexa Systems Inc.
# License OPL-1 (Odoo Proprietary License v1.0)
import logging
from odoo import fields, models
_logger = logging.getLogger(__name__)
class ResConfigSettings(models.TransientModel):
_inherit = 'res.config.settings'
# ------------------------------------------------------------------
# API Configuration
# ------------------------------------------------------------------
fn_openai_api_key = fields.Char(
string='OpenAI API Key',
config_parameter='fusion_notes.openai_api_key',
help='OpenAI API key for Whisper transcription and GPT formatting. '
'Get your key at https://platform.openai.com',
)
fn_ai_model = fields.Selection(
selection=[
('gpt-4o-mini', 'GPT-4o Mini (Fast, Low Cost)'),
('gpt-4o', 'GPT-4o (Best Quality)'),
('gpt-4.1-mini', 'GPT-4.1 Mini'),
('gpt-4.1', 'GPT-4.1'),
],
string='AI Formatting Model',
config_parameter='fusion_notes.ai_model',
help='Model used for formatting voice notes into professional text',
)
# ------------------------------------------------------------------
# Default Preferences
# ------------------------------------------------------------------
fn_default_review_mode = fields.Boolean(
string='Review Before Posting',
config_parameter='fusion_notes.default_review_mode',
help='Show transcribed text for review and editing before posting. '
'If disabled, notes are posted immediately after transcription.',
)
fn_default_ai_format = fields.Boolean(
string='AI Format by Default',
config_parameter='fusion_notes.default_ai_format',
help='Automatically format transcribed text with AI before review. '
'Users can still toggle this per note.',
)
fn_max_recording_seconds = fields.Integer(
string='Max Recording Duration (seconds)',
config_parameter='fusion_notes.max_recording_seconds',
help='Maximum recording duration in seconds (default: 300 = 5 minutes)',
)
def set_values(self):
ICP = self.env['ir.config_parameter'].sudo()
# Protect API key from accidental blanking
_protected = {
'fusion_notes.openai_api_key': ICP.get_param('fusion_notes.openai_api_key', ''),
'fusion_notes.ai_model': ICP.get_param('fusion_notes.ai_model', ''),
'fusion_notes.max_recording_seconds': ICP.get_param('fusion_notes.max_recording_seconds', ''),
}
super().set_values()
for key, old_val in _protected.items():
new_val = ICP.get_param(key, '')
if not new_val and old_val:
ICP.set_param(key, old_val)
_logger.warning("Settings protection: restored %s", key)
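
A minimal regression-test sketch for the protection above (hypothetical, not included in this commit; it assumes the standard Odoo test framework, and the class and method names are illustrative): saving the settings form with a blank key should leave the stored parameter untouched.

# Hypothetical test: set_values() must not blank a previously stored API key.
from odoo.tests.common import TransactionCase

class TestFusionNotesSettings(TransactionCase):
    def test_blank_save_keeps_api_key(self):
        icp = self.env['ir.config_parameter'].sudo()
        icp.set_param('fusion_notes.openai_api_key', 'sk-existing')
        settings = self.env['res.config.settings'].create({
            'fn_openai_api_key': False,  # simulate saving the form with an empty key
        })
        settings.set_values()
        self.assertEqual(
            icp.get_param('fusion_notes.openai_api_key'), 'sk-existing')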

fusion_notes/static/description/icon.png Normal file
Binary file not shown (PNG icon, 49 KiB).

fusion_notes/static/src/js/voice_note_button.js Normal file

@@ -0,0 +1,416 @@
/** @odoo-module **/
// Fusion Notes - Chatter Voice Note Integration
// Copyright 2026 Nexa Systems Inc.
// License OPL-1
//
// Patches the Odoo 19 Chatter component to add a voice recording
// microphone button. Records audio via MediaRecorder, transcribes
// with OpenAI Whisper, optionally formats with GPT, and posts the
// result as a "Log note" in the chatter.
import { Chatter } from "@mail/chatter/web_portal/chatter";
import { patch } from "@web/core/utils/patch";
import { onWillUnmount } from "@odoo/owl";
import { useService } from "@web/core/utils/hooks";
import {
AudioRecorder,
SpeechRecognizer,
blobToBase64,
} from "./voice_note_service";
import { rpc } from "@web/core/network/rpc";
// ---------------------------------------------------------------------------
// Settings cache - avoids repeated RPC calls for user preferences
// ---------------------------------------------------------------------------
let _settingsCache = null;
let _settingsCacheTime = 0;
const SETTINGS_CACHE_TTL = 60000; // 1 minute
async function getSettings() {
const now = Date.now();
if (_settingsCache && now - _settingsCacheTime < SETTINGS_CACHE_TTL) {
return _settingsCache;
}
try {
const result = await rpc('/fusion_notes/get_settings');
_settingsCache = result;
_settingsCacheTime = now;
return result;
} catch {
return {
review_mode: true,
ai_format: false,
max_seconds: 300,
has_api_key: false,
};
}
}
// ---------------------------------------------------------------------------
// Patch Chatter to add voice note capabilities
// ---------------------------------------------------------------------------
patch(Chatter.prototype, {
setup() {
super.setup(...arguments);
// Notification service for user feedback
this.notificationService = useService("notification");
// Voice note reactive state
Object.assign(this.state, {
// Status: idle | recording | transcribing | formatting | review
voiceStatus: 'idle',
voiceText: '', // Current display text
voiceRawText: '', // Original transcription
voiceFormattedText: '', // AI-formatted version (cached)
voiceAiFormat: false, // AI formatting toggle
voiceQuickPost: false, // Quick-post mode toggle
voiceDuration: 0, // Recording duration in seconds
voiceError: '', // Last error message
});
// Private instances (not reactive)
this._voiceRecorder = new AudioRecorder();
this._voiceSpeechRecognizer = null;
this._voiceTimer = null;
this._voiceUsingSpeechFallback = false;
this._voiceSpeechPromise = null;
// Cleanup on component destroy
onWillUnmount(() => {
this._voiceCleanup();
});
},
// ===================================================================
// Public methods (called from template)
// ===================================================================
/**
* Start recording audio from the microphone.
* Uses MediaRecorder if available, falls back to Web Speech API.
*/
async voiceStartRecording() {
// Close the normal composer if open
if (this.state.composerType) {
this.state.composerType = false;
}
// Reset voice state
Object.assign(this.state, {
voiceStatus: 'recording',
voiceText: '',
voiceRawText: '',
voiceFormattedText: '',
voiceError: '',
voiceDuration: 0,
voiceAiFormat: false,
});
// Load user preferences
const settings = await getSettings();
this.state.voiceQuickPost = !settings.review_mode;
this.state.voiceAiFormat = settings.ai_format;
try {
if (this._voiceRecorder.isSupported) {
// Primary: MediaRecorder + Whisper API
await this._voiceRecorder.start();
this._voiceUsingSpeechFallback = false;
} else {
// Fallback: Browser Speech Recognition
this._voiceSpeechRecognizer = new SpeechRecognizer();
if (!this._voiceSpeechRecognizer.isSupported) {
throw new Error(
'Neither audio recording nor speech recognition ' +
'is supported in this browser. Please use Chrome, ' +
'Edge, or Firefox.'
);
}
this._voiceSpeechPromise =
this._voiceSpeechRecognizer.start();
this._voiceUsingSpeechFallback = true;
}
// Start duration timer
this._voiceTimer = setInterval(() => {
this.state.voiceDuration++;
// Auto-stop at max duration
if (
settings.max_seconds &&
this.state.voiceDuration >= settings.max_seconds
) {
this.voiceStopRecording();
}
}, 1000);
} catch (error) {
this.state.voiceStatus = 'idle';
this.state.voiceError =
error.message || 'Failed to start recording.';
this._voiceNotify(this.state.voiceError, 'danger');
}
},
/**
* Stop recording and transcribe the audio.
*/
async voiceStopRecording() {
if (this.state.voiceStatus !== 'recording') {
return;
}
// Stop timer
clearInterval(this._voiceTimer);
this._voiceTimer = null;
this.state.voiceStatus = 'transcribing';
try {
let text = '';
if (this._voiceUsingSpeechFallback) {
// Speech Recognition fallback - text already accumulated
this._voiceSpeechRecognizer.stop();
text = await this._voiceSpeechPromise;
this._voiceSpeechRecognizer = null;
this._voiceSpeechPromise = null;
} else {
// MediaRecorder - send audio to Whisper API
const audioBlob = await this._voiceRecorder.stop();
const audioBase64 = await blobToBase64(audioBlob);
const result = await rpc('/fusion_notes/transcribe', {
audio_base64: audioBase64,
mime_type: this._voiceRecorder.baseMimeType,
});
if (result.error) {
throw new Error(result.error);
}
text = result.text;
}
if (!text || !text.trim()) {
throw new Error('No speech detected. Please try again.');
}
this.state.voiceRawText = text.trim();
this.state.voiceText = this.state.voiceRawText;
// AI format if enabled by default
if (this.state.voiceAiFormat) {
await this._voiceFormatText();
}
// Quick post or show for review
if (this.state.voiceQuickPost) {
await this._voicePostNote();
} else {
this.state.voiceStatus = 'review';
}
} catch (error) {
this.state.voiceStatus = 'idle';
this.state.voiceError =
error.message || 'Transcription failed.';
this._voiceNotify(this.state.voiceError, 'danger');
}
},
/**
* Toggle AI formatting on the transcribed text.
*/
async voiceToggleAiFormat() {
if (this.state.voiceStatus !== 'review') {
return;
}
this.state.voiceAiFormat = !this.state.voiceAiFormat;
if (this.state.voiceAiFormat) {
if (this.state.voiceFormattedText) {
// Use cached formatted text
this.state.voiceText = this.state.voiceFormattedText;
} else {
// Fetch from GPT
await this._voiceFormatText();
}
} else {
// Switch back to raw transcription
this.state.voiceText = this.state.voiceRawText;
}
},
/**
* Post the current voice note text to chatter.
*/
async voicePostNote() {
await this._voicePostNote();
},
/**
* Cancel the voice note review and reset to idle.
*/
voiceCancelNote() {
this._voiceCleanup();
Object.assign(this.state, {
voiceStatus: 'idle',
voiceText: '',
voiceRawText: '',
voiceFormattedText: '',
voiceError: '',
voiceDuration: 0,
});
},
/**
* Cancel an active recording without transcribing.
*/
voiceCancelRecording() {
this._voiceRecorder.cancel();
if (this._voiceSpeechRecognizer) {
this._voiceSpeechRecognizer.cancel();
this._voiceSpeechRecognizer = null;
}
clearInterval(this._voiceTimer);
this._voiceTimer = null;
this.state.voiceStatus = 'idle';
this.state.voiceDuration = 0;
},
/**
* Format seconds as M:SS for the recording timer display.
*/
voiceFormatDuration(seconds) {
const mins = Math.floor(seconds / 60);
const secs = seconds % 60;
return `${mins}:${secs.toString().padStart(2, '0')}`;
},
/**
* Handle textarea input for the review panel.
*/
onVoiceTextInput(ev) {
this.state.voiceText = ev.target.value;
},
/**
* Override toggleComposer to close voice note review when the user
* opens the normal Send message / Log note composer.
*/
toggleComposer(mode = false, options = {}) {
if (this.state.voiceStatus === 'review') {
this.voiceCancelNote();
}
return super.toggleComposer(mode, options);
},
// ===================================================================
// Private methods
// ===================================================================
/**
* Send the raw transcription to GPT for professional formatting.
*/
async _voiceFormatText() {
const previousStatus = this.state.voiceStatus;
this.state.voiceStatus = 'formatting';
try {
const result = await rpc('/fusion_notes/format', {
text: this.state.voiceRawText,
});
if (result.error) {
this._voiceNotify(
`Formatting failed: ${result.error}`,
'warning'
);
this.state.voiceAiFormat = false;
this.state.voiceText = this.state.voiceRawText;
} else {
this.state.voiceFormattedText = result.text;
this.state.voiceText = result.text;
}
} catch {
this._voiceNotify(
'AI formatting failed. Showing raw text.',
'warning'
);
this.state.voiceAiFormat = false;
this.state.voiceText = this.state.voiceRawText;
}
// Restore appropriate status
this.state.voiceStatus =
previousStatus === 'transcribing' || previousStatus === 'formatting'
? 'review'
: previousStatus;
},
/**
* Post the voice note to the thread's chatter via RPC.
*/
async _voicePostNote() {
if (!this.state.voiceText || !this.state.voiceText.trim()) {
this._voiceNotify('Cannot post an empty note.', 'warning');
return;
}
const thread = this.state.thread;
if (!thread || !thread.id) {
this._voiceNotify(
'Cannot post: no active record.',
'warning'
);
return;
}
try {
const result = await rpc('/fusion_notes/post_note', {
thread_model: thread.model,
thread_id: thread.id,
body: this.state.voiceText,
});
if (result.error) {
throw new Error(result.error);
}
// Success - reset state and refresh chatter messages
this._voiceNotify('Voice note posted.', 'success');
this.voiceCancelNote();
this.onPostCallback();
} catch (error) {
this._voiceNotify(
`Failed to post note: ${error.message || 'Unknown error'}`,
'danger'
);
}
},
/**
* Show a notification to the user.
*/
_voiceNotify(message, type = 'info') {
if (this.notificationService) {
this.notificationService.add(message, { type });
}
},
/**
* Clean up all voice recording resources.
*/
_voiceCleanup() {
if (this._voiceTimer) {
clearInterval(this._voiceTimer);
this._voiceTimer = null;
}
if (this._voiceRecorder) {
this._voiceRecorder.cancel();
}
if (this._voiceSpeechRecognizer) {
this._voiceSpeechRecognizer.cancel();
this._voiceSpeechRecognizer = null;
}
},
});

fusion_notes/static/src/js/voice_note_service.js Normal file

@@ -0,0 +1,265 @@
/** @odoo-module **/
// Fusion Notes - Voice Recording Service
// Copyright 2026 Nexa Systems Inc.
// License OPL-1
//
// Provides AudioRecorder (MediaRecorder API) and SpeechRecognizer
// (Web Speech API fallback) for voice-to-text in Odoo chatter.
/**
* AudioRecorder - Records microphone audio using the MediaRecorder API.
*
* Usage:
* const recorder = new AudioRecorder();
* await recorder.start(); // request mic + begin recording
* const blob = await recorder.stop(); // stop and get audio Blob
* recorder.cancel(); // abort without producing output
*/
export class AudioRecorder {
constructor() {
this.mediaRecorder = null;
this.audioChunks = [];
this.stream = null;
this.mimeType = this._getSupportedMimeType();
}
/**
* Pick the best supported MIME type for recording.
* Whisper accepts webm, ogg, mp4, mp3, wav, etc.
*/
_getSupportedMimeType() {
const types = [
'audio/webm;codecs=opus',
'audio/webm',
'audio/ogg;codecs=opus',
'audio/ogg',
'audio/mp4',
];
for (const type of types) {
if (
typeof MediaRecorder !== 'undefined' &&
MediaRecorder.isTypeSupported(type)
) {
return type;
}
}
return 'audio/webm';
}
/** Base MIME type without codec suffix (for Whisper file upload). */
get baseMimeType() {
return this.mimeType.split(';')[0];
}
/** Whether MediaRecorder is available in this browser. */
get isSupported() {
return (
typeof navigator !== 'undefined' &&
navigator.mediaDevices &&
typeof navigator.mediaDevices.getUserMedia === 'function' &&
typeof MediaRecorder !== 'undefined'
);
}
/** Request microphone access and begin recording. */
async start() {
if (!this.isSupported) {
throw new Error(
'Audio recording is not supported in this browser.'
);
}
this.audioChunks = [];
this.stream = await navigator.mediaDevices.getUserMedia({
audio: {
echoCancellation: true,
noiseSuppression: true,
sampleRate: 16000,
},
});
this.mediaRecorder = new MediaRecorder(this.stream, {
mimeType: this.mimeType,
});
this.mediaRecorder.ondataavailable = (event) => {
if (event.data.size > 0) {
this.audioChunks.push(event.data);
}
};
// Collect data every second for more granular chunks
this.mediaRecorder.start(1000);
}
/** Stop recording and return the audio Blob. */
async stop() {
return new Promise((resolve, reject) => {
if (
!this.mediaRecorder ||
this.mediaRecorder.state === 'inactive'
) {
reject(new Error('No active recording.'));
return;
}
this.mediaRecorder.onstop = () => {
const blob = new Blob(this.audioChunks, {
type: this.baseMimeType,
});
this._cleanup();
resolve(blob);
};
this.mediaRecorder.onerror = (e) => {
this._cleanup();
reject(e);
};
this.mediaRecorder.stop();
});
}
/** Cancel recording and release resources without producing output. */
cancel() {
if (
this.mediaRecorder &&
this.mediaRecorder.state !== 'inactive'
) {
try {
this.mediaRecorder.stop();
} catch {
// Already stopped
}
}
this._cleanup();
}
/** Release microphone stream and reset state. */
_cleanup() {
if (this.stream) {
this.stream.getTracks().forEach((track) => track.stop());
this.stream = null;
}
this.mediaRecorder = null;
this.audioChunks = [];
}
}
/**
* SpeechRecognizer - Fallback using the browser Web Speech API.
*
* Provides real-time transcription without a server round-trip.
* Supported in Chrome, Edge, and some Chromium browsers.
*
* Usage:
* const recognizer = new SpeechRecognizer();
* const promise = recognizer.start(); // returns Promise<string>
* recognizer.stop(); // stop and resolve promise
* const text = await promise;
*/
export class SpeechRecognizer {
constructor() {
const SR =
window.SpeechRecognition || window.webkitSpeechRecognition;
this.isSupported = !!SR;
this.recognition = this.isSupported ? new SR() : null;
this.transcript = '';
this.isListening = false;
this._resolveStop = null;
if (this.recognition) {
this.recognition.continuous = true;
this.recognition.interimResults = true;
this.recognition.lang = 'en-US';
}
}
/**
* Start listening. Returns a promise that resolves with the final
* transcript when stop() is called or recognition ends naturally.
*/
start() {
if (!this.isSupported || !this.recognition) {
throw new Error(
'Speech Recognition is not supported in this browser.'
);
}
this.transcript = '';
this.isListening = true;
let finalTranscript = '';
return new Promise((resolve, reject) => {
this.recognition.onresult = (event) => {
let interimTranscript = '';
for (let i = event.resultIndex; i < event.results.length; i++) {
if (event.results[i].isFinal) {
finalTranscript += event.results[i][0].transcript + ' ';
} else {
interimTranscript += event.results[i][0].transcript;
}
}
this.transcript = (finalTranscript + interimTranscript).trim();
};
this.recognition.onerror = (event) => {
this.isListening = false;
if (event.error === 'no-speech') {
resolve(finalTranscript.trim());
} else {
reject(
new Error(`Speech recognition error: ${event.error}`)
);
}
};
this.recognition.onend = () => {
this.isListening = false;
resolve(finalTranscript.trim());
};
this.recognition.start();
this._resolveStop = () => {
this.recognition.stop();
};
});
}
/** Signal recognition to stop; the start() promise will resolve. */
stop() {
if (this._resolveStop) {
this._resolveStop();
}
}
/** Abort immediately without waiting for results. */
cancel() {
if (this.recognition && this.isListening) {
try {
this.recognition.abort();
} catch {
// Already stopped
}
}
this.isListening = false;
this.transcript = '';
this._resolveStop = null;
}
}
/**
* Convert a Blob to a base64-encoded string (without data-URI prefix).
*/
export function blobToBase64(blob) {
return new Promise((resolve, reject) => {
const reader = new FileReader();
reader.onloadend = () => {
// reader.result is "data:<mime>;base64,<data>" - extract <data>
const base64 = reader.result.split(',')[1];
resolve(base64);
};
reader.onerror = reject;
reader.readAsDataURL(blob);
});
}

fusion_notes/static/src/scss/fusion_notes.scss Normal file

@@ -0,0 +1,104 @@
// Fusion Notes - Voice Recording Styles
// Copyright 2026 Nexa Systems Inc.
// License OPL-1
//
// Uses only Odoo/Bootstrap variables and utility classes so the
// component follows the active theme (light, dark, or custom) with
// zero hardcoded colours.
// ======================================================================
// Mic button (idle state)
// ======================================================================
.fusion-notes-mic-btn {
.fa-microphone {
font-size: 1.1em;
}
&:hover:not(:disabled) {
color: var(--o-action, var(--bs-primary));
border-color: var(--o-action, var(--bs-primary));
}
}
// ======================================================================
// Recording state - inline indicator
// ======================================================================
.fusion-notes-recording {
background-color: var(--danger-bg-subtle, rgba($danger, 0.06));
border: 1px solid var(--danger-border-subtle, rgba($danger, 0.2));
border-radius: $border-radius;
padding: 2px 8px;
}
// Pulsing red dot
.fusion-notes-pulse {
display: inline-block;
width: 10px;
height: 10px;
border-radius: 50%;
background-color: $danger;
animation: fusion-notes-pulse-anim 1s ease-in-out infinite;
}
@keyframes fusion-notes-pulse-anim {
0%,
100% {
opacity: 1;
transform: scale(1);
}
50% {
opacity: 0.4;
transform: scale(1.4);
}
}
// ======================================================================
// Review panel - inherits background from parent (Chatter-top)
// ======================================================================
.fusion-notes-review {
background-color: inherit;
border-bottom: 1px solid $border-color;
.fusion-notes-textarea {
font-size: $font-size-sm;
resize: vertical;
min-height: 60px;
max-height: 200px;
}
.badge {
font-size: 0.7rem;
font-weight: 500;
}
}
// ======================================================================
// Mobile: keep recording indicator and review panel compact
// ======================================================================
@media (max-width: 767px) {
.fusion-notes-recording {
padding: 2px 6px;
// Hide timer text on very small screens to save space
.text-danger.fw-bold.small {
font-size: 0.7rem;
}
}
.fusion-notes-review {
.fusion-notes-textarea {
min-height: 50px;
max-height: 150px;
}
// Stack post button below badge on narrow screens
.d-flex.align-items-center.justify-content-between.mt-2 {
flex-wrap: wrap;
gap: 0.5rem;
}
}
}

fusion_notes/static/src/xml/voice_note_button.xml Normal file

@@ -0,0 +1,123 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!--
Fusion Notes - Chatter Voice Note Templates
Copyright 2026 Nexa Systems Inc.
License OPL-1
Extends the mail.Chatter template to add:
1. A microphone button in the topbar (idle / recording / transcribing states)
2. A review panel below the topbar for editing before posting
All styling uses Bootstrap/Odoo utility classes so the component
follows the active theme (light, dark, or custom) automatically.
-->
<templates xml:space="preserve">
<!-- Extend Chatter to inject voice note UI -->
<t t-inherit="mail.Chatter" t-inherit-mode="extension">
<!-- ============================================================ -->
<!-- 1. Mic button / recording controls in the topbar -->
<!-- Inserted right after the "Log note" button -->
<!-- ============================================================ -->
<xpath expr="//button[hasclass('o-mail-Chatter-logNote')]" position="after">
<!-- IDLE: Microphone button -->
<button t-if="state.voiceStatus === 'idle'"
class="o-mail-Chatter-voiceNote btn text-nowrap me-1 fusion-notes-mic-btn"
t-att-class="{
'btn-primary': state.composerType === 'note',
'btn-secondary': state.composerType !== 'note',
'my-2': !props.compactHeight
}"
t-att-disabled="isDisabled"
t-on-click="voiceStartRecording"
title="Record voice note">
<i class="fa fa-microphone"/>
</button>
<!-- RECORDING: Stop button + pulsing indicator + timer -->
<div t-if="state.voiceStatus === 'recording'"
class="d-flex align-items-center me-1 fusion-notes-recording"
t-att-class="{ 'my-2': !props.compactHeight }">
<button class="btn btn-danger btn-sm me-1 px-2"
t-on-click="voiceStopRecording"
title="Stop recording">
<i class="fa fa-stop"/>
</button>
<span class="fusion-notes-pulse me-1"/>
<span class="text-danger fw-bold small"
t-esc="voiceFormatDuration(state.voiceDuration)"/>
<button class="btn btn-link btn-sm opacity-75 opacity-100-hover ms-1 p-0"
t-on-click="voiceCancelRecording"
title="Cancel recording">
<i class="fa fa-times"/>
</button>
</div>
<!-- TRANSCRIBING / FORMATTING: Spinner -->
<div t-if="state.voiceStatus === 'transcribing' || state.voiceStatus === 'formatting'"
class="d-flex align-items-center me-1"
t-att-class="{ 'my-2': !props.compactHeight }">
<i class="fa fa-spinner fa-spin me-1 opacity-75"/>
<span class="small opacity-75"
t-if="state.voiceStatus === 'transcribing'">Transcribing...</span>
<span class="small opacity-75"
t-if="state.voiceStatus === 'formatting'">Formatting...</span>
</div>
</xpath>
<!-- ============================================================ -->
<!-- 2. Review panel below topbar (before the normal Composer) -->
<!-- ============================================================ -->
<xpath expr="//div[hasclass('o-mail-Chatter-topbar')]" position="after">
<div t-if="state.voiceStatus === 'review'" class="fusion-notes-review">
<!-- Header row -->
<div class="d-flex align-items-center justify-content-between px-3 pt-2 pb-1">
<span class="fw-bold small">
<i class="fa fa-microphone me-1"/>Voice Note
</span>
<div class="d-flex align-items-center gap-1">
<button class="btn btn-sm"
t-att-class="state.voiceAiFormat ? 'btn-primary' : 'btn-light border'"
t-on-click="voiceToggleAiFormat"
title="Toggle AI formatting">
<i class="fa fa-magic me-1"/>AI Format
</button>
<button class="btn btn-sm btn-light border"
t-on-click="voiceCancelNote"
title="Cancel">
<i class="fa fa-times"/>
</button>
</div>
</div>
<!-- Text area -->
<div class="px-3 pb-2">
<textarea class="form-control fusion-notes-textarea"
t-att-value="state.voiceText"
t-on-input="onVoiceTextInput"
rows="3"
placeholder="Transcribed text will appear here..."/>
<div class="d-flex align-items-center justify-content-between mt-2">
<div class="small opacity-75">
<span t-if="state.voiceAiFormat"
class="badge rounded-pill text-bg-info me-1">AI Formatted</span>
<span t-else=""
class="badge rounded-pill text-bg-light border me-1">Raw Transcription</span>
</div>
<button class="btn btn-primary btn-sm"
t-on-click="voicePostNote">
<i class="fa fa-paper-plane me-1"/>Post Note
</button>
</div>
</div>
</div>
</xpath>
</t>
</templates>

fusion_notes/views/res_config_settings_views.xml Normal file

@@ -0,0 +1,99 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Copyright 2026 Nexa Systems Inc.
License OPL-1 (Odoo Proprietary License v1.0)
-->
<odoo>
<record id="res_config_settings_view_form_fusion_notes" model="ir.ui.view">
<field name="name">res.config.settings.view.form.fusion.notes</field>
<field name="model">res.config.settings</field>
<field name="inherit_id" ref="base.res_config_settings_view_form"/>
<field name="arch" type="xml">
<xpath expr="//form" position="inside">
<app data-string="Fusion Notes" string="Fusion Notes" name="fusion_notes">
<h2>Voice Transcription</h2>
<div class="row mt-4 o_settings_container">
<!-- OpenAI API Key -->
<div class="col-12 col-lg-6 o_setting_box">
<div class="o_setting_right_pane">
<span class="o_form_label">OpenAI API Key</span>
<div class="text-muted">
Required for Whisper voice transcription and GPT note
formatting. Get your key at
<a href="https://platform.openai.com" target="_blank">
platform.openai.com
</a>
</div>
<div class="content-group mt-2">
<field name="fn_openai_api_key" password="True"/>
</div>
</div>
</div>
<!-- AI Formatting Model -->
<div class="col-12 col-lg-6 o_setting_box">
<div class="o_setting_right_pane">
<span class="o_form_label">AI Formatting Model</span>
<div class="text-muted">
Model used to clean up and professionally format
transcribed voice notes. GPT-4o Mini is recommended
for speed and low cost.
</div>
<div class="content-group mt-2">
<field name="fn_ai_model"/>
</div>
</div>
</div>
</div>
<h2>Defaults</h2>
<div class="row mt-4 o_settings_container">
<!-- Review Before Posting -->
<div class="col-12 col-lg-6 o_setting_box">
<div class="o_setting_left_pane">
<field name="fn_default_review_mode"/>
</div>
<div class="o_setting_right_pane">
<span class="o_form_label">Review Before Posting</span>
<div class="text-muted">
Show transcribed text for review and editing before
posting to chatter. If disabled, notes are posted
immediately after transcription.
</div>
</div>
</div>
<!-- AI Format by Default -->
<div class="col-12 col-lg-6 o_setting_box">
<div class="o_setting_left_pane">
<field name="fn_default_ai_format"/>
</div>
<div class="o_setting_right_pane">
<span class="o_form_label">AI Format by Default</span>
<div class="text-muted">
Automatically format notes with AI before review.
Users can still toggle this per note.
</div>
</div>
</div>
<!-- Max Recording Duration -->
<div class="col-12 col-lg-6 o_setting_box">
<div class="o_setting_right_pane">
<span class="o_form_label">Max Recording Duration</span>
<div class="text-muted">
Maximum recording time in seconds. Recording
automatically stops when this limit is reached.
Default: 300 seconds (5 minutes).
</div>
<div class="content-group mt-2">
<field name="fn_max_recording_seconds"/>
</div>
</div>
</div>
</div>
</app>
</xpath>
</field>
</record>
</odoo>