1+ import React , { useState , useRef , useEffect } from 'react' ;
2+ import { Mic , StopCircle , Loader2 } from 'lucide-react' ;
3+ import axios from 'axios' ;
4+ import { TranscriptResponse } from '../types' ;
5+
/**
 * Props for the {@link LiveRecording} component.
 */
interface LiveRecordingProps {
  /**
   * Invoked with the parsed ElevenLabs speech-to-text response once a
   * finished recording has been transcribed successfully.
   */
  onTranscriptReceived: (transcript: TranscriptResponse) => void;
  /** ElevenLabs API key, sent as the `xi-api-key` request header. */
  apiKey: string;
}
10+
11+ const LiveRecording : React . FC < LiveRecordingProps > = ( { onTranscriptReceived, apiKey } ) => {
12+ const [ isRecording , setIsRecording ] = useState ( false ) ;
13+ const [ recordingTime , setRecordingTime ] = useState ( 0 ) ;
14+ const [ isProcessing , setIsProcessing ] = useState ( false ) ;
15+ const [ error , setError ] = useState < string | null > ( null ) ;
16+
17+ const mediaRecorderRef = useRef < MediaRecorder | null > ( null ) ;
18+ const audioChunksRef = useRef < Blob [ ] > ( [ ] ) ;
19+ const timerRef = useRef < number | null > ( null ) ;
20+ const streamRef = useRef < MediaStream | null > ( null ) ;
21+
22+ const startRecording = async ( ) => {
23+ try {
24+ setError ( null ) ;
25+ audioChunksRef . current = [ ] ;
26+
27+ console . log ( "Requesting microphone access..." ) ;
28+ const stream = await navigator . mediaDevices . getUserMedia ( { audio : true } ) ;
29+ streamRef . current = stream ;
30+
31+ console . log ( "Creating MediaRecorder..." ) ;
32+ const mediaRecorder = new MediaRecorder ( stream ) ;
33+
34+ mediaRecorder . ondataavailable = ( event ) => {
35+ console . log ( "Data available event triggered" , event . data . size ) ;
36+ if ( event . data . size > 0 ) {
37+ audioChunksRef . current . push ( event . data ) ;
38+ }
39+ } ;
40+
41+ mediaRecorder . onstop = ( ) => {
42+ console . log ( "MediaRecorder stopped" ) ;
43+ processAudio ( ) ;
44+ } ;
45+
46+ mediaRecorderRef . current = mediaRecorder ;
47+
48+ console . log ( "Starting MediaRecorder..." ) ;
49+ mediaRecorder . start ( 1000 ) ; // Collect data every second
50+ setIsRecording ( true ) ;
51+ setRecordingTime ( 0 ) ;
52+
53+ // Start timer
54+ timerRef . current = window . setInterval ( ( ) => {
55+ setRecordingTime ( prev => prev + 1 ) ;
56+ } , 1000 ) ;
57+
58+ console . log ( "Recording started successfully" ) ;
59+ } catch ( err ) {
60+ console . error ( 'Error accessing microphone:' , err ) ;
61+ setError ( 'Could not access your microphone. Please check permissions.' ) ;
62+ }
63+ } ;
64+
65+ const stopRecording = ( ) => {
66+ console . log ( "Stopping recording..." ) ;
67+ if ( mediaRecorderRef . current && isRecording ) {
68+ try {
69+ mediaRecorderRef . current . stop ( ) ;
70+
71+ // Stop all audio tracks
72+ if ( streamRef . current ) {
73+ streamRef . current . getTracks ( ) . forEach ( track => {
74+ console . log ( "Stopping track:" , track . kind ) ;
75+ track . stop ( ) ;
76+ } ) ;
77+ streamRef . current = null ;
78+ }
79+
80+ // Clear timer
81+ if ( timerRef . current ) {
82+ clearInterval ( timerRef . current ) ;
83+ timerRef . current = null ;
84+ }
85+
86+ setIsRecording ( false ) ;
87+ console . log ( "Recording stopped successfully" ) ;
88+ } catch ( err ) {
89+ console . error ( "Error stopping recording:" , err ) ;
90+ setError ( 'Error stopping recording. Please try again.' ) ;
91+ }
92+ } else {
93+ console . warn ( "Tried to stop recording but no MediaRecorder was active" ) ;
94+ }
95+ } ;
96+
97+ const processAudio = async ( ) => {
98+ console . log ( "Processing audio..." , audioChunksRef . current . length ) ;
99+ if ( audioChunksRef . current . length === 0 ) {
100+ setError ( "No audio recorded. Please try again." ) ;
101+ return ;
102+ }
103+
104+ setIsProcessing ( true ) ;
105+ setError ( null ) ;
106+
107+ try {
108+ console . log ( "Creating audio blob..." ) ;
109+ const audioBlob = new Blob ( audioChunksRef . current , { type : 'audio/webm' } ) ;
110+ console . log ( "Audio blob size:" , audioBlob . size ) ;
111+
112+ // Debug: Create an audio element to test the recording
113+ const audioUrl = URL . createObjectURL ( audioBlob ) ;
114+ const audio = new Audio ( audioUrl ) ;
115+ console . log ( "Audio URL created:" , audioUrl ) ;
116+
117+ const formData = new FormData ( ) ;
118+ formData . append ( 'file' , audioBlob , 'recording.webm' ) ;
119+ formData . append ( 'model_id' , 'scribe_v1' ) ;
120+ formData . append ( 'diarize' , 'true' ) ;
121+ formData . append ( 'timestamps_granularity' , 'word' ) ;
122+
123+ console . log ( "Sending request to ElevenLabs API..." ) ;
124+ console . log ( "API Key available:" , ! ! apiKey ) ;
125+
126+ const response = await axios . post < TranscriptResponse > (
127+ 'https://api.elevenlabs.io/v1/speech-to-text' ,
128+ formData ,
129+ {
130+ headers : {
131+ 'xi-api-key' : apiKey ,
132+ 'Content-Type' : 'multipart/form-data' ,
133+ } ,
134+ }
135+ ) ;
136+
137+ console . log ( "Response received:" , response . status ) ;
138+ onTranscriptReceived ( response . data ) ;
139+ } catch ( err ) {
140+ console . error ( 'Error processing audio:' , err ) ;
141+ setError (
142+ err instanceof Error
143+ ? err . message
144+ : 'An error occurred while processing your recording'
145+ ) ;
146+ } finally {
147+ setIsProcessing ( false ) ;
148+ }
149+ } ;
150+
151+ useEffect ( ( ) => {
152+ return ( ) => {
153+ if ( timerRef . current ) {
154+ clearInterval ( timerRef . current ) ;
155+ }
156+
157+ if ( streamRef . current ) {
158+ streamRef . current . getTracks ( ) . forEach ( track => track . stop ( ) ) ;
159+ }
160+ } ;
161+ } , [ ] ) ;
162+
163+ const formatTime = ( seconds : number ) => {
164+ const mins = Math . floor ( seconds / 60 ) ;
165+ const secs = seconds % 60 ;
166+ return `${ mins } :${ secs . toString ( ) . padStart ( 2 , '0' ) } ` ;
167+ } ;
168+
169+ return (
170+ < div className = "w-full max-w-md mx-auto bg-gray-800 p-6 rounded-xl shadow-xl border border-gray-700 transform hover:scale-[1.01] transition-all duration-300 mt-8" >
171+ < h2 className = "text-xl font-semibold mb-4 text-transparent bg-clip-text bg-gradient-to-r from-cyan-400 to-blue-500" > Live Recording</ h2 >
172+
173+ < div className = "flex flex-col items-center justify-center" >
174+ { isRecording ? (
175+ < div className = "relative mb-4" >
176+ < div className = "absolute inset-0 bg-red-500/20 rounded-full animate-ping" > </ div >
177+ < button
178+ onClick = { stopRecording }
179+ className = "relative z-10 p-6 bg-gradient-to-r from-red-600 to-red-500 rounded-full shadow-glow hover:shadow-lg hover:from-red-700 hover:to-red-600 transition-all duration-300"
180+ >
181+ < StopCircle className = "h-10 w-10 text-white" />
182+ </ button >
183+ </ div >
184+ ) : (
185+ < button
186+ onClick = { startRecording }
187+ disabled = { isProcessing }
188+ className = "p-6 bg-gradient-to-r from-green-600 to-emerald-500 rounded-full shadow-glow hover:shadow-lg hover:from-green-700 hover:to-emerald-600 transition-all duration-300 mb-4 disabled:opacity-50 disabled:cursor-not-allowed"
189+ >
190+ < Mic className = "h-10 w-10 text-white" />
191+ </ button >
192+ ) }
193+
194+ { isRecording && (
195+ < div className = "flex items-center space-x-2 mb-4" >
196+ < div className = "w-3 h-3 rounded-full bg-red-500 animate-pulse" > </ div >
197+ < span className = "text-lg font-medium text-white" > { formatTime ( recordingTime ) } </ span >
198+ </ div >
199+ ) }
200+
201+ { isProcessing && (
202+ < div className = "flex items-center space-x-2 text-gray-300 mt-2" >
203+ < Loader2 className = "animate-spin h-5 w-5" />
204+ < span > Processing audio...</ span >
205+ </ div >
206+ ) }
207+
208+ { error && (
209+ < div className = "mt-4 p-3 bg-red-900/30 text-red-400 border border-red-800 rounded-md text-sm animate-shake w-full" >
210+ { error }
211+ </ div >
212+ ) }
213+ </ div >
214+
215+ < div className = "mt-4 text-center text-sm text-gray-400" >
216+ { isRecording ?
217+ "Click the stop button when you're finished recording" :
218+ "Click the microphone to start recording your conversation"
219+ }
220+ </ div >
221+
222+ < div className = "mt-4 text-xs text-gray-500" >
223+ Note: Make sure your browser has permission to access your microphone.
224+ </ div >
225+ </ div >
226+ ) ;
227+ } ;
228+
229+ export default LiveRecording ;
0 commit comments