Here's a React component that turns text into speech right in the browser using the ElevenLabs JavaScript SDK (@elevenlabs/elevenlabs-js).
import { ElevenLabsClient } from '@elevenlabs/elevenlabs-js';
import React, { useState } from 'react';
interface TextToSpeechComponentProps {
  apiKey: string;
}

const TextToSpeechComponent: React.FC<TextToSpeechComponentProps> = ({ apiKey }) => {
  const [text, setText] = useState('');
  const [isLoading, setIsLoading] = useState(false);
  const [audioUrl, setAudioUrl] = useState<string | null>(null);

  // Initialize the ElevenLabs client.
  // Note: creating the client in the browser exposes the API key to anyone using the page;
  // for production, proxy the request through your own backend instead.
  const elevenlabs = new ElevenLabsClient({
    apiKey: apiKey,
  });
  const generateSpeech = async () => {
    if (!text.trim()) return;

    setIsLoading(true);
    try {
      // Convert text to speech using your specific voice ID
      const response = await elevenlabs.textToSpeech.convert('AvcDVzbaOUnXz0B27dGq', {
        text: text,
        modelId: 'eleven_multilingual_v2', // or another model like 'eleven_flash_v2_5'
        outputFormat: 'mp3_44100_128',
        voiceSettings: {
          stability: 0.5,
          similarityBoost: 0.75,
          style: 0.0,
          useSpeakerBoost: true,
          speed: 1.0,
        },
      });

      // Collect the streamed audio chunks, wrap them in a Blob, and create an object URL
      const chunks: Uint8Array[] = [];
      for await (const chunk of response) {
        chunks.push(chunk);
      }
      const audioBlob = new Blob(chunks, { type: 'audio/mpeg' });
      const url = URL.createObjectURL(audioBlob);
      setAudioUrl(url);
    } catch (error) {
      console.error('Error generating speech:', error);
    } finally {
      setIsLoading(false);
    }
  };
  const playAudio = () => {
    if (audioUrl) {
      const audio = new Audio(audioUrl);
      audio.play();
    }
  };
  return (
    <div>
      <textarea
        value={text}
        onChange={(e) => setText(e.target.value)}
        placeholder="Enter text to convert to speech..."
        rows={4}
        cols={50}
      />
      <div>
        <button
          onClick={generateSpeech}
          disabled={isLoading || !text.trim()}
        >
          {isLoading ? 'Generating...' : 'Generate Speech'}
        </button>
        {audioUrl && (
          <button onClick={playAudio}>
            Play Audio
          </button>
        )}
      </div>
      {audioUrl && (
        <audio controls src={audioUrl}>
          Your browser does not support the audio element.
        </audio>
      )}
    </div>
  );
};
export default TextToSpeechComponent;
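
For context, here's a minimal usage sketch showing how the component might be mounted. The parent App and the Vite-style env variable name (VITE_ELEVENLABS_API_KEY) are illustrative assumptions, not part of the component above; adjust for your own build setup.

// Hypothetical parent component; the env variable name is an assumption for this example.
import React from 'react';
import TextToSpeechComponent from './TextToSpeechComponent';

const App: React.FC = () => (
  <TextToSpeechComponent apiKey={import.meta.env.VITE_ELEVENLABS_API_KEY as string} />
);

export default App;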