Hey Dev,
Want to make a voice assistant that feels like Jarvis? Glowing eyes, particle beams, AI typing, and real voice interaction, all using just frontend tech!
💡 Project Goal
We're building a Jarvis-style AI assistant UI in the browser that:
Greets the user automatically
Speaks in a calm robotic voice
Uses animated glowing circles for "thinking"
Displays answers with a typewriter effect
Supports voice recognition (mic input)
All in a single-page HTML + JS setup: no backend of your own required (the full demo below calls the OpenAI and ElevenLabs APIs directly from the browser).
🧠 Core Features
Voice input (Web Speech Recognition)
Voice output (Web Speech Synthesis, or ElevenLabs TTS as in the full sample; a browser-only sketch follows this list)
Typewriter text animation
Glowing particle beam effect on response
Time projection (like AI's eyes projecting time into space)
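Before the full demo, here is a minimal, key-free sketch of the listen-and-reply loop built only on the browser's two Web Speech APIs. It assumes a button with the id start-btn (the same id the full sample uses); swap the reply logic for whatever you like.

// Minimal voice loop using only the browser's built-in Web Speech APIs.
// No external services or API keys; works in Chromium-based browsers.
const Recognition = window.SpeechRecognition || window.webkitSpeechRecognition;
const recognizer = new Recognition();
recognizer.lang = 'en-US';

recognizer.onresult = (event) => {
  const heard = event.results[0][0].transcript;         // what the user said
  const reply = 'You said: ' + heard;                    // replace with your own logic
  const utterance = new SpeechSynthesisUtterance(reply);
  utterance.rate = 0.9;                                  // a slightly slower, calmer voice
  speechSynthesis.speak(utterance);                      // browser-native text-to-speech
};

// Recognition must be started from a user gesture in most browsers.
document.getElementById('start-btn').addEventListener('click', () => recognizer.start());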
🔧 Tech Stack
HTML + CSS (UI and animations)
JavaScript (logic + speech APIs)
No frameworks needed
💻 Sample Code Snippet
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
<title>Blob AI - ChatGPT + ElevenLabs</title>
<style>
@import url('https://fonts.googleapis.com/css2?family=Orbitron:wght@400;700&display=swap');
* { margin:0; padding:0; box-sizing:border-box; font-family:'Orbitron',sans-serif; }
body {
background:radial-gradient(circle at center,#000 40%,#001f24 100%);
color:#00f6ff;
display:flex;
justify-content:center;
align-items:center;
height:100vh;
flex-direction:column;
overflow:hidden;
text-align:center;
}
.assistant-core { position:relative; width:180px; height:180px; }
.glow-circle {
position:absolute; inset:0; border-radius:50%;
background:rgba(0,246,255,0.1);
border:2px solid #00f6ff;
box-shadow:0 0 40px #00f6ff, inset 0 0 40px #00f6ff;
display:flex; justify-content:center; align-items:center;
animation:pulse 2s infinite;
}
@keyframes pulse {
0%,100%{transform:scale(1);opacity:0.9}
50%{transform:scale(1.1);opacity:1}
}
.waves { pointer-events:none; position:absolute; inset:0; }
.waves span {
position:absolute; inset:0; border-radius:50%;
border:2px solid rgba(0,246,255,0.8);
box-shadow:0 0 10px rgba(0,246,255,0.8);
opacity:0; transform:scale(1);
animation:wavePulse 1.8s linear infinite;
animation-play-state:paused;
}
.waves span:nth-child(2){animation-delay:0.4s}
.waves span:nth-child(3){animation-delay:0.8s}
.waves span:nth-child(4){animation-delay:1.2s}
.assistant-core.speaking .waves span{animation-play-state:running}
@keyframes wavePulse {
0%{opacity:0.9;transform:scale(1);box-shadow:0 0 10px rgba(0,246,255,0.9)}
60%{opacity:0.4}
100%{opacity:0;transform:scale(2.8);box-shadow:0 0 60px rgba(0,246,255,0)}
}
.text { margin-top:20px; font-size:1.2rem; min-height:30px; max-width: 80%; }
button {
margin-top:30px; padding:10px 20px; border:none;
background:#00f6ff; color:#000; font-weight:bold;
border-radius:8px; cursor:pointer; transition:0.3s;
}
button:hover { background:#0ff; box-shadow:0 0 10px #00f6ff; }
audio { display:none; }
</style>
</head>
<body>
<div class="assistant-core" id="assistant-core">
<div class="glow-circle"></div>
<div class="waves">
<span></span><span></span><span></span><span></span>
</div>
</div>
<div class="text" id="assistant-text"></div>
<button id="start-btn">🎙 Start Listening</button>
<audio id="voice-player"></audio>
<audio id="background-hum" loop autoplay muted>
<source src="https://cdn.pixabay.com/audio/2021/08/04/audio_1b46e2d89d.mp3" type="audio/mpeg">
</audio>
<script>
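// NOTE: These keys are visible to anyone viewing the page source; fine for a local demo,
// but don't ship real keys in public client-side code.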
const ELEVENLABS_API_KEY = "Your api key"; // Your ElevenLabs key
const VOICE_ID = "XjLkpWUlnhS8i7gGz3lZ"; // ElevenLabs voice ID
const OPENAI_API_KEY = "your api key"; // <-- Add your OpenAI API key here
const textEl = document.getElementById('assistant-text');
const startBtn = document.getElementById('start-btn');
const assistantCore = document.getElementById('assistant-core');
const voicePlayer = document.getElementById('voice-player');
const backgroundHum = document.getElementById('background-hum');
let typingTimer = null; // tracks the current typewriter run so a new message can cancel it
function typeText(el, text, speed = 40) {
clearTimeout(typingTimer); // stop an earlier message that is still typing
el.innerHTML = "";
let i = 0;
function step(){
if(i < text.length){
el.innerHTML += text.charAt(i++);
typingTimer = setTimeout(step, speed);
}
}
step();
}
// ElevenLabs TTS
async function generateVoiceElevenLabs(message) {
assistantCore.classList.add('speaking');
try {
const response = await fetch(`https://api.elevenlabs.io/v1/text-to-speech/${VOICE_ID}`, {
method: "POST",
headers: {
"Accept": "audio/mpeg",
"Content-Type": "application/json",
"xi-api-key": ELEVENLABS_API_KEY
},
body: JSON.stringify({
text: message,
voice_settings: { stability: 0.5, similarity_boost: 0.5 }
})
});
if (!response.ok) throw new Error("ElevenLabs request failed: " + response.status);
const audioData = await response.blob();
const audioUrl = URL.createObjectURL(audioData);
voicePlayer.src = audioUrl;
voicePlayer.play();
voicePlayer.onended = () => assistantCore.classList.remove('speaking');
} catch (err) {
console.error("ElevenLabs Error:", err);
assistantCore.classList.remove('speaking'); // stop the wave animation if the request fails
}
}
// ChatGPT API
async function getChatGPTReply(userInput) {
try {
const response = await fetch("https://api.openai.com/v1/chat/completions", {
method: "POST",
headers: {
"Content-Type": "application/json",
"Authorization": `Bearer ${OPENAI_API_KEY}`
},
body: JSON.stringify({
model: "gpt-4o-mini",
messages: [{ role: "user", content: userInput }],
max_tokens: 60
})
});
if (!response.ok) throw new Error("OpenAI request failed: " + response.status);
const data = await response.json();
return data.choices[0].message.content.trim();
} catch (err) {
console.error("ChatGPT Error:", err);
return "I am having trouble connecting to ChatGPT right now.";
}
}
// Speech Recognition
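// SpeechRecognition is not available in every browser; the webkit prefix covers Chromium-based ones.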
const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
let recognition = null;
if (SpeechRecognition) {
recognition = new SpeechRecognition();
recognition.lang = 'en-US';
recognition.interimResults = false;
recognition.continuous = true; // Always listening
recognition.onstart = () => typeText(textEl, 'Listening...');
recognition.onerror = (e) => {
typeText(textEl, 'Error: ' + (e.error || 'unknown'));
generateVoiceElevenLabs('Sorry, I had trouble listening.');
};
recognition.onresult = async (event) => {
const userSpeech = event.results[event.results.length - 1][0].transcript;
typeText(textEl, 'You said: ' + userSpeech);
// Get response from ChatGPT
const aiReply = await getChatGPTReply(userSpeech);
typeText(textEl, aiReply);
generateVoiceElevenLabs(aiReply);
};
recognition.onend = () => recognition.start(); // Auto-restart so it keeps listening (continuous recognition still ends after long silence)
}
// Enable background hum after user interaction
startBtn.addEventListener('click', () => {
backgroundHum.muted = false;
if (recognition) {
recognition.start();
} else {
typeText(textEl, 'Speech recognition not supported in this browser.');
generateVoiceElevenLabs('Speech recognition is not supported in this browser.');
}
});
const greeting = 'Hello! I am Blob AI, your personal voice assistant.';
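// Most browsers block audio before the first user gesture, so the spoken greeting may stay silent until the button is clicked.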
window.addEventListener('load', () => {
typeText(textEl, greeting);
setTimeout(() => generateVoiceElevenLabs(greeting), 300);
});
</script>
</body>
</html>
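To try the demo, save it as index.html, fill in the three key constants at the top of the script with your own ElevenLabs and OpenAI credentials, and open it in a Chromium-based browser (Firefox does not expose SpeechRecognition).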
🚀 Add-Ons You Can Build
"AI is thinking..." glow ring
Particle beam when answering
Time/date projection as hologram
Voice wake-word detection ("Hey Jarvis"); a rough sketch follows this list
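For the wake-word idea, here is a rough sketch layered on the same recognition object from the sample above. The wake phrase and the handleCommand() helper are placeholders for your own logic, not part of any library.

// Rough wake-word filter on top of the existing recognition.onresult handler.
// "hey jarvis" and handleCommand() are placeholders; wire them to your own logic.
const WAKE_WORD = 'hey jarvis';

recognition.onresult = (event) => {
  const transcript = event.results[event.results.length - 1][0].transcript.toLowerCase();
  if (!transcript.includes(WAKE_WORD)) return;             // ignore speech without the wake word
  const command = transcript.split(WAKE_WORD)[1].trim();   // whatever was said after the wake word
  if (command) handleCommand(command);                      // e.g. pass it to getChatGPTReply()
};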
🏁 Final Words
This project is perfect for beginner and intermediate devs who want to build something futuristic and interactive. It can be the base of:
A portfolio project
A personal productivity tool
A smart bot UI on your homepage
And it looks awesome.
💬 What features would you add to your personal Jarvis?
- Raj Guru Yadav