Hey dev,
Imagine having a personal AI assistant like Iron Man’s Jarvis — one that can answer questions, control your computer, manage reminders, play music, and even tell jokes. In this post, I’ll show you how to build Lucy AI, a modular, real-world Jarvis prototype, with working code examples.
Project Structure
lucy-ai/
├─ server.js             # Node.js backend
├─ public/
│  ├─ index.html         # UI frontend
│  ├─ main.js            # Frontend JS for chat & voice
│  └─ style.css          # Dark-glass UI styling
├─ modules/
│  ├─ multiLLM.js        # Multi-LLM reasoning (ChatGPT, Gemini, Grok, DeepSeek, OpenRouter)
│  ├─ speech.js          # Voice recognition & TTS
│  └─ fun.js             # Fun modules (jokes, trivia)
├─ package.json
└─ .env                  # API keys
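Since server.js and the modules use ES module imports, package.json needs "type": "module" (or the files would need the .mjs extension). Here is a minimal sketch of package.json and .env; the dependency versions are simply recent ones that should work, and the key names match what the code reads.
package.json
{
  "name": "lucy-ai",
  "version": "1.0.0",
  "type": "module",
  "scripts": { "start": "node server.js" },
  "dependencies": {
    "axios": "^1.7.0",
    "dotenv": "^16.4.0",
    "express": "^4.19.0",
    "openai": "^4.50.0"
  }
}
.env
OPENAI_API_KEY=your-openai-key
GEMINI_API_KEY=your-gemini-key
GROK_API_KEY=your-grok-key
DEEPSEEK_API_KEY=your-deepseek-key
OPENROUTER_API_KEY=your-openrouter-key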
server.js (Backend)
import express from "express";
import dotenv from "dotenv";
import { askAI } from "./modules/multiLLM.js";
import { tellJoke } from "./modules/fun.js";
dotenv.config();
const app = express();
app.use(express.json());
app.use(express.static("public"));
app.use("/modules", express.static("modules")); // also serve the browser-side speech module to the frontend
// AI reasoning endpoint
app.post("/ask", async (req, res) => {
    const { question, provider } = req.body;
    const answer = await askAI(question, provider || "auto");
    res.json({ answer });
});
// Fun endpoint
app.get("/joke", async (req, res) => {
    const joke = await tellJoke();
    res.json({ joke });
});
app.listen(3000, () => console.log("Lucy AI running at http://localhost:3000"));
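With the server running, you can smoke-test both endpoints without touching the UI. A tiny script like this does the job (Node 18+ for the built-in fetch; the filename test.mjs is just an example):
// test.mjs (hypothetical smoke test): node test.mjs
const ask = await fetch("http://localhost:3000/ask", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ question: "Who are you?", provider: "chatgpt" })
});
console.log((await ask.json()).answer);
const joke = await fetch("http://localhost:3000/joke");
console.log((await joke.json()).joke);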
modules/multiLLM.js
import OpenAI from "openai";
import axios from "axios";
import dotenv from "dotenv";
dotenv.config();
const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });
// Main Multi-LLM function
export async function askAI(question, provider = "auto") {
    let answer;
    try {
        switch(provider) {
            case "chatgpt": answer = await chatGPT(question); break;
            case "gemini": answer = await gemini(question); break;
            case "grok": answer = await grok(question); break;
            case "deepseek": answer = await deepSeek(question); break;
            case "openrouter": answer = await openRouter(question); break;
            case "auto":
            default:
                answer = await chatGPT(question)
                    .catch(() => gemini(question))
                    .catch(() => grok(question))
                    .catch(() => deepSeek(question))
                    .catch(() => openRouter(question));
        }
    } catch (err) {
        console.error("All LLM providers failed:", err);
        answer = "Sorry, I couldn't fetch an answer right now.";
    }
    return answer;
}
// Providers
async function chatGPT(question) {
    const res = await openai.chat.completions.create({
        model: "gpt-4o-mini",
        messages: [{ role: "user", content: question }],
    });
    return res.choices[0].message.content;
}
async function gemini(question) {
    // Google's Gemini API (Generative Language API); the key goes in the query string.
    const url = `https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=${process.env.GEMINI_API_KEY}`;
    const res = await axios.post(url, {
        contents: [{ parts: [{ text: question }] }],
    });
    return res.data.candidates[0].content.parts[0].text;
}
async function grok(question) {
    // xAI's Grok API is OpenAI-compatible; check xAI's docs for current model names.
    const res = await axios.post("https://api.x.ai/v1/chat/completions", {
        model: "grok-2-latest",
        messages: [{ role: "user", content: question }],
    }, {
        headers: { "Authorization": `Bearer ${process.env.GROK_API_KEY}` },
    });
    return res.data.choices[0].message.content;
}
async function deepSeek(question) {
    // DeepSeek's API is also OpenAI-compatible.
    const res = await axios.post("https://api.deepseek.com/chat/completions", {
        model: "deepseek-chat",
        messages: [{ role: "user", content: question }],
    }, {
        headers: { "Authorization": `Bearer ${process.env.DEEPSEEK_API_KEY}` },
    });
    return res.data.choices[0].message.content;
}
async function openRouter(question) {
    const res = await axios.post("https://openrouter.ai/api/v1/chat/completions", {
        model: "gpt-4o-mini",
        messages: [{ role: "user", content: question }]
    }, {
        headers: { "Authorization": `Bearer ${process.env.OPENROUTER_API_KEY}` }
    });
    return res.data.choices[0].message.content;
}
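The "auto" branch chains .catch() handlers, so a failed ChatGPT call falls through to Gemini, then Grok, then DeepSeek, then OpenRouter. You can exercise askAI directly from a small Node script too; this sketch assumes it is run from the project root:
// try-llm.mjs (hypothetical): node try-llm.mjs
import { askAI } from "./modules/multiLLM.js";
console.log(await askAI("Summarize Iron Man in one sentence."));        // uses the "auto" fallback chain
console.log(await askAI("Now answer via OpenRouter.", "openrouter"));   // pins a single provider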
modules/speech.js
// Voice recognition and text-to-speech via the browser's Web Speech API (runs in the frontend, not in Node)
export function speak(text) {
    const utterance = new SpeechSynthesisUtterance(text);
    utterance.lang = "en-US";
    utterance.rate = 0.95;
    speechSynthesis.speak(utterance);
}
export function listen(callback) {
    const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
    if (!SpeechRecognition) {
        console.warn("Speech recognition is not supported in this browser.");
        return;
    }
    const recognition = new SpeechRecognition();
    recognition.lang = "en-US";
    recognition.onresult = (e) => callback(e.results[0][0].transcript);
    recognition.start();
}
modules/fun.js
export async function tellJoke() {
    const res = await fetch("https://official-joke-api.appspot.com/random_joke");
    const joke = await res.json();
    return `${joke.setup} ... ${joke.punchline}`;
}
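The project tree also mentions trivia alongside jokes. A hedged sketch of a trivia helper using the Open Trivia Database public API (opentdb.com; the response shape is assumed from its public docs) could sit right next to tellJoke:
// Random trivia question via the Open Trivia DB public API
export async function askTrivia() {
    const res = await fetch("https://opentdb.com/api.php?amount=1&type=multiple");
    const data = await res.json();
    const q = data.results[0];
    // Questions come back HTML-encoded (e.g. &quot;), so decode them before speaking them aloud.
    return `${q.question} (Answer: ${q.correct_answer})`;
}
Exposing it is just one more GET route in server.js, mirroring /joke.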
public/index.html
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Lucy AI - Your Personal Assistant</title>
<link rel="stylesheet" href="style.css">
<link href="https://fonts.googleapis.com/css2?family=Roboto:wght@400;500;700&display=swap" rel="stylesheet">
</head>
<body>
<div class="app-container">
    <header class="app-header">
        <h1>Lucy AI 🤖</h1>
        <p>Your intelligent personal assistant</p>
    </header>
    <main class="chat-container">
        <div id="messages" class="messages"></div>
        <div class="input-container">
            <input type="text" id="query" placeholder="Ask Lucy something..." />
            <button id="askBtn">Send</button>
            <button id="voiceBtn" title="Talk to Lucy">🎙️</button>
            <button id="jokeBtn" title="Ask Lucy for a joke">😂</button>
        </div>
    </main>
    <footer class="app-footer">
        <small>Lucy AI © 2025 | Developed by YourName</small>
    </footer>
</div>
<script type="module" src="main.js"></script>
</body>
</html>
public/style.css (Dark-Glass UI)
/* Global Styles */
* {
    margin: 0;
    padding: 0;
    box-sizing: border-box;
}
body {
    font-family: 'Roboto', sans-serif;
    background: linear-gradient(135deg, #0f2027, #203a43, #2c5364);
    color: #fff;
    display: flex;
    justify-content: center;
    align-items: center;
    height: 100vh;
}
/* App Container */
.app-container {
    width: 100%;
    max-width: 500px;
    height: 90vh;
    background: rgba(255, 255, 255, 0.05);
    backdrop-filter: blur(15px);
    border-radius: 20px;
    box-shadow: 0 0 40px rgba(0, 255, 255, 0.3);
    display: flex;
    flex-direction: column;
    overflow: hidden;
    border: 1px solid rgba(255,255,255,0.1);
}
/* Header */
.app-header {
    text-align: center;
    padding: 15px 20px;
    border-bottom: 1px solid rgba(255,255,255,0.1);
}
.app-header h1 {
    font-size: 1.8rem;
    color: #00f0ff;
}
.app-header p {
    font-size: 0.9rem;
    color: #aaa;
    margin-top: 5px;
}
/* Chat Container */
.chat-container {
    flex: 1;
    display: flex;
    flex-direction: column;
    padding: 10px 15px;
}
.messages {
    flex: 1;
    display: flex;
    flex-direction: column;
    overflow-y: auto;
    padding-right: 5px;
}
.messages div {
    margin-bottom: 12px;
    max-width: 80%;
    padding: 10px 15px;
    border-radius: 15px;
    animation: fadeIn 0.3s ease-in;
}
.messages .user {
    background: rgba(0, 255, 255, 0.2);
    align-self: flex-end;
    text-align: right;
}
.messages .lucy {
    background: rgba(0, 255, 255, 0.1);
    align-self: flex-start;
    text-align: left;
}
/* Input Container */
.input-container {
    display: flex;
    align-items: center;
    gap: 8px;
    margin-top: 10px;
}
#query {
    flex: 1;
    padding: 10px 15px;
    border-radius: 25px;
    border: none;
    background: rgba(255,255,255,0.1);
    color: #fff;
    outline: none;
    font-size: 0.95rem;
}
.input-container button {
    padding: 10px 15px;
    border-radius: 50%;
    border: none;
    cursor: pointer;
    background: #00f0ff;
    color: #000;
    font-weight: bold;
    transition: all 0.2s ease;
}
.input-container button:hover {
    background: #00c0cc;
    transform: scale(1.1);
}
/* Footer */
.app-footer {
    text-align: center;
    padding: 8px;
    font-size: 0.75rem;
    color: #888;
    border-top: 1px solid rgba(255,255,255,0.1);
}
/* Animations */
@keyframes fadeIn {
    from { opacity: 0; transform: translateY(10px);}
    to { opacity: 1; transform: translateY(0);}
}
/* Scrollbar Styling */
.messages::-webkit-scrollbar {
    width: 6px;
}
.messages::-webkit-scrollbar-thumb {
    background: rgba(0,255,255,0.3);
    border-radius: 3px;
}
public/main.js
import { speak, listen } from "/modules/speech.js"; // served by the extra static route in server.js
const messagesDiv = document.getElementById("messages");
const queryInput = document.getElementById("query");
document.getElementById("askBtn").addEventListener("click", askLucy);
document.getElementById("voiceBtn").addEventListener("click", () => {
    listen((text) => {
        queryInput.value = text;
        askLucy();
    });
});
document.getElementById("jokeBtn").addEventListener("click", async () => {
    const res = await fetch("/joke");
    const data = await res.json();
    addMessage("Lucy", data.joke);
    speak(data.joke);
});
async function askLucy() {
    const question = queryInput.value.trim();
    if(!question) return;
    addMessage("You", question);
    queryInput.value = "";
    const res = await fetch("/ask", {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({ question })
    });
    const data = await res.json();
    addMessage("Lucy", data.answer);
    speak(data.answer);
}
function addMessage(sender, text) {
    const msg = document.createElement("div");
    msg.className = sender === "You" ? "user" : "lucy"; // matches the .user / .lucy styles in style.css
    msg.innerHTML = `<b>${sender}:</b> ${text}`;
    messagesDiv.appendChild(msg);
    messagesDiv.scrollTop = messagesDiv.scrollHeight;
}
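One detail worth noticing: server.js accepts an optional provider field in the /ask body, but askLucy() never sends one, so every request uses the "auto" fallback chain. To pin a provider from the UI, only the fetch body changes (the "gemini" value here is just an example):
// Inside askLucy(): request a specific provider instead of "auto"
const res = await fetch("/ask", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ question, provider: "gemini" }) // or "chatgpt", "grok", "deepseek", "openrouter"
});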
✅ Features of this Full Starter Kit
- Multi-LLM Reasoning: ChatGPT, Gemini, Grok, DeepSeek, OpenRouter 
- Interactive UI: Dark-glass theme with chatbox and buttons 
- Voice Interaction: Speech-to-text and TTS 
- Fun Module: Random jokes 
- Modular Structure: Easily expandable for IoT, reminders, calendar, or multimedia (see the reminders sketch after this list)
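To make "easily expandable" concrete, here is a minimal sketch of a reminders module that follows the same pattern as fun.js: one file in modules/, one or two routes in server.js. The in-memory array is only for illustration; a real version would persist reminders somewhere.
// modules/reminders.js (sketch, in-memory only)
const reminders = [];
export function addReminder(text, time) {
    const reminder = { id: reminders.length + 1, text, time };
    reminders.push(reminder);
    return reminder;
}
export function listReminders() {
    return reminders;
}
// In server.js, mirror the /joke route:
// import { addReminder, listReminders } from "./modules/reminders.js";
// app.post("/reminders", (req, res) => res.json(addReminder(req.body.text, req.body.time)));
// app.get("/reminders", (req, res) => res.json(listReminders()));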
This starter kit is ready to run:
1) Add your API keys to .env
2) Install the dependencies: npm install
3) Start the server: node server.js
4) Open http://localhost:3000 in your browser
- Raj Guru Yadav