In this post, we’ll walk through building a React Native app that generates children’s stories from a prompt and an age range, using models hosted on Hugging Face’s Inference API. Users enter a prompt, pick an age range, and get a custom story along with a cartoon-style image that illustrates it.
Features
- Interactive Story Generation: User input guides the AI to create engaging children’s stories.
- Summarization and Visualization: The story is summarized and displayed alongside an AI-generated image.
- Keyboard-Aware UI: A custom hook tracks the keyboard height so the layout adapts while the user types.
- Navigation and Styling: Using Expo Router for easy navigation and custom styles for an attractive UI.
Let’s break down each part!
Step 1: Setting Up React Native and Hugging Face API
Start by creating a new React Native project with Expo:
npx create-expo-app@latest KidsStoryApp
cd KidsStoryApp
Set up Expo Router in your app for easy navigation, and install any additional dependencies you may need, like icons or animations.
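If you’re following along, the extra dependencies might look something like this (the exact package list is an assumption — install whatever your app actually uses):

npx expo install expo-router @expo/vector-icons

The screens below also import a Hugging Face access token from a local env.js file. A minimal sketch, assuming you create this file at the project root yourself and keep it out of version control:

// env.js — local config, do not commit this file
export const HUGGING_FACE_KEY = "hf_your_token_here"; // personal Hugging Face access token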
Step 2: Creating the Story Generator Home Screen
In the Home.js file, we set up a screen where users can select an age range, enter a story prompt, and press a button to generate a story.
Home.js Code
import React, { useState } from "react";
import {
View,
Text,
TouchableOpacity,
StyleSheet,
TextInput,
Animated,
ActivityIndicator,
} from "react-native";
import useKeyboardOffsetHeight from "../hooks/useKeyboardOffsetHeight";
import { HUGGING_FACE_KEY } from "../env";
import { useRouter } from "expo-router";
const Home = () => {
const ageRanges = ["0-3", "4-6", "7-9"];
const [selectedAgeRange, setSelectedAgeRange] = useState("0-3");
const [textInput, setTextInput] = useState("");
const [isLoading, setIsLoading] = useState(false);
const keyboardOffsetHeight = useKeyboardOffsetHeight();
const router = useRouter();
const handleAgeRangeSelect = (r) => setSelectedAgeRange(r);
const handleShowResult = () => {
if (textInput.length > 5) {
fetchStory();
} else {
alert("Describe more...");
}
};
async function fetchStory() {
setIsLoading(true); // Start loading
try {
let message = `Write a simple story for kids about ${textInput} ${
selectedAgeRange ? "for age group " + selectedAgeRange : ""
}, in plain words. Only provide the story content without any headings, titles, or extra information.`;
// First request to get the story
const response = await fetch(
"https://api-inference.huggingface.co/models/meta-llama/Llama-3.2-3B-Instruct/v1/chat/completions",
{
method: "POST",
headers: {
Authorization: `Bearer ${HUGGING_FACE_KEY}`,
"Content-Type": "application/json",
},
body: JSON.stringify({
model: "meta-llama/Llama-3.2-3B-Instruct",
messages: [
{
role: "user",
content: message,
},
],
max_tokens: 500,
}),
}
);
if (!response.ok) {
throw new Error("Failed to fetch story");
}
const data = await response.json();
const storyContent = data.choices[0].message.content;
console.log("Generated Story:", message, storyContent);
// Second request to summarize the story
const summaryResponse = await fetch(
"https://api-inference.huggingface.co/models/meta-llama/Llama-3.2-3B-Instruct/v1/chat/completions",
{
method: "POST",
headers: {
Authorization: `Bearer ${HUGGING_FACE_KEY}`,
"Content-Type": "application/json",
},
body: JSON.stringify({
model: "meta-llama/Llama-3.2-3B-Instruct",
messages: [
{
role: "user",
content: `Summarize this story in a line: "${storyContent}"`,
},
],
max_tokens: 30,
}),
}
);
if (!summaryResponse.ok) {
throw new Error("Failed to fetch summary");
}
const summaryData = await summaryResponse.json();
const summaryContent = summaryData.choices[0].message.content;
if (summaryContent) {
router.push({
pathname: "/detail",
params: { story: storyContent, summary: summaryContent },
});
}
console.log("Story Summary:", summaryContent);
setTextInput(""); // Clear input after successful fetch
} catch (error) {
console.error("Error fetching story or summary:", error);
alert("Error fetching story or summary. Please try again later.");
} finally {
setIsLoading(false); // End loading
}
}
return (
<Animated.ScrollView
bounces={false}
keyboardShouldPersistTaps="handled"
keyboardDismissMode="on-drag"
contentContainerStyle={styles.container}
>
<Text style={styles.header}>Select Age Range</Text>
<View style={styles.checkboxContainer}>
{ageRanges.map((range, index) => (
<TouchableOpacity
key={index}
style={[
styles.checkbox,
selectedAgeRange === range && styles.selectedCheckbox,
]}
onPress={() => handleAgeRangeSelect(range)}
>
<Text style={styles.checkboxLabel}>{range}</Text>
</TouchableOpacity>
))}
</View>
<TextInput
style={styles.textInput}
placeholder="Make a story..."
placeholderTextColor="#888"
value={textInput}
onChangeText={setTextInput}
multiline
/>
{!keyboardOffsetHeight && (
<TouchableOpacity
style={styles.showButton}
onPress={handleShowResult}
disabled={isLoading} // Disable button while loading
>
{isLoading ? (
<ActivityIndicator color="#fff" />
) : (
<Text style={styles.showButtonText}>Show</Text>
)}
</TouchableOpacity>
)}
</Animated.ScrollView>
);
};
const styles = StyleSheet.create({
container: {
flex: 1,
padding: 20,
alignItems: "center",
backgroundColor: "#1c1c1c",
},
header: {
fontSize: 20,
fontWeight: "bold",
color: "#fff",
marginVertical: 10,
},
checkboxContainer: {
flexDirection: "row",
flexWrap: "wrap",
justifyContent: "center",
marginBottom: 20,
},
checkbox: {
width: 60,
height: 60,
borderRadius: 30,
backgroundColor: "#333",
alignItems: "center",
justifyContent: "center",
margin: 10,
},
selectedCheckbox: {
backgroundColor: "#1fb28a",
},
checkboxLabel: {
color: "#fff",
fontSize: 14,
fontWeight: "bold",
},
textInput: {
width: "100%",
height: 150,
backgroundColor: "#333",
color: "#fff",
borderRadius: 8,
padding: 10,
marginVertical: 20,
fontSize: 16,
},
showButton: {
backgroundColor: "#1fb28a",
borderRadius: 8,
paddingVertical: 10,
paddingHorizontal: 20,
marginTop: 10,
},
showButtonText: {
color: "#fff",
fontSize: 16,
fontWeight: "bold",
},
});
export default Home;
Key Components of Home.js
- Text Input and Age Selector: Allows the user to select an age range and enter a story prompt.
- Fetch Story: fetchStory interacts with the Hugging Face API to generate and then summarize a story based on the input.
- Navigation: If a story and summary are successfully fetched, the app navigates to the Detail screen to display the results.
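Home.js also imports a useKeyboardOffsetHeight hook, which the post doesn’t show; it only needs to report the keyboard height (0 when closed) so the Show button can be hidden while typing. A minimal sketch of one possible implementation using React Native’s Keyboard events:

// hooks/useKeyboardOffsetHeight.js — one possible implementation, not taken from the original app
import { useEffect, useState } from "react";
import { Keyboard } from "react-native";

const useKeyboardOffsetHeight = () => {
  const [keyboardOffsetHeight, setKeyboardOffsetHeight] = useState(0);

  useEffect(() => {
    // Track the keyboard height so the UI can react when it opens or closes
    const showSub = Keyboard.addListener("keyboardDidShow", (e) =>
      setKeyboardOffsetHeight(e.endCoordinates.height)
    );
    const hideSub = Keyboard.addListener("keyboardDidHide", () =>
      setKeyboardOffsetHeight(0)
    );
    return () => {
      showSub.remove();
      hideSub.remove();
    };
  }, []);

  return keyboardOffsetHeight;
};

export default useKeyboardOffsetHeight;

With a hook like this, !keyboardOffsetHeight in Home.js is true only while the keyboard is closed, which is why the Show button disappears during typing.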
Step 3: Displaying Story and Image on the Detail Screen
The Detail screen receives the generated story and its one-line summary as route params, uses the summary to request an AI-generated cartoon image, and displays the image above the full story text.
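Because Expo Router is file-based, the router.push({ pathname: "/detail" }) call in Home.js assumes this screen lives at app/detail.js (with the home screen at app/index.js) — adjust the paths to match your own project layout. A minimal root layout for that structure could be:

// app/_layout.js — a minimal sketch; tweak to your project structure
import { Stack } from "expo-router";

export default function RootLayout() {
  // A single stack with the native header hidden, since both screens render their own UI
  return <Stack screenOptions={{ headerShown: false }} />;
}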
Detail.js Code
import React, { useEffect, useState } from "react";
import { StyleSheet, View, Text, TouchableOpacity, ActivityIndicator, Image, ScrollView } from "react-native";
import { useLocalSearchParams, useRouter } from "expo-router";
import { HUGGING_FACE_KEY } from "../env";
import Ionicons from "@expo/vector-icons/Ionicons";
const Detail = () => {
const params = useLocalSearchParams();
const { story, summary } = params;
const [imageUri, setImageUri] = useState(null);
const [loading, setLoading] = useState(false);
const router = useRouter();
useEffect(() => {
const fetchImage = async () => {
setLoading(true);
try {
const response = await fetch("https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-xl-base-1.0", {
method: "POST",
headers: { Authorization: `Bearer ${HUGGING_FACE_KEY}`, "Content-Type": "application/json" },
body: JSON.stringify({ inputs: `Cartoonish ${summary}`, target_size: { width: 300, height: 300 } }),
});
if (!response.ok) throw new Error(`Request failed: ${response.status}`);
const blob = await response.blob();
const base64Data = await blobToBase64(blob);
setImageUri(`data:image/jpeg;base64,${base64Data}`);
} catch (error) {
console.error("Error fetching image:", error);
} finally {
setLoading(false);
}
};
fetchImage();
}, []);
const blobToBase64 = (blob) => {
return new Promise((resolve, reject) => {
const reader = new FileReader();
reader.onloadend = () => resolve(reader.result.split(",")[1]);
reader.onerror = reject;
reader.readAsDataURL(blob);
});
};
return (
<ScrollView style={styles.container}>
<TouchableOpacity onPress={() => router.back()}>
<Ionicons name="arrow-back-circle-sharp" size={50} color="yellow" />
</TouchableOpacity>
{loading ? (
<ActivityIndicator size="large" color="#ffffff" />
) : imageUri ? (
<Image source={{ uri: imageUri }} style={styles.image} />
) : (
<Text style={styles.text}>No image generated yet.</Text>
)}
<Text style={styles.header}>{story}</Text>
</ScrollView>
);
};
export default Detail;
const styles = StyleSheet.create({
container: { flex: 1, padding: 16, backgroundColor: "#1c1c1c" },
header: { fontSize: 24, color: "#ffffff", marginVertical: 16 },
text: { color: "#ffffff" },
image: { width: 300, height: 300, marginTop: 20, borderRadius: 10, alignSelf: "center" },
});
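One practical note: models on the serverless Inference API can be “cold” on the first request, and the call may fail until the model has loaded. The API documents an x-wait-for-model header that tells it to wait instead of erroring; here is a hedged variation of the image request using it (verify the header against the current Hugging Face docs before relying on it):

// A variation of the image request that waits for a cold model instead of failing fast
import { HUGGING_FACE_KEY } from "../env";

const IMAGE_MODEL_URL =
  "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-xl-base-1.0";

export async function fetchImageBlob(summary) {
  const response = await fetch(IMAGE_MODEL_URL, {
    method: "POST",
    headers: {
      Authorization: `Bearer ${HUGGING_FACE_KEY}`,
      "Content-Type": "application/json",
      "x-wait-for-model": "true", // wait for the model to load rather than returning an error
    },
    body: JSON.stringify({ inputs: `Cartoonish ${summary}` }),
  });
  if (!response.ok) throw new Error(`Request failed: ${response.status}`);
  return response.blob(); // convert to base64 with blobToBase64, as in Detail.js
}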
Wrapping Up
This app shows how user input and AI models can come together into a dynamic storytelling experience. With React Native, the Hugging Face Inference API, and Expo Router, we’ve built a simple yet capable storytelling app that entertains kids with custom-made stories and illustrations.