Building Memory-Efficient Tries
Trie structures excel at prefix searches, but memory consumption can be problematic. I optimize them using path compression and node sharing. For autocomplete systems, I store frequency counts at terminal nodes. Here's a Unicode-safe implementation handling multilingual input:
class CompactTrieNode {
  constructor() {
    this.children = new Map();
    this.isTerminal = false;
    this.frequency = 0;
  }
}

class CompactTrie {
  constructor() {
    this.root = new CompactTrieNode();
  }

  insert(word, frequency = 1) {
    let node = this.root;
    for (const char of word) {
      if (!node.children.has(char)) {
        node.children.set(char, new CompactTrieNode());
      }
      node = node.children.get(char);
    }
    node.isTerminal = true;
    node.frequency += frequency;
  }
  compress() {
    // Collapse chains of single-child, non-terminal nodes into one multi-character edge.
    const compressNode = (node) => {
      for (const [edge, child] of [...node.children]) {
        let label = edge;
        let current = child;
        while (current.children.size === 1 && !current.isTerminal) {
          const [nextEdge, nextChild] = current.children.entries().next().value;
          label += nextEdge;
          current = nextChild;
        }
        if (label !== edge) {
          node.children.delete(edge);
          node.children.set(label, current);
        }
        compressNode(current);
      }
    };
    compressNode(this.root);
  }
  autocomplete(prefix, limit = 5) {
    // Compressed edges may span several characters, so match edge prefixes.
    const search = (node, matched, remaining) => {
      if (remaining.length === 0) return this._traverse(node, matched, limit);
      for (const [segment, child] of node.children) {
        if (segment.startsWith(remaining)) {
          return this._traverse(child, matched + segment, limit);
        }
        if (remaining.startsWith(segment)) {
          return search(child, matched + segment, remaining.slice(segment.length));
        }
      }
      return [];
    };
    return search(this.root, '', prefix);
  }
  _traverse(node, current, limit) {
    let results = [];
    if (node.isTerminal) {
      results.push({ word: current, score: node.frequency });
    }
    for (const [segment, child] of node.children) {
      results = [...results, ...this._traverse(child, current + segment, limit)];
    }
    return results.sort((a, b) => b.score - a.score).slice(0, limit);
  }
}
// Usage example:
const trie = new CompactTrie();
trie.insert("こんにちは", 5); // Japanese greeting
trie.insert("你好", 8); // Chinese greeting
trie.compress();
console.log(trie.autocomplete("こ")); // [{ word: "こんにちは", score: 5 }]
Thread-Safe Skip Lists
For concurrent environments, skip lists provide probabilistic balancing. I sketch the lock-free insertion path with compare-and-set operations on atomic references; JavaScript's built-in Atomics API works on integer typed arrays rather than object references, so the wrappers imported below are assumed helpers:
// 'atomic' is a placeholder module: AtomicReference and AtomicInteger are
// assumed CAS-style wrappers (see the single-threaded stand-ins after the class).
import { AtomicReference, AtomicInteger } from 'atomic';

class SkipNode {
  constructor(value, level) {
    this.value = value;
    this.next = Array(level).fill().map(() => new AtomicReference(null));
    this.marked = new AtomicInteger(0); // 1 once the node is logically deleted
    this.fullyLinked = false;           // true once linked at every level
  }
}
class ConcurrentSkipList {
  constructor(maxLevel = 16, p = 0.5) {
    this.maxLevel = maxLevel;
    this.p = p;
    this.head = new SkipNode(-Infinity, maxLevel);
    this.tail = new SkipNode(Infinity, maxLevel);
    for (let i = 0; i < maxLevel; i++) {
      this.head.next[i].set(this.tail);
    }
  }

  _randomLevel() {
    let level = 1;
    while (Math.random() < this.p && level < this.maxLevel) level++;
    return level;
  }
  add(value) {
    const topLevel = this._randomLevel();
    const preds = Array(this.maxLevel).fill(null);
    const succs = Array(this.maxLevel).fill(null);
    while (true) {
      const found = this._find(value, preds, succs);
      if (found) return false;
      const newNode = new SkipNode(value, topLevel);
      for (let level = 0; level < topLevel; level++) {
        newNode.next[level].set(succs[level]);
      }
      // Linearization point: splice in at the bottom level first.
      if (!preds[0].next[0].compareAndSet(succs[0], newNode)) continue;
      for (let level = 1; level < topLevel; level++) {
        while (!preds[level].next[level].compareAndSet(succs[level], newNode)) {
          // Another writer changed this level; refresh preds/succs and retry.
          this._find(value, preds, succs);
          newNode.next[level].set(succs[level]);
        }
      }
      newNode.fullyLinked = true;
      return true;
    }
  }
  _find(value, preds, succs) {
    let found = false;
    let pred = this.head;
    for (let level = this.maxLevel - 1; level >= 0; level--) {
      let curr = pred.next[level].get();
      while (value > curr.value) {
        pred = curr;
        curr = pred.next[level].get();
      }
      preds[level] = pred;
      succs[level] = curr;
      if (value === curr.value) found = true;
    }
    return found;
  }
}
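Since the 'atomic' module is assumed, a pair of minimal single-threaded stand-ins, used in place of that import, makes the sketch executable; they model the compare-and-set interface but provide no real cross-thread guarantees:

// Stand-ins to use instead of the 'atomic' import (single-threaded only).
class AtomicReference {
  constructor(value = null) { this.value = value; }
  get() { return this.value; }
  set(value) { this.value = value; }
  compareAndSet(expected, next) {
    if (this.value !== expected) return false;
    this.value = next;
    return true;
  }
}

class AtomicInteger {
  constructor(value = 0) { this.value = value; }
  get() { return this.value; }
  set(value) { this.value = value; }
}

const list = new ConcurrentSkipList();
list.add(42);
list.add(7);
console.log(list.add(42)); // false: duplicate values are rejected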
Optimized Bloom Filters
Bloom filters trade accuracy for space efficiency. I tune hash functions based on error tolerance and implement deletion support:
class CountingBloomFilter {
  constructor(size, hashCount) {
    this.size = size;
    this.hashCount = hashCount;
    this.counters = new Uint16Array(size); // counters instead of bits enable deletion
  }

  // Standard sizing: m = -n*ln(p) / (ln 2)^2 and k = (m/n)*ln 2
  static calculateParameters(itemCount, errorRate) {
    const size = Math.ceil(-(itemCount * Math.log(errorRate)) / (Math.log(2) ** 2));
    const hashCount = Math.ceil((size / itemCount) * Math.log(2));
    return { size, hashCount };
  }

  _getHashes(item) {
    const hashes = [];
    for (let i = 0; i < this.hashCount; i++) {
      const hash = murmur3(`${item}${i}`, 0) % this.size;
      hashes.push(Math.abs(hash));
    }
    return hashes;
  }

  add(item) {
    this._getHashes(item).forEach(index => {
      if (this.counters[index] < 65535) this.counters[index]++; // clamp at Uint16 max
    });
  }

  remove(item) {
    const hashes = this._getHashes(item);
    if (!this.mayContain(item)) return false;
    hashes.forEach(index => this.counters[index]--);
    return true;
  }

  mayContain(item) {
    return this._getHashes(item).every(index => this.counters[index] > 0);
  }
}
// MurmurHash3 implementation (simplified)
function murmur3(key, seed) {
  let hash = seed;
  for (let i = 0; i < key.length; i++) {
    hash = Math.imul(hash ^ key.charCodeAt(i), 0xcc9e2d51);
    hash = (hash << 15) | (hash >>> 17);
    hash = Math.imul(hash, 0x1b873593);
  }
  return hash;
}
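A quick usage sketch, sizing the filter for an assumed 10,000 items at a 1% false-positive rate (both figures are illustrative):

const { size, hashCount } = CountingBloomFilter.calculateParameters(10000, 0.01);
const filter = new CountingBloomFilter(size, hashCount);

filter.add("user:42");
console.log(filter.mayContain("user:42")); // true
console.log(filter.mayContain("user:99")); // false (with high probability)

filter.remove("user:42");
console.log(filter.mayContain("user:42")); // false after removal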
Circular Buffer Enhancements
Building on the provided foundation, I add thread safety and overflow handling.
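Two helpers are assumed rather than shown here: the base CircularBuffer from that foundation, and a Mutex lock, which JavaScript does not provide natively. A minimal, illustrative version of each keeps the example self-contained:

// Illustrative stand-ins; the real base class and lock may differ.
class CircularBuffer {
  constructor(capacity) {
    this.capacity = capacity;
    this.buffer = new Array(capacity).fill(null);
    this.head = 0;
    this.tail = 0;
    this.size = 0;
  }
  resize(newCapacity) {
    const items = [];
    for (let i = 0; i < this.size; i++) {
      items.push(this.buffer[(this.head + i) % this.capacity]);
    }
    this.buffer = new Array(newCapacity).fill(null);
    items.forEach((item, i) => { this.buffer[i] = item; });
    this.capacity = newCapacity;
    this.head = 0;
    this.tail = items.length;
  }
}

// Promise-based lock exposing the acquire()/release() shape the buffer expects.
class Mutex {
  constructor() {
    this._locked = false;
    this._waiters = [];
  }
  acquire() {
    if (!this._locked) {
      this._locked = true;
      return Promise.resolve();
    }
    return new Promise(resolve => this._waiters.push(resolve));
  }
  release() {
    const next = this._waiters.shift();
    if (next) next();           // hand the lock to the next waiter
    else this._locked = false;
  }
}

With those helpers in place, the concurrent wrapper adds locked reads and writes plus an overflow policy: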
class ConcurrentCircularBuffer extends CircularBuffer {
  constructor(capacity) {
    super(capacity);
    this.writeLock = new Mutex();
    this.readLock = new Mutex();
  }
  async write(data) {
    await this.writeLock.acquire();
    try {
      if (this.size === this.capacity) {
        this.resize(this.capacity * 2);
      }
      this.buffer[this.tail] = data;
      this.tail = (this.tail + 1) % this.capacity;
      this.size++;
    } finally {
      this.writeLock.release();
    }
  }

  async read() {
    await this.readLock.acquire();
    try {
      if (this.size === 0) return null;
      const data = this.buffer[this.head];
      this.buffer[this.head] = null;
      this.head = (this.head + 1) % this.capacity;
      this.size--;
      return data;
    } finally {
      this.readLock.release();
    }
  }
  async handleOverflow(data, policy = 'drop') {
    switch (policy) {
      case 'drop':
        return false;
      case 'overwrite':
        await this.read();  // discard the oldest entry
        await this.write(data);
        return true;
      case 'error':
        throw new Error('Buffer overflow');
      default:
        return false;
    }
  }
}
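A short usage sketch with the stand-ins above; the third write triggers the doubling resize:

(async () => {
  const buffer = new ConcurrentCircularBuffer(2);
  await buffer.write('a');
  await buffer.write('b');
  await buffer.write('c');          // buffer is full, so capacity doubles to 4
  console.log(await buffer.read()); // 'a'
  console.log(buffer.capacity);     // 4
})();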
Advanced Disjoint-Set Forests
Disjoint-set forests benefit from path compression and union by rank; versioned snapshots also make earlier states restorable:
class DisjointSet {
  constructor(size) {
    this.parent = Array.from({ length: size }, (_, i) => i);
    this.rank = new Uint8Array(size);
    this.version = 0;
    this.snapshots = new Map();
  }
  find(x) {
    let root = x;
    // Locate the root of x's set
    while (root !== this.parent[root]) {
      root = this.parent[root];
    }
    // Path compression: point every node on the path directly at the root
    while (x !== root) {
      const next = this.parent[x];
      this.parent[x] = root;
      x = next;
    }
    return root;
  }
  union(x, y) {
    const rootX = this.find(x);
    const rootY = this.find(y);
    if (rootX === rootY) return false;
    // Union by rank
    if (this.rank[rootX] < this.rank[rootY]) {
      this.parent[rootX] = rootY;
    } else if (this.rank[rootX] > this.rank[rootY]) {
      this.parent[rootY] = rootX;
    } else {
      this.parent[rootY] = rootX;
      this.rank[rootX]++;
    }
    this.version++;
    return true;
  }

  snapshot() {
    this.snapshots.set(this.version, {
      parent: [...this.parent],
      rank: Uint8Array.from(this.rank)
    });
  }

  restore(version) {
    const snapshot = this.snapshots.get(version);
    if (snapshot) {
      this.parent = [...snapshot.parent];
      this.rank = Uint8Array.from(snapshot.rank);
      this.version = version;
    }
  }
}
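A short usage sketch: connect a few elements, snapshot, then roll back.

const ds = new DisjointSet(6);
ds.union(0, 1);
ds.union(1, 2);
ds.snapshot();                          // records state at version 2
ds.union(2, 3);
console.log(ds.find(0) === ds.find(3)); // true
ds.restore(2);
console.log(ds.find(0) === ds.find(3)); // false after rolling back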
Spatial Partitioning for Collision Detection
Quadtrees efficiently handle spatial queries:
class Quadtree {
  constructor(bounds, capacity = 4) {
    this.bounds = bounds; // { x, y, width, height }
    this.capacity = capacity;
    this.points = [];
    this.divided = false;
  }

  subdivide() {
    const { x, y, width, height } = this.bounds;
    const halfWidth = width / 2;
    const halfHeight = height / 2;
    this.northeast = new Quadtree(
      { x: x + halfWidth, y, width: halfWidth, height: halfHeight },
      this.capacity
    );
    this.northwest = new Quadtree(
      { x, y, width: halfWidth, height: halfHeight },
      this.capacity
    );
    this.southeast = new Quadtree(
      { x: x + halfWidth, y: y + halfHeight, width: halfWidth, height: halfHeight },
      this.capacity
    );
    this.southwest = new Quadtree(
      { x, y: y + halfHeight, width: halfWidth, height: halfHeight },
      this.capacity
    );
    this.divided = true;
  }

  insert(point) {
    if (!this._contains(point)) return false;
    if (this.points.length < this.capacity) {
      this.points.push(point);
      return true;
    }
    if (!this.divided) this.subdivide();
    return (
      this.northeast.insert(point) ||
      this.northwest.insert(point) ||
      this.southeast.insert(point) ||
      this.southwest.insert(point)
    );
  }
  query(range, found = []) {
    if (!this._intersects(range)) return found;
    for (const p of this.points) {
      if (this._pointInRect(p, range)) found.push(p);
    }
    if (this.divided) {
      this.northeast.query(range, found);
      this.northwest.query(range, found);
      this.southeast.query(range, found);
      this.southwest.query(range, found);
    }
    return found;
  }

  _contains(point) {
    const { x, y, width, height } = this.bounds;
    return (
      point.x >= x &&
      point.x <= x + width &&
      point.y >= y &&
      point.y <= y + height
    );
  }

  // Helpers used by query(): node/range overlap and point-in-range tests
  _intersects(range) {
    const { x, y, width, height } = this.bounds;
    return !(range.x > x + width || range.x + range.width < x ||
             range.y > y + height || range.y + range.height < y);
  }

  _pointInRect(p, r) {
    return p.x >= r.x && p.x <= r.x + r.width &&
           p.y >= r.y && p.y <= r.y + r.height;
  }
}
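A quick usage sketch, inserting arbitrary points into a 200x200 region and querying a small window:

const qt = new Quadtree({ x: 0, y: 0, width: 200, height: 200 });
for (let i = 0; i < 50; i++) {
  qt.insert({ x: Math.random() * 200, y: Math.random() * 200 });
}
const nearby = qt.query({ x: 80, y: 80, width: 40, height: 40 });
console.log(`${nearby.length} points fall inside the 40x40 window`);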
Hybrid Cache Implementation
Combining LRU and LFU strategies creates balanced caching:
// DoublyLinkedList and ListNode are assumed helpers (addFront, moveToFront,
// remove); a minimal version is sketched after the class.
class HybridCache {
  constructor(capacity) {
    this.capacity = capacity;
    this.size = 0;
    this.frequencyMap = new Map(); // frequency -> Set of nodes
    this.recencyList = new DoublyLinkedList();
    this.nodeMap = new Map();      // key -> node
  }
  get(key) {
    if (!this.nodeMap.has(key)) return null;
    const node = this.nodeMap.get(key);
    this._updateFrequency(node);
    this.recencyList.moveToFront(node);
    return node.value;
  }
  put(key, value, size = 1) {
    if (this.nodeMap.has(key)) {
      const node = this.nodeMap.get(key);
      node.value = value;
      this._updateFrequency(node);
      this.recencyList.moveToFront(node);
      return;
    }
    // Evict until the new entry fits
    while (this.size + size > this.capacity && this.nodeMap.size > 0) {
      this._evict();
    }
    const node = new ListNode(key, value, size);
    node.frequency = 1;
    this.nodeMap.set(key, node);
    this.recencyList.addFront(node);
    if (!this.frequencyMap.has(1)) {
      this.frequencyMap.set(1, new Set());
    }
    this.frequencyMap.get(1).add(node);
    this.size += size;
  }
  _updateFrequency(node) {
    const oldFreq = node.frequency;
    const newFreq = oldFreq + 1;
    this.frequencyMap.get(oldFreq).delete(node);
    if (this.frequencyMap.get(oldFreq).size === 0) {
      this.frequencyMap.delete(oldFreq);
    }
    node.frequency = newFreq;
    if (!this.frequencyMap.has(newFreq)) {
      this.frequencyMap.set(newFreq, new Set());
    }
    this.frequencyMap.get(newFreq).add(node);
  }

  _evict() {
    const minFreq = Math.min(...this.frequencyMap.keys());
    const candidates = this.frequencyMap.get(minFreq);
    const victim = candidates.values().next().value;
    candidates.delete(victim);
    if (candidates.size === 0) {
      this.frequencyMap.delete(minFreq);
    }
    this.recencyList.remove(victim);
    this.nodeMap.delete(victim.key);
    this.size -= victim.size;
  }
}
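The cache relies on DoublyLinkedList and ListNode helpers that aren't shown above; here is a minimal sketch of each (illustrative, not production-grade) plus a usage example:

// Minimal stand-ins for the assumed list helpers.
class ListNode {
  constructor(key, value, size = 1) {
    this.key = key;
    this.value = value;
    this.size = size;
    this.frequency = 0;
    this.prev = null;
    this.next = null;
  }
}

class DoublyLinkedList {
  constructor() {
    this.head = null; // most recently used
    this.tail = null; // least recently used
  }
  addFront(node) {
    node.prev = null;
    node.next = this.head;
    if (this.head) this.head.prev = node;
    this.head = node;
    if (!this.tail) this.tail = node;
  }
  remove(node) {
    if (node.prev) node.prev.next = node.next; else this.head = node.next;
    if (node.next) node.next.prev = node.prev; else this.tail = node.prev;
    node.prev = node.next = null;
  }
  moveToFront(node) {
    this.remove(node);
    this.addFront(node);
  }
}

// Usage: 'b' is evicted because it has the lowest access frequency.
const cache = new HybridCache(2);
cache.put('a', 1);
cache.put('b', 2);
cache.get('a');
cache.put('c', 3);
console.log(cache.get('b')); // null
console.log(cache.get('a')); // 1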
Persistent Structures via Sharing
Immutable structures optimize memory through subtree reuse:
class PersistentTreeNode {
  constructor(value, left = null, right = null) {
    this.value = value;
    this.left = left;
    this.right = right;
    this.size = 1 + (left?.size || 0) + (right?.size || 0);
  }
}

class PersistentTree {
  constructor(root = null) {
    this.root = root;
    this.version = 0;
    this.history = new Map(); // version -> root node
  }

  current() {
    return this.root;
  }

  insert(value) {
    // Rebuild only the path from root to the new leaf; untouched subtrees are shared.
    const insertNode = (node) => {
      if (!node) return new PersistentTreeNode(value);
      if (value < node.value) {
        return new PersistentTreeNode(node.value, insertNode(node.left), node.right);
      }
      return new PersistentTreeNode(node.value, node.left, insertNode(node.right));
    };
    this.root = insertNode(this.root);
    this.history.set(++this.version, this.root);
    return this;
  }

  getAtVersion(version) {
    return this.history.get(version) || null;
  }
  merge(otherTree) {
    // Re-insert the other tree's values; each persistent insert keeps BST
    // ordering and still shares the subtrees it does not touch.
    const collectValues = (node, out = []) => {
      if (!node) return out;
      collectValues(node.left, out);
      out.push(node.value);
      collectValues(node.right, out);
      return out;
    };
    for (const value of collectValues(otherTree.root)) {
      this.insert(value);
    }
    return this;
  }
}
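A brief usage sketch showing version history and subtree sharing between consecutive versions:

const tree = new PersistentTree();
tree.insert(10).insert(5).insert(15);
const v2 = tree.getAtVersion(2);  // root after inserting 10 and 5
const v3 = tree.getAtVersion(3);
console.log(v2.size, v3.size);    // 2 3
console.log(v3.left === v2.left); // true: the untouched left subtree is shared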
Each technique addresses specific performance challenges in JavaScript applications. When implementing these, consider your specific access patterns and data characteristics. For production use, always include comprehensive tests covering edge cases like concurrency collisions, memory boundaries, and invalid inputs. Start with simpler structures like circular buffers before progressing to complex systems like persistent trees. Measure real-world performance using browser profiling tools to validate optimizations.