If you keep storing an unbounded number of entries in a HashMap, the garbage collector can never reclaim them, because the map holds a strong reference to every key and value. So at some point you will get an OutOfMemoryError.
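To see this for yourself, a minimal sketch like the following will eventually die with java.lang.OutOfMemoryError: Java heap space. The class name is just for illustration; run it with a small heap, e.g. java -Xmx64m UnboundedMapDemo, so the crash comes quickly.

import java.util.HashMap;
import java.util.Map;

public class UnboundedMapDemo
{
    public static void main(String[] args)
    {
        Map<Long, String> map = new HashMap<>();
        long i = 0;
        while (true)
        {
            // every entry stays strongly reachable through the map,
            // so the garbage collector can never reclaim any of it
            map.put(i, "value-" + i);
            i++;
        }
        // eventually: java.lang.OutOfMemoryError: Java heap space
    }
}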
Below is an example heap profile (captured with VisualVM).

So I changed a few places inside the HashMap implementation to let the GC kick in. The strategy is to use a CachedHashMap that evicts its oldest entries once a cap is reached. After that change, my profile looks like the following.
As you can see above, GC now kicks in regularly. See the spikes: that sawtooth pattern means GC is doing its job.
import java.util.AbstractMap;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.Objects;
import java.util.Set;
import java.util.function.BiConsumer;
import java.util.function.BiFunction;
import java.util.function.Function;

public class CachedHashMap<K, V> implements Map<K, V>
{
    private int maxEntries = 1000;
    private final LinkedList<K> insertionOrder = new LinkedList<>(); // track keys for FIFO eviction

    private static int nextPowerOfTwo(int n)
    {
        if (n <= 1) return 1;
        return Integer.highestOneBit(n - 1) << 1;
    }

    private static int previousPowerOfTwo(int n)
    {
        return Integer.highestOneBit(n);
    }

    private static class Entry<K, V>
    {
        final int hash;
        final K key;
        V value;
        Entry<K, V> next;

        Entry(int hash, K key, V value, Entry<K, V> next)
        {
            this.hash = hash;
            this.key = key;
            this.value = value;
            this.next = next;
        }
    }

    private static final int MIN_CAPACITY = 16;
    private static final float LOAD_FACTOR = 0.75f;
    private static final float SHRINK_FACTOR = 0.25f;

    private Entry<K, V>[] m_Table;
    private int m_Size = 0;
    private int m_Capacity;

    @SuppressWarnings("unchecked")
    public CachedHashMap(int initialCapacity)
    {
        m_Capacity = nextPowerOfTwo(Math.max(MIN_CAPACITY, initialCapacity));
        m_Table = (Entry<K, V>[]) new Entry[m_Capacity]; // safe: after type erasure the table just holds object references
    }

    @SuppressWarnings("unchecked")
    public CachedHashMap(int initialCapacity, int maxEntries)
    {
        m_Capacity = nextPowerOfTwo(Math.max(MIN_CAPACITY, initialCapacity));
        m_Table = (Entry<K, V>[]) new Entry[m_Capacity]; // safe: after type erasure the table just holds object references
        this.maxEntries = maxEntries;
    }

    public CachedHashMap()
    {
        this(MIN_CAPACITY);
    }

    public CachedHashMap(Map<? extends K, ? extends V> map)
    {
        this(map != null ? map.size() : MIN_CAPACITY);
        if (map != null)
        {
            putAll(map);
        }
    }

    public CachedHashMap(Collection<? extends Map.Entry<? extends K, ? extends V>> entries)
    {
        this(entries != null ? entries.size() : MIN_CAPACITY);
        if (entries != null)
        {
            for (Map.Entry<? extends K, ? extends V> entry : entries)
            {
                put(entry.getKey(), entry.getValue());
            }
        }
    }

    @Override
    public int size()
    {
        return m_Size;
    }

    public int capacity()
    {
        return m_Capacity;
    }

    @Override
    public boolean isEmpty()
    {
        return m_Size == 0;
    }

    @Override
    public Collection<V> values()
    {
        List<V> values = new ArrayList<>();
        for (Entry<K, V> bucket : m_Table)
        {
            Entry<K, V> e = bucket;
            while (e != null)
            {
                values.add(e.value);
                e = e.next;
            }
        }
        return values;
    }

    @Override
    public boolean containsValue(Object value)
    {
        for (Entry<K, V> bucket : m_Table)
        {
            Entry<K, V> e = bucket;
            while (e != null)
            {
                if (Objects.equals(e.value, value))
                {
                    return true;
                }
                e = e.next;
            }
        }
        return false;
    }

    @Override
    public boolean containsKey(Object key)
    {
        int hash = hash(key);
        int index = indexFor(hash, m_Table.length);
        for (Entry<K, V> e = m_Table[index]; e != null; e = e.next)
        {
            if (e.hash == hash && Objects.equals(e.key, key))
            {
                return true;
            }
        }
        return false;
    }

    public boolean hasKey(K key)
    {
        return get(key) != null;
    }

    @Override
    public V get(Object key)
    {
        int hash = hash(key);
        int index = indexFor(hash, m_Table.length);
        Entry<K, V> e = m_Table[index];
        while (e != null)
        {
            if (e.hash == hash && Objects.equals(e.key, key))
            {
                return e.value;
            }
            e = e.next;
        }
        return null;
    }

    @Override
    public V getOrDefault(Object key, V defaultValue)
    {
        int hash = hash(key);
        int index = indexFor(hash, m_Table.length);
        Entry<K, V> e = m_Table[index];
        while (e != null)
        {
            if (e.hash == hash && Objects.equals(e.key, key))
            {
                return e.value;
            }
            e = e.next;
        }
        return defaultValue;
    }

    @Override
    public void forEach(BiConsumer<? super K, ? super V> action)
    {
        if (action == null)
            return;
        for (Entry<K, V> bucket : m_Table)
        {
            Entry<K, V> e = bucket;
            while (e != null)
            {
                action.accept(e.key, e.value);
                e = e.next;
            }
        }
    }

    public V fetch(K key)
    {
        return get(key); // typed convenience alias for get()
    }

    @Override
    public V put(K key, V value)
    {
        int hash = hash(key); // compute the spread hash
        int index = indexFor(hash, m_Table.length);
        for (Entry<K, V> e = m_Table[index]; e != null; e = e.next)
        {
            if (e.hash == hash && Objects.equals(e.key, key))
            {
                V old = e.value;
                e.value = value; // existing key: just replace the value
                return old;
            }
        }
        insertionOrder.addLast(key); // only reached for new keys: track order for eviction
        m_Table[index] = new Entry<>(hash, key, value, m_Table[index]);
        m_Size++;
        tryResize();
        evictOldestIfNeeded();
        return null;
    }

    private void evictOldestIfNeeded()
    {
        // FIFO eviction: drop the oldest inserted keys until we are back under the cap
        while (m_Size > maxEntries)
        {
            K oldest = insertionOrder.removeFirst();
            remove(oldest); // go through the map's own remove method
        }
    }

    @Override
    public void putAll(Map<? extends K, ? extends V> m)
    {
        for (Map.Entry<? extends K, ? extends V> entry : m.entrySet())
        {
            put(entry.getKey(), entry.getValue());
        }
    }

    @Override
    public V remove(Object key)
    {
        int hash = hash(key);
        int index = indexFor(hash, m_Table.length);
        Entry<K, V> prev = null;
        Entry<K, V> e = m_Table[index];
        while (e != null)
        {
            if (e.hash == hash && Objects.equals(e.key, key))
            {
                if (prev == null)
                {
                    m_Table[index] = e.next;
                }
                else
                {
                    prev.next = e.next;
                }
                V oldValue = e.value;
                e.next = null; // fully unlink so the entry is garbage-collectable
                insertionOrder.remove(key); // keep the eviction queue consistent (linear scan; no-op during eviction)
                m_Size--;
                tryResize();
                return oldValue;
            }
            prev = e;
            e = e.next;
        }
        return null;
    }

    public V removeKey(K key)
    {
        return remove(key); // typed convenience alias for remove()
    }

    @Override
    public void clear()
    {
        Arrays.fill(m_Table, null);
        insertionOrder.clear(); // forget the eviction bookkeeping too
        m_Size = 0;
        tryResize(); // shrink back to the minimum capacity
    }

    @Override
    public Set<K> keySet()
    {
        Set<K> keys = new HashSet<>();
        for (Entry<K, V> bucket : m_Table)
        {
            Entry<K, V> e = bucket;
            while (e != null)
            {
                keys.add(e.key);
                e = e.next;
            }
        }
        return keys;
    }

    @Override
    public Set<Map.Entry<K, V>> entrySet()
    {
        Set<Map.Entry<K, V>> entries = new HashSet<>();
        for (Entry<K, V> bucket : m_Table)
        {
            Entry<K, V> e = bucket;
            while (e != null)
            {
                entries.add(new AbstractMap.SimpleImmutableEntry<>(e.key, e.value));
                e = e.next;
            }
        }
        return entries;
    }

    public Iterator<Map.Entry<K, V>> iterator()
    {
        return new EntryIterator();
    }

    private int hash(Object key)
    {
        int h = key == null ? 0 : key.hashCode();
        return h ^ (h >>> 16); // same spreader as java.util.HashMap
    }

    private int indexFor(int hash, int length)
    {
        return hash & (length - 1); // length is always a power of two
    }

    private void tryResize() // grows or shrinks the table
    {
        if (m_Size > m_Capacity * LOAD_FACTOR)
        {
            resize(nextPowerOfTwo(m_Capacity * 2));
        }
        else if (m_Capacity > MIN_CAPACITY && m_Size < m_Capacity * SHRINK_FACTOR)
        {
            // shrink by halving down to the nearest power of two
            resize(Math.max(previousPowerOfTwo(m_Capacity / 2), MIN_CAPACITY));
        }
    }

    @Override
    public boolean remove(Object key, Object value)
    {
        int hash = hash(key);
        int index = indexFor(hash, m_Table.length);
        Entry<K, V> prev = null;
        Entry<K, V> e = m_Table[index];
        while (e != null)
        {
            if (e.hash == hash && Objects.equals(e.key, key) && Objects.equals(e.value, value))
            {
                if (prev == null)
                {
                    m_Table[index] = e.next;
                }
                else
                {
                    prev.next = e.next;
                }
                insertionOrder.remove(key); // keep the eviction queue consistent
                m_Size--;
                tryResize();
                return true;
            }
            prev = e;
            e = e.next;
        }
        return false;
    }

    @Override
    public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction)
    {
        Objects.requireNonNull(mappingFunction);
        int hash = hash(key);
        int index = indexFor(hash, m_Table.length);
        for (Entry<K, V> e = m_Table[index]; e != null; e = e.next)
        {
            if (e.hash == hash && Objects.equals(e.key, key))
            {
                return e.value; // already present
            }
        }
        V newValue = mappingFunction.apply(key);
        if (newValue != null)
        {
            insertionOrder.addLast(key); // every insertion path must feed the eviction queue
            m_Table[index] = new Entry<>(hash, key, newValue, m_Table[index]);
            m_Size++;
            tryResize();
            evictOldestIfNeeded();
            return newValue;
        }
        return null;
    }

    @Override
    public V compute(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction)
    {
        Objects.requireNonNull(remappingFunction);
        int hash = hash(key);
        int index = indexFor(hash, m_Table.length);
        Entry<K, V> prev = null;
        Entry<K, V> e = m_Table[index];
        while (e != null)
        {
            if (e.hash == hash && Objects.equals(e.key, key))
            {
                V newValue = remappingFunction.apply(key, e.value);
                if (newValue == null)
                {
                    // remove the mapping
                    if (prev == null) m_Table[index] = e.next;
                    else prev.next = e.next;
                    insertionOrder.remove(key); // keep the eviction queue consistent
                    m_Size--;
                    tryResize();
                    return null;
                }
                else
                {
                    e.value = newValue;
                    return newValue;
                }
            }
            prev = e;
            e = e.next;
        }
        V newValue = remappingFunction.apply(key, null);
        if (newValue != null)
        {
            insertionOrder.addLast(key); // every insertion path must feed the eviction queue
            m_Table[index] = new Entry<>(hash, key, newValue, m_Table[index]);
            m_Size++;
            tryResize();
            evictOldestIfNeeded();
            return newValue;
        }
        return null;
    }

    @Override
    public V computeIfPresent(K key,
            BiFunction<? super K, ? super V, ? extends V> remappingFunction)
    {
        Objects.requireNonNull(remappingFunction);
        int hash = hash(key);
        int index = indexFor(hash, m_Table.length);
        Entry<K, V> e = m_Table[index];
        while (e != null)
        {
            if (e.hash == hash && Objects.equals(e.key, key))
            {
                V newValue = remappingFunction.apply(key, e.value);
                if (newValue == null)
                {
                    remove(key); // also updates the eviction queue
                    return null;
                }
                else
                {
                    e.value = newValue;
                    return newValue;
                }
            }
            e = e.next;
        }
        return null;
    }

    @Override
    public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction)
    {
        Objects.requireNonNull(remappingFunction);
        int hash = hash(key);
        int index = indexFor(hash, m_Table.length);
        Entry<K, V> e = m_Table[index];
        while (e != null)
        {
            if (e.hash == hash && Objects.equals(e.key, key))
            {
                V newValue = remappingFunction.apply(e.value, value);
                if (newValue == null)
                {
                    remove(key); // also updates the eviction queue, and resizes if needed
                    return null;
                }
                e.value = newValue;
                return newValue;
            }
            e = e.next;
        }
        if (value != null)
        {
            insertionOrder.addLast(key); // every insertion path must feed the eviction queue
            m_Table[index] = new Entry<>(hash, key, value, m_Table[index]);
            m_Size++;
            tryResize();
            evictOldestIfNeeded();
            return value;
        }
        return null;
    }

    @SuppressWarnings("unchecked")
    private void resize(int newCapacity)
    {
        Entry<K, V>[] oldTable = m_Table;
        Entry<K, V>[] newTable = (Entry<K, V>[]) new Entry[newCapacity];
        for (Entry<K, V> e : oldTable)
        {
            while (e != null)
            {
                Entry<K, V> next = e.next;
                int index = indexFor(e.hash, newCapacity);
                e.next = newTable[index];
                newTable[index] = e;
                e = next;
            }
        }
        m_Table = newTable; // the old table is now unreachable, so it can be garbage collected
        m_Capacity = newCapacity;
    }

    private class EntryIterator implements Iterator<Map.Entry<K, V>>
    {
        int bucketIndex = 0;
        Entry<K, V> current = null;

        @Override
        public boolean hasNext()
        {
            while (current == null && bucketIndex < m_Table.length)
            {
                current = m_Table[bucketIndex++];
            }
            return current != null;
        }

        @Override
        public Map.Entry<K, V> next()
        {
            if (!hasNext())
                throw new NoSuchElementException();
            Entry<K, V> e = current;
            current = current.next;
            return new AbstractMap.SimpleImmutableEntry<>(e.key, e.value);
        }
    }

    @Override
    public boolean replace(K key, V oldValue, V newValue)
    {
        int hash = hash(key);
        int index = indexFor(hash, m_Table.length);
        Entry<K, V> e = m_Table[index];
        while (e != null)
        {
            if (e.hash == hash && Objects.equals(e.key, key) && Objects.equals(e.value, oldValue))
            {
                e.value = newValue;
                return true;
            }
            e = e.next;
        }
        return false;
    }

    @Override
    public V replace(K key, V newValue)
    {
        int hash = hash(key);
        int index = indexFor(hash, m_Table.length);
        Entry<K, V> e = m_Table[index];
        while (e != null)
        {
            if (e.hash == hash && Objects.equals(e.key, key))
            {
                V oldValue = e.value;
                e.value = newValue;
                return oldValue;
            }
            e = e.next;
        }
        return null;
    }

    @Override
    public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function)
    {
        if (function == null)
            return;
        for (int i = 0; i < m_Table.length; i++)
        {
            Entry<K, V> e = m_Table[i];
            while (e != null)
            {
                e.value = function.apply(e.key, e.value);
                e = e.next;
            }
        }
    }
}
This is my CachedHashMap implementation. The core idea is the bounded insertion-order list:

insertionOrder.addLast(key); // only reached for new keys: track order for eviction
....
while (m_Size > maxEntries)
{
    K oldest = insertionOrder.removeFirst();
    remove(oldest); // go through the map's own remove method
}

This is useful when the incoming data is effectively infinite and the map doesn't need to hold historical data. Eviction is FIFO: the oldest inserted entry is always dropped first.
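Here is a quick usage sketch (the capacity and cap values are just illustrative):

public class CachedHashMapDemo
{
    public static void main(String[] args)
    {
        // keep at most 1,000 entries, starting from the minimum capacity
        CachedHashMap<Long, String> cache = new CachedHashMap<>(16, 1000);
        for (long i = 0; i < 1_000_000L; i++)
        {
            cache.put(i, "value-" + i);
        }
        System.out.println(cache.size());                // 1000
        System.out.println(cache.containsKey(0L));       // false: evicted long ago
        System.out.println(cache.containsKey(999_999L)); // true: still in the newest 1,000
    }
}

Because eviction goes through the map's own remove method, every evicted entry is fully unlinked and becomes garbage-collectable, which is exactly what produces the sawtooth pattern in the second heap profile.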
