Managing memory efficiently in WebGL data visualizations remains one of the most critical aspects of building performant applications. I've learned through experience that improper buffer management can quickly lead to memory leaks and degraded performance, especially when dealing with large datasets that change frequently.
The key to successful memory management lies in implementing buffer reuse strategies. Rather than creating new vertex buffers for every data update, I maintain a pool of pre-allocated buffers that can be repurposed. This approach significantly reduces garbage collection pressure and eliminates allocation overhead during runtime.
```javascript
class MemoryManager {
  constructor(gl) {
    this.gl = gl;
    this.bufferPool = new Map();   // free buffers keyed by size/usage
    this.texturePool = new Map();
    this.activeBuffers = new Set();
  }

  // Return a pooled buffer of the requested size, or allocate a new one.
  getBuffer(size, usage = this.gl.STATIC_DRAW) {
    const key = `${size}_${usage}`;
    if (this.bufferPool.has(key)) {
      const buffers = this.bufferPool.get(key);
      if (buffers.length > 0) {
        const buffer = buffers.pop();
        this.activeBuffers.add(buffer);
        return buffer;
      }
    }
    const buffer = this.gl.createBuffer();
    this.gl.bindBuffer(this.gl.ARRAY_BUFFER, buffer);
    this.gl.bufferData(this.gl.ARRAY_BUFFER, size, usage);
    this.activeBuffers.add(buffer);
    return buffer;
  }

  // Move a buffer back into the pool instead of deleting it.
  releaseBuffer(buffer) {
    if (!this.activeBuffers.has(buffer)) return;
    this.activeBuffers.delete(buffer);
    // Query size and usage so the buffer lands in the right pool bucket
    this.gl.bindBuffer(this.gl.ARRAY_BUFFER, buffer);
    const size = this.gl.getBufferParameter(this.gl.ARRAY_BUFFER, this.gl.BUFFER_SIZE);
    const usage = this.gl.getBufferParameter(this.gl.ARRAY_BUFFER, this.gl.BUFFER_USAGE);
    const key = `${size}_${usage}`;
    if (!this.bufferPool.has(key)) {
      this.bufferPool.set(key, []);
    }
    this.bufferPool.get(key).push(buffer);
  }

  // Write new data into an existing allocation without reallocating.
  updateBufferData(buffer, data, offset = 0) {
    this.gl.bindBuffer(this.gl.ARRAY_BUFFER, buffer);
    this.gl.bufferSubData(this.gl.ARRAY_BUFFER, offset, data);
  }
}
```
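To make the pool concrete, here is a minimal sketch of how it might drive a per-frame update loop; the 256 KB bucket size and the `drawScene` helper are illustrative assumptions, not part of the manager itself:

```javascript
// Hypothetical usage: reuse one pooled allocation across data updates.
// gl: an existing WebGL2RenderingContext.
const manager = new MemoryManager(gl);

function updateFrame(pointData /* Float32Array, <= 256 KB */) {
  // Any update that fits this bucket reuses the same GPU allocation
  const buffer = manager.getBuffer(256 * 1024, gl.DYNAMIC_DRAW);
  manager.updateBufferData(buffer, pointData);
  drawScene(buffer, pointData.length / 3); // assumed render helper
  manager.releaseBuffer(buffer);           // back to the pool, not deleted
}
```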
Shader program compilation represents another area where performance gains can be substantial. I've found that creating a robust shader management system prevents redundant compilations and enables dynamic shader swapping without performance penalties.
```javascript
class ShaderManager {
  constructor(gl) {
    this.gl = gl;
    this.programs = new Map();
    this.shaders = new Map();
    this.activeProgram = null;
  }

  createProgram(vertexSource, fragmentSource, defines = {}) {
    const key = this.generateProgramKey(vertexSource, fragmentSource, defines);
    if (this.programs.has(key)) {
      return this.programs.get(key);
    }
    const processedVertexSource = this.preprocessShader(vertexSource, defines);
    const processedFragmentSource = this.preprocessShader(fragmentSource, defines);
    const vertexShader = this.getOrCreateShader(this.gl.VERTEX_SHADER, processedVertexSource);
    const fragmentShader = this.getOrCreateShader(this.gl.FRAGMENT_SHADER, processedFragmentSource);
    const program = this.gl.createProgram();
    this.gl.attachShader(program, vertexShader);
    this.gl.attachShader(program, fragmentShader);
    this.gl.linkProgram(program);
    if (!this.gl.getProgramParameter(program, this.gl.LINK_STATUS)) {
      const error = this.gl.getProgramInfoLog(program);
      throw new Error(`Program linking failed: ${error}`);
    }
    this.programs.set(key, program);
    return program;
  }

  // Skip redundant gl.useProgram calls by tracking the active program.
  useProgram(program) {
    if (this.activeProgram !== program) {
      this.gl.useProgram(program);
      this.activeProgram = program;
    }
  }

  preprocessShader(source, defines) {
    let processed = source;
    for (const [key, value] of Object.entries(defines)) {
      const regex = new RegExp(`#define\\s+${key}\\s+.*`, 'g');
      const replacement = `#define ${key} ${value}`;
      if (processed.includes(`#define ${key}`)) {
        processed = processed.replace(regex, replacement);
      } else if (processed.startsWith('#version')) {
        // #version must stay on the first line, so inject after it
        const newlineIndex = processed.indexOf('\n');
        processed = processed.slice(0, newlineIndex + 1) +
          `${replacement}\n` + processed.slice(newlineIndex + 1);
      } else {
        processed = `${replacement}\n${processed}`;
      }
    }
    return processed;
  }

  generateProgramKey(vertexSource, fragmentSource, defines) {
    const definesString = Object.entries(defines)
      .sort(([a], [b]) => a.localeCompare(b))
      .map(([key, value]) => `${key}:${value}`)
      .join('|');
    // Hash the sources rather than using their lengths, which can collide
    return `${this.hashCode(vertexSource)}_${this.hashCode(fragmentSource)}_${definesString}`;
  }

  getOrCreateShader(type, source) {
    const key = `${type}_${this.hashCode(source)}`;
    if (this.shaders.has(key)) {
      return this.shaders.get(key);
    }
    const shader = this.gl.createShader(type);
    this.gl.shaderSource(shader, source);
    this.gl.compileShader(shader);
    if (!this.gl.getShaderParameter(shader, this.gl.COMPILE_STATUS)) {
      const error = this.gl.getShaderInfoLog(shader);
      throw new Error(`Shader compilation failed: ${error}`);
    }
    this.shaders.set(key, shader);
    return shader;
  }

  // Simple 32-bit string hash for cache keys.
  hashCode(str) {
    let hash = 0;
    for (let i = 0; i < str.length; i++) {
      const char = str.charCodeAt(i);
      hash = ((hash << 5) - hash) + char;
      hash = hash & hash; // clamp to a 32-bit integer
    }
    return hash;
  }
}
```
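A quick sketch of how the cache pays off when toggling a feature flag through defines; `vertSrc` and `fragSrc` stand in for any GLSL source strings, and `USE_LOG_SCALE` is a hypothetical define:

```javascript
// Hypothetical usage: same sources, different defines → distinct cached programs.
const shaders = new ShaderManager(gl);

const linearProgram = shaders.createProgram(vertSrc, fragSrc, { USE_LOG_SCALE: 0 });
const logProgram = shaders.createProgram(vertSrc, fragSrc, { USE_LOG_SCALE: 1 });

// A repeated request compiles nothing and returns the cache hit
console.assert(shaders.createProgram(vertSrc, fragSrc, { USE_LOG_SCALE: 0 }) === linearProgram);
shaders.useProgram(logProgram); // no-op if logProgram is already active
```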
Geometry instancing transforms the way we handle repetitive rendering tasks. Instead of issuing individual draw calls for thousands of similar objects, I render them all in a single WebGL2 `drawArraysInstanced` call. This technique proves especially valuable when visualizing scatter plots or particle systems with thousands of data points.
```javascript
class InstancedRenderer {
  constructor(gl, memoryManager, shaderManager) {
    this.gl = gl;
    this.memoryManager = memoryManager;
    this.shaderManager = shaderManager;
    this.instanceData = new Map();
  }

  createInstanceBuffer(baseGeometry, instances) {
    const instanceCount = instances.length;
    const instanceDataSize = 16; // 4 floats per instance (xyz position + scale)
    const instanceBuffer = this.memoryManager.getBuffer(
      instanceCount * instanceDataSize,
      this.gl.DYNAMIC_DRAW
    );
    // Interleave per-instance attributes into one Float32Array
    const data = new Float32Array(instanceCount * 4);
    for (let i = 0; i < instanceCount; i++) {
      const offset = i * 4;
      data[offset] = instances[i].x;
      data[offset + 1] = instances[i].y;
      data[offset + 2] = instances[i].z;
      data[offset + 3] = instances[i].scale || 1.0;
    }
    this.memoryManager.updateBufferData(instanceBuffer, data);
    return {
      buffer: instanceBuffer,
      count: instanceCount,
      baseGeometry
    };
  }

  renderInstanced(instancedData, mvpMatrix) {
    const gl = this.gl;
    const program = this.shaderManager.createProgram(
      this.getInstancedVertexShader(),
      this.getInstancedFragmentShader()
    );
    this.shaderManager.useProgram(program);
    // The shader needs the MVP matrix; without this the output is undefined
    const mvpLocation = gl.getUniformLocation(program, 'u_mvpMatrix');
    gl.uniformMatrix4fv(mvpLocation, false, mvpMatrix);
    // Bind base geometry (per-vertex attribute, divisor 0)
    gl.bindBuffer(gl.ARRAY_BUFFER, instancedData.baseGeometry.vertexBuffer);
    gl.enableVertexAttribArray(0);
    gl.vertexAttribPointer(0, 3, gl.FLOAT, false, 0, 0);
    // Bind instance data (16-byte stride: vec3 position + float scale)
    gl.bindBuffer(gl.ARRAY_BUFFER, instancedData.buffer);
    gl.enableVertexAttribArray(1);
    gl.vertexAttribPointer(1, 3, gl.FLOAT, false, 16, 0);
    gl.vertexAttribDivisor(1, 1); // advance once per instance, not per vertex
    gl.enableVertexAttribArray(2);
    gl.vertexAttribPointer(2, 1, gl.FLOAT, false, 16, 12);
    gl.vertexAttribDivisor(2, 1);
    // One draw call renders every instance
    gl.drawArraysInstanced(
      gl.TRIANGLES,
      0,
      instancedData.baseGeometry.vertexCount,
      instancedData.count
    );
  }

  getInstancedVertexShader() {
    return `#version 300 es
    layout(location = 0) in vec3 a_position;
    layout(location = 1) in vec3 a_instancePosition;
    layout(location = 2) in float a_instanceScale;
    uniform mat4 u_mvpMatrix;
    void main() {
      vec3 scaledPosition = a_position * a_instanceScale;
      vec3 worldPosition = scaledPosition + a_instancePosition;
      gl_Position = u_mvpMatrix * vec4(worldPosition, 1.0);
    }
    `;
  }

  getInstancedFragmentShader() {
    return `#version 300 es
    precision highp float;
    out vec4 fragColor;
    void main() {
      fragColor = vec4(0.8, 0.4, 0.2, 1.0);
    }
    `;
  }
}
```
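Putting this together for a scatter plot might look like the following sketch; `markerGeometry` (a `{ vertexBuffer, vertexCount }` pair) and the `mvp` matrix are assumed to come from elsewhere in the application:

```javascript
// Hypothetical usage: one draw call for 100k scatter-plot markers.
const renderer = new InstancedRenderer(gl, manager, shaders);

const points = Array.from({ length: 100000 }, () => ({
  x: Math.random() * 100,
  y: Math.random() * 100,
  z: 0,
  scale: 0.5
}));

// markerGeometry: { vertexBuffer, vertexCount } for one marker mesh (assumed)
const instanced = renderer.createInstanceBuffer(markerGeometry, points);
renderer.renderInstanced(instanced, mvp); // mvp: column-major Float32Array(16)
```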
Level of detail systems become essential when dealing with datasets containing millions of points. I implement LOD algorithms that dynamically adjust the complexity of rendered geometry based on viewing distance and data importance. This approach maintains visual quality while preserving performance.
```javascript
class LODManager {
  constructor() {
    this.lodLevels = new Map();
    this.distanceThresholds = [10, 50, 200, 1000];
  }

  generateLODLevels(baseGeometry) {
    const levels = [];
    // Full detail (LOD 0)
    levels.push({
      geometry: baseGeometry,
      vertexCount: baseGeometry.vertices.length / 3,
      threshold: this.distanceThresholds[0]
    });
    // Medium detail (LOD 1): keep 50% of vertices
    levels.push({
      geometry: this.simplifyGeometry(baseGeometry, 0.5),
      vertexCount: Math.floor(baseGeometry.vertices.length / 6),
      threshold: this.distanceThresholds[1]
    });
    // Low detail (LOD 2): keep 25% of vertices
    levels.push({
      geometry: this.simplifyGeometry(baseGeometry, 0.25),
      vertexCount: Math.floor(baseGeometry.vertices.length / 12),
      threshold: this.distanceThresholds[2]
    });
    // Point cloud (LOD 3): sparse points only
    levels.push({
      geometry: this.createPointCloud(baseGeometry),
      vertexCount: Math.floor(baseGeometry.vertices.length / 30),
      threshold: this.distanceThresholds[3]
    });
    return levels;
  }

  // Pick the first level whose distance threshold covers the object.
  selectLOD(distance, lodLevels) {
    for (let i = 0; i < lodLevels.length; i++) {
      if (distance <= lodLevels[i].threshold) {
        return lodLevels[i];
      }
    }
    return lodLevels[lodLevels.length - 1];
  }

  // Naive decimation: keep every Nth vertex. Fine for point-like data;
  // triangle meshes need a real simplifier (e.g. edge collapse).
  simplifyGeometry(geometry, reductionFactor) {
    const vertices = geometry.vertices;
    const simplified = new Float32Array(Math.floor(vertices.length * reductionFactor));
    const step = Math.floor(1 / reductionFactor);
    let writeIndex = 0;
    for (let i = 0; i < vertices.length; i += step * 3) {
      if (writeIndex < simplified.length - 2) {
        simplified[writeIndex] = vertices[i];
        simplified[writeIndex + 1] = vertices[i + 1];
        simplified[writeIndex + 2] = vertices[i + 2];
        writeIndex += 3;
      }
    }
    return { vertices: simplified };
  }

  createPointCloud(geometry) {
    const vertices = geometry.vertices;
    const pointCount = Math.floor(vertices.length / 30); // ~1 in 10 vertices
    const points = new Float32Array(pointCount * 3);
    // Step in whole vertices (multiples of 3 floats) to stay xyz-aligned
    const vertexStep = Math.max(1, Math.floor(vertices.length / 3 / pointCount));
    let writeIndex = 0;
    for (let i = 0; i + 2 < vertices.length && writeIndex < points.length; i += vertexStep * 3) {
      points[writeIndex] = vertices[i];
      points[writeIndex + 1] = vertices[i + 1];
      points[writeIndex + 2] = vertices[i + 2];
      writeIndex += 3;
    }
    return { vertices: points };
  }
}
```
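LOD selection then becomes a per-frame lookup, as in this sketch; `uploadAndDraw` is a stand-in for whatever draw path the application uses:

```javascript
// Hypothetical usage: choose geometry detail from camera distance each frame.
const lod = new LODManager();
const levels = lod.generateLODLevels(baseGeometry); // { vertices: Float32Array }

function drawObject(cameraDistance) {
  const level = lod.selectLOD(cameraDistance, levels);
  // e.g. distance 30 → LOD 1 (50% vertices), distance 5000 → point cloud
  uploadAndDraw(level.geometry, level.vertexCount); // assumed render helper
}
```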
Frustum culling eliminates objects outside the camera's view before they reach the GPU. I implement spatial partitioning using octrees or bounding volume hierarchies to quickly identify visible objects, dramatically reducing unnecessary rendering operations.
```javascript
class FrustumCuller {
  constructor() {
    this.frustumPlanes = new Float32Array(24); // 6 planes * 4 components each
  }

  extractFrustumPlanes(mvpMatrix) {
    const m = mvpMatrix;
    // Left plane
    this.frustumPlanes[0] = m[3] + m[0];
    this.frustumPlanes[1] = m[7] + m[4];
    this.frustumPlanes[2] = m[11] + m[8];
    this.frustumPlanes[3] = m[15] + m[12];
    // Right plane
    this.frustumPlanes[4] = m[3] - m[0];
    this.frustumPlanes[5] = m[7] - m[4];
    this.frustumPlanes[6] = m[11] - m[8];
    this.frustumPlanes[7] = m[15] - m[12];
    // Bottom plane
    this.frustumPlanes[8] = m[3] + m[1];
    this.frustumPlanes[9] = m[7] + m[5];
    this.frustumPlanes[10] = m[11] + m[9];
    this.frustumPlanes[11] = m[15] + m[13];
    // Top plane
    this.frustumPlanes[12] = m[3] - m[1];
    this.frustumPlanes[13] = m[7] - m[5];
    this.frustumPlanes[14] = m[11] - m[9];
    this.frustumPlanes[15] = m[15] - m[13];
    // Near plane
    this.frustumPlanes[16] = m[3] + m[2];
    this.frustumPlanes[17] = m[7] + m[6];
    this.frustumPlanes[18] = m[11] + m[10];
    this.frustumPlanes[19] = m[15] + m[14];
    // Far plane
    this.frustumPlanes[20] = m[3] - m[2];
    this.frustumPlanes[21] = m[7] - m[6];
    this.frustumPlanes[22] = m[11] - m[10];
    this.frustumPlanes[23] = m[15] - m[14];
    // Normalize planes
    for (let i = 0; i < 6; i++) {
      const offset = i * 4;
      const length = Math.sqrt(
        this.frustumPlanes[offset] * this.frustumPlanes[offset] +
        this.frustumPlanes[offset + 1] * this.frustumPlanes[offset + 1] +
        this.frustumPlanes[offset + 2] * this.frustumPlanes[offset + 2]
      );
      this.frustumPlanes[offset] /= length;
      this.frustumPlanes[offset + 1] /= length;
      this.frustumPlanes[offset + 2] /= length;
      this.frustumPlanes[offset + 3] /= length;
    }
  }

  isBoxInFrustum(minX, minY, minZ, maxX, maxY, maxZ) {
    for (let i = 0; i < 6; i++) {
      const offset = i * 4;
      const a = this.frustumPlanes[offset];
      const b = this.frustumPlanes[offset + 1];
      const c = this.frustumPlanes[offset + 2];
      const d = this.frustumPlanes[offset + 3];
      // Test positive vertex
      const px = a > 0 ? maxX : minX;
      const py = b > 0 ? maxY : minY;
      const pz = c > 0 ? maxZ : minZ;
      if (a * px + b * py + c * pz + d < 0) {
        return false;
      }
    }
    return true;
  }

  cullObjects(objects, mvpMatrix) {
    this.extractFrustumPlanes(mvpMatrix);
    const visibleObjects = [];
    for (const obj of objects) {
      if (this.isBoxInFrustum(
        obj.bounds.minX, obj.bounds.minY, obj.bounds.minZ,
        obj.bounds.maxX, obj.bounds.maxY, obj.bounds.maxZ
      )) {
        visibleObjects.push(obj);
      }
    }
    return visibleObjects;
  }
}
```
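Wiring the culler into a render loop is a single call per frame. This sketch assumes each object carries an axis-aligned `bounds` box and that `mvp` is a column-major `Float32Array(16)` (the layout gl-matrix produces):

```javascript
// Hypothetical usage: cull before issuing draw calls.
const culler = new FrustumCuller();

// objects: [{ bounds: { minX, minY, minZ, maxX, maxY, maxZ }, ... }, ...]
const visible = culler.cullObjects(objects, mvp);
console.log(`Drawing ${visible.length} of ${objects.length} objects`);
for (const obj of visible) {
  drawObject(obj); // assumed render helper
}
```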
Texture atlas management reduces texture binding operations by combining multiple small textures into larger ones. I create systems that automatically pack textures and manage UV coordinate mappings to maintain rendering efficiency.
```javascript
class TextureAtlasManager {
  constructor(gl, maxAtlasSize = 2048) {
    this.gl = gl;
    this.maxAtlasSize = maxAtlasSize;
    this.atlases = [];
    this.textureMap = new Map();
  }

  // `image` must be a CanvasImageSource (HTMLImageElement, ImageBitmap,
  // canvas), since it is drawn onto the atlas canvas with drawImage.
  addTexture(image, id) {
    const atlas = this.findOrCreateAtlas(image.width, image.height);
    const position = this.packTexture(atlas, image);
    if (position) {
      // Store the normalized UV rectangle for this sub-texture
      this.textureMap.set(id, {
        atlas: atlas,
        u: position.x / atlas.width,
        v: position.y / atlas.height,
        width: image.width / atlas.width,
        height: image.height / atlas.height
      });
      this.updateAtlasTexture(atlas);
      return true;
    }
    return false;
  }

  findOrCreateAtlas(width, height) {
    for (const atlas of this.atlases) {
      if (this.canFitTexture(atlas, width, height)) {
        return atlas;
      }
    }
    return this.createNewAtlas();
  }

  canFitTexture(atlas, width, height) {
    return this.findBestFit(atlas.freeRects, width, height) !== null;
  }

  createNewAtlas() {
    const atlas = {
      width: this.maxAtlasSize,
      height: this.maxAtlasSize,
      canvas: document.createElement('canvas'),
      context: null,
      texture: this.gl.createTexture(),
      freeRects: [{ x: 0, y: 0, width: this.maxAtlasSize, height: this.maxAtlasSize }],
      usedRects: []
    };
    atlas.canvas.width = this.maxAtlasSize;
    atlas.canvas.height = this.maxAtlasSize;
    atlas.context = atlas.canvas.getContext('2d');
    this.atlases.push(atlas);
    return atlas;
  }

  packTexture(atlas, image) {
    const bestFit = this.findBestFit(atlas.freeRects, image.width, image.height);
    if (!bestFit) return null;
    const position = { x: bestFit.x, y: bestFit.y };
    // Guillotine split: carve the used corner out of the free rectangle
    this.splitRectangle(atlas, bestFit, image.width, image.height);
    // Draw the image into its slot on the atlas canvas
    atlas.context.drawImage(image, position.x, position.y);
    atlas.usedRects.push({
      x: position.x,
      y: position.y,
      width: image.width,
      height: image.height
    });
    return position;
  }

  // Best-area-fit: smallest free rectangle that still fits the texture.
  findBestFit(freeRects, width, height) {
    let bestRect = null;
    let bestArea = Infinity;
    for (const rect of freeRects) {
      if (rect.width >= width && rect.height >= height) {
        const area = rect.width * rect.height;
        if (area < bestArea) {
          bestArea = area;
          bestRect = rect;
        }
      }
    }
    return bestRect;
  }

  splitRectangle(atlas, rect, width, height) {
    const index = atlas.freeRects.indexOf(rect);
    atlas.freeRects.splice(index, 1);
    // Remaining space to the right of the placed texture
    if (rect.width > width) {
      atlas.freeRects.push({
        x: rect.x + width,
        y: rect.y,
        width: rect.width - width,
        height: height
      });
    }
    // Remaining space below the placed texture (full row width)
    if (rect.height > height) {
      atlas.freeRects.push({
        x: rect.x,
        y: rect.y + height,
        width: rect.width,
        height: rect.height - height
      });
    }
  }

  updateAtlasTexture(atlas) {
    const gl = this.gl;
    gl.bindTexture(gl.TEXTURE_2D, atlas.texture);
    gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA, gl.UNSIGNED_BYTE, atlas.canvas);
    gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);
    gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.LINEAR);
    gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
    gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
  }

  getTextureCoordinates(id) {
    return this.textureMap.get(id);
  }
}
```
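Usage might look like this sketch; the icon URL is purely illustrative:

```javascript
// Hypothetical usage: pack marker icons into one atlas, then look up UVs.
const atlasManager = new TextureAtlasManager(gl, 2048);

async function loadIcon(url, id) {
  const blob = await (await fetch(url)).blob();
  const bitmap = await createImageBitmap(blob); // drawable CanvasImageSource
  atlasManager.addTexture(bitmap, id);
}

await loadIcon('/icons/marker.png', 'marker'); // hypothetical asset path
const uv = atlasManager.getTextureCoordinates('marker');
// uv holds a normalized [0..1] rectangle { u, v, width, height } inside
// the atlas, ready to hand to a shader as a per-sprite UV offset and scale
```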
Asynchronous data loading prevents the main thread from blocking during large dataset processing. I implement streaming systems that load and process data incrementally, maintaining smooth user interactions even with massive datasets.
```javascript
class AsyncDataLoader {
  constructor() {
    this.workers = [];
    this.maxWorkers = navigator.hardwareConcurrency || 4;
    this.taskQueue = [];
    this.activeWorkers = 0;
  }

  async loadDataset(url, chunkSize = 10000) {
    const response = await fetch(url);
    const reader = response.body.getReader();
    // One streaming decoder so multi-byte characters split across
    // network packets decode correctly
    const decoder = new TextDecoder();
    const chunks = [];
    let pending = [];
    let buffer = '';
    let done = false;
    while (!done) {
      const { value, done: readerDone } = await reader.read();
      done = readerDone;
      if (value) {
        buffer += decoder.decode(value, { stream: true });
        const lines = buffer.split('\n');
        buffer = lines.pop() || ''; // keep the trailing partial line
        pending = pending.concat(lines);
        while (pending.length >= chunkSize) {
          chunks.push(pending.splice(0, chunkSize));
        }
      }
    }
    buffer += decoder.decode(); // flush any buffered bytes
    if (buffer) pending.push(buffer);
    if (pending.length > 0) chunks.push(pending); // don't drop the remainder
    return this.processChunks(chunks);
  }

  async processChunks(chunks) {
    const results = [];
    const promises = [];
    for (const chunk of chunks) {
      const promise = this.processChunkAsync(chunk);
      promises.push(promise);
      // Cap in-flight workers at hardware concurrency
      if (promises.length >= this.maxWorkers) {
        const completed = await Promise.all(promises.splice(0, this.maxWorkers));
        results.push(...completed);
      }
    }
    if (promises.length > 0) {
      const remaining = await Promise.all(promises);
      results.push(...remaining);
    }
    return results.flat();
  }

  // Parse a chunk of CSV lines (x,y,z,value) off the main thread.
  processChunkAsync(chunk) {
    return new Promise((resolve) => {
      const workerUrl = URL.createObjectURL(new Blob([`
        self.onmessage = function(e) {
          const processed = e.data.map(line => {
            const parts = line.split(',');
            return {
              x: parseFloat(parts[0]),
              y: parseFloat(parts[1]),
              z: parseFloat(parts[2]),
              value: parseFloat(parts[3])
            };
          });
          self.postMessage(processed);
        };
      `], { type: 'application/javascript' }));
      const worker = new Worker(workerUrl);
      worker.onmessage = (e) => {
        resolve(e.data);
        worker.terminate();
        URL.revokeObjectURL(workerUrl); // avoid leaking blob URLs
      };
      worker.postMessage(chunk);
    });
  }

  async streamData(url, onChunk, chunkSize = 1000) {
    const response = await fetch(url);
    const reader = response.body.getReader();
    const decoder = new TextDecoder();
    let pending = [];
    let buffer = '';
    let chunkCount = 0;
    while (true) {
      const { done, value } = await reader.read();
      if (done) break;
      buffer += decoder.decode(value, { stream: true });
      const lines = buffer.split('\n');
      buffer = lines.pop() || '';
      pending = pending.concat(lines);
      while (pending.length >= chunkSize) {
        const chunk = pending.splice(0, chunkSize);
        const processed = await this.processChunkAsync(chunk);
        await onChunk(processed, chunkCount++);
      }
    }
    buffer += decoder.decode();
    if (buffer) pending.push(buffer);
    if (pending.length > 0) {
      const processed = await this.processChunkAsync(pending);
      await onChunk(processed, chunkCount);
    }
  }
}
```
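Combining the loader with the earlier MemoryManager gives an end-to-end streaming path. This sketch assumes a `scheduleDraw` helper and a CSV endpoint at a hypothetical `/data/points.csv`:

```javascript
// Hypothetical usage: stream a CSV dataset and push each parsed chunk
// straight into a pooled GPU buffer as it arrives.
const loader = new AsyncDataLoader();

await loader.streamData('/data/points.csv', async (points, index) => {
  const data = new Float32Array(points.length * 4);
  points.forEach((p, i) => data.set([p.x, p.y, p.z, p.value], i * 4));
  const buffer = manager.getBuffer(data.byteLength, gl.DYNAMIC_DRAW);
  manager.updateBufferData(buffer, data);
  scheduleDraw(buffer, points.length); // assumed render-queue helper
}, 5000);
```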
GPU-based computations move processing from the CPU to the GPU, where massively parallel execution excels. WebGL has no true compute shaders, so I emulate them with fragment shaders that render into floating-point textures, covering data transformations, filtering, and aggregation operations.
```javascript
class GPUCompute {
  constructor(gl) {
    this.gl = gl;
    this.computePrograms = new Map();
    this.framebuffers = new Map();
    // Rendering into float textures requires this WebGL2 extension
    if (!gl.getExtension('EXT_color_buffer_float')) {
      throw new Error('EXT_color_buffer_float is not supported');
    }
  }

  createComputeProgram(fragmentShaderSource) {
    // The "compute" pass is a fragment shader drawn over a full-screen quad
    const vertexShaderSource = `#version 300 es
    in vec2 a_position;
    void main() {
      gl_Position = vec4(a_position, 0.0, 1.0);
    }
    `;
    const vertexShader = this.createShader(this.gl.VERTEX_SHADER, vertexShaderSource);
    const fragmentShader = this.createShader(this.gl.FRAGMENT_SHADER, fragmentShaderSource);
    const program = this.gl.createProgram();
    this.gl.attachShader(program, vertexShader);
    this.gl.attachShader(program, fragmentShader);
    this.gl.linkProgram(program);
    if (!this.gl.getProgramParameter(program, this.gl.LINK_STATUS)) {
      throw new Error('Compute program linking failed: ' + this.gl.getProgramInfoLog(program));
    }
    return program;
  }

  createShader(type, source) {
    const shader = this.gl.createShader(type);
    this.gl.shaderSource(shader, source);
    this.gl.compileShader(shader);
    if (!this.gl.getShaderParameter(shader, this.gl.COMPILE_STATUS)) {
      throw new Error('Shader compilation failed: ' + this.gl.getShaderInfoLog(shader));
    }
    return shader;
  }

  setupComputeFramebuffer(width, height) {
    const gl = this.gl;
    const framebuffer = gl.createFramebuffer();
    const texture = gl.createTexture();
    gl.bindTexture(gl.TEXTURE_2D, texture);
    gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA32F, width, height, 0, gl.RGBA, gl.FLOAT, null);
    gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
    gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST);
    gl.bindFramebuffer(gl.FRAMEBUFFER, framebuffer);
    gl.framebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, gl.TEXTURE_2D, texture, 0);
    return { framebuffer, texture };
  }

  computeDataTransform(inputData, width, height) {
    const transformShader = `#version 300 es
    precision highp float;
    uniform sampler2D u_inputTexture;
    uniform vec2 u_resolution;
    out vec4 fragColor;
    void main() {
      vec2 texCoord = gl_FragCoord.xy / u_resolution;
      vec4 data = texture(u_inputTexture, texCoord);
      // Apply transformation (example: normalization)
      float maxVal = max(max(data.r, data.g), max(data.b, data.a));
      vec4 normalized = data / maxVal;
      fragColor = normalized;
    }
    `;
    const gl = this.gl;
    const program = this.createComputeProgram(transformShader);
    const { framebuffer } = this.setupComputeFramebuffer(width, height);
    // Upload the input data as an RGBA32F texture (4 floats per texel)
    const inputTexture = gl.createTexture();
    gl.bindTexture(gl.TEXTURE_2D, inputTexture);
    gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA32F, width, height, 0, gl.RGBA, gl.FLOAT, inputData);
    gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
    gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST);
    // Full-screen quad covering clip space (two triangles)
    const quadBuffer = gl.createBuffer();
    gl.bindBuffer(gl.ARRAY_BUFFER, quadBuffer);
    gl.bufferData(gl.ARRAY_BUFFER,
      new Float32Array([-1, -1, 1, -1, -1, 1, -1, 1, 1, -1, 1, 1]),
      gl.STATIC_DRAW);
    gl.useProgram(program);
    const positionLocation = gl.getAttribLocation(program, 'a_position');
    gl.enableVertexAttribArray(positionLocation);
    gl.vertexAttribPointer(positionLocation, 2, gl.FLOAT, false, 0, 0);
    gl.uniform1i(gl.getUniformLocation(program, 'u_inputTexture'), 0);
    gl.uniform2f(gl.getUniformLocation(program, 'u_resolution'), width, height);
    // Run the pass into the float framebuffer, then read the result back
    gl.bindFramebuffer(gl.FRAMEBUFFER, framebuffer);
    gl.viewport(0, 0, width, height);
    gl.drawArrays(gl.TRIANGLES, 0, 6);
    const output = new Float32Array(width * height * 4);
    gl.readPixels(0, 0, width, height, gl.RGBA, gl.FLOAT, output);
    gl.bindFramebuffer(gl.FRAMEBUFFER, null);
    return output;
  }
}
```
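Driving the pipeline could look like this sketch, where the 512×512 texture layout (four floats per texel) is an assumed packing of the dataset:

```javascript
// Hypothetical usage: normalize a 512x512x4 float dataset on the GPU.
const compute = new GPUCompute(gl);

const width = 512, height = 512;
const rawData = new Float32Array(width * height * 4); // 4 values per texel
// ... fill rawData from the loaded dataset ...

const normalized = compute.computeDataTransform(rawData, width, height);
console.log('First transformed texel:', normalized.slice(0, 4));
```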