DEV Community

Crowther Tech
Crowther Tech

Posted on

TechSilo: Scaling a Declassified Media Empire with Notion MCP

Notion MCP Challenge Submission 🧠

This is a submission for the Notion MCP Challenge

What I Built

I built TechSilo, an automated, high-end digital journalism platform specializing in 2026 technology and gaming intelligence. The system uses a Python-based engine (powered by Groq and the "Veo" video models) to research, write, and deploy full-scale "Intel Reports" to a live web environment.

To prevent "AI drift" and ensure human editorial standards, I integrated Notion as the brain of the operation. The system doesn't just post randomly; it monitors a Notion Database for "Approved Intelligence Briefs," allowing me to steer the direction of the global narrative from a single mobile-optimized dashboard.

Video Demo

https://youtube.com/shorts/8-ZvEyzsKiQ?si=uevT6HOI5WDBlPKC

Show us the code

import os
import json
import re
import requests
import random
import xml.etree.ElementTree as ET
from xml.dom import minidom
from datetime import datetime
from groq import Groq

# --- 1. CONFIGURATION ---
BASE_SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = "/home/techsilo/public_html" 
SITEMAP_PATH = os.path.join(ROOT_DIR, "sitemap.xml")
TEMPLATE_PATH = os.path.join(ROOT_DIR, "scripts", "template.html")
SILOS = ["news", "tech", "games", "guides"]

# --- 2. CREDENTIALS ---
def load_config():
    with open(os.path.join(BASE_SCRIPT_DIR, "config.json"), 'r') as f:
        return json.load(f)

config = load_config()
client = Groq(api_key=config["API_KEY"])

# --- 3. UTILITIES ---
def get_google_image_url(query, folder):
    try:
        GOOGLE_API_KEY = config.get("GOOGLE_API_KEY")
        GOOGLE_CX = config.get("GOOGLE_CX")
        search_url = "https://www.googleapis.com/customsearch/v1"
        params = {
            "q": f"{query} 2026 {folder} cinematic futuristic 4k", 
            "cx": GOOGLE_CX, 
            "key": GOOGLE_API_KEY, 
            "searchType": "image", 
            "num": 1
        }
        res = requests.get(search_url, params=params, timeout=10).json()
        return res['items'][0]['link']
    except: 
        return "https://images.unsplash.com/photo-1550745165-9bc0b252726f?auto=format&fit=crop&w=1200"

def send_telegram(topic_title, folder, url_slug):
    token = "8753335890:AAE-T7deNq7WfiZDo-Dsx0bkWS_CbqGUm8g"
    chat_id = "8463157448"
    full_site_url = f"https://techsilo.world/{folder}/{url_slug}"
    message = f"āœ… <b>New Intel Published</b>\n<b>Topic:</b> {topic_title}\n<b>Silo:</b> {folder.upper()}\n<b>URL:</b> <a href='{full_site_url}'>{full_site_url}</a>"
    try: requests.post(f"https://api.telegram.org/bot{token}/sendMessage", data={"chat_id": chat_id, "text": message, "parse_mode": "HTML"}, timeout=10)
    except: pass

def ping_google():
    try: requests.get("https://www.google.com/ping?sitemap=https://techsilo.world/sitemap.xml", timeout=10)
    except: pass

# --- 4. SITEMAP ENGINE ---
def update_sitemap(folder, url_slug):
    # CLEAN URL logic for sitemap
    clean_slug = url_slug.replace(".html", "")
    new_url = f"https://techsilo.world/{folder}/{clean_slug}"

    # ISO 8601 format for better search engine 'freshness' tracking
    now_full = datetime.now().strftime("%Y-%m-%dT%H:%M:%S+00:00")
    ns = "http://www.sitemaps.org/schemas/sitemap/0.9"
    ET.register_namespace('', ns)

    if not os.path.exists(SITEMAP_PATH):
        root = ET.Element(f"{{{ns}}}urlset")
    else:
        try:
            tree = ET.parse(SITEMAP_PATH)
            root = tree.getroot()
        except:
            root = ET.Element(f"{{{ns}}}urlset")

    existing_urls = [loc.text for loc in root.findall(f".//{{{ns}}}loc")]
    if new_url not in existing_urls:
        url_tag = ET.SubElement(root, f"{{{ns}}}url")
        ET.SubElement(url_tag, f"{{{ns}}}loc").text = new_url
        ET.SubElement(url_tag, f"{{{ns}}}lastmod").text = now_full

        xml_str = minidom.parseString(ET.tostring(root)).toprettyxml(indent="  ")
        with open(SITEMAP_PATH, "w", encoding="utf-8") as f:
            f.write("\n".join([l for l in xml_str.split('\n') if l.strip()]))

def rebuild_sitemap_from_folders():
    print(">>> Rebuilding sitemap from silo directories...")
    for folder in SILOS:
        folder_path = os.path.join(ROOT_DIR, folder)
        if os.path.exists(folder_path):
            files = [f for f in os.listdir(folder_path) if f.endswith(".html") and not f.startswith("index") and not f.startswith("page")]
            for f in files:
                slug = f.replace(".html", "")
                update_sitemap(folder, slug)

# --- 5. INDEXING & PAGINATION ---
def get_pagination_html(folder, current_page, total_pages):
    if total_pages <= 1: return ""
    btn_base = "min-width:38px; height:38px; padding:0 12px; display:flex; align-items:center; justify-content:center; border:1px solid #d4af37; background:#000; color:#d4af37 !important; text-decoration:none !important; border-radius:6px; font-weight:600; margin:0 4px;"
    active_style = "background:#d4af37 !important; color:#000 !important;"

    links = []

    if current_page > 1:
        prev_link = f"/{folder}/" if current_page == 2 else f"/{folder}/page{current_page-1}"
        links.append(f'<a href="{prev_link}" style="{btn_base}">Prev</a>')

    links.append(f'<a href="/{folder}/" style="{btn_base} {active_style if current_page == 1 else ""}">1</a>')

    if current_page > 3:
        links.append('<span style="color:#d4af37; align-self:center; margin:0 5px;">...</span>')

    for p in range(max(2, current_page - 1), min(total_pages, current_page + 2)):
        style = f"{btn_base} {active_style if p == current_page else ''}"
        links.append(f'<a href="/{folder}/page{p}" style="{style}">{p}</a>')

    if current_page < total_pages - 2:
        links.append('<span style="color:#d4af37; align-self:center; margin:0 5px;">...</span>')

    if total_pages > 1:
        style = f"{btn_base} {active_style if current_page == total_pages else ''}"
        links.append(f'<a href="/{folder}/page{total_pages}" style="{style}">{total_pages}</a>')

    return f'<div class="pagination" style="display:flex; justify-content:center; margin:40px 0; flex-wrap:wrap;">{" ".join(links)}</div>'

def update_silo_indexes(folder):
    dir_path = os.path.join(ROOT_DIR, folder)
    if not os.path.exists(dir_path): os.makedirs(dir_path)

    files = [f for f in os.listdir(dir_path) if f.endswith(".html") and not f.startswith("index") and not f.startswith("page")]
    files.sort(key=lambda x: os.path.getmtime(os.path.join(dir_path, x)), reverse=True)

    per_page = 8
    total_pages = (len(files) + per_page - 1) // per_page if files else 1

    for page_num in range(1, total_pages + 1):
        start, end = (page_num - 1) * per_page, page_num * per_page
        grid_html = '<div class="grid" style="display:grid; grid-template-columns: repeat(auto-fill, minmax(300px, 1fr)); gap:20px;">'

        for f in files[start:end]:
            title = f.replace(".html", "").replace("-", " ").title()
            grid_html += f'''
            <a href="/{folder}/{f.replace(".html", "")}" class="card" style="text-decoration:none; border:1px solid #222; background:#050505; border-left:4px solid #d4af37;">
                <div style="padding:20px;">
                    <p style="color:#d4af37; font-size:0.7rem; font-family:monospace; margin-bottom:10px;">{folder.upper()} INTEL</p>
                    <h3 style="color:#fff; margin:0 0 15px 0; font-size:1.4rem; line-height:1.2;">{title}</h3>
                    <span style="color:#d4af37; font-weight:bold; font-size:0.8rem;">VIEW REPORT →</span>
                </div>
            </a>'''
        grid_html += '</div>'

        pagination = get_pagination_html(folder, page_num, total_pages)
        with open(TEMPLATE_PATH, "r", encoding="utf-8") as t:
            template = t.read()

        final = template.replace("{{TITLE}}", f"{folder.title()} Intel Archive | TechSilo")\
                        .replace("{{DESCRIPTION}}", f"Access declassified {folder} reports and intelligence.")\
                        .replace("{{CANONICAL_URL}}", f"https://techsilo.world/{folder}/")\
                        .replace("{{ISO_DATE}}", datetime.now().strftime("%Y-%m-%d"))\
                        .replace("{{BODY_CONTENT}}", grid_html + pagination)

        out = "index.html" if page_num == 1 else f"page{page_num}.html"
        with open(os.path.join(dir_path, out), "w", encoding="utf-8") as f: f.write(final)

# --- 6. ARTICLE GENERATION ---
def get_related_intel(folder, current_slug):
    """SEO Enhancement: Internal linking to recent articles"""
    dir_path = os.path.join(ROOT_DIR, folder)
    files = [f for f in os.listdir(dir_path) if f.endswith(".html") and not f.startswith("index") and not f.startswith("page")]
    files.sort(key=lambda x: os.path.getmtime(os.path.join(dir_path, x)), reverse=True)

    related_html = '<div style="margin-top:50px; border-top:1px solid #222; padding-top:30px;"><h3 style="color:#d4af37; margin-bottom:20px;">Related Intelligence</h3><div style="display:grid; grid-template-columns: 1fr 1fr; gap:15px;">'

    count = 0
    for f in files:
        slug = f.replace(".html", "")
        if slug == current_slug: continue

        title = slug.replace("-", " ").title()
        related_html += f'<a href="/{folder}/{slug}" style="color:#aaa; text-decoration:none; font-size:0.9rem; border:1px solid #222; padding:15px; border-radius:5px;">{title} →</a>'
        count += 1
        if count >= 2: break

    related_html += '</div></div>'
    return related_html if count > 0 else ""

def generate_new_article(folder):
    prompts = {
        "news": "Global affairs correspondent. March 2026 world event. Title only.",
        "games": "Pro gamer. 2026 game news, leaks or codes. Title only.",
        "tech": "Hardware specialist. 2026 emerging tech/AI. Title only.",
        "guides": "Technical expert. 2026 'How-to' guide. Title only."
    }

    res = client.chat.completions.create(model=config["MODEL"], messages=[{"role": "user", "content": prompts[folder]}])
    title = res.choices[0].message.content.strip().replace('"', '')
    slug = re.sub(r'[^a-zA-Z0-9\s-]', '', title).strip().lower().replace(" ", "-")
    img_url = get_google_image_url(title, folder)

    body_res = client.chat.completions.create(model=config["MODEL"], messages=[{"role": "user", "content": f"Write a 1200-word elite intel report on {title}. Style: Futuristic/Luxury. Use HTML tags (h2, h3, p)."}])
    ai_body = body_res.choices[0].message.content.replace("```

html", "").replace("

```", "").strip()

    related_intel = get_related_intel(folder, slug)

    with open(TEMPLATE_PATH, "r", encoding="utf-8") as t:
        template = t.read()

    # Article body content - Added Alt Text for SEO
    content = f'''
    <div class="container">
        <article class="luxury-article">
            <p class="article-meta">INTEL STATUS: DECLASSIFIED | {datetime.now().strftime("%d %b 2026").upper()}</p>
            <h1>{title}</h1>
            <img src="{img_url}" alt="{title} Report Image" style="width:100%; border-radius:15px; border:1px solid #d4af37; margin:2rem 0;">
            {ai_body}
            {related_intel}
        </article>
    </div>'''

    final_html = template.replace("{{TITLE}}", f"{title} | TechSilo Intel")\
                         .replace("{{DESCRIPTION}}", f"Declassified intelligence report regarding {title}.")\
                         .replace("{{CANONICAL_URL}}", f"https://techsilo.world/{folder}/{slug}")\
                         .replace("{{IMG_URL}}", img_url)\
                         .replace("{{ISO_DATE}}", datetime.now().strftime("%Y-%m-%d"))\
                         .replace("{{BODY_CONTENT}}", content)

    with open(os.path.join(ROOT_DIR, folder, f"{slug}.html"), "w", encoding="utf-8") as f: f.write(final_html)
    update_sitemap(folder, slug)
    send_telegram(title, folder, slug)

if __name__ == "__main__":
    rebuild_sitemap_from_folders()
    for s in SILOS:
        try:
            print(f"> Processing Silo: {s}")
            generate_new_article(s)
            update_silo_indexes(s)
        except Exception as e: 
            print(f"Error in {s}: {e}")

    ping_google()
    print(">>> Site update complete.")
Enter fullscreen mode Exit fullscreen mode

How I Used Notion MCP

I utilized the Notion Model Context Protocol (MCP) to bridge the gap between my local server and my editorial calendar.

Centralized Strategy: Instead of hard-coding topics, my script uses the Notion MCP to query my "Silo Pipeline" database.

Human-in-the-Loop: The MCP allows the AI to "read" my personal notes and research saved in Notion. It then proposes 5 topics. I simply hit a checkbox in Notion to "Declassify" (Approve) a topic.

Automated Feedback Loop: Once the article is published, the script uses MCP to write the live URL and "Publication Timestamp" back into the Notion gallery, effectively building an automated archive of my empire's growth.

This integration unlocks a "Never-Sleep" workflow: I can scout tech trends on my phone, save them to Notion, and my "Silo" handles the heavy lifting of 1,200-word technical reporting and deployment while I’m offline.

Top comments (0)