DEV Community

agenthustler
agenthustler

Posted on

How to Build an Automated Competitive SEO Tracker

Knowing where your competitors rank for target keywords is essential for SEO strategy. Instead of paying for expensive tools, you can build your own competitive SEO tracker with Python.

What We'll Build

  • SERP scraper for target keywords
  • Rank tracking over time
  • Competitor domain analysis
  • Automated daily reporting

Setup

pip install requests beautifulsoup4 pandas schedule

SERP Scraper

Scraping Google search results requires reliable proxies. ScraperAPI has a dedicated Google SERP endpoint that handles this cleanly:

import requests
from bs4 import BeautifulSoup
from urllib.parse import quote_plus, urlparse
from datetime import datetime

# ScraperAPI credential — replace the placeholder with your own account key.
SCRAPER_API_KEY = "YOUR_KEY"

def scrape_serp(keyword, num_results=20):
    """Scrape a Google SERP for *keyword* via ScraperAPI.

    Parameters
    ----------
    keyword : str
        Search query. May contain spaces or special characters; it is
        URL-encoded before being embedded in the Google URL.
    num_results : int
        Number of results to request from Google (``num`` parameter).

    Returns
    -------
    list[dict]
        Structured results from parse_serp_json (when ScraperAPI's
        autoparse returns JSON) or parse_serp_html otherwise.
    """
    # Bug fix: the original interpolated the raw keyword and appended
    # &num/&autoparse directly onto the outer URL, so any '&', '?' or
    # space in the keyword — and the extra params themselves — were not
    # attributed to the inner Google URL correctly. Encode the target
    # URL once and let `requests` encode the outer query string.
    target = (
        f"https://www.google.com/search?q={quote_plus(keyword)}"
        f"&num={num_results}"
    )
    response = requests.get(
        "http://api.scraperapi.com",
        params={
            "api_key": SCRAPER_API_KEY,
            "url": target,
            "autoparse": "true",
        },
        timeout=60,
    )

    # autoparse responses come back as JSON; fall back to raw HTML parsing.
    if response.headers.get("content-type", "").startswith("application/json"):
        return parse_serp_json(response.json(), keyword)

    return parse_serp_html(response.text, keyword)

def parse_serp_html(html, keyword):
    """Extract organic results from raw Google SERP HTML.

    Returns a list of dicts with keys: keyword, position, title, url,
    domain, snippet, date. Only result blocks that contain both a title
    and a link are counted toward positions.
    """
    soup = BeautifulSoup(html, "html.parser")
    checked_at = datetime.now().isoformat()
    parsed = []

    for block in soup.select("div.g"):
        title_tag = block.select_one("h3")
        anchor = block.select_one("a[href]")
        if title_tag is None or anchor is None:
            continue  # not an organic result block

        href = anchor["href"]
        # Google sometimes wraps targets in a /url?q=<target>&... redirect.
        if href.startswith("/url?q="):
            href = href.split("/url?q=")[1].split("&")[0]

        snippet_tag = block.select_one(".VwiC3b")
        parsed.append({
            "keyword": keyword,
            "position": len(parsed) + 1,
            "title": title_tag.text.strip(),
            "url": href,
            "domain": urlparse(href).netloc,
            "snippet": snippet_tag.text.strip() if snippet_tag else "",
            "date": checked_at,
        })

    return parsed

Rank Tracker with SQLite

import sqlite3
import time

def init_seo_db(path="seo_tracker.db"):
    """Open (creating if necessary) the SQLite rankings database.

    Returns an open sqlite3.Connection; the caller is responsible
    for closing it.
    """
    schema = """
        CREATE TABLE IF NOT EXISTS rankings (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            keyword TEXT,
            domain TEXT,
            position INTEGER,
            url TEXT,
            title TEXT,
            checked_at TEXT
        )
    """
    connection = sqlite3.connect(path)
    connection.execute(schema)
    connection.commit()
    return connection

def track_rankings(keywords, competitors):
    """Scrape SERPs for each keyword and persist competitor rankings.

    Parameters
    ----------
    keywords : iterable of str
        Search queries to check.
    competitors : iterable of str
        Domain substrings; a result is stored only when its domain
        contains one of these (so "apify.com" also matches
        "www.apify.com").

    Side effects: writes matching rows to the `rankings` table of
    seo_tracker.db and prints progress to stdout.
    """
    conn = init_seo_db()
    # Bug fix: the original leaked the connection if scrape_serp raised
    # mid-run; try/finally guarantees the close.
    try:
        for keyword in keywords:
            print(f"Checking: {keyword}")
            results = scrape_serp(keyword, num_results=30)

            for r in results:
                if any(comp in r["domain"] for comp in competitors):
                    conn.execute(
                        """INSERT INTO rankings
                        (keyword, domain, position, url, title, checked_at)
                        VALUES (?, ?, ?, ?, ?, ?)""",
                        (keyword, r["domain"], r["position"],
                         r["url"], r["title"], r["date"])
                    )
                    print(f"  #{r['position']}: {r['domain']}")

            # Commit per keyword so a failure keeps earlier results.
            conn.commit()
            time.sleep(5)  # pause between SERP requests
    finally:
        conn.close()

Trend Analysis

import pandas as pd

def analyze_rankings(db_path="seo_tracker.db"):
    """Print current rankings and per-domain rank changes to stdout.

    Parameters
    ----------
    db_path : str
        Path to the SQLite database created by init_seo_db().
    """
    conn = sqlite3.connect(db_path)
    # Bug fix: the original never closed the connection; close it as
    # soon as the data is loaded into the DataFrame.
    try:
        df = pd.read_sql("SELECT * FROM rankings", conn)
    finally:
        conn.close()

    df["checked_at"] = pd.to_datetime(df["checked_at"])

    print("=== Current Rankings ===\n")
    # Most recent observation per (keyword, domain) pair.
    latest = df.sort_values("checked_at").groupby(
        ["keyword", "domain"]
    ).last().reset_index()

    for keyword in latest["keyword"].unique():
        kw_data = latest[latest["keyword"] == keyword].sort_values("position")
        print(f"\n{keyword}:")
        for _, row in kw_data.iterrows():
            print(f"  #{row['position']}: {row['domain']}")

    print("\n=== Rank Changes ===\n")
    for domain in df["domain"].unique():
        domain_data = df[df["domain"] == domain]
        for keyword in domain_data["keyword"].unique():
            kw = domain_data[domain_data["keyword"] == keyword].sort_values("checked_at")
            if len(kw) > 1:
                first = kw.iloc[0]["position"]
                last = kw.iloc[-1]["position"]
                # Positive change = moved toward #1, i.e. an improvement.
                change = first - last
                arrow = "up" if change > 0 else "down" if change < 0 else "same"
                print(f"  {domain} | {keyword}: {first} -> {last} ({arrow} {abs(change)})")

Automated Daily Tracking

import schedule

# Keywords to monitor — each is scraped once per daily_check() run.
KEYWORDS = [
    "best web scraping tool",
    "python web scraping tutorial",
    "web scraping api",
    "automated data extraction",
]

# Competitor domain substrings; results are matched by substring, so
# "scrapy.org" also catches subdomains like "docs.scrapy.org".
COMPETITORS = ["scrapy.org", "selenium.dev", "playwright.dev", "apify.com"]

def daily_check():
    """Run one full pass: scrape + store rankings, then print analysis."""
    print(f"\n=== Daily SEO Check: {datetime.now()} ===")
    track_rankings(KEYWORDS, COMPETITORS)
    analyze_rankings()

# NOTE(review): registered at import time — the job is scheduled even if
# this module is imported rather than run directly. 09:00 is local time.
schedule.every().day.at("09:00").do(daily_check)

if __name__ == "__main__":
    daily_check()  # run once immediately rather than waiting for 09:00
    while True:
        schedule.run_pending()
        time.sleep(60)  # poll the scheduler once a minute

Scaling Your SEO Tracker

For reliable SERP scraping, ScraperAPI is purpose-built with Google-specific features like autoparse. For geo-targeted results, ThorData provides location-specific proxies. Track your scraping success rates with ScrapeOps.

Conclusion

A custom SEO tracker gives you competitive intelligence without the $100+/month tool subscriptions. Run it daily, track trends over weeks, and let the data guide your content strategy.

Top comments (0)