DEV Community

agenthustler
agenthustler

Posted on

Scraping Lobbying Disclosure Data: Following the Money

Introduction

Lobbying disclosure data reveals who is spending money to influence legislation. In the US alone, over $4 billion is spent annually on lobbying. This data is publicly available through government databases but often buried in hard-to-access formats. In this tutorial, we'll build scrapers to collect and analyze lobbying disclosure data.

Setup

import requests
from bs4 import BeautifulSoup
import pandas as pd
import json
import time
from datetime import datetime
import sqlite3

# For handling government sites with anti-bot protection
# Get your API key: https://www.scraperapi.com?fp_ref=the52
SCRAPER_API_KEY = "your_key_here"
BASE_URL = "http://api.scraperapi.com"
Enter fullscreen mode Exit fullscreen mode

Scraping the Senate Lobbying Database

The US Senate maintains a searchable lobbying disclosure database:

def search_senate_lobbying(registrant=None, client=None, year=2026):
    """Search the Senate Lobbying Disclosure API for quarterly filings.

    Args:
        registrant: Optional registrant (lobbying firm) name filter.
        client: Optional client name filter.
        year: Filing year to query (default 2026).

    Returns:
        A list of dicts with keys filing_id, registrant, client, amount,
        filing_date, and lobbying_activities. Returns an empty list when
        the API responds with a non-200 status.

    Note:
        Only the first page of results is returned; the API paginates —
        follow the "next" URL in the response payload if more is needed.
    """
    url = "https://lda.senate.gov/api/v1/filings/"

    params = {
        "filing_year": year,
        "filing_type": "Q",        # quarterly activity reports
        "ordering": "-dt_posted",  # newest filings first
    }
    if registrant:
        params["registrant_name"] = registrant
    if client:
        params["client_name"] = client

    # Timeout guards against the request hanging indefinitely on a slow
    # endpoint (the original call had no timeout at all).
    response = requests.get(url, params=params, timeout=30)
    if response.status_code != 200:
        return []

    filings = []
    for filing in response.json().get("results", []):
        # `or {}` protects against explicit JSON nulls: the original
        # .get("registrant", {}) only covers a *missing* key and would
        # raise AttributeError on a present-but-null value.
        filings.append({
            "filing_id": filing.get("filing_uuid"),
            "registrant": (filing.get("registrant") or {}).get("name"),
            "client": (filing.get("client") or {}).get("name"),
            "amount": filing.get("income"),
            "filing_date": filing.get("dt_posted"),
            "lobbying_activities": extract_activities(filing),
        })
    return filings

def extract_activities(filing):
    """Pull the lobbying-activity records out of a single filing.

    Each activity is reduced to its description, the display names of its
    general issue codes, and the names of the lobbyists involved.
    """
    extracted = []
    for entry in filing.get("lobbying_activities", []):
        issue_names = [
            code.get("display_name")
            for code in entry.get("general_issue_code_display_list", [])
        ]
        lobbyist_names = [
            member.get("lobbyist", {}).get("name")
            for member in entry.get("lobbyists", [])
        ]
        extracted.append({
            "description": entry.get("description"),
            "issues": issue_names,
            "lobbyists": lobbyist_names,
        })
    return extracted
Enter fullscreen mode Exit fullscreen mode

House Lobbying Disclosures

def scrape_house_lobbying(search_term, year=2026):
    """Scrape House of Representatives lobbying disclosure search results.

    The House search UI is JavaScript-rendered and sits behind bot
    protection, so the page is fetched through the scraping proxy
    configured by the SCRAPER_API_KEY / BASE_URL module constants, with
    JS rendering enabled.

    Args:
        search_term: Registrant name to search for.
        year: Accepted for interface compatibility; note the search URL
            does not currently include it.

    Returns:
        A list of dicts (registrant, client, filing_type, amount, date,
        source) parsed from the results table; empty on HTTP failure.
    """
    url = f"https://disclosurespreview.house.gov/ld/ldsearch?search={search_term}&searchtype=registrant"

    params = {
        "api_key": SCRAPER_API_KEY,
        "url": url,
        "render": "true",  # execute the page's JavaScript before returning HTML
    }

    # Rendering proxied pages is slow; allow a generous timeout rather
    # than none at all (the original request could hang forever).
    response = requests.get(BASE_URL, params=params, timeout=60)
    if response.status_code != 200:
        # Proxy or upstream failure: return no rows instead of silently
        # parsing an error page.
        return []

    soup = BeautifulSoup(response.text, "html.parser")

    results = []
    for row in soup.select("table.results tbody tr"):
        cols = row.select("td")
        # Skip short/malformed rows (e.g. "no results" placeholder rows).
        if len(cols) < 5:
            continue
        results.append({
            "registrant": cols[0].text.strip(),
            "client": cols[1].text.strip(),
            "filing_type": cols[2].text.strip(),
            "amount": cols[3].text.strip(),
            "date": cols[4].text.strip(),
            "source": "House",
        })

    return results
Enter fullscreen mode Exit fullscreen mode

Tracking Industry Spending

def track_industry_spending(industry_keywords):
    """Aggregate Senate lobbying filings for each industry keyword.

    For every keyword, queries the Senate filings API as a client-name
    search, tags each returned filing with the keyword it matched, and
    returns a per-keyword summary (total spending, filing count, unique
    registrants) sorted by total spending, descending. Returns an empty
    DataFrame when no usable data comes back.
    """
    collected = []
    for term in industry_keywords:
        print(f"Searching for: {term}")
        matches = search_senate_lobbying(client=term)
        for match in matches:
            match["search_term"] = term
        collected.extend(matches)
        time.sleep(2)  # Respect rate limits

    frame = pd.DataFrame(collected)
    if frame.empty or "amount" not in frame.columns:
        return pd.DataFrame()

    frame["amount"] = pd.to_numeric(frame["amount"], errors="coerce")
    return (
        frame.groupby("search_term")
        .agg(
            total_spending=("amount", "sum"),
            filing_count=("filing_id", "count"),
            unique_registrants=("registrant", "nunique"),
        )
        .sort_values("total_spending", ascending=False)
    )
Enter fullscreen mode Exit fullscreen mode

Lobbyist Network Analysis

def _coerce_amount(amount):
    """Best-effort conversion of a filing amount to float.

    Accepts None, numeric types, and currency-formatted strings such as
    "$1,250,000". Unparseable values become 0.0 instead of raising —
    the previous bare float() raised ValueError on formatted strings.
    """
    if not amount:
        return 0.0
    try:
        return float(str(amount).replace("$", "").replace(",", "").strip())
    except ValueError:
        return 0.0


def build_lobbyist_network(filings):
    """Build a network of lobbyist-client relationships.

    Args:
        filings: Iterable of filing dicts as produced by
            search_senate_lobbying (registrant, client, amount,
            lobbying_activities keys).

    Returns:
        A list of edge dicts. Each filing yields one weighted
        "lobbying" edge (registrant -> client, weight = reported
        amount) plus one unit-weight "employment" edge per lobbyist
        (lobbyist -> registrant).
    """
    edges = []

    for filing in filings:
        registrant = filing.get("registrant", "Unknown")
        client = filing.get("client", "Unknown")

        edges.append({
            "source": registrant,
            "target": client,
            "weight": _coerce_amount(filing.get("amount", 0)),
            "type": "lobbying",
        })

        for activity in filing.get("lobbying_activities", []):
            for lobbyist in activity.get("lobbyists", []):
                edges.append({
                    "source": lobbyist,
                    "target": registrant,
                    "weight": 1,
                    "type": "employment",
                })

    return edges
Enter fullscreen mode Exit fullscreen mode

Data Storage and Analysis

def store_lobbying_data(filings, db_path="lobbying.db"):
    """Persist filing records to a SQLite database.

    Creates the `filings` table on first use and upserts each record
    keyed on filing_id. The connection is always closed, even if an
    insert fails — the original leaked the connection on any exception.

    Args:
        filings: Iterable of filing dicts. filing_id, registrant, and
            client are required keys (KeyError if absent, matching the
            original contract); amount and filing_date are optional.
        db_path: Path to the SQLite file (created if absent).
    """
    # One timestamp for the whole batch, rather than one clock read per
    # row inside the insert loop.
    scraped_at = datetime.now().isoformat()

    conn = sqlite3.connect(db_path)
    try:
        conn.execute("""
            CREATE TABLE IF NOT EXISTS filings (
                filing_id TEXT PRIMARY KEY,
                registrant TEXT,
                client TEXT,
                amount REAL,
                filing_date TEXT,
                scraped_at TEXT
            )
        """)
        # executemany pushes the loop into the sqlite3 C layer.
        conn.executemany(
            "INSERT OR REPLACE INTO filings VALUES (?, ?, ?, ?, ?, ?)",
            (
                (
                    filing["filing_id"],
                    filing["registrant"],
                    filing["client"],
                    filing.get("amount"),
                    filing.get("filing_date"),
                    scraped_at,
                )
                for filing in filings
            ),
        )
        conn.commit()
    finally:
        conn.close()

if __name__ == "__main__":
    # Example run: compare lobbying spend across the big tech firms.
    companies = ["Google", "Meta", "Amazon", "Apple", "Microsoft"]
    print(track_industry_spending(companies))
Enter fullscreen mode Exit fullscreen mode

Conclusion

Lobbying disclosure data provides transparency into how money influences policy. By scraping Senate and House databases, you can track spending trends, build influence networks, and hold power accountable. Use ScraperAPI for handling bot protection on government sites, and always respect rate limits and terms of service.

Top comments (0)