Gamers love deals, and video game prices fluctuate constantly across Steam, Epic Games Store, and other platforms. SteamDB tracks historical pricing data that's invaluable for building deal-finding tools. In this tutorial, we'll build a video game price tracker using Python.
Why Track Game Prices?
- Find the best time to buy — games go on sale in predictable patterns
- Set price alerts — get notified when a game hits your target price
- Analyze pricing trends — understand publisher pricing strategies
- Compare across stores — Steam vs Epic vs GOG
Setting Up
pip install requests beautifulsoup4 pandas
Using the Steam Store API
Steam provides a free API for current pricing:
import requests
import json
def get_steam_price(app_id, country="us"):
    """Fetch current pricing for a Steam app via the Store API.

    Args:
        app_id: Numeric Steam application ID (e.g. 730 for Counter-Strike 2).
        country: Two-letter country code controlling regional pricing/currency.

    Returns:
        Dict with app_id, currency, initial_price, final_price (major
        currency units) and discount_percent, or None when the lookup fails
        (unknown app, HTTP/network error, or malformed response).
    """
    url = "https://store.steampowered.com/api/appdetails"
    params = {
        "appids": app_id,
        "cc": country,
        "filters": "price_overview",
    }
    try:
        response = requests.get(url, params=params, timeout=10)
        response.raise_for_status()
        data = response.json()
    except (requests.RequestException, ValueError):
        # Treat network/HTTP/JSON failures as "no price available" rather
        # than crashing the caller's polling loop.
        return None

    app_data = data.get(str(app_id), {})
    if not app_data.get("success"):
        return None

    # Free-to-play titles return success=True with no price_overview,
    # so every field falls back to a default.
    price_info = app_data["data"].get("price_overview", {})
    return {
        "app_id": app_id,
        "currency": price_info.get("currency"),
        # Steam reports prices in minor units (cents) -> divide by 100
        "initial_price": price_info.get("initial", 0) / 100,
        "final_price": price_info.get("final", 0) / 100,
        "discount_percent": price_info.get("discount_percent", 0),
    }
# Spot-check pricing for a handful of well-known titles
games = {
    730: "Counter-Strike 2",
    570: "Dota 2",
    1091500: "Cyberpunk 2077",
    1245620: "Elden Ring",
}
for app_id, name in games.items():
    result = get_steam_price(app_id)
    if not result:
        continue
    print(f"{name}: ${result['final_price']} ({result['discount_percent']}% off)")
Scraping SteamDB for Price History
SteamDB tracks historical prices that Steam's API doesn't expose:
from bs4 import BeautifulSoup
import time
def scrape_steamdb_prices(app_id, api_key):
    """Scrape current/lowest price details for a Steam app from SteamDB.

    Requests go through ScraperAPI with JavaScript rendering, since SteamDB
    sits behind Cloudflare and serves dynamic content.

    Args:
        app_id: Numeric Steam application ID.
        api_key: ScraperAPI key.

    Returns:
        Dict of whatever price fields were found; may be empty if the page
        layout changed or nothing matched.

    Raises:
        requests.HTTPError: on a non-2xx proxy response.
    """
    from urllib.parse import urlencode  # stdlib; only needed here

    url = f"https://steamdb.info/app/{app_id}/"
    # urlencode percent-escapes the nested URL; interpolating it raw would
    # leave "://" and any "&" unescaped and corrupt the proxy query string.
    query = urlencode({"api_key": api_key, "url": url, "render": "true"})
    proxy_url = "http://api.scraperapi.com?" + query

    response = requests.get(proxy_url, timeout=60)
    response.raise_for_status()
    soup = BeautifulSoup(response.text, "html.parser")

    price_data = {}

    # Current US price cell
    price_el = soup.find("td", {"data-cc": "us"})
    if price_el:
        price_data["current_price"] = price_el.get_text(strip=True)

    # Lowest recorded price
    lowest_el = soup.find("td", class_="price-lowest")
    if lowest_el:
        price_data["lowest_price"] = lowest_el.get_text(strip=True)

    # Generic label/value rows from the app-info table
    info_table = soup.find("table", class_="app-info")
    if info_table:
        for row in info_table.find_all("tr"):
            cells = row.find_all("td")  # hoisted: was evaluated twice per row
            if not cells:
                continue
            label = row.find("td", class_="label")
            # Prefer an explicit value cell; otherwise fall back to the
            # last <td> in the row (same behavior as the original
            # precedence-dependent one-liner, made explicit).
            value = row.find("td", class_="value") or cells[-1]
            if label and value:
                price_data[label.get_text(strip=True)] = value.get_text(strip=True)

    return price_data
# Pull SteamDB details for Cyberpunk 2077 and pretty-print them
data = scrape_steamdb_prices(1091500, "YOUR_API_KEY")
print(json.dumps(data, indent=2))
Building a Game Price Tracker
import pandas as pd
from datetime import datetime
class GamePriceTracker:
    """Tracks Steam prices for a watchlist of games and raises deal alerts."""

    def __init__(self, api_key=None):
        self.api_key = api_key      # reserved for proxy-backed scrapers
        self.watchlist = {}         # app_id -> {"name", "target_price"}
        self.price_history = []     # price dicts appended by check_prices()

    def add_game(self, app_id, name, target_price=None):
        """Add (or overwrite) a game on the watchlist.

        Args:
            app_id: Numeric Steam application ID.
            name: Human-readable title, used in alerts and logs.
            target_price: Alert threshold in major currency units;
                None disables alerting for this game.
        """
        self.watchlist[app_id] = {
            "name": name,
            "target_price": target_price,
        }

    def check_prices(self):
        """Poll Steam for every watched game; record history, return alerts.

        Returns:
            List of {"name", "price", "target"} dicts for games whose
            current price is at or below their target.
        """
        alerts = []
        for app_id, info in self.watchlist.items():
            price = get_steam_price(app_id)
            if price:
                price["name"] = info["name"]
                price["checked_at"] = datetime.now().isoformat()
                self.price_history.append(price)
                # `is not None` so a target of 0 (free) still triggers;
                # plain truthiness would silently skip it.
                if info["target_price"] is not None and price["final_price"] <= info["target_price"]:
                    alerts.append({
                        "name": info["name"],
                        "price": price["final_price"],
                        "target": info["target_price"],
                    })
                print(f"{info['name']}: ${price['final_price']}")
            time.sleep(2)  # stay polite to Steam's API
        return alerts

    def get_price_history(self, app_id=None):
        """Return recorded history as a DataFrame, optionally for one app.

        An empty history produces a column-less DataFrame, so filtering is
        skipped in that case (the original raised KeyError on "app_id").
        """
        df = pd.DataFrame(self.price_history)
        if app_id is not None and not df.empty:
            df = df[df["app_id"] == app_id]
        return df

    def export(self, filename="game_prices.csv"):
        """Write the full price history to a CSV file (no index column)."""
        df = pd.DataFrame(self.price_history)
        df.to_csv(filename, index=False)
# Build a small watchlist and run one price sweep
tracker = GamePriceTracker()
watch_these = [
    (1091500, "Cyberpunk 2077", 29.99),
    (1245620, "Elden Ring", 39.99),
    (1174180, "Red Dead Redemption 2", 19.99),
]
for app_id, title, target in watch_these:
    tracker.add_game(app_id, title, target_price=target)

alerts = tracker.check_prices()
for alert in alerts:
    print(f"DEAL ALERT: {alert['name']} is ${alert['price']} (target: ${alert['target']})")
Comparing Prices Across Stores
def check_isthereanydeal(game_name, api_key):
    """Search IsThereAnyDeal.com, which tracks prices across PC game stores.

    Args:
        game_name: Title to search for; URL-encoded before being sent
            (raw spaces/& in a title would otherwise corrupt the URL).
        api_key: ScraperAPI key (the site is JavaScript-rendered).

    Returns:
        Up to five deals as {"title", "price", "store"} dicts.
    """
    from urllib.parse import quote_plus, urlencode  # stdlib; only needed here

    search_url = f"https://isthereanydeal.com/search/?q={quote_plus(game_name)}"
    # urlencode escapes the nested search URL so the proxy parses it whole
    proxy_url = "http://api.scraperapi.com?" + urlencode(
        {"api_key": api_key, "url": search_url, "render": "true"}
    )
    response = requests.get(proxy_url, timeout=60)
    soup = BeautifulSoup(response.text, "html.parser")

    deals = []
    for card in soup.find_all("div", class_="card")[:5]:
        title = card.find("a", class_="title")
        if not title:
            # No title means it isn't a real result card; skip it
            continue
        price = card.find("span", class_="price")
        store = card.find("span", class_="shop")
        deals.append({
            "title": title.get_text(strip=True),
            "price": price.get_text(strip=True) if price else "N/A",
            "store": store.get_text(strip=True) if store else "Unknown",
        })
    return deals
Automated Price Monitoring
import schedule
def daily_price_check():
    """Run one watchlist sweep, export history to CSV, print any deals."""
    tracker = GamePriceTracker()
    tracker.add_game(1091500, "Cyberpunk 2077", 29.99)
    tracker.add_game(1245620, "Elden Ring", 39.99)
    alerts = tracker.check_prices()
    tracker.export()
    if not alerts:
        return
    print(f"Found {len(alerts)} deals!")
    for a in alerts:
        print(f" {a['name']}: ${a['price']}")

# Register the sweep to run every morning at 10:00 local time
schedule.every().day.at("10:00").do(daily_price_check)
Handling Anti-Bot Measures
SteamDB uses Cloudflare protection and rate limiting. Here's how to handle it:
- ScraperAPI with JavaScript rendering — essential for SteamDB's dynamic content
- ThorData residential proxies — datacenter IPs are commonly blocked
- Request delays — minimum 3 seconds between SteamDB requests
- Use Steam's API first — it's free, fast, and doesn't require proxies
For monitoring scraper performance across multiple game stores, ScrapeOps dashboards show you which sources are reliable.
Conclusion
Building a game price tracker combines free APIs (Steam Store) with web scraping (SteamDB, IsThereAnyDeal) to give you comprehensive pricing intelligence. The Steam API covers current prices perfectly, while scraping adds historical data and cross-store comparisons. Start with the API and add scraping as needed.
Happy scraping!
Top comments (0)