Automate Social Media with Python: Post, Schedule, and Analyze Across Platforms
Managing multiple social media accounts manually is a full-time job. Here's how to automate posting, scheduling, and analytics with Python.
Twitter/X API v2 Client
# pip install tweepy
import json
import os
import time
from datetime import datetime, timedelta, timezone
from pathlib import Path

import tweepy
class TwitterAutomation:
    """Thin wrapper around tweepy's v1.1 and v2 Twitter/X clients.

    The v1.1 ``API`` object is kept only for ``media_upload`` (which has no
    v2 equivalent in tweepy); all tweet creation goes through the v2
    ``Client``.
    """

    def __init__(self, api_key: str, api_secret: str, access_token: str, access_secret: str):
        auth = tweepy.OAuthHandler(api_key, api_secret)
        auth.set_access_token(access_token, access_secret)
        self.api = tweepy.API(auth, wait_on_rate_limit=True)
        # API v2 client
        self.client = tweepy.Client(
            consumer_key=api_key,
            consumer_secret=api_secret,
            access_token=access_token,
            access_token_secret=access_secret,
            wait_on_rate_limit=True,
        )

    def post_tweet(self, text: str, image_path: str = None) -> dict:
        """Post a tweet, attaching the image at ``image_path`` if given.

        Returns the ``data`` payload of the created tweet (id and text).
        """
        media_ids = None
        if image_path:
            # Media upload is only available on the v1.1 endpoint.
            media = self.api.media_upload(image_path)
            media_ids = [media.media_id]
        # create_tweet accepts media_ids=None, so no branching is needed.
        tweet = self.client.create_tweet(text=text, media_ids=media_ids)
        return tweet.data

    def reply_to_tweet(self, tweet_id: str, reply_text: str) -> dict:
        """Post ``reply_text`` as a reply to the tweet with ``tweet_id``."""
        tweet = self.client.create_tweet(
            text=reply_text,
            in_reply_to_tweet_id=tweet_id,
        )
        return tweet.data

    def get_mentions(self, since_hours: int = 24) -> list:
        """Return mentions of the authenticated user from the last ``since_hours`` hours."""
        # Use an aware UTC timestamp: the API expects UTC, and naive
        # datetime.utcnow() is deprecated (Python 3.12+).
        since = datetime.now(timezone.utc) - timedelta(hours=since_hours)
        mentions = self.client.get_users_mentions(
            id=self.client.get_me().data.id,
            start_time=since,
            tweet_fields=['author_id', 'created_at', 'text'],
        )
        return mentions.data or []

    def schedule_tweet(self, text: str, post_at: datetime, image_path: str = None):
        """Block until ``post_at``, then post the tweet.

        ``post_at`` may be naive (compared against local time) or aware
        (compared against UTC) — the original crashed on aware datetimes.
        If ``post_at`` is in the past the tweet is posted immediately.

        NOTE(review): this sleeps in-process, tying up the caller for the
        whole wait; fine for a cron-style script, not for a server.
        """
        now = datetime.now(timezone.utc) if post_at.tzinfo else datetime.now()
        delay = (post_at - now).total_seconds()
        if delay > 0:
            time.sleep(delay)
        return self.post_tweet(text, image_path)
Content Calendar Manager
import csv
from dataclasses import dataclass
from typing import Optional
@dataclass
class ScheduledPost:
    """One piece of content queued for publication on a social platform."""

    platform: str                     # target platform name, e.g. "twitter"
    post_time: datetime               # when the post should go live
    content: str                      # body text of the post
    image_path: Optional[str] = None  # local path of an image to attach, if any
    hashtags: Optional[list] = None   # tags WITHOUT the leading '#' (added at post time)
    posted: bool = False              # flipped to True once the post is published
class ContentCalendar:
    """CSV-backed queue of scheduled social media posts.

    The whole calendar is held in memory (``self.posts``) and rewritten to
    ``calendar_file`` on every mutation.
    """

    def __init__(self, calendar_file: str = "content_calendar.csv"):
        self.calendar_file = calendar_file
        self.posts = []
        self._load()

    def _load(self):
        """Populate ``self.posts`` from the CSV file, if it exists."""
        if not Path(self.calendar_file).exists():
            return
        # newline='' and an explicit encoding are the documented way to
        # open files for the csv module.
        with open(self.calendar_file, 'r', newline='', encoding='utf-8') as f:
            reader = csv.DictReader(f)
            for row in reader:
                post = ScheduledPost(
                    platform=row['platform'],
                    post_time=datetime.fromisoformat(row['post_time']),
                    content=row['content'],
                    # _save() writes a missing image as '' — map it back to
                    # None so a save/load round-trip is lossless.
                    image_path=row.get('image_path') or None,
                    hashtags=row.get('hashtags', '').split(',') if row.get('hashtags') else [],
                    posted=row.get('posted', 'false').lower() == 'true'
                )
                self.posts.append(post)

    def add_post(self, platform: str, content: str, post_time: datetime, **kwargs):
        """Append a new post to the calendar and persist it immediately.

        Extra keyword args (image_path, hashtags, ...) are forwarded to
        ``ScheduledPost``.
        """
        post = ScheduledPost(platform=platform, post_time=post_time, content=content, **kwargs)
        self.posts.append(post)
        self._save()

    def get_pending_posts(self, platform: str = None) -> list:
        """Return unposted posts whose scheduled time has passed.

        If ``platform`` is given, only posts for that platform are returned.
        """
        now = datetime.now()
        pending = [p for p in self.posts if not p.posted and p.post_time <= now]
        if platform:
            pending = [p for p in pending if p.platform == platform]
        return pending

    def _save(self):
        """Rewrite the entire CSV file from ``self.posts``."""
        with open(self.calendar_file, 'w', newline='', encoding='utf-8') as f:
            fieldnames = ['platform', 'post_time', 'content', 'image_path', 'hashtags', 'posted']
            writer = csv.DictWriter(f, fieldnames=fieldnames)
            writer.writeheader()
            for post in self.posts:
                writer.writerow({
                    'platform': post.platform,
                    'post_time': post.post_time.isoformat(),
                    'content': post.content,
                    'image_path': post.image_path or '',
                    'hashtags': ','.join(post.hashtags) if post.hashtags else '',
                    'posted': str(post.posted).lower()
                })
# Usage: queue a few days of example content.
calendar = ContentCalendar()

# Each entry is (platform, text, scheduled time).
week_of_content = [
    ("twitter",
     "🚀 New blog post: Python automation tips for developers. #Python #Automation",
     datetime.now() + timedelta(hours=2)),
    ("twitter",
     "💡 Quick tip: Use f-strings instead of .format() — 2x faster. #PythonTips",
     datetime.now() + timedelta(days=1)),
    ("twitter",
     "🛠️ How to automate your data pipeline in 50 lines of Python. Link in bio. #DataEngineering",
     datetime.now() + timedelta(days=2)),
]

for platform_name, text, when in week_of_content:
    calendar.add_post(platform_name, text, when)
Auto-Engagement Bot
Automatically find and engage (like/retweet) with relevant content:
class AutoEngageBot:
    """Searches for tweets matching keywords and likes/retweets each at most once."""

    def __init__(self, twitter: "TwitterAutomation", keywords: list):
        self.twitter = twitter
        self.keywords = keywords
        self.engaged = set()  # tweet ids already engaged with, to avoid duplicates

    def find_relevant_tweets(self) -> list:
        """Search recent English, non-retweet tweets matching each keyword."""
        all_tweets = []
        for keyword in self.keywords:
            tweets = self.twitter.client.search_recent_tweets(
                query=f"{keyword} -is:retweet lang:en",
                tweet_fields=['author_id', 'created_at', 'public_metrics'],
                max_results=10
            )
            if tweets.data:
                all_tweets.extend(tweets.data)
        return all_tweets

    def engage_with_tweet(self, tweet_id: str, like: bool = True, retweet: bool = False):
        """Like and optionally retweet a post, at most once per tweet id."""
        if tweet_id in self.engaged:
            return
        # BUG FIX: tweepy's v2 Client.like()/retweet() act on behalf of the
        # authenticated user and take only the tweet id; the original passed
        # the user id as an extra positional argument (TypeError at runtime).
        if like:
            self.twitter.client.like(tweet_id)
        if retweet:
            self.twitter.client.retweet(tweet_id)
        self.engaged.add(tweet_id)
        time.sleep(2)  # light throttle to stay clear of rate limits
Analytics Reporting
def generate_analytics_report(twitter: "TwitterAutomation", days: int = 30) -> dict:
    """Summarize engagement on the authenticated user's recent tweets.

    Only tweets from the last ``days`` days are counted (up to the single
    API page of 100). Returns an empty dict when there are no tweets in
    the window.
    """
    me = twitter.client.get_me().data.id
    # BUG FIX: `days` was accepted but completely ignored; pass it to the
    # API as start_time so the report actually covers the requested window.
    window_start = datetime.now(timezone.utc) - timedelta(days=days)
    tweets = twitter.client.get_users_tweets(
        id=me,
        start_time=window_start,
        tweet_fields=['public_metrics', 'created_at'],
        max_results=100  # single page; paginate for accounts tweeting more
    )
    if not tweets.data:
        return {}
    metrics = [t.public_metrics for t in tweets.data]
    total_likes = sum(m['like_count'] for m in metrics)
    total_retweets = sum(m['retweet_count'] for m in metrics)
    total_replies = sum(m['reply_count'] for m in metrics)
    best_tweet = max(tweets.data, key=lambda t: t.public_metrics['like_count'])
    return {
        "total_tweets": len(tweets.data),
        "total_likes": total_likes,
        "total_retweets": total_retweets,
        "total_replies": total_replies,
        "avg_likes": total_likes / len(tweets.data),
        "best_performing": {
            "text": best_tweet.text[:100],
            "likes": best_tweet.public_metrics['like_count']
        }
    }
Complete Automation Pipeline
def run_daily_social_automation(
    twitter: "TwitterAutomation",
    calendar: "ContentCalendar"
):
    """Run the full daily pipeline: post due content, engage, report.

    Side effects: posts to Twitter, rewrites the calendar CSV with updated
    ``posted`` flags, and prints a short analytics summary to stdout.
    """
    print(f"Starting social automation: {datetime.now()}")

    # 1. Post scheduled content that has come due.
    pending = calendar.get_pending_posts('twitter')
    for post in pending:
        content = post.content
        if post.hashtags:
            content += ' ' + ' '.join(f'#{tag}' for tag in post.hashtags)
        twitter.post_tweet(content, post.image_path)
        # Only marked after post_tweet returns; a raise leaves it pending.
        post.posted = True
        print(f"Posted: {content[:50]}...")
    calendar._save()  # persist the updated posted flags

    # 2. Engage with relevant content.
    bot = AutoEngageBot(twitter, keywords=['python automation', 'web scraping', 'data engineering'])
    tweets = bot.find_relevant_tweets()
    for tweet in tweets[:5]:  # limit to 5 engagements per run
        bot.engage_with_tweet(tweet.id, like=True)

    # 3. Generate report.
    report = generate_analytics_report(twitter)
    print("\n=== Today's Analytics ===")
    print(f"Total likes: {report.get('total_likes', 0)}")
    print(f"Total retweets: {report.get('total_retweets', 0)}")
# Run daily (e.g. from cron). The original called
# run_daily_social_automation(twitter, calendar) with `twitter` never
# defined anywhere — a guaranteed NameError. Build the client here from
# environment variables so credentials never live in source control.
if __name__ == "__main__":
    twitter = TwitterAutomation(
        api_key=os.environ["TWITTER_API_KEY"],
        api_secret=os.environ["TWITTER_API_SECRET"],
        access_token=os.environ["TWITTER_ACCESS_TOKEN"],
        access_secret=os.environ["TWITTER_ACCESS_SECRET"],
    )
    calendar = ContentCalendar()
    run_daily_social_automation(twitter, calendar)
Want the Complete Marketing Automation Toolkit?
This social media automation is part of a larger toolkit for growing an online presence on autopilot.
👉 Get 50+ Python automation scripts — social media schedulers, email campaigns, content generators, analytics tools, and more.
Build your automated marketing engine once. Run it forever.
Top comments (0)