Building Native Telegram Bots with Maximum Performance
Telegram bots have become essential tools for automation, customer service, and integrations. While many frameworks exist, building natively against Telegram's API offers unparalleled speed and control. This guide will walk you through creating a high-performance Telegram bot from scratch.
Why Native Implementation?
Before we dive in, let's understand why native implementation matters:
- Zero Framework Overhead: No unnecessary abstraction layers
- Direct API Control: Full access to all Telegram features
- Performance Optimization: Fine-tuned request handling
- Minimal Dependencies: Reduced attack surface and smaller footprint
Prerequisites
- Python 3.9+ (we'll use Python for examples)
- A Telegram bot token (obtained by messaging @BotFather on Telegram)
- Basic understanding of HTTP and webhooks
Step 1: Getting Your Bot Token
First, create your bot with BotFather and obtain your API token:
/start
/newbot
MyAwesomeBot
myawesomebot
Save the token securely - we'll need it for all API calls.
Step 2: Setting Up the Basic Structure
Create a minimal bot handler with direct HTTP calls:
import requests
import json
from http.server import BaseHTTPRequestHandler, HTTPServer
class TelegramBot:
    """Thin wrapper around the Telegram Bot HTTP API using long polling."""

    def __init__(self, token):
        self.token = token
        self.api_url = f"https://api.telegram.org/bot{self.token}/"
        self.offset = 0  # Next update_id to request; advances as updates are consumed.

    def _make_request(self, method, params=None, timeout=35):
        """POST *params* to API *method* and return the decoded JSON response.

        A request timeout is applied so a dead connection cannot hang the bot
        forever (the original call had none). The default of 35s deliberately
        exceeds the 30s long-poll window used by get_updates, so healthy
        long-poll requests are never cut short.
        """
        url = f"{self.api_url}{method}"
        response = requests.post(url, json=params, timeout=timeout)
        return response.json()

    def get_updates(self):
        """Long-poll getUpdates and return the list of new updates (possibly empty).

        On success, advances self.offset past the newest update_id so Telegram
        does not redeliver already-seen updates on the next call.
        """
        params = {'offset': self.offset, 'timeout': 30}
        response = self._make_request('getUpdates', params)
        if response.get('ok'):
            updates = response['result']
            if updates:
                self.offset = updates[-1]['update_id'] + 1
            return updates
        return []
Step 3: Implementing Webhook or Long Polling
For production, webhooks are usually more efficient than long polling, because Telegram pushes each update to your server instead of your bot repeatedly asking for new ones. Here's how to set one up:
def set_webhook(self, url):
    """Register *url* as this bot's webhook endpoint via the setWebhook method."""
    payload = {'url': url}
    return self._make_request('setWebhook', payload)
class WebhookHandler(BaseHTTPRequestHandler):
    """HTTP endpoint for Telegram webhook updates; echoes incoming text messages."""

    bot = None  # Will be set after initialization

    def do_POST(self):
        """Read one Telegram update from the request body and echo its text back."""
        content_length = int(self.headers.get('Content-Length', 0))
        post_data = self.rfile.read(content_length)
        update = json.loads(post_data.decode('utf-8'))

        # Only plain text messages are echoed. Updates such as callback_query,
        # edited_message, or media-only messages previously raised KeyError
        # here, which produced a 500 and made Telegram redeliver the same
        # update indefinitely.
        message = update.get('message')
        if message and 'text' in message:
            chat_id = message['chat']['id']
            self.bot._make_request('sendMessage', {
                'chat_id': chat_id,
                'text': f"Echo: {message['text']}"
            })

        # Always acknowledge with 200 so Telegram marks the update delivered.
        self.send_response(200)
        self.end_headers()
Step 4: Optimizing Message Handling
For high throughput, implement efficient message processing:
class MessageProcessor:
    """Route incoming Telegram updates to per-command handlers."""

    def __init__(self, bot):
        self.bot = bot
        # Bare command (first token, without any "@BotName" suffix) -> handler.
        self.commands = {
            '/start': self.handle_start,
            '/help': self.handle_help
        }

    def process(self, update):
        """Handle one update: dispatch a known command or send a fallback reply."""
        if 'message' not in update:
            return
        message = update['message']
        text = message.get('text', '').strip()
        chat_id = message['chat']['id']
        # Clients may append arguments ("/start ref123") or, in group chats,
        # the bot's username ("/start@MyBot"). The original exact-text lookup
        # rejected both forms; matching on the bare first token accepts them
        # while behaving identically for plain "/start" / "/help".
        command = text.split()[0].split('@')[0] if text else ''
        if command in self.commands:
            self.commands[command](chat_id)
        else:
            self.bot._make_request('sendMessage', {
                'chat_id': chat_id,
                'text': "Command not recognized"
            })

    def handle_start(self, chat_id):
        """Greet the user in response to /start."""
        self.bot._make_request('sendMessage', {
            'chat_id': chat_id,
            'text': "Welcome to the bot!",
            'parse_mode': 'Markdown'
        })

    def handle_help(self, chat_id):
        """Send the command reference in response to /help."""
        help_text = """
*Available commands:*
/start - Start the bot
/help - Show this help
"""
        self.bot._make_request('sendMessage', {
            'chat_id': chat_id,
            'text': help_text,
            'parse_mode': 'Markdown'
        })
Step 5: Advanced Features
Inline Keyboards
def send_inline_keyboard(self, chat_id):
    """Send a two-button inline keyboard asking the user to pick an option."""
    buttons = [
        {'text': 'Option 1', 'callback_data': 'opt1'},
        {'text': 'Option 2', 'callback_data': 'opt2'}
    ]
    markup = {'inline_keyboard': [buttons]}
    payload = {
        'chat_id': chat_id,
        'text': 'Choose an option:',
        'reply_markup': json.dumps(markup)
    }
    self._make_request('sendMessage', payload)
File Handling
def send_document(self, chat_id, file_path):
    """Upload the file at *file_path* to *chat_id* via sendDocument.

    Returns the decoded JSON API response so callers can check 'ok' —
    the original discarded the response, making upload failures invisible.
    """
    url = f"{self.api_url}sendDocument"
    with open(file_path, 'rb') as file:
        files = {'document': file}
        data = {'chat_id': chat_id}
        # Uploads go as multipart/form-data; the timeout keeps a stalled
        # transfer from hanging the worker indefinitely.
        response = requests.post(url, files=files, data=data, timeout=60)
    return response.json()
Performance Optimization Techniques
- Connection Pooling:
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
# Reuse TCP connections across API calls and retry transient HTTPS failures
# automatically with exponential backoff.
session = requests.Session()
retry_policy = Retry(total=5, backoff_factor=1)
session.mount('https://', HTTPAdapter(max_retries=retry_policy))
- Async Processing:
import asyncio
import aiohttp
async def async_send_message(self, chat_id, text):
    """Send *text* to *chat_id* without blocking the event loop."""
    endpoint = f"{self.api_url}sendMessage"
    payload = {'chat_id': chat_id, 'text': text}
    async with aiohttp.ClientSession() as http:
        async with http.post(endpoint, json=payload) as reply:
            return await reply.json()
- Update Batching:
def process_updates_batch(self, updates):
    """Process a batch of updates concurrently on a small thread pool."""
    with ThreadPoolExecutor(max_workers=4) as pool:
        pending = [pool.submit(self.process, upd) for upd in updates]
        for done in as_completed(pending):
            done.result()  # Re-raises any exception from the worker.
Error Handling and Recovery
def safe_make_request(self, method, params=None, max_retries=3):
    """Call _make_request with retries.

    Retries 429 rate-limit responses after the server-suggested delay, and
    network errors with exponential backoff (re-raising on the final attempt).
    Any other non-ok response, or exhausting all retries, yields None.
    """
    last_attempt = max_retries - 1
    for attempt in range(max_retries):
        try:
            result = self._make_request(method, params)
        except requests.exceptions.RequestException:
            if attempt == last_attempt:
                raise
            time.sleep(2 ** attempt)
            continue
        if result.get('ok'):
            return result
        if result.get('error_code') == 429:
            wait = result.get('parameters', {}).get('retry_after', 5)
            time.sleep(wait)
            continue
        return None
    return None
Monitoring and Analytics
def log_message(self, message):
    """Forward a received message's metadata to the external logging endpoint.

    Logging is best-effort: the original let any failure of the logging
    service propagate and crash the message handler.
    """
    # NOTE(review): assumes 'chat' and 'from' are always present — 'from' is
    # absent on e.g. channel posts; confirm upstream filtering guarantees this.
    log_entry = {
        'timestamp': datetime.utcnow().isoformat(),
        'chat_id': message['chat']['id'],
        'user_id': message['from']['id'],
        'text': message.get('text'),
        'type': 'received'
    }
    try:
        # Send to your logging system; the short timeout bounds how much
        # latency a slow log collector can add to message handling.
        requests.post('https://logs.example.com/telegram', json=log_entry, timeout=5)
    except requests.exceptions.RequestException:
        pass  # Deliberate: telemetry must never take the bot down.
Deployment Considerations
- Webhook HTTPS Requirement: Telegram only delivers webhook updates over HTTPS with a valid certificate — terminate TLS with a reverse proxy such as Nginx in front of your bot process
- Rate Limiting: Implement proper queuing for high-volume bots
- Stateless Design: Store minimal session data to enable horizontal scaling
Complete Example
Here's a complete, minimal bot implementation:
import json
from http.server import BaseHTTPRequestHandler, HTTPServer
import requests
class TelegramBot:
    """Minimal synchronous client for the Telegram Bot API."""

    def __init__(self, token):
        self.token = token
        self.api_url = f"https://api.telegram.org/bot{self.token}/"

    def _make_request(self, method, params=None):
        """POST *params* to API *method* and return the decoded JSON response."""
        url = f"{self.api_url}{method}"
        # Timeout keeps a dead connection from hanging the webhook server.
        response = requests.post(url, json=params, timeout=30)
        return response.json()

    def send_message(self, chat_id, text):
        """Send plain *text* to *chat_id*."""
        return self._make_request('sendMessage', {
            'chat_id': chat_id,
            'text': text
        })

    def set_webhook(self, url):
        """Register *url* as this bot's webhook endpoint.

        The __main__ block below calls bot.set_webhook(...), but this method
        was missing from the original class, so startup raised AttributeError.
        """
        return self._make_request('setWebhook', {'url': url})
class BotHandler(BaseHTTPRequestHandler):
    """Webhook endpoint that echoes the text of any incoming message."""

    bot = None  # Injected (e.g. by run_server) before serving begins.

    def do_POST(self):
        """Parse one Telegram update from the request body and reply via the bot."""
        length = int(self.headers['Content-Length'])
        raw = self.rfile.read(length)
        update = json.loads(raw.decode('utf-8'))
        if 'message' in update:
            msg = update['message']
            reply = f"Echo: {msg.get('text', '')}"
            self.bot.send_message(msg['chat']['id'], reply)
        self.send_response(200)
        self.end_headers()
def run_server(bot, port=8443):
    """Attach *bot* to the handler class and block, serving webhook requests."""
    BotHandler.bot = bot
    httpd = HTTPServer(('0.0.0.0', port), BotHandler)
    print(f"Server running on port {port}")
    httpd.serve_forever()
if __name__ == '__main__':
    import sys

    # Token is taken from argv so it is not hard-coded; falls back to a placeholder.
    token = sys.argv[1] if len(sys.argv) > 1 else 'YOUR_TOKEN'
    bot = TelegramBot(token)
    # NOTE(review): the TelegramBot class defined above does not implement
    # set_webhook, so as written this call raises AttributeError at startup —
    # confirm the method exists (or add it) before deploying.
    bot.set_webhook('https://your.domain.com/webhook')
    run_server(bot)
Next Steps
- Implement proper authentication for sensitive actions
- Add database persistence for user data
- Set up monitoring and alerting
- Implement rate limiting to prevent abuse
By building your Telegram bot natively, you maintain complete control over performance characteristics and can optimize for your specific use case. This approach scales from simple bots to complex, high-volume applications while keeping resource usage minimal.
Remember to always respect Telegram's API guidelines and rate limits to ensure your bot remains operational. Happy bot building!
🚀 Stop Writing Boilerplate Prompts
If you want to skip the setup and code 10x faster with complete AI architecture patterns, grab my Senior React Developer AI Cookbook ($19). It includes Server Action prompt libraries, UI component generation loops, and hydration debugging strategies.
Browse all 10+ developer products at the Apollo AI Store | Or snipe Solana tokens free via @ApolloSniper_Bot.
Top comments (0)