Automate Your SEO: 6 Scripts and Plugins That Actually Save Hours Weekly
Quick Implementation Checklist
Start with these in order:
- Table of Contents – Add the PHP function, see immediate SEO benefit
- Dynamic dates – Stop manually updating years in titles
- Broken link checker – Run monthly, fix issues before Google finds them
- XML sitemap – Real-time generation beats static files
- Meta descriptions – Bulk generate for products/categories
- Image optimization – WebP + alt text = better Core Web Vitals
Each automation saves 2-5 hours monthly. Implement all six? That’s a full day back every month to work on strategy instead of repetitive tasks.
Stop doing SEO tasks manually that a script could handle in seconds. Here’s exactly how to automate the repetitive stuff, with actual code you can use today.
1. Auto-Generate Table of Contents Without Another Plugin
Google loves structured data and often shows jump links directly in search results. But what if you don't want a third-party plugin like Table of Contents Plus or Easy Table of Contents slowing down your site?
WordPress Solution (No Plugin Needed)
Add this to your theme’s functions.php:
function auto_generate_toc($content) {
    // Only for single posts
    if (!is_single()) return $content;

    // Find all H2 and H3 headings
    preg_match_all('/<h([2-3]).*?>(.*?)<\/h[2-3]>/i', $content, $matches, PREG_SET_ORDER);
    if (count($matches) < 3) return $content; // Only add TOC if 3+ headings

    $toc = '<div class="auto-toc"><p><strong>Quick Navigation</strong></p><ol>';
    $content_updated = $content;

    foreach ($matches as $index => $match) {
        $heading_text = strip_tags($match[2]);
        $anchor = 'section-' . ($index + 1);
        $level = $match[1];

        // Create TOC entry (indent H3s)
        if ($level == '3') {
            $toc .= '<li style="margin-left:20px">';
        } else {
            $toc .= '<li>';
        }
        $toc .= '<a href="#' . $anchor . '">' . $heading_text . '</a></li>';

        // Add ID to heading in content
        $heading_with_id = str_replace(
            '<h' . $level,
            '<h' . $level . ' id="' . $anchor . '"',
            $match[0]
        );
        // Replace only the first occurrence, so duplicate headings still get unique IDs
        $pos = strpos($content_updated, $match[0]);
        if ($pos !== false) {
            $content_updated = substr_replace($content_updated, $heading_with_id, $pos, strlen($match[0]));
        }
    }
    $toc .= '</ol></div>';

    // Insert after first paragraph
    $content_updated = preg_replace('/<\/p>/', '</p>' . $toc, $content_updated, 1);
    return $content_updated;
}
// Priority 20 runs after wpautop, so paragraph tags exist when we insert the TOC
add_filter('the_content', 'auto_generate_toc', 20);
// Add basic CSS
function toc_styles() {
    echo '<style>
        .auto-toc {
            background: #f5f5f5;
            padding: 20px;
            margin: 20px 0;
            border-left: 3px solid #2271b1;
        }
        .auto-toc ol { margin: 10px 0 0 20px; }
        .auto-toc a { text-decoration: none; }
    </style>';
}
add_action('wp_head', 'toc_styles');
Python/Django Solution
For Django sites, create a template filter:
# In your app/templatetags/toc_filter.py
import re

from django import template
from django.utils.safestring import mark_safe

register = template.Library()

@register.filter(name='auto_toc')
def generate_toc(content):
    # Find all headings
    headings = re.findall(r'<h([2-3]).*?>(.*?)</h[2-3]>', content, re.IGNORECASE)
    if len(headings) < 3:
        return mark_safe(content)

    toc_html = '<div class="toc-container"><strong>Contents</strong><ol>'
    modified_content = content

    for idx, (level, text) in enumerate(headings):
        clean_text = re.sub('<.*?>', '', text)  # Remove any HTML tags
        anchor_id = f'toc-{idx + 1}'

        # Add to TOC with indentation for H3
        indent = 'style="margin-left:20px"' if level == '3' else ''
        toc_html += f'<li {indent}><a href="#{anchor_id}">{clean_text}</a></li>'

        # Add IDs to headings (a lambda avoids backslash escapes in the replacement string)
        pattern = f'<h{level}.*?>{re.escape(text)}</h{level}>'
        replacement = f'<h{level} id="{anchor_id}">{text}</h{level}>'
        modified_content = re.sub(pattern, lambda m: replacement, modified_content, count=1)

    toc_html += '</ol></div>'

    # Insert TOC after first </p> tag
    p_end = modified_content.find('</p>')
    if p_end != -1:
        modified_content = modified_content[:p_end + 4] + toc_html + modified_content[p_end + 4:]
    return mark_safe(modified_content)
Use in a template (after loading the library): {% load toc_filter %} then {{ article.content|auto_toc }}
Node.js/Express Solution
// toc-generator.js
function generateTOC(content) {
  const headingRegex = /<h([2-3]).*?>(.*?)<\/h[2-3]>/gi;
  const matches = [...content.matchAll(headingRegex)];
  if (matches.length < 3) return content;

  let toc = '<div class="toc"><strong>Jump to:</strong><ul>';
  let updatedContent = content;

  matches.forEach((match, index) => {
    const level = match[1];
    const text = match[2].replace(/<.*?>/g, ''); // Strip HTML
    const id = `section-${index + 1}`;

    // Build TOC
    const indent = level === '3' ? 'class="indent"' : '';
    toc += `<li ${indent}><a href="#${id}">${text}</a></li>`;

    // Add ID to heading (String.replace with a string swaps only the first occurrence)
    const newHeading = match[0].replace(/<h[2-3]/i, `$& id="${id}"`);
    updatedContent = updatedContent.replace(match[0], newHeading);
  });
  toc += '</ul></div>';

  // Insert after first paragraph (prepend if no paragraph exists)
  const firstP = updatedContent.indexOf('</p>');
  if (firstP === -1) return toc + updatedContent;
  return updatedContent.slice(0, firstP + 4) + toc + updatedContent.slice(firstP + 4);
}

module.exports = generateTOC;
2. Bulk Check for Broken Links with Python
Forget paying for Broken Link Checker Pro or Screaming Frog. This Python script finds all broken links on your site for free.
import csv
import concurrent.futures
from collections import defaultdict
from urllib.parse import urljoin, urlparse

import requests
from bs4 import BeautifulSoup

def check_link_status(url, timeout=5):
    """Check a single URL's status"""
    try:
        response = requests.head(url, timeout=timeout, allow_redirects=True)
        if response.status_code == 405:
            # Some servers reject HEAD requests; retry with GET
            response = requests.get(url, timeout=timeout, allow_redirects=True)
        return {
            'url': url,
            'status_code': response.status_code,
            'redirect_to': response.url if response.url != url else None
        }
    except requests.exceptions.RequestException as e:
        return {
            'url': url,
            'status_code': 'ERROR',
            'error': str(e)[:50]  # Truncate error message
        }

def extract_all_links(page_url):
    """Extract all internal links from a page"""
    try:
        response = requests.get(page_url, timeout=10)
        soup = BeautifulSoup(response.text, 'html.parser')
        base_domain = urlparse(page_url).netloc
        internal_links = set()

        # Find all links and image sources
        for tag in soup.find_all(['a', 'img']):
            if tag.name == 'a':
                link = tag.get('href')
            else:  # img tag
                link = tag.get('src')
            if link:
                full_url = urljoin(page_url, link)
                # Only check internal links
                if urlparse(full_url).netloc == base_domain:
                    internal_links.add(full_url)
        return internal_links
    except Exception as e:
        print(f"Error scanning {page_url}: {e}")
        return set()

def scan_website_for_broken_links(csv_file):
    """Main function - reads URLs from CSV and checks all their links"""
    # Read URLs to check
    with open(csv_file, 'r') as f:
        reader = csv.reader(f)
        next(reader, None)  # Skip header if present
        urls_to_scan = [row[0] for row in reader if row]

    print(f"Scanning {len(urls_to_scan)} pages for broken links...")
    links_by_page = defaultdict(set)

    # Step 1: Extract all links from all pages
    for page_url in urls_to_scan:
        print(f"Extracting links from: {page_url}")
        links = extract_all_links(page_url)
        links_by_page[page_url] = links
        print(f"  Found {len(links)} internal links")

    # Step 2: Get unique links to check
    all_links = set()
    for links in links_by_page.values():
        all_links.update(links)
    print(f"\nChecking {len(all_links)} unique links...")

    # Step 3: Check all links in parallel
    link_results = {}
    with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
        future_to_url = {executor.submit(check_link_status, url): url for url in all_links}
        for future in concurrent.futures.as_completed(future_to_url):
            result = future.result()
            link_results[result['url']] = result
            # Print broken links immediately
            if isinstance(result.get('status_code'), int) and result['status_code'] >= 400:
                print(f"❌ BROKEN: {result['url']} (Status: {result['status_code']})")

    # Step 4: Create report
    with open('broken_links_report.csv', 'w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        writer.writerow(['Page URL', 'Broken Link', 'Status Code', 'Error/Redirect'])
        for page_url, links in links_by_page.items():
            for link in links:
                result = link_results.get(link, {})
                status = result.get('status_code', 'Not checked')
                # Only write problematic links
                if status == 'ERROR' or (isinstance(status, int) and status >= 400):
                    error_or_redirect = result.get('error', result.get('redirect_to', ''))
                    writer.writerow([page_url, link, status, error_or_redirect])

    # Summary
    broken_count = sum(
        1 for r in link_results.values()
        if r.get('status_code') == 'ERROR'
        or (isinstance(r.get('status_code'), int) and r['status_code'] >= 400)
    )
    print(f"\n✅ Scan complete!")
    print(f"Total links checked: {len(all_links)}")
    print(f"Broken links found: {broken_count}")
    print(f"Report saved to: broken_links_report.csv")

# Run it
if __name__ == "__main__":
    # Create a CSV file called 'urls_to_check.csv' with one URL per row
    scan_website_for_broken_links('urls_to_check.csv')
Create urls_to_check.csv with one URL per line:
https://yoursite.com/
https://yoursite.com/about/
https://yoursite.com/blog/
Run the script and get a complete report of broken links, including exactly where each one appears.
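Maintaining that CSV by hand gets old fast. As a shortcut, you can seed it from your sitemap; here's a minimal sketch, assuming a standard single-file sitemap (not a sitemap index):
import csv
import xml.etree.ElementTree as ET

import requests

def seed_urls_from_sitemap(sitemap_url, out_csv='urls_to_check.csv'):
    """Write every <loc> in the sitemap to the CSV the scanner reads."""
    ns = {'sm': 'http://www.sitemaps.org/schemas/sitemap/0.9'}
    root = ET.fromstring(requests.get(sitemap_url, timeout=10).content)
    urls = [loc.text.strip() for loc in root.findall('.//sm:loc', ns)]
    with open(out_csv, 'w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        writer.writerow(['url'])  # header row (the scanner skips it)
        writer.writerows([u] for u in urls)
    print(f"Wrote {len(urls)} URLs to {out_csv}")

seed_urls_from_sitemap('https://yoursite.com/sitemap.xml')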
3. Generate Meta Descriptions in Bulk Using AI
Got 500 product pages with no meta descriptions? Here’s how to generate them all in one go.
Using OpenAI’s GPT
import csv
import time

import openai

# Set your API key (this uses the legacy openai<1.0 SDK;
# on openai>=1.0 use OpenAI().chat.completions.create instead)
openai.api_key = "your-api-key-here"

def generate_meta_description(product_title, brand_voice="professional"):
    """Generate a single meta description"""
    prompt = f"""Write a meta description for this product:

Product: {product_title}

Requirements:
- Length: 150-160 characters
- Include the product name
- Mention a key benefit
- Sound {brand_voice}
- No quotation marks
- Return only the meta description text"""

    try:
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": prompt}],
            temperature=0.7,
            max_tokens=50
        )
        meta = response.choices[0].message.content.strip()
        # Ensure proper length
        if len(meta) > 160:
            meta = meta[:157] + "..."
        return meta
    except Exception as e:
        return f"Error: {str(e)}"

def process_products_csv(input_file, output_file):
    """Process an entire CSV of products"""
    processed = 0
    with open(input_file, 'r', encoding='utf-8') as infile, \
         open(output_file, 'w', newline='', encoding='utf-8') as outfile:
        reader = csv.DictReader(infile)
        fieldnames = reader.fieldnames + ['meta_description']
        writer = csv.DictWriter(outfile, fieldnames=fieldnames)
        writer.writeheader()

        for row in reader:
            product = row.get('product_title', row.get('title', ''))
            if product:
                print(f"Processing: {product}")
                row['meta_description'] = generate_meta_description(product)
                processed += 1
                # Rate limiting: pause every 10 requests
                if processed % 10 == 0:
                    time.sleep(1)
            writer.writerow(row)

    print(f"\n✅ Generated {processed} meta descriptions")
    print(f"Output saved to: {output_file}")

# Run it
process_products_csv('products.csv', 'products_with_meta.csv')
Using Google’s Gemini (Often Free Tier Available)
import google.generativeai as genai

# Configure Gemini (model names change over time; check the current list)
genai.configure(api_key='your-gemini-api-key')
model = genai.GenerativeModel('gemini-pro')

def generate_meta_with_gemini(product_title):
    """Generate a meta description using Gemini"""
    prompt = f"""Create a meta description for: {product_title}

Must be exactly 150-160 characters.
Include product name and main benefit.
Write in active voice.
Return only the description, no quotes."""

    try:
        response = model.generate_content(prompt)
        return response.text.strip()[:160]
    except Exception as e:
        return f"Failed: {str(e)}"
# Same CSV processing logic as above
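Rather than copy-pasting that CSV loop for each provider, you can factor it out so the generator function is a parameter. A small sketch (process_csv_with is my name, not from any SDK; it assumes the same CSV columns as the OpenAI example):
import csv
import time

def process_csv_with(generator, input_file, output_file):
    """Run any title -> description generator over a products CSV."""
    with open(input_file, 'r', encoding='utf-8') as infile, \
         open(output_file, 'w', newline='', encoding='utf-8') as outfile:
        reader = csv.DictReader(infile)
        writer = csv.DictWriter(outfile, fieldnames=reader.fieldnames + ['meta_description'])
        writer.writeheader()
        for count, row in enumerate(reader, start=1):
            title = row.get('product_title', row.get('title', ''))
            if title:
                row['meta_description'] = generator(title)
            if count % 10 == 0:
                time.sleep(1)  # simple rate limiting
            writer.writerow(row)

# Same pipeline, different provider:
process_csv_with(generate_meta_with_gemini, 'products.csv', 'products_with_meta.csv')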
Using Perplexity API (Good for Research-Based Descriptions)
import requests

PERPLEXITY_API_KEY = "your-api-key"

def generate_with_perplexity(product_title, category=""):
    """Generate using Perplexity for more factual descriptions"""
    headers = {
        "Authorization": f"Bearer {PERPLEXITY_API_KEY}",
        "Content-Type": "application/json"
    }
    prompt = f"Write a 155 character meta description for {product_title}"
    if category:
        prompt += f" in the {category} category"

    payload = {
        "model": "mistral-7b-instruct",  # check Perplexity's docs for currently available models
        "messages": [{"role": "user", "content": prompt}],
        "temperature": 0.5,
        "max_tokens": 60
    }
    try:
        response = requests.post(
            "https://api.perplexity.ai/chat/completions",
            headers=headers,
            json=payload
        )
        if response.status_code == 200:
            result = response.json()
            return result['choices'][0]['message']['content'][:160]
        else:
            return "API Error"
    except Exception as e:
        return f"Error: {str(e)}"
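Whichever API you use, spot-check the output before uploading: models don't reliably hit a character budget. Here's a quick validation pass over the generated CSV (column names match the scripts above):
import csv

def audit_meta_lengths(csv_file, lo=120, hi=160):
    """Flag rows whose meta description is missing, too short, too long, or an API error."""
    with open(csv_file, 'r', encoding='utf-8') as f:
        for row in csv.DictReader(f):
            meta = (row.get('meta_description') or '').strip()
            if not (lo <= len(meta) <= hi) or meta.startswith(('Error', 'Failed', 'API Error')):
                title = row.get('product_title', row.get('title', '?'))
                print(f"{title}: {len(meta)} chars -> {meta[:60]}")

audit_meta_lengths('products_with_meta.csv')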
4. Auto-Update Dates and Dynamic Variables
Keep your content fresh without manually updating dates everywhere.
WordPress – Using RankMath or Yoast Variables
Both RankMath and Yoast SEO support variables, but here’s how to create your own:
// Add to functions.php for custom variables
function dynamic_seo_variables($text) {
    // Current dates
    $current_year = date('Y');
    $current_month = date('F');
    $next_year = date('Y', strtotime('+1 year'));

    // Custom calculations
    $days_until_black_friday = calculate_days_until_black_friday();
    $current_quarter = 'Q' . ceil(date('n') / 3) . ' ' . date('Y');

    $replacements = array(
        '%%currentyear%%' => $current_year,
        '%%currentmonth%%' => $current_month,
        '%%nextyear%%' => $next_year,
        '%%currentquarter%%' => $current_quarter,
        '%%blackfriday_countdown%%' => $days_until_black_friday . ' days',
        '%%season%%' => get_current_season(),
        '%%last_updated%%' => get_the_modified_date('F Y')
    );
    return str_replace(array_keys($replacements), array_values($replacements), $text);
}

function calculate_days_until_black_friday() {
    // "fourth thursday of november YYYY" is the unambiguous strtotime form
    $year = date('Y');
    $thanksgiving = strtotime("fourth thursday of november $year");
    $black_friday = strtotime('+1 day', $thanksgiving);
    if (time() > $black_friday) {
        // If past this year's Black Friday, calculate next year's
        $year++;
        $thanksgiving = strtotime("fourth thursday of november $year");
        $black_friday = strtotime('+1 day', $thanksgiving);
    }
    return floor(($black_friday - time()) / 86400);
}

function get_current_season() {
    $month = date('n');
    if ($month >= 3 && $month <= 5) return 'Spring';
    if ($month >= 6 && $month <= 8) return 'Summer';
    if ($month >= 9 && $month <= 11) return 'Fall';
    return 'Winter';
}

// Apply to title and content
add_filter('the_title', 'dynamic_seo_variables', 10);
add_filter('the_content', 'dynamic_seo_variables', 10);

// Also work with RankMath
add_filter('rank_math/frontend/title', 'dynamic_seo_variables', 10);
add_filter('rank_math/frontend/description', 'dynamic_seo_variables', 10);

// Also work with Yoast
add_filter('wpseo_title', 'dynamic_seo_variables', 10);
add_filter('wpseo_metadesc', 'dynamic_seo_variables', 10);
Now write titles like: “Best SEO Tools for %%currentyear%% (Updated %%currentmonth%%)”
For Custom PHP Sites
class DynamicContent {
    private static $variables = [];

    public static function init() {
        self::$variables = [
            '{{year}}' => date('Y'),
            '{{month}}' => date('F'),
            '{{month_year}}' => date('F Y'),
            '{{next_month}}' => date('F', strtotime('+1 month')),
            '{{quarter}}' => 'Q' . ceil(date('n') / 3),
            '{{days_until_christmas}}' => self::daysUntil('12-25'),
            '{{days_until_newyear}}' => self::daysUntil('01-01', true),
            '{{week_number}}' => date('W'),
            '{{financial_year}}' => self::getFinancialYear()
        ];
    }

    private static function daysUntil($date, $next_year = false) {
        $target = strtotime(date('Y') . '-' . $date);
        if ($next_year || time() > $target) {
            $target = strtotime((date('Y') + 1) . '-' . $date);
        }
        return floor(($target - time()) / 86400);
    }

    private static function getFinancialYear() {
        // April-to-March financial year, e.g. "2025-26"
        $month = date('n');
        return $month >= 4 ? date('Y') . '-' . (date('y') + 1) : (date('Y') - 1) . '-' . date('y');
    }

    public static function parse($content) {
        if (empty(self::$variables)) {
            self::init();
        }
        return strtr($content, self::$variables);
    }
}

// Usage
echo DynamicContent::parse("Best Deals {{month_year}} - Only {{days_until_christmas}} Days Until Christmas!");
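If you're not on PHP, the same pattern is just a dictionary and a replace loop. A minimal Python sketch of the same idea:
from datetime import date, datetime

def dynamic_vars():
    """Build the replacement map fresh on each render."""
    now = datetime.now()
    christmas = date(now.year, 12, 25)
    if now.date() > christmas:
        christmas = date(now.year + 1, 12, 25)
    return {
        '{{year}}': now.strftime('%Y'),
        '{{month}}': now.strftime('%B'),
        '{{month_year}}': now.strftime('%B %Y'),
        '{{quarter}}': f'Q{(now.month - 1) // 3 + 1}',
        '{{week_number}}': now.strftime('%W'),
        '{{days_until_christmas}}': str((christmas - now.date()).days),
    }

def parse(content):
    for token, value in dynamic_vars().items():
        content = content.replace(token, value)
    return content

print(parse("Best Deals {{month_year}} - Only {{days_until_christmas}} Days Until Christmas!"))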
5. Real-Time XML Sitemap from Database
Stop using sitemap plugins that update on a schedule. Generate fresh sitemaps directly from your database every time Google visits.
PHP Version (Works with Any CMS)
Create a file called sitemap.php in your root directory:
<?php
header('Content-Type: application/xml; charset=utf-8');

// Database connection (adjust for your setup)
$host = 'localhost';
$db   = 'your_database';
$user = 'your_user';
$pass = 'your_password';

try {
    $pdo = new PDO("mysql:host=$host;dbname=$db;charset=utf8", $user, $pass);
    $pdo->setAttribute(PDO::ATTR_ERRMODE, PDO::ERRMODE_EXCEPTION);
} catch (PDOException $e) {
    die('Database connection failed');
}

// Start XML
echo '<?xml version="1.0" encoding="UTF-8"?>';
echo '<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">';

// Homepage (always include)
echo '<url>';
echo '<loc>https://yoursite.com/</loc>';
echo '<lastmod>' . date('Y-m-d') . '</lastmod>';
echo '<changefreq>daily</changefreq>';
echo '<priority>1.0</priority>';
echo '</url>';

// Pages/Posts from database
$stmt = $pdo->prepare("
    SELECT
        slug,
        updated_at,
        post_type,
        priority
    FROM posts
    WHERE status = 'published'
        AND post_type IN ('page', 'post', 'product')
    ORDER BY priority DESC, updated_at DESC
");
$stmt->execute();

while ($row = $stmt->fetch(PDO::FETCH_ASSOC)) {
    echo '<url>';
    // Build URL based on post type
    switch ($row['post_type']) {
        case 'product':
            $url = 'https://yoursite.com/products/' . $row['slug'];
            break;
        case 'post':
            $url = 'https://yoursite.com/blog/' . $row['slug'];
            break;
        default:
            $url = 'https://yoursite.com/' . $row['slug'];
    }
    echo '<loc>' . htmlspecialchars($url) . '</loc>';
    echo '<lastmod>' . date('Y-m-d', strtotime($row['updated_at'])) . '</lastmod>';

    // Dynamic changefreq based on last update
    $days_old = (time() - strtotime($row['updated_at'])) / 86400;
    if ($days_old < 7) {
        $freq = 'daily';
    } elseif ($days_old < 30) {
        $freq = 'weekly';
    } elseif ($days_old < 180) {
        $freq = 'monthly';
    } else {
        $freq = 'yearly';
    }
    echo '<changefreq>' . $freq . '</changefreq>';
    echo '<priority>' . number_format($row['priority'], 1) . '</priority>';
    echo '</url>';
}

// Categories/Tags (if needed)
$stmt = $pdo->prepare("
    SELECT slug, updated_at
    FROM categories
    WHERE post_count > 0
");
$stmt->execute();

while ($row = $stmt->fetch(PDO::FETCH_ASSOC)) {
    echo '<url>';
    echo '<loc>https://yoursite.com/category/' . htmlspecialchars($row['slug']) . '</loc>';
    echo '<lastmod>' . date('Y-m-d', strtotime($row['updated_at'])) . '</lastmod>';
    echo '<changefreq>weekly</changefreq>';
    echo '<priority>0.8</priority>';
    echo '</url>';
}

echo '</urlset>';
?>
Add to .htaccess to serve it at a pretty URL:
RewriteRule ^sitemap\.xml$ sitemap.php [L]
Python/Flask Version
from datetime import datetime

import pymysql
from flask import Flask, make_response

app = Flask(__name__)

def get_db():
    return pymysql.connect(
        host='localhost',
        user='your_user',
        password='your_password',
        database='your_database',
        cursorclass=pymysql.cursors.DictCursor
    )

@app.route('/sitemap.xml')
def generate_sitemap():
    """Generate XML sitemap from database"""
    db = get_db()
    cursor = db.cursor()

    xml_parts = ['<?xml version="1.0" encoding="UTF-8"?>']
    xml_parts.append('<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">')

    # Add homepage
    xml_parts.append(f"""
    <url>
        <loc>https://yoursite.com/</loc>
        <lastmod>{datetime.now().strftime('%Y-%m-%d')}</lastmod>
        <changefreq>daily</changefreq>
        <priority>1.0</priority>
    </url>
    """)

    # Get all published content
    cursor.execute("""
        SELECT
            url_slug,
            last_modified,
            content_type,
            importance_score
        FROM content
        WHERE published = 1
        ORDER BY importance_score DESC
    """)

    for item in cursor.fetchall():
        # Build URL based on content type
        if item['content_type'] == 'product':
            url = f"https://yoursite.com/shop/{item['url_slug']}"
        elif item['content_type'] == 'blog':
            url = f"https://yoursite.com/blog/{item['url_slug']}"
        else:
            url = f"https://yoursite.com/{item['url_slug']}"

        # Calculate change frequency from content age
        if item['last_modified']:
            days_old = (datetime.now() - item['last_modified']).days
            if days_old < 7:
                freq = 'daily'
            elif days_old < 30:
                freq = 'weekly'
            elif days_old < 365:
                freq = 'monthly'
            else:
                freq = 'yearly'
        else:
            freq = 'monthly'

        # Add to sitemap
        priority = (item['importance_score'] or 50) / 100
        lastmod = (item['last_modified'] or datetime.now()).strftime('%Y-%m-%d')
        xml_parts.append(f"""
    <url>
        <loc>{url}</loc>
        <lastmod>{lastmod}</lastmod>
        <changefreq>{freq}</changefreq>
        <priority>{priority:.1f}</priority>
    </url>
    """)

    xml_parts.append('</urlset>')
    cursor.close()
    db.close()

    response = make_response(''.join(xml_parts))
    response.headers['Content-Type'] = 'application/xml'
    return response

if __name__ == '__main__':
    app.run(debug=True)
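One thing to watch: the sitemap protocol caps a single file at 50,000 URLs and 50MB uncompressed, so split into a sitemap index before you get there. Here's a quick smoke test for whichever version you deploy (yoursite.com is a placeholder):
import requests
import xml.etree.ElementTree as ET

resp = requests.get('https://yoursite.com/sitemap.xml', timeout=10)
resp.raise_for_status()

root = ET.fromstring(resp.content)  # raises ParseError on malformed XML
ns = {'sm': 'http://www.sitemaps.org/schemas/sitemap/0.9'}
urls = root.findall('.//sm:url', ns)

print(f"{len(urls)} URLs in sitemap")
assert len(urls) <= 50000, "Time to split into a sitemap index"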
6. WordPress Image Optimization Beyond Basic Plugins
Yes, you can use Smush or ShortPixel, but here's what those plugins typically don't handle (or charge extra for):
Bulk Convert to WebP (Including Existing Images)
#!/bin/bash
# Save as convert-to-webp.sh
# Requires the cwebp tool (from the libwebp package)

# Find all JPG and PNG files
find /path/to/wordpress/uploads -type f \( -name "*.jpg" -o -name "*.png" \) | while read -r img; do
    # Check if WebP version already exists
    webp_file="${img%.*}.webp"
    if [ ! -f "$webp_file" ]; then
        echo "Converting: $img"
        # Convert with 85% quality
        cwebp -q 85 "$img" -o "$webp_file"
    fi
done
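If you'd rather stay in Python than install cwebp, Pillow can do the same batch conversion; here's a rough equivalent (the uploads path is a placeholder):
from pathlib import Path

from PIL import Image  # pip install Pillow

def convert_tree_to_webp(root, quality=85):
    """Create a .webp sibling for every JPG/PNG that doesn't have one yet."""
    for img_path in Path(root).rglob('*'):
        if img_path.suffix.lower() not in ('.jpg', '.jpeg', '.png'):
            continue
        webp_path = img_path.with_suffix('.webp')
        if webp_path.exists():
            continue
        print(f"Converting: {img_path}")
        with Image.open(img_path) as im:
            if im.mode == 'P':
                im = im.convert('RGBA')  # palette images need converting first
            im.save(webp_path, 'WEBP', quality=quality)

convert_tree_to_webp('/path/to/wordpress/uploads')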
Auto-Generate Missing Alt Text
For WordPress, add to functions.php:
function auto_generate_alt_text($attachment_id) {
    $attachment = get_post($attachment_id);
    if (!$attachment) return;

    // Check if alt text already exists
    $existing_alt = get_post_meta($attachment_id, '_wp_attachment_image_alt', true);
    if (empty($existing_alt)) {
        // Generate from filename
        $filename = basename(get_attached_file($attachment_id));
        $name = pathinfo($filename, PATHINFO_FILENAME);

        // Clean up filename
        $alt_text = str_replace(array('-', '_'), ' ', $name);
        $alt_text = preg_replace('/\d{4,}/', '', $alt_text); // Remove long numbers
        $alt_text = preg_replace('/^(img|image|photo|pic|screenshot)\s*/i', '', $alt_text);
        $alt_text = ucwords(strtolower(trim($alt_text)));

        // Save as alt text
        update_post_meta($attachment_id, '_wp_attachment_image_alt', $alt_text);
    }
}
// Run when images are uploaded
add_action('add_attachment', 'auto_generate_alt_text');

// Fix existing images (run once)
function fix_all_alt_texts() {
    $attachments = get_posts(array(
        'post_type' => 'attachment',
        'posts_per_page' => -1,
        'post_mime_type' => 'image'
    ));
    foreach ($attachments as $attachment) {
        auto_generate_alt_text($attachment->ID);
    }
}
// Uncomment to run once: fix_all_alt_texts();
Serve WebP with Fallback (No Plugin Needed)
Add to .htaccess:
<IfModule mod_rewrite.c>
    RewriteEngine On
    # Check if browser accepts WebP
    RewriteCond %{HTTP_ACCEPT} image/webp
    # Check if WebP version exists (assumes this .htaccess sits in the document root)
    RewriteCond %{DOCUMENT_ROOT}/$1.webp -f
    # Serve WebP instead
    RewriteRule (.+)\.(jpg|jpeg|png)$ $1.webp [T=image/webp,E=accept:1]
</IfModule>

<IfModule mod_headers.c>
    Header append Vary Accept env=REDIRECT_accept
</IfModule>
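You can verify the negotiation works without opening DevTools: request the same JPG with and without the WebP Accept header and compare the Content-Type (the image URL below is a placeholder):
import requests

url = 'https://yoursite.com/wp-content/uploads/example.jpg'  # any real image on your site

as_webp = requests.get(url, headers={'Accept': 'image/webp'})
as_jpeg = requests.get(url, headers={'Accept': 'image/jpeg'})

print('WebP-capable client got:', as_webp.headers.get('Content-Type'))  # expect image/webp
print('Legacy client got:      ', as_jpeg.headers.get('Content-Type'))  # expect image/jpeg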
Hope you loved these tricks!