Redis & Advanced Caching

API Response Caching

20 min Lesson 9 of 30

API Response Caching

API response caching is critical for building high-performance web services. By caching API responses in Redis, you can reduce database load, decrease response times, and improve scalability. This lesson covers practical patterns for caching REST API responses in Node.js.

Basic API Cache Middleware

Create a simple middleware that caches GET requests:

const express = require('express');
const Redis = require('ioredis');
const app = express();
// Shared Redis client used by all caching middleware below.
const redis = new Redis();

// Basic cache middleware
//
// Caches JSON responses of GET requests in Redis, keyed by the request URL.
// Non-GET requests pass straight through, and any Redis failure degrades
// gracefully to an uncached response.
//
// @param {number} durationSeconds - TTL for cached entries.
// @returns {Function} Express middleware.
function apiCache(durationSeconds) {
  return async (req, res, next) => {
    // Only cache GET requests; other verbs mutate state.
    if (req.method !== 'GET') {
      return next();
    }

    const key = `cache:${req.originalUrl}`;

    try {
      // Try to serve a previously cached response
      const cachedResponse = await redis.get(key);

      if (cachedResponse) {
        console.log('Cache hit:', key);
        return res.json(JSON.parse(cachedResponse));
      }

      console.log('Cache miss:', key);

      // Intercept res.json to cache the response on its way out
      const originalJson = res.json.bind(res);
      res.json = function (data) {
        // Fire-and-forget write; .catch() prevents an unhandled promise
        // rejection from crashing the process if Redis is unavailable.
        redis
          .setex(key, durationSeconds, JSON.stringify(data))
          .catch((err) => console.error('Cache write error:', err));
        return originalJson(data);
      };

      next();
    } catch (error) {
      console.error('Cache error:', error);
      next(); // Continue without cache on error
    }
  };
}

// Usage
// The product list is cached for 5 minutes; subsequent GETs within the TTL
// are served from Redis without touching the database.
app.get('/api/products',
  apiCache(300), // Cache for 5 minutes
  async (req, res) => {
    const products = await db.query('SELECT * FROM products');
    res.json(products);
  }
);
Tip: Always cache only GET requests. POST, PUT, DELETE, and PATCH requests should never be cached as they modify data.

Advanced Cache Key Design

Design cache keys that account for query parameters, pagination, and filters:

/**
 * Build a deterministic Redis cache key for a request.
 *
 * Query parameters are sorted alphabetically so the same parameters in any
 * order always map to the same key.
 *
 * @param {object} req - Express request (uses req.path and req.query).
 * @returns {string} Key of the form "cache:<path>" or "cache:<path>?<sorted query>".
 */
function generateCacheKey(req) {
  const queryString = Object.entries(req.query)
    .sort(([a], [b]) => (a < b ? -1 : a > b ? 1 : 0))
    .map(([name, value]) => `${name}=${value}`)
    .join('&');

  return queryString
    ? `cache:${req.path}?${queryString}`
    : `cache:${req.path}`;
}

// Enhanced middleware with custom key generation
//
// Like apiCache, but the cache-key strategy is pluggable and an X-Cache
// header (HIT/MISS) is set for observability.
//
// @param {number} durationSeconds - TTL for cached entries.
// @param {object} [options]
// @param {Function} [options.keyGenerator] - Maps a request to a cache key;
//   defaults to generateCacheKey (sorted query parameters).
// @returns {Function} Express middleware.
function smartApiCache(durationSeconds, options = {}) {
  const { keyGenerator = generateCacheKey } = options;

  return async (req, res, next) => {
    if (req.method !== 'GET') return next();

    const key = keyGenerator(req);

    try {
      const cachedResponse = await redis.get(key);

      if (cachedResponse) {
        const data = JSON.parse(cachedResponse);
        res.set('X-Cache', 'HIT');
        return res.json(data);
      }

      res.set('X-Cache', 'MISS');

      const originalJson = res.json.bind(res);
      res.json = function (data) {
        // Fire-and-forget write; .catch() avoids an unhandled promise
        // rejection if Redis is down at write time.
        redis
          .setex(key, durationSeconds, JSON.stringify(data))
          .catch((err) => console.error('Cache write error:', err));
        return originalJson(data);
      };

      next();
    } catch (error) {
      console.error('Cache error:', error);
      next();
    }
  };
}

// Usage with query parameters
// Each distinct (sorted) combination of category/page/limit gets its own
// cache entry via the default generateCacheKey.
app.get('/api/products', smartApiCache(300), async (req, res) => {
  const { category, page = 1, limit = 20 } = req.query;
  const products = await db.products.find({ category, page, limit });
  res.json(products);
});
Note: Cache keys should be deterministic. Always sort query parameters to ensure /api/products?page=1&limit=20 and /api/products?limit=20&page=1 generate the same cache key.

Conditional Caching

Only cache responses based on status codes and content types:

// Conditional caching middleware
//
// Caches the full response (status code, headers, JSON body) and only
// stores entries for successful (2xx) responses, so error responses are
// never served from cache.
//
// @param {number} durationSeconds - TTL for cached entries.
// @returns {Function} Express middleware.
function conditionalCache(durationSeconds) {
  return async (req, res, next) => {
    if (req.method !== 'GET') return next();

    const key = generateCacheKey(req);

    try {
      const cached = await redis.get(key);
      if (cached) {
        // Replay the stored status, headers and body verbatim.
        const { status, headers, body } = JSON.parse(cached);
        res.status(status).set(headers).json(body);
        return;
      }

      const originalJson = res.json.bind(res);
      res.json = function (data) {
        // Only cache successful responses (2xx status codes)
        if (res.statusCode >= 200 && res.statusCode < 300) {
          const cacheData = {
            status: res.statusCode,
            headers: res.getHeaders(),
            body: data
          };
          // Fire-and-forget write; .catch() prevents an unhandled
          // promise rejection if Redis fails at write time.
          redis
            .setex(key, durationSeconds, JSON.stringify(cacheData))
            .catch((err) => console.error('Cache write error:', err));
        }
        return originalJson(data);
      };

      next();
    } catch (error) {
      console.error('Cache error:', error);
      next();
    }
  };
}

User-Specific Caching

Cache responses per user for authenticated APIs:

// Per-user caching middleware for authenticated APIs
//
// Appends the authenticated user's id to the cache key so one user's cached
// response can never be served to another. Unauthenticated requests share
// the 'anonymous' bucket.
//
// @param {number} durationSeconds - TTL for cached entries.
// @returns {Function} Express middleware.
function userSpecificCache(durationSeconds) {
  return async (req, res, next) => {
    if (req.method !== 'GET') return next();

    // Include user ID in cache key to prevent cross-user data leakage
    const userId = req.user?.id || 'anonymous';
    const baseKey = generateCacheKey(req);
    const key = `${baseKey}:user:${userId}`;

    try {
      const cached = await redis.get(key);
      if (cached) {
        return res.json(JSON.parse(cached));
      }

      const originalJson = res.json.bind(res);
      res.json = function (data) {
        // Fire-and-forget write; .catch() prevents an unhandled promise
        // rejection if Redis fails at write time.
        redis
          .setex(key, durationSeconds, JSON.stringify(data))
          .catch((err) => console.error('Cache write error:', err));
        return originalJson(data);
      };

      next();
    } catch (error) {
      console.error('Cache error:', error);
      next();
    }
  };
}

// Usage
// Authentication must run before the cache middleware so req.user.id is
// available when the cache key is built.
app.get('/api/user/dashboard',
  authenticateUser, // Authentication middleware
  userSpecificCache(120), // Cache for 2 minutes per user
  async (req, res) => {
    const dashboard = await getDashboardData(req.user.id);
    res.json(dashboard);
  }
);
Warning: When caching user-specific data, ensure cache keys include user IDs to prevent data leakage between users. Never cache sensitive personal information without encryption.

Cache Invalidation

Implement cache invalidation when data is modified:

// Helper function to invalidate cache
//
// Deletes every key matching `pattern` using cursor-based SCAN rather than
// KEYS: KEYS is O(N) and blocks the Redis server on large key sets, which
// is unsafe in production.
//
// @param {string} pattern - Redis glob pattern, e.g. 'cache:/api/products*'.
// @returns {Promise<void>} Resolves after invalidation; errors are logged,
//   never thrown, so callers are not broken by cache failures.
async function invalidateCache(pattern) {
  try {
    let cursor = '0';
    let invalidated = 0;
    do {
      // SCAN returns [nextCursor, batchOfKeys]; iteration ends when the
      // cursor wraps back to '0'.
      const [nextCursor, keys] = await redis.scan(
        cursor, 'MATCH', pattern, 'COUNT', 100
      );
      cursor = nextCursor;
      if (keys.length > 0) {
        await redis.del(...keys);
        invalidated += keys.length;
      }
    } while (cursor !== '0');
    if (invalidated > 0) {
      console.log(`Invalidated ${invalidated} cache entries`);
    }
  } catch (error) {
    console.error('Cache invalidation error:', error);
  }
}

// Invalidate on POST/PUT/DELETE
// Creating a product makes every cached product list stale, so all
// product-prefixed keys are purged after the write succeeds.
app.post('/api/products', async (req, res) => {
  const product = await db.products.create(req.body);

  // Invalidate all product list caches
  await invalidateCache('cache:/api/products*');

  res.status(201).json(product);
});

// Updating a product invalidates its detail cache plus every list variant
// (with and without query-string suffixes).
app.put('/api/products/:id', async (req, res) => {
  const product = await db.products.update(req.params.id, req.body);

  // Invalidate specific product and list caches
  await invalidateCache(`cache:/api/products/${req.params.id}*`);
  await invalidateCache('cache:/api/products?*');
  await invalidateCache('cache:/api/products');

  res.json(product);
});

// Deleting a product invalidates the detail entry and every list variant
// under the products prefix.
app.delete('/api/products/:id', async (req, res) => {
  await db.products.delete(req.params.id);

  // Invalidate all related caches
  await invalidateCache('cache:/api/products*');

  res.status(204).send();
});
Tip: Use Redis SCAN instead of KEYS in production for large key sets to avoid blocking the Redis server. The KEYS command is blocking and can cause performance issues.

Cache Headers for Client-Side Caching

Combine Redis caching with HTTP cache headers:

// Combined server-side (Redis) and client-side (HTTP header) caching
//
// Serves GET responses from Redis and additionally sets Cache-Control so
// browsers/CDNs can cache them too. An X-Cache header reports HIT/MISS.
//
// @param {number} redisTTL - Redis TTL in seconds.
// @param {number} httpMaxAge - max-age for the Cache-Control header, in
//   seconds (typically shorter than redisTTL so clients re-validate first).
// @returns {Function} Express middleware.
function apiCacheWithHeaders(redisTTL, httpMaxAge) {
  return async (req, res, next) => {
    if (req.method !== 'GET') return next();

    const key = generateCacheKey(req);

    try {
      const cached = await redis.get(key);

      if (cached) {
        res.set({
          'Cache-Control': `public, max-age=${httpMaxAge}`,
          'X-Cache': 'HIT'
        });
        return res.json(JSON.parse(cached));
      }

      const originalJson = res.json.bind(res);
      res.json = function (data) {
        res.set({
          'Cache-Control': `public, max-age=${httpMaxAge}`,
          'X-Cache': 'MISS'
        });
        // Fire-and-forget write; .catch() prevents an unhandled promise
        // rejection if Redis fails at write time.
        redis
          .setex(key, redisTTL, JSON.stringify(data))
          .catch((err) => console.error('Cache write error:', err));
        return originalJson(data);
      };

      next();
    } catch (error) {
      console.error('Cache error:', error);
      next();
    }
  };
}

// Cache in Redis for 5 minutes, browser for 1 minute
// The browser TTL is shorter so clients re-validate against the server
// (and its Redis cache) before the Redis entry expires.
app.get('/api/config',
  apiCacheWithHeaders(300, 60),
  async (req, res) => {
    const config = await getAppConfig();
    res.json(config);
  }
);

Cache Analytics

Track cache performance metrics:

// In-memory cache metrics accumulated by analyticsCache across requests.
// NOTE(review): reset on process restart and local to this instance —
// aggregate externally for multi-instance deployments.
const cacheStats = {
  hits: 0,
  misses: 0,
  errors: 0
};

// Caching middleware with hit/miss analytics
//
// Same caching behavior as smartApiCache, plus it increments the shared
// cacheStats counters and reports lookup/response timing via headers.
//
// @param {number} durationSeconds - TTL for cached entries.
// @returns {Function} Express middleware.
function analyticsCache(durationSeconds) {
  return async (req, res, next) => {
    if (req.method !== 'GET') return next();

    const key = generateCacheKey(req);
    const startTime = Date.now();

    try {
      const cached = await redis.get(key);

      if (cached) {
        cacheStats.hits++;
        const duration = Date.now() - startTime;
        res.set({
          'X-Cache': 'HIT',
          'X-Cache-Time': `${duration}ms`
        });
        return res.json(JSON.parse(cached));
      }

      cacheStats.misses++;

      const originalJson = res.json.bind(res);
      res.json = function (data) {
        const duration = Date.now() - startTime;
        res.set({
          'X-Cache': 'MISS',
          'X-Response-Time': `${duration}ms`
        });
        // Fire-and-forget write; .catch() prevents an unhandled promise
        // rejection and counts the failure in the error metric.
        redis
          .setex(key, durationSeconds, JSON.stringify(data))
          .catch((err) => {
            cacheStats.errors++;
            console.error('Cache write error:', err);
          });
        return originalJson(data);
      };

      next();
    } catch (error) {
      cacheStats.errors++;
      console.error('Cache error:', error);
      next();
    }
  };
}

// Cache stats endpoint
// Reports raw hit/miss/error counters plus the derived hit rate as a
// percentage string (e.g. "83.33%"); rate is 0% until any lookups occur.
app.get('/api/cache/stats', (req, res) => {
  const { hits, misses, errors } = cacheStats;
  const total = hits + misses;
  const hitRate = total > 0 ? (hits / total * 100).toFixed(2) : 0;

  res.json({
    hits,
    misses,
    errors,
    hitRate: `${hitRate}%`,
    total
  });
});
Note: A cache hit rate above 80% typically indicates effective caching. Below 50% suggests your cache TTL might be too short or your data changes too frequently.
Exercise: Build a blog API with three endpoints: list posts (paginated), single post detail, and post comments. Implement caching with 10-minute TTL for lists, 30-minute TTL for details, and invalidation when posts are created/updated. Add cache analytics and measure hit rates after 100 requests.