Redis & Advanced Caching
Caching in Microservices
Microservices architectures introduce unique caching challenges due to distributed data, service boundaries, and consistency requirements. Let's explore effective caching strategies for microservices.
Distributed Caching Challenges
Microservices face specific caching complexities:
// Challenge 1: Data Ownership and Boundaries
// Service A owns User data
// Service A owns User data: classic cache-aside lookup backed by Redis.
class UserService {
  /**
   * Returns the user for `userId`, serving from Redis when possible.
   * On a miss, falls back to the database and caches the result for 1 hour.
   */
  async getUser(userId) {
    const cacheKey = `user:${userId}`;
    const cached = await redis.get(cacheKey);
    if (cached) {
      return JSON.parse(cached);
    }
    const user = await db.users.findById(userId);
    await redis.setex(cacheKey, 3600, JSON.stringify(user));
    return user;
  }
}
// Challenge 2: Service B needs User data
// Challenge 2: Service B needs User data it does not own.
class OrderService {
  /**
   * Creates an order for `userId`. Illustrates the caching dilemma when a
   * service needs data owned by another service.
   */
  async createOrder(userId, items) {
    // Option 1: call the owning service on every request (network overhead).
    const owner = await userServiceClient.getUser(userId);
    // Option 2: keep a local copy of user data (duplication, staleness).
    // Option 3: read a cache shared with UserService (hidden coupling).
  }
}
// Service A owns User data
// Service A owns User data: classic cache-aside lookup backed by Redis.
class UserService {
  /**
   * Returns the user for `userId`, serving from Redis when possible.
   * On a miss, falls back to the database and caches the result for 1 hour.
   */
  async getUser(userId) {
    const cacheKey = `user:${userId}`;
    const cached = await redis.get(cacheKey);
    if (cached) {
      return JSON.parse(cached);
    }
    const user = await db.users.findById(userId);
    await redis.setex(cacheKey, 3600, JSON.stringify(user));
    return user;
  }
}
// Challenge 2: Service B needs User data
// Challenge 2: Service B needs User data it does not own.
class OrderService {
  /**
   * Creates an order for `userId`. Illustrates the caching dilemma when a
   * service needs data owned by another service.
   */
  async createOrder(userId, items) {
    // Option 1: call the owning service on every request (network overhead).
    const owner = await userServiceClient.getUser(userId);
    // Option 2: keep a local copy of user data (duplication, staleness).
    // Option 3: read a cache shared with UserService (hidden coupling).
  }
}
Key Challenges:
- Data ownership and service boundaries
- Cache consistency across services
- Network latency and service calls
- Cache invalidation coordination
- Data duplication vs. coupling
Shared Cache Patterns
Multiple services can share a Redis instance with proper key namespacing:
// Pattern 1: Service-Prefixed Keys
/**
 * Namespaced Redis cache wrapper. Prefixes every key with the owning
 * service's name so multiple services can safely share one Redis instance.
 */
class CacheManager {
  constructor(serviceName) {
    this.serviceName = serviceName;
    this.redis = redis.createClient();
  }

  // Builds the fully-qualified key, e.g. "user-service:user:123".
  getKey(key) {
    return `${this.serviceName}:${key}`;
  }

  /**
   * Reads a value. When `fetcher` is supplied, a miss is transparently
   * filled by calling it and caching the serialized result (the
   * cache-aside signature ProductService already calls this with).
   * @param {string} key - logical key (namespaced automatically)
   * @param {(() => Promise<any>)|null} fetcher - optional loader on miss
   * @param {number} ttl - seconds to cache a fetched value
   */
  async get(key, fetcher = null, ttl = 3600) {
    const raw = await this.redis.get(this.getKey(key));
    if (raw !== null && raw !== undefined) {
      // Values written by set() are JSON; fall back to the raw string for
      // entries written by other clients.
      try {
        return JSON.parse(raw);
      } catch {
        return raw;
      }
    }
    if (!fetcher) {
      return raw;
    }
    const fresh = await fetcher();
    await this.set(key, fresh, ttl);
    return fresh;
  }

  /**
   * Stores `value` under the namespaced key with a TTL (seconds).
   * Non-string values are JSON-serialized; previously objects were coerced
   * to "[object Object]", silently corrupting the cached data (see the
   * usage below, which passes objects).
   */
  async set(key, value, ttl = 3600) {
    const payload = typeof value === 'string' ? value : JSON.stringify(value);
    return await this.redis.setex(this.getKey(key), ttl, payload);
  }
}
// Usage: each service constructs its own namespaced manager, so identical
// logical keys can never collide in the shared Redis instance.
// User Service
const userCache = new CacheManager('user-service');
await userCache.set('user:123', userData);
// Key in Redis: "user-service:user:123"
// Order Service
const orderCache = new CacheManager('order-service');
await orderCache.set('order:456', orderData);
// Key in Redis: "order-service:order:456"
/**
 * Namespaced Redis cache wrapper. Prefixes every key with the owning
 * service's name so multiple services can safely share one Redis instance.
 */
class CacheManager {
  constructor(serviceName) {
    this.serviceName = serviceName;
    this.redis = redis.createClient();
  }

  // Builds the fully-qualified key, e.g. "user-service:user:123".
  getKey(key) {
    return `${this.serviceName}:${key}`;
  }

  /**
   * Reads a value. When `fetcher` is supplied, a miss is transparently
   * filled by calling it and caching the serialized result (the
   * cache-aside signature ProductService already calls this with).
   * @param {string} key - logical key (namespaced automatically)
   * @param {(() => Promise<any>)|null} fetcher - optional loader on miss
   * @param {number} ttl - seconds to cache a fetched value
   */
  async get(key, fetcher = null, ttl = 3600) {
    const raw = await this.redis.get(this.getKey(key));
    if (raw !== null && raw !== undefined) {
      // Values written by set() are JSON; fall back to the raw string for
      // entries written by other clients.
      try {
        return JSON.parse(raw);
      } catch {
        return raw;
      }
    }
    if (!fetcher) {
      return raw;
    }
    const fresh = await fetcher();
    await this.set(key, fresh, ttl);
    return fresh;
  }

  /**
   * Stores `value` under the namespaced key with a TTL (seconds).
   * Non-string values are JSON-serialized; previously objects were coerced
   * to "[object Object]", silently corrupting the cached data (see the
   * usage below, which passes objects).
   */
  async set(key, value, ttl = 3600) {
    const payload = typeof value === 'string' ? value : JSON.stringify(value);
    return await this.redis.setex(this.getKey(key), ttl, payload);
  }
}
// Usage: each service constructs its own namespaced manager, so identical
// logical keys can never collide in the shared Redis instance.
// User Service
const userCache = new CacheManager('user-service');
await userCache.set('user:123', userData);
// Key in Redis: "user-service:user:123"
// Order Service
const orderCache = new CacheManager('order-service');
await orderCache.set('order:456', orderData);
// Key in Redis: "order-service:order:456"
// Pattern 2: Shared Reference Cache
// Central cache for frequently accessed reference data
/**
 * Central cache for slow-changing reference data (countries, categories).
 * Reference data gets a long TTL because it rarely changes.
 */
class ReferenceCache {
  async getCountry(code) {
    // 24h TTL for reference data.
    return this.getCachedReference(
      `ref:country:${code}`,
      () => referenceDataAPI.getCountry(code),
      86400
    );
  }

  async getProductCategory(id) {
    return this.getCachedReference(
      `ref:category:${id}`,
      () => referenceDataAPI.getCategory(id),
      86400
    );
  }

  /**
   * Generic cache-aside helper. Fixes two defects in the original: the
   * fresh-fetch path re-parsed a non-string with JSON.parse, and this
   * method was called by getProductCategory() but never defined.
   * @param {string} key - fully-qualified Redis key
   * @param {() => Promise<any>} fetcher - loads the value on a miss
   * @param {number} ttl - cache lifetime in seconds
   */
  async getCachedReference(key, fetcher, ttl = 86400) {
    const cached = await redis.get(key);
    if (cached) {
      return JSON.parse(cached);
    }
    const fresh = await fetcher();
    await redis.setex(key, ttl, JSON.stringify(fresh));
    return fresh;
  }
}
// Central cache for frequently accessed reference data
/**
 * Central cache for slow-changing reference data (countries, categories).
 * Reference data gets a long TTL because it rarely changes.
 */
class ReferenceCache {
  async getCountry(code) {
    // 24h TTL for reference data.
    return this.getCachedReference(
      `ref:country:${code}`,
      () => referenceDataAPI.getCountry(code),
      86400
    );
  }

  async getProductCategory(id) {
    return this.getCachedReference(
      `ref:category:${id}`,
      () => referenceDataAPI.getCategory(id),
      86400
    );
  }

  /**
   * Generic cache-aside helper. Fixes two defects in the original: the
   * fresh-fetch path re-parsed a non-string with JSON.parse, and this
   * method was called by getProductCategory() but never defined.
   * @param {string} key - fully-qualified Redis key
   * @param {() => Promise<any>} fetcher - loads the value on a miss
   * @param {number} ttl - cache lifetime in seconds
   */
  async getCachedReference(key, fetcher, ttl = 86400) {
    const cached = await redis.get(key);
    if (cached) {
      return JSON.parse(cached);
    }
    const fresh = await fetcher();
    await redis.setex(key, ttl, JSON.stringify(fresh));
    return fresh;
  }
}
Cache Consistency Strategies
Maintaining consistency across distributed services:
// Strategy 1: Event-Driven Cache Invalidation
const EventEmitter = require('events');
const eventBus = new EventEmitter();
// Event-driven invalidation: the data owner updates, evicts, then announces.
class UserService {
  /**
   * Persists `updates`, evicts the stale cache entry, and broadcasts the
   * change so other services can drop their own copies.
   */
  async updateUser(userId, updates) {
    const cacheKey = `user:${userId}`;
    // Write to the system of record first.
    await db.users.update(userId, updates);
    // Then drop the now-stale cache entry.
    await redis.del(cacheKey);
    // In-process listeners.
    eventBus.emit('user.updated', { userId, updates });
    // Cross-process listeners via Redis Pub/Sub.
    const invalidation = {
      service: 'user-service',
      key: cacheKey,
      timestamp: Date.now()
    };
    await redis.publish('cache:invalidate', JSON.stringify(invalidation));
  }
}
// Other services listen for invalidation events
// Listens on the shared invalidation channel and mirrors evictions locally.
class OrderService {
constructor() {
// A Redis client in subscriber mode cannot issue regular commands, so the
// main client is duplicated for the subscription.
this.subscriber = redis.duplicate();
this.subscriber.subscribe('cache:invalidate');
this.subscriber.on('message', async (channel, message) => {
// Messages are the JSON payloads published by UserService.updateUser.
const event = JSON.parse(message);
if (event.service === 'user-service') {
// Invalidate local cache if exists
// NOTE(review): this.localCache is never assigned in this snippet —
// presumably initialized elsewhere; confirm before relying on it.
await this.localCache.del(event.key);
}
});
}
}
const EventEmitter = require('events');
const eventBus = new EventEmitter();
// Event-driven invalidation: the data owner updates, evicts, then announces.
class UserService {
  /**
   * Persists `updates`, evicts the stale cache entry, and broadcasts the
   * change so other services can drop their own copies.
   */
  async updateUser(userId, updates) {
    const cacheKey = `user:${userId}`;
    // Write to the system of record first.
    await db.users.update(userId, updates);
    // Then drop the now-stale cache entry.
    await redis.del(cacheKey);
    // In-process listeners.
    eventBus.emit('user.updated', { userId, updates });
    // Cross-process listeners via Redis Pub/Sub.
    const invalidation = {
      service: 'user-service',
      key: cacheKey,
      timestamp: Date.now()
    };
    await redis.publish('cache:invalidate', JSON.stringify(invalidation));
  }
}
// Other services listen for invalidation events
// Listens on the shared invalidation channel and mirrors evictions locally.
class OrderService {
constructor() {
// A Redis client in subscriber mode cannot issue regular commands, so the
// main client is duplicated for the subscription.
this.subscriber = redis.duplicate();
this.subscriber.subscribe('cache:invalidate');
this.subscriber.on('message', async (channel, message) => {
// Messages are the JSON payloads published by UserService.updateUser.
const event = JSON.parse(message);
if (event.service === 'user-service') {
// Invalidate local cache if exists
// NOTE(review): this.localCache is never assigned in this snippet —
// presumably initialized elsewhere; confirm before relying on it.
await this.localCache.del(event.key);
}
});
}
}
// Strategy 2: Time-Based Consistency (Eventual Consistency)
/**
 * Stale-while-revalidate cache: entries carry a hard TTL (the Redis
 * expiry) and a soft TTL after which stale data is still served while a
 * background refresh repopulates the entry.
 */
class CacheWithSoftTTL {
  async get(key, fetcher, ttl = 3600, softTTL = 300) {
    const raw = await redis.get(key);

    // Hard miss: fetch synchronously and cache the timestamped wrapper.
    if (!raw) {
      const fresh = await fetcher();
      await redis.setex(key, ttl, JSON.stringify({
        value: fresh,
        cachedAt: Date.now()
      }));
      return fresh;
    }

    const entry = JSON.parse(raw);
    const ageMs = Date.now() - entry.cachedAt;

    // Soft TTL exceeded: serve the stale value now and refresh off the
    // request path. Refresh failures are logged, never surfaced.
    if (ageMs > softTTL * 1000) {
      setImmediate(async () => {
        try {
          const fresh = await fetcher();
          await redis.setex(key, ttl, JSON.stringify({
            value: fresh,
            cachedAt: Date.now()
          }));
        } catch (err) {
          console.error('Background refresh failed:', err);
        }
      });
    }

    return entry.value;
  }
}
/**
 * Stale-while-revalidate cache: entries carry a hard TTL (the Redis
 * expiry) and a soft TTL after which stale data is still served while a
 * background refresh repopulates the entry.
 */
class CacheWithSoftTTL {
  async get(key, fetcher, ttl = 3600, softTTL = 300) {
    const raw = await redis.get(key);

    // Hard miss: fetch synchronously and cache the timestamped wrapper.
    if (!raw) {
      const fresh = await fetcher();
      await redis.setex(key, ttl, JSON.stringify({
        value: fresh,
        cachedAt: Date.now()
      }));
      return fresh;
    }

    const entry = JSON.parse(raw);
    const ageMs = Date.now() - entry.cachedAt;

    // Soft TTL exceeded: serve the stale value now and refresh off the
    // request path. Refresh failures are logged, never surfaced.
    if (ageMs > softTTL * 1000) {
      setImmediate(async () => {
        try {
          const fresh = await fetcher();
          await redis.setex(key, ttl, JSON.stringify({
            value: fresh,
            cachedAt: Date.now()
          }));
        } catch (err) {
          console.error('Background refresh failed:', err);
        }
      });
    }

    return entry.value;
  }
}
Cache Synchronization
Advanced synchronization techniques for distributed caches:
// Pattern 1: Two-Level Caching (L1 + L2)
/**
 * Two-level cache: a per-process in-memory L1 (Map) in front of a shared
 * Redis L2. L1 absorbs hot reads; L2 keeps instances roughly in sync.
 */
class TwoLevelCache {
  constructor() {
    this.l1Cache = new Map(); // In-memory (fast, per-instance)
    this.l2Cache = redis;     // Redis (shared across instances)
    this.l1TTL = 60;          // seconds
    this.l2TTL = 3600;        // seconds
  }

  // Stores a value in L1 with its absolute expiry time.
  setL1(key, value) {
    this.l1Cache.set(key, {
      value,
      expiresAt: Date.now() + (this.l1TTL * 1000)
    });
  }

  /**
   * Reads `key`, checking L1, then L2, then falling back to `fetcher`.
   * Both cache levels are populated on the way back up.
   */
  async get(key, fetcher) {
    // L1 (local memory).
    const l1Data = this.l1Cache.get(key);
    if (l1Data) {
      if (Date.now() < l1Data.expiresAt) {
        return l1Data.value;
      }
      // Evict expired entries; previously they lingered forever, so the
      // Map grew without bound under a churning key set.
      this.l1Cache.delete(key);
    }

    // L2 (Redis).
    const l2Data = await this.l2Cache.get(key);
    if (l2Data) {
      const value = JSON.parse(l2Data);
      this.setL1(key, value);
      return value;
    }

    // Miss in both levels: hit the source and populate both caches.
    const value = await fetcher();
    this.setL1(key, value);
    await this.l2Cache.setex(key, this.l2TTL, JSON.stringify(value));
    return value;
  }

  /**
   * Drops `key` from both levels and tells sibling instances to drop
   * their L1 copies too.
   */
  async invalidate(key) {
    this.l1Cache.delete(key);
    await this.l2Cache.del(key);
    // Broadcast to other instances
    await this.l2Cache.publish('cache:invalidate:l1', key);
  }
}
/**
 * Two-level cache: a per-process in-memory L1 (Map) in front of a shared
 * Redis L2. L1 absorbs hot reads; L2 keeps instances roughly in sync.
 */
class TwoLevelCache {
  constructor() {
    this.l1Cache = new Map(); // In-memory (fast, per-instance)
    this.l2Cache = redis;     // Redis (shared across instances)
    this.l1TTL = 60;          // seconds
    this.l2TTL = 3600;        // seconds
  }

  // Stores a value in L1 with its absolute expiry time.
  setL1(key, value) {
    this.l1Cache.set(key, {
      value,
      expiresAt: Date.now() + (this.l1TTL * 1000)
    });
  }

  /**
   * Reads `key`, checking L1, then L2, then falling back to `fetcher`.
   * Both cache levels are populated on the way back up.
   */
  async get(key, fetcher) {
    // L1 (local memory).
    const l1Data = this.l1Cache.get(key);
    if (l1Data) {
      if (Date.now() < l1Data.expiresAt) {
        return l1Data.value;
      }
      // Evict expired entries; previously they lingered forever, so the
      // Map grew without bound under a churning key set.
      this.l1Cache.delete(key);
    }

    // L2 (Redis).
    const l2Data = await this.l2Cache.get(key);
    if (l2Data) {
      const value = JSON.parse(l2Data);
      this.setL1(key, value);
      return value;
    }

    // Miss in both levels: hit the source and populate both caches.
    const value = await fetcher();
    this.setL1(key, value);
    await this.l2Cache.setex(key, this.l2TTL, JSON.stringify(value));
    return value;
  }

  /**
   * Drops `key` from both levels and tells sibling instances to drop
   * their L1 copies too.
   */
  async invalidate(key) {
    this.l1Cache.delete(key);
    await this.l2Cache.del(key);
    // Broadcast to other instances
    await this.l2Cache.publish('cache:invalidate:l1', key);
  }
}
// Pattern 2: Write-Through Cache Across Services
/**
 * Write-through cache: every write both updates Redis and publishes a
 * change event, batched into a single pipeline round trip.
 */
class WriteThroughCache {
  async set(key, value, ttl = 3600) {
    const changeEvent = JSON.stringify({
      key,
      value,
      timestamp: Date.now()
    });
    const pipeline = redis.pipeline();
    // Write the value with its TTL.
    pipeline.setex(key, ttl, JSON.stringify(value));
    // Announce the change to any subscribed services.
    pipeline.publish('cache:change', changeEvent);
    await pipeline.exec();
  }
}
/**
 * Write-through cache: every write both updates Redis and publishes a
 * change event, batched into a single pipeline round trip.
 */
class WriteThroughCache {
  async set(key, value, ttl = 3600) {
    const changeEvent = JSON.stringify({
      key,
      value,
      timestamp: Date.now()
    });
    const pipeline = redis.pipeline();
    // Write the value with its TTL.
    pipeline.setex(key, ttl, JSON.stringify(value));
    // Announce the change to any subscribed services.
    pipeline.publish('cache:change', changeEvent);
    await pipeline.exec();
  }
}
Service-Level Caching
Implementing caching at the API gateway and service levels:
// API Gateway Cache Middleware
const express = require('express');
const app = express();
/**
 * API-gateway response cache for idempotent GET requests.
 * @param {number} ttl - seconds to keep a cached response
 * @returns Express middleware that serves cache hits and captures misses
 */
const gatewayCacheMiddleware = (ttl = 60) => {
  return async (req, res, next) => {
    // Only GETs are safe to cache; mutations pass straight through.
    if (req.method !== 'GET') {
      return next();
    }

    const cacheKey = `gateway:${req.originalUrl}`;
    const cached = await redis.get(cacheKey);
    if (cached) {
      return res.set('X-Cache', 'HIT').json(JSON.parse(cached));
    }

    // Wrap res.json so the downstream handler's payload can be captured.
    const originalJson = res.json.bind(res);
    res.json = (data) => {
      // Cache successful responses only. The write is fire-and-forget,
      // but failures are caught so a Redis outage cannot raise an
      // unhandled promise rejection (the original had no .catch).
      if (res.statusCode === 200) {
        redis.setex(cacheKey, ttl, JSON.stringify(data))
          .catch((err) => console.error('Gateway cache write failed:', err));
      }
      res.set('X-Cache', 'MISS');
      return originalJson(data);
    };
    next();
  };
};
// Apply to routes
// Listings change more often than individual products, so the list route
// gets a shorter TTL (5 min) than the detail route (10 min).
app.get('/api/products', gatewayCacheMiddleware(300), productController.list);
app.get('/api/products/:id', gatewayCacheMiddleware(600), productController.get);
const express = require('express');
const app = express();
/**
 * API-gateway response cache for idempotent GET requests.
 * @param {number} ttl - seconds to keep a cached response
 * @returns Express middleware that serves cache hits and captures misses
 */
const gatewayCacheMiddleware = (ttl = 60) => {
  return async (req, res, next) => {
    // Only GETs are safe to cache; mutations pass straight through.
    if (req.method !== 'GET') {
      return next();
    }

    const cacheKey = `gateway:${req.originalUrl}`;
    const cached = await redis.get(cacheKey);
    if (cached) {
      return res.set('X-Cache', 'HIT').json(JSON.parse(cached));
    }

    // Wrap res.json so the downstream handler's payload can be captured.
    const originalJson = res.json.bind(res);
    res.json = (data) => {
      // Cache successful responses only. The write is fire-and-forget,
      // but failures are caught so a Redis outage cannot raise an
      // unhandled promise rejection (the original had no .catch).
      if (res.statusCode === 200) {
        redis.setex(cacheKey, ttl, JSON.stringify(data))
          .catch((err) => console.error('Gateway cache write failed:', err));
      }
      res.set('X-Cache', 'MISS');
      return originalJson(data);
    };
    next();
  };
};
// Apply to routes
// Listings change more often than individual products, so the list route
// gets a shorter TTL (5 min) than the detail route (10 min).
app.get('/api/products', gatewayCacheMiddleware(300), productController.list);
app.get('/api/products/:id', gatewayCacheMiddleware(600), productController.get);
// Service-Specific Cache Strategy
/**
 * Service-specific cache strategy: the composite product view is cached,
 * and each cross-service piece is cached independently with its own TTL.
 */
class ProductService {
  constructor() {
    this.cache = new CacheManager('product-service');
  }

  /**
   * Returns a product enriched with its category and inventory.
   * The composite result is cached for 1 hour.
   */
  async getProduct(id) {
    return await this.cache.get(`product:${id}`, async () => {
      // Fetch from database
      const product = await db.products.findById(id);
      // Related data from other services, fetched in parallel and each
      // cached under its own key.
      const [category, inventory] = await Promise.all([
        this.getCategoryFromCache(product.categoryId),
        this.getInventoryFromCache(id)
      ]);
      return { ...product, category, inventory };
    }, 3600);
  }

  async getCategoryFromCache(categoryId) {
    return await this.cache.get(
      `category:${categoryId}`,
      () => categoryServiceClient.get(categoryId),
      7200 // Longer TTL for slow-changing reference data
    );
  }

  // Was called by getProduct() but never defined. Inventory is volatile,
  // so it gets a deliberately short TTL.
  async getInventoryFromCache(productId) {
    return await this.cache.get(
      `inventory:${productId}`,
      () => inventoryServiceClient.get(productId),
      60
    );
  }
}
/**
 * Service-specific cache strategy: the composite product view is cached,
 * and each cross-service piece is cached independently with its own TTL.
 */
class ProductService {
  constructor() {
    this.cache = new CacheManager('product-service');
  }

  /**
   * Returns a product enriched with its category and inventory.
   * The composite result is cached for 1 hour.
   */
  async getProduct(id) {
    return await this.cache.get(`product:${id}`, async () => {
      // Fetch from database
      const product = await db.products.findById(id);
      // Related data from other services, fetched in parallel and each
      // cached under its own key.
      const [category, inventory] = await Promise.all([
        this.getCategoryFromCache(product.categoryId),
        this.getInventoryFromCache(id)
      ]);
      return { ...product, category, inventory };
    }, 3600);
  }

  async getCategoryFromCache(categoryId) {
    return await this.cache.get(
      `category:${categoryId}`,
      () => categoryServiceClient.get(categoryId),
      7200 // Longer TTL for slow-changing reference data
    );
  }

  // Was called by getProduct() but never defined. Inventory is volatile,
  // so it gets a deliberately short TTL.
  async getInventoryFromCache(productId) {
    return await this.cache.get(
      `inventory:${productId}`,
      () => inventoryServiceClient.get(productId),
      60
    );
  }
}
Microservices Caching Anti-Patterns:
- Caching data you don't own without invalidation strategy
- Over-coupling services through shared cache dependencies
- Ignoring service boundaries in cache keys
- Not handling network partitions and cache unavailability
- Caching without monitoring cache effectiveness per service
Cache Health Monitoring
Essential monitoring for distributed caching:
// Service-Level Cache Metrics
/**
 * Per-service cache metrics: in-process counters for fast local reads,
 * mirrored into a Redis hash for fleet-wide aggregation.
 */
class CacheMetrics {
  constructor(serviceName) {
    this.serviceName = serviceName;
    this.hits = 0;
    this.misses = 0;
    this.errors = 0;
  }

  recordHit() {
    this.hits++;
    redis.hincrby(`metrics:${this.serviceName}`, 'cache_hits', 1);
  }

  recordMiss() {
    this.misses++;
    redis.hincrby(`metrics:${this.serviceName}`, 'cache_misses', 1);
  }

  // The errors counter existed but nothing ever incremented it.
  recordError() {
    this.errors++;
    redis.hincrby(`metrics:${this.serviceName}`, 'cache_errors', 1);
  }

  /**
   * Hit rate for this process as a percentage string with 2 decimals
   * (e.g. "75.00"); returns 0 before any traffic.
   */
  getHitRate() {
    const total = this.hits + this.misses;
    return total > 0 ? (this.hits / total * 100).toFixed(2) : 0;
  }

  /**
   * Fleet-wide hit rate computed from the raw Redis hash fields.
   * (Was called by getGlobalMetrics() but never defined.)
   */
  calculateHitRate(metrics) {
    const hits = parseInt(metrics.cache_hits || 0, 10);
    const misses = parseInt(metrics.cache_misses || 0, 10);
    const total = hits + misses;
    return total > 0 ? (hits / total * 100).toFixed(2) : 0;
  }

  /**
   * Aggregated metrics across all instances of this service, read from
   * the shared Redis hash.
   */
  async getGlobalMetrics() {
    const metrics = await redis.hgetall(`metrics:${this.serviceName}`);
    return {
      service: this.serviceName,
      hits: parseInt(metrics.cache_hits || 0, 10),
      misses: parseInt(metrics.cache_misses || 0, 10),
      hitRate: this.calculateHitRate(metrics)
    };
  }
}
/**
 * Per-service cache metrics: in-process counters for fast local reads,
 * mirrored into a Redis hash for fleet-wide aggregation.
 */
class CacheMetrics {
  constructor(serviceName) {
    this.serviceName = serviceName;
    this.hits = 0;
    this.misses = 0;
    this.errors = 0;
  }

  recordHit() {
    this.hits++;
    redis.hincrby(`metrics:${this.serviceName}`, 'cache_hits', 1);
  }

  recordMiss() {
    this.misses++;
    redis.hincrby(`metrics:${this.serviceName}`, 'cache_misses', 1);
  }

  // The errors counter existed but nothing ever incremented it.
  recordError() {
    this.errors++;
    redis.hincrby(`metrics:${this.serviceName}`, 'cache_errors', 1);
  }

  /**
   * Hit rate for this process as a percentage string with 2 decimals
   * (e.g. "75.00"); returns 0 before any traffic.
   */
  getHitRate() {
    const total = this.hits + this.misses;
    return total > 0 ? (this.hits / total * 100).toFixed(2) : 0;
  }

  /**
   * Fleet-wide hit rate computed from the raw Redis hash fields.
   * (Was called by getGlobalMetrics() but never defined.)
   */
  calculateHitRate(metrics) {
    const hits = parseInt(metrics.cache_hits || 0, 10);
    const misses = parseInt(metrics.cache_misses || 0, 10);
    const total = hits + misses;
    return total > 0 ? (hits / total * 100).toFixed(2) : 0;
  }

  /**
   * Aggregated metrics across all instances of this service, read from
   * the shared Redis hash.
   */
  async getGlobalMetrics() {
    const metrics = await redis.hgetall(`metrics:${this.serviceName}`);
    return {
      service: this.serviceName,
      hits: parseInt(metrics.cache_hits || 0, 10),
      misses: parseInt(metrics.cache_misses || 0, 10),
      hitRate: this.calculateHitRate(metrics)
    };
  }
}
Exercise: Design a caching strategy for an e-commerce microservices architecture with the following services: User Service, Product Service, Inventory Service, Order Service, and Payment Service. Consider:
- What data should be cached at each service?
- Which caches should be shared vs. service-specific?
- How will you handle cache invalidation when a product is updated?
- What TTL values are appropriate for each data type?
- How will you ensure consistency between services?
Best Practices: Use service-prefixed cache keys, implement event-driven invalidation for critical data, accept eventual consistency for non-critical data, monitor cache hit rates per service, and implement circuit breakers for cache failures.