Rate Limits & Best Practices
The Cortex API implements rate limiting to ensure fair usage and maintain service quality. With the x402 payment model, rate limits are designed to prevent abuse while allowing legitimate usage.
Rate Limit Exceeded
When you exceed the rate limit, you'll receive a 429 status code:
{
"success": false,
"error": {
"code": "rate_limit_exceeded",
"message": "Rate limit exceeded",
"retryAfter": 5
},
"meta": {
"timestamp": "2024-01-01T00:00:00Z",
"request_id": "req_123456789"
}
}
The error.retryAfter field indicates how many seconds to wait before retrying the request.
Handling Rate Limits with x402
Exponential Backoff with x402
Implement exponential backoff when you hit rate limits with x402 clients:
// Bring in the x402 fetch wrapper and viem's key-to-account helper.
import { wrapFetchWithPayment } from "x402-fetch";
import { privateKeyToAccount } from "viem/accounts";
// Derive a signing account from the private key supplied via the environment.
const account = privateKeyToAccount(process.env.PRIVATE_KEY);
// Drop-in replacement for fetch that settles 402 Payment Required responses
// using the account above (per the x402 payment model described in this doc).
const fetchWithPayment = wrapFetchWithPayment(fetch, account);
/**
 * Perform a request through fetchWithPayment, retrying on 429 (rate limit)
 * and on thrown errors with exponential backoff.
 *
 * @param {string} url - Target endpoint.
 * @param {object} [options] - fetch options forwarded to fetchWithPayment.
 * @param {number} [maxRetries=3] - Maximum number of attempts.
 * @returns {Promise<Response>} The first non-429 response.
 * @throws {Error} When all attempts are rate limited or the last attempt throws.
 */
async function makeRequestWithRetry(url, options, maxRetries = 3) {
  for (let attempt = 0; attempt < maxRetries; attempt++) {
    try {
      const response = await fetchWithPayment(url, options);
      if (response.status !== 429) {
        return response;
      }
      // Prefer the server-suggested delay; fall back to exponential backoff.
      const data = await response.json();
      const retryAfter = data?.error?.retryAfter || 2 ** attempt;
      console.log(`Rate limited. Waiting ${retryAfter} seconds...`);
      await new Promise((resolve) => setTimeout(resolve, retryAfter * 1000));
    } catch (error) {
      if (attempt === maxRetries - 1) throw error;
      await new Promise((resolve) => setTimeout(resolve, 2 ** attempt * 1000));
    }
  }
  // The original fell through here and silently resolved to undefined
  // when every attempt was rate limited; fail loudly instead.
  throw new Error(`Rate limited after ${maxRetries} attempts`);
}
Request Queuing with Payment Handling
For high-volume applications, implement request queuing with x402:
/**
 * Serializes API calls through a FIFO queue, spacing them to a fixed
 * request rate and pausing/re-queuing the current job on a 429 response.
 */
class RateLimitedQueue {
  constructor(requestsPerSecond = 10, account) {
    this.queue = [];
    this.processing = false;
    this.interval = 1000 / requestsPerSecond; // minimum spacing between sends, in ms
    this.account = account;
    this.fetchWithPayment = wrapFetchWithPayment(fetch, account);
  }

  /** Enqueue a request; the returned promise settles with its Response. */
  async add(url, options = {}) {
    return new Promise((resolve, reject) => {
      this.queue.push({ url, options, resolve, reject });
      this.process();
    });
  }

  /** Drain the queue one job at a time; no-op if a drain is already running. */
  async process() {
    if (this.processing || this.queue.length === 0) return;
    this.processing = true;
    while (this.queue.length > 0) {
      const job = this.queue.shift();
      try {
        const response = await this.fetchWithPayment(job.url, job.options);
        if (response.status === 429) {
          const body = await response.json();
          const waitSeconds = body.error.retryAfter || 5;
          // Put the job back at the head and pause before retrying;
          // skip the normal inter-request spacing for this iteration.
          this.queue.unshift(job);
          await new Promise((wake) => setTimeout(wake, waitSeconds * 1000));
          continue;
        }
        job.resolve(response);
      } catch (err) {
        job.reject(err);
      }
      await new Promise((wake) => setTimeout(wake, this.interval));
    }
    this.processing = false;
  }
}
Cost-Aware Rate Limiting
Monitoring Costs
Track both rate limits and costs:
/**
 * x402 client that enforces daily spend and request-count budgets.
 * Counters reset automatically when the UTC calendar day rolls over
 * (the original version never reset them, so one hit of a limit
 * locked the client out permanently).
 */
class CostAwareClient {
  /**
   * @param {object} account - Signing account for the x402 wrapper.
   * @param {number} [costPerRequest=0.0004] - Dollar cost charged per request.
   */
  constructor(account, costPerRequest = 0.0004) {
    this.account = account;
    this.fetchWithPayment = wrapFetchWithPayment(fetch, account);
    this.costPerRequest = costPerRequest;
    this.dailyCost = 0;
    this.dailyRequests = 0;
    this.costLimit = 10; // $10 daily limit
    this.requestLimit = 25000; // 25,000 requests daily
    this.currentDay = new Date().toISOString().slice(0, 10); // YYYY-MM-DD (UTC)
  }

  /** Zero the counters when the UTC day has changed since the last call. */
  #resetIfNewDay() {
    const today = new Date().toISOString().slice(0, 10);
    if (today !== this.currentDay) {
      this.currentDay = today;
      this.dailyCost = 0;
      this.dailyRequests = 0;
    }
  }

  /**
   * Send a request, enforcing the daily cost and request budgets.
   * @throws {Error} When either daily limit has been reached.
   * @returns {Promise<Response>}
   */
  async makeRequest(url, options = {}) {
    this.#resetIfNewDay();
    if (this.dailyCost >= this.costLimit) {
      throw new Error('Daily cost limit exceeded');
    }
    if (this.dailyRequests >= this.requestLimit) {
      throw new Error('Daily request limit exceeded');
    }
    const response = await this.fetchWithPayment(url, options);
    // Only count requests that were actually sent.
    this.dailyRequests++;
    this.dailyCost += this.costPerRequest;
    return response;
  }

  /** Snapshot of today's usage and remaining budget. */
  getUsageStats() {
    this.#resetIfNewDay();
    return {
      dailyRequests: this.dailyRequests,
      dailyCost: this.dailyCost,
      remainingRequests: this.requestLimit - this.dailyRequests,
      remainingCost: this.costLimit - this.dailyCost
    };
  }
}
Best Practices
1. Caching Strategy
Implement intelligent caching to reduce API calls:
/**
 * API client that memoizes token lookups so repeat calls within the TTL
 * do not trigger a new (paid) request.
 */
class CachedAPIClient {
  /**
   * @param {object} account - Signing account for the x402 wrapper.
   * @param {number} [ttl=300000] - Cache entry lifetime in ms (5 minutes).
   */
  constructor(account, ttl = 300000) {
    this.account = account;
    this.fetchWithPayment = wrapFetchWithPayment(fetch, account);
    this.cache = new Map();
    this.ttl = ttl;
  }

  /**
   * Fetch token info, serving from cache while the entry is fresh.
   * @param {string} address - Token contract address.
   * @param {string|number} networkId - Network identifier.
   * @returns {Promise<object>} The API payload's `data` field.
   */
  async getTokenInfo(address, networkId) {
    const cacheKey = `token-${address}-${networkId}`;
    const cached = this.cache.get(cacheKey);
    if (cached && Date.now() - cached.timestamp < this.ttl) {
      return cached.data;
    }
    const response = await this.fetchWithPayment(
      `https://api.cortex402.xyz/token/${address}/${networkId}`
    );
    const data = await response.json();
    // The original cached every response; only cache successes so a
    // transient error payload is not served for the full TTL.
    if (response.ok) {
      this.cache.set(cacheKey, {
        data: data.data,
        timestamp: Date.now()
      });
    }
    return data.data;
  }
}
2. Batch Operations
When possible, use batch operations to reduce the number of requests:
// Instead of multiple individual requests
/**
 * Fetch info for several tokens in parallel (one request per token)
 * rather than issuing them sequentially.
 * @param {Array<{address: string, networkId: string|number}>} tokens
 * @returns {Promise<object[]>} Parsed JSON bodies, in input order.
 */
async function getMultipleTokens(tokens) {
  const responses = await Promise.all(
    tokens.map((token) =>
      fetchWithPayment(`https://api.cortex402.xyz/token/${token.address}/${token.networkId}`)
    )
  );
  return Promise.all(responses.map((response) => response.json()));
}
3. Error Handling
Implement comprehensive error handling:
/**
 * Classify an API failure and decide whether (and when) to retry.
 * @param {Error} error - The thrown error, if any.
 * @param {Response} [response] - The HTTP response, when one was received.
 * @returns {Promise<{shouldRetry: boolean, delay?: number}>} delay is in ms.
 */
async function handleAPIError(error, response) {
  if (response?.status === 429) {
    let retryAfter = 5; // sensible default when the body omits retryAfter
    try {
      const data = await response.json();
      // Original computed `undefined * 1000` (NaN) when the field was missing.
      retryAfter = data?.error?.retryAfter ?? retryAfter;
    } catch {
      // Body already consumed or not JSON — keep the default delay.
    }
    console.warn(`Rate limited. Retry after ${retryAfter} seconds`);
    return { shouldRetry: true, delay: retryAfter * 1000 };
  }
  if (response?.status === 402) {
    console.error('Payment required - this should be handled by x402 client');
    return { shouldRetry: false };
  }
  if (response?.status >= 500) {
    console.error('Server error - retry with exponential backoff');
    return { shouldRetry: true, delay: 5000 };
  }
  return { shouldRetry: false };
}
4. Monitoring and Alerting
Set up monitoring for rate limits and costs:
/**
 * In-memory counters for request volume, errors, rate-limit hits, and spend.
 */
class APIMonitor {
  constructor() {
    this.metrics = {
      requests: 0,
      errors: 0,
      rateLimitHits: 0,
      totalCost: 0
    };
  }

  /**
   * Record one completed request.
   * @param {Response} response - The HTTP response (only `.status` is read).
   * @param {number} [cost=0.0004] - Dollar cost of the request.
   */
  trackRequest(response, cost = 0.0004) {
    this.metrics.requests++;
    this.metrics.totalCost += cost;
    if (response.status === 429) {
      this.metrics.rateLimitHits++;
    }
    if (response.status >= 400) {
      this.metrics.errors++;
    }
    // Simple console alerts when usage looks unhealthy.
    if (this.metrics.rateLimitHits > 10) {
      console.warn('High rate limit hit rate detected');
    }
    if (this.metrics.totalCost > 5) {
      console.warn('Approaching daily cost limit');
    }
  }

  /** Snapshot of the counters plus derived rates (0 before any request). */
  getMetrics() {
    const { requests, errors, totalCost } = this.metrics;
    return {
      ...this.metrics,
      // Original divided 0/0 = NaN before the first tracked request.
      errorRate: requests === 0 ? 0 : errors / requests,
      avgCostPerRequest: requests === 0 ? 0 : totalCost / requests
    };
  }
}
Next Steps
- Authentication - x402 payment protocol
- Networks - Supported blockchain networks
- Integration Examples - Complete code examples
- API Overview - Complete API reference