Integrate content moderation in under 5 minutes
Navigate to the Keys section in your dashboard and generate a new API key.
curl -X POST \
https://zodiac-api-five.vercel.app/v1/check \
-H "Authorization: Bearer YOUR_API_KEY" \
-H "Content-Type: application/json" \
-d '{
"text": "Test message",
"metadata": {
"senderId": "user_123",
"platform": "web"
},
"mode": "community"
}'

A successful request returns the moderation verdict:
{
"isSafe": true,
"reason": null,
"reportId": null,
"userRiskScore": 0.0
}

All API requests require authentication using Bearer tokens in the Authorization header.
Authorization: Bearer YOUR_API_KEY
⚠️ Security: Never expose your API key in client-side code. Always make requests from your backend server.
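A minimal sketch of that pattern, assuming an Express backend; the /api/moderate route name is illustrative, not part of the Zodiac API:

// Keep ZODIAC_API_KEY in server-side env vars; the browser only ever calls your proxy route
// Requires Node 18+ (global fetch)
const express = require('express');
const app = express();
app.use(express.json());

// Hypothetical proxy endpoint: forwards text to /v1/check and relays the verdict
app.post('/api/moderate', async (req, res) => {
  const response = await fetch('https://zodiac-api-five.vercel.app/v1/check', {
    method: 'POST',
    headers: {
      'Authorization': `Bearer ${process.env.ZODIAC_API_KEY}`,
      'Content-Type': 'application/json'
    },
    body: JSON.stringify({
      text: req.body.text,
      metadata: { senderId: req.body.senderId, platform: 'web' }
    })
  });
  res.json(await response.json());
});

app.listen(3000);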
Use the official zodiac-guard npm package for a simpler, cleaner integration in under 60 seconds.
npm install zodiac-guard
Import and initialize with your API key. The client connects to the production API by default.
// For modern projects (ESM/TypeScript)
import Zodiac from "zodiac-guard";
// For legacy projects (CommonJS)
// const Zodiac = require("zodiac-guard");
const zodiac = new Zodiac(process.env.ZODIAC_API_KEY);

The check method is async and returns a full moderation report.
const result = await zodiac.check("fuck u", {
mode: "community", // Modes: 'community', 'dating', 'kids', 'marketplace'
metadata: {
senderId: "user_8821", // REQUIRED for risk tracking
platform: "web-app"
}
});
console.log(result);
/* {
isSafe: false,
reason: 'Profanity',
reportId: '1744aac6-27d9-4430-bb31-85c98ca45cdd',
userRiskScore: 0.2
} */

Provide a senderId and choose a moderation mode for full risk tracking.
const options = {
mode: "community",
// Options: 'community', 'dating',
// 'kids', 'marketplace'
metadata: {
senderId: "user_12345",
platform: "web-chat"
}
};
const result = await zodiac.check(
"some potentially bad text",
options
);
console.log(result.isSafe); // false
console.log(result.reason); // "Profanity"
console.log(result.reportId); // UUID for your logs
console.log(result.userRiskScore); // 0.0 – 1.0

The SDK uses fail-safe logic: if the moderation service is unreachable or credits run out, it defaults to isSafe: true so your app never crashes or blocks users due to a network hiccup.
const result = await zodiac.check("text");
if (result.error) {
console.warn(
"Moderation unavailable, defaulting to safe:",
result.error
);
}

Moderate content and receive instant safety analysis with user risk scoring.
POST https://zodiac-api-five.vercel.app/v1/check
Request body:
{
"text": "string (REQUIRED)",
"metadata": {
"senderId": "string (REQUIRED)",
"platform": "string (optional)",
"chatRoomId": "string (optional)",
"userId": "string (optional)"
},
"mode": "string (optional)"
}

Response:
{
"isSafe": true | false,
"reason": "No policy violation",
"reportId": "uuid-1234..." | null,
"userRiskScore": 0.0 to 1.0
}

Get your current usage statistics and remaining credits.
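A sketch of requesting usage from your backend. This page doesn't show the usage endpoint's path, so /v1/usage below is an assumption; confirm the real route in your dashboard:

// Hypothetical endpoint path: verify in your dashboard (Node 18+ global fetch)
const response = await fetch('https://zodiac-api-five.vercel.app/v1/usage', {
  headers: { 'Authorization': `Bearer ${process.env.ZODIAC_API_KEY}` }
});
const usage = await response.json();
console.log(usage.remainingCredits);

A typical response: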
{
"remainingCredits": 896,
"usedThisMonth": 104,
"limit": 1000
}Choose the moderation mode that best fits your use case.
Community: Standard toxicity, hate speech, and general harassment detection.
mode: "community"

Dating: Allows flirting but blocks harassment and threats. Optimized for dating apps.
mode: "dating"

Kids: Zero-tolerance for profanity, violence, and mature themes.
mode: "kids"

Marketplace: Scam detection, fake payment links, and suspicious requests.
mode: "marketplace"

All responses from the API follow a consistent structure. Below are the possible response shapes.

Safe content:
{
"isSafe": true,
"reason": null,
"reportId": null,
"userRiskScore": 0.0
}

Flagged content:
{
"isSafe": false,
"reason": "profanity detected",
"reportId": "uuid-1234-5678-abcd",
"userRiskScore": 0.65
}

Risk score escalates per sender with repeated unsafe content: 0.2 → 0.4 → 0.6 → 0.8 → 1.0.

Fail-safe (service degraded):
{
"isSafe": true,
"error": "Upstream timeout"
}

A complete Express endpoint that moderates each chat message with a direct HTTP call before accepting it:
const axios = require('axios');
app.post('/api/chat/send', async (req, res) => {
const { userId, message, roomId } = req.body;
try {
const response = await axios.post(
'https://zodiac-api-five.vercel.app/v1/check', {
text: message,
metadata: {
senderId: userId,
chatRoomId: roomId,
platform: 'web'
}
}, {
headers: {
'Authorization': `Bearer ${process.env.ZODIAC_API_KEY}`
}
});
if (!response.data.isSafe) {
return res.status(400).json({
error: 'Message blocked',
reason: response.data.reason
});
}
res.json({ success: true });
} catch (error) {
res.status(500).json({ error: 'Service unavailable' });
}
});

Integrate moderation before saving or broadcasting each message. The safest pattern is: validate → moderate → block/allow → store.
app.post('/chat/send', async (req, res) => {
const { userId, roomId, message } = req.body;
const mod = await axios.post(
'https://zodiac-api-five.vercel.app/v1/check', {
text: message,
metadata: {
senderId: userId,
chatRoomId: roomId,
platform: 'web',
type: 'chat_message'
},
mode: 'community'
}, {
headers: {
Authorization: `Bearer ${process.env.ZODIAC_API_KEY}`
}
});
if (!mod.data.isSafe) {
return res.status(400).json({
blocked: true,
reason: mod.data.reason,
userRiskScore: mod.data.userRiskScore,
reportId: mod.data.reportId
});
}
res.json({ blocked: false, userRiskScore: mod.data.userRiskScore });
});

Escalate enforcement as a sender's risk score climbs:
function chatActionByRisk(score) {
if (score >= 0.8) return 'temp-ban';
if (score >= 0.6) return 'mute-10m';
if (score >= 0.4) return 'slow-mode';
return 'warn';
}

Same chat integration using the zodiac-guard SDK. Less boilerplate, built-in error handling, and automatic fail-safe behavior.
// For modern projects (ESM/TypeScript)
import Zodiac from "zodiac-guard";
// For legacy projects (CommonJS)
// const Zodiac = require("zodiac-guard");
const zodiac = new Zodiac(process.env.ZODIAC_API_KEY);

app.post('/chat/send', async (req, res) => {
const { userId, roomId, message } = req.body;
const result = await zodiac.check(message, {
mode: 'community',
metadata: {
senderId: userId,
chatRoomId: roomId,
platform: 'web'
}
});
if (!result.isSafe) {
return res.status(400).json({
blocked: true,
reason: result.reason,
userRiskScore: result.userRiskScore,
reportId: result.reportId
});
}
// Save & broadcast message here...
res.json({ blocked: false, userRiskScore: result.userRiskScore });
});

Then map the returned risk score to an enforcement action:
function chatActionByRisk(score) {
if (score >= 0.8) return 'temp-ban';
if (score >= 0.6) return 'mute-10m';
if (score >= 0.4) return 'slow-mode';
return 'warn';
}
const action = chatActionByRisk(result.userRiskScore);
await applyAction(userId, action);

If the moderation service is unreachable, the SDK still resolves with isSafe: true and sets result.error for logging.

Moderate blog comments, forum posts, or product reviews using the SDK. Keep senderId stable per account to maintain accurate risk history.
// For modern projects (ESM/TypeScript)
import Zodiac from "zodiac-guard";
// For legacy projects (CommonJS)
// const Zodiac = require("zodiac-guard");
const zodiac = new Zodiac(process.env.ZODIAC_API_KEY);
app.post('/posts/:postId/comments', async (req, res) => {
const { postId } = req.params;
const { userId, content } = req.body;
const result = await zodiac.check(content, {
mode: 'community',
metadata: { senderId: userId, postId, platform: 'web' }
});
if (!result.isSafe) {
return res.status(400).json({
error: 'Comment blocked',
reason: result.reason,
userRiskScore: result.userRiskScore,
reportId: result.reportId
});
}
// Save the comment to your database...
res.status(201).json({ success: true, userRiskScore: result.userRiskScore });
});

Optionally, hold risky-but-safe comments for manual review inside the same route, before saving:
if (result.isSafe && result.userRiskScore >= 0.6) {
await queueForReview({
postId, userId, content,
risk: result.userRiskScore
});
return res.status(202).json({
queued: true, message: 'Comment pending review'
});
}

// The SDK never throws; check result.error
if (result.error) {
console.warn('[zodiac] Skipped moderation:', result.error);
// Optionally log & proceed, or queue for manual review
}

Tip: store result.reportId alongside flagged content in your database; it lets you trace and audit moderation decisions later.

Every sender has a persistent risk profile per developer account. On each unsafe message, the violation count increments and the score is recalculated.
riskScore = min(violationCount × 0.2, 1.0)

1 violation   -> 0.2
2 violations  -> 0.4
3 violations  -> 0.6
4 violations  -> 0.8
5+ violations -> 1.0
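A sanity-check sketch of the same formula in JavaScript; illustrative only, since the authoritative score is the userRiskScore the API returns:

// Mirrors the documented escalation: min(violationCount × 0.2, 1.0)
function expectedRiskScore(violationCount) {
  return Math.min(violationCount * 0.2, 1.0);
}
console.log(expectedRiskScore(1)); // 0.2
console.log(expectedRiskScore(3)); // 0.6
console.log(expectedRiskScore(7)); // 1 (capped at 1.0)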
Important: use a consistent `metadata.senderId` for each user. Changing sender IDs resets effective history for that user.
Implement robust error handling to ensure your application continues functioning.
If the moderation service times out, the API returns:
{
"isSafe": true,
"error": "Upstream timeout"
}

The metadata.senderId field is REQUIRED for user tracking and risk scoring.
Always make moderation requests from your backend server.
When content is flagged, save the reportId for tracking and analytics (a storage sketch follows these notes).
The zodiac-guard SDK handles retries, timeouts, and fail-safe defaults; it's recommended over raw HTTP calls.
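A sketch of persisting flagged reports, assuming a pg-style db client and a moderation_flags table of your own design:

// Hypothetical helper: swap in your own database client and schema
async function logFlaggedContent(db, userId, content, result) {
  await db.query(
    `INSERT INTO moderation_flags (report_id, sender_id, content, reason, risk_score)
     VALUES ($1, $2, $3, $4, $5)`,
    [result.reportId, userId, content, result.reason, result.userRiskScore]
  );
}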
🗨️ Comments integration (raw HTTP)
Use the same endpoint to scan comments before publishing. Keep `senderId` stable per account so risk history remains accurate.
Step 1: Moderate comment on submit
app.post('/posts/:postId/comments', async (req, res) => {
  const { postId } = req.params;
  const { userId, content } = req.body;
  const mod = await axios.post(
    'https://zodiac-api-five.vercel.app/v1/check', {
      text: content,
      metadata: {
        senderId: userId,
        postId,
        platform: 'web',
        type: 'blog_comment'
      },
      mode: 'community'
    }, {
      headers: {
        Authorization: `Bearer ${process.env.ZODIAC_API_KEY}`
      }
    });
  if (!mod.data.isSafe) {
    return res.status(400).json({
      error: 'Comment blocked',
      reason: mod.data.reason,
      userRiskScore: mod.data.userRiskScore,
      reportId: mod.data.reportId
    });
  }
  res.status(201).json({ success: true, userRiskScore: mod.data.userRiskScore });
});

Step 2: Queue risky-but-safe comments for review
if (mod.data.isSafe && mod.data.userRiskScore >= 0.6) {
  await queueForReview({
    postId, userId, content,
    risk: mod.data.userRiskScore
  });
}