Implement content moderation in Google Antigravity with AI filtering, flagging, and admin review queues.
# Content Moderation for Google Antigravity
Build robust content moderation systems with AI filtering, user reporting, and admin review.
## Database Schema
```sql
-- Lifecycle states for a moderation queue entry.
CREATE TYPE moderation_status AS ENUM ('pending', 'approved', 'rejected', 'flagged');

-- One row per piece of content awaiting (or having received) a moderation
-- decision. (content_type, content_id) identifies the content polymorphically.
CREATE TABLE public.moderation_queue (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
content_type TEXT NOT NULL,
content_id UUID NOT NULL,
content_preview TEXT,
status moderation_status DEFAULT 'pending',
reason TEXT,
-- Keep queue history even if the reporting/reviewing account is deleted.
reporter_id UUID REFERENCES auth.users(id) ON DELETE SET NULL,
reviewer_id UUID REFERENCES auth.users(id) ON DELETE SET NULL,
reviewed_at TIMESTAMPTZ,
created_at TIMESTAMPTZ DEFAULT NOW(),
-- Required by the application's upsert with onConflict: "content_type,content_id";
-- PostgreSQL ON CONFLICT needs a matching unique index, and each piece of
-- content should appear in the queue at most once.
CONSTRAINT moderation_queue_content_unique UNIQUE (content_type, content_id)
);

-- Raw user reports; multiple reports may exist per piece of content.
CREATE TABLE public.content_reports (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
content_type TEXT NOT NULL,
content_id UUID NOT NULL,
reporter_id UUID REFERENCES auth.users(id) ON DELETE SET NULL,
reason TEXT NOT NULL,
details TEXT,
created_at TIMESTAMPTZ DEFAULT NOW()
);

-- Serves the admin queue view: filter by status, order by created_at.
CREATE INDEX idx_moderation_status ON public.moderation_queue(status, created_at);
-- Serves the per-content report count used for threshold auto-flagging.
CREATE INDEX idx_reports_content ON public.content_reports(content_type, content_id);
```
## Moderation Service
```typescript
// lib/moderation.ts
import { createClient } from "@/lib/supabase/server";
/**
 * Enqueues a piece of content for human moderation with status "pending".
 *
 * @param contentType - Polymorphic content kind (e.g. "post", "comment").
 * @param contentId - UUID of the content row being moderated.
 * @param contentPreview - Short excerpt shown to reviewers in the queue UI.
 * @throws Error if the insert into moderation_queue fails.
 */
export async function submitForModeration(contentType: string, contentId: string, contentPreview: string): Promise<void> {
const supabase = createClient();
// Supabase returns errors in the result object rather than throwing;
// surface them instead of silently dropping the queue entry.
const { error } = await supabase
.from("moderation_queue")
.insert({ content_type: contentType, content_id: contentId, content_preview: contentPreview, status: "pending" });
if (error) {
throw new Error(`Failed to submit content for moderation: ${error.message}`);
}
}
/** Number of distinct reports after which content is auto-flagged for review. */
const REPORT_AUTO_FLAG_THRESHOLD = 3;

/**
 * Records a user report against a piece of content and auto-flags the content
 * in the moderation queue once it has accumulated enough reports.
 *
 * @param contentType - Polymorphic content kind (e.g. "post", "comment").
 * @param contentId - UUID of the reported content.
 * @param reporterId - auth.users id of the reporting user.
 * @param reason - Report reason selected by the user.
 * @param details - Optional free-text context from the reporter.
 * @throws Error if any of the underlying database operations fail.
 */
export async function reportContent(contentType: string, contentId: string, reporterId: string, reason: string, details?: string): Promise<void> {
const supabase = createClient();

// Record the report; surface failures instead of silently dropping them.
const { error: insertError } = await supabase
.from("content_reports")
.insert({ content_type: contentType, content_id: contentId, reporter_id: reporterId, reason, details });
if (insertError) {
throw new Error(`Failed to record report: ${insertError.message}`);
}

// Count all reports for this content (head: true avoids fetching rows).
const { count, error: countError } = await supabase
.from("content_reports")
.select("*", { count: "exact", head: true })
.eq("content_type", contentType)
.eq("content_id", contentId);
if (countError) {
throw new Error(`Failed to count reports: ${countError.message}`);
}

if (count !== null && count >= REPORT_AUTO_FLAG_THRESHOLD) {
// NOTE(review): this onConflict target requires a UNIQUE constraint on
// moderation_queue(content_type, content_id) — confirm the schema has one,
// otherwise the upsert errors at runtime.
const { error: upsertError } = await supabase
.from("moderation_queue")
.upsert(
{ content_type: contentType, content_id: contentId, status: "flagged", reason: "Multiple reports" },
{ onConflict: "content_type,content_id" },
);
if (upsertError) {
throw new Error(`Failed to flag content: ${upsertError.message}`);
}
}
}
/**
 * Applies a reviewer's decision to a moderation queue item, recording who
 * decided, when, and (optionally) why.
 *
 * @param itemId - moderation_queue row id being decided.
 * @param reviewerId - auth.users id of the acting reviewer/admin.
 * @param decision - Final status to apply ("approved" or "rejected").
 * @param reason - Optional justification stored on the queue row.
 * @throws Error if the update fails.
 */
export async function moderateContent(itemId: string, reviewerId: string, decision: "approved" | "rejected", reason?: string): Promise<void> {
const supabase = createClient();
const { error } = await supabase
.from("moderation_queue")
.update({ status: decision, reviewer_id: reviewerId, reviewed_at: new Date().toISOString(), reason })
.eq("id", itemId);
if (error) {
throw new Error(`Failed to apply moderation decision: ${error.message}`);
}
}
```
## AI Content Filter
```typescript
// lib/ai-moderation.ts
/**
 * Outcome of an AI moderation check on a piece of text.
 */
export interface ModerationResult {
// True when the provider did not flag the text in any category.
safe: boolean;
// Per-category scores (0..1) plus whether the provider flagged each one.
categories: { name: string; score: number; flagged: boolean }[];
// Recommended handling derived from how many categories were flagged.
action: "allow" | "review" | "block";
}
/**
 * Runs text through the OpenAI moderation endpoint and maps the response to a
 * ModerationResult with a recommended action:
 * - 0 flagged categories  -> "allow"
 * - 1-2 flagged categories -> "review"
 * - 3+ flagged categories  -> "block"
 *
 * Requires OPENAI_API_KEY in the environment.
 *
 * @param text - The user-generated text to screen.
 * @throws Error if the moderation API returns a non-2xx response.
 */
export async function checkContent(text: string): Promise<ModerationResult> {
const response = await fetch("https://api.openai.com/v1/moderations", {
method: "POST",
headers: { "Authorization": `Bearer ${process.env.OPENAI_API_KEY}`, "Content-Type": "application/json" },
body: JSON.stringify({ input: text }),
});
// Fail loudly on API errors; otherwise data.results[0] below would throw an
// opaque TypeError on the error payload.
if (!response.ok) {
throw new Error(`Moderation API request failed: ${response.status} ${response.statusText}`);
}
const data = await response.json();
const result = data.results[0];
// Flatten { category: score } and { category: flagged } maps into one list.
const categories = Object.entries(result.category_scores).map(([name, score]) => ({
name,
score: score as number,
flagged: Boolean(result.categories[name]),
}));
const flaggedCount = categories.filter((c) => c.flagged).length;
const action = flaggedCount > 2 ? "block" : flaggedCount > 0 ? "review" : "allow";
return { safe: !result.flagged, categories, action };
}
```
## Report Dialog Component
```typescript
// components/ReportDialog.tsx
"use client";
import { useState } from "react";
/** Reasons offered to the user in the report dropdown. */
const REPORT_REASONS = ["Spam", "Harassment", "Hate speech", "Misinformation", "Copyright violation", "Other"];

/**
 * Modal form for reporting a piece of content. Posts to /api/report and
 * closes itself on success; on failure it shows an error and re-enables the
 * submit button so the user can retry.
 */
export function ReportDialog({ contentType, contentId, onClose }: { contentType: string; contentId: string; onClose: () => void }) {
const [reason, setReason] = useState("");
const [details, setDetails] = useState("");
const [submitting, setSubmitting] = useState(false);
const [error, setError] = useState<string | null>(null);

const handleSubmit = async (e: React.FormEvent) => {
e.preventDefault();
setSubmitting(true);
setError(null);
try {
const res = await fetch("/api/report", {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify({ contentType, contentId, reason, details }),
});
if (!res.ok) {
throw new Error(`Request failed with status ${res.status}`);
}
// Only close on success so a failed report isn't silently lost.
onClose();
} catch {
setError("Failed to submit report. Please try again.");
} finally {
// Always re-enable the button; the original left it stuck disabled.
setSubmitting(false);
}
};

return (
<div className="report-dialog">
<h3>Report Content</h3>
<form onSubmit={handleSubmit}>
<label>Reason</label>
<select value={reason} onChange={(e) => setReason(e.target.value)} required>
<option value="">Select a reason</option>
{REPORT_REASONS.map((r) => <option key={r} value={r}>{r}</option>)}
</select>
<label>Additional details (optional)</label>
<textarea value={details} onChange={(e) => setDetails(e.target.value)} placeholder="Provide more context..." />
{error && <p className="error" role="alert">{error}</p>}
<div className="actions">
<button type="button" onClick={onClose}>Cancel</button>
<button type="submit" disabled={submitting}>{submitting ? "Submitting..." : "Submit Report"}</button>
</div>
</form>
</div>
);
}
```
## Admin Moderation Queue
```typescript
// app/admin/moderation/page.tsx
import { createClient } from "@/lib/supabase/server";
import { ModerationItem } from "@/components/admin/ModerationItem";
/**
 * Server-rendered admin view of the moderation queue: all pending and
 * auto-flagged items, oldest first (FIFO review order).
 *
 * NOTE(review): there is no visible authorization check here — confirm that
 * admin access is enforced by middleware or RLS policies on moderation_queue,
 * otherwise any authenticated user can load this page's data.
 */
export default async function ModerationPage() {
const supabase = createClient();
const { data: items, error } = await supabase
.from("moderation_queue")
.select("*")
.in("status", ["pending", "flagged"])
.order("created_at", { ascending: true });
// Surface query failures instead of silently rendering an empty queue.
if (error) {
throw new Error(`Failed to load moderation queue: ${error.message}`);
}
return (
<div className="moderation-queue">
<h1>Moderation Queue</h1>
<p>{items?.length || 0} items pending review</p>
<div className="queue-list">
{items?.map((item) => <ModerationItem key={item.id} item={item} />)}
</div>
</div>
);
}
```
## Best Practices
1. **AI Pre-screening**: Use AI to pre-filter obvious violations
2. **Threshold Triggers**: Auto-flag content with multiple reports
3. **Audit Trail**: Keep complete audit trail of decisions
4. **Appeals Process**: Allow users to appeal moderation decisions
5. **Rate Limiting**: Prevent abuse of reporting systemThis moderation prompt is ideal for developers working on:
By using this prompt, you can save hours of manual coding and ensure best practices are followed from the start. It's particularly valuable for teams looking to maintain consistency across their moderation implementations.
Yes! All prompts on Antigravity AI Directory are free to use for both personal and commercial projects. No attribution required, though it's always appreciated.
This prompt works excellently with Claude, ChatGPT, Cursor, GitHub Copilot, and other modern AI coding assistants. For best results, use models with large context windows.
You can modify the prompt by adding specific requirements, constraints, or preferences. For moderation projects, consider mentioning your framework version, coding style, and any specific libraries you're using.