Handle audio and video uploads, transcoding, and streaming in Google Antigravity applications.
# Audio and Video Processing for Google Antigravity
Implement robust audio and video processing capabilities including uploads, transcoding, and streaming in your Google Antigravity applications.
## Video Upload Component
```typescript
// components/VideoUploader.tsx
"use client";
import { useState, useRef, useCallback } from "react";
// Props for the VideoUploader component.
interface VideoUploaderProps {
// Invoked after client-side validation passes, with the selected file and
// the metadata extracted from it; the returned promise drives the
// "uploading" state.
onUpload: (file: File, metadata: VideoMetadata) => Promise<void>;
// Maximum accepted file size in megabytes (component defaults to 100).
maxSizeInMB?: number;
// Accepted MIME types (component defaults to mp4/webm/quicktime).
acceptedFormats?: string[];
}
// Intrinsic properties of a selected video file, read client-side
// before upload.
interface VideoMetadata {
// Playback length in seconds (may be fractional).
duration: number;
// Intrinsic frame width in pixels.
width: number;
// Intrinsic frame height in pixels.
height: number;
// File size in bytes (mirrors File.size).
size: number;
}
/**
 * Client-side video picker: validates format and size, extracts metadata,
 * shows a local preview, and delegates the actual upload to `onUpload`.
 */
export function VideoUploader({ onUpload, maxSizeInMB = 100, acceptedFormats = ["video/mp4", "video/webm", "video/quicktime"] }: VideoUploaderProps) {
  const [preview, setPreview] = useState<string | null>(null);
  const [uploading, setUploading] = useState(false);
  const [progress, setProgress] = useState(0);
  const [error, setError] = useState<string | null>(null);
  const videoRef = useRef<HTMLVideoElement>(null);
  const inputRef = useRef<HTMLInputElement>(null);

  // Loads the file into an off-DOM <video> to read duration/dimensions.
  // The temporary object URL is revoked on BOTH success and error paths
  // (the original leaked it when metadata loading failed).
  const extractMetadata = (file: File): Promise<VideoMetadata> => {
    return new Promise((resolve, reject) => {
      const video = document.createElement("video");
      video.preload = "metadata";
      video.onloadedmetadata = () => {
        URL.revokeObjectURL(video.src);
        resolve({
          duration: video.duration,
          width: video.videoWidth,
          height: video.videoHeight,
          size: file.size,
        });
      };
      video.onerror = () => {
        URL.revokeObjectURL(video.src); // fix: don't leak the blob URL on failure
        reject(new Error("Failed to load video metadata"));
      };
      video.src = URL.createObjectURL(file);
    });
  };

  const handleFileSelect = useCallback(async (event: React.ChangeEvent<HTMLInputElement>) => {
    const file = event.target.files?.[0];
    // Clear the input so choosing the same file again re-fires onChange.
    event.target.value = "";
    if (!file) return;
    setError(null);
    setProgress(0);
    if (!acceptedFormats.includes(file.type)) {
      setError(`Invalid format. Accepted: ${acceptedFormats.join(", ")}`);
      return;
    }
    if (file.size > maxSizeInMB * 1024 * 1024) {
      setError(`File too large. Maximum size: ${maxSizeInMB}MB`);
      return;
    }
    try {
      const metadata = await extractMetadata(file);
      // Revoke the previous preview's object URL before replacing it;
      // the original accumulated one leaked blob URL per selection.
      setPreview((prev) => {
        if (prev) URL.revokeObjectURL(prev);
        return URL.createObjectURL(file);
      });
      setUploading(true);
      await onUpload(file, metadata);
      // NOTE(review): no incremental progress is reported — onUpload would
      // need a progress callback for a real progress bar.
      setProgress(100);
    } catch (err) {
      setError(err instanceof Error ? err.message : "Upload failed");
    } finally {
      setUploading(false);
    }
  }, [acceptedFormats, maxSizeInMB, onUpload]);

  return (
    <div className="video-uploader">
      <input ref={inputRef} type="file" accept={acceptedFormats.join(",")} onChange={handleFileSelect} className="hidden" />
      {preview ? (
        <div className="preview-container">
          <video ref={videoRef} src={preview} controls className="video-preview" />
          {uploading && <div className="progress-bar" style={{ width: `${progress}%` }} />}
        </div>
      ) : (
        <button onClick={() => inputRef.current?.click()} className="upload-button">
          Select Video
        </button>
      )}
      {error && <p className="error-message">{error}</p>}
    </div>
  );
}
```
## Server-Side Upload Handler
```typescript
// app/api/upload/video/route.ts
import { NextRequest, NextResponse } from "next/server";
import { createClient } from "@/lib/supabase/server";
import { v4 as uuidv4 } from "uuid";
/**
 * Authenticated video upload endpoint: stores the file in Supabase storage,
 * records a `videos` row, then notifies the transcoding service.
 *
 * Returns 401 when unauthenticated, 400 on malformed form data, 500 when
 * storage or database writes fail.
 */
export async function POST(request: NextRequest) {
  const supabase = createClient();
  const { data: { user } } = await supabase.auth.getUser();
  if (!user) {
    return NextResponse.json({ error: "Unauthorized" }, { status: 401 });
  }

  const formData = await request.formData();

  // Validate instead of blindly casting: formData.get() returns
  // File | string | null, and the original `as File` crashed on absence.
  const file = formData.get("video");
  if (!(file instanceof File)) {
    return NextResponse.json({ error: "Missing video file" }, { status: 400 });
  }

  // JSON.parse on client-supplied text can throw; reply 400 rather than
  // letting the route 500 on malformed metadata.
  let metadata: { duration?: number; width?: number; height?: number; size?: number };
  try {
    metadata = JSON.parse(String(formData.get("metadata") ?? ""));
  } catch {
    return NextResponse.json({ error: "Invalid metadata" }, { status: 400 });
  }

  const fileId = uuidv4();
  const extension = file.name.split(".").pop() ?? "bin"; // pop() can be undefined for extensionless names
  const filePath = `videos/${user.id}/${fileId}.${extension}`;

  const { error: uploadError } = await supabase.storage.from("media").upload(filePath, file, {
    contentType: file.type,
    cacheControl: "3600",
  });
  if (uploadError) {
    return NextResponse.json({ error: "Upload failed" }, { status: 500 });
  }

  const { data: video, error: dbError } = await supabase.from("videos").insert({
    id: fileId,
    user_id: user.id,
    file_path: filePath,
    duration: metadata.duration,
    width: metadata.width,
    height: metadata.height,
    size: metadata.size,
    status: "processing",
  }).select().single();
  if (dbError) {
    // Fix: the original ignored dbError and returned a null video. Roll back
    // the stored object so a failed insert doesn't leave an orphaned file.
    await supabase.storage.from("media").remove([filePath]);
    return NextResponse.json({ error: "Failed to save video record" }, { status: 500 });
  }

  // Trigger transcoding job. A webhook outage must not fail a request whose
  // upload and DB insert already succeeded — the row stays "processing" and
  // can be retried out of band.
  try {
    await fetch(process.env.TRANSCODING_WEBHOOK_URL!, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({ videoId: fileId, filePath }),
    });
  } catch (err) {
    console.error("Failed to trigger transcoding webhook", err);
  }

  return NextResponse.json({ video });
}
```
## Audio Waveform Visualization
```typescript
// components/AudioWaveform.tsx
"use client";
import { useRef, useEffect, useState } from "react";
/**
 * Renders a live frequency-bar visualization of `audioUrl`.
 *
 * Fixes vs. the original: the analyser is now actually fed by an audio
 * source built from `audioUrl` (the prop was previously unused), the draw
 * loop is actually started (drawWaveform was defined but never called),
 * and the requestAnimationFrame loop plus the audio graph are torn down
 * on unmount / URL change.
 */
export function AudioWaveform({ audioUrl }: { audioUrl: string }) {
  const canvasRef = useRef<HTMLCanvasElement>(null);

  useEffect(() => {
    const canvas = canvasRef.current;
    if (!canvas) return;
    const ctx2d = canvas.getContext("2d");
    if (!ctx2d) return;

    const audioCtx = new AudioContext();
    const analyser = audioCtx.createAnalyser();
    analyser.fftSize = 256;

    // Wire audio element -> analyser -> speakers so the track stays audible
    // while being analysed.
    // NOTE(review): cross-origin URLs need CORS headers on the audio server
    // for getByteFrequencyData to return real data — confirm for your CDN.
    const audio = new Audio(audioUrl);
    audio.crossOrigin = "anonymous";
    const source = audioCtx.createMediaElementSource(audio);
    source.connect(analyser);
    analyser.connect(audioCtx.destination);

    const bufferLength = analyser.frequencyBinCount;
    const dataArray = new Uint8Array(bufferLength);
    let rafId = 0;

    const draw = () => {
      rafId = requestAnimationFrame(draw);
      analyser.getByteFrequencyData(dataArray);
      ctx2d.fillStyle = "#1a1a1a";
      ctx2d.fillRect(0, 0, canvas.width, canvas.height);
      const barWidth = (canvas.width / bufferLength) * 2.5;
      let x = 0;
      for (let i = 0; i < bufferLength; i++) {
        const barHeight = (dataArray[i] / 255) * canvas.height;
        const gradient = ctx2d.createLinearGradient(0, canvas.height - barHeight, 0, canvas.height);
        gradient.addColorStop(0, "#8b5cf6");
        gradient.addColorStop(1, "#3b82f6");
        ctx2d.fillStyle = gradient;
        ctx2d.fillRect(x, canvas.height - barHeight, barWidth, barHeight);
        x += barWidth + 1;
      }
    };
    draw();

    return () => {
      cancelAnimationFrame(rafId);
      audio.pause();
      source.disconnect();
      analyser.disconnect();
      void audioCtx.close();
    };
  }, [audioUrl]);

  return <canvas ref={canvasRef} width={600} height={200} className="audio-waveform" />;
}
```
## Video Player with Custom Controls
```typescript
// components/VideoPlayer.tsx
"use client";
import { useRef, useState } from "react";
export function VideoPlayer({ src, poster }: { src: string; poster?: string }) {
const videoRef = useRef<HTMLVideoElement>(null);
const [playing, setPlaying] = useState(false);
const [progress, setProgress] = useState(0);
const [volume, setVolume] = useState(1);
const togglePlay = () => {
if (videoRef.current) {
if (playing) videoRef.current.pause();
else videoRef.current.play();
setPlaying(!playing);
}
};
const handleTimeUpdate = () => {
if (videoRef.current) {
setProgress((videoRef.current.currentTime / videoRef.current.duration) * 100);
}
};
return (
<div className="video-player">
<video ref={videoRef} src={src} poster={poster} onTimeUpdate={handleTimeUpdate} onClick={togglePlay} />
<div className="controls">
<button onClick={togglePlay}>{playing ? "⏸" : "▶"}</button>
<input type="range" value={progress} onChange={(e) => {
if (videoRef.current) videoRef.current.currentTime = (Number(e.target.value) / 100) * videoRef.current.duration;
}} />
<input type="range" min="0" max="1" step="0.1" value={volume} onChange={(e) => {
setVolume(Number(e.target.value));
if (videoRef.current) videoRef.current.volume = Number(e.target.value);
}} />
</div>
</div>
);
}
```
## Best Practices
1. **Progressive Upload**: Use chunked uploads for large files with resume capability
2. **Transcoding**: Offload video transcoding to background workers or cloud services
3. **Adaptive Streaming**: Use HLS or DASH for adaptive bitrate streaming
4. **Thumbnail Generation**: Generate video thumbnails server-side during processing
5. **Storage Optimization**: Store multiple quality versions for different bandwidth scenarios.

This video prompt is ideal for developers working on media-heavy applications such as video platforms, podcast tools, and streaming services.
By using this prompt, you can save hours of manual coding and ensure best practices are followed from the start. It's particularly valuable for teams looking to maintain consistency across their video implementations.
All prompts on the Antigravity AI Directory are free to use for both personal and commercial projects. No attribution is required, though it's always appreciated.
This prompt works excellently with Claude, ChatGPT, Cursor, GitHub Copilot, and other modern AI coding assistants. For best results, use models with large context windows.
You can modify the prompt by adding specific requirements, constraints, or preferences. For video projects, consider mentioning your framework version, coding style, and any specific libraries you're using.