mirror of https://github.com/mofeng-git/One-KVM.git (synced 2026-01-29 00:51:53 +08:00)
Commit: init

src/video/capture.rs (new file, 693 lines)
@@ -0,0 +1,693 @@
//! V4L2 video capture implementation
//!
//! Provides async video capture using memory-mapped buffers.

use bytes::Bytes;
use std::io;
use std::path::{Path, PathBuf};
use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
use std::sync::Arc;
use std::time::{Duration, Instant};
use tokio::sync::{broadcast, watch, Mutex};
use tracing::{debug, error, info, warn};
use v4l::buffer::Type as BufferType;
use v4l::io::traits::CaptureStream;
use v4l::prelude::*;
use v4l::video::capture::Parameters;
use v4l::video::Capture;
use v4l::Format;

use super::format::{PixelFormat, Resolution};
use super::frame::VideoFrame;
use crate::error::{AppError, Result};

/// Default number of capture buffers (reduced from 4 to 2 for lower latency)
const DEFAULT_BUFFER_COUNT: u32 = 2;
/// Default capture timeout in seconds
const DEFAULT_TIMEOUT: u64 = 2;
/// Minimum valid frame size (bytes)
const MIN_FRAME_SIZE: usize = 128;

/// Video capturer configuration
#[derive(Debug, Clone)]
pub struct CaptureConfig {
    /// Device path
    pub device_path: PathBuf,
    /// Desired resolution
    pub resolution: Resolution,
    /// Desired pixel format
    pub format: PixelFormat,
    /// Desired frame rate (0 = max available)
    pub fps: u32,
    /// Number of capture buffers
    pub buffer_count: u32,
    /// Capture timeout
    pub timeout: Duration,
    /// JPEG quality (1-100, for MJPEG sources with hardware quality control)
    pub jpeg_quality: u8,
}

impl Default for CaptureConfig {
    fn default() -> Self {
        Self {
            device_path: PathBuf::from("/dev/video0"),
            resolution: Resolution::HD1080,
            format: PixelFormat::Mjpeg,
            fps: 30,
            buffer_count: DEFAULT_BUFFER_COUNT,
            timeout: Duration::from_secs(DEFAULT_TIMEOUT),
            jpeg_quality: 80,
        }
    }
}

impl CaptureConfig {
    /// Create config for a specific device
    pub fn for_device(path: impl AsRef<Path>) -> Self {
        Self {
            device_path: path.as_ref().to_path_buf(),
            ..Default::default()
        }
    }

    /// Set resolution
    pub fn with_resolution(mut self, width: u32, height: u32) -> Self {
        self.resolution = Resolution::new(width, height);
        self
    }

    /// Set format
    pub fn with_format(mut self, format: PixelFormat) -> Self {
        self.format = format;
        self
    }

    /// Set frame rate
    pub fn with_fps(mut self, fps: u32) -> Self {
        self.fps = fps;
        self
    }
}
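
// --- Editor's illustrative sketch (not part of the original file) ---
// Exercises the builder chain above; `/dev/video1` and the 720p/60 values
// are example assumptions, not project defaults.
#[cfg(test)]
mod config_builder_example {
    use super::*;

    #[test]
    fn builds_custom_config() {
        let cfg = CaptureConfig::for_device("/dev/video1")
            .with_resolution(1280, 720)
            .with_fps(60);
        assert_eq!(cfg.device_path, PathBuf::from("/dev/video1"));
        assert_eq!(cfg.resolution.width, 1280);
        assert_eq!(cfg.resolution.height, 720);
        assert_eq!(cfg.fps, 60);
        // Fields not set explicitly keep their Default values.
        assert_eq!(cfg.buffer_count, DEFAULT_BUFFER_COUNT);
    }
}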

/// Capture statistics
#[derive(Debug, Clone, Default)]
pub struct CaptureStats {
    /// Total frames captured
    pub frames_captured: u64,
    /// Frames dropped (invalid/too small)
    pub frames_dropped: u64,
    /// Current FPS (calculated)
    pub current_fps: f32,
    /// Average frame size in bytes
    pub avg_frame_size: usize,
    /// Capture errors
    pub errors: u64,
    /// Last frame timestamp
    pub last_frame_ts: Option<Instant>,
    /// Whether signal is present
    pub signal_present: bool,
}

/// Video capturer state
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum CaptureState {
    /// Not started
    Stopped,
    /// Starting (initializing device)
    Starting,
    /// Running and capturing
    Running,
    /// No signal from source
    NoSignal,
    /// Error occurred
    Error,
    /// Device was lost (disconnected)
    DeviceLost,
}

/// Async video capturer
pub struct VideoCapturer {
    config: CaptureConfig,
    state: Arc<watch::Sender<CaptureState>>,
    state_rx: watch::Receiver<CaptureState>,
    stats: Arc<Mutex<CaptureStats>>,
    frame_tx: broadcast::Sender<VideoFrame>,
    stop_flag: Arc<AtomicBool>,
    sequence: Arc<AtomicU64>,
    capture_handle: Mutex<Option<tokio::task::JoinHandle<()>>>,
    /// Last error that occurred (device path, reason)
    last_error: Arc<parking_lot::RwLock<Option<(String, String)>>>,
}

impl VideoCapturer {
    /// Create a new video capturer
    pub fn new(config: CaptureConfig) -> Self {
        let (state_tx, state_rx) = watch::channel(CaptureState::Stopped);
        let (frame_tx, _) = broadcast::channel(16); // Buffer up to 16 frames

        Self {
            config,
            state: Arc::new(state_tx),
            state_rx,
            stats: Arc::new(Mutex::new(CaptureStats::default())),
            frame_tx,
            stop_flag: Arc::new(AtomicBool::new(false)),
            sequence: Arc::new(AtomicU64::new(0)),
            capture_handle: Mutex::new(None),
            last_error: Arc::new(parking_lot::RwLock::new(None)),
        }
    }

    /// Get current capture state
    pub fn state(&self) -> CaptureState {
        *self.state_rx.borrow()
    }

    /// Subscribe to state changes
    pub fn state_watch(&self) -> watch::Receiver<CaptureState> {
        self.state_rx.clone()
    }

    /// Get last error (device path, reason)
    pub fn last_error(&self) -> Option<(String, String)> {
        self.last_error.read().clone()
    }

    /// Clear last error
    pub fn clear_error(&self) {
        *self.last_error.write() = None;
    }

    /// Subscribe to frames
    pub fn subscribe(&self) -> broadcast::Receiver<VideoFrame> {
        self.frame_tx.subscribe()
    }

    /// Get frame sender (for sharing with other components like WebRTC)
    pub fn frame_sender(&self) -> broadcast::Sender<VideoFrame> {
        self.frame_tx.clone()
    }

    /// Get capture statistics
    pub async fn stats(&self) -> CaptureStats {
        self.stats.lock().await.clone()
    }

    /// Get config
    pub fn config(&self) -> &CaptureConfig {
        &self.config
    }

    /// Start capturing in background
    pub async fn start(&self) -> Result<()> {
        let current_state = self.state();
        // Already running or starting - nothing to do
        if current_state == CaptureState::Running || current_state == CaptureState::Starting {
            return Ok(());
        }

        info!(
            "Starting capture on {:?} at {}x{} {}",
            self.config.device_path,
            self.config.resolution.width,
            self.config.resolution.height,
            self.config.format
        );

        // Set Starting state immediately to prevent concurrent start attempts
        let _ = self.state.send(CaptureState::Starting);

        // Clear any previous error
        *self.last_error.write() = None;

        self.stop_flag.store(false, Ordering::SeqCst);

        let config = self.config.clone();
        let state = self.state.clone();
        let stats = self.stats.clone();
        let frame_tx = self.frame_tx.clone();
        let stop_flag = self.stop_flag.clone();
        let sequence = self.sequence.clone();
        let last_error = self.last_error.clone();

        let handle = tokio::task::spawn_blocking(move || {
            capture_loop(config, state, stats, frame_tx, stop_flag, sequence, last_error);
        });

        *self.capture_handle.lock().await = Some(handle);

        Ok(())
    }

    /// Stop capturing
    pub async fn stop(&self) -> Result<()> {
        info!("Stopping capture");
        self.stop_flag.store(true, Ordering::SeqCst);

        if let Some(handle) = self.capture_handle.lock().await.take() {
            let _ = handle.await;
        }

        let _ = self.state.send(CaptureState::Stopped);
        Ok(())
    }

    /// Check if capturing
    pub fn is_running(&self) -> bool {
        self.state() == CaptureState::Running
    }

    /// Get the latest frame
    ///
    /// Not currently implemented: the broadcast channel does not retain the
    /// last frame, so this always returns None.
    pub fn latest_frame(&self) -> Option<VideoFrame> {
        // Tracking the latest frame would require extra state alongside the
        // broadcast channel; for now, callers should use subscribe().
        None
    }
}
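
// --- Editor's illustrative sketch (not part of the original file) ---
// One way to consume the capturer from async code: start it, drain a few
// frames from a broadcast subscription, then stop. Compiles against the API
// above; actually running it requires real V4L2 hardware.
#[allow(dead_code)]
async fn example_stream_frames(capturer: &VideoCapturer) -> Result<()> {
    capturer.start().await?;
    let mut rx = capturer.subscribe();
    for _ in 0..3 {
        // recv() returns Err(Lagged) if this consumer falls more than
        // 16 frames behind, since broadcast::channel(16) is used above.
        if let Ok(_frame) = rx.recv().await {
            // Forward the frame to an encoder or WebSocket here.
        }
    }
    capturer.stop().await
}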

/// Main capture loop (runs in blocking thread)
fn capture_loop(
    config: CaptureConfig,
    state: Arc<watch::Sender<CaptureState>>,
    stats: Arc<Mutex<CaptureStats>>,
    frame_tx: broadcast::Sender<VideoFrame>,
    stop_flag: Arc<AtomicBool>,
    sequence: Arc<AtomicU64>,
    error_holder: Arc<parking_lot::RwLock<Option<(String, String)>>>,
) {
    let result = run_capture(
        &config,
        &state,
        &stats,
        &frame_tx,
        &stop_flag,
        &sequence,
    );

    match result {
        Ok(_) => {
            let _ = state.send(CaptureState::Stopped);
        }
        Err(AppError::VideoDeviceLost { device, reason }) => {
            error!("Video device lost: {} - {}", device, reason);
            // Store the error for recovery handling
            *error_holder.write() = Some((device, reason));
            let _ = state.send(CaptureState::DeviceLost);
        }
        Err(e) => {
            error!("Capture error: {}", e);
            let _ = state.send(CaptureState::Error);
        }
    }
}

fn run_capture(
    config: &CaptureConfig,
    state: &watch::Sender<CaptureState>,
    stats: &Arc<Mutex<CaptureStats>>,
    frame_tx: &broadcast::Sender<VideoFrame>,
    stop_flag: &AtomicBool,
    sequence: &AtomicU64,
) -> Result<()> {
    // Retry logic for device busy errors
    const MAX_RETRIES: u32 = 5;
    const RETRY_DELAY_MS: u64 = 200;

    let mut last_error = None;

    for attempt in 0..MAX_RETRIES {
        if stop_flag.load(Ordering::Relaxed) {
            return Ok(());
        }

        // Open device
        let device = match Device::with_path(&config.device_path) {
            Ok(d) => d,
            Err(e) => {
                let err_str = e.to_string();
                if err_str.contains("busy") || err_str.contains("resource") {
                    warn!(
                        "Device busy on attempt {}/{}, retrying in {}ms...",
                        attempt + 1,
                        MAX_RETRIES,
                        RETRY_DELAY_MS
                    );
                    std::thread::sleep(Duration::from_millis(RETRY_DELAY_MS));
                    last_error = Some(AppError::VideoError(format!(
                        "Failed to open device {:?}: {}",
                        config.device_path, e
                    )));
                    continue;
                }
                return Err(AppError::VideoError(format!(
                    "Failed to open device {:?}: {}",
                    config.device_path, e
                )));
            }
        };

        // Set format
        let format = Format::new(
            config.resolution.width,
            config.resolution.height,
            config.format.to_fourcc(),
        );

        let actual_format = match device.set_format(&format) {
            Ok(f) => f,
            Err(e) => {
                let err_str = e.to_string();
                if err_str.contains("busy") || err_str.contains("resource") {
                    warn!(
                        "Device busy on set_format attempt {}/{}, retrying in {}ms...",
                        attempt + 1,
                        MAX_RETRIES,
                        RETRY_DELAY_MS
                    );
                    std::thread::sleep(Duration::from_millis(RETRY_DELAY_MS));
                    last_error = Some(AppError::VideoError(format!("Failed to set format: {}", e)));
                    continue;
                }
                return Err(AppError::VideoError(format!("Failed to set format: {}", e)));
            }
        };

        // Device opened and format set successfully - proceed with capture
        return run_capture_inner(
            config,
            state,
            stats,
            frame_tx,
            stop_flag,
            sequence,
            device,
            actual_format,
        );
    }

    // All retries exhausted
    Err(last_error.unwrap_or_else(|| {
        AppError::VideoError("Failed to open device after all retries".to_string())
    }))
}

/// Inner capture function after device is successfully opened
fn run_capture_inner(
    config: &CaptureConfig,
    state: &watch::Sender<CaptureState>,
    stats: &Arc<Mutex<CaptureStats>>,
    frame_tx: &broadcast::Sender<VideoFrame>,
    stop_flag: &AtomicBool,
    sequence: &AtomicU64,
    device: Device,
    actual_format: Format,
) -> Result<()> {
    info!(
        "Capture format: {}x{} {:?} stride={}",
        actual_format.width, actual_format.height, actual_format.fourcc, actual_format.stride
    );

    let resolution = Resolution::new(actual_format.width, actual_format.height);
    let pixel_format = PixelFormat::from_fourcc(actual_format.fourcc).unwrap_or(config.format);

    // Try to set hardware FPS (V4L2 VIDIOC_S_PARM)
    if config.fps > 0 {
        match device.set_params(&Parameters::with_fps(config.fps)) {
            Ok(actual_params) => {
                // Extract actual FPS from returned interval (numerator/denominator)
                let actual_hw_fps = if actual_params.interval.numerator > 0 {
                    actual_params.interval.denominator / actual_params.interval.numerator
                } else {
                    0
                };

                if actual_hw_fps == config.fps {
                    info!("Hardware FPS set successfully: {} fps", actual_hw_fps);
                } else if actual_hw_fps > 0 {
                    info!(
                        "Hardware FPS coerced: requested {} fps, got {} fps",
                        config.fps, actual_hw_fps
                    );
                } else {
                    warn!("Hardware FPS setting returned invalid interval");
                }
            }
            Err(e) => {
                warn!("Failed to set hardware FPS: {}", e);
            }
        }
    }

    // Create stream with mmap buffers
    let mut stream =
        MmapStream::with_buffers(&device, BufferType::VideoCapture, config.buffer_count)
            .map_err(|e| AppError::VideoError(format!("Failed to create stream: {}", e)))?;

    let _ = state.send(CaptureState::Running);
    info!("Capture started");

    // FPS calculation variables
    let mut fps_frame_count = 0u64;
    let mut fps_window_start = Instant::now();
    let fps_window_duration = Duration::from_secs(1);

    // Main capture loop
    while !stop_flag.load(Ordering::Relaxed) {
        // Try to capture a frame
        let (buf, meta) = match stream.next() {
            Ok(frame_data) => frame_data,
            Err(e) => {
                if e.kind() == io::ErrorKind::TimedOut {
                    warn!("Capture timeout - no signal?");
                    let _ = state.send(CaptureState::NoSignal);

                    // Update stats
                    if let Ok(mut s) = stats.try_lock() {
                        s.signal_present = false;
                    }

                    // Wait a bit before retrying
                    std::thread::sleep(Duration::from_millis(100));
                    continue;
                }

                // Check for device loss errors
                let is_device_lost = match e.raw_os_error() {
                    Some(6) => true,   // ENXIO - No such device or address
                    Some(19) => true,  // ENODEV - No such device
                    Some(5) => true,   // EIO - I/O error (device removed)
                    Some(32) => true,  // EPIPE - Broken pipe
                    Some(108) => true, // ESHUTDOWN - Transport endpoint shutdown
                    _ => false,
                };

                if is_device_lost {
                    let device_path = config.device_path.display().to_string();
                    error!("Video device lost: {} - {}", device_path, e);
                    return Err(AppError::VideoDeviceLost {
                        device: device_path,
                        reason: e.to_string(),
                    });
                }

                error!("Capture error: {}", e);
                if let Ok(mut s) = stats.try_lock() {
                    s.errors += 1;
                }
                continue;
            }
        };

        // Use actual bytes used, not buffer size
        let frame_size = meta.bytesused as usize;

        // Validate frame
        if frame_size < MIN_FRAME_SIZE {
            debug!("Dropping small frame: {} bytes (bytesused={})", frame_size, meta.bytesused);
            if let Ok(mut s) = stats.try_lock() {
                s.frames_dropped += 1;
            }
            continue;
        }

        // For JPEG formats, validate header
        if pixel_format.is_compressed() && !is_valid_jpeg(&buf[..frame_size]) {
            debug!("Dropping invalid JPEG frame (size={})", frame_size);
            if let Ok(mut s) = stats.try_lock() {
                s.frames_dropped += 1;
            }
            continue;
        }

        // Create frame with actual data size
        let seq = sequence.fetch_add(1, Ordering::Relaxed);
        let frame = VideoFrame::new(
            Bytes::copy_from_slice(&buf[..frame_size]),
            resolution,
            pixel_format,
            actual_format.stride,
            seq,
        );

        // Update state if it was previously NoSignal
        if *state.borrow() == CaptureState::NoSignal {
            let _ = state.send(CaptureState::Running);
        }

        // Send frame to subscribers
        let receiver_count = frame_tx.receiver_count();
        if receiver_count > 0 {
            if let Err(e) = frame_tx.send(frame) {
                debug!("No active receivers for frame: {}", e);
            }
        } else if seq % 60 == 0 {
            // Log every 60 frames (about 1 second at 60fps) when no receivers
            debug!("No receivers for video frames (receiver_count=0)");
        }

        // Update stats
        if let Ok(mut s) = stats.try_lock() {
            s.frames_captured += 1;
            s.signal_present = true;
            s.last_frame_ts = Some(Instant::now());

            // Update FPS calculation
            fps_frame_count += 1;
            let elapsed = fps_window_start.elapsed();

            if elapsed >= fps_window_duration {
                // Calculate FPS from the completed window
                s.current_fps = (fps_frame_count as f32 / elapsed.as_secs_f32()).max(0.0);
                // Reset for next window
                fps_frame_count = 0;
                fps_window_start = Instant::now();
            } else if elapsed.as_millis() > 100 && fps_frame_count > 0 {
                // Provide a partial estimate once we have at least 100ms of data
                s.current_fps = (fps_frame_count as f32 / elapsed.as_secs_f32()).max(0.0);
            }
        }
    }

    info!("Capture stopped");
    Ok(())
}
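
// Editor's note (not original code): a worked example of the FPS window in
// run_capture_inner. The loop divides the frame count by the *actual*
// elapsed time, so 29 frames over 1.013 s reports ~28.6 fps instead of
// assuming the window was exactly one second. A standalone version:
#[allow(dead_code)]
fn window_fps(frames_in_window: u64, elapsed: Duration) -> f32 {
    let secs = elapsed.as_secs_f32();
    if secs > 0.0 {
        (frames_in_window as f32 / secs).max(0.0)
    } else {
        0.0
    }
}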

/// Validate JPEG frame data
fn is_valid_jpeg(data: &[u8]) -> bool {
    if data.len() < 125 {
        return false;
    }

    // Check start marker (0xFFD8)
    let start_marker = ((data[0] as u16) << 8) | data[1] as u16;
    if start_marker != 0xFFD8 {
        return false;
    }

    // Check end marker
    let end = data.len();
    let end_marker = ((data[end - 2] as u16) << 8) | data[end - 1] as u16;

    // Valid end markers: 0xFFD9, 0xD900, 0x0000 (padded)
    matches!(end_marker, 0xFFD9 | 0xD900 | 0x0000)
}

/// Frame grabber for one-shot capture
pub struct FrameGrabber {
    device_path: PathBuf,
}

impl FrameGrabber {
    /// Create a new frame grabber
    pub fn new(device_path: impl AsRef<Path>) -> Self {
        Self {
            device_path: device_path.as_ref().to_path_buf(),
        }
    }

    /// Capture a single frame
    pub async fn grab(
        &self,
        resolution: Resolution,
        format: PixelFormat,
    ) -> Result<VideoFrame> {
        let device_path = self.device_path.clone();

        tokio::task::spawn_blocking(move || {
            grab_single_frame(&device_path, resolution, format)
        })
        .await
        .map_err(|e| AppError::VideoError(format!("Grab task failed: {}", e)))?
    }
}
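
// --- Editor's illustrative sketch (not part of the original file) ---
// One-shot capture for a preview thumbnail; `/dev/video0` is an example path.
#[allow(dead_code)]
async fn example_snapshot() -> Result<VideoFrame> {
    let grabber = FrameGrabber::new("/dev/video0");
    grabber.grab(Resolution::HD1080, PixelFormat::Mjpeg).await
}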

fn grab_single_frame(
    device_path: &Path,
    resolution: Resolution,
    format: PixelFormat,
) -> Result<VideoFrame> {
    let device = Device::with_path(device_path).map_err(|e| {
        AppError::VideoError(format!("Failed to open device: {}", e))
    })?;

    let fmt = Format::new(resolution.width, resolution.height, format.to_fourcc());
    let actual = device.set_format(&fmt).map_err(|e| {
        AppError::VideoError(format!("Failed to set format: {}", e))
    })?;

    let mut stream = MmapStream::with_buffers(&device, BufferType::VideoCapture, 2)
        .map_err(|e| AppError::VideoError(format!("Failed to create stream: {}", e)))?;

    // Try to get a valid frame (skip first few which might be bad)
    for attempt in 0..5 {
        match stream.next() {
            Ok((buf, _meta)) => {
                if buf.len() >= MIN_FRAME_SIZE {
                    let actual_format =
                        PixelFormat::from_fourcc(actual.fourcc).unwrap_or(format);

                    return Ok(VideoFrame::new(
                        Bytes::copy_from_slice(buf),
                        Resolution::new(actual.width, actual.height),
                        actual_format,
                        actual.stride,
                        0,
                    ));
                }
            }
            Err(e) => {
                if attempt == 4 {
                    return Err(AppError::VideoError(format!(
                        "Failed to grab frame: {}",
                        e
                    )));
                }
            }
        }
    }

    Err(AppError::VideoError("Failed to capture valid frame".to_string()))
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_valid_jpeg() {
        // Valid JPEG header and footer
        let mut data = vec![0xFF, 0xD8]; // SOI
        data.extend(vec![0u8; 200]); // Content
        data.extend([0xFF, 0xD9]); // EOI

        assert!(is_valid_jpeg(&data));

        // Invalid - too small
        assert!(!is_valid_jpeg(&[0xFF, 0xD8, 0xFF, 0xD9]));

        // Invalid - wrong header
        let mut bad = vec![0x00, 0x00];
        bad.extend(vec![0u8; 200]);
        assert!(!is_valid_jpeg(&bad));
    }
}

src/video/convert.rs (new file, 640 lines)
@@ -0,0 +1,640 @@
//! Pixel format conversion utilities
//!
//! This module provides SIMD-accelerated color space conversion using libyuv.
//! Primary use case: YUYV (from V4L2 capture) → YUV420P/NV12 (for H264 encoding)

use crate::error::{AppError, Result};
use crate::video::format::{PixelFormat, Resolution};

/// YUV420P buffer with separate Y, U, V planes
pub struct Yuv420pBuffer {
    /// Raw buffer containing all planes
    data: Vec<u8>,
    /// Width of the frame
    width: u32,
    /// Height of the frame
    height: u32,
    /// Y plane offset (always 0)
    y_offset: usize,
    /// U plane offset
    u_offset: usize,
    /// V plane offset
    v_offset: usize,
}

impl Yuv420pBuffer {
    /// Create a new YUV420P buffer for the given resolution
    pub fn new(resolution: Resolution) -> Self {
        let width = resolution.width;
        let height = resolution.height;

        // YUV420P: Y = width*height, U = width*height/4, V = width*height/4
        let y_size = (width * height) as usize;
        let uv_size = y_size / 4;
        let total_size = y_size + uv_size * 2;

        Self {
            data: vec![0u8; total_size],
            width,
            height,
            y_offset: 0,
            u_offset: y_size,
            v_offset: y_size + uv_size,
        }
    }

    /// Get the raw buffer as bytes
    pub fn as_bytes(&self) -> &[u8] {
        &self.data
    }

    /// Get the raw buffer as mutable bytes
    pub fn as_bytes_mut(&mut self) -> &mut [u8] {
        &mut self.data
    }

    /// Get Y plane
    pub fn y_plane(&self) -> &[u8] {
        &self.data[self.y_offset..self.u_offset]
    }

    /// Get Y plane mutable
    pub fn y_plane_mut(&mut self) -> &mut [u8] {
        let u_offset = self.u_offset;
        &mut self.data[self.y_offset..u_offset]
    }

    /// Get U plane
    pub fn u_plane(&self) -> &[u8] {
        &self.data[self.u_offset..self.v_offset]
    }

    /// Get U plane mutable
    pub fn u_plane_mut(&mut self) -> &mut [u8] {
        let v_offset = self.v_offset;
        let u_offset = self.u_offset;
        &mut self.data[u_offset..v_offset]
    }

    /// Get V plane
    pub fn v_plane(&self) -> &[u8] {
        &self.data[self.v_offset..]
    }

    /// Get V plane mutable
    pub fn v_plane_mut(&mut self) -> &mut [u8] {
        let v_offset = self.v_offset;
        &mut self.data[v_offset..]
    }

    /// Get buffer length
    pub fn len(&self) -> usize {
        self.data.len()
    }

    /// Check if buffer is empty
    pub fn is_empty(&self) -> bool {
        self.data.is_empty()
    }

    /// Get resolution
    pub fn resolution(&self) -> Resolution {
        Resolution::new(self.width, self.height)
    }
}
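
// --- Editor's worked example (not part of the original file) ---
// Plane layout for an 8x4 buffer: Y occupies bytes [0, 32), U occupies
// [32, 40), V occupies [40, 48); i.e. u_offset = w*h and
// v_offset = w*h + w*h/4.
#[cfg(test)]
mod plane_layout_example {
    use super::*;

    #[test]
    fn offsets_follow_i420_layout() {
        let buf = Yuv420pBuffer::new(Resolution::new(8, 4));
        assert_eq!(buf.len(), 48); // 32 + 8 + 8
        assert_eq!(buf.y_plane().len(), 32);
        assert_eq!(buf.u_plane().len(), 8);
        assert_eq!(buf.v_plane().len(), 8);
    }
}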

/// NV12 buffer with Y plane and interleaved UV plane
pub struct Nv12Buffer {
    /// Raw buffer containing Y plane followed by interleaved UV plane
    data: Vec<u8>,
    /// Width of the frame
    width: u32,
    /// Height of the frame
    height: u32,
}

impl Nv12Buffer {
    /// Create a new NV12 buffer for the given resolution
    pub fn new(resolution: Resolution) -> Self {
        let width = resolution.width;
        let height = resolution.height;
        // NV12: Y = width*height, UV = width*height/2 (interleaved)
        let y_size = (width * height) as usize;
        let uv_size = y_size / 2;
        let total_size = y_size + uv_size;

        Self {
            data: vec![0u8; total_size],
            width,
            height,
        }
    }

    /// Get the raw buffer as bytes
    pub fn as_bytes(&self) -> &[u8] {
        &self.data
    }

    /// Get the raw buffer as mutable bytes
    pub fn as_bytes_mut(&mut self) -> &mut [u8] {
        &mut self.data
    }

    /// Get Y plane
    pub fn y_plane(&self) -> &[u8] {
        let y_size = (self.width * self.height) as usize;
        &self.data[..y_size]
    }

    /// Get Y plane mutable
    pub fn y_plane_mut(&mut self) -> &mut [u8] {
        let y_size = (self.width * self.height) as usize;
        &mut self.data[..y_size]
    }

    /// Get UV plane (interleaved)
    pub fn uv_plane(&self) -> &[u8] {
        let y_size = (self.width * self.height) as usize;
        &self.data[y_size..]
    }

    /// Get UV plane mutable
    pub fn uv_plane_mut(&mut self) -> &mut [u8] {
        let y_size = (self.width * self.height) as usize;
        &mut self.data[y_size..]
    }

    /// Get buffer length
    pub fn len(&self) -> usize {
        self.data.len()
    }

    /// Check if buffer is empty
    pub fn is_empty(&self) -> bool {
        self.data.is_empty()
    }

    /// Get resolution
    pub fn resolution(&self) -> Resolution {
        Resolution::new(self.width, self.height)
    }
}

/// Pixel format converter using libyuv (SIMD accelerated)
pub struct PixelConverter {
    /// Source format
    src_format: PixelFormat,
    /// Destination format
    dst_format: PixelFormat,
    /// Frame resolution
    resolution: Resolution,
    /// Output buffer (reused across conversions)
    output_buffer: Yuv420pBuffer,
}

impl PixelConverter {
    /// Create a new converter for YUYV → YUV420P
    pub fn yuyv_to_yuv420p(resolution: Resolution) -> Self {
        Self {
            src_format: PixelFormat::Yuyv,
            dst_format: PixelFormat::Yuv420,
            resolution,
            output_buffer: Yuv420pBuffer::new(resolution),
        }
    }

    /// Create a new converter for UYVY → YUV420P
    pub fn uyvy_to_yuv420p(resolution: Resolution) -> Self {
        Self {
            src_format: PixelFormat::Uyvy,
            dst_format: PixelFormat::Yuv420,
            resolution,
            output_buffer: Yuv420pBuffer::new(resolution),
        }
    }

    /// Create a new converter for YVYU → YUV420P
    pub fn yvyu_to_yuv420p(resolution: Resolution) -> Self {
        Self {
            src_format: PixelFormat::Yvyu,
            dst_format: PixelFormat::Yuv420,
            resolution,
            output_buffer: Yuv420pBuffer::new(resolution),
        }
    }

    /// Create a new converter for NV12 → YUV420P
    pub fn nv12_to_yuv420p(resolution: Resolution) -> Self {
        Self {
            src_format: PixelFormat::Nv12,
            dst_format: PixelFormat::Yuv420,
            resolution,
            output_buffer: Yuv420pBuffer::new(resolution),
        }
    }

    /// Create a new converter for YVU420 → YUV420P (swap U and V planes)
    pub fn yvu420_to_yuv420p(resolution: Resolution) -> Self {
        Self {
            src_format: PixelFormat::Yvu420,
            dst_format: PixelFormat::Yuv420,
            resolution,
            output_buffer: Yuv420pBuffer::new(resolution),
        }
    }

    /// Create a new converter for RGB24 → YUV420P
    pub fn rgb24_to_yuv420p(resolution: Resolution) -> Self {
        Self {
            src_format: PixelFormat::Rgb24,
            dst_format: PixelFormat::Yuv420,
            resolution,
            output_buffer: Yuv420pBuffer::new(resolution),
        }
    }

    /// Create a new converter for BGR24 → YUV420P
    pub fn bgr24_to_yuv420p(resolution: Resolution) -> Self {
        Self {
            src_format: PixelFormat::Bgr24,
            dst_format: PixelFormat::Yuv420,
            resolution,
            output_buffer: Yuv420pBuffer::new(resolution),
        }
    }

    /// Convert a frame and return a reference to the output buffer
    pub fn convert(&mut self, input: &[u8]) -> Result<&[u8]> {
        let width = self.resolution.width as i32;
        let height = self.resolution.height as i32;
        let expected_size = self.output_buffer.len();

        match (self.src_format, self.dst_format) {
            (PixelFormat::Yuyv, PixelFormat::Yuv420) => {
                libyuv::yuy2_to_i420(input, self.output_buffer.as_bytes_mut(), width, height)
                    .map_err(|e| AppError::VideoError(format!("libyuv conversion failed: {}", e)))?;
            }
            (PixelFormat::Uyvy, PixelFormat::Yuv420) => {
                libyuv::uyvy_to_i420(input, self.output_buffer.as_bytes_mut(), width, height)
                    .map_err(|e| AppError::VideoError(format!("libyuv conversion failed: {}", e)))?;
            }
            (PixelFormat::Nv12, PixelFormat::Yuv420) => {
                libyuv::nv12_to_i420(input, self.output_buffer.as_bytes_mut(), width, height)
                    .map_err(|e| AppError::VideoError(format!("libyuv conversion failed: {}", e)))?;
            }
            (PixelFormat::Rgb24, PixelFormat::Yuv420) => {
                libyuv::rgb24_to_i420(input, self.output_buffer.as_bytes_mut(), width, height)
                    .map_err(|e| AppError::VideoError(format!("libyuv conversion failed: {}", e)))?;
            }
            (PixelFormat::Bgr24, PixelFormat::Yuv420) => {
                libyuv::bgr24_to_i420(input, self.output_buffer.as_bytes_mut(), width, height)
                    .map_err(|e| AppError::VideoError(format!("libyuv conversion failed: {}", e)))?;
            }
            (PixelFormat::Yvyu, PixelFormat::Yuv420) => {
                // YVYU is not directly supported by libyuv, use software conversion
                self.convert_yvyu_to_yuv420p_sw(input)?;
            }
            (PixelFormat::Yvu420, PixelFormat::Yuv420) => {
                // YVU420 just swaps U and V planes
                self.convert_yvu420_to_yuv420p_sw(input)?;
            }
            (PixelFormat::Yuv420, PixelFormat::Yuv420) => {
                // No conversion needed, just copy
                if input.len() < expected_size {
                    return Err(AppError::VideoError(format!(
                        "Input buffer too small: {} < {}",
                        input.len(),
                        expected_size
                    )));
                }
                self.output_buffer.as_bytes_mut().copy_from_slice(&input[..expected_size]);
            }
            _ => {
                return Err(AppError::VideoError(format!(
                    "Unsupported conversion: {} → {}",
                    self.src_format, self.dst_format
                )));
            }
        };

        Ok(self.output_buffer.as_bytes())
    }

    /// Get output buffer length
    pub fn output_len(&self) -> usize {
        self.output_buffer.len()
    }

    /// Get resolution
    pub fn resolution(&self) -> Resolution {
        self.resolution
    }

    /// Software conversion for YVYU (not supported by libyuv)
    fn convert_yvyu_to_yuv420p_sw(&mut self, yvyu: &[u8]) -> Result<()> {
        let width = self.resolution.width as usize;
        let height = self.resolution.height as usize;
        let y_size = width * height;
        let uv_size = y_size / 4;
        let half_width = width / 2;

        let data = self.output_buffer.as_bytes_mut();
        let (y_plane, uv_planes) = data.split_at_mut(y_size);
        let (u_plane, v_plane) = uv_planes.split_at_mut(uv_size);

        for row in (0..height).step_by(2) {
            let yvyu_row0_offset = row * width * 2;
            let yvyu_row1_offset = (row + 1) * width * 2;
            let y_row0_offset = row * width;
            let y_row1_offset = (row + 1) * width;
            let uv_row_offset = (row / 2) * half_width;

            for col in (0..width).step_by(2) {
                let yvyu_offset0 = yvyu_row0_offset + col * 2;
                let yvyu_offset1 = yvyu_row1_offset + col * 2;

                // YVYU: Y0, V0, Y1, U0
                let y0_0 = yvyu[yvyu_offset0];
                let v0 = yvyu[yvyu_offset0 + 1];
                let y0_1 = yvyu[yvyu_offset0 + 2];
                let u0 = yvyu[yvyu_offset0 + 3];

                let y1_0 = yvyu[yvyu_offset1];
                let v1 = yvyu[yvyu_offset1 + 1];
                let y1_1 = yvyu[yvyu_offset1 + 2];
                let u1 = yvyu[yvyu_offset1 + 3];

                y_plane[y_row0_offset + col] = y0_0;
                y_plane[y_row0_offset + col + 1] = y0_1;
                y_plane[y_row1_offset + col] = y1_0;
                y_plane[y_row1_offset + col + 1] = y1_1;

                let uv_idx = uv_row_offset + col / 2;
                u_plane[uv_idx] = ((u0 as u16 + u1 as u16) / 2) as u8;
                v_plane[uv_idx] = ((v0 as u16 + v1 as u16) / 2) as u8;
            }
        }
        Ok(())
    }

    /// Software conversion for YVU420 (just swap U and V)
    fn convert_yvu420_to_yuv420p_sw(&mut self, yvu420: &[u8]) -> Result<()> {
        let width = self.resolution.width as usize;
        let height = self.resolution.height as usize;
        let y_size = width * height;
        let uv_size = y_size / 4;

        let data = self.output_buffer.as_bytes_mut();
        let (y_plane, uv_planes) = data.split_at_mut(y_size);
        let (u_plane, v_plane) = uv_planes.split_at_mut(uv_size);

        // Copy Y plane directly
        y_plane.copy_from_slice(&yvu420[..y_size]);

        // In YVU420, V comes before U
        // (upper bound added so an oversized input cannot trigger a
        // length-mismatch panic in copy_from_slice)
        let v_src = &yvu420[y_size..y_size + uv_size];
        let u_src = &yvu420[y_size + uv_size..y_size + 2 * uv_size];

        // Swap U and V
        u_plane.copy_from_slice(u_src);
        v_plane.copy_from_slice(v_src);

        Ok(())
    }
}
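
// --- Editor's illustrative sketch (not part of the original file) ---
// Converting one YUYV frame into the converter's reusable output buffer.
// The input must be exactly width*height*2 bytes (640*480*2 here).
#[allow(dead_code)]
fn example_convert_yuyv(frame: &[u8]) -> Result<Vec<u8>> {
    let mut conv = PixelConverter::yuyv_to_yuv420p(Resolution::new(640, 480));
    let i420 = conv.convert(frame)?; // borrows conv's internal buffer
    Ok(i420.to_vec()) // copy out if the data must outlive the converter
}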

/// Calculate YUV420P buffer size for a given resolution
pub fn yuv420p_buffer_size(resolution: Resolution) -> usize {
    let pixels = (resolution.width * resolution.height) as usize;
    pixels + pixels / 2
}

/// Calculate YUYV buffer size for a given resolution
pub fn yuyv_buffer_size(resolution: Resolution) -> usize {
    (resolution.width * resolution.height * 2) as usize
}
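
// --- Editor's worked example (not part of the original file) ---
// At 1920x1080: yuv420p_buffer_size = 2_073_600 + 1_036_800 = 3_110_400
// bytes, while yuyv_buffer_size = 1920 * 1080 * 2 = 4_147_200 bytes.
#[cfg(test)]
mod buffer_size_example {
    use super::*;

    #[test]
    fn sizes_at_1080p() {
        let r = Resolution::new(1920, 1080);
        assert_eq!(yuv420p_buffer_size(r), 3_110_400);
        assert_eq!(yuyv_buffer_size(r), 4_147_200);
    }
}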

// ============================================================================
// MJPEG Decoder - Decodes JPEG to YUV420P using libyuv
// ============================================================================

/// MJPEG/JPEG decoder that outputs YUV420P using libyuv
pub struct MjpegDecoder {
    /// Resolution hint (can be updated from decoded frame)
    resolution: Resolution,
    /// YUV420P output buffer
    yuv_buffer: Yuv420pBuffer,
}

impl MjpegDecoder {
    /// Create a new MJPEG decoder with expected resolution
    pub fn new(resolution: Resolution) -> Result<Self> {
        Ok(Self {
            resolution,
            yuv_buffer: Yuv420pBuffer::new(resolution),
        })
    }

    /// Decode MJPEG/JPEG data to YUV420P using libyuv
    pub fn decode(&mut self, jpeg_data: &[u8]) -> Result<&[u8]> {
        // Get MJPEG dimensions
        let (width, height) = libyuv::mjpeg_size(jpeg_data)
            .map_err(|e| AppError::VideoError(format!("Failed to get MJPEG size: {}", e)))?;

        // Check if resolution changed
        if width != self.resolution.width as i32 || height != self.resolution.height as i32 {
            tracing::debug!(
                "MJPEG resolution changed: {}x{} -> {}x{}",
                self.resolution.width,
                self.resolution.height,
                width,
                height
            );
            self.resolution = Resolution::new(width as u32, height as u32);
            self.yuv_buffer = Yuv420pBuffer::new(self.resolution);
        }

        // Decode MJPEG directly to I420 using libyuv
        libyuv::mjpeg_to_i420(jpeg_data, self.yuv_buffer.as_bytes_mut(), width, height)
            .map_err(|e| AppError::VideoError(format!("MJPEG decode failed: {}", e)))?;

        Ok(self.yuv_buffer.as_bytes())
    }

    /// Get current resolution
    pub fn resolution(&self) -> Resolution {
        self.resolution
    }

    /// Get YUV420P buffer size
    pub fn yuv_buffer_size(&self) -> usize {
        self.yuv_buffer.len()
    }
}
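
// --- Editor's illustrative sketch (not part of the original file) ---
// Decoding one JPEG frame; the decoder transparently reallocates its buffer
// if the stream's resolution changes mid-capture.
#[allow(dead_code)]
fn example_decode_mjpeg(jpeg: &[u8]) -> Result<usize> {
    let mut dec = MjpegDecoder::new(Resolution::HD1080)?;
    let i420 = dec.decode(jpeg)?;
    Ok(i420.len()) // w*h*3/2 for the *decoded* resolution
}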

// ============================================================================
// NV12 Converter for VAAPI encoder (using libyuv)
// ============================================================================

/// Pixel format converter that outputs NV12 (for VAAPI encoders)
pub struct Nv12Converter {
    /// Source format
    src_format: PixelFormat,
    /// Frame resolution
    resolution: Resolution,
    /// Output buffer (reused across conversions)
    output_buffer: Nv12Buffer,
}

impl Nv12Converter {
    /// Create a new converter for BGR24 → NV12
    pub fn bgr24_to_nv12(resolution: Resolution) -> Self {
        Self {
            src_format: PixelFormat::Bgr24,
            resolution,
            output_buffer: Nv12Buffer::new(resolution),
        }
    }

    /// Create a new converter for RGB24 → NV12
    pub fn rgb24_to_nv12(resolution: Resolution) -> Self {
        Self {
            src_format: PixelFormat::Rgb24,
            resolution,
            output_buffer: Nv12Buffer::new(resolution),
        }
    }

    /// Create a new converter for YUYV → NV12
    pub fn yuyv_to_nv12(resolution: Resolution) -> Self {
        Self {
            src_format: PixelFormat::Yuyv,
            resolution,
            output_buffer: Nv12Buffer::new(resolution),
        }
    }

    /// Convert a frame and return a reference to the output buffer
    pub fn convert(&mut self, input: &[u8]) -> Result<&[u8]> {
        let width = self.resolution.width as i32;
        let height = self.resolution.height as i32;
        let dst = self.output_buffer.as_bytes_mut();

        let result = match self.src_format {
            PixelFormat::Bgr24 => libyuv::bgr24_to_nv12(input, dst, width, height),
            PixelFormat::Rgb24 => libyuv::rgb24_to_nv12(input, dst, width, height),
            PixelFormat::Yuyv => libyuv::yuy2_to_nv12(input, dst, width, height),
            _ => {
                return Err(AppError::VideoError(format!(
                    "Unsupported conversion to NV12: {}",
                    self.src_format
                )));
            }
        };

        result.map_err(|e| AppError::VideoError(format!("libyuv NV12 conversion failed: {}", e)))?;
        Ok(self.output_buffer.as_bytes())
    }

    /// Get output buffer length
    pub fn output_len(&self) -> usize {
        self.output_buffer.len()
    }

    /// Get resolution
    pub fn resolution(&self) -> Resolution {
        self.resolution
    }
}
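
// --- Editor's illustrative sketch (not part of the original file) ---
// YUYV capture frame -> NV12 for a VAAPI encoder; output is w*h*3/2 bytes.
#[allow(dead_code)]
fn example_yuyv_to_nv12(yuyv_frame: &[u8]) -> Result<usize> {
    let mut conv = Nv12Converter::yuyv_to_nv12(Resolution::new(1280, 720));
    let nv12 = conv.convert(yuyv_frame)?;
    Ok(nv12.len())
}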

// ============================================================================
// Standalone conversion functions (using libyuv)
// ============================================================================

/// Convert BGR24 to NV12 using libyuv
pub fn bgr_to_nv12(bgr: &[u8], nv12: &mut [u8], width: usize, height: usize) {
    if let Err(e) = libyuv::bgr24_to_nv12(bgr, nv12, width as i32, height as i32) {
        tracing::error!("libyuv BGR24→NV12 conversion failed: {}", e);
    }
}

/// Convert RGB24 to NV12 using libyuv
pub fn rgb_to_nv12(rgb: &[u8], nv12: &mut [u8], width: usize, height: usize) {
    if let Err(e) = libyuv::rgb24_to_nv12(rgb, nv12, width as i32, height as i32) {
        tracing::error!("libyuv RGB24→NV12 conversion failed: {}", e);
    }
}

/// Convert YUYV to NV12 using libyuv
pub fn yuyv_to_nv12(yuyv: &[u8], nv12: &mut [u8], width: usize, height: usize) {
    if let Err(e) = libyuv::yuy2_to_nv12(yuyv, nv12, width as i32, height as i32) {
        tracing::error!("libyuv YUYV→NV12 conversion failed: {}", e);
    }
}

// ============================================================================
// Extended PixelConverter for MJPEG support
// ============================================================================

/// MJPEG to YUV420P converter (wraps MjpegDecoder)
pub struct MjpegToYuv420Converter {
    decoder: MjpegDecoder,
}

impl MjpegToYuv420Converter {
    /// Create a new MJPEG to YUV420P converter
    pub fn new(resolution: Resolution) -> Result<Self> {
        Ok(Self {
            decoder: MjpegDecoder::new(resolution)?,
        })
    }

    /// Convert MJPEG data to YUV420P
    pub fn convert(&mut self, mjpeg_data: &[u8]) -> Result<&[u8]> {
        self.decoder.decode(mjpeg_data)
    }

    /// Get current resolution
    pub fn resolution(&self) -> Resolution {
        self.decoder.resolution()
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_yuv420p_buffer_creation() {
        let buffer = Yuv420pBuffer::new(Resolution::HD720);
        assert_eq!(buffer.len(), 1280 * 720 * 3 / 2);
        assert_eq!(buffer.y_plane().len(), 1280 * 720);
        assert_eq!(buffer.u_plane().len(), 1280 * 720 / 4);
        assert_eq!(buffer.v_plane().len(), 1280 * 720 / 4);
    }

    #[test]
    fn test_nv12_buffer_creation() {
        let buffer = Nv12Buffer::new(Resolution::HD720);
        assert_eq!(buffer.len(), 1280 * 720 * 3 / 2);
        assert_eq!(buffer.y_plane().len(), 1280 * 720);
        assert_eq!(buffer.uv_plane().len(), 1280 * 720 / 2);
    }

    #[test]
    fn test_yuyv_to_yuv420p_conversion() {
        let resolution = Resolution::new(4, 4);
        let mut converter = PixelConverter::yuyv_to_yuv420p(resolution);

        // Create YUYV data (4x4 = 32 bytes)
        let yuyv = vec![
            16, 128, 17, 129, 18, 130, 19, 131,
            20, 132, 21, 133, 22, 134, 23, 135,
            24, 136, 25, 137, 26, 138, 27, 139,
            28, 140, 29, 141, 30, 142, 31, 143,
        ];

        let result = converter.convert(&yuyv).unwrap();
        assert_eq!(result.len(), 24); // 4*4 + 2*2 + 2*2 = 24 bytes
    }
}

src/video/decoder/mjpeg.rs (new file, 645 lines)
@@ -0,0 +1,645 @@
//! MJPEG decoder for the VAAPI pipeline
//!
//! Uses hwcodec's FFmpeg backend to decode MJPEG to NV12. Decoding itself
//! runs in software (VAAPI MJPEG decoding is unavailable on most hardware),
//! but the direct NV12 output is the optimal format for VAAPI H264 encoding.

use std::sync::Once;
use tracing::{debug, info, warn};

use hwcodec::ffmpeg::AVHWDeviceType;
use hwcodec::ffmpeg::AVPixelFormat;
use hwcodec::ffmpeg_ram::decode::{DecodeContext, DecodeFrame, Decoder};

use crate::error::{AppError, Result};
use crate::video::format::Resolution;

// libyuv for SIMD-accelerated YUV conversion

static INIT_LOGGING: Once = Once::new();

/// Initialize hwcodec logging (only once)
fn init_hwcodec_logging() {
    INIT_LOGGING.call_once(|| {
        debug!("hwcodec MJPEG decoder logging initialized");
    });
}

/// MJPEG VAAPI decoder configuration
#[derive(Debug, Clone)]
pub struct MjpegVaapiDecoderConfig {
    /// Expected resolution (can be updated from decoded frame)
    pub resolution: Resolution,
    /// Use hardware acceleration (VAAPI)
    pub use_hwaccel: bool,
}

impl Default for MjpegVaapiDecoderConfig {
    fn default() -> Self {
        Self {
            resolution: Resolution::HD1080,
            use_hwaccel: true,
        }
    }
}

/// Decoded frame data in NV12 format
#[derive(Debug, Clone)]
pub struct DecodedNv12Frame {
    /// Y plane data
    pub y_plane: Vec<u8>,
    /// UV interleaved plane data
    pub uv_plane: Vec<u8>,
    /// Y plane linesize (stride)
    pub y_linesize: i32,
    /// UV plane linesize (stride)
    pub uv_linesize: i32,
    /// Frame width
    pub width: i32,
    /// Frame height
    pub height: i32,
}

/// Decoded frame data in YUV420P (I420) format
#[derive(Debug, Clone)]
pub struct DecodedYuv420pFrame {
    /// Y plane data
    pub y_plane: Vec<u8>,
    /// U plane data
    pub u_plane: Vec<u8>,
    /// V plane data
    pub v_plane: Vec<u8>,
    /// Y plane linesize (stride)
    pub y_linesize: i32,
    /// U plane linesize (stride)
    pub u_linesize: i32,
    /// V plane linesize (stride)
    pub v_linesize: i32,
    /// Frame width
    pub width: i32,
    /// Frame height
    pub height: i32,
}

impl DecodedYuv420pFrame {
    /// Get packed YUV420P data (Y plane followed by U and V planes, with stride removed)
    pub fn to_packed_yuv420p(&self) -> Vec<u8> {
        let width = self.width as usize;
        let height = self.height as usize;
        let y_size = width * height;
        let uv_size = width * height / 4;

        let mut packed = Vec::with_capacity(y_size + uv_size * 2);

        // Copy Y plane, removing stride padding if any
        if self.y_linesize as usize == width {
            packed.extend_from_slice(&self.y_plane[..y_size]);
        } else {
            for row in 0..height {
                let src_offset = row * self.y_linesize as usize;
                packed.extend_from_slice(&self.y_plane[src_offset..src_offset + width]);
            }
        }

        // Copy U plane
        let uv_width = width / 2;
        let uv_height = height / 2;
        if self.u_linesize as usize == uv_width {
            packed.extend_from_slice(&self.u_plane[..uv_size]);
        } else {
            for row in 0..uv_height {
                let src_offset = row * self.u_linesize as usize;
                packed.extend_from_slice(&self.u_plane[src_offset..src_offset + uv_width]);
            }
        }

        // Copy V plane
        if self.v_linesize as usize == uv_width {
            packed.extend_from_slice(&self.v_plane[..uv_size]);
        } else {
            for row in 0..uv_height {
                let src_offset = row * self.v_linesize as usize;
                packed.extend_from_slice(&self.v_plane[src_offset..src_offset + uv_width]);
            }
        }

        packed
    }

    /// Copy packed YUV420P data to an external buffer (zero allocation).
    /// Returns the number of bytes written, or None if the buffer is too small.
    pub fn copy_to_packed_yuv420p(&self, dst: &mut [u8]) -> Option<usize> {
        let width = self.width as usize;
        let height = self.height as usize;
        let y_size = width * height;
        let uv_size = width * height / 4;
        let total_size = y_size + uv_size * 2;

        if dst.len() < total_size {
            return None;
        }

        // Copy Y plane
        if self.y_linesize as usize == width {
            dst[..y_size].copy_from_slice(&self.y_plane[..y_size]);
        } else {
            for row in 0..height {
                let src_offset = row * self.y_linesize as usize;
                let dst_offset = row * width;
                dst[dst_offset..dst_offset + width]
                    .copy_from_slice(&self.y_plane[src_offset..src_offset + width]);
            }
        }

        // Copy U plane
        let uv_width = width / 2;
        let uv_height = height / 2;
        if self.u_linesize as usize == uv_width {
            dst[y_size..y_size + uv_size].copy_from_slice(&self.u_plane[..uv_size]);
        } else {
            for row in 0..uv_height {
                let src_offset = row * self.u_linesize as usize;
                let dst_offset = y_size + row * uv_width;
                dst[dst_offset..dst_offset + uv_width]
                    .copy_from_slice(&self.u_plane[src_offset..src_offset + uv_width]);
            }
        }

        // Copy V plane
        let v_offset = y_size + uv_size;
        if self.v_linesize as usize == uv_width {
            dst[v_offset..v_offset + uv_size].copy_from_slice(&self.v_plane[..uv_size]);
        } else {
            for row in 0..uv_height {
                let src_offset = row * self.v_linesize as usize;
                let dst_offset = v_offset + row * uv_width;
                dst[dst_offset..dst_offset + uv_width]
                    .copy_from_slice(&self.v_plane[src_offset..src_offset + uv_width]);
            }
        }

        Some(total_size)
    }
}

impl DecodedNv12Frame {
    /// Get packed NV12 data (Y plane followed by UV plane, with stride removed)
    pub fn to_packed_nv12(&self) -> Vec<u8> {
        let width = self.width as usize;
        let height = self.height as usize;
        let y_size = width * height;
        let uv_size = width * height / 2;

        let mut packed = Vec::with_capacity(y_size + uv_size);

        // Copy Y plane, removing stride padding if any
        if self.y_linesize as usize == width {
            // No padding, direct copy
            packed.extend_from_slice(&self.y_plane[..y_size]);
        } else {
            // Has padding, copy row by row
            for row in 0..height {
                let src_offset = row * self.y_linesize as usize;
                packed.extend_from_slice(&self.y_plane[src_offset..src_offset + width]);
            }
        }

        // Copy UV plane, removing stride padding if any
        let uv_height = height / 2;
        if self.uv_linesize as usize == width {
            // No padding, direct copy
            packed.extend_from_slice(&self.uv_plane[..uv_size]);
        } else {
            // Has padding, copy row by row
            for row in 0..uv_height {
                let src_offset = row * self.uv_linesize as usize;
                packed.extend_from_slice(&self.uv_plane[src_offset..src_offset + width]);
            }
        }

        packed
    }

    /// Copy packed NV12 data to an external buffer (zero allocation).
    /// Returns the number of bytes written, or None if the buffer is too small.
    pub fn copy_to_packed_nv12(&self, dst: &mut [u8]) -> Option<usize> {
        let width = self.width as usize;
        let height = self.height as usize;
        let y_size = width * height;
        let uv_size = width * height / 2;
        let total_size = y_size + uv_size;

        if dst.len() < total_size {
            return None;
        }

        // Copy Y plane, removing stride padding if any
        if self.y_linesize as usize == width {
            // No padding, direct copy
            dst[..y_size].copy_from_slice(&self.y_plane[..y_size]);
        } else {
            // Has padding, copy row by row
            for row in 0..height {
                let src_offset = row * self.y_linesize as usize;
                let dst_offset = row * width;
                dst[dst_offset..dst_offset + width]
                    .copy_from_slice(&self.y_plane[src_offset..src_offset + width]);
            }
        }

        // Copy UV plane, removing stride padding if any
        let uv_height = height / 2;
        if self.uv_linesize as usize == width {
            // No padding, direct copy
            dst[y_size..total_size].copy_from_slice(&self.uv_plane[..uv_size]);
        } else {
            // Has padding, copy row by row
            for row in 0..uv_height {
                let src_offset = row * self.uv_linesize as usize;
                let dst_offset = y_size + row * width;
                dst[dst_offset..dst_offset + width]
                    .copy_from_slice(&self.uv_plane[src_offset..src_offset + width]);
            }
        }

        Some(total_size)
    }
}
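
// --- Editor's worked example (not part of the original file) ---
// Stride handling above in miniature: a decoder may report y_linesize = 2048
// for a 1920-wide frame, so packing copies 1920 bytes per row and skips the
// 128 padding bytes; row r starts at r*2048 in the source but at r*1920 in
// the packed output.
#[allow(dead_code)]
fn example_destride_row(src: &[u8], linesize: usize, width: usize, row: usize) -> &[u8] {
    let start = row * linesize;
    &src[start..start + width]
}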
|
||||
|
||||
/// MJPEG VAAPI hardware decoder
|
||||
///
|
||||
/// Decodes MJPEG frames to NV12 format using VAAPI hardware acceleration.
|
||||
/// This is optimal for feeding into VAAPI H264 encoder.
|
||||
pub struct MjpegVaapiDecoder {
|
||||
/// hwcodec decoder instance
|
||||
decoder: Decoder,
|
||||
/// Configuration
|
||||
config: MjpegVaapiDecoderConfig,
|
||||
/// Frame counter
|
||||
frame_count: u64,
|
||||
/// Whether hardware acceleration is active
|
||||
hwaccel_active: bool,
|
||||
}
|
||||
|
||||
impl MjpegVaapiDecoder {
|
||||
/// Create a new MJPEG decoder
|
||||
/// Note: VAAPI does not support MJPEG decoding on most hardware,
/// so we use software decoding and convert to NV12 for VAAPI encoding.
pub fn new(config: MjpegVaapiDecoderConfig) -> Result<Self> {
init_hwcodec_logging();

// VAAPI doesn't support MJPEG decoding, always use software decoder
// The output will be converted to NV12 for VAAPI H264 encoding
let device_type = AVHWDeviceType::AV_HWDEVICE_TYPE_NONE;

info!(
"Creating MJPEG decoder with software decoding (VAAPI doesn't support MJPEG decode)"
);

let ctx = DecodeContext {
name: "mjpeg".to_string(),
device_type,
thread_count: 4, // Use multiple threads for software decoding
};

let decoder = Decoder::new(ctx).map_err(|_| {
AppError::VideoError("Failed to create MJPEG software decoder".to_string())
})?;

// hwaccel is not actually active for MJPEG decoding
let hwaccel_active = false;

info!(
"MJPEG decoder created successfully (software decode, will convert to NV12)"
);

Ok(Self {
decoder,
config,
frame_count: 0,
hwaccel_active,
})
}

/// Create with default config (VAAPI enabled)
pub fn with_vaapi(resolution: Resolution) -> Result<Self> {
Self::new(MjpegVaapiDecoderConfig {
resolution,
use_hwaccel: true,
})
}

/// Create with software decoding (fallback)
pub fn with_software(resolution: Resolution) -> Result<Self> {
Self::new(MjpegVaapiDecoderConfig {
resolution,
use_hwaccel: false,
})
}

/// Check if hardware acceleration is active
pub fn is_hwaccel_active(&self) -> bool {
self.hwaccel_active
}

/// Decode MJPEG frame to NV12
///
/// Returns the decoded frame in NV12 format, or an error if decoding fails.
pub fn decode(&mut self, jpeg_data: &[u8]) -> Result<DecodedNv12Frame> {
if jpeg_data.len() < 2 {
return Err(AppError::VideoError("JPEG data too small".to_string()));
}

// Verify JPEG signature (FFD8)
if jpeg_data[0] != 0xFF || jpeg_data[1] != 0xD8 {
return Err(AppError::VideoError("Invalid JPEG signature".to_string()));
}

self.frame_count += 1;

let frames = self.decoder.decode(jpeg_data).map_err(|e| {
AppError::VideoError(format!("MJPEG decode failed: error code {}", e))
})?;

if frames.is_empty() {
return Err(AppError::VideoError("Decoder returned no frames".to_string()));
}

let frame = &frames[0];

// Handle different output formats
// The software MJPEG decoder may output NV12, YUV420P, or YUVJ420P (JPEG full-range)
if frame.pixfmt == AVPixelFormat::AV_PIX_FMT_NV12
|| frame.pixfmt == AVPixelFormat::AV_PIX_FMT_NV21
{
// NV12/NV21 format: Y plane + UV interleaved plane
if frame.data.len() < 2 {
return Err(AppError::VideoError("Invalid NV12 frame data".to_string()));
}

return Ok(DecodedNv12Frame {
y_plane: frame.data[0].clone(),
uv_plane: frame.data[1].clone(),
y_linesize: frame.linesize[0],
uv_linesize: frame.linesize[1],
width: frame.width,
height: frame.height,
});
}

// YUV420P or YUVJ420P (JPEG full-range) - need to convert to NV12
if frame.pixfmt == AVPixelFormat::AV_PIX_FMT_YUV420P
|| frame.pixfmt == AVPixelFormat::AV_PIX_FMT_YUVJ420P
{
return Self::convert_yuv420p_to_nv12_static(frame);
}

// YUV422P or YUVJ422P (JPEG full-range 4:2:2) - need to convert to NV12
if frame.pixfmt == AVPixelFormat::AV_PIX_FMT_YUV422P
|| frame.pixfmt == AVPixelFormat::AV_PIX_FMT_YUVJ422P
{
return Self::convert_yuv422p_to_nv12_static(frame);
}

Err(AppError::VideoError(format!(
"Unexpected output format: {:?} (expected NV12, YUV420P, YUV422P, or YUVJ variants)",
frame.pixfmt
)))
}

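// Worked example of the NV12 size arithmetic used by the converters below
// (a sketch for orientation only, not additional API): for a tightly packed
// 1920x1080 frame, the Y plane is 1920*1080 = 2_073_600 bytes and the
// interleaved UV plane is half that (1_036_800 bytes), so the full NV12
// buffer is width*height*3/2 = 3_110_400 bytes.
//
//     let (w, h) = (1920usize, 1080usize);
//     let y_size = w * h;              // 2_073_600
//     let uv_size = y_size / 2;        // 1_036_800 (4:2:0, U and V interleaved)
//     assert_eq!(y_size + uv_size, w * h * 3 / 2);
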
/// Convert YUV420P frame to NV12 format using libyuv (SIMD accelerated)
fn convert_yuv420p_to_nv12_static(frame: &DecodeFrame) -> Result<DecodedNv12Frame> {
if frame.data.len() < 3 {
return Err(AppError::VideoError("Invalid YUV420P frame data".to_string()));
}

let width = frame.width as i32;
let height = frame.height as i32;
let y_linesize = frame.linesize[0];
let u_linesize = frame.linesize[1];
let v_linesize = frame.linesize[2];

// Allocate packed NV12 output buffer
let nv12_size = (width * height * 3 / 2) as usize;
let mut nv12_data = vec![0u8; nv12_size];

// Use libyuv for SIMD-accelerated I420 → NV12 conversion
libyuv::i420_to_nv12_planar(
&frame.data[0], y_linesize,
&frame.data[1], u_linesize,
&frame.data[2], v_linesize,
&mut nv12_data,
width, height,
).map_err(|e| AppError::VideoError(format!("libyuv I420→NV12 failed: {}", e)))?;

// Split into Y and UV planes for DecodedNv12Frame
let y_size = (width * height) as usize;
let y_plane = nv12_data[..y_size].to_vec();
let uv_plane = nv12_data[y_size..].to_vec();

Ok(DecodedNv12Frame {
y_plane,
uv_plane,
y_linesize: width, // Output is packed, no padding
uv_linesize: width,
width: frame.width,
height: frame.height,
})
}

/// Convert YUV422P frame to NV12 format using libyuv (SIMD accelerated)
/// Pipeline: I422 (YUV422P) → I420 → NV12
fn convert_yuv422p_to_nv12_static(frame: &DecodeFrame) -> Result<DecodedNv12Frame> {
if frame.data.len() < 3 {
return Err(AppError::VideoError("Invalid YUV422P frame data".to_string()));
}

let width = frame.width as i32;
let height = frame.height as i32;
let y_linesize = frame.linesize[0];
let u_linesize = frame.linesize[1];
let v_linesize = frame.linesize[2];

// Step 1: I422 → I420 (vertical chroma downsampling via SIMD)
let i420_size = (width * height * 3 / 2) as usize;
let mut i420_data = vec![0u8; i420_size];

libyuv::i422_to_i420_planar(
&frame.data[0], y_linesize,
&frame.data[1], u_linesize,
&frame.data[2], v_linesize,
&mut i420_data,
width, height,
).map_err(|e| AppError::VideoError(format!("libyuv I422→I420 failed: {}", e)))?;

// Step 2: I420 → NV12 (UV interleaving via SIMD)
let nv12_size = (width * height * 3 / 2) as usize;
let mut nv12_data = vec![0u8; nv12_size];

libyuv::i420_to_nv12(&i420_data, &mut nv12_data, width, height)
.map_err(|e| AppError::VideoError(format!("libyuv I420→NV12 failed: {}", e)))?;

// Split into Y and UV planes for DecodedNv12Frame
let y_size = (width * height) as usize;
let y_plane = nv12_data[..y_size].to_vec();
let uv_plane = nv12_data[y_size..].to_vec();

Ok(DecodedNv12Frame {
y_plane,
uv_plane,
y_linesize: width,
uv_linesize: width,
width: frame.width,
height: frame.height,
})
}

/// Get frame count
pub fn frame_count(&self) -> u64 {
self.frame_count
}

/// Get current resolution from config
pub fn resolution(&self) -> Resolution {
self.config.resolution
}
}

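// Illustrative wiring sketch (assumptions flagged inline): feeding a decoded
// frame to the H264 encoder's packed-NV12 entry point. `jpeg_frame`,
// `h264_encoder`, and `pts_ms` are hypothetical stand-ins for the capture and
// encoder halves of the pipeline; the concatenation below assumes tightly
// packed planes, which the converters above guarantee (linesize == width).
//
//     let mut dec = MjpegVaapiDecoder::with_software(Resolution::HD1080)?;
//     let nv12 = dec.decode(&jpeg_frame)?;
//     let mut packed = nv12.y_plane.clone();
//     packed.extend_from_slice(&nv12.uv_plane);     // packed NV12: Y then UV
//     h264_encoder.encode_nv12(&packed, pts_ms)?;
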
/// Libyuv-based MJPEG decoder for direct YUV420P output
///
/// This decoder is optimized for software encoders (libvpx, libx265) that need YUV420P input.
/// It uses libyuv's MJPGToI420 to decode directly to I420/YUV420P format.
pub struct MjpegTurboDecoder {
/// Frame counter
frame_count: u64,
}

impl MjpegTurboDecoder {
/// Create a new libyuv-based MJPEG decoder
pub fn new(resolution: Resolution) -> Result<Self> {
info!(
"Created libyuv MJPEG decoder for {}x{} (direct YUV420P output)",
resolution.width, resolution.height
);

Ok(Self {
frame_count: 0,
})
}

/// Decode MJPEG frame directly to YUV420P using libyuv
///
/// This is the optimal path for software encoders that need YUV420P input.
/// libyuv handles all JPEG subsampling formats internally.
pub fn decode_to_yuv420p(&mut self, jpeg_data: &[u8]) -> Result<DecodedYuv420pFrame> {
if jpeg_data.len() < 2 || jpeg_data[0] != 0xFF || jpeg_data[1] != 0xD8 {
return Err(AppError::VideoError("Invalid JPEG data".to_string()));
}

self.frame_count += 1;

// Get JPEG dimensions
let (width, height) = libyuv::mjpeg_size(jpeg_data)
.map_err(|e| AppError::VideoError(format!("Failed to read MJPEG size: {}", e)))?;

let y_size = (width * height) as usize;
let uv_size = y_size / 4;
let yuv420_size = y_size + uv_size * 2;

let mut yuv_data = vec![0u8; yuv420_size];

libyuv::mjpeg_to_i420(jpeg_data, &mut yuv_data, width, height)
.map_err(|e| AppError::VideoError(format!("libyuv MJPEG→I420 failed: {}", e)))?;

Ok(DecodedYuv420pFrame {
y_plane: yuv_data[..y_size].to_vec(),
u_plane: yuv_data[y_size..y_size + uv_size].to_vec(),
v_plane: yuv_data[y_size + uv_size..].to_vec(),
y_linesize: width,
u_linesize: width / 2,
v_linesize: width / 2,
width,
height,
})
}

/// Decode directly to packed YUV420P buffer using libyuv
///
/// This uses libyuv's MJPGToI420 which handles all JPEG subsampling formats
/// and converts to I420 directly.
pub fn decode_to_yuv420p_buffer(&mut self, jpeg_data: &[u8], dst: &mut [u8]) -> Result<usize> {
if jpeg_data.len() < 2 || jpeg_data[0] != 0xFF || jpeg_data[1] != 0xD8 {
return Err(AppError::VideoError("Invalid JPEG data".to_string()));
}

self.frame_count += 1;

// Get JPEG dimensions from libyuv
let (width, height) = libyuv::mjpeg_size(jpeg_data)
.map_err(|e| AppError::VideoError(format!("Failed to read MJPEG size: {}", e)))?;

let yuv420_size = (width * height * 3 / 2) as usize;

if dst.len() < yuv420_size {
return Err(AppError::VideoError(format!(
"Buffer too small: {} < {}", dst.len(), yuv420_size
)));
}

// Decode MJPEG directly to I420 using libyuv
// libyuv handles all JPEG subsampling formats (4:2:0, 4:2:2, 4:4:4) internally
libyuv::mjpeg_to_i420(jpeg_data, &mut dst[..yuv420_size], width, height)
.map_err(|e| AppError::VideoError(format!("libyuv MJPEG→I420 failed: {}", e)))?;

Ok(yuv420_size)
}

/// Get frame count
pub fn frame_count(&self) -> u64 {
self.frame_count
}
}

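// Sketch of the zero-reallocation path (hypothetical driver loop): reuse one
// preallocated I420 buffer across frames via decode_to_yuv420p_buffer()
// instead of allocating fresh planes per frame with decode_to_yuv420p().
// `jpeg_frames` and `software_encoder` are assumed stand-ins.
//
//     let mut dec = MjpegTurboDecoder::new(Resolution::HD1080)?;
//     let mut i420 = vec![0u8; 1920 * 1080 * 3 / 2];
//     for jpeg in jpeg_frames {
//         let n = dec.decode_to_yuv420p_buffer(&jpeg, &mut i420)?;
//         software_encoder.encode(&i420[..n])?;
//     }
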
/// Check if MJPEG VAAPI decoder is available
pub fn is_mjpeg_vaapi_available() -> bool {
let ctx = DecodeContext {
name: "mjpeg".to_string(),
device_type: AVHWDeviceType::AV_HWDEVICE_TYPE_VAAPI,
thread_count: 1,
};

match Decoder::new(ctx) {
Ok(_) => {
info!("MJPEG VAAPI decoder is available");
true
}
Err(_) => {
warn!("MJPEG VAAPI decoder is not available");
false
}
}
}

#[cfg(test)]
mod tests {
use super::*;

#[test]
fn test_mjpeg_vaapi_availability() {
let available = is_mjpeg_vaapi_available();
println!("MJPEG VAAPI available: {}", available);
}

#[test]
fn test_decoder_creation() {
let config = MjpegVaapiDecoderConfig::default();
match MjpegVaapiDecoder::new(config) {
Ok(decoder) => {
println!("Decoder created, hwaccel: {}", decoder.is_hwaccel_active());
}
Err(e) => {
println!("Failed to create decoder: {}", e);
}
}
}
}
11
src/video/decoder/mod.rs
Normal file
@@ -0,0 +1,11 @@
//! Video decoder implementations
//!
//! This module provides video decoding capabilities including:
//! - MJPEG decoding to NV12 (software decode via hwcodec/FFmpeg; VAAPI does not decode MJPEG)
//! - MJPEG libyuv decoding (outputs YUV420P directly)

pub mod mjpeg;

pub use mjpeg::{
DecodedYuv420pFrame, MjpegTurboDecoder, MjpegVaapiDecoder, MjpegVaapiDecoderConfig,
};
459
src/video/device.rs
Normal file
@@ -0,0 +1,459 @@
//! V4L2 device enumeration and capability query

use serde::{Deserialize, Serialize};
use std::path::{Path, PathBuf};
use tracing::{debug, info, warn};
use v4l::capability::Flags;
use v4l::prelude::*;
use v4l::video::Capture;
use v4l::Format;
use v4l::FourCC;

use super::format::{PixelFormat, Resolution};
use crate::error::{AppError, Result};

/// Information about a video device
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VideoDeviceInfo {
/// Device path (e.g., /dev/video0)
pub path: PathBuf,
/// Device name from driver
pub name: String,
/// Driver name
pub driver: String,
/// Bus info
pub bus_info: String,
/// Card name
pub card: String,
/// Supported pixel formats
pub formats: Vec<FormatInfo>,
/// Device capabilities
pub capabilities: DeviceCapabilities,
/// Whether this is likely an HDMI capture card
pub is_capture_card: bool,
/// Priority score for device selection (higher is better)
pub priority: u32,
}

/// Information about a supported format
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FormatInfo {
/// Pixel format
pub format: PixelFormat,
/// Supported resolutions
pub resolutions: Vec<ResolutionInfo>,
/// Description from driver
pub description: String,
}

/// Information about a supported resolution and frame rates
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ResolutionInfo {
pub width: u32,
pub height: u32,
pub fps: Vec<u32>,
}

impl ResolutionInfo {
pub fn new(width: u32, height: u32, fps: Vec<u32>) -> Self {
Self { width, height, fps }
}

pub fn resolution(&self) -> Resolution {
Resolution::new(self.width, self.height)
}
}

/// Device capabilities
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct DeviceCapabilities {
pub video_capture: bool,
pub video_capture_mplane: bool,
pub video_output: bool,
pub streaming: bool,
pub read_write: bool,
}

/// Wrapper around a V4L2 video device
pub struct VideoDevice {
pub path: PathBuf,
device: Device,
}

impl VideoDevice {
/// Open a video device by path
pub fn open(path: impl AsRef<Path>) -> Result<Self> {
let path = path.as_ref().to_path_buf();
debug!("Opening video device: {:?}", path);

let device = Device::with_path(&path).map_err(|e| {
AppError::VideoError(format!("Failed to open device {:?}: {}", path, e))
})?;

Ok(Self { path, device })
}

/// Get device capabilities
pub fn capabilities(&self) -> Result<DeviceCapabilities> {
let caps = self.device.query_caps().map_err(|e| {
AppError::VideoError(format!("Failed to query capabilities: {}", e))
})?;

Ok(DeviceCapabilities {
video_capture: caps.capabilities.contains(Flags::VIDEO_CAPTURE),
video_capture_mplane: caps.capabilities.contains(Flags::VIDEO_CAPTURE_MPLANE),
video_output: caps.capabilities.contains(Flags::VIDEO_OUTPUT),
streaming: caps.capabilities.contains(Flags::STREAMING),
read_write: caps.capabilities.contains(Flags::READ_WRITE),
})
}

/// Get detailed device information
pub fn info(&self) -> Result<VideoDeviceInfo> {
let caps = self.device.query_caps().map_err(|e| {
AppError::VideoError(format!("Failed to query capabilities: {}", e))
})?;

let capabilities = DeviceCapabilities {
video_capture: caps.capabilities.contains(Flags::VIDEO_CAPTURE),
video_capture_mplane: caps.capabilities.contains(Flags::VIDEO_CAPTURE_MPLANE),
video_output: caps.capabilities.contains(Flags::VIDEO_OUTPUT),
streaming: caps.capabilities.contains(Flags::STREAMING),
read_write: caps.capabilities.contains(Flags::READ_WRITE),
};

let formats = self.enumerate_formats()?;

// Determine if this is likely an HDMI capture card
let is_capture_card = Self::detect_capture_card(&caps.card, &caps.driver, &formats);

// Calculate priority score
let priority = Self::calculate_priority(&caps.card, &caps.driver, &formats, is_capture_card);

Ok(VideoDeviceInfo {
path: self.path.clone(),
name: caps.card.clone(),
driver: caps.driver.clone(),
bus_info: caps.bus.clone(),
card: caps.card,
formats,
capabilities,
is_capture_card,
priority,
})
}

/// Enumerate supported formats
pub fn enumerate_formats(&self) -> Result<Vec<FormatInfo>> {
let mut formats = Vec::new();

// Get supported formats
let format_descs = self.device.enum_formats().map_err(|e| {
AppError::VideoError(format!("Failed to enumerate formats: {}", e))
})?;

for desc in format_descs {
// Try to convert FourCC to our PixelFormat
if let Some(format) = PixelFormat::from_fourcc(desc.fourcc) {
let resolutions = self.enumerate_resolutions(desc.fourcc)?;

formats.push(FormatInfo {
format,
resolutions,
description: desc.description.clone(),
});
} else {
debug!(
"Skipping unsupported format: {:?} ({})",
desc.fourcc, desc.description
);
}
}

// Sort by format priority (MJPEG first)
formats.sort_by(|a, b| b.format.priority().cmp(&a.format.priority()));

Ok(formats)
}

/// Enumerate resolutions for a specific format
fn enumerate_resolutions(&self, fourcc: FourCC) -> Result<Vec<ResolutionInfo>> {
let mut resolutions = Vec::new();

// Try to enumerate frame sizes
match self.device.enum_framesizes(fourcc) {
Ok(sizes) => {
for size in sizes {
match size.size {
v4l::framesize::FrameSizeEnum::Discrete(d) => {
let fps = self.enumerate_fps(fourcc, d.width, d.height).unwrap_or_default();
resolutions.push(ResolutionInfo::new(d.width, d.height, fps));
}
v4l::framesize::FrameSizeEnum::Stepwise(s) => {
// For stepwise, add some common resolutions
for res in [
Resolution::VGA,
Resolution::HD720,
Resolution::HD1080,
Resolution::UHD4K,
] {
if res.width >= s.min_width
&& res.width <= s.max_width
&& res.height >= s.min_height
&& res.height <= s.max_height
{
let fps = self.enumerate_fps(fourcc, res.width, res.height).unwrap_or_default();
resolutions.push(ResolutionInfo::new(res.width, res.height, fps));
}
}
}
}
}
}
Err(e) => {
debug!("Failed to enumerate frame sizes for {:?}: {}", fourcc, e);
}
}

// Sort by resolution (largest first)
resolutions.sort_by(|a, b| (b.width * b.height).cmp(&(a.width * a.height)));
resolutions.dedup_by(|a, b| a.width == b.width && a.height == b.height);

Ok(resolutions)
}

/// Enumerate FPS for a specific resolution
fn enumerate_fps(&self, fourcc: FourCC, width: u32, height: u32) -> Result<Vec<u32>> {
let mut fps_list = Vec::new();

match self.device.enum_frameintervals(fourcc, width, height) {
Ok(intervals) => {
for interval in intervals {
match interval.interval {
v4l::frameinterval::FrameIntervalEnum::Discrete(fraction) => {
if fraction.numerator > 0 {
let fps = fraction.denominator / fraction.numerator;
fps_list.push(fps);
}
}
v4l::frameinterval::FrameIntervalEnum::Stepwise(step) => {
// For stepwise intervals, record just the min and max frame rates
if step.max.numerator > 0 {
let min_fps = step.max.denominator / step.max.numerator;
let max_fps = step.min.denominator / step.min.numerator;
fps_list.push(min_fps);
if max_fps != min_fps {
fps_list.push(max_fps);
}
}
}
}
}
}
Err(_) => {
// If enumeration fails, assume 30fps
fps_list.push(30);
}
}

fps_list.sort_by(|a, b| b.cmp(a));
fps_list.dedup();
Ok(fps_list)
}

/// Get current format
pub fn get_format(&self) -> Result<Format> {
self.device.format().map_err(|e| {
AppError::VideoError(format!("Failed to get format: {}", e))
})
}

/// Set capture format
pub fn set_format(&self, width: u32, height: u32, format: PixelFormat) -> Result<Format> {
let fmt = Format::new(width, height, format.to_fourcc());

// Request the format
let actual = self.device.set_format(&fmt).map_err(|e| {
AppError::VideoError(format!("Failed to set format: {}", e))
})?;

if actual.width != width || actual.height != height {
warn!(
"Requested {}x{}, got {}x{}",
width, height, actual.width, actual.height
);
}

Ok(actual)
}

/// Detect if device is likely an HDMI capture card
fn detect_capture_card(card: &str, driver: &str, formats: &[FormatInfo]) -> bool {
let card_lower = card.to_lowercase();
let driver_lower = driver.to_lowercase();

// Known capture card patterns
let capture_patterns = [
"hdmi",
"capture",
"grabber",
"usb3",
"ms2109",
"ms2130",
"macrosilicon",
"tc358743",
"uvc",
];

// Check card/driver names
for pattern in capture_patterns {
if card_lower.contains(pattern) || driver_lower.contains(pattern) {
return true;
}
}

// Capture cards usually support MJPEG and high resolutions
let has_mjpeg = formats.iter().any(|f| f.format == PixelFormat::Mjpeg);
let has_1080p = formats.iter().any(|f| {
f.resolutions
.iter()
.any(|r| r.width >= 1920 && r.height >= 1080)
});

has_mjpeg && has_1080p
}

/// Calculate device priority for selection
fn calculate_priority(
_card: &str,
driver: &str,
formats: &[FormatInfo],
is_capture_card: bool,
) -> u32 {
let mut priority = 0u32;

// Capture cards get highest priority
if is_capture_card {
priority += 1000;
}

// MJPEG support is valuable
if formats.iter().any(|f| f.format == PixelFormat::Mjpeg) {
priority += 100;
}

// High resolution support
let max_resolution = formats
.iter()
.flat_map(|f| &f.resolutions)
.map(|r| r.width * r.height)
.max()
.unwrap_or(0);

priority += (max_resolution / 100000) as u32;

// Known good drivers get bonus
let good_drivers = ["uvcvideo", "tc358743"];
if good_drivers.iter().any(|d| driver.contains(d)) {
priority += 50;
}

priority
}

/// Get the inner device reference (for advanced usage)
pub fn inner(&self) -> &Device {
&self.device
}
}

/// Enumerate all video capture devices
pub fn enumerate_devices() -> Result<Vec<VideoDeviceInfo>> {
info!("Enumerating video devices...");

let mut devices = Vec::new();

// Scan /dev/video* devices
for entry in std::fs::read_dir("/dev").map_err(|e| {
AppError::VideoError(format!("Failed to read /dev: {}", e))
})? {
let entry = match entry {
Ok(e) => e,
Err(_) => continue,
};

let path = entry.path();
let name = path.file_name().and_then(|n| n.to_str()).unwrap_or("");

if !name.starts_with("video") {
continue;
}

debug!("Found video device: {:?}", path);

// Try to open and query the device
match VideoDevice::open(&path) {
Ok(device) => {
match device.info() {
Ok(info) => {
// Only include devices with video capture capability
if info.capabilities.video_capture || info.capabilities.video_capture_mplane
{
info!(
"Found capture device: {} ({}) - {} formats",
info.name,
info.driver,
info.formats.len()
);
devices.push(info);
} else {
debug!("Skipping non-capture device: {:?}", path);
}
}
Err(e) => {
debug!("Failed to get info for {:?}: {}", path, e);
}
}
}
Err(e) => {
debug!("Failed to open {:?}: {}", path, e);
}
}
}

// Sort by priority (highest first)
devices.sort_by(|a, b| b.priority.cmp(&a.priority));

info!("Found {} video capture devices", devices.len());
Ok(devices)
}

/// Find the best video device for KVM use
pub fn find_best_device() -> Result<VideoDeviceInfo> {
let devices = enumerate_devices()?;

devices.into_iter().next().ok_or_else(|| {
AppError::VideoError("No video capture devices found".to_string())
})
}

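// Usage sketch (caller-side flow, not part of this file's API surface): pick
// the highest-priority capture device, open it, and negotiate a format. The
// driver may adjust the requested size, so always use the returned Format.
//
//     let best = find_best_device()?;                       // highest priority first
//     let dev = VideoDevice::open(&best.path)?;
//     let actual = dev.set_format(1920, 1080, PixelFormat::Mjpeg)?;
//     println!("negotiated {}x{}", actual.width, actual.height);
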
#[cfg(test)]
mod tests {
use super::*;

#[test]
fn test_pixel_format_conversion() {
let format = PixelFormat::Mjpeg;
let fourcc = format.to_fourcc();
let back = PixelFormat::from_fourcc(fourcc);
assert_eq!(back, Some(format));
}

#[test]
fn test_resolution() {
let res = Resolution::HD1080;
assert_eq!(res.width, 1920);
assert_eq!(res.height, 1080);
assert!(res.is_valid());
}
}
370
src/video/encoder/codec.rs
Normal file
@@ -0,0 +1,370 @@
//! WebRTC Video Codec abstraction layer
//!
//! This module provides a unified interface for video codecs used in WebRTC streaming.
//! It supports multiple codec types (H264, VP8, VP9, H265) with a common API.
//!
//! # Architecture
//!
//! ```text
//! VideoCodec (trait)
//! |
//! +-- H264Codec (current implementation)
//! +-- VP8Codec (reserved)
//! +-- VP9Codec (reserved)
//! +-- H265Codec (reserved)
//! ```

use bytes::Bytes;
use std::time::Duration;

use crate::error::Result;
use crate::video::format::Resolution;

/// Supported video codec types for WebRTC
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum VideoCodecType {
/// H.264/AVC - widely supported, good compression
H264,
/// VP8 - royalty-free, good browser support
VP8,
/// VP9 - better compression than VP8
VP9,
/// H.265/HEVC - best compression, limited browser support
H265,
}

impl VideoCodecType {
/// Get the codec name for SDP
pub fn sdp_name(&self) -> &'static str {
match self {
VideoCodecType::H264 => "H264",
VideoCodecType::VP8 => "VP8",
VideoCodecType::VP9 => "VP9",
VideoCodecType::H265 => "H265",
}
}

/// Get the default RTP payload type
pub fn default_payload_type(&self) -> u8 {
match self {
VideoCodecType::H264 => 96,
VideoCodecType::VP8 => 97,
VideoCodecType::VP9 => 98,
VideoCodecType::H265 => 99,
}
}

/// Get the RTP clock rate (always 90000 for video)
pub fn clock_rate(&self) -> u32 {
90000
}

/// Get the MIME type
pub fn mime_type(&self) -> &'static str {
match self {
VideoCodecType::H264 => "video/H264",
VideoCodecType::VP8 => "video/VP8",
VideoCodecType::VP9 => "video/VP9",
VideoCodecType::H265 => "video/H265",
}
}
}

impl std::fmt::Display for VideoCodecType {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", self.sdp_name())
}
}

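// Illustrative sketch: how the properties above combine into an SDP rtpmap
// line (the "a=rtpmap:<pt> <name>/<clock>" shape follows RFC 4566; the helper
// itself is hypothetical, not part of this module).
//
//     fn rtpmap_line(codec: VideoCodecType) -> String {
//         format!("a=rtpmap:{} {}/{}",
//             codec.default_payload_type(), codec.sdp_name(), codec.clock_rate())
//     }
//     // rtpmap_line(VideoCodecType::H264) == "a=rtpmap:96 H264/90000"
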
/// Encoded video frame for WebRTC transmission
#[derive(Debug, Clone)]
pub struct CodecFrame {
/// Encoded data (Annex B format for H264/H265, raw for VP8/VP9)
pub data: Bytes,
/// Presentation timestamp in milliseconds
pub pts_ms: i64,
/// Whether this is a keyframe (IDR for H264, key frame for VP8/VP9)
pub is_keyframe: bool,
/// Codec type
pub codec: VideoCodecType,
/// Frame sequence number
pub sequence: u64,
/// Frame duration
pub duration: Duration,
}

impl CodecFrame {
/// Create a new H264 frame
pub fn h264(data: Bytes, pts_ms: i64, is_keyframe: bool, sequence: u64, fps: u32) -> Self {
Self {
data,
pts_ms,
is_keyframe,
codec: VideoCodecType::H264,
sequence,
duration: Duration::from_millis(1000 / fps as u64),
}
}

/// Create a new VP8 frame
pub fn vp8(data: Bytes, pts_ms: i64, is_keyframe: bool, sequence: u64, fps: u32) -> Self {
Self {
data,
pts_ms,
is_keyframe,
codec: VideoCodecType::VP8,
sequence,
duration: Duration::from_millis(1000 / fps as u64),
}
}

/// Create a new VP9 frame
pub fn vp9(data: Bytes, pts_ms: i64, is_keyframe: bool, sequence: u64, fps: u32) -> Self {
Self {
data,
pts_ms,
is_keyframe,
codec: VideoCodecType::VP9,
sequence,
duration: Duration::from_millis(1000 / fps as u64),
}
}

/// Create a new H265 frame
pub fn h265(data: Bytes, pts_ms: i64, is_keyframe: bool, sequence: u64, fps: u32) -> Self {
Self {
data,
pts_ms,
is_keyframe,
codec: VideoCodecType::H265,
sequence,
duration: Duration::from_millis(1000 / fps as u64),
}
}

/// Get frame size in bytes
pub fn len(&self) -> usize {
self.data.len()
}

/// Check if frame is empty
pub fn is_empty(&self) -> bool {
self.data.is_empty()
}
}

/// Video codec configuration
#[derive(Debug, Clone)]
pub struct VideoCodecConfig {
/// Codec type
pub codec: VideoCodecType,
/// Target resolution
pub resolution: Resolution,
/// Target bitrate in kbps
pub bitrate_kbps: u32,
/// Target FPS
pub fps: u32,
/// GOP size (keyframe interval in frames)
pub gop_size: u32,
/// Profile (codec-specific)
pub profile: Option<String>,
/// Level (codec-specific)
pub level: Option<String>,
}

impl Default for VideoCodecConfig {
fn default() -> Self {
Self {
codec: VideoCodecType::H264,
resolution: Resolution::HD720,
bitrate_kbps: 8000,
fps: 30,
gop_size: 30,
profile: None,
level: None,
}
}
}

impl VideoCodecConfig {
/// Create H264 config with common settings
pub fn h264(resolution: Resolution, bitrate_kbps: u32, fps: u32) -> Self {
Self {
codec: VideoCodecType::H264,
resolution,
bitrate_kbps,
fps,
gop_size: fps, // 1 second GOP
profile: Some("baseline".to_string()),
level: Some("3.1".to_string()),
}
}

/// Create VP8 config
pub fn vp8(resolution: Resolution, bitrate_kbps: u32, fps: u32) -> Self {
Self {
codec: VideoCodecType::VP8,
resolution,
bitrate_kbps,
fps,
gop_size: fps,
profile: None,
level: None,
}
}

/// Create VP9 config
pub fn vp9(resolution: Resolution, bitrate_kbps: u32, fps: u32) -> Self {
Self {
codec: VideoCodecType::VP9,
resolution,
bitrate_kbps,
fps,
gop_size: fps,
profile: None,
level: None,
}
}

/// Create H265 config
pub fn h265(resolution: Resolution, bitrate_kbps: u32, fps: u32) -> Self {
Self {
codec: VideoCodecType::H265,
resolution,
bitrate_kbps,
fps,
gop_size: fps,
profile: Some("main".to_string()),
level: Some("4.0".to_string()),
}
}
}

/// WebRTC video codec trait
///
/// This trait defines the interface for video codecs used in WebRTC streaming.
/// Implementations should handle format conversion internally if needed.
pub trait VideoCodec: Send {
/// Get codec type
fn codec_type(&self) -> VideoCodecType;

/// Get codec name for display
fn codec_name(&self) -> &'static str;

/// Get RTP payload type
fn payload_type(&self) -> u8 {
self.codec_type().default_payload_type()
}

/// Get SDP fmtp parameters (codec-specific)
///
/// For H264: "level-asymmetry-allowed=1;packetization-mode=1;profile-level-id=42e01f"
/// For VP8/VP9: None or empty
fn sdp_fmtp(&self) -> Option<String>;

/// Encode a raw frame (NV12 format expected)
///
/// # Arguments
/// * `frame` - Raw frame data in NV12 format
/// * `pts_ms` - Presentation timestamp in milliseconds
///
/// # Returns
/// * `Ok(Some(frame))` - Encoded frame
/// * `Ok(None)` - Encoder is buffering (no output yet)
/// * `Err(e)` - Encoding error
fn encode(&mut self, frame: &[u8], pts_ms: i64) -> Result<Option<CodecFrame>>;

/// Set target bitrate dynamically
fn set_bitrate(&mut self, bitrate_kbps: u32) -> Result<()>;

/// Request a keyframe on next encode
fn request_keyframe(&mut self);

/// Get current configuration
fn config(&self) -> &VideoCodecConfig;

/// Flush any pending frames
fn flush(&mut self) -> Result<Vec<CodecFrame>> {
Ok(vec![])
}

/// Reset encoder state
fn reset(&mut self) -> Result<()> {
Ok(())
}
}

/// Video codec factory trait
///
/// Used to create codec instances and query available codecs.
pub trait VideoCodecFactory: Send + Sync {
/// Create a codec with the given configuration
fn create(&self, config: VideoCodecConfig) -> Result<Box<dyn VideoCodec>>;

/// Get supported codec types
fn supported_codecs(&self) -> Vec<VideoCodecType>;

/// Check if a specific codec is available
fn is_codec_available(&self, codec: VideoCodecType) -> bool {
self.supported_codecs().contains(&codec)
}

/// Get the best available codec (based on priority)
fn best_codec(&self) -> Option<VideoCodecType> {
// Priority: H264 > VP8 > VP9 > H265
let supported = self.supported_codecs();
if supported.contains(&VideoCodecType::H264) {
Some(VideoCodecType::H264)
} else if supported.contains(&VideoCodecType::VP8) {
Some(VideoCodecType::VP8)
} else if supported.contains(&VideoCodecType::VP9) {
Some(VideoCodecType::VP9)
} else if supported.contains(&VideoCodecType::H265) {
Some(VideoCodecType::H265)
} else {
None
}
}
}

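// Caller-side sketch for the factory traits above (the `factory` value,
// `nv12_frame`, and `send_rtp` sink are hypothetical; the calls are the trait
// methods defined in this file): probe the best codec, build a matching
// config, encode one frame.
//
//     let kind = factory.best_codec().expect("no codec available");
//     let config = match kind {
//         VideoCodecType::H264 => VideoCodecConfig::h264(Resolution::HD720, 4000, 30),
//         VideoCodecType::VP8 => VideoCodecConfig::vp8(Resolution::HD720, 4000, 30),
//         VideoCodecType::VP9 => VideoCodecConfig::vp9(Resolution::HD720, 4000, 30),
//         VideoCodecType::H265 => VideoCodecConfig::h265(Resolution::HD720, 4000, 30),
//     };
//     let mut codec = factory.create(config)?;
//     if let Some(frame) = codec.encode(&nv12_frame, 0)? {   // Ok(None) = still buffering
//         send_rtp(frame);
//     }
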
#[cfg(test)]
mod tests {
use super::*;

#[test]
fn test_codec_type_properties() {
assert_eq!(VideoCodecType::H264.sdp_name(), "H264");
assert_eq!(VideoCodecType::H264.default_payload_type(), 96);
assert_eq!(VideoCodecType::H264.clock_rate(), 90000);
assert_eq!(VideoCodecType::H264.mime_type(), "video/H264");
}

#[test]
fn test_codec_frame_creation() {
let data = Bytes::from(vec![0x00, 0x00, 0x00, 0x01, 0x65]);
let frame = CodecFrame::h264(data.clone(), 1000, true, 1, 30);

assert_eq!(frame.codec, VideoCodecType::H264);
assert!(frame.is_keyframe);
assert_eq!(frame.pts_ms, 1000);
assert_eq!(frame.sequence, 1);
assert_eq!(frame.len(), 5);
}

#[test]
fn test_codec_config_default() {
let config = VideoCodecConfig::default();
assert_eq!(config.codec, VideoCodecType::H264);
assert_eq!(config.bitrate_kbps, 8000); // matches Default::default() above
assert_eq!(config.fps, 30);
}

#[test]
fn test_codec_config_h264() {
let config = VideoCodecConfig::h264(Resolution::HD1080, 4000, 60);
assert_eq!(config.codec, VideoCodecType::H264);
assert_eq!(config.bitrate_kbps, 4000);
assert_eq!(config.fps, 60);
assert_eq!(config.gop_size, 60);
}
}
532
src/video/encoder/h264.rs
Normal file
@@ -0,0 +1,532 @@
//! H.264 encoder using hwcodec (rustdesk's FFmpeg wrapper)
//!
//! Supports multiple encoder backends via FFmpeg:
//! - VAAPI (Intel/AMD/NVIDIA on Linux)
//! - NVENC (NVIDIA)
//! - AMF (AMD)
//! - Software (libx264)
//!
//! The encoder is selected automatically based on availability.

use bytes::Bytes;
use std::sync::Once;
use tracing::{debug, error, info, warn};

use hwcodec::common::{Quality, RateControl};
use hwcodec::ffmpeg::AVPixelFormat;
use hwcodec::ffmpeg_ram::encode::{EncodeContext, Encoder as HwEncoder};
use hwcodec::ffmpeg_ram::CodecInfo;

use super::traits::{EncodedFormat, EncodedFrame, Encoder, EncoderConfig};
use crate::error::{AppError, Result};
use crate::video::format::{PixelFormat, Resolution};

static INIT_LOGGING: Once = Once::new();

/// Initialize hwcodec logging (only once)
fn init_hwcodec_logging() {
INIT_LOGGING.call_once(|| {
// hwcodec uses the `log` crate, which will work with our tracing subscriber
debug!("hwcodec logging initialized");
});
}

/// H.264 encoder type (detected from hwcodec)
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum H264EncoderType {
/// NVIDIA NVENC
Nvenc,
/// Intel Quick Sync (QSV)
Qsv,
/// AMD AMF
Amf,
/// VAAPI (Linux generic)
Vaapi,
/// RKMPP (Rockchip) - requires hwcodec extension
Rkmpp,
/// V4L2 M2M (ARM generic) - requires hwcodec extension
V4l2M2m,
/// Software encoding (libx264/openh264)
Software,
/// No encoder available
None,
}

impl std::fmt::Display for H264EncoderType {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
H264EncoderType::Nvenc => write!(f, "NVENC"),
H264EncoderType::Qsv => write!(f, "QSV"),
H264EncoderType::Amf => write!(f, "AMF"),
H264EncoderType::Vaapi => write!(f, "VAAPI"),
H264EncoderType::Rkmpp => write!(f, "RKMPP"),
H264EncoderType::V4l2M2m => write!(f, "V4L2 M2M"),
H264EncoderType::Software => write!(f, "Software"),
H264EncoderType::None => write!(f, "None"),
}
}
}

impl Default for H264EncoderType {
fn default() -> Self {
Self::None
}
}

/// Map codec name to encoder type
fn codec_name_to_type(name: &str) -> H264EncoderType {
if name.contains("nvenc") {
H264EncoderType::Nvenc
} else if name.contains("qsv") {
H264EncoderType::Qsv
} else if name.contains("amf") {
H264EncoderType::Amf
} else if name.contains("vaapi") {
H264EncoderType::Vaapi
} else if name.contains("rkmpp") {
H264EncoderType::Rkmpp
} else if name.contains("v4l2m2m") {
H264EncoderType::V4l2M2m
} else {
H264EncoderType::Software
}
}

/// Input pixel format for H264 encoder
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum H264InputFormat {
/// YUV420P (I420) - planar Y, U, V
Yuv420p,
/// NV12 - Y plane + interleaved UV plane (optimal for VAAPI)
Nv12,
}

impl Default for H264InputFormat {
fn default() -> Self {
Self::Nv12 // Default to NV12 for VAAPI compatibility
}
}

/// H.264 encoder configuration
#[derive(Debug, Clone)]
pub struct H264Config {
/// Base encoder config
pub base: EncoderConfig,
/// Target bitrate in kbps
pub bitrate_kbps: u32,
/// GOP size (keyframe interval)
pub gop_size: u32,
/// Frame rate
pub fps: u32,
/// Input pixel format
pub input_format: H264InputFormat,
}

impl Default for H264Config {
fn default() -> Self {
Self {
base: EncoderConfig::default(),
bitrate_kbps: 8000,
gop_size: 30,
fps: 30,
input_format: H264InputFormat::Nv12,
}
}
}

impl H264Config {
/// Create config for low latency streaming with NV12 input (optimal for VAAPI)
pub fn low_latency(resolution: Resolution, bitrate_kbps: u32) -> Self {
Self {
base: EncoderConfig::h264(resolution, bitrate_kbps),
bitrate_kbps,
gop_size: 30,
fps: 30,
input_format: H264InputFormat::Nv12,
}
}

/// Create config for low latency streaming with YUV420P input
pub fn low_latency_yuv420p(resolution: Resolution, bitrate_kbps: u32) -> Self {
Self {
base: EncoderConfig::h264(resolution, bitrate_kbps),
bitrate_kbps,
gop_size: 30,
fps: 30,
input_format: H264InputFormat::Yuv420p,
}
}

/// Create config for quality streaming
pub fn quality(resolution: Resolution, bitrate_kbps: u32) -> Self {
Self {
base: EncoderConfig::h264(resolution, bitrate_kbps),
bitrate_kbps,
gop_size: 60,
fps: 30,
input_format: H264InputFormat::Nv12,
}
}

/// Set input format
pub fn with_input_format(mut self, format: H264InputFormat) -> Self {
self.input_format = format;
self
}
}

/// Get available H264 encoders from hwcodec
pub fn get_available_encoders(width: u32, height: u32) -> Vec<CodecInfo> {
init_hwcodec_logging();

let ctx = EncodeContext {
name: String::new(),
mc_name: None,
width: width as i32,
height: height as i32,
pixfmt: AVPixelFormat::AV_PIX_FMT_YUV420P,
align: 1,
fps: 30,
gop: 30,
rc: RateControl::RC_CBR,
quality: Quality::Quality_Low, // Use low quality preset for fastest encoding (ultrafast)
kbs: 2000,
q: 23,
thread_count: 4,
};

HwEncoder::available_encoders(ctx, None)
}

/// Detect best available H.264 encoder
pub fn detect_best_encoder(width: u32, height: u32) -> (H264EncoderType, Option<String>) {
let encoders = get_available_encoders(width, height);

if encoders.is_empty() {
warn!("No H.264 encoders available from hwcodec");
return (H264EncoderType::None, None);
}

// Find H264 encoder (not H265)
for codec in &encoders {
if codec.format == hwcodec::common::DataFormat::H264 {
let encoder_type = codec_name_to_type(&codec.name);
info!("Best H.264 encoder: {} ({})", codec.name, encoder_type);
return (encoder_type, Some(codec.name.clone()));
}
}

(H264EncoderType::None, None)
}

/// Encoded frame from hwcodec (cloned for ownership)
#[derive(Debug, Clone)]
pub struct HwEncodeFrame {
pub data: Vec<u8>,
pub pts: i64,
pub key: i32,
}

/// H.264 encoder using hwcodec
pub struct H264Encoder {
/// hwcodec encoder instance
inner: HwEncoder,
/// Encoder configuration
config: H264Config,
/// Detected encoder type
encoder_type: H264EncoderType,
/// Codec name
codec_name: String,
/// Frame counter
frame_count: u64,
/// YUV420P buffer for input (reserved for future use)
#[allow(dead_code)]
yuv_buffer: Vec<u8>,
/// Required YUV buffer length from hwcodec
yuv_length: i32,
}

impl H264Encoder {
/// Create a new H.264 encoder with automatic codec detection
pub fn new(config: H264Config) -> Result<Self> {
init_hwcodec_logging();

let width = config.base.resolution.width;
let height = config.base.resolution.height;

// Detect best encoder
let (_encoder_type, codec_name) = detect_best_encoder(width, height);

let codec_name = codec_name.ok_or_else(|| {
AppError::VideoError("No H.264 encoder available".to_string())
})?;

Self::with_codec(config, &codec_name)
}

/// Create encoder with specific codec name
pub fn with_codec(config: H264Config, codec_name: &str) -> Result<Self> {
init_hwcodec_logging();

let width = config.base.resolution.width;
let height = config.base.resolution.height;

// Select pixel format based on config
let pixfmt = match config.input_format {
H264InputFormat::Nv12 => AVPixelFormat::AV_PIX_FMT_NV12,
H264InputFormat::Yuv420p => AVPixelFormat::AV_PIX_FMT_YUV420P,
};

info!(
"Creating H.264 encoder: {} at {}x{} @ {} kbps (input: {:?})",
codec_name, width, height, config.bitrate_kbps, config.input_format
);

let ctx = EncodeContext {
name: codec_name.to_string(),
mc_name: None,
width: width as i32,
height: height as i32,
pixfmt,
align: 1,
fps: config.fps as i32,
gop: config.gop_size as i32,
rc: RateControl::RC_CBR,
quality: Quality::Quality_Low, // Use low quality preset for fastest encoding (lowest latency)
kbs: config.bitrate_kbps as i32,
q: 23,
thread_count: 4, // Use 4 threads for better performance
};

let inner = HwEncoder::new(ctx).map_err(|_| {
AppError::VideoError(format!("Failed to create encoder: {}", codec_name))
})?;

let yuv_length = inner.length;
let encoder_type = codec_name_to_type(codec_name);

info!(
"H.264 encoder created: {} (type: {}, buffer_length: {}, input_format: {:?})",
codec_name, encoder_type, yuv_length, config.input_format
);

Ok(Self {
inner,
config,
encoder_type,
codec_name: codec_name.to_string(),
frame_count: 0,
yuv_buffer: vec![0u8; yuv_length as usize],
yuv_length,
})
}

/// Create with auto-detected encoder
pub fn auto(resolution: Resolution, bitrate_kbps: u32) -> Result<Self> {
let config = H264Config::low_latency(resolution, bitrate_kbps);
Self::new(config)
}

/// Get encoder type
pub fn encoder_type(&self) -> &H264EncoderType {
&self.encoder_type
}

/// Get codec name
pub fn codec_name(&self) -> &str {
&self.codec_name
}

/// Update bitrate dynamically
pub fn set_bitrate(&mut self, bitrate_kbps: u32) -> Result<()> {
self.inner.set_bitrate(bitrate_kbps as i32).map_err(|_| {
AppError::VideoError("Failed to set bitrate".to_string())
})?;
self.config.bitrate_kbps = bitrate_kbps;
debug!("Bitrate updated to {} kbps", bitrate_kbps);
Ok(())
}

/// Request next frame to be a keyframe (IDR)
pub fn request_keyframe(&mut self) {
self.inner.request_keyframe();
debug!("H264 keyframe requested");
}

/// Encode raw frame data (YUV420P or NV12 depending on config)
pub fn encode_raw(&mut self, data: &[u8], pts_ms: i64) -> Result<Vec<HwEncodeFrame>> {
if data.len() < self.yuv_length as usize {
return Err(AppError::VideoError(format!(
"Frame data too small: {} < {}",
data.len(),
self.yuv_length
)));
}

self.frame_count += 1;

match self.inner.encode(data, pts_ms) {
Ok(frames) => {
// Copy frame data to owned HwEncodeFrame
let owned_frames: Vec<HwEncodeFrame> = frames
.iter()
.map(|f| HwEncodeFrame {
data: f.data.clone(),
pts: f.pts,
key: f.key,
})
.collect();
Ok(owned_frames)
}
Err(e) => {
// For the first ~30 frames, x264 may fail due to initialization
// Log as warning instead of error to avoid alarming users
if self.frame_count <= 30 {
warn!(
"Encode failed during initialization (frame {}): {} - this is normal for x264",
self.frame_count, e
);
} else {
error!("Encode failed: {}", e);
}
Err(AppError::VideoError(format!("Encode failed: {}", e)))
}
}
}

/// Encode YUV420P data (legacy method, use encode_raw for new code)
pub fn encode_yuv420p(&mut self, yuv_data: &[u8], pts_ms: i64) -> Result<Vec<HwEncodeFrame>> {
self.encode_raw(yuv_data, pts_ms)
}

/// Encode NV12 data
pub fn encode_nv12(&mut self, nv12_data: &[u8], pts_ms: i64) -> Result<Vec<HwEncodeFrame>> {
self.encode_raw(nv12_data, pts_ms)
}

/// Get input format
pub fn input_format(&self) -> H264InputFormat {
self.config.input_format
}

/// Get YUV buffer info (linesize, offset, length)
pub fn yuv_info(&self) -> (Vec<i32>, Vec<i32>, i32) {
(
self.inner.linesize.clone(),
self.inner.offset.clone(),
self.inner.length,
)
}
}

// SAFETY: H264Encoder contains hwcodec::ffmpeg_ram::encode::Encoder which has raw pointers
// that are not Send by default. However, we ensure that H264Encoder is only used from
// a single task/thread at a time (encoding is sequential), so this is safe.
// The raw pointers are internal FFmpeg context that doesn't escape the encoder.
unsafe impl Send for H264Encoder {}

impl Encoder for H264Encoder {
fn name(&self) -> &str {
&self.codec_name
}

fn output_format(&self) -> EncodedFormat {
EncodedFormat::H264
}

fn encode(&mut self, data: &[u8], sequence: u64) -> Result<EncodedFrame> {
// Forward the raw frame; its layout must match the configured input format
let pts_ms = (sequence * 1000 / self.config.fps as u64) as i64;

let frames = self.encode_yuv420p(data, pts_ms)?;

if frames.is_empty() {
// Encoder needs more frames (shouldn't happen with our config)
warn!("Encoder returned no frames");
return Err(AppError::VideoError("Encoder returned no frames".to_string()));
}

// Take the first frame
let frame = &frames[0];
let key_frame = frame.key == 1;

Ok(EncodedFrame::h264(
Bytes::from(frame.data.clone()),
self.config.base.resolution,
key_frame,
sequence,
frame.pts as u64,
frame.pts as u64,
))
}

fn flush(&mut self) -> Result<Vec<EncodedFrame>> {
// hwcodec doesn't have explicit flush, return empty
Ok(vec![])
}

fn reset(&mut self) -> Result<()> {
self.frame_count = 0;
Ok(())
}

fn config(&self) -> &EncoderConfig {
&self.config.base
}

fn supports_format(&self, format: PixelFormat) -> bool {
// Check if the format matches our configured input format
match self.config.input_format {
H264InputFormat::Nv12 => matches!(format, PixelFormat::Nv12),
H264InputFormat::Yuv420p => matches!(format, PixelFormat::Yuv420),
}
}
}

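// End-to-end encode sketch (the `nv12_frames` source and `send` sink are
// hypothetical; the encoder calls are the ones defined above). pts is derived
// from the frame sequence the same way Encoder::encode does:
// pts_ms = sequence * 1000 / fps.
//
//     let mut enc = H264Encoder::auto(Resolution::HD720, 4000)?;
//     for (seq, nv12) in nv12_frames.enumerate() {
//         let pts_ms = (seq as u64 * 1000 / 30) as i64;
//         for out in enc.encode_nv12(&nv12, pts_ms)? {
//             if out.key == 1 { /* keyframe: safe point to (re)start a stream */ }
//             send(out.data);
//         }
//     }
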
/// Encoder statistics
#[derive(Debug, Clone, Default)]
pub struct EncoderStats {
/// Total frames encoded
pub frames_encoded: u64,
/// Total bytes output
pub bytes_output: u64,
/// Current encoding FPS
pub fps: f32,
/// Average encoding time per frame (ms)
pub avg_encode_time_ms: f32,
/// Keyframes encoded
pub keyframes: u64,
}

#[cfg(test)]
mod tests {
use super::*;

#[test]
fn test_detect_encoder() {
let (encoder_type, codec_name) = detect_best_encoder(1280, 720);
println!("Detected encoder: {:?} ({:?})", encoder_type, codec_name);
}

#[test]
fn test_available_encoders() {
let encoders = get_available_encoders(1280, 720);
println!("Available encoders:");
for enc in &encoders {
println!(" - {} ({:?})", enc.name, enc.format);
}
}

#[test]
fn test_create_encoder() {
let config = H264Config::low_latency(Resolution::HD720, 2000);
match H264Encoder::new(config) {
Ok(encoder) => {
println!("Created encoder: {} ({})", encoder.codec_name(), encoder.encoder_type());
}
Err(e) => {
println!("Failed to create encoder: {}", e);
}
}
}
}
577
src/video/encoder/h265.rs
Normal file
@@ -0,0 +1,577 @@
//! H.265/HEVC encoder using hwcodec (FFmpeg wrapper)
//!
//! Supports both hardware and software encoding:
//! - Hardware: VAAPI, NVENC, QSV, AMF, RKMPP, V4L2 M2M
//! - Software: libx265 (CPU-based, high CPU usage)
//!
//! Hardware encoding is preferred when available for better performance.

use bytes::Bytes;
use std::sync::Once;
use tracing::{debug, error, info, warn};

use hwcodec::common::{DataFormat, Quality, RateControl};
use hwcodec::ffmpeg::AVPixelFormat;
use hwcodec::ffmpeg_ram::encode::{EncodeContext, Encoder as HwEncoder};
use hwcodec::ffmpeg_ram::CodecInfo;

use super::registry::{EncoderBackend, EncoderRegistry, VideoEncoderType};
use super::traits::{EncodedFormat, EncodedFrame, Encoder, EncoderConfig};
use crate::error::{AppError, Result};
use crate::video::format::{PixelFormat, Resolution};

static INIT_LOGGING: Once = Once::new();

/// Initialize hwcodec logging (only once)
fn init_hwcodec_logging() {
INIT_LOGGING.call_once(|| {
debug!("hwcodec logging initialized for H265");
});
}

/// H.265 encoder type (detected from hwcodec)
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum H265EncoderType {
/// NVIDIA NVENC
Nvenc,
/// Intel Quick Sync (QSV)
Qsv,
/// AMD AMF
Amf,
/// VAAPI (Linux generic)
Vaapi,
/// RKMPP (Rockchip)
Rkmpp,
/// V4L2 M2M (ARM generic)
V4l2M2m,
/// Software encoder (libx265)
Software,
/// No encoder available
None,
}

impl std::fmt::Display for H265EncoderType {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
H265EncoderType::Nvenc => write!(f, "NVENC"),
H265EncoderType::Qsv => write!(f, "QSV"),
H265EncoderType::Amf => write!(f, "AMF"),
H265EncoderType::Vaapi => write!(f, "VAAPI"),
H265EncoderType::Rkmpp => write!(f, "RKMPP"),
H265EncoderType::V4l2M2m => write!(f, "V4L2 M2M"),
H265EncoderType::Software => write!(f, "Software"),
H265EncoderType::None => write!(f, "None"),
}
}
}

impl Default for H265EncoderType {
fn default() -> Self {
Self::None
}
}

impl From<EncoderBackend> for H265EncoderType {
fn from(backend: EncoderBackend) -> Self {
match backend {
EncoderBackend::Nvenc => H265EncoderType::Nvenc,
EncoderBackend::Qsv => H265EncoderType::Qsv,
EncoderBackend::Amf => H265EncoderType::Amf,
EncoderBackend::Vaapi => H265EncoderType::Vaapi,
EncoderBackend::Rkmpp => H265EncoderType::Rkmpp,
EncoderBackend::V4l2m2m => H265EncoderType::V4l2M2m,
EncoderBackend::Software => H265EncoderType::Software,
}
}
}

/// Input pixel format for H265 encoder
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum H265InputFormat {
/// YUV420P (I420) - planar Y, U, V
Yuv420p,
/// NV12 - Y plane + interleaved UV plane (optimal for hardware encoders)
Nv12,
}

impl Default for H265InputFormat {
fn default() -> Self {
Self::Nv12 // Default to NV12 for hardware encoder compatibility
}
}

/// H.265 encoder configuration
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct H265Config {
|
||||
/// Base encoder config
|
||||
pub base: EncoderConfig,
|
||||
/// Target bitrate in kbps
|
||||
pub bitrate_kbps: u32,
|
||||
/// GOP size (keyframe interval)
|
||||
pub gop_size: u32,
|
||||
/// Frame rate
|
||||
pub fps: u32,
|
||||
/// Input pixel format
|
||||
pub input_format: H265InputFormat,
|
||||
}
|
||||
|
||||
impl Default for H265Config {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
base: EncoderConfig::default(),
|
||||
bitrate_kbps: 8000,
|
||||
gop_size: 30,
|
||||
fps: 30,
|
||||
input_format: H265InputFormat::Nv12,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl H265Config {
|
||||
/// Create config for low latency streaming with NV12 input
|
||||
pub fn low_latency(resolution: Resolution, bitrate_kbps: u32) -> Self {
|
||||
Self {
|
||||
base: EncoderConfig {
|
||||
resolution,
|
||||
input_format: PixelFormat::Nv12,
|
||||
quality: bitrate_kbps,
|
||||
fps: 30,
|
||||
gop_size: 30,
|
||||
},
|
||||
bitrate_kbps,
|
||||
gop_size: 30,
|
||||
fps: 30,
|
||||
input_format: H265InputFormat::Nv12,
|
||||
}
|
||||
}
|
||||
|
||||
/// Create config for quality streaming
|
||||
pub fn quality(resolution: Resolution, bitrate_kbps: u32) -> Self {
|
||||
Self {
|
||||
base: EncoderConfig {
|
||||
resolution,
|
||||
input_format: PixelFormat::Nv12,
|
||||
quality: bitrate_kbps,
|
||||
fps: 30,
|
||||
gop_size: 60,
|
||||
},
|
||||
bitrate_kbps,
|
||||
gop_size: 60,
|
||||
fps: 30,
|
||||
input_format: H265InputFormat::Nv12,
|
||||
}
|
||||
}
|
||||
|
||||
/// Set input format
|
||||
pub fn with_input_format(mut self, format: H265InputFormat) -> Self {
|
||||
self.input_format = format;
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
/// Get available H265 encoders (hardware and software) from hwcodec
pub fn get_available_h265_encoders(width: u32, height: u32) -> Vec<CodecInfo> {
    init_hwcodec_logging();

    let ctx = EncodeContext {
        name: String::new(),
        mc_name: None,
        width: width as i32,
        height: height as i32,
        pixfmt: AVPixelFormat::AV_PIX_FMT_NV12,
        align: 1,
        fps: 30,
        gop: 30,
        rc: RateControl::RC_CBR,
        quality: Quality::Quality_Default,
        kbs: 2000,
        q: 23,
        thread_count: 1,
    };

    let all_encoders = HwEncoder::available_encoders(ctx, None);

    // Include both hardware and software H265 encoders
    all_encoders
        .into_iter()
        .filter(|e| e.format == DataFormat::H265)
        .collect()
}

/// Detect best available H.265 encoder (hardware preferred, software fallback)
pub fn detect_best_h265_encoder(width: u32, height: u32) -> (H265EncoderType, Option<String>) {
    let encoders = get_available_h265_encoders(width, height);

    if encoders.is_empty() {
        warn!("No H.265 encoders available");
        return (H265EncoderType::None, None);
    }

    // Prefer hardware encoders over software (libx265)
    // Hardware priority: NVENC > QSV > AMF > VAAPI > RKMPP > V4L2 M2M > Software
    let codec = encoders
        .iter()
        .find(|e| !e.name.contains("libx265"))
        .or_else(|| encoders.first())
        .unwrap();

    let encoder_type = if codec.name.contains("nvenc") {
        H265EncoderType::Nvenc
    } else if codec.name.contains("qsv") {
        H265EncoderType::Qsv
    } else if codec.name.contains("amf") {
        H265EncoderType::Amf
    } else if codec.name.contains("vaapi") {
        H265EncoderType::Vaapi
    } else if codec.name.contains("rkmpp") {
        H265EncoderType::Rkmpp
    } else if codec.name.contains("v4l2m2m") {
        H265EncoderType::V4l2M2m
    } else if codec.name.contains("libx265") {
        H265EncoderType::Software
    } else {
        H265EncoderType::Software // Default to software for unknown
    };

    info!(
        "Selected H.265 encoder: {} ({})",
        codec.name, encoder_type
    );
    (encoder_type, Some(codec.name.clone()))
}
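
// Illustrative usage sketch (not part of the original source): how a caller
// might pair detection with construction. `Resolution::new`,
// `H265Config::low_latency`, and `H265Encoder::with_codec` are taken from
// this codebase; the surrounding wiring is an assumption.
//
//     let (kind, name) = detect_best_h265_encoder(1280, 720);
//     if let Some(name) = name {
//         let config = H265Config::low_latency(Resolution::new(1280, 720), 4000);
//         let encoder = H265Encoder::with_codec(config, &name)?;
//         info!("Using {} ({})", encoder.codec_name(), kind);
//     }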

/// Check if H265 hardware encoding is available
pub fn is_h265_available() -> bool {
    let registry = EncoderRegistry::global();
    registry.is_format_available(VideoEncoderType::H265, true)
}

/// Encoded frame from hwcodec (cloned for ownership)
#[derive(Debug, Clone)]
pub struct HwEncodeFrame {
    pub data: Vec<u8>,
    pub pts: i64,
    pub key: i32,
}

/// H.265 encoder using hwcodec (hardware preferred, libx265 software fallback)
pub struct H265Encoder {
    /// hwcodec encoder instance
    inner: HwEncoder,
    /// Encoder configuration
    config: H265Config,
    /// Detected encoder type
    encoder_type: H265EncoderType,
    /// Codec name
    codec_name: String,
    /// Frame counter
    frame_count: u64,
    /// Required buffer length from hwcodec
    buffer_length: i32,
}

impl H265Encoder {
    /// Create a new H.265 encoder with automatic codec detection
    ///
    /// Returns an error if no encoder (hardware or software) is available.
    pub fn new(config: H265Config) -> Result<Self> {
        init_hwcodec_logging();

        let width = config.base.resolution.width;
        let height = config.base.resolution.height;

        // Detect best hardware encoder
        let (encoder_type, codec_name) = detect_best_h265_encoder(width, height);

        if encoder_type == H265EncoderType::None {
            return Err(AppError::VideoError(
                "No H.265 encoder available. Please ensure FFmpeg is built with libx265 support.".to_string(),
            ));
        }

        let codec_name = codec_name.unwrap();
        Self::with_codec(config, &codec_name)
    }

    /// Create encoder with specific codec name
    pub fn with_codec(config: H265Config, codec_name: &str) -> Result<Self> {
        init_hwcodec_logging();

        // Determine if this is a software encoder
        let is_software = codec_name.contains("libx265");

        // Warn about software encoder performance
        if is_software {
            warn!(
                "Using software H.265 encoder (libx265) - high CPU usage expected. \
                 Hardware encoder is recommended for better performance."
            );
        }

        let width = config.base.resolution.width;
        let height = config.base.resolution.height;

        // Software encoders (libx265) require YUV420P, hardware encoders use NV12
        let (pixfmt, actual_input_format) = if is_software {
            (AVPixelFormat::AV_PIX_FMT_YUV420P, H265InputFormat::Yuv420p)
        } else {
            match config.input_format {
                H265InputFormat::Nv12 => (AVPixelFormat::AV_PIX_FMT_NV12, H265InputFormat::Nv12),
                H265InputFormat::Yuv420p => (AVPixelFormat::AV_PIX_FMT_YUV420P, H265InputFormat::Yuv420p),
            }
        };

        info!(
            "Creating H.265 encoder: {} at {}x{} @ {} kbps (input: {:?})",
            codec_name, width, height, config.bitrate_kbps, actual_input_format
        );

        let ctx = EncodeContext {
            name: codec_name.to_string(),
            mc_name: None,
            width: width as i32,
            height: height as i32,
            pixfmt,
            align: 1,
            fps: config.fps as i32,
            gop: config.gop_size as i32,
            rc: RateControl::RC_CBR,
            quality: Quality::Quality_Default,
            kbs: config.bitrate_kbps as i32,
            q: 23,
            thread_count: 1,
        };

        let inner = HwEncoder::new(ctx).map_err(|_| {
            AppError::VideoError(format!("Failed to create H.265 encoder: {}", codec_name))
        })?;

        let buffer_length = inner.length;
        let backend = EncoderBackend::from_codec_name(codec_name);
        let encoder_type = H265EncoderType::from(backend);

        // Update config to reflect actual input format used
        let mut config = config;
        config.input_format = actual_input_format;

        info!(
            "H.265 encoder created: {} (type: {}, buffer_length: {})",
            codec_name, encoder_type, buffer_length
        );

        Ok(Self {
            inner,
            config,
            encoder_type,
            codec_name: codec_name.to_string(),
            frame_count: 0,
            buffer_length,
        })
    }

    /// Create with auto-detected encoder
    pub fn auto(resolution: Resolution, bitrate_kbps: u32) -> Result<Self> {
        let config = H265Config::low_latency(resolution, bitrate_kbps);
        Self::new(config)
    }

    /// Get encoder type
    pub fn encoder_type(&self) -> &H265EncoderType {
        &self.encoder_type
    }

    /// Get codec name
    pub fn codec_name(&self) -> &str {
        &self.codec_name
    }

    /// Update bitrate dynamically
    pub fn set_bitrate(&mut self, bitrate_kbps: u32) -> Result<()> {
        self.inner.set_bitrate(bitrate_kbps as i32).map_err(|_| {
            AppError::VideoError("Failed to set H.265 bitrate".to_string())
        })?;
        self.config.bitrate_kbps = bitrate_kbps;
        debug!("H.265 bitrate updated to {} kbps", bitrate_kbps);
        Ok(())
    }

    /// Request next frame to be a keyframe (IDR)
    pub fn request_keyframe(&mut self) {
        self.inner.request_keyframe();
        debug!("H265 keyframe requested");
    }

    /// Encode raw frame data (NV12 or YUV420P depending on config)
    pub fn encode_raw(&mut self, data: &[u8], pts_ms: i64) -> Result<Vec<HwEncodeFrame>> {
        if data.len() < self.buffer_length as usize {
            return Err(AppError::VideoError(format!(
                "Frame data too small: {} < {}",
                data.len(),
                self.buffer_length
            )));
        }

        self.frame_count += 1;

        // Debug log every 30 frames (1 second at 30fps)
        if self.frame_count % 30 == 1 {
            debug!(
                "[H265] Encoding frame #{}: input_size={}, pts_ms={}, codec={}",
                self.frame_count,
                data.len(),
                pts_ms,
                self.codec_name
            );
        }

        match self.inner.encode(data, pts_ms) {
            Ok(frames) => {
                let owned_frames: Vec<HwEncodeFrame> = frames
                    .iter()
                    .map(|f| HwEncodeFrame {
                        data: f.data.clone(),
                        pts: f.pts,
                        key: f.key,
                    })
                    .collect();

                // Log encoded output
                if !owned_frames.is_empty() {
                    let total_size: usize = owned_frames.iter().map(|f| f.data.len()).sum();
                    let keyframe = owned_frames.iter().any(|f| f.key == 1);

                    if keyframe || self.frame_count % 30 == 1 {
                        debug!(
                            "[H265] Encoded frame #{}: output_size={}, keyframe={}, frame_count={}",
                            self.frame_count, total_size, keyframe, owned_frames.len()
                        );

                        // Log first few bytes of keyframe for debugging
                        if keyframe && !owned_frames[0].data.is_empty() {
                            let preview_len = owned_frames[0].data.len().min(32);
                            debug!(
                                "[H265] Keyframe data preview: {:02x?}",
                                &owned_frames[0].data[..preview_len]
                            );
                        }
                    }
                } else {
                    warn!("[H265] Encoder returned empty frame list for frame #{}", self.frame_count);
                }

                Ok(owned_frames)
            }
            Err(e) => {
                error!("[H265] Encode failed at frame #{}: {}", self.frame_count, e);
                Err(AppError::VideoError(format!("H.265 encode failed: {}", e)))
            }
        }
    }

    /// Encode NV12 data
    pub fn encode_nv12(&mut self, nv12_data: &[u8], pts_ms: i64) -> Result<Vec<HwEncodeFrame>> {
        self.encode_raw(nv12_data, pts_ms)
    }

    /// Get input format
    pub fn input_format(&self) -> H265InputFormat {
        self.config.input_format
    }

    /// Get buffer info (linesize, offset, length)
    pub fn buffer_info(&self) -> (Vec<i32>, Vec<i32>, i32) {
        (
            self.inner.linesize.clone(),
            self.inner.offset.clone(),
            self.inner.length,
        )
    }
}

// SAFETY: H265Encoder contains hwcodec::ffmpeg_ram::encode::Encoder which has raw pointers
// that are not Send by default. However, we ensure that H265Encoder is only used from
// a single task/thread at a time (encoding is sequential), so this is safe.
unsafe impl Send for H265Encoder {}

impl Encoder for H265Encoder {
    fn name(&self) -> &str {
        &self.codec_name
    }

    fn output_format(&self) -> EncodedFormat {
        EncodedFormat::H265
    }

    fn encode(&mut self, data: &[u8], sequence: u64) -> Result<EncodedFrame> {
        let pts_ms = (sequence * 1000 / self.config.fps as u64) as i64;
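        // Worked example (comment added for clarity): at fps = 30, sequence 90
        // maps to pts_ms = 90 * 1000 / 30 = 3000, i.e. three seconds of video.
        // Integer division truncates, so timestamps lose sub-millisecond precision.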

        let frames = self.encode_raw(data, pts_ms)?;

        if frames.is_empty() {
            warn!("H.265 encoder returned no frames");
            return Err(AppError::VideoError(
                "H.265 encoder returned no frames".to_string(),
            ));
        }

        let frame = &frames[0];
        let key_frame = frame.key == 1;

        Ok(EncodedFrame {
            data: Bytes::from(frame.data.clone()),
            format: EncodedFormat::H265,
            resolution: self.config.base.resolution,
            key_frame,
            sequence,
            timestamp: std::time::Instant::now(),
            pts: frame.pts as u64,
            dts: frame.pts as u64,
        })
    }

    fn flush(&mut self) -> Result<Vec<EncodedFrame>> {
        Ok(vec![])
    }

    fn reset(&mut self) -> Result<()> {
        self.frame_count = 0;
        Ok(())
    }

    fn config(&self) -> &EncoderConfig {
        &self.config.base
    }

    fn supports_format(&self, format: PixelFormat) -> bool {
        match self.config.input_format {
            H265InputFormat::Nv12 => matches!(format, PixelFormat::Nv12),
            H265InputFormat::Yuv420p => matches!(format, PixelFormat::Yuv420),
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_detect_h265_encoder() {
        let (encoder_type, codec_name) = detect_best_h265_encoder(1280, 720);
        println!("Detected H.265 encoder: {:?} ({:?})", encoder_type, codec_name);
    }

    #[test]
    fn test_available_h265_encoders() {
        let encoders = get_available_h265_encoders(1280, 720);
        println!("Available H.265 encoders:");
        for enc in &encoders {
            println!("  - {} ({:?})", enc.name, enc.format);
        }
    }

    #[test]
    fn test_h265_availability() {
        let available = is_h265_available();
        println!("H.265 hardware encoding available: {}", available);
    }
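
    // Added illustrative test (not in the original source): checks the
    // documented invariants of `H265Config::low_latency` without touching any
    // encoder backend, so it should pass on any machine.
    #[test]
    fn test_h265_low_latency_config() {
        let config = H265Config::low_latency(Resolution::new(1280, 720), 4000);
        assert_eq!(config.bitrate_kbps, 4000);
        // The bitrate is mirrored into the base config's quality field.
        assert_eq!(config.base.quality, 4000);
        assert_eq!(config.input_format, H265InputFormat::Nv12);
    }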
}
226
src/video/encoder/jpeg.rs
Normal file
@@ -0,0 +1,226 @@
//! JPEG encoder implementation
//!
//! Provides JPEG encoding for raw video frames (YUYV, NV12, RGB, BGR)
//! Uses libyuv for SIMD-accelerated color space conversion to I420,
//! then turbojpeg for direct YUV encoding (skips internal color conversion).

use bytes::Bytes;

use super::traits::{EncodedFormat, EncodedFrame, EncoderConfig};
use crate::error::{AppError, Result};
use crate::video::format::{PixelFormat, Resolution};

/// JPEG encoder using libyuv + turbojpeg
///
/// Encoding pipeline (all SIMD accelerated):
/// ```text
/// YUYV/NV12/BGR24/RGB24 ──libyuv──> I420 ──turbojpeg──> JPEG
/// ```
///
/// Note: This encoder is NOT thread-safe due to turbojpeg limitations.
/// Use it from a single thread or wrap in a Mutex.
pub struct JpegEncoder {
    config: EncoderConfig,
    compressor: turbojpeg::Compressor,
    /// I420 buffer for YUV encoding (Y + U + V planes)
    i420_buffer: Vec<u8>,
}

impl JpegEncoder {
    /// Create a new JPEG encoder
    pub fn new(config: EncoderConfig) -> Result<Self> {
        let resolution = config.resolution;
        let width = resolution.width as usize;
        let height = resolution.height as usize;
        // I420: Y = width*height, U = width*height/4, V = width*height/4
        let i420_size = width * height * 3 / 2;

        let mut compressor = turbojpeg::Compressor::new()
            .map_err(|e| AppError::VideoError(format!("Failed to create turbojpeg compressor: {}", e)))?;

        compressor.set_quality(config.quality.min(100) as i32)
            .map_err(|e| AppError::VideoError(format!("Failed to set JPEG quality: {}", e)))?;

        Ok(Self {
            config,
            compressor,
            i420_buffer: vec![0u8; i420_size],
        })
    }

    /// Create with specific quality
    pub fn with_quality(resolution: Resolution, quality: u32) -> Result<Self> {
        let config = EncoderConfig::jpeg(resolution, quality);
        Self::new(config)
    }

    /// Set JPEG quality (1-100)
    pub fn set_quality(&mut self, quality: u32) -> Result<()> {
        self.compressor.set_quality(quality.min(100) as i32)
            .map_err(|e| AppError::VideoError(format!("Failed to set JPEG quality: {}", e)))?;
        self.config.quality = quality;
        Ok(())
    }

    /// Encode I420 buffer to JPEG using turbojpeg's YUV encoder
    #[inline]
    fn encode_i420_to_jpeg(&mut self, sequence: u64) -> Result<EncodedFrame> {
        let width = self.config.resolution.width as usize;
        let height = self.config.resolution.height as usize;

        // Create YuvImage for turbojpeg (I420 = YUV420 = Sub2x2)
        let yuv_image = turbojpeg::YuvImage {
            pixels: self.i420_buffer.as_slice(),
            width,
            height,
            align: 1, // No padding between rows
            subsamp: turbojpeg::Subsamp::Sub2x2, // YUV 4:2:0
        };

        // Compress YUV directly to JPEG (skips color space conversion!)
        let jpeg_data = self.compressor.compress_yuv_to_vec(yuv_image)
            .map_err(|e| AppError::VideoError(format!("JPEG compression failed: {}", e)))?;

        Ok(EncodedFrame::jpeg(
            Bytes::from(jpeg_data),
            self.config.resolution,
            sequence,
        ))
    }

    /// Encode YUYV (YUV422) frame to JPEG
    pub fn encode_yuyv(&mut self, data: &[u8], sequence: u64) -> Result<EncodedFrame> {
        let width = self.config.resolution.width as usize;
        let height = self.config.resolution.height as usize;
        let expected_size = width * height * 2;

        if data.len() < expected_size {
            return Err(AppError::VideoError(format!(
                "YUYV data too small: {} < {}",
                data.len(),
                expected_size
            )));
        }

        // Convert YUYV to I420 using libyuv (SIMD accelerated)
        libyuv::yuy2_to_i420(data, &mut self.i420_buffer, width as i32, height as i32)
            .map_err(|e| AppError::VideoError(format!("libyuv YUYV→I420 failed: {}", e)))?;

        self.encode_i420_to_jpeg(sequence)
    }

    /// Encode NV12 frame to JPEG
    pub fn encode_nv12(&mut self, data: &[u8], sequence: u64) -> Result<EncodedFrame> {
        let width = self.config.resolution.width as usize;
        let height = self.config.resolution.height as usize;
        let expected_size = width * height * 3 / 2;

        if data.len() < expected_size {
            return Err(AppError::VideoError(format!(
                "NV12 data too small: {} < {}",
                data.len(),
                expected_size
            )));
        }

        // Convert NV12 to I420 using libyuv (SIMD accelerated)
        libyuv::nv12_to_i420(data, &mut self.i420_buffer, width as i32, height as i32)
            .map_err(|e| AppError::VideoError(format!("libyuv NV12→I420 failed: {}", e)))?;

        self.encode_i420_to_jpeg(sequence)
    }

    /// Encode RGB24 frame to JPEG
    pub fn encode_rgb(&mut self, data: &[u8], sequence: u64) -> Result<EncodedFrame> {
        let width = self.config.resolution.width as usize;
        let height = self.config.resolution.height as usize;
        let expected_size = width * height * 3;

        if data.len() < expected_size {
            return Err(AppError::VideoError(format!(
                "RGB data too small: {} < {}",
                data.len(),
                expected_size
            )));
        }

        // Convert RGB24 to I420 using libyuv (SIMD accelerated)
        libyuv::rgb24_to_i420(data, &mut self.i420_buffer, width as i32, height as i32)
            .map_err(|e| AppError::VideoError(format!("libyuv RGB24→I420 failed: {}", e)))?;

        self.encode_i420_to_jpeg(sequence)
    }

    /// Encode BGR24 frame to JPEG
    pub fn encode_bgr(&mut self, data: &[u8], sequence: u64) -> Result<EncodedFrame> {
        let width = self.config.resolution.width as usize;
        let height = self.config.resolution.height as usize;
        let expected_size = width * height * 3;

        if data.len() < expected_size {
            return Err(AppError::VideoError(format!(
                "BGR data too small: {} < {}",
                data.len(),
                expected_size
            )));
        }

        // Convert BGR24 to I420 using libyuv (SIMD accelerated)
        // Note: libyuv's RAWToI420 is BGR24 → I420
        libyuv::bgr24_to_i420(data, &mut self.i420_buffer, width as i32, height as i32)
            .map_err(|e| AppError::VideoError(format!("libyuv BGR24→I420 failed: {}", e)))?;

        self.encode_i420_to_jpeg(sequence)
    }
}

impl crate::video::encoder::traits::Encoder for JpegEncoder {
    fn name(&self) -> &str {
        "JPEG (libyuv+turbojpeg)"
    }

    fn output_format(&self) -> EncodedFormat {
        EncodedFormat::Jpeg
    }

    fn encode(&mut self, data: &[u8], sequence: u64) -> Result<EncodedFrame> {
        match self.config.input_format {
            PixelFormat::Yuyv | PixelFormat::Yvyu => self.encode_yuyv(data, sequence),
            PixelFormat::Nv12 => self.encode_nv12(data, sequence),
            PixelFormat::Rgb24 => self.encode_rgb(data, sequence),
            PixelFormat::Bgr24 => self.encode_bgr(data, sequence),
            _ => Err(AppError::VideoError(format!(
                "Unsupported input format for JPEG: {}",
                self.config.input_format
            ))),
        }
    }

    fn config(&self) -> &EncoderConfig {
        &self.config
    }

    fn supports_format(&self, format: PixelFormat) -> bool {
        matches!(
            format,
            PixelFormat::Yuyv
                | PixelFormat::Yvyu
                | PixelFormat::Nv12
                | PixelFormat::Rgb24
                | PixelFormat::Bgr24
        )
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_i420_buffer_size() {
        // 1920x1080 I420 = 1920*1080 + 960*540 + 960*540 = 3110400 bytes
        let config = EncoderConfig::jpeg(Resolution::HD1080, 80);
        let encoder = JpegEncoder::new(config).unwrap();
        assert_eq!(encoder.i420_buffer.len(), 1920 * 1080 * 3 / 2);
    }
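
    // Added illustrative test (not in the original source): an undersized
    // YUYV buffer must be rejected before any conversion is attempted.
    // Uses only constructors defined in this file.
    #[test]
    fn test_yuyv_rejects_short_input() {
        let mut encoder = JpegEncoder::with_quality(Resolution::HD1080, 80).unwrap();
        // 1920x1080 YUYV needs width*height*2 bytes; pass far fewer.
        let short = vec![0u8; 16];
        assert!(encoder.encode_yuyv(&short, 0).is_err());
    }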
}
43
src/video/encoder/mod.rs
Normal file
@@ -0,0 +1,43 @@
//! Video encoder implementations
//!
//! This module provides video encoding capabilities including:
//! - JPEG encoding for raw frames (YUYV, NV12, etc.)
//! - H264 encoding (hardware + software)
//! - H265 encoding (hardware preferred, libx265 fallback)
//! - VP8 encoding (hardware preferred - VAAPI, libvpx fallback)
//! - VP9 encoding (hardware preferred - VAAPI, libvpx-vp9 fallback)
//! - WebRTC video codec abstraction
//! - Encoder registry for automatic detection

pub mod codec;
pub mod h264;
pub mod h265;
pub mod jpeg;
pub mod registry;
pub mod traits;
pub mod vp8;
pub mod vp9;

// Core traits and types
pub use traits::{EncodedFormat, EncodedFrame, Encoder, EncoderConfig, EncoderFactory};

// WebRTC codec abstraction
pub use codec::{CodecFrame, VideoCodec, VideoCodecConfig, VideoCodecFactory, VideoCodecType};

// Encoder registry
pub use registry::{AvailableEncoder, EncoderBackend, EncoderRegistry, VideoEncoderType};

// H264 encoder
pub use h264::{H264Config, H264Encoder, H264EncoderType, H264InputFormat};

// H265 encoder (hardware preferred, software fallback)
pub use h265::{H265Config, H265Encoder, H265EncoderType, H265InputFormat};

// VP8 encoder (hardware preferred, software fallback)
pub use vp8::{VP8Config, VP8Encoder, VP8EncoderType, VP8InputFormat};

// VP9 encoder (hardware preferred, software fallback)
pub use vp9::{VP9Config, VP9Encoder, VP9EncoderType, VP9InputFormat};

// JPEG encoder
pub use jpeg::JpegEncoder;
531
src/video/encoder/registry.rs
Normal file
@@ -0,0 +1,531 @@
//! Encoder registry - Detection and management of available video encoders
//!
//! This module provides:
//! - Automatic detection of available hardware/software encoders
//! - Encoder selection based on format and priority
//! - Global registry for encoder availability queries

use std::collections::HashMap;
use std::sync::OnceLock;
use tracing::{debug, info};

use hwcodec::common::{DataFormat, Quality, RateControl};
use hwcodec::ffmpeg::AVPixelFormat;
use hwcodec::ffmpeg_ram::encode::{EncodeContext, Encoder as HwEncoder};
use hwcodec::ffmpeg_ram::CodecInfo;

/// Video encoder format type
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum VideoEncoderType {
    /// H.264/AVC
    H264,
    /// H.265/HEVC
    H265,
    /// VP8
    VP8,
    /// VP9
    VP9,
}

impl VideoEncoderType {
    /// Convert to hwcodec DataFormat
    pub fn to_data_format(&self) -> DataFormat {
        match self {
            VideoEncoderType::H264 => DataFormat::H264,
            VideoEncoderType::H265 => DataFormat::H265,
            VideoEncoderType::VP8 => DataFormat::VP8,
            VideoEncoderType::VP9 => DataFormat::VP9,
        }
    }

    /// Create from hwcodec DataFormat
    pub fn from_data_format(format: DataFormat) -> Option<Self> {
        match format {
            DataFormat::H264 => Some(VideoEncoderType::H264),
            DataFormat::H265 => Some(VideoEncoderType::H265),
            DataFormat::VP8 => Some(VideoEncoderType::VP8),
            DataFormat::VP9 => Some(VideoEncoderType::VP9),
            _ => None,
        }
    }

    /// Get codec name prefix for FFmpeg
    pub fn codec_prefix(&self) -> &'static str {
        match self {
            VideoEncoderType::H264 => "h264",
            VideoEncoderType::H265 => "hevc",
            VideoEncoderType::VP8 => "vp8",
            VideoEncoderType::VP9 => "vp9",
        }
    }

    /// Get display name
    pub fn display_name(&self) -> &'static str {
        match self {
            VideoEncoderType::H264 => "H.264",
            VideoEncoderType::H265 => "H.265/HEVC",
            VideoEncoderType::VP8 => "VP8",
            VideoEncoderType::VP9 => "VP9",
        }
    }

    /// Check if this format requires hardware-only encoding
    /// H264 supports software fallback, others require hardware
    pub fn hardware_only(&self) -> bool {
        match self {
            VideoEncoderType::H264 => false,
            VideoEncoderType::H265 => true,
            VideoEncoderType::VP8 => true,
            VideoEncoderType::VP9 => true,
        }
    }
}

impl std::fmt::Display for VideoEncoderType {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.display_name())
    }
}

/// Encoder backend type
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum EncoderBackend {
    /// Intel/AMD/NVIDIA VAAPI (Linux)
    Vaapi,
    /// NVIDIA NVENC
    Nvenc,
    /// Intel Quick Sync Video
    Qsv,
    /// AMD AMF
    Amf,
    /// Rockchip MPP
    Rkmpp,
    /// V4L2 Memory-to-Memory (ARM)
    V4l2m2m,
    /// Software encoding (libx264, libx265, libvpx)
    Software,
}

impl EncoderBackend {
    /// Detect backend from codec name
    pub fn from_codec_name(name: &str) -> Self {
        if name.contains("vaapi") {
            EncoderBackend::Vaapi
        } else if name.contains("nvenc") {
            EncoderBackend::Nvenc
        } else if name.contains("qsv") {
            EncoderBackend::Qsv
        } else if name.contains("amf") {
            EncoderBackend::Amf
        } else if name.contains("rkmpp") {
            EncoderBackend::Rkmpp
        } else if name.contains("v4l2m2m") {
            EncoderBackend::V4l2m2m
        } else {
            EncoderBackend::Software
        }
    }

    /// Check if this is a hardware backend
    pub fn is_hardware(&self) -> bool {
        !matches!(self, EncoderBackend::Software)
    }

    /// Get display name
    pub fn display_name(&self) -> &'static str {
        match self {
            EncoderBackend::Vaapi => "VAAPI",
            EncoderBackend::Nvenc => "NVENC",
            EncoderBackend::Qsv => "QSV",
            EncoderBackend::Amf => "AMF",
            EncoderBackend::Rkmpp => "RKMPP",
            EncoderBackend::V4l2m2m => "V4L2 M2M",
            EncoderBackend::Software => "Software",
        }
    }

    /// Parse from string (case-insensitive)
    pub fn from_str(s: &str) -> Option<Self> {
        match s.to_lowercase().as_str() {
            "vaapi" => Some(EncoderBackend::Vaapi),
            "nvenc" => Some(EncoderBackend::Nvenc),
            "qsv" => Some(EncoderBackend::Qsv),
            "amf" => Some(EncoderBackend::Amf),
            "rkmpp" => Some(EncoderBackend::Rkmpp),
            "v4l2m2m" | "v4l2" => Some(EncoderBackend::V4l2m2m),
            "software" | "cpu" => Some(EncoderBackend::Software),
            _ => None,
        }
    }
}

impl std::fmt::Display for EncoderBackend {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.display_name())
    }
}

/// Information about an available encoder
#[derive(Debug, Clone)]
pub struct AvailableEncoder {
    /// Encoder format type
    pub format: VideoEncoderType,
    /// FFmpeg codec name (e.g., "h264_vaapi", "hevc_nvenc")
    pub codec_name: String,
    /// Backend type
    pub backend: EncoderBackend,
    /// Priority (lower is better)
    pub priority: i32,
    /// Whether this is a hardware encoder
    pub is_hardware: bool,
}

impl AvailableEncoder {
    /// Create from hwcodec CodecInfo
    pub fn from_codec_info(info: &CodecInfo) -> Option<Self> {
        let format = VideoEncoderType::from_data_format(info.format)?;
        let backend = EncoderBackend::from_codec_name(&info.name);
        let is_hardware = backend.is_hardware();

        Some(Self {
            format,
            codec_name: info.name.clone(),
            backend,
            priority: info.priority,
            is_hardware,
        })
    }
}

/// Global encoder registry
///
/// Detects and caches available encoders at startup.
/// Use `EncoderRegistry::global()` to access the singleton instance.
pub struct EncoderRegistry {
    /// Available encoders grouped by format
    encoders: HashMap<VideoEncoderType, Vec<AvailableEncoder>>,
    /// Detection resolution (used for testing)
    detection_resolution: (u32, u32),
}

impl EncoderRegistry {
    /// Get the global registry instance
    ///
    /// The registry is initialized lazily on first access with 1920x1080 detection.
    pub fn global() -> &'static Self {
        static INSTANCE: OnceLock<EncoderRegistry> = OnceLock::new();
        INSTANCE.get_or_init(|| {
            let mut registry = EncoderRegistry::new();
            registry.detect_encoders(1920, 1080);
            registry
        })
    }
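
    // Illustrative usage sketch (not from the original source): a typical
    // query against the global registry. `VideoEncoderType` and
    // `best_encoder` are defined in this module; the caller is assumed.
    //
    //     let registry = EncoderRegistry::global();
    //     if let Some(enc) = registry.best_encoder(VideoEncoderType::H264, false) {
    //         println!("Best H.264 encoder: {} via {}", enc.codec_name, enc.backend);
    //     }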

    /// Create a new empty registry
    pub fn new() -> Self {
        Self {
            encoders: HashMap::new(),
            detection_resolution: (0, 0),
        }
    }

    /// Detect all available encoders
    ///
    /// This queries hwcodec/FFmpeg for available encoders and populates the registry.
    pub fn detect_encoders(&mut self, width: u32, height: u32) {
        info!("Detecting available video encoders at {}x{}", width, height);

        self.encoders.clear();
        self.detection_resolution = (width, height);

        // Create test context for encoder detection
        let ctx = EncodeContext {
            name: String::new(),
            mc_name: None,
            width: width as i32,
            height: height as i32,
            pixfmt: AVPixelFormat::AV_PIX_FMT_NV12,
            align: 1,
            fps: 30,
            gop: 30,
            rc: RateControl::RC_CBR,
            quality: Quality::Quality_Default,
            kbs: 2000,
            q: 23,
            thread_count: 1,
        };

        // Get all available encoders from hwcodec
        let all_encoders = HwEncoder::available_encoders(ctx, None);

        info!("Found {} encoders from hwcodec", all_encoders.len());

        for codec_info in &all_encoders {
            if let Some(encoder) = AvailableEncoder::from_codec_info(codec_info) {
                debug!(
                    "Detected encoder: {} ({}) - {} priority={}",
                    encoder.codec_name,
                    encoder.format,
                    encoder.backend,
                    encoder.priority
                );

                self.encoders
                    .entry(encoder.format)
                    .or_default()
                    .push(encoder);
            }
        }

        // Sort encoders by priority (lower is better)
        for encoders in self.encoders.values_mut() {
            encoders.sort_by_key(|e| e.priority);
        }

        // Register software encoders as fallback
        info!("Registering software encoders...");
        let software_encoders = [
            (VideoEncoderType::H264, "libx264", 100),
            (VideoEncoderType::H265, "libx265", 100),
            (VideoEncoderType::VP8, "libvpx", 100),
            (VideoEncoderType::VP9, "libvpx-vp9", 100),
        ];

        for (format, codec_name, priority) in software_encoders {
            self.encoders
                .entry(format)
                .or_default()
                .push(AvailableEncoder {
                    format,
                    codec_name: codec_name.to_string(),
                    backend: EncoderBackend::Software,
                    priority,
                    is_hardware: false,
                });

            debug!(
                "Registered software encoder: {} for {} (priority: {})",
                codec_name, format, priority
            );
        }

        // Log summary
        for (format, encoders) in &self.encoders {
            let hw_count = encoders.iter().filter(|e| e.is_hardware).count();
            let sw_count = encoders.len() - hw_count;
            info!(
                "{}: {} encoders ({} hardware, {} software)",
                format,
                encoders.len(),
                hw_count,
                sw_count
            );
        }
    }

    /// Get the best encoder for a format
    ///
    /// # Arguments
    /// * `format` - The video format to encode
    /// * `hardware_only` - If true, only return hardware encoders
    ///
    /// # Returns
    /// The best available encoder, or None if no suitable encoder is found
    pub fn best_encoder(
        &self,
        format: VideoEncoderType,
        hardware_only: bool,
    ) -> Option<&AvailableEncoder> {
        self.encoders.get(&format)?.iter().find(|e| {
            if hardware_only {
                e.is_hardware
            } else {
                true
            }
        })
    }

    /// Get all encoders for a format
    pub fn encoders_for_format(&self, format: VideoEncoderType) -> &[AvailableEncoder] {
        self.encoders
            .get(&format)
            .map(|v| v.as_slice())
            .unwrap_or(&[])
    }

    /// Get all available formats
    ///
    /// # Arguments
    /// * `hardware_only` - If true, only return formats with hardware encoders
    pub fn available_formats(&self, hardware_only: bool) -> Vec<VideoEncoderType> {
        self.encoders
            .iter()
            .filter(|(_, encoders)| {
                if hardware_only {
                    encoders.iter().any(|e| e.is_hardware)
                } else {
                    !encoders.is_empty()
                }
            })
            .map(|(format, _)| *format)
            .collect()
    }

    /// Check if a format is available
    ///
    /// # Arguments
    /// * `format` - The video format to check
    /// * `hardware_only` - If true, only check for hardware encoders
    pub fn is_format_available(&self, format: VideoEncoderType, hardware_only: bool) -> bool {
        self.best_encoder(format, hardware_only).is_some()
    }

    /// Get available formats for user selection
    ///
    /// Returns formats that are actually usable based on their requirements:
    /// - H264: Available if any encoder exists (hardware or software)
    /// - H265/VP8/VP9: Available only if hardware encoder exists
    pub fn selectable_formats(&self) -> Vec<VideoEncoderType> {
        let mut formats = Vec::new();

        // H264 - supports software fallback
        if self.is_format_available(VideoEncoderType::H264, false) {
            formats.push(VideoEncoderType::H264);
        }

        // H265/VP8/VP9 - hardware only
        for format in [
            VideoEncoderType::H265,
            VideoEncoderType::VP8,
            VideoEncoderType::VP9,
        ] {
            if self.is_format_available(format, true) {
                formats.push(format);
            }
        }

        formats
    }
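
    // Illustrative sketch (not from the original source): feeding
    // `selectable_formats` into a UI selector. `display_name` is defined
    // above; the `dropdown` object is an assumed caller.
    //
    //     for format in registry.selectable_formats() {
    //         dropdown.add_option(format.display_name());
    //     }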

    /// Get detection resolution
    pub fn detection_resolution(&self) -> (u32, u32) {
        self.detection_resolution
    }

    /// Get all available backend types
    pub fn available_backends(&self) -> Vec<EncoderBackend> {
        use std::collections::HashSet;

        let mut backends = HashSet::new();
        for encoders in self.encoders.values() {
            for encoder in encoders {
                backends.insert(encoder.backend);
            }
        }

        let mut result: Vec<_> = backends.into_iter().collect();
        // Sort: hardware backends first, software last
        result.sort_by_key(|b| if b.is_hardware() { 0 } else { 1 });
        result
    }

    /// Get formats supported by a specific backend
    pub fn formats_for_backend(&self, backend: EncoderBackend) -> Vec<VideoEncoderType> {
        let mut formats = Vec::new();
        for (format, encoders) in &self.encoders {
            if encoders.iter().any(|e| e.backend == backend) {
                formats.push(*format);
            }
        }
        formats
    }

    /// Get encoder for a format with specific backend
    pub fn encoder_with_backend(
        &self,
        format: VideoEncoderType,
        backend: EncoderBackend,
    ) -> Option<&AvailableEncoder> {
        self.encoders
            .get(&format)?
            .iter()
            .find(|e| e.backend == backend)
    }

    /// Get encoders grouped by backend for a format
    pub fn encoders_by_backend(
        &self,
        format: VideoEncoderType,
    ) -> HashMap<EncoderBackend, Vec<&AvailableEncoder>> {
        let mut grouped = HashMap::new();
        if let Some(encoders) = self.encoders.get(&format) {
            for encoder in encoders {
                grouped
                    .entry(encoder.backend)
                    .or_insert_with(Vec::new)
                    .push(encoder);
            }
        }
        grouped
    }
}

impl Default for EncoderRegistry {
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_video_encoder_type_display() {
        assert_eq!(VideoEncoderType::H264.display_name(), "H.264");
        assert_eq!(VideoEncoderType::H265.display_name(), "H.265/HEVC");
        assert_eq!(VideoEncoderType::VP8.display_name(), "VP8");
        assert_eq!(VideoEncoderType::VP9.display_name(), "VP9");
    }

    #[test]
    fn test_encoder_backend_detection() {
        assert_eq!(
            EncoderBackend::from_codec_name("h264_vaapi"),
            EncoderBackend::Vaapi
        );
        assert_eq!(
            EncoderBackend::from_codec_name("hevc_nvenc"),
            EncoderBackend::Nvenc
        );
        assert_eq!(
            EncoderBackend::from_codec_name("h264_qsv"),
            EncoderBackend::Qsv
        );
        assert_eq!(
            EncoderBackend::from_codec_name("libx264"),
            EncoderBackend::Software
        );
    }

    #[test]
    fn test_hardware_only_requirement() {
        assert!(!VideoEncoderType::H264.hardware_only());
        assert!(VideoEncoderType::H265.hardware_only());
        assert!(VideoEncoderType::VP8.hardware_only());
        assert!(VideoEncoderType::VP9.hardware_only());
    }

    #[test]
    fn test_registry_detection() {
        let mut registry = EncoderRegistry::new();
        registry.detect_encoders(1280, 720);

        // Should have detected at least H264 (software fallback available)
        println!("Available formats: {:?}", registry.available_formats(false));
        println!(
            "Selectable formats: {:?}",
            registry.selectable_formats()
        );
    }
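
    // Added illustrative test (not in the original source): round-trips a few
    // backend names through `from_str`, exercising only code defined in this
    // module.
    #[test]
    fn test_backend_from_str() {
        assert_eq!(EncoderBackend::from_str("VAAPI"), Some(EncoderBackend::Vaapi));
        assert_eq!(EncoderBackend::from_str("cpu"), Some(EncoderBackend::Software));
        assert_eq!(EncoderBackend::from_str("v4l2"), Some(EncoderBackend::V4l2m2m));
        assert_eq!(EncoderBackend::from_str("unknown"), None);
    }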
}
188
src/video/encoder/traits.rs
Normal file
@@ -0,0 +1,188 @@
//! Encoder traits and common types

use bytes::Bytes;
use std::time::Instant;

use crate::video::format::{PixelFormat, Resolution};
use crate::error::Result;

/// Encoder configuration
#[derive(Debug, Clone)]
pub struct EncoderConfig {
    /// Target resolution
    pub resolution: Resolution,
    /// Input pixel format
    pub input_format: PixelFormat,
    /// Output quality (1-100 for JPEG, bitrate in kbps for H264)
    pub quality: u32,
    /// Target frame rate
    pub fps: u32,
    /// Keyframe interval (for H264)
    pub gop_size: u32,
}

impl Default for EncoderConfig {
    fn default() -> Self {
        Self {
            resolution: Resolution::HD1080,
            input_format: PixelFormat::Yuyv,
            quality: 80,
            fps: 30,
            gop_size: 30,
        }
    }
}

impl EncoderConfig {
    pub fn jpeg(resolution: Resolution, quality: u32) -> Self {
        Self {
            resolution,
            input_format: PixelFormat::Yuyv,
            quality,
            fps: 30,
            gop_size: 1,
        }
    }

    pub fn h264(resolution: Resolution, bitrate_kbps: u32) -> Self {
        Self {
            resolution,
            input_format: PixelFormat::Yuyv,
            quality: bitrate_kbps,
            fps: 30,
            gop_size: 30,
        }
    }
}
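
// Illustrative sketch (not from the original source): the `quality` field is
// overloaded, so the two constructors above interpret the same argument
// differently. Both calls use only items defined in this file.
//
//     let jpeg_cfg = EncoderConfig::jpeg(Resolution::HD1080, 80);   // quality = JPEG quality 80
//     let h264_cfg = EncoderConfig::h264(Resolution::HD1080, 4000); // quality = 4000 kbps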

/// Encoded frame output
#[derive(Debug, Clone)]
pub struct EncodedFrame {
    /// Encoded data
    pub data: Bytes,
    /// Output format (JPEG, H264, etc.)
    pub format: EncodedFormat,
    /// Resolution
    pub resolution: Resolution,
    /// Whether this is a key frame
    pub key_frame: bool,
    /// Frame sequence number
    pub sequence: u64,
    /// Encoding timestamp
    pub timestamp: Instant,
    /// Presentation timestamp (for video sync)
    pub pts: u64,
    /// Decode timestamp (for B-frames)
    pub dts: u64,
}

impl EncodedFrame {
    pub fn jpeg(data: Bytes, resolution: Resolution, sequence: u64) -> Self {
        Self {
            data,
            format: EncodedFormat::Jpeg,
            resolution,
            key_frame: true,
            sequence,
            timestamp: Instant::now(),
            pts: sequence,
            dts: sequence,
        }
    }

    pub fn h264(
        data: Bytes,
        resolution: Resolution,
        key_frame: bool,
        sequence: u64,
        pts: u64,
        dts: u64,
    ) -> Self {
        Self {
            data,
            format: EncodedFormat::H264,
            resolution,
            key_frame,
            sequence,
            timestamp: Instant::now(),
            pts,
            dts,
        }
    }

    pub fn len(&self) -> usize {
        self.data.len()
    }

    pub fn is_empty(&self) -> bool {
        self.data.is_empty()
    }
}

/// Encoded output format
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum EncodedFormat {
    Jpeg,
    H264,
    H265,
    Vp8,
    Vp9,
    Av1,
}

impl std::fmt::Display for EncodedFormat {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            EncodedFormat::Jpeg => write!(f, "JPEG"),
            EncodedFormat::H264 => write!(f, "H.264"),
            EncodedFormat::H265 => write!(f, "H.265"),
            EncodedFormat::Vp8 => write!(f, "VP8"),
            EncodedFormat::Vp9 => write!(f, "VP9"),
            EncodedFormat::Av1 => write!(f, "AV1"),
        }
    }
}

/// Generic encoder trait
/// Note: Not Sync because some encoders (like turbojpeg) are not thread-safe
pub trait Encoder: Send {
    /// Get encoder name
    fn name(&self) -> &str;

    /// Get output format
    fn output_format(&self) -> EncodedFormat;

    /// Encode a raw frame
    fn encode(&mut self, data: &[u8], sequence: u64) -> Result<EncodedFrame>;

    /// Flush any pending frames
    fn flush(&mut self) -> Result<Vec<EncodedFrame>> {
        Ok(vec![])
    }

    /// Reset encoder state
    fn reset(&mut self) -> Result<()> {
        Ok(())
    }

    /// Get current configuration
    fn config(&self) -> &EncoderConfig;

    /// Check if encoder supports the given input format
    fn supports_format(&self, format: PixelFormat) -> bool;
}
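
// Illustrative sketch (not from the original source): a generic capture loop
// driving any encoder behind `Box<dyn Encoder>`. The `raw_frames` iterator and
// `send` sink are assumptions; the trait methods are the ones defined above.
//
//     fn pump(mut enc: Box<dyn Encoder>, raw_frames: impl Iterator<Item = Vec<u8>>) {
//         for (seq, raw) in raw_frames.enumerate() {
//             match enc.encode(&raw, seq as u64) {
//                 Ok(frame) => send(frame),
//                 Err(e) => tracing::warn!("{} encode failed: {}", enc.name(), e),
//             }
//         }
//     }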

/// Encoder factory for creating encoders
pub trait EncoderFactory: Send + Sync {
    /// Create an encoder with the given configuration
    fn create(&self, config: EncoderConfig) -> Result<Box<dyn Encoder>>;

    /// Get encoder type name
    fn encoder_type(&self) -> &str;

    /// Check if this encoder is available on the system
    fn is_available(&self) -> bool;

    /// Get encoder priority (higher = preferred)
    fn priority(&self) -> u32;
}
488
src/video/encoder/vp8.rs
Normal file
@@ -0,0 +1,488 @@
//! VP8 encoder using hwcodec (FFmpeg wrapper)
//!
//! Supports both hardware and software encoding:
//! - Hardware: VAAPI (Intel on Linux)
//! - Software: libvpx (CPU-based, high CPU usage)
//!
//! Hardware encoding is preferred when available for better performance.

use bytes::Bytes;
use std::sync::Once;
use tracing::{debug, error, info, warn};

use hwcodec::common::{DataFormat, Quality, RateControl};
use hwcodec::ffmpeg::AVPixelFormat;
use hwcodec::ffmpeg_ram::encode::{EncodeContext, Encoder as HwEncoder};
use hwcodec::ffmpeg_ram::CodecInfo;

use super::registry::{EncoderBackend, EncoderRegistry, VideoEncoderType};
use super::traits::{EncodedFormat, EncodedFrame, Encoder, EncoderConfig};
use crate::error::{AppError, Result};
use crate::video::format::{PixelFormat, Resolution};

static INIT_LOGGING: Once = Once::new();

/// Initialize hwcodec logging (only once)
fn init_hwcodec_logging() {
    INIT_LOGGING.call_once(|| {
        debug!("hwcodec logging initialized for VP8");
    });
}

/// VP8 encoder type (detected from hwcodec)
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum VP8EncoderType {
    /// VAAPI (Intel on Linux)
    Vaapi,
    /// Software encoder (libvpx)
    Software,
    /// No encoder available
    None,
}

impl std::fmt::Display for VP8EncoderType {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            VP8EncoderType::Vaapi => write!(f, "VAAPI"),
            VP8EncoderType::Software => write!(f, "Software"),
            VP8EncoderType::None => write!(f, "None"),
        }
    }
}

impl Default for VP8EncoderType {
    fn default() -> Self {
        Self::None
    }
}

impl From<EncoderBackend> for VP8EncoderType {
    fn from(backend: EncoderBackend) -> Self {
        match backend {
            EncoderBackend::Vaapi => VP8EncoderType::Vaapi,
            EncoderBackend::Software => VP8EncoderType::Software,
            _ => VP8EncoderType::None,
        }
    }
}

/// Input pixel format for VP8 encoder
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum VP8InputFormat {
    /// YUV420P (I420) - planar Y, U, V
    Yuv420p,
    /// NV12 - Y plane + interleaved UV plane
    Nv12,
}

impl Default for VP8InputFormat {
    fn default() -> Self {
        Self::Nv12 // Default to NV12 for VAAPI compatibility
    }
}

/// VP8 encoder configuration
#[derive(Debug, Clone)]
pub struct VP8Config {
    /// Base encoder config
    pub base: EncoderConfig,
    /// Target bitrate in kbps
    pub bitrate_kbps: u32,
    /// GOP size (keyframe interval)
    pub gop_size: u32,
    /// Frame rate
    pub fps: u32,
    /// Input pixel format
    pub input_format: VP8InputFormat,
}

impl Default for VP8Config {
    fn default() -> Self {
        Self {
            base: EncoderConfig::default(),
            bitrate_kbps: 8000,
            gop_size: 30,
            fps: 30,
            input_format: VP8InputFormat::Nv12,
        }
    }
}

impl VP8Config {
    /// Create config for low latency streaming with NV12 input
    pub fn low_latency(resolution: Resolution, bitrate_kbps: u32) -> Self {
        Self {
            base: EncoderConfig {
                resolution,
                input_format: PixelFormat::Nv12,
                quality: bitrate_kbps,
                fps: 30,
                gop_size: 30,
            },
            bitrate_kbps,
            gop_size: 30,
            fps: 30,
            input_format: VP8InputFormat::Nv12,
        }
    }

    /// Set input format
    pub fn with_input_format(mut self, format: VP8InputFormat) -> Self {
        self.input_format = format;
        self
    }
}

/// Get available VP8 encoders (hardware and software) from hwcodec
pub fn get_available_vp8_encoders(width: u32, height: u32) -> Vec<CodecInfo> {
    init_hwcodec_logging();

    let ctx = EncodeContext {
        name: String::new(),
        mc_name: None,
        width: width as i32,
        height: height as i32,
        pixfmt: AVPixelFormat::AV_PIX_FMT_NV12,
        align: 1,
        fps: 30,
        gop: 30,
        rc: RateControl::RC_CBR,
        quality: Quality::Quality_Default,
        kbs: 2000,
        q: 23,
        thread_count: 1,
    };

    let all_encoders = HwEncoder::available_encoders(ctx, None);

    // Include both hardware and software VP8 encoders
    all_encoders
        .into_iter()
        .filter(|e| e.format == DataFormat::VP8)
        .collect()
}

/// Detect best available VP8 encoder (hardware preferred, software fallback)
pub fn detect_best_vp8_encoder(width: u32, height: u32) -> (VP8EncoderType, Option<String>) {
    let encoders = get_available_vp8_encoders(width, height);

    if encoders.is_empty() {
        warn!("No VP8 encoders available");
        return (VP8EncoderType::None, None);
    }

    // Prefer hardware encoders (VAAPI) over software (libvpx)
    let codec = encoders
        .iter()
        .find(|e| e.name.contains("vaapi"))
        .or_else(|| encoders.first())
        .unwrap();

    let encoder_type = if codec.name.contains("vaapi") {
        VP8EncoderType::Vaapi
    } else if codec.name.contains("libvpx") {
        VP8EncoderType::Software
    } else {
        VP8EncoderType::Software // Default to software for unknown
    };

    info!(
        "Selected VP8 encoder: {} ({})",
        codec.name, encoder_type
    );
    (encoder_type, Some(codec.name.clone()))
}

/// Check if VP8 hardware encoding is available
pub fn is_vp8_available() -> bool {
    let registry = EncoderRegistry::global();
    registry.is_format_available(VideoEncoderType::VP8, true)
}

/// Encoded frame from hwcodec (cloned for ownership)
#[derive(Debug, Clone)]
pub struct HwEncodeFrame {
    pub data: Vec<u8>,
    pub pts: i64,
    pub key: i32,
}

/// VP8 encoder using hwcodec (hardware preferred - VAAPI; libvpx fallback)
pub struct VP8Encoder {
    /// hwcodec encoder instance
    inner: HwEncoder,
    /// Encoder configuration
    config: VP8Config,
    /// Detected encoder type
    encoder_type: VP8EncoderType,
    /// Codec name
    codec_name: String,
    /// Frame counter
    frame_count: u64,
    /// Required buffer length from hwcodec
    buffer_length: i32,
}

impl VP8Encoder {
    /// Create a new VP8 encoder with automatic codec detection
    ///
    /// Returns an error if no VP8 encoder (hardware or software) is available.
    /// Hardware encoding requires Intel VAAPI support; libvpx is used as fallback.
    pub fn new(config: VP8Config) -> Result<Self> {
        init_hwcodec_logging();

        let width = config.base.resolution.width;
        let height = config.base.resolution.height;

        let (encoder_type, codec_name) = detect_best_vp8_encoder(width, height);

        if encoder_type == VP8EncoderType::None {
            return Err(AppError::VideoError(
                "No VP8 encoder available. Please ensure FFmpeg is built with libvpx support.".to_string(),
            ));
        }

        let codec_name = codec_name.unwrap();
        Self::with_codec(config, &codec_name)
    }

    /// Create encoder with specific codec name
    pub fn with_codec(config: VP8Config, codec_name: &str) -> Result<Self> {
        init_hwcodec_logging();

        // Determine if this is a software encoder
        let is_software = codec_name.contains("libvpx");

        // Warn about software encoder performance
        if is_software {
            warn!(
                "Using software VP8 encoder (libvpx) - high CPU usage expected. \
                 Hardware encoder is recommended for better performance."
            );
        }

        let width = config.base.resolution.width;
        let height = config.base.resolution.height;

        // Software encoders (libvpx) require YUV420P, hardware (VAAPI) uses NV12
        let (pixfmt, actual_input_format) = if is_software {
            (AVPixelFormat::AV_PIX_FMT_YUV420P, VP8InputFormat::Yuv420p)
        } else {
            match config.input_format {
                VP8InputFormat::Nv12 => (AVPixelFormat::AV_PIX_FMT_NV12, VP8InputFormat::Nv12),
                VP8InputFormat::Yuv420p => (AVPixelFormat::AV_PIX_FMT_YUV420P, VP8InputFormat::Yuv420p),
            }
        };

        info!(
            "Creating VP8 encoder: {} at {}x{} @ {} kbps (input: {:?})",
            codec_name, width, height, config.bitrate_kbps, actual_input_format
        );

        let ctx = EncodeContext {
            name: codec_name.to_string(),
            mc_name: None,
            width: width as i32,
            height: height as i32,
            pixfmt,
            align: 1,
            fps: config.fps as i32,
            gop: config.gop_size as i32,
            rc: RateControl::RC_CBR,
            quality: Quality::Quality_Default,
            kbs: config.bitrate_kbps as i32,
            q: 23,
            thread_count: 1,
        };

        let inner = HwEncoder::new(ctx).map_err(|_| {
            AppError::VideoError(format!("Failed to create VP8 encoder: {}", codec_name))
        })?;

        let buffer_length = inner.length;
        let backend = EncoderBackend::from_codec_name(codec_name);
        let encoder_type = VP8EncoderType::from(backend);

        // Update config to reflect actual input format used
        let mut config = config;
        config.input_format = actual_input_format;

        info!(
            "VP8 encoder created: {} (type: {}, buffer_length: {})",
            codec_name, encoder_type, buffer_length
        );

        Ok(Self {
            inner,
            config,
            encoder_type,
            codec_name: codec_name.to_string(),
            frame_count: 0,
            buffer_length,
        })
    }

    /// Create with auto-detected encoder
    pub fn auto(resolution: Resolution, bitrate_kbps: u32) -> Result<Self> {
        let config = VP8Config::low_latency(resolution, bitrate_kbps);
        Self::new(config)
    }

    /// Get encoder type
    pub fn encoder_type(&self) -> &VP8EncoderType {
        &self.encoder_type
    }

    /// Get codec name
    pub fn codec_name(&self) -> &str {
        &self.codec_name
    }

    /// Update bitrate dynamically
    pub fn set_bitrate(&mut self, bitrate_kbps: u32) -> Result<()> {
        self.inner.set_bitrate(bitrate_kbps as i32).map_err(|_| {
            AppError::VideoError("Failed to set VP8 bitrate".to_string())
        })?;
        self.config.bitrate_kbps = bitrate_kbps;
        debug!("VP8 bitrate updated to {} kbps", bitrate_kbps);
        Ok(())
    }

    /// Encode raw frame data
    pub fn encode_raw(&mut self, data: &[u8], pts_ms: i64) -> Result<Vec<HwEncodeFrame>> {
        if data.len() < self.buffer_length as usize {
            return Err(AppError::VideoError(format!(
                "Frame data too small: {} < {}",
                data.len(),
                self.buffer_length
            )));
        }

        self.frame_count += 1;

        match self.inner.encode(data, pts_ms) {
            Ok(frames) => {
                let owned_frames: Vec<HwEncodeFrame> = frames
                    .iter()
                    .map(|f| HwEncodeFrame {
                        data: f.data.clone(),
                        pts: f.pts,
                        key: f.key,
                    })
                    .collect();
                Ok(owned_frames)
            }
            Err(e) => {
                error!("VP8 encode failed: {}", e);
                Err(AppError::VideoError(format!("VP8 encode failed: {}", e)))
            }
        }
    }

    /// Encode NV12 data
    pub fn encode_nv12(&mut self, nv12_data: &[u8], pts_ms: i64) -> Result<Vec<HwEncodeFrame>> {
        self.encode_raw(nv12_data, pts_ms)
    }

    /// Get input format
    pub fn input_format(&self) -> VP8InputFormat {
        self.config.input_format
    }

    /// Get buffer info
    pub fn buffer_info(&self) -> (Vec<i32>, Vec<i32>, i32) {
        (
            self.inner.linesize.clone(),
            self.inner.offset.clone(),
            self.inner.length,
        )
    }
}

// SAFETY: VP8Encoder contains hwcodec::ffmpeg_ram::encode::Encoder which has raw pointers
// that are not Send by default. However, we ensure that VP8Encoder is only used from
// a single task/thread at a time (encoding is sequential), so this is safe.
unsafe impl Send for VP8Encoder {}

impl Encoder for VP8Encoder {
    fn name(&self) -> &str {
        &self.codec_name
    }

    fn output_format(&self) -> EncodedFormat {
        EncodedFormat::Vp8
    }

    fn encode(&mut self, data: &[u8], sequence: u64) -> Result<EncodedFrame> {
        let pts_ms = (sequence * 1000 / self.config.fps as u64) as i64;

        let frames = self.encode_raw(data, pts_ms)?;

        if frames.is_empty() {
            warn!("VP8 encoder returned no frames");
            return Err(AppError::VideoError(
                "VP8 encoder returned no frames".to_string(),
            ));
        }

        let frame = &frames[0];
        let key_frame = frame.key == 1;

        Ok(EncodedFrame {
            data: Bytes::from(frame.data.clone()),
            format: EncodedFormat::Vp8,
            resolution: self.config.base.resolution,
            key_frame,
            sequence,
            timestamp: std::time::Instant::now(),
            pts: frame.pts as u64,
            dts: frame.pts as u64,
        })
    }

    fn flush(&mut self) -> Result<Vec<EncodedFrame>> {
        Ok(vec![])
    }

    fn reset(&mut self) -> Result<()> {
        self.frame_count = 0;
        Ok(())
    }

    fn config(&self) -> &EncoderConfig {
        &self.config.base
    }

    fn supports_format(&self, format: PixelFormat) -> bool {
        match self.config.input_format {
            VP8InputFormat::Nv12 => matches!(format, PixelFormat::Nv12),
            VP8InputFormat::Yuv420p => matches!(format, PixelFormat::Yuv420),
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_detect_vp8_encoder() {
        let (encoder_type, codec_name) = detect_best_vp8_encoder(1280, 720);
        println!("Detected VP8 encoder: {:?} ({:?})", encoder_type, codec_name);
    }

    #[test]
    fn test_available_vp8_encoders() {
        let encoders = get_available_vp8_encoders(1280, 720);
        println!("Available VP8 encoders:");
        for enc in &encoders {
            println!("  - {} ({:?})", enc.name, enc.format);
        }
    }

    #[test]
    fn test_vp8_availability() {
        let available = is_vp8_available();
        println!("VP8 hardware encoding available: {}", available);
    }
|
||||
}
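
// Illustrative usage sketch (added here, not part of the original source):
// building a low-latency VP8 configuration before constructing an encoder.
// Constructing `VP8Encoder` itself needs a working FFmpeg/hwcodec setup, so
// this example stops at the config stage; the exact fields `low_latency`
// fills in are assumed to mirror the VP9 variant of this module.
#[cfg(test)]
mod usage_example {
    use super::*;

    #[test]
    fn build_low_latency_config() {
        let config = VP8Config::low_latency(Resolution::HD720, 2000);
        assert_eq!(config.bitrate_kbps, 2000);
        // NV12 is the default input; `VP8Encoder::with_codec` overrides it
        // to YUV420P when a software (libvpx) codec is selected.
        assert!(matches!(config.input_format, VP8InputFormat::Nv12));
    }
}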
488
src/video/encoder/vp9.rs
Normal file
@@ -0,0 +1,488 @@
//! VP9 encoder using hwcodec (FFmpeg wrapper)
//!
//! Supports both hardware and software encoding:
//! - Hardware: VAAPI (Intel on Linux)
//! - Software: libvpx-vp9 (CPU-based, high CPU usage)
//!
//! Hardware encoding is preferred when available for better performance.

use bytes::Bytes;
use std::sync::Once;
use tracing::{debug, error, info, warn};

use hwcodec::common::{DataFormat, Quality, RateControl};
use hwcodec::ffmpeg::AVPixelFormat;
use hwcodec::ffmpeg_ram::encode::{EncodeContext, Encoder as HwEncoder};
use hwcodec::ffmpeg_ram::CodecInfo;

use super::registry::{EncoderBackend, EncoderRegistry, VideoEncoderType};
use super::traits::{EncodedFormat, EncodedFrame, Encoder, EncoderConfig};
use crate::error::{AppError, Result};
use crate::video::format::{PixelFormat, Resolution};

static INIT_LOGGING: Once = Once::new();

/// Initialize hwcodec logging (only once)
fn init_hwcodec_logging() {
    INIT_LOGGING.call_once(|| {
        debug!("hwcodec logging initialized for VP9");
    });
}

/// VP9 encoder type (detected from hwcodec)
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum VP9EncoderType {
    /// VAAPI (Intel on Linux)
    Vaapi,
    /// Software encoder (libvpx-vp9)
    Software,
    /// No encoder available
    None,
}

impl std::fmt::Display for VP9EncoderType {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            VP9EncoderType::Vaapi => write!(f, "VAAPI"),
            VP9EncoderType::Software => write!(f, "Software"),
            VP9EncoderType::None => write!(f, "None"),
        }
    }
}

impl Default for VP9EncoderType {
    fn default() -> Self {
        Self::None
    }
}

impl From<EncoderBackend> for VP9EncoderType {
    fn from(backend: EncoderBackend) -> Self {
        match backend {
            EncoderBackend::Vaapi => VP9EncoderType::Vaapi,
            EncoderBackend::Software => VP9EncoderType::Software,
            _ => VP9EncoderType::None,
        }
    }
}

/// Input pixel format for VP9 encoder
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum VP9InputFormat {
    /// YUV420P (I420) - planar Y, U, V
    Yuv420p,
    /// NV12 - Y plane + interleaved UV plane
    Nv12,
}

impl Default for VP9InputFormat {
    fn default() -> Self {
        Self::Nv12 // Default to NV12 for VAAPI compatibility
    }
}

/// VP9 encoder configuration
#[derive(Debug, Clone)]
pub struct VP9Config {
    /// Base encoder config
    pub base: EncoderConfig,
    /// Target bitrate in kbps
    pub bitrate_kbps: u32,
    /// GOP size (keyframe interval)
    pub gop_size: u32,
    /// Frame rate
    pub fps: u32,
    /// Input pixel format
    pub input_format: VP9InputFormat,
}

impl Default for VP9Config {
    fn default() -> Self {
        Self {
            base: EncoderConfig::default(),
            bitrate_kbps: 8000,
            gop_size: 30,
            fps: 30,
            input_format: VP9InputFormat::Nv12,
        }
    }
}

impl VP9Config {
    /// Create config for low latency streaming with NV12 input
    pub fn low_latency(resolution: Resolution, bitrate_kbps: u32) -> Self {
        Self {
            base: EncoderConfig {
                resolution,
                input_format: PixelFormat::Nv12,
                quality: bitrate_kbps,
                fps: 30,
                gop_size: 30,
            },
            bitrate_kbps,
            gop_size: 30,
            fps: 30,
            input_format: VP9InputFormat::Nv12,
        }
    }

    /// Set input format
    pub fn with_input_format(mut self, format: VP9InputFormat) -> Self {
        self.input_format = format;
        self
    }
}

/// Get available VP9 hardware encoders from hwcodec
pub fn get_available_vp9_encoders(width: u32, height: u32) -> Vec<CodecInfo> {
    init_hwcodec_logging();

    let ctx = EncodeContext {
        name: String::new(),
        mc_name: None,
        width: width as i32,
        height: height as i32,
        pixfmt: AVPixelFormat::AV_PIX_FMT_NV12,
        align: 1,
        fps: 30,
        gop: 30,
        rc: RateControl::RC_CBR,
        quality: Quality::Quality_Default,
        kbs: 2000,
        q: 23,
        thread_count: 1,
    };

    let all_encoders = HwEncoder::available_encoders(ctx, None);

    // Include both hardware and software VP9 encoders
    all_encoders
        .into_iter()
        .filter(|e| e.format == DataFormat::VP9)
        .collect()
}

/// Detect best available VP9 encoder (hardware preferred, software fallback)
pub fn detect_best_vp9_encoder(width: u32, height: u32) -> (VP9EncoderType, Option<String>) {
    let encoders = get_available_vp9_encoders(width, height);

    if encoders.is_empty() {
        warn!("No VP9 encoders available");
        return (VP9EncoderType::None, None);
    }

    // Prefer hardware encoders (VAAPI) over software (libvpx-vp9)
    let codec = encoders
        .iter()
        .find(|e| e.name.contains("vaapi"))
        .or_else(|| encoders.first())
        .unwrap();

    let encoder_type = if codec.name.contains("vaapi") {
        VP9EncoderType::Vaapi
    } else if codec.name.contains("libvpx") {
        VP9EncoderType::Software
    } else {
        VP9EncoderType::Software // Default to software for unknown
    };

    info!(
        "Selected VP9 encoder: {} ({})",
        codec.name, encoder_type
    );
    (encoder_type, Some(codec.name.clone()))
}

/// Check if VP9 hardware encoding is available
pub fn is_vp9_available() -> bool {
    let registry = EncoderRegistry::global();
    registry.is_format_available(VideoEncoderType::VP9, true)
}

/// Encoded frame from hwcodec (cloned for ownership)
#[derive(Debug, Clone)]
pub struct HwEncodeFrame {
    pub data: Vec<u8>,
    pub pts: i64,
    pub key: i32,
}

/// VP9 encoder using hwcodec (VAAPI hardware or libvpx-vp9 software)
pub struct VP9Encoder {
    /// hwcodec encoder instance
    inner: HwEncoder,
    /// Encoder configuration
    config: VP9Config,
    /// Detected encoder type
    encoder_type: VP9EncoderType,
    /// Codec name
    codec_name: String,
    /// Frame counter
    frame_count: u64,
    /// Required buffer length from hwcodec
    buffer_length: i32,
}

impl VP9Encoder {
    /// Create a new VP9 encoder with automatic codec detection (hardware preferred)
    ///
    /// Returns an error if no VP9 encoder (hardware or software) is available.
    /// Hardware encoding requires Intel VAAPI support; otherwise libvpx-vp9 is used.
    pub fn new(config: VP9Config) -> Result<Self> {
        init_hwcodec_logging();

        let width = config.base.resolution.width;
        let height = config.base.resolution.height;

        let (encoder_type, codec_name) = detect_best_vp9_encoder(width, height);

        if encoder_type == VP9EncoderType::None {
            return Err(AppError::VideoError(
                "No VP9 encoder available. Please ensure FFmpeg is built with libvpx support.".to_string(),
            ));
        }

        let codec_name = codec_name.unwrap();
        Self::with_codec(config, &codec_name)
    }

    /// Create encoder with specific codec name
    pub fn with_codec(config: VP9Config, codec_name: &str) -> Result<Self> {
        init_hwcodec_logging();

        // Determine if this is a software encoder
        let is_software = codec_name.contains("libvpx");

        // Warn about software encoder performance
        if is_software {
            warn!(
                "Using software VP9 encoder (libvpx-vp9) - high CPU usage expected. \
                 Hardware encoder is recommended for better performance."
            );
        }

        let width = config.base.resolution.width;
        let height = config.base.resolution.height;

        // Software encoders (libvpx-vp9) require YUV420P, hardware (VAAPI) uses NV12
        let (pixfmt, actual_input_format) = if is_software {
            (AVPixelFormat::AV_PIX_FMT_YUV420P, VP9InputFormat::Yuv420p)
        } else {
            match config.input_format {
                VP9InputFormat::Nv12 => (AVPixelFormat::AV_PIX_FMT_NV12, VP9InputFormat::Nv12),
                VP9InputFormat::Yuv420p => (AVPixelFormat::AV_PIX_FMT_YUV420P, VP9InputFormat::Yuv420p),
            }
        };

        info!(
            "Creating VP9 encoder: {} at {}x{} @ {} kbps (input: {:?})",
            codec_name, width, height, config.bitrate_kbps, actual_input_format
        );

        let ctx = EncodeContext {
            name: codec_name.to_string(),
            mc_name: None,
            width: width as i32,
            height: height as i32,
            pixfmt,
            align: 1,
            fps: config.fps as i32,
            gop: config.gop_size as i32,
            rc: RateControl::RC_CBR,
            quality: Quality::Quality_Default,
            kbs: config.bitrate_kbps as i32,
            q: 31,
            thread_count: 4, // VP9 benefits from multi-threading
        };

        let inner = HwEncoder::new(ctx).map_err(|_| {
            AppError::VideoError(format!("Failed to create VP9 encoder: {}", codec_name))
        })?;

        let buffer_length = inner.length;
        let backend = EncoderBackend::from_codec_name(codec_name);
        let encoder_type = VP9EncoderType::from(backend);

        // Update config to reflect actual input format used
        let mut config = config;
        config.input_format = actual_input_format;

        info!(
            "VP9 encoder created: {} (type: {}, buffer_length: {})",
            codec_name, encoder_type, buffer_length
        );

        Ok(Self {
            inner,
            config,
            encoder_type,
            codec_name: codec_name.to_string(),
            frame_count: 0,
            buffer_length,
        })
    }

    /// Create with auto-detected encoder
    pub fn auto(resolution: Resolution, bitrate_kbps: u32) -> Result<Self> {
        let config = VP9Config::low_latency(resolution, bitrate_kbps);
        Self::new(config)
    }

    /// Get encoder type
    pub fn encoder_type(&self) -> &VP9EncoderType {
        &self.encoder_type
    }

    /// Get codec name
    pub fn codec_name(&self) -> &str {
        &self.codec_name
    }

    /// Update bitrate dynamically
    pub fn set_bitrate(&mut self, bitrate_kbps: u32) -> Result<()> {
        self.inner.set_bitrate(bitrate_kbps as i32).map_err(|_| {
            AppError::VideoError("Failed to set VP9 bitrate".to_string())
        })?;
        self.config.bitrate_kbps = bitrate_kbps;
        debug!("VP9 bitrate updated to {} kbps", bitrate_kbps);
        Ok(())
    }

    /// Encode raw frame data
    pub fn encode_raw(&mut self, data: &[u8], pts_ms: i64) -> Result<Vec<HwEncodeFrame>> {
        if data.len() < self.buffer_length as usize {
            return Err(AppError::VideoError(format!(
                "Frame data too small: {} < {}",
                data.len(),
                self.buffer_length
            )));
        }

        self.frame_count += 1;

        match self.inner.encode(data, pts_ms) {
            Ok(frames) => {
                let owned_frames: Vec<HwEncodeFrame> = frames
                    .iter()
                    .map(|f| HwEncodeFrame {
                        data: f.data.clone(),
                        pts: f.pts,
                        key: f.key,
                    })
                    .collect();
                Ok(owned_frames)
            }
            Err(e) => {
                error!("VP9 encode failed: {}", e);
                Err(AppError::VideoError(format!("VP9 encode failed: {}", e)))
            }
        }
    }

    /// Encode NV12 data
    pub fn encode_nv12(&mut self, nv12_data: &[u8], pts_ms: i64) -> Result<Vec<HwEncodeFrame>> {
        self.encode_raw(nv12_data, pts_ms)
    }

    /// Get input format
    pub fn input_format(&self) -> VP9InputFormat {
        self.config.input_format
    }

    /// Get buffer info
    pub fn buffer_info(&self) -> (Vec<i32>, Vec<i32>, i32) {
        (
            self.inner.linesize.clone(),
            self.inner.offset.clone(),
            self.inner.length,
        )
    }
}

// SAFETY: VP9Encoder contains hwcodec::ffmpeg_ram::encode::Encoder which has raw pointers
// that are not Send by default. However, we ensure that VP9Encoder is only used from
// a single task/thread at a time (encoding is sequential), so this is safe.
unsafe impl Send for VP9Encoder {}

impl Encoder for VP9Encoder {
    fn name(&self) -> &str {
        &self.codec_name
    }

    fn output_format(&self) -> EncodedFormat {
        EncodedFormat::Vp9
    }

    fn encode(&mut self, data: &[u8], sequence: u64) -> Result<EncodedFrame> {
        let pts_ms = (sequence * 1000 / self.config.fps as u64) as i64;

        let frames = self.encode_raw(data, pts_ms)?;

        if frames.is_empty() {
            warn!("VP9 encoder returned no frames");
            return Err(AppError::VideoError(
                "VP9 encoder returned no frames".to_string(),
            ));
        }

        let frame = &frames[0];
        let key_frame = frame.key == 1;

        Ok(EncodedFrame {
            data: Bytes::from(frame.data.clone()),
            format: EncodedFormat::Vp9,
            resolution: self.config.base.resolution,
            key_frame,
            sequence,
            timestamp: std::time::Instant::now(),
            pts: frame.pts as u64,
            dts: frame.pts as u64,
        })
    }

    fn flush(&mut self) -> Result<Vec<EncodedFrame>> {
        Ok(vec![])
    }

    fn reset(&mut self) -> Result<()> {
        self.frame_count = 0;
        Ok(())
    }

    fn config(&self) -> &EncoderConfig {
        &self.config.base
    }

    fn supports_format(&self, format: PixelFormat) -> bool {
        match self.config.input_format {
            VP9InputFormat::Nv12 => matches!(format, PixelFormat::Nv12),
            VP9InputFormat::Yuv420p => matches!(format, PixelFormat::Yuv420),
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_detect_vp9_encoder() {
        let (encoder_type, codec_name) = detect_best_vp9_encoder(1280, 720);
        println!("Detected VP9 encoder: {:?} ({:?})", encoder_type, codec_name);
    }

    #[test]
    fn test_available_vp9_encoders() {
        let encoders = get_available_vp9_encoders(1280, 720);
        println!("Available VP9 hardware encoders:");
        for enc in &encoders {
            println!("  - {} ({:?})", enc.name, enc.format);
        }
    }

    #[test]
    fn test_vp9_availability() {
        let available = is_vp9_available();
        println!("VP9 hardware encoding available: {}", available);
    }
}
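
// Illustrative example (added here, not part of the original source): the
// selection order implemented by `detect_best_vp9_encoder`: a VAAPI codec
// wins when present, otherwise the first remaining entry (typically
// libvpx-vp9) is used. This exercises the public detection API end to end
// and passes whether or not hardware is present.
#[cfg(test)]
mod selection_example {
    use super::*;

    #[test]
    fn vaapi_preferred_over_software() {
        let (encoder_type, codec_name) = detect_best_vp9_encoder(1920, 1080);
        match codec_name {
            Some(name) if name.contains("vaapi") => {
                assert_eq!(encoder_type, VP9EncoderType::Vaapi);
            }
            Some(_) => {
                // Non-VAAPI codecs (libvpx-vp9 or unknown) are classified as software.
                assert_eq!(encoder_type, VP9EncoderType::Software);
            }
            None => assert_eq!(encoder_type, VP9EncoderType::None),
        }
    }
}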
259
src/video/format.rs
Normal file
@@ -0,0 +1,259 @@
//! Pixel format definitions and conversions

use serde::{Deserialize, Serialize};
use std::fmt;
use v4l::format::fourcc;

/// Supported pixel formats
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[serde(rename_all = "UPPERCASE")]
pub enum PixelFormat {
    /// MJPEG compressed format (preferred for capture cards)
    Mjpeg,
    /// JPEG compressed format
    Jpeg,
    /// YUYV 4:2:2 packed format
    Yuyv,
    /// YVYU 4:2:2 packed format
    Yvyu,
    /// UYVY 4:2:2 packed format
    Uyvy,
    /// NV12 semi-planar format (Y plane + interleaved UV)
    Nv12,
    /// NV16 semi-planar format
    Nv16,
    /// NV24 semi-planar format
    Nv24,
    /// YUV420 planar format
    Yuv420,
    /// YVU420 planar format
    Yvu420,
    /// RGB565 format
    Rgb565,
    /// RGB24 format (3 bytes per pixel)
    Rgb24,
    /// BGR24 format (3 bytes per pixel)
    Bgr24,
    /// Grayscale format
    Grey,
}

impl PixelFormat {
    /// Convert to V4L2 FourCC
    pub fn to_fourcc(&self) -> fourcc::FourCC {
        match self {
            PixelFormat::Mjpeg => fourcc::FourCC::new(b"MJPG"),
            PixelFormat::Jpeg => fourcc::FourCC::new(b"JPEG"),
            PixelFormat::Yuyv => fourcc::FourCC::new(b"YUYV"),
            PixelFormat::Yvyu => fourcc::FourCC::new(b"YVYU"),
            PixelFormat::Uyvy => fourcc::FourCC::new(b"UYVY"),
            PixelFormat::Nv12 => fourcc::FourCC::new(b"NV12"),
            PixelFormat::Nv16 => fourcc::FourCC::new(b"NV16"),
            PixelFormat::Nv24 => fourcc::FourCC::new(b"NV24"),
            PixelFormat::Yuv420 => fourcc::FourCC::new(b"YU12"),
            PixelFormat::Yvu420 => fourcc::FourCC::new(b"YV12"),
            PixelFormat::Rgb565 => fourcc::FourCC::new(b"RGBP"),
            PixelFormat::Rgb24 => fourcc::FourCC::new(b"RGB3"),
            PixelFormat::Bgr24 => fourcc::FourCC::new(b"BGR3"),
            PixelFormat::Grey => fourcc::FourCC::new(b"GREY"),
        }
    }

    /// Try to convert from V4L2 FourCC
    pub fn from_fourcc(fourcc: fourcc::FourCC) -> Option<Self> {
        let repr = fourcc.repr;
        match &repr {
            b"MJPG" => Some(PixelFormat::Mjpeg),
            b"JPEG" => Some(PixelFormat::Jpeg),
            b"YUYV" => Some(PixelFormat::Yuyv),
            b"YVYU" => Some(PixelFormat::Yvyu),
            b"UYVY" => Some(PixelFormat::Uyvy),
            b"NV12" => Some(PixelFormat::Nv12),
            b"NV16" => Some(PixelFormat::Nv16),
            b"NV24" => Some(PixelFormat::Nv24),
            b"YU12" | b"I420" => Some(PixelFormat::Yuv420),
            b"YV12" => Some(PixelFormat::Yvu420),
            b"RGBP" => Some(PixelFormat::Rgb565),
            b"RGB3" => Some(PixelFormat::Rgb24),
            b"BGR3" => Some(PixelFormat::Bgr24),
            b"GREY" | b"Y800" => Some(PixelFormat::Grey),
            _ => None,
        }
    }

    /// Check if format is compressed (JPEG/MJPEG)
    pub fn is_compressed(&self) -> bool {
        matches!(self, PixelFormat::Mjpeg | PixelFormat::Jpeg)
    }

    /// Get bytes per pixel for uncompressed formats
    /// Returns None for compressed formats
    pub fn bytes_per_pixel(&self) -> Option<usize> {
        match self {
            PixelFormat::Mjpeg | PixelFormat::Jpeg => None,
            PixelFormat::Yuyv | PixelFormat::Yvyu | PixelFormat::Uyvy => Some(2),
            PixelFormat::Nv12 | PixelFormat::Yuv420 | PixelFormat::Yvu420 => None, // Variable
            PixelFormat::Nv16 => None,
            PixelFormat::Nv24 => None,
            PixelFormat::Rgb565 => Some(2),
            PixelFormat::Rgb24 | PixelFormat::Bgr24 => Some(3),
            PixelFormat::Grey => Some(1),
        }
    }

    /// Calculate expected frame size for a given resolution
    /// Returns None for compressed formats (variable size)
    pub fn frame_size(&self, resolution: Resolution) -> Option<usize> {
        let pixels = (resolution.width * resolution.height) as usize;
        match self {
            PixelFormat::Mjpeg | PixelFormat::Jpeg => None,
            PixelFormat::Yuyv | PixelFormat::Yvyu | PixelFormat::Uyvy => Some(pixels * 2),
            PixelFormat::Nv12 | PixelFormat::Yuv420 | PixelFormat::Yvu420 => Some(pixels * 3 / 2),
            PixelFormat::Nv16 => Some(pixels * 2),
            PixelFormat::Nv24 => Some(pixels * 3),
            PixelFormat::Rgb565 => Some(pixels * 2),
            PixelFormat::Rgb24 | PixelFormat::Bgr24 => Some(pixels * 3),
            PixelFormat::Grey => Some(pixels),
        }
    }

    /// Get priority for format selection (higher is better)
    /// MJPEG is preferred for HDMI capture cards
    pub fn priority(&self) -> u8 {
        match self {
            PixelFormat::Mjpeg => 100,
            PixelFormat::Jpeg => 99,
            PixelFormat::Yuyv => 80,
            PixelFormat::Nv12 => 75,
            PixelFormat::Yuv420 => 70,
            PixelFormat::Uyvy => 65,
            PixelFormat::Yvyu => 64,
            PixelFormat::Yvu420 => 63,
            PixelFormat::Nv16 => 60,
            PixelFormat::Nv24 => 55,
            PixelFormat::Rgb24 => 50,
            PixelFormat::Bgr24 => 49,
            PixelFormat::Rgb565 => 40,
            PixelFormat::Grey => 10,
        }
    }

    /// Get all supported formats
    pub fn all() -> &'static [PixelFormat] {
        &[
            PixelFormat::Mjpeg,
            PixelFormat::Jpeg,
            PixelFormat::Yuyv,
            PixelFormat::Yvyu,
            PixelFormat::Uyvy,
            PixelFormat::Nv12,
            PixelFormat::Nv16,
            PixelFormat::Nv24,
            PixelFormat::Yuv420,
            PixelFormat::Yvu420,
            PixelFormat::Rgb565,
            PixelFormat::Rgb24,
            PixelFormat::Bgr24,
            PixelFormat::Grey,
        ]
    }
}
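
// Illustrative example (added here, not part of the original source): the
// frame-size arithmetic above, worked for 1080p. 4:2:0 formats carry one
// full-resolution luma plane plus half-resolution chroma, i.e. pixels * 3 / 2
// bytes, so NV12 at 1920x1080 needs 1920 * 1080 * 3 / 2 = 3_110_400 bytes;
// packed 4:2:2 YUYV needs pixels * 2 = 4_147_200 bytes.
#[cfg(test)]
mod frame_size_example {
    use super::*;

    #[test]
    fn sizes_at_1080p() {
        assert_eq!(
            PixelFormat::Nv12.frame_size(Resolution::HD1080),
            Some(3_110_400)
        );
        assert_eq!(
            PixelFormat::Yuyv.frame_size(Resolution::HD1080),
            Some(4_147_200)
        );
        // Compressed formats have no fixed frame size.
        assert_eq!(PixelFormat::Mjpeg.frame_size(Resolution::HD1080), None);
    }
}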

impl fmt::Display for PixelFormat {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let name = match self {
            PixelFormat::Mjpeg => "MJPEG",
            PixelFormat::Jpeg => "JPEG",
            PixelFormat::Yuyv => "YUYV",
            PixelFormat::Yvyu => "YVYU",
            PixelFormat::Uyvy => "UYVY",
            PixelFormat::Nv12 => "NV12",
            PixelFormat::Nv16 => "NV16",
            PixelFormat::Nv24 => "NV24",
            PixelFormat::Yuv420 => "YUV420",
            PixelFormat::Yvu420 => "YVU420",
            PixelFormat::Rgb565 => "RGB565",
            PixelFormat::Rgb24 => "RGB24",
            PixelFormat::Bgr24 => "BGR24",
            PixelFormat::Grey => "GREY",
        };
        write!(f, "{}", name)
    }
}

impl std::str::FromStr for PixelFormat {
    type Err = String;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s.to_uppercase().as_str() {
            "MJPEG" | "MJPG" => Ok(PixelFormat::Mjpeg),
            "JPEG" => Ok(PixelFormat::Jpeg),
            "YUYV" => Ok(PixelFormat::Yuyv),
            "YVYU" => Ok(PixelFormat::Yvyu),
            "UYVY" => Ok(PixelFormat::Uyvy),
            "NV12" => Ok(PixelFormat::Nv12),
            "NV16" => Ok(PixelFormat::Nv16),
            "NV24" => Ok(PixelFormat::Nv24),
            "YUV420" | "I420" => Ok(PixelFormat::Yuv420),
            "YVU420" | "YV12" => Ok(PixelFormat::Yvu420),
            "RGB565" => Ok(PixelFormat::Rgb565),
            "RGB24" => Ok(PixelFormat::Rgb24),
            "BGR24" => Ok(PixelFormat::Bgr24),
            "GREY" | "GRAY" => Ok(PixelFormat::Grey),
            _ => Err(format!("Unknown pixel format: {}", s)),
        }
    }
}

/// Resolution (width x height)
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct Resolution {
    pub width: u32,
    pub height: u32,
}

impl Resolution {
    pub fn new(width: u32, height: u32) -> Self {
        Self { width, height }
    }

    /// Check if resolution is valid
    pub fn is_valid(&self) -> bool {
        self.width >= 160 && self.width <= 15360 && self.height >= 120 && self.height <= 8640
    }

    /// Get total pixels
    pub fn pixels(&self) -> u64 {
        self.width as u64 * self.height as u64
    }

    /// Common resolutions
    pub const VGA: Resolution = Resolution {
        width: 640,
        height: 480,
    };
    pub const HD720: Resolution = Resolution {
        width: 1280,
        height: 720,
    };
    pub const HD1080: Resolution = Resolution {
        width: 1920,
        height: 1080,
    };
    pub const UHD4K: Resolution = Resolution {
        width: 3840,
        height: 2160,
    };
}

impl fmt::Display for Resolution {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}x{}", self.width, self.height)
    }
}

impl From<(u32, u32)> for Resolution {
    fn from((width, height): (u32, u32)) -> Self {
        Self { width, height }
    }
}
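
// Illustrative example (added here, not part of the original source): the
// string and FourCC aliases accepted by the conversions above. "MJPG" parses
// to Mjpeg, and the V4L2 "I420" FourCC maps to Yuv420 just like "YU12".
#[cfg(test)]
mod parse_example {
    use super::*;

    #[test]
    fn aliases_round_trip() {
        assert_eq!("MJPG".parse::<PixelFormat>(), Ok(PixelFormat::Mjpeg));
        assert_eq!("i420".parse::<PixelFormat>(), Ok(PixelFormat::Yuv420));
        assert_eq!(
            PixelFormat::from_fourcc(fourcc::FourCC::new(b"I420")),
            Some(PixelFormat::Yuv420)
        );
        // Display output parses back to the same variant.
        assert_eq!(
            PixelFormat::Mjpeg.to_string().parse::<PixelFormat>(),
            Ok(PixelFormat::Mjpeg)
        );
    }
}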
239
src/video/frame.rs
Normal file
@@ -0,0 +1,239 @@
//! Video frame data structures

use bytes::Bytes;
use std::sync::Arc;
use std::sync::OnceLock;
use std::time::Instant;

use super::format::{PixelFormat, Resolution};

/// A video frame with metadata
#[derive(Debug, Clone)]
pub struct VideoFrame {
    /// Raw frame data
    data: Arc<Bytes>,
    /// Cached xxHash64 of frame data (lazily computed, used for deduplication)
    hash: Arc<OnceLock<u64>>,
    /// Frame resolution
    pub resolution: Resolution,
    /// Pixel format
    pub format: PixelFormat,
    /// Stride (bytes per line)
    pub stride: u32,
    /// Whether this is a key frame (for compressed formats)
    pub key_frame: bool,
    /// Frame sequence number
    pub sequence: u64,
    /// Timestamp when frame was captured
    pub capture_ts: Instant,
    /// Whether capture is online (signal present)
    pub online: bool,
}

impl VideoFrame {
    /// Create a new video frame
    pub fn new(
        data: Bytes,
        resolution: Resolution,
        format: PixelFormat,
        stride: u32,
        sequence: u64,
    ) -> Self {
        Self {
            data: Arc::new(data),
            hash: Arc::new(OnceLock::new()),
            resolution,
            format,
            stride,
            key_frame: true,
            sequence,
            capture_ts: Instant::now(),
            online: true,
        }
    }

    /// Create a frame from a Vec<u8>
    pub fn from_vec(
        data: Vec<u8>,
        resolution: Resolution,
        format: PixelFormat,
        stride: u32,
        sequence: u64,
    ) -> Self {
        Self::new(Bytes::from(data), resolution, format, stride, sequence)
    }

    /// Get frame data as a byte slice
    pub fn data(&self) -> &[u8] {
        &self.data
    }

    /// Get frame data as Bytes (cheap clone)
    pub fn data_bytes(&self) -> Bytes {
        (*self.data).clone()
    }

    /// Get data length
    pub fn len(&self) -> usize {
        self.data.len()
    }

    /// Check if frame is empty
    pub fn is_empty(&self) -> bool {
        self.data.is_empty()
    }

    /// Get width
    pub fn width(&self) -> u32 {
        self.resolution.width
    }

    /// Get height
    pub fn height(&self) -> u32 {
        self.resolution.height
    }

    /// Get age of this frame (time since capture)
    pub fn age(&self) -> std::time::Duration {
        self.capture_ts.elapsed()
    }

    /// Check if this frame is still fresh (within threshold)
    pub fn is_fresh(&self, max_age_ms: u64) -> bool {
        self.age().as_millis() < max_age_ms as u128
    }

    /// Get hash of frame data (computed once, cached)
    /// Used for fast frame deduplication comparison
    pub fn get_hash(&self) -> u64 {
        *self.hash.get_or_init(|| {
            xxhash_rust::xxh64::xxh64(self.data.as_ref(), 0)
        })
    }

    /// Check if format is JPEG/MJPEG
    pub fn is_jpeg(&self) -> bool {
        self.format.is_compressed()
    }

    /// Validate JPEG frame data
    pub fn is_valid_jpeg(&self) -> bool {
        if !self.is_jpeg() {
            return false;
        }
        if self.data.len() < 125 {
            return false;
        }
        // Check JPEG header
        let start_marker = ((self.data[0] as u16) << 8) | self.data[1] as u16;
        if start_marker != 0xFFD8 {
            return false;
        }
        // Check JPEG end marker
        let end = self.data.len();
        let end_marker = ((self.data[end - 2] as u16) << 8) | self.data[end - 1] as u16;
        // Valid end markers: 0xFFD9, 0xD900, 0x0000 (padded)
        matches!(end_marker, 0xFFD9 | 0xD900 | 0x0000)
    }

    /// Create an offline placeholder frame
    pub fn offline(resolution: Resolution, format: PixelFormat) -> Self {
        Self {
            data: Arc::new(Bytes::new()),
            hash: Arc::new(OnceLock::new()),
            resolution,
            format,
            stride: 0,
            key_frame: true,
            sequence: 0,
            capture_ts: Instant::now(),
            online: false,
        }
    }
}
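
// Illustrative example (added here, not part of the original source): using
// the cached xxHash64 for cheap frame deduplication. Two frames with
// identical bytes hash equal, so a streamer can skip re-sending an
// unchanged frame.
#[cfg(test)]
mod dedup_example {
    use super::*;

    #[test]
    fn identical_frames_hash_equal() {
        let res = Resolution::new(160, 120);
        let a = VideoFrame::from_vec(vec![0x42; 160 * 120 * 2], res, PixelFormat::Yuyv, 320, 1);
        let b = VideoFrame::from_vec(vec![0x42; 160 * 120 * 2], res, PixelFormat::Yuyv, 320, 2);
        // The hash is computed on first access and cached inside the frame.
        assert_eq!(a.get_hash(), b.get_hash());

        let c = VideoFrame::from_vec(vec![0x43; 160 * 120 * 2], res, PixelFormat::Yuyv, 320, 3);
        assert_ne!(a.get_hash(), c.get_hash());
    }
}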

/// Frame metadata without actual data (for logging/stats)
#[derive(Debug, Clone)]
pub struct FrameMeta {
    pub resolution: Resolution,
    pub format: PixelFormat,
    pub size: usize,
    pub sequence: u64,
    pub key_frame: bool,
    pub online: bool,
}

impl From<&VideoFrame> for FrameMeta {
    fn from(frame: &VideoFrame) -> Self {
        Self {
            resolution: frame.resolution,
            format: frame.format,
            size: frame.len(),
            sequence: frame.sequence,
            key_frame: frame.key_frame,
            online: frame.online,
        }
    }
}

/// Ring buffer for storing recent frames
pub struct FrameRing {
    frames: Vec<Option<VideoFrame>>,
    capacity: usize,
    write_pos: usize,
    count: usize,
}

impl FrameRing {
    /// Create a new frame ring with specified capacity
    pub fn new(capacity: usize) -> Self {
        assert!(capacity > 0, "Ring capacity must be > 0");
        Self {
            frames: (0..capacity).map(|_| None).collect(),
            capacity,
            write_pos: 0,
            count: 0,
        }
    }

    /// Push a frame into the ring
    pub fn push(&mut self, frame: VideoFrame) {
        self.frames[self.write_pos] = Some(frame);
        self.write_pos = (self.write_pos + 1) % self.capacity;
        if self.count < self.capacity {
            self.count += 1;
        }
    }

    /// Get the latest frame
    pub fn latest(&self) -> Option<&VideoFrame> {
        if self.count == 0 {
            return None;
        }
        let pos = if self.write_pos == 0 {
            self.capacity - 1
        } else {
            self.write_pos - 1
        };
        self.frames[pos].as_ref()
    }

    /// Get number of frames in ring
    pub fn len(&self) -> usize {
        self.count
    }

    /// Check if ring is empty
    pub fn is_empty(&self) -> bool {
        self.count == 0
    }

    /// Clear all frames
    pub fn clear(&mut self) {
        for frame in &mut self.frames {
            *frame = None;
        }
        self.write_pos = 0;
        self.count = 0;
    }
}
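
// Illustrative example (added here, not part of the original source):
// FrameRing keeps only the most recent `capacity` frames. Once full, each
// push overwrites the oldest slot, and `latest()` always returns the newest.
#[cfg(test)]
mod ring_example {
    use super::*;

    #[test]
    fn ring_overwrites_oldest() {
        let res = Resolution::new(160, 120);
        let mut ring = FrameRing::new(2);
        for seq in 0..3 {
            ring.push(VideoFrame::from_vec(vec![0u8; 16], res, PixelFormat::Mjpeg, 0, seq));
        }
        // Capacity is 2, so only frames 1 and 2 remain; 2 is the latest.
        assert_eq!(ring.len(), 2);
        assert_eq!(ring.latest().unwrap().sequence, 2);
    }
}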
539
src/video/h264_pipeline.rs
Normal file
@@ -0,0 +1,539 @@
//! H264 video encoding pipeline for WebRTC streaming
//!
//! This module provides a complete H264 encoding pipeline that connects:
//! 1. Video capture (YUYV/MJPEG from V4L2)
//! 2. Pixel conversion (YUYV/RGB → NV12) or MJPEG decode
//! 3. H264 encoding (via hwcodec)
//! 4. RTP packetization and WebRTC track output

use std::sync::Arc;
use std::time::{Duration, Instant};
use tokio::sync::{broadcast, watch, Mutex};
use tracing::{debug, error, info, warn};

use crate::error::{AppError, Result};
use crate::video::convert::Nv12Converter;
use crate::video::decoder::mjpeg::{MjpegVaapiDecoder, MjpegVaapiDecoderConfig};
use crate::video::encoder::h264::{H264Config, H264Encoder};
use crate::video::format::{PixelFormat, Resolution};
use crate::webrtc::rtp::{H264VideoTrack, H264VideoTrackConfig};

/// H264 pipeline configuration
#[derive(Debug, Clone)]
pub struct H264PipelineConfig {
    /// Input resolution
    pub resolution: Resolution,
    /// Input pixel format (YUYV, NV12, etc.)
    pub input_format: PixelFormat,
    /// Target bitrate in kbps
    pub bitrate_kbps: u32,
    /// Target FPS
    pub fps: u32,
    /// GOP size (keyframe interval in frames)
    pub gop_size: u32,
    /// Track ID for WebRTC
    pub track_id: String,
    /// Stream ID for WebRTC
    pub stream_id: String,
}

impl Default for H264PipelineConfig {
    fn default() -> Self {
        Self {
            resolution: Resolution::HD720,
            input_format: PixelFormat::Yuyv,
            bitrate_kbps: 8000,
            fps: 30,
            gop_size: 30,
            track_id: "video0".to_string(),
            stream_id: "one-kvm-stream".to_string(),
        }
    }
}

/// H264 pipeline statistics
#[derive(Debug, Clone, Default)]
pub struct H264PipelineStats {
    /// Total frames captured
    pub frames_captured: u64,
    /// Total frames encoded
    pub frames_encoded: u64,
    /// Frames dropped (encoding too slow)
    pub frames_dropped: u64,
    /// Total bytes encoded
    pub bytes_encoded: u64,
    /// Keyframes encoded
    pub keyframes_encoded: u64,
    /// Average encoding time per frame (ms)
    pub avg_encode_time_ms: f32,
    /// Current encoding FPS
    pub current_fps: f32,
    /// Errors encountered
    pub errors: u64,
}

/// H264 video encoding pipeline
pub struct H264Pipeline {
    config: H264PipelineConfig,
    /// H264 encoder instance
    encoder: Arc<Mutex<Option<H264Encoder>>>,
    /// NV12 converter (for BGR24/RGB24/YUYV → NV12)
    nv12_converter: Arc<Mutex<Option<Nv12Converter>>>,
    /// MJPEG VAAPI decoder (for MJPEG input, outputs NV12)
    mjpeg_decoder: Arc<Mutex<Option<MjpegVaapiDecoder>>>,
    /// WebRTC video track
    video_track: Arc<H264VideoTrack>,
    /// Pipeline statistics
    stats: Arc<Mutex<H264PipelineStats>>,
    /// Running state
    running: watch::Sender<bool>,
    /// Encode time accumulator for averaging
    encode_times: Arc<Mutex<Vec<f32>>>,
}

impl H264Pipeline {
    /// Create a new H264 pipeline
    pub fn new(config: H264PipelineConfig) -> Result<Self> {
        info!(
            "Creating H264 pipeline: {}x{} @ {} kbps, {} fps",
            config.resolution.width,
            config.resolution.height,
            config.bitrate_kbps,
            config.fps
        );

        // Determine encoder input format based on pipeline input
        // NV12 is optimal for VAAPI, use it for all formats
        // VAAPI encoders typically only support NV12 input
        let encoder_input_format = crate::video::encoder::h264::H264InputFormat::Nv12;

        // Create H264 encoder with appropriate input format
        let encoder_config = H264Config {
            base: crate::video::encoder::traits::EncoderConfig::h264(
                config.resolution,
                config.bitrate_kbps,
            ),
            bitrate_kbps: config.bitrate_kbps,
            gop_size: config.gop_size,
            fps: config.fps,
            input_format: encoder_input_format,
        };

        let encoder = H264Encoder::new(encoder_config)?;
        info!(
            "H264 encoder created: {} ({}) with {:?} input",
            encoder.codec_name(),
            encoder.encoder_type(),
            encoder_input_format
        );

        // Create NV12 converter or MJPEG decoder based on input format
        // All formats are converted to NV12 for VAAPI encoder
        let (nv12_converter, mjpeg_decoder) = match config.input_format {
            // NV12 input - direct passthrough
            PixelFormat::Nv12 => {
                info!("NV12 input: direct passthrough to encoder");
                (None, None)
            }

            // YUYV (4:2:2 packed) → NV12
            PixelFormat::Yuyv => {
                info!("YUYV input: converting to NV12");
                (Some(Nv12Converter::yuyv_to_nv12(config.resolution)), None)
            }

            // RGB24 → NV12
            PixelFormat::Rgb24 => {
                info!("RGB24 input: converting to NV12");
                (Some(Nv12Converter::rgb24_to_nv12(config.resolution)), None)
            }

            // BGR24 → NV12
            PixelFormat::Bgr24 => {
                info!("BGR24 input: converting to NV12");
                (Some(Nv12Converter::bgr24_to_nv12(config.resolution)), None)
            }

            // MJPEG/JPEG → NV12 (via hwcodec decoder)
            PixelFormat::Mjpeg | PixelFormat::Jpeg => {
                let decoder_config = MjpegVaapiDecoderConfig {
                    resolution: config.resolution,
                    use_hwaccel: true,
                };
                let decoder = MjpegVaapiDecoder::new(decoder_config)?;
                info!(
                    "MJPEG decoder created for H264 pipeline (outputs NV12)"
                );
                (None, Some(decoder))
            }

            _ => {
                return Err(AppError::VideoError(format!(
                    "Unsupported input format for H264 pipeline: {}",
                    config.input_format
                )));
            }
        };

        // Create WebRTC video track
        let track_config = H264VideoTrackConfig {
            track_id: config.track_id.clone(),
            stream_id: config.stream_id.clone(),
            resolution: config.resolution,
            bitrate_kbps: config.bitrate_kbps,
            fps: config.fps,
            profile_level_id: None, // Let browser negotiate the best profile
        };
        let video_track = Arc::new(H264VideoTrack::new(track_config));

        let (running_tx, _) = watch::channel(false);

        Ok(Self {
            config,
            encoder: Arc::new(Mutex::new(Some(encoder))),
            nv12_converter: Arc::new(Mutex::new(nv12_converter)),
            mjpeg_decoder: Arc::new(Mutex::new(mjpeg_decoder)),
            video_track,
            stats: Arc::new(Mutex::new(H264PipelineStats::default())),
            running: running_tx,
            encode_times: Arc::new(Mutex::new(Vec::with_capacity(100))),
        })
    }

    /// Get the WebRTC video track
    pub fn video_track(&self) -> Arc<H264VideoTrack> {
        self.video_track.clone()
    }

    /// Get current statistics
    pub async fn stats(&self) -> H264PipelineStats {
        self.stats.lock().await.clone()
    }

    /// Check if pipeline is running
    pub fn is_running(&self) -> bool {
        *self.running.borrow()
    }

    /// Start the encoding pipeline
    ///
    /// This starts a background task that receives raw frames from the receiver,
    /// encodes them to H264, and sends them to the WebRTC track.
    pub async fn start(&self, mut frame_rx: broadcast::Receiver<Vec<u8>>) {
        if *self.running.borrow() {
            warn!("H264 pipeline already running");
            return;
        }

        let _ = self.running.send(true);
        info!("Starting H264 pipeline (input format: {})", self.config.input_format);

        let encoder = self.encoder.lock().await.take();
        let nv12_converter = self.nv12_converter.lock().await.take();
        let mjpeg_decoder = self.mjpeg_decoder.lock().await.take();
        let video_track = self.video_track.clone();
        let stats = self.stats.clone();
        let encode_times = self.encode_times.clone();
        let config = self.config.clone();
        let mut running_rx = self.running.subscribe();

        // Spawn encoding task
        tokio::spawn(async move {
            let mut encoder = match encoder {
                Some(e) => e,
                None => {
                    error!("No encoder available");
                    return;
                }
            };

            let mut nv12_converter = nv12_converter;
            let mut mjpeg_decoder = mjpeg_decoder;
            let mut frame_count: u64 = 0;
            let mut last_fps_time = Instant::now();
            let mut fps_frame_count: u64 = 0;

            // Pre-allocated NV12 buffer for MJPEG decoder output (avoids per-frame allocation)
            let nv12_size = (config.resolution.width * config.resolution.height * 3 / 2) as usize;
            let mut nv12_buffer = vec![0u8; nv12_size];

            // Flag for one-time warnings
            let mut size_mismatch_warned = false;

            loop {
                tokio::select! {
                    biased;

                    _ = running_rx.changed() => {
                        if !*running_rx.borrow() {
                            info!("H264 pipeline stopping");
                            break;
                        }
                    }

                    result = frame_rx.recv() => {
                        match result {
                            Ok(raw_frame) => {
                                let start = Instant::now();

                                // Validate frame size for uncompressed formats
                                if let Some(expected_size) = config.input_format.frame_size(config.resolution) {
                                    if raw_frame.len() != expected_size && !size_mismatch_warned {
                                        warn!(
                                            "Frame size mismatch: got {} bytes, expected {} for {} {}x{}",
                                            raw_frame.len(),
                                            expected_size,
                                            config.input_format,
                                            config.resolution.width,
                                            config.resolution.height
                                        );
                                        size_mismatch_warned = true;
                                    }
                                }

                                // Update captured count
                                {
                                    let mut s = stats.lock().await;
                                    s.frames_captured += 1;
                                }

                                // Convert to NV12 for VAAPI encoder
                                // MJPEG -> NV12 (via VAAPI decoder)
                                // BGR24/RGB24/YUYV -> NV12 (via NV12 converter)
                                // NV12 -> pass through
                                //
                                // Optimized: avoid unnecessary allocations and copies
                                frame_count += 1;
                                fps_frame_count += 1;
                                let pts_ms = (frame_count * 1000 / config.fps as u64) as i64;

                                let encode_result = if let Some(ref mut decoder) = mjpeg_decoder {
                                    // MJPEG input - decode to NV12 via VAAPI
                                    match decoder.decode(&raw_frame) {
                                        Ok(nv12_frame) => {
                                            // Calculate required size for this frame
                                            let required_size = (nv12_frame.width * nv12_frame.height * 3 / 2) as usize;

                                            // Resize buffer if needed (handles resolution changes)
                                            if nv12_buffer.len() < required_size {
                                                debug!(
                                                    "Resizing NV12 buffer: {} -> {} bytes (resolution: {}x{})",
                                                    nv12_buffer.len(), required_size,
                                                    nv12_frame.width, nv12_frame.height
                                                );
                                                nv12_buffer.resize(required_size, 0);
                                            }

                                            // Copy to pre-allocated buffer (guaranteed to fit after resize)
                                            let written = nv12_frame.copy_to_packed_nv12(&mut nv12_buffer)
                                                .expect("BUG: buffer too small after resize");
                                            encoder.encode_raw(&nv12_buffer[..written], pts_ms)
                                        }
                                        Err(e) => {
                                            error!("MJPEG VAAPI decode failed: {}", e);
                                            let mut s = stats.lock().await;
                                            s.errors += 1;
                                            continue;
                                        }
                                    }
                                } else if let Some(ref mut conv) = nv12_converter {
                                    // BGR24/RGB24/YUYV input - convert to NV12
                                    // Optimized: pass reference directly without copy
                                    match conv.convert(&raw_frame) {
                                        Ok(nv12_data) => encoder.encode_raw(nv12_data, pts_ms),
                                        Err(e) => {
                                            error!("NV12 conversion failed: {}", e);
                                            let mut s = stats.lock().await;
                                            s.errors += 1;
                                            continue;
                                        }
                                    }
                                } else {
                                    // NV12 input - pass reference directly
                                    encoder.encode_raw(&raw_frame, pts_ms)
                                };

                                match encode_result {
                                    Ok(frames) => {
                                        if !frames.is_empty() {
                                            let frame = &frames[0];
                                            let is_keyframe = frame.key == 1;

                                            // Send to WebRTC track
                                            let duration = Duration::from_millis(
                                                1000 / config.fps as u64
                                            );

                                            if let Err(e) = video_track
                                                .write_frame(&frame.data, duration, is_keyframe)
                                                .await
                                            {
                                                error!("Failed to write frame to track: {}", e);
                                                let mut s = stats.lock().await;
                                                s.errors += 1;
                                            } else {
                                                // Update stats
                                                let encode_time = start.elapsed().as_secs_f32() * 1000.0;
                                                let mut s = stats.lock().await;
                                                s.frames_encoded += 1;
                                                s.bytes_encoded += frame.data.len() as u64;
                                                if is_keyframe {
                                                    s.keyframes_encoded += 1;
                                                }

                                                // Update encode time average
                                                let mut times = encode_times.lock().await;
                                                times.push(encode_time);
                                                if times.len() > 100 {
                                                    times.remove(0);
                                                }
                                                if !times.is_empty() {
                                                    s.avg_encode_time_ms =
                                                        times.iter().sum::<f32>() / times.len() as f32;
                                                }
                                            }
                                        }
                                    }
                                    Err(e) => {
                                        error!("Encoding failed: {}", e);
                                        let mut s = stats.lock().await;
                                        s.errors += 1;
                                    }
                                }

                                // Update FPS every second
                                if last_fps_time.elapsed() >= Duration::from_secs(1) {
                                    let mut s = stats.lock().await;
                                    s.current_fps = fps_frame_count as f32
                                        / last_fps_time.elapsed().as_secs_f32();
                                    fps_frame_count = 0;
                                    last_fps_time = Instant::now();
                                }
                            }
                            Err(broadcast::error::RecvError::Lagged(n)) => {
                                let mut s = stats.lock().await;
                                s.frames_dropped += n;
                            }
                            Err(broadcast::error::RecvError::Closed) => {
                                info!("Frame channel closed, stopping H264 pipeline");
                                break;
                            }
                        }
                    }
                }
            }

            info!("H264 pipeline task exited");
        });
    }

    /// Stop the encoding pipeline
    pub fn stop(&self) {
        if *self.running.borrow() {
            let _ = self.running.send(false);
            info!("Stopping H264 pipeline");
        }
    }

    /// Request a keyframe (force IDR)
    pub async fn request_keyframe(&self) {
        // Note: hwcodec doesn't support on-demand keyframe requests
        // The encoder will produce keyframes based on GOP size
        debug!("Keyframe requested (will occur at next GOP boundary)");
    }

    /// Update bitrate dynamically
    pub async fn set_bitrate(&self, bitrate_kbps: u32) -> Result<()> {
        if let Some(ref mut encoder) = *self.encoder.lock().await {
            encoder.set_bitrate(bitrate_kbps)?;
            info!("H264 pipeline bitrate updated to {} kbps", bitrate_kbps);
        }
        Ok(())
    }
}
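
// Illustrative lifecycle sketch (added here, not part of the original source):
// how a caller might wire capture output into the pipeline. The broadcast
// capacity of 4 and the missing capture loop are placeholders; real frames
// must be raw data in `config.input_format`.
#[allow(dead_code)]
async fn example_pipeline_lifecycle() -> Result<()> {
    let pipeline = H264Pipeline::new(H264PipelineConfig::default())?;
    // Hand the track to the WebRTC peer connection before starting.
    let _track = pipeline.video_track();

    let (frame_tx, frame_rx) = broadcast::channel::<Vec<u8>>(4);
    pipeline.start(frame_rx).await;

    // A capture task would repeatedly call `frame_tx.send(raw_frame)` here.
    drop(frame_tx);

    pipeline.stop();
    Ok(())
}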

/// Builder for H264 pipeline configuration
pub struct H264PipelineBuilder {
    config: H264PipelineConfig,
}

impl H264PipelineBuilder {
    pub fn new() -> Self {
        Self {
            config: H264PipelineConfig::default(),
        }
    }

    pub fn resolution(mut self, resolution: Resolution) -> Self {
        self.config.resolution = resolution;
        self
    }

    pub fn input_format(mut self, format: PixelFormat) -> Self {
        self.config.input_format = format;
        self
    }

    pub fn bitrate_kbps(mut self, bitrate: u32) -> Self {
        self.config.bitrate_kbps = bitrate;
        self
    }

    pub fn fps(mut self, fps: u32) -> Self {
        self.config.fps = fps;
        self
    }

    pub fn gop_size(mut self, gop: u32) -> Self {
        self.config.gop_size = gop;
        self
    }

    pub fn track_id(mut self, id: &str) -> Self {
        self.config.track_id = id.to_string();
        self
    }

    pub fn stream_id(mut self, id: &str) -> Self {
        self.config.stream_id = id.to_string();
        self
    }

    pub fn build(self) -> Result<H264Pipeline> {
        H264Pipeline::new(self.config)
    }
}

impl Default for H264PipelineBuilder {
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_pipeline_config_default() {
        let config = H264PipelineConfig::default();
        assert_eq!(config.resolution, Resolution::HD720);
        assert_eq!(config.bitrate_kbps, 8000);
        assert_eq!(config.fps, 30);
        assert_eq!(config.gop_size, 30);
    }

    #[test]
    fn test_pipeline_builder() {
        let builder = H264PipelineBuilder::new()
            .resolution(Resolution::HD1080)
            .bitrate_kbps(4000)
            .fps(60)
            .input_format(PixelFormat::Yuyv);

        assert_eq!(builder.config.resolution, Resolution::HD1080);
        assert_eq!(builder.config.bitrate_kbps, 4000);
        assert_eq!(builder.config.fps, 60);
        assert_eq!(builder.config.input_format, PixelFormat::Yuyv);
    }
}
29
src/video/mod.rs
Normal file
@@ -0,0 +1,29 @@
//! Video capture and streaming module
//!
//! This module provides V4L2 video capture, encoding, and streaming functionality.

pub mod capture;
pub mod convert;
pub mod decoder;
pub mod device;
pub mod encoder;
pub mod format;
pub mod frame;
pub mod h264_pipeline;
pub mod shared_video_pipeline;
pub mod stream_manager;
pub mod streamer;
pub mod video_session;

pub use capture::VideoCapturer;
pub use convert::{MjpegDecoder, MjpegToYuv420Converter, PixelConverter, Yuv420pBuffer};
pub use decoder::{MjpegVaapiDecoder, MjpegVaapiDecoderConfig};
pub use device::{VideoDevice, VideoDeviceInfo};
pub use encoder::{JpegEncoder, H264Encoder, H264EncoderType};
pub use format::PixelFormat;
pub use frame::VideoFrame;
pub use h264_pipeline::{H264Pipeline, H264PipelineBuilder, H264PipelineConfig};
pub use shared_video_pipeline::{EncodedVideoFrame, SharedVideoPipeline, SharedVideoPipelineConfig, SharedVideoPipelineStats};
pub use stream_manager::VideoStreamManager;
pub use streamer::{Streamer, StreamerState};
pub use video_session::{VideoSessionManager, VideoSessionManagerConfig, VideoSessionInfo, VideoSessionState, CodecInfo};
940
src/video/shared_video_pipeline.rs
Normal file
@@ -0,0 +1,940 @@
//! Universal shared video encoding pipeline
//!
//! Supports multiple codecs: H264, H265, VP8, VP9
//! A single encoder broadcasts to multiple WebRTC sessions.
//!
//! Architecture:
//! ```text
//! VideoCapturer (MJPEG/YUYV/NV12)
//!          |
//!          v  (broadcast::Receiver<VideoFrame>)
//! SharedVideoPipeline (single encoder)
//!          |
//!          v  (broadcast::Sender<EncodedVideoFrame>)
//!     ┌────┴────┬────────┬────────┐
//!     v         v        v        v
//! Session1  Session2  Session3  ...
//! ```

use bytes::Bytes;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::time::{Duration, Instant};
use tokio::sync::{broadcast, watch, Mutex, RwLock};
use tracing::{debug, error, info, warn};

use crate::error::{AppError, Result};
use crate::video::convert::{Nv12Converter, PixelConverter};
use crate::video::decoder::mjpeg::{MjpegTurboDecoder, MjpegVaapiDecoder, MjpegVaapiDecoderConfig};
use crate::video::encoder::h264::{H264Config, H264Encoder};
use crate::video::encoder::h265::{H265Config, H265Encoder};
use crate::video::encoder::registry::{EncoderBackend, EncoderRegistry, VideoEncoderType};
use crate::video::encoder::traits::EncoderConfig;
use crate::video::encoder::vp8::{VP8Config, VP8Encoder};
use crate::video::encoder::vp9::{VP9Config, VP9Encoder};
use crate::video::format::{PixelFormat, Resolution};
use crate::video::frame::VideoFrame;

/// Encoded video frame for distribution
#[derive(Debug, Clone)]
pub struct EncodedVideoFrame {
    /// Encoded data (Annex B for H264/H265, raw for VP8/VP9)
    pub data: Bytes,
    /// Presentation timestamp in milliseconds
    pub pts_ms: i64,
    /// Whether this is a keyframe
    pub is_keyframe: bool,
    /// Frame sequence number
    pub sequence: u64,
    /// Frame duration
    pub duration: Duration,
    /// Codec type
    pub codec: VideoEncoderType,
}

/// Shared video pipeline configuration
#[derive(Debug, Clone)]
pub struct SharedVideoPipelineConfig {
    /// Input resolution
    pub resolution: Resolution,
    /// Input pixel format
    pub input_format: PixelFormat,
    /// Output codec type
    pub output_codec: VideoEncoderType,
    /// Target bitrate in kbps
    pub bitrate_kbps: u32,
    /// Target FPS
    pub fps: u32,
    /// GOP size
    pub gop_size: u32,
    /// Encoder backend (None = auto select best available)
    pub encoder_backend: Option<EncoderBackend>,
}

impl Default for SharedVideoPipelineConfig {
    fn default() -> Self {
        Self {
            resolution: Resolution::HD720,
            input_format: PixelFormat::Yuyv,
            output_codec: VideoEncoderType::H264,
            bitrate_kbps: 8000,
            fps: 30,
            gop_size: 30,
            encoder_backend: None,
        }
    }
}

impl SharedVideoPipelineConfig {
    /// Create H264 config
    pub fn h264(resolution: Resolution, bitrate_kbps: u32) -> Self {
        Self {
            resolution,
            output_codec: VideoEncoderType::H264,
            bitrate_kbps,
            ..Default::default()
        }
    }

    /// Create H265 config
    pub fn h265(resolution: Resolution, bitrate_kbps: u32) -> Self {
        Self {
            resolution,
            output_codec: VideoEncoderType::H265,
            bitrate_kbps,
            ..Default::default()
        }
    }

    /// Create VP8 config
    pub fn vp8(resolution: Resolution, bitrate_kbps: u32) -> Self {
        Self {
            resolution,
            output_codec: VideoEncoderType::VP8,
            bitrate_kbps,
            ..Default::default()
        }
    }

    /// Create VP9 config
    pub fn vp9(resolution: Resolution, bitrate_kbps: u32) -> Self {
        Self {
            resolution,
            output_codec: VideoEncoderType::VP9,
            bitrate_kbps,
            ..Default::default()
        }
    }
}
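
// Illustrative example (added here, not part of the original source): the
// four codec constructors above differ only in `output_codec`; every other
// field falls back to the defaults.
#[allow(dead_code)]
fn example_shared_configs() {
    let h264 = SharedVideoPipelineConfig::h264(Resolution::HD1080, 4000);
    let vp9 = SharedVideoPipelineConfig::vp9(Resolution::HD1080, 4000);
    // Defaults (fps 30, GOP 30, auto backend selection) are retained.
    assert_eq!(h264.fps, 30);
    assert!(matches!(vp9.output_codec, VideoEncoderType::VP9));
}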

/// Pipeline statistics
#[derive(Debug, Clone, Default)]
pub struct SharedVideoPipelineStats {
pub frames_captured: u64,
pub frames_encoded: u64,
pub frames_dropped: u64,
pub bytes_encoded: u64,
pub keyframes_encoded: u64,
pub avg_encode_time_ms: f32,
pub current_fps: f32,
pub errors: u64,
pub subscribers: u64,
}

/// Universal video encoder trait object
#[allow(dead_code)]
trait VideoEncoderTrait: Send {
fn encode_raw(&mut self, data: &[u8], pts_ms: i64) -> Result<Vec<EncodedFrame>>;
fn set_bitrate(&mut self, bitrate_kbps: u32) -> Result<()>;
fn codec_name(&self) -> &str;
fn request_keyframe(&mut self);
}

/// Encoded frame from encoder
#[allow(dead_code)]
struct EncodedFrame {
data: Vec<u8>,
pts: i64,
key: i32,
}

/// H264 encoder wrapper
struct H264EncoderWrapper(H264Encoder);

impl VideoEncoderTrait for H264EncoderWrapper {
fn encode_raw(&mut self, data: &[u8], pts_ms: i64) -> Result<Vec<EncodedFrame>> {
let frames = self.0.encode_raw(data, pts_ms)?;
Ok(frames
.into_iter()
.map(|f| EncodedFrame {
data: f.data,
pts: f.pts,
key: f.key,
})
.collect())
}

fn set_bitrate(&mut self, bitrate_kbps: u32) -> Result<()> {
self.0.set_bitrate(bitrate_kbps)
}

fn codec_name(&self) -> &str {
self.0.codec_name()
}

fn request_keyframe(&mut self) {
self.0.request_keyframe()
}
}

/// H265 encoder wrapper
struct H265EncoderWrapper(H265Encoder);

impl VideoEncoderTrait for H265EncoderWrapper {
fn encode_raw(&mut self, data: &[u8], pts_ms: i64) -> Result<Vec<EncodedFrame>> {
let frames = self.0.encode_raw(data, pts_ms)?;
Ok(frames
.into_iter()
.map(|f| EncodedFrame {
data: f.data,
pts: f.pts,
key: f.key,
})
.collect())
}

fn set_bitrate(&mut self, bitrate_kbps: u32) -> Result<()> {
self.0.set_bitrate(bitrate_kbps)
}

fn codec_name(&self) -> &str {
self.0.codec_name()
}

fn request_keyframe(&mut self) {
self.0.request_keyframe()
}
}

/// VP8 encoder wrapper
struct VP8EncoderWrapper(VP8Encoder);

impl VideoEncoderTrait for VP8EncoderWrapper {
fn encode_raw(&mut self, data: &[u8], pts_ms: i64) -> Result<Vec<EncodedFrame>> {
let frames = self.0.encode_raw(data, pts_ms)?;
Ok(frames
.into_iter()
.map(|f| EncodedFrame {
data: f.data,
pts: f.pts,
key: f.key,
})
.collect())
}

fn set_bitrate(&mut self, bitrate_kbps: u32) -> Result<()> {
self.0.set_bitrate(bitrate_kbps)
}

fn codec_name(&self) -> &str {
self.0.codec_name()
}

fn request_keyframe(&mut self) {
// VP8 encoder doesn't support request_keyframe yet
}
}

/// VP9 encoder wrapper
struct VP9EncoderWrapper(VP9Encoder);

impl VideoEncoderTrait for VP9EncoderWrapper {
fn encode_raw(&mut self, data: &[u8], pts_ms: i64) -> Result<Vec<EncodedFrame>> {
let frames = self.0.encode_raw(data, pts_ms)?;
Ok(frames
.into_iter()
.map(|f| EncodedFrame {
data: f.data,
pts: f.pts,
key: f.key,
})
.collect())
}

fn set_bitrate(&mut self, bitrate_kbps: u32) -> Result<()> {
self.0.set_bitrate(bitrate_kbps)
}

fn codec_name(&self) -> &str {
self.0.codec_name()
}

fn request_keyframe(&mut self) {
// VP9 encoder doesn't support request_keyframe yet
}
}

/// Universal shared video pipeline
pub struct SharedVideoPipeline {
config: RwLock<SharedVideoPipelineConfig>,
encoder: Mutex<Option<Box<dyn VideoEncoderTrait + Send>>>,
nv12_converter: Mutex<Option<Nv12Converter>>,
yuv420p_converter: Mutex<Option<PixelConverter>>,
mjpeg_decoder: Mutex<Option<MjpegVaapiDecoder>>,
/// Turbojpeg decoder for direct MJPEG->YUV420P (optimized for software encoders)
mjpeg_turbo_decoder: Mutex<Option<MjpegTurboDecoder>>,
nv12_buffer: Mutex<Vec<u8>>,
/// YUV420P buffer for turbojpeg decoder output
yuv420p_buffer: Mutex<Vec<u8>>,
/// Whether the encoder needs YUV420P (true) or NV12 (false)
encoder_needs_yuv420p: Mutex<bool>,
frame_tx: broadcast::Sender<EncodedVideoFrame>,
stats: Mutex<SharedVideoPipelineStats>,
running: watch::Sender<bool>,
running_rx: watch::Receiver<bool>,
encode_times: Mutex<Vec<f32>>,
sequence: Mutex<u64>,
/// Atomic flag for keyframe request (avoids lock contention)
keyframe_requested: AtomicBool,
}

impl SharedVideoPipeline {
/// Create a new shared video pipeline
pub fn new(config: SharedVideoPipelineConfig) -> Result<Arc<Self>> {
info!(
"Creating shared video pipeline: {} {}x{} @ {} kbps (input: {})",
config.output_codec,
config.resolution.width,
config.resolution.height,
config.bitrate_kbps,
config.input_format
);

let (frame_tx, _) = broadcast::channel(16);
let (running_tx, running_rx) = watch::channel(false);
let nv12_size = (config.resolution.width * config.resolution.height * 3 / 2) as usize;
let yuv420p_size = nv12_size; // Same size as NV12

let pipeline = Arc::new(Self {
config: RwLock::new(config),
encoder: Mutex::new(None),
nv12_converter: Mutex::new(None),
yuv420p_converter: Mutex::new(None),
mjpeg_decoder: Mutex::new(None),
mjpeg_turbo_decoder: Mutex::new(None),
nv12_buffer: Mutex::new(vec![0u8; nv12_size]),
yuv420p_buffer: Mutex::new(vec![0u8; yuv420p_size]),
encoder_needs_yuv420p: Mutex::new(false),
frame_tx,
stats: Mutex::new(SharedVideoPipelineStats::default()),
running: running_tx,
running_rx,
encode_times: Mutex::new(Vec::with_capacity(100)),
sequence: Mutex::new(0),
keyframe_requested: AtomicBool::new(false),
});

Ok(pipeline)
}
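
// Startup sketch (hedged): how a caller is expected to wire the pipeline to a
// capture source. `frame_tx` is an assumed `broadcast::Sender<VideoFrame>`
// exposed by the capturer.
//
// let pipeline = SharedVideoPipeline::new(SharedVideoPipelineConfig::default())?;
// pipeline.start(frame_tx.subscribe()).await?;
// let mut encoded_rx = pipeline.subscribe();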

/// Initialize encoder based on config
async fn init_encoder(&self) -> Result<()> {
let config = self.config.read().await.clone();
let registry = EncoderRegistry::global();

// Helper to get codec name for specific backend
let get_codec_name = |format: VideoEncoderType, backend: Option<EncoderBackend>| -> Option<String> {
match backend {
Some(b) => registry.encoder_with_backend(format, b).map(|e| e.codec_name.clone()),
None => registry.best_encoder(format, false).map(|e| e.codec_name.clone()),
}
};

// Create encoder based on codec type
let encoder: Box<dyn VideoEncoderTrait + Send> = match config.output_codec {
VideoEncoderType::H264 => {
let encoder_config = H264Config {
base: EncoderConfig::h264(config.resolution, config.bitrate_kbps),
bitrate_kbps: config.bitrate_kbps,
gop_size: config.gop_size,
fps: config.fps,
input_format: crate::video::encoder::h264::H264InputFormat::Nv12,
};

let encoder = if let Some(ref backend) = config.encoder_backend {
// Specific backend requested
let codec_name = get_codec_name(VideoEncoderType::H264, Some(*backend))
.ok_or_else(|| AppError::VideoError(format!(
"Backend {:?} does not support H.264", backend
)))?;
info!("Creating H264 encoder with backend {:?} (codec: {})", backend, codec_name);
H264Encoder::with_codec(encoder_config, &codec_name)?
} else {
// Auto select
H264Encoder::new(encoder_config)?
};

info!("Created H264 encoder: {}", encoder.codec_name());
Box::new(H264EncoderWrapper(encoder))
}
VideoEncoderType::H265 => {
let encoder_config = H265Config::low_latency(config.resolution, config.bitrate_kbps);

let encoder = if let Some(ref backend) = config.encoder_backend {
let codec_name = get_codec_name(VideoEncoderType::H265, Some(*backend))
.ok_or_else(|| AppError::VideoError(format!(
"Backend {:?} does not support H.265", backend
)))?;
info!("Creating H265 encoder with backend {:?} (codec: {})", backend, codec_name);
H265Encoder::with_codec(encoder_config, &codec_name)?
} else {
H265Encoder::new(encoder_config)?
};

info!("Created H265 encoder: {}", encoder.codec_name());
Box::new(H265EncoderWrapper(encoder))
}
VideoEncoderType::VP8 => {
let encoder_config = VP8Config::low_latency(config.resolution, config.bitrate_kbps);

let encoder = if let Some(ref backend) = config.encoder_backend {
let codec_name = get_codec_name(VideoEncoderType::VP8, Some(*backend))
.ok_or_else(|| AppError::VideoError(format!(
"Backend {:?} does not support VP8", backend
)))?;
info!("Creating VP8 encoder with backend {:?} (codec: {})", backend, codec_name);
VP8Encoder::with_codec(encoder_config, &codec_name)?
} else {
VP8Encoder::new(encoder_config)?
};

info!("Created VP8 encoder: {}", encoder.codec_name());
Box::new(VP8EncoderWrapper(encoder))
}
VideoEncoderType::VP9 => {
let encoder_config = VP9Config::low_latency(config.resolution, config.bitrate_kbps);

let encoder = if let Some(ref backend) = config.encoder_backend {
let codec_name = get_codec_name(VideoEncoderType::VP9, Some(*backend))
.ok_or_else(|| AppError::VideoError(format!(
"Backend {:?} does not support VP9", backend
)))?;
info!("Creating VP9 encoder with backend {:?} (codec: {})", backend, codec_name);
VP9Encoder::with_codec(encoder_config, &codec_name)?
} else {
VP9Encoder::new(encoder_config)?
};

info!("Created VP9 encoder: {}", encoder.codec_name());
Box::new(VP9EncoderWrapper(encoder))
}
};

// Determine if encoder needs YUV420P (software encoders) or NV12 (hardware encoders)
let codec_name = encoder.codec_name();
let needs_yuv420p = codec_name.contains("libvpx") || codec_name.contains("libx265");

info!(
"Encoder {} needs {} format",
codec_name,
if needs_yuv420p { "YUV420P" } else { "NV12" }
);

// Create converter or decoder based on input format and encoder needs
info!("Initializing input format handler for: {} -> {}",
config.input_format,
if needs_yuv420p { "YUV420P" } else { "NV12" });

let (nv12_converter, yuv420p_converter, mjpeg_decoder, mjpeg_turbo_decoder) = if needs_yuv420p {
// Software encoder needs YUV420P
match config.input_format {
PixelFormat::Yuv420 => {
info!("Using direct YUV420P input (no conversion)");
(None, None, None, None)
}
PixelFormat::Yuyv => {
info!("Using YUYV->YUV420P converter");
(None, Some(PixelConverter::yuyv_to_yuv420p(config.resolution)), None, None)
}
PixelFormat::Nv12 => {
info!("Using NV12->YUV420P converter");
(None, Some(PixelConverter::nv12_to_yuv420p(config.resolution)), None, None)
}
PixelFormat::Rgb24 => {
info!("Using RGB24->YUV420P converter");
(None, Some(PixelConverter::rgb24_to_yuv420p(config.resolution)), None, None)
}
PixelFormat::Bgr24 => {
info!("Using BGR24->YUV420P converter");
(None, Some(PixelConverter::bgr24_to_yuv420p(config.resolution)), None, None)
}
PixelFormat::Mjpeg | PixelFormat::Jpeg => {
// Use turbojpeg for direct MJPEG->YUV420P (no intermediate NV12)
info!("Using turbojpeg MJPEG decoder (direct YUV420P output)");
let turbo_decoder = MjpegTurboDecoder::new(config.resolution)?;
(None, None, None, Some(turbo_decoder))
}
_ => {
return Err(AppError::VideoError(format!(
"Unsupported input format: {}",
config.input_format
)));
}
}
} else {
// Hardware encoder needs NV12
match config.input_format {
PixelFormat::Nv12 => {
info!("Using direct NV12 input (no conversion)");
(None, None, None, None)
}
PixelFormat::Yuyv => {
info!("Using YUYV->NV12 converter");
(Some(Nv12Converter::yuyv_to_nv12(config.resolution)), None, None, None)
}
PixelFormat::Rgb24 => {
info!("Using RGB24->NV12 converter");
(Some(Nv12Converter::rgb24_to_nv12(config.resolution)), None, None, None)
}
PixelFormat::Bgr24 => {
info!("Using BGR24->NV12 converter");
(Some(Nv12Converter::bgr24_to_nv12(config.resolution)), None, None, None)
}
PixelFormat::Mjpeg | PixelFormat::Jpeg => {
info!("Using MJPEG decoder (NV12 output)");
let decoder_config = MjpegVaapiDecoderConfig {
resolution: config.resolution,
use_hwaccel: true,
};
let decoder = MjpegVaapiDecoder::new(decoder_config)?;
(None, None, Some(decoder), None)
}
_ => {
return Err(AppError::VideoError(format!(
"Unsupported input format: {}",
config.input_format
)));
}
}
};

*self.encoder.lock().await = Some(encoder);
*self.nv12_converter.lock().await = nv12_converter;
*self.yuv420p_converter.lock().await = yuv420p_converter;
*self.mjpeg_decoder.lock().await = mjpeg_decoder;
*self.mjpeg_turbo_decoder.lock().await = mjpeg_turbo_decoder;
*self.encoder_needs_yuv420p.lock().await = needs_yuv420p;

Ok(())
}

/// Subscribe to encoded frames
pub fn subscribe(&self) -> broadcast::Receiver<EncodedVideoFrame> {
self.frame_tx.subscribe()
}

/// Get subscriber count
pub fn subscriber_count(&self) -> usize {
self.frame_tx.receiver_count()
}

/// Request encoder to produce a keyframe on next encode
///
/// This is useful when a new client connects and needs an immediate
/// keyframe to start decoding the video stream.
///
/// Uses an atomic flag to avoid lock contention with the encoding loop.
pub async fn request_keyframe(&self) {
self.keyframe_requested.store(true, Ordering::Release);
info!("[Pipeline] Keyframe requested for new client");
}
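
// New-client sketch (hedged): subscribe before requesting the keyframe so the
// keyframe cannot be missed between the two calls.
//
// let mut rx = pipeline.subscribe();
// pipeline.request_keyframe().await;
// let frame = rx.recv().await; // a keyframe should arrive shortly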

/// Get current stats
pub async fn stats(&self) -> SharedVideoPipelineStats {
let mut stats = self.stats.lock().await.clone();
stats.subscribers = self.frame_tx.receiver_count() as u64;
stats
}

/// Check if running
pub fn is_running(&self) -> bool {
*self.running_rx.borrow()
}

/// Get current codec
pub async fn current_codec(&self) -> VideoEncoderType {
self.config.read().await.output_codec
}

/// Switch codec (requires restart)
pub async fn switch_codec(&self, codec: VideoEncoderType) -> Result<()> {
let was_running = self.is_running();

if was_running {
self.stop();
tokio::time::sleep(Duration::from_millis(100)).await;
}

{
let mut config = self.config.write().await;
config.output_codec = codec;
}

// Clear encoder state
*self.encoder.lock().await = None;
*self.nv12_converter.lock().await = None;
*self.yuv420p_converter.lock().await = None;
*self.mjpeg_decoder.lock().await = None;
*self.mjpeg_turbo_decoder.lock().await = None;
*self.encoder_needs_yuv420p.lock().await = false;

info!("Switched to {} codec", codec);
Ok(())
}
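
// Codec-switch sketch (hedged): switch_codec() clears the encoder state, so
// the caller must start the pipeline again with a fresh frame receiver.
//
// pipeline.switch_codec(VideoEncoderType::VP9).await?;
// pipeline.start(frame_tx.subscribe()).await?;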

/// Start the pipeline
pub async fn start(self: &Arc<Self>, mut frame_rx: broadcast::Receiver<VideoFrame>) -> Result<()> {
if *self.running_rx.borrow() {
warn!("Pipeline already running");
return Ok(());
}

self.init_encoder().await?;
let _ = self.running.send(true);

let config = self.config.read().await.clone();
info!("Starting {} pipeline", config.output_codec);

let pipeline = self.clone();

tokio::spawn(async move {
let mut frame_count: u64 = 0;
let mut last_fps_time = Instant::now();
let mut fps_frame_count: u64 = 0;
let mut running_rx = pipeline.running_rx.clone();

loop {
tokio::select! {
biased;

_ = running_rx.changed() => {
if !*running_rx.borrow() {
break;
}
}

result = frame_rx.recv() => {
match result {
Ok(video_frame) => {
pipeline.stats.lock().await.frames_captured += 1;

if pipeline.frame_tx.receiver_count() == 0 {
continue;
}

let start = Instant::now();

match pipeline.encode_frame(&video_frame, frame_count).await {
Ok(Some(encoded_frame)) => {
let encode_time = start.elapsed().as_secs_f32() * 1000.0;
let _ = pipeline.frame_tx.send(encoded_frame.clone());

let is_keyframe = encoded_frame.is_keyframe;

// Update stats
{
let mut s = pipeline.stats.lock().await;
s.frames_encoded += 1;
s.bytes_encoded += encoded_frame.data.len() as u64;
if is_keyframe {
s.keyframes_encoded += 1;
}

let mut times = pipeline.encode_times.lock().await;
times.push(encode_time);
if times.len() > 100 {
times.remove(0);
}
if !times.is_empty() {
s.avg_encode_time_ms = times.iter().sum::<f32>() / times.len() as f32;
}
}

frame_count += 1;
fps_frame_count += 1;
}
Ok(None) => {}
Err(e) => {
error!("Encoding failed: {}", e);
pipeline.stats.lock().await.errors += 1;
}
}

if last_fps_time.elapsed() >= Duration::from_secs(1) {
let mut s = pipeline.stats.lock().await;
s.current_fps = fps_frame_count as f32 / last_fps_time.elapsed().as_secs_f32();
fps_frame_count = 0;
last_fps_time = Instant::now();
}
}
Err(broadcast::error::RecvError::Lagged(n)) => {
pipeline.stats.lock().await.frames_dropped += n;
}
Err(broadcast::error::RecvError::Closed) => {
break;
}
}
}
}
}

info!("Video pipeline stopped");
});

Ok(())
}

/// Encode a single frame
async fn encode_frame(&self, frame: &VideoFrame, frame_count: u64) -> Result<Option<EncodedVideoFrame>> {
let config = self.config.read().await;
let raw_frame = frame.data();
let fps = config.fps;
let codec = config.output_codec;
drop(config);

let pts_ms = (frame_count * 1000 / fps as u64) as i64;

// Debug log for H265
if codec == VideoEncoderType::H265 && frame_count % 30 == 1 {
debug!(
"[Pipeline-H265] Processing frame #{}: input_size={}, pts_ms={}",
frame_count,
raw_frame.len(),
pts_ms
);
}

let mut mjpeg_decoder = self.mjpeg_decoder.lock().await;
let mut mjpeg_turbo_decoder = self.mjpeg_turbo_decoder.lock().await;
let mut nv12_converter = self.nv12_converter.lock().await;
let mut yuv420p_converter = self.yuv420p_converter.lock().await;
let needs_yuv420p = *self.encoder_needs_yuv420p.lock().await;
let mut encoder_guard = self.encoder.lock().await;

let encoder = encoder_guard.as_mut().ok_or_else(|| {
AppError::VideoError("Encoder not initialized".to_string())
})?;

// Check and consume keyframe request (atomic, no lock contention)
if self.keyframe_requested.swap(false, Ordering::AcqRel) {
encoder.request_keyframe();
debug!("[Pipeline] Keyframe will be generated for this frame");
}

let encode_result = if mjpeg_turbo_decoder.is_some() {
// Optimized path: MJPEG -> YUV420P directly via turbojpeg (for software encoders)
let turbo = mjpeg_turbo_decoder.as_mut().unwrap();
let mut yuv420p_buffer = self.yuv420p_buffer.lock().await;
let written = turbo.decode_to_yuv420p_buffer(raw_frame, &mut yuv420p_buffer)
.map_err(|e| AppError::VideoError(format!("turbojpeg decode failed: {}", e)))?;
encoder.encode_raw(&yuv420p_buffer[..written], pts_ms)
} else if mjpeg_decoder.is_some() {
// MJPEG input: decode to NV12 (for hardware encoders)
let decoder = mjpeg_decoder.as_mut().unwrap();
let nv12_frame = decoder.decode(raw_frame)
.map_err(|e| AppError::VideoError(format!("MJPEG decode failed: {}", e)))?;

let required_size = (nv12_frame.width * nv12_frame.height * 3 / 2) as usize;
let mut nv12_buffer = self.nv12_buffer.lock().await;
if nv12_buffer.len() < required_size {
nv12_buffer.resize(required_size, 0);
}

let written = nv12_frame.copy_to_packed_nv12(&mut nv12_buffer)
.expect("Buffer too small");

// Debug log for H265 after MJPEG decode
if codec == VideoEncoderType::H265 && frame_count % 30 == 1 {
debug!(
"[Pipeline-H265] MJPEG decoded: nv12_size={}, frame_width={}, frame_height={}",
written, nv12_frame.width, nv12_frame.height
);
}

encoder.encode_raw(&nv12_buffer[..written], pts_ms)
} else if needs_yuv420p && yuv420p_converter.is_some() {
// Software encoder with direct input conversion to YUV420P
let conv = yuv420p_converter.as_mut().unwrap();
let yuv420p_data = conv.convert(raw_frame)
.map_err(|e| AppError::VideoError(format!("YUV420P conversion failed: {}", e)))?;
encoder.encode_raw(yuv420p_data, pts_ms)
} else if nv12_converter.is_some() {
// Hardware encoder with input conversion to NV12
let conv = nv12_converter.as_mut().unwrap();
let nv12_data = conv.convert(raw_frame)
.map_err(|e| AppError::VideoError(format!("NV12 conversion failed: {}", e)))?;
encoder.encode_raw(nv12_data, pts_ms)
} else {
// Direct input (already in correct format)
encoder.encode_raw(raw_frame, pts_ms)
};

drop(encoder_guard);
drop(nv12_converter);
drop(yuv420p_converter);
drop(mjpeg_decoder);
drop(mjpeg_turbo_decoder);

match encode_result {
Ok(frames) => {
if !frames.is_empty() {
let encoded = frames.into_iter().next().unwrap();
let is_keyframe = encoded.key == 1;

let sequence = {
let mut seq = self.sequence.lock().await;
*seq += 1;
*seq
};

// Debug log for H265 encoded frame
if codec == VideoEncoderType::H265 && (is_keyframe || frame_count % 30 == 1) {
debug!(
"[Pipeline-H265] Encoded frame #{}: output_size={}, keyframe={}, sequence={}",
frame_count,
encoded.data.len(),
is_keyframe,
sequence
);

// Log H265 NAL unit types in the encoded data
if is_keyframe {
let nal_types = parse_h265_nal_types(&encoded.data);
debug!("[Pipeline-H265] Keyframe NAL types: {:?}", nal_types);
}
}

let config = self.config.read().await;
Ok(Some(EncodedVideoFrame {
data: Bytes::from(encoded.data),
pts_ms,
is_keyframe,
sequence,
duration: Duration::from_millis(1000 / config.fps as u64),
codec,
}))
} else {
if codec == VideoEncoderType::H265 {
warn!("[Pipeline-H265] Encoder returned no frames for frame #{}", frame_count);
}
Ok(None)
}
}
Err(e) => {
if codec == VideoEncoderType::H265 {
error!("[Pipeline-H265] Encode error at frame #{}: {}", frame_count, e);
}
Err(e)
},
}
}

/// Stop the pipeline
pub fn stop(&self) {
if *self.running_rx.borrow() {
let _ = self.running.send(false);
info!("Stopping video pipeline");
}
}

/// Set bitrate
pub async fn set_bitrate(&self, bitrate_kbps: u32) -> Result<()> {
if let Some(ref mut encoder) = *self.encoder.lock().await {
encoder.set_bitrate(bitrate_kbps)?;
self.config.write().await.bitrate_kbps = bitrate_kbps;
}
Ok(())
}

/// Get current config
pub async fn config(&self) -> SharedVideoPipelineConfig {
self.config.read().await.clone()
}
}

impl Drop for SharedVideoPipeline {
fn drop(&mut self) {
let _ = self.running.send(false);
}
}

/// Parse H265 NAL unit types from Annex B data
fn parse_h265_nal_types(data: &[u8]) -> Vec<(u8, usize)> {
let mut nal_types = Vec::new();
let mut i = 0;

while i < data.len() {
// Find start code
let nal_start = if i + 4 <= data.len()
&& data[i] == 0
&& data[i + 1] == 0
&& data[i + 2] == 0
&& data[i + 3] == 1
{
i + 4
} else if i + 3 <= data.len()
&& data[i] == 0
&& data[i + 1] == 0
&& data[i + 2] == 1
{
i + 3
} else {
i += 1;
continue;
};

if nal_start >= data.len() {
break;
}

// Find next start code to get NAL size
let mut nal_end = data.len();
let mut j = nal_start + 1;
while j + 3 <= data.len() {
if (data[j] == 0 && data[j + 1] == 0 && data[j + 2] == 1)
|| (j + 4 <= data.len()
&& data[j] == 0
&& data[j + 1] == 0
&& data[j + 2] == 0
&& data[j + 3] == 1)
{
nal_end = j;
break;
}
j += 1;
}

// H265 NAL type is in bits 1-6 of first byte
let nal_type = (data[nal_start] >> 1) & 0x3F;
let nal_size = nal_end - nal_start;
nal_types.push((nal_type, nal_size));
i = nal_end;
}

nal_types
}

#[cfg(test)]
mod tests {
use super::*;

#[test]
fn test_pipeline_config() {
let h264 = SharedVideoPipelineConfig::h264(Resolution::HD1080, 4000);
assert_eq!(h264.output_codec, VideoEncoderType::H264);

let h265 = SharedVideoPipelineConfig::h265(Resolution::HD720, 2000);
assert_eq!(h265.output_codec, VideoEncoderType::H265);
}
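
// Added test (sketch): exercises parse_h265_nal_types on hand-built Annex B
// data. Types 32 (VPS) and 33 (SPS) follow from the header layout the parser
// assumes: the NAL type sits in bits 1-6 of the first byte after the start code.
#[test]
fn test_parse_h265_nal_types() {
// A 4-byte start code + VPS header, then a 3-byte start code + SPS header.
let data = [0u8, 0, 0, 1, 0x40, 0x01, 0, 0, 1, 0x42, 0x01];
let nals = parse_h265_nal_types(&data);
assert_eq!(nals, vec![(32, 2), (33, 2)]);
}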
}
574
src/video/stream_manager.rs
Normal file
@@ -0,0 +1,574 @@
//! Video Stream Manager
//!
//! Unified manager for video streaming that supports single-mode operation.
//! At any given time, only one streaming mode (MJPEG or WebRTC) is active.
//!
//! # Architecture
//!
//! ```text
//! VideoStreamManager (Public API - Single Entry Point)
//! │
//! ├── mode: StreamMode (current active mode)
//! │
//! ├── MJPEG Mode
//! │     └── Streamer ──► MjpegStreamHandler
//! │         (Future: MjpegStreamer with WsAudio/WsHid)
//! │
//! └── WebRTC Mode
//!       └── WebRtcStreamer ──► H264SessionManager
//!           (Extensible: H264, VP8, VP9, H265)
//! ```
//!
//! # Design Goals
//!
//! 1. **Single Entry Point**: All video operations go through VideoStreamManager
//! 2. **Mode Isolation**: MJPEG and WebRTC modes are cleanly separated
//! 3. **Extensible Codecs**: WebRTC supports multiple video codecs (H264 now, others reserved)
//! 4. **Simplified API**: Complex configuration flows are encapsulated
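//!
//! # Usage
//!
//! A minimal sketch, assuming `streamer` and `webrtc_streamer` are already
//! constructed and a Tokio runtime is running:
//!
//! ```ignore
//! let manager = VideoStreamManager::with_webrtc_streamer(streamer, webrtc_streamer);
//! manager.init_with_mode(StreamMode::Mjpeg).await?;
//! manager.switch_mode(StreamMode::WebRTC).await?;
//! ```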

use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use tokio::sync::RwLock;
use tracing::{debug, error, info, warn};

use crate::config::{ConfigStore, StreamMode};
use crate::error::Result;
use crate::events::{EventBus, SystemEvent, VideoDeviceInfo};
use crate::hid::HidController;
use crate::stream::MjpegStreamHandler;
use crate::video::format::{PixelFormat, Resolution};
use crate::video::streamer::{Streamer, StreamerState};
use crate::webrtc::WebRtcStreamer;

/// Video stream manager configuration
#[derive(Debug, Clone)]
pub struct StreamManagerConfig {
/// Initial streaming mode
pub mode: StreamMode,
/// Video device path
pub device: Option<String>,
/// Video format
pub format: PixelFormat,
/// Resolution
pub resolution: Resolution,
/// FPS
pub fps: u32,
}

impl Default for StreamManagerConfig {
fn default() -> Self {
Self {
mode: StreamMode::Mjpeg,
device: None,
format: PixelFormat::Mjpeg,
resolution: Resolution::HD1080,
fps: 30,
}
}
}

/// Unified video stream manager
///
/// Manages both MJPEG and WebRTC streaming modes, ensuring only one is active
/// at any given time. This reduces resource usage and simplifies the architecture.
///
/// # Components
///
/// - **Streamer**: Handles video capture and MJPEG distribution (current implementation)
/// - **WebRtcStreamer**: High-level WebRTC manager with multi-codec support (new)
/// - **H264SessionManager**: Legacy WebRTC manager (for backward compatibility)
pub struct VideoStreamManager {
/// Current streaming mode
mode: RwLock<StreamMode>,
/// MJPEG streamer (handles video capture and MJPEG distribution)
streamer: Arc<Streamer>,
/// WebRTC streamer (unified WebRTC manager with multi-codec support)
webrtc_streamer: Arc<WebRtcStreamer>,
/// Event bus for notifications
events: RwLock<Option<Arc<EventBus>>>,
/// Configuration store
config_store: RwLock<Option<ConfigStore>>,
/// Mode switching lock to prevent concurrent switch requests
switching: AtomicBool,
}

impl VideoStreamManager {
/// Create a new video stream manager with WebRtcStreamer
pub fn with_webrtc_streamer(
streamer: Arc<Streamer>,
webrtc_streamer: Arc<WebRtcStreamer>,
) -> Arc<Self> {
Arc::new(Self {
mode: RwLock::new(StreamMode::Mjpeg),
streamer,
webrtc_streamer,
events: RwLock::new(None),
config_store: RwLock::new(None),
switching: AtomicBool::new(false),
})
}

/// Check if mode switching is in progress
pub fn is_switching(&self) -> bool {
self.switching.load(Ordering::SeqCst)
}

/// Set event bus for notifications
pub async fn set_event_bus(&self, events: Arc<EventBus>) {
*self.events.write().await = Some(events);
}

/// Set configuration store
pub async fn set_config_store(&self, config: ConfigStore) {
*self.config_store.write().await = Some(config);
}

/// Get current streaming mode
pub async fn current_mode(&self) -> StreamMode {
self.mode.read().await.clone()
}

/// Check if MJPEG mode is active
pub async fn is_mjpeg_enabled(&self) -> bool {
*self.mode.read().await == StreamMode::Mjpeg
}

/// Check if WebRTC mode is active
pub async fn is_webrtc_enabled(&self) -> bool {
*self.mode.read().await == StreamMode::WebRTC
}

/// Get the underlying streamer (for MJPEG mode)
pub fn streamer(&self) -> Arc<Streamer> {
self.streamer.clone()
}

/// Get the WebRTC streamer (unified interface with multi-codec support)
pub fn webrtc_streamer(&self) -> Arc<WebRtcStreamer> {
self.webrtc_streamer.clone()
}

/// Get the MJPEG stream handler
pub fn mjpeg_handler(&self) -> Arc<MjpegStreamHandler> {
self.streamer.mjpeg_handler()
}

/// Initialize with a specific mode
pub async fn init_with_mode(self: &Arc<Self>, mode: StreamMode) -> Result<()> {
info!("Initializing video stream manager with mode: {:?}", mode);
*self.mode.write().await = mode.clone();

// Check if streamer is already initialized (capturer exists)
let needs_init = self.streamer.state().await == StreamerState::Uninitialized;

if needs_init {
match mode {
StreamMode::Mjpeg => {
// Initialize MJPEG streamer
if let Err(e) = self.streamer.init_auto().await {
warn!("Failed to auto-initialize MJPEG streamer: {}", e);
}
}
StreamMode::WebRTC => {
// WebRTC is initialized on-demand when clients connect
// But we still need to initialize the video capture
if let Err(e) = self.streamer.init_auto().await {
warn!("Failed to auto-initialize video capture for WebRTC: {}", e);
}
}
}
}

// Always reconnect frame source after initialization
// This ensures WebRTC has the correct frame_tx from the current capturer
if let Some(frame_tx) = self.streamer.frame_sender().await {
// Synchronize WebRTC config with actual capture format
let (format, resolution, fps) = self.streamer.current_video_config().await;
info!(
"Reconnecting frame source to WebRTC after init: {}x{} {:?} @ {}fps (receiver_count={})",
resolution.width, resolution.height, format, fps, frame_tx.receiver_count()
);
self.webrtc_streamer.update_video_config(resolution, format, fps).await;
self.webrtc_streamer.set_video_source(frame_tx).await;
}

Ok(())
}

/// Switch streaming mode
///
/// This will:
/// 1. Acquire switching lock (prevent concurrent switches)
/// 2. Notify clients of the mode change
/// 3. Stop the current mode
/// 4. Start the new mode (ensuring video capture runs for WebRTC)
/// 5. Update configuration
pub async fn switch_mode(self: &Arc<Self>, new_mode: StreamMode) -> Result<()> {
let current_mode = self.mode.read().await.clone();

if current_mode == new_mode {
debug!("Already in {:?} mode, no switch needed", new_mode);
return Ok(());
}

// Acquire switching lock - prevent concurrent switch requests
if self.switching.compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst).is_err() {
debug!("Mode switch already in progress, ignoring duplicate request");
return Ok(());
}

// Use a helper to ensure we release the lock when done
let result = self.do_switch_mode(current_mode, new_mode.clone()).await;
self.switching.store(false, Ordering::SeqCst);
result
}

/// Internal implementation of mode switching (called with lock held)
async fn do_switch_mode(self: &Arc<Self>, current_mode: StreamMode, new_mode: StreamMode) -> Result<()> {
info!("Switching video mode: {:?} -> {:?}", current_mode, new_mode);

// Get the actual mode strings (with codec info for WebRTC)
let new_mode_str = match &new_mode {
StreamMode::Mjpeg => "mjpeg".to_string(),
StreamMode::WebRTC => {
let codec = self.webrtc_streamer.current_video_codec().await;
codec_to_string(codec)
}
};
let previous_mode_str = match &current_mode {
StreamMode::Mjpeg => "mjpeg".to_string(),
StreamMode::WebRTC => {
let codec = self.webrtc_streamer.current_video_codec().await;
codec_to_string(codec)
}
};

// 1. Publish mode change event (clients should prepare to reconnect)
self.publish_event(SystemEvent::StreamModeChanged {
mode: new_mode_str,
previous_mode: previous_mode_str,
})
.await;

// 2. Stop current mode
match current_mode {
StreamMode::Mjpeg => {
info!("Stopping MJPEG streaming");
// Only stop MJPEG distribution, keep video capture running for WebRTC
self.streamer.mjpeg_handler().set_offline();
if let Err(e) = self.streamer.stop().await {
warn!("Error stopping MJPEG streamer: {}", e);
}
}
StreamMode::WebRTC => {
info!("Closing all WebRTC sessions");
let closed = self.webrtc_streamer.close_all_sessions().await;
if closed > 0 {
info!("Closed {} WebRTC sessions", closed);
}
}
}

// 3. Update mode
*self.mode.write().await = new_mode.clone();

// 4. Start new mode
match new_mode {
StreamMode::Mjpeg => {
info!("Starting MJPEG streaming");
if let Err(e) = self.streamer.start().await {
error!("Failed to start MJPEG streamer: {}", e);
return Err(e);
}
}
StreamMode::WebRTC => {
// WebRTC mode: ensure video capture is running for H264 encoding
info!("Activating WebRTC mode");

// Initialize streamer if not already initialized
if self.streamer.state().await == StreamerState::Uninitialized {
info!("Initializing video capture for WebRTC");
if let Err(e) = self.streamer.init_auto().await {
error!("Failed to initialize video capture for WebRTC: {}", e);
return Err(e);
}
}

// Start video capture if not streaming
if self.streamer.state().await != StreamerState::Streaming {
info!("Starting video capture for WebRTC");
if let Err(e) = self.streamer.start().await {
error!("Failed to start video capture for WebRTC: {}", e);
return Err(e);
}
}

// Wait a bit for capture to stabilize
tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;

// Connect frame source to WebRTC with correct format
if let Some(frame_tx) = self.streamer.frame_sender().await {
// Synchronize WebRTC config with actual capture format
let (format, resolution, fps) = self.streamer.current_video_config().await;
info!(
"Connecting frame source to WebRTC pipeline: {}x{} {:?} @ {}fps",
resolution.width, resolution.height, format, fps
);
self.webrtc_streamer.update_video_config(resolution, format, fps).await;
self.webrtc_streamer.set_video_source(frame_tx).await;
} else {
warn!("No frame source available for WebRTC - sessions may fail to receive video");
}

info!("WebRTC mode activated (sessions created on-demand)");
}
}

// 5. Update configuration store if available
if let Some(ref config_store) = *self.config_store.read().await {
let mut config = (*config_store.get()).clone();
config.stream.mode = new_mode.clone();
if let Err(e) = config_store.set(config).await {
warn!("Failed to persist stream mode to config: {}", e);
}
}

info!("Video mode switched to {:?}", new_mode);
Ok(())
}

/// Apply video configuration (device, format, resolution, fps)
///
/// This is called when video settings change. It will restart the
/// appropriate streaming pipeline based on current mode.
pub async fn apply_video_config(
self: &Arc<Self>,
device_path: &str,
format: PixelFormat,
resolution: Resolution,
fps: u32,
) -> Result<()> {
let mode = self.mode.read().await.clone();

info!(
"Applying video config: {} {:?} {}x{} @ {} fps (mode: {:?})",
device_path, format, resolution.width, resolution.height, fps, mode
);

// Apply to streamer (handles video capture)
self.streamer
.apply_video_config(device_path, format, resolution, fps)
.await?;

// Update WebRTC config if in WebRTC mode
if mode == StreamMode::WebRTC {
self.webrtc_streamer
.update_video_config(resolution, format, fps)
.await;

// Restart video capture for WebRTC (it was stopped during config change)
info!("Restarting video capture for WebRTC after config change");
if let Err(e) = self.streamer.start().await {
error!("Failed to restart video capture for WebRTC: {}", e);
return Err(e);
}

// Wait a bit for capture to stabilize
tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;

// Reconnect frame source with the new capturer
if let Some(frame_tx) = self.streamer.frame_sender().await {
// Note: update_video_config was already called above with the requested config,
// but verify that actual capture matches
let (actual_format, actual_resolution, actual_fps) = self.streamer.current_video_config().await;
if actual_format != format || actual_resolution != resolution || actual_fps != fps {
info!(
"Actual capture config differs from requested, updating WebRTC: {}x{} {:?} @ {}fps",
actual_resolution.width, actual_resolution.height, actual_format, actual_fps
);
self.webrtc_streamer.update_video_config(actual_resolution, actual_format, actual_fps).await;
}
info!("Reconnecting frame source to WebRTC after config change");
self.webrtc_streamer.set_video_source(frame_tx).await;
} else {
warn!("No frame source available after config change");
}
}

Ok(())
}

/// Start streaming (based on current mode)
pub async fn start(self: &Arc<Self>) -> Result<()> {
let mode = self.mode.read().await.clone();

match mode {
StreamMode::Mjpeg => {
self.streamer.start().await?;
}
StreamMode::WebRTC => {
// Ensure video capture is running
if self.streamer.state().await == StreamerState::Uninitialized {
self.streamer.init_auto().await?;
}
if self.streamer.state().await != StreamerState::Streaming {
self.streamer.start().await?;
}

// Connect frame source with correct format
if let Some(frame_tx) = self.streamer.frame_sender().await {
// Synchronize WebRTC config with actual capture format
let (format, resolution, fps) = self.streamer.current_video_config().await;
self.webrtc_streamer.update_video_config(resolution, format, fps).await;
self.webrtc_streamer.set_video_source(frame_tx).await;
}
}
}

Ok(())
}

/// Stop streaming
pub async fn stop(&self) -> Result<()> {
let mode = self.mode.read().await.clone();

match mode {
StreamMode::Mjpeg => {
self.streamer.stop().await?;
}
StreamMode::WebRTC => {
self.webrtc_streamer.close_all_sessions().await;
self.streamer.stop().await?;
}
}

Ok(())
}

/// Get video device info for device_info event
pub async fn get_video_info(&self) -> VideoDeviceInfo {
let stats = self.streamer.stats().await;
let state = self.streamer.state().await;
let device = self.streamer.current_device().await;
let mode = self.mode.read().await.clone();

// For WebRTC mode, return specific codec type (h264, h265, vp8, vp9)
// instead of generic "webrtc" to prevent frontend from defaulting to h264
let stream_mode = match &mode {
StreamMode::Mjpeg => "mjpeg".to_string(),
StreamMode::WebRTC => {
let codec = self.webrtc_streamer.current_video_codec().await;
codec_to_string(codec)
}
};

VideoDeviceInfo {
available: state != StreamerState::Uninitialized,
device: device.map(|d| d.path.display().to_string()),
format: stats.format,
resolution: stats.resolution,
fps: stats.target_fps,
online: state == StreamerState::Streaming,
stream_mode,
config_changing: self.streamer.is_config_changing(),
error: if state == StreamerState::Error {
Some("Video stream error".to_string())
} else if state == StreamerState::NoSignal {
Some("No video signal".to_string())
} else {
None
},
}
}

/// Get MJPEG client count
pub fn mjpeg_client_count(&self) -> u64 {
self.streamer.mjpeg_handler().client_count()
}

/// Get WebRTC session count
pub async fn webrtc_session_count(&self) -> usize {
self.webrtc_streamer.session_count().await
}

/// Set HID controller for WebRTC DataChannel
pub async fn set_hid_controller(&self, hid: Arc<HidController>) {
self.webrtc_streamer.set_hid_controller(hid).await;
}

/// Set audio enabled state for WebRTC
pub async fn set_webrtc_audio_enabled(&self, enabled: bool) -> Result<()> {
self.webrtc_streamer.set_audio_enabled(enabled).await
}

/// Check if WebRTC audio is enabled
pub async fn is_webrtc_audio_enabled(&self) -> bool {
self.webrtc_streamer.is_audio_enabled().await
}

/// Reconnect audio sources for all WebRTC sessions
/// Call this after audio controller restarts (e.g., quality change)
pub async fn reconnect_webrtc_audio_sources(&self) {
self.webrtc_streamer.reconnect_audio_sources().await;
}

// =========================================================================
// Delegated methods from Streamer (for backward compatibility)
// =========================================================================

/// List available video devices
pub async fn list_devices(&self) -> crate::error::Result<Vec<crate::video::device::VideoDeviceInfo>> {
self.streamer.list_devices().await
}

/// Get streamer statistics
pub async fn stats(&self) -> crate::video::streamer::StreamerStats {
self.streamer.stats().await
}

/// Check if config is being changed
pub fn is_config_changing(&self) -> bool {
self.streamer.is_config_changing()
}

/// Check if streaming is active
pub async fn is_streaming(&self) -> bool {
self.streamer.is_streaming().await
}

/// Get frame sender for video frames
pub async fn frame_sender(&self) -> Option<tokio::sync::broadcast::Sender<crate::video::frame::VideoFrame>> {
self.streamer.frame_sender().await
}

/// Publish event to event bus
async fn publish_event(&self, event: SystemEvent) {
if let Some(ref events) = *self.events.read().await {
events.publish(event);
}
}
}

/// Convert VideoCodecType to lowercase string for frontend
fn codec_to_string(codec: crate::video::encoder::VideoCodecType) -> String {
match codec {
crate::video::encoder::VideoCodecType::H264 => "h264".to_string(),
crate::video::encoder::VideoCodecType::H265 => "h265".to_string(),
crate::video::encoder::VideoCodecType::VP8 => "vp8".to_string(),
crate::video::encoder::VideoCodecType::VP9 => "vp9".to_string(),
}
}

#[cfg(test)]
mod tests {
use super::*;
use crate::video::encoder::VideoCodecType;

#[test]
fn test_codec_to_string() {
assert_eq!(codec_to_string(VideoCodecType::H264), "h264");
assert_eq!(codec_to_string(VideoCodecType::H265), "h265");
assert_eq!(codec_to_string(VideoCodecType::VP8), "vp8");
assert_eq!(codec_to_string(VideoCodecType::VP9), "vp9");
}
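
// Added test (sketch): pins down the Default values declared for
// StreamManagerConfig at the top of this file.
#[test]
fn test_stream_manager_config_default() {
let cfg = StreamManagerConfig::default();
assert_eq!(cfg.mode, StreamMode::Mjpeg);
assert!(cfg.device.is_none());
assert_eq!(cfg.fps, 30);
}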
}
892
src/video/streamer.rs
Normal file
@@ -0,0 +1,892 @@
//! Video streamer that integrates capture and streaming
//!
//! This module provides a high-level interface for video capture and streaming,
//! managing the lifecycle of the capture thread and MJPEG/WebRTC distribution.

use std::path::PathBuf;
use std::sync::Arc;
use tokio::sync::{broadcast, RwLock};
use tracing::{debug, error, info, trace, warn};

use super::capture::{CaptureConfig, CaptureState, VideoCapturer};
use super::device::{enumerate_devices, find_best_device, VideoDeviceInfo};
use super::format::{PixelFormat, Resolution};
use super::frame::VideoFrame;
use crate::error::{AppError, Result};
use crate::events::{EventBus, SystemEvent};
use crate::stream::MjpegStreamHandler;

/// Streamer configuration
#[derive(Debug, Clone)]
pub struct StreamerConfig {
/// Device path (None = auto-detect)
pub device_path: Option<PathBuf>,
/// Desired resolution
pub resolution: Resolution,
/// Desired format
pub format: PixelFormat,
/// Desired FPS
pub fps: u32,
/// JPEG quality (1-100)
pub jpeg_quality: u8,
}

impl Default for StreamerConfig {
fn default() -> Self {
Self {
device_path: None,
resolution: Resolution::HD1080,
format: PixelFormat::Mjpeg,
fps: 30,
jpeg_quality: 80,
}
}
}

/// Streamer state
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum StreamerState {
/// Not initialized
Uninitialized,
/// Ready but not streaming
Ready,
/// Actively streaming
Streaming,
/// No video signal
NoSignal,
/// Error occurred
Error,
/// Device was lost (unplugged)
DeviceLost,
/// Device is being recovered (reconnecting)
Recovering,
}

/// Video streamer service
pub struct Streamer {
config: RwLock<StreamerConfig>,
capturer: RwLock<Option<Arc<VideoCapturer>>>,
mjpeg_handler: Arc<MjpegStreamHandler>,
current_device: RwLock<Option<VideoDeviceInfo>>,
state: RwLock<StreamerState>,
start_lock: tokio::sync::Mutex<()>,
/// Event bus for broadcasting state changes (optional)
events: RwLock<Option<Arc<EventBus>>>,
/// Last published state (for change detection)
last_published_state: RwLock<Option<StreamerState>>,
/// Flag to indicate config is being changed (prevents auto-start during config change)
config_changing: std::sync::atomic::AtomicBool,
/// Flag to indicate background tasks (stats, cleanup, monitor) have been started
/// These tasks should only be started once per Streamer instance
background_tasks_started: std::sync::atomic::AtomicBool,
/// Device recovery retry count
recovery_retry_count: std::sync::atomic::AtomicU32,
/// Device recovery in progress flag
recovery_in_progress: std::sync::atomic::AtomicBool,
/// Last lost device path (for recovery)
last_lost_device: RwLock<Option<String>>,
/// Last lost device reason (for logging)
last_lost_reason: RwLock<Option<String>>,
}

impl Streamer {
/// Create a new streamer
pub fn new() -> Arc<Self> {
Arc::new(Self {
config: RwLock::new(StreamerConfig::default()),
capturer: RwLock::new(None),
mjpeg_handler: Arc::new(MjpegStreamHandler::new()),
current_device: RwLock::new(None),
state: RwLock::new(StreamerState::Uninitialized),
start_lock: tokio::sync::Mutex::new(()),
events: RwLock::new(None),
last_published_state: RwLock::new(None),
config_changing: std::sync::atomic::AtomicBool::new(false),
background_tasks_started: std::sync::atomic::AtomicBool::new(false),
recovery_retry_count: std::sync::atomic::AtomicU32::new(0),
recovery_in_progress: std::sync::atomic::AtomicBool::new(false),
last_lost_device: RwLock::new(None),
last_lost_reason: RwLock::new(None),
})
}

/// Create with specific config
pub fn with_config(config: StreamerConfig) -> Arc<Self> {
Arc::new(Self {
config: RwLock::new(config),
capturer: RwLock::new(None),
mjpeg_handler: Arc::new(MjpegStreamHandler::new()),
current_device: RwLock::new(None),
state: RwLock::new(StreamerState::Uninitialized),
start_lock: tokio::sync::Mutex::new(()),
events: RwLock::new(None),
last_published_state: RwLock::new(None),
config_changing: std::sync::atomic::AtomicBool::new(false),
background_tasks_started: std::sync::atomic::AtomicBool::new(false),
recovery_retry_count: std::sync::atomic::AtomicU32::new(0),
recovery_in_progress: std::sync::atomic::AtomicBool::new(false),
last_lost_device: RwLock::new(None),
last_lost_reason: RwLock::new(None),
})
}

/// Get current state as SystemEvent
pub async fn current_state_event(&self) -> SystemEvent {
let state = *self.state.read().await;
let device = self.current_device.read().await.as_ref().map(|d| d.path.display().to_string());

SystemEvent::StreamStateChanged {
state: match state {
StreamerState::Uninitialized => "uninitialized".to_string(),
StreamerState::Ready => "ready".to_string(),
StreamerState::Streaming => "streaming".to_string(),
StreamerState::NoSignal => "no_signal".to_string(),
StreamerState::Error => "error".to_string(),
StreamerState::DeviceLost => "device_lost".to_string(),
StreamerState::Recovering => "recovering".to_string(),
},
device,
}
}

/// Set event bus for broadcasting state changes
pub async fn set_event_bus(&self, events: Arc<EventBus>) {
*self.events.write().await = Some(events);
}

/// Get current state
pub async fn state(&self) -> StreamerState {
*self.state.read().await
}

/// Check if config is currently being changed
/// When true, auto-start should be blocked to prevent device busy errors
pub fn is_config_changing(&self) -> bool {
self.config_changing.load(std::sync::atomic::Ordering::SeqCst)
}

/// Get MJPEG handler for stream endpoints
pub fn mjpeg_handler(&self) -> Arc<MjpegStreamHandler> {
self.mjpeg_handler.clone()
}

/// Get frame sender for WebRTC integration
/// Returns None if no capturer is initialized
pub async fn frame_sender(&self) -> Option<broadcast::Sender<VideoFrame>> {
let capturer = self.capturer.read().await;
capturer.as_ref().map(|c| c.frame_sender())
}

/// Subscribe to video frames
/// Returns None if no capturer is initialized
pub async fn subscribe_frames(&self) -> Option<broadcast::Receiver<VideoFrame>> {
let capturer = self.capturer.read().await;
capturer.as_ref().map(|c| c.subscribe())
}
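
// Consumer sketch (hedged): forwarding captured frames from an initialized
// streamer to some sink.
//
// if let Some(mut rx) = streamer.subscribe_frames().await {
//     while let Ok(frame) = rx.recv().await {
//         // hand `frame` to an encoder or recorder
//     }
// }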
|
||||
|
||||
/// Get current device info
|
||||
pub async fn current_device(&self) -> Option<VideoDeviceInfo> {
|
||||
self.current_device.read().await.clone()
|
||||
}
|
||||
|
||||
/// Get current video configuration (format, resolution, fps)
|
||||
pub async fn current_video_config(&self) -> (PixelFormat, Resolution, u32) {
|
||||
let config = self.config.read().await;
|
||||
(config.format, config.resolution, config.fps)
|
||||
}
|
||||
|
||||
/// List available video devices
|
||||
pub async fn list_devices(&self) -> Result<Vec<VideoDeviceInfo>> {
|
||||
enumerate_devices()
|
||||
}
|
||||
|
||||
/// Validate and apply requested video parameters without auto-selection
    pub async fn apply_video_config(
        self: &Arc<Self>,
        device_path: &str,
        format: PixelFormat,
        resolution: Resolution,
        fps: u32,
    ) -> Result<()> {
        // Set config_changing flag to prevent frontend mode sync during config change
        self.config_changing.store(true, std::sync::atomic::Ordering::SeqCst);

        let result = self.apply_video_config_inner(device_path, format, resolution, fps).await;

        // Clear the flag after config change is complete.
        // The stream will be started by MJPEG client connection, not here.
        self.config_changing.store(false, std::sync::atomic::Ordering::SeqCst);

        result
    }

    /// Internal implementation of apply_video_config
    async fn apply_video_config_inner(
        self: &Arc<Self>,
        device_path: &str,
        format: PixelFormat,
        resolution: Resolution,
        fps: u32,
    ) -> Result<()> {
        // Publish "config changing" event
        self.publish_event(SystemEvent::StreamConfigChanging {
            reason: "device_switch".to_string(),
        })
        .await;

        let devices = enumerate_devices()?;
        let device = devices
            .into_iter()
            .find(|d| d.path.to_string_lossy() == device_path)
            .ok_or_else(|| AppError::VideoError("Video device not found".to_string()))?;

        // Validate format
        let fmt_info = device
            .formats
            .iter()
            .find(|f| f.format == format)
            .ok_or_else(|| AppError::VideoError("Requested format not supported".to_string()))?;

        // Validate resolution
        if !fmt_info.resolutions.is_empty()
            && !fmt_info
                .resolutions
                .iter()
                .any(|r| r.width == resolution.width && r.height == resolution.height)
        {
            return Err(AppError::VideoError("Requested resolution not supported".to_string()));
        }

        // IMPORTANT: Disconnect all MJPEG clients FIRST, before stopping capture.
        // This prevents race conditions where clients try to reconnect and reopen the device.
        info!("Disconnecting all MJPEG clients before config change...");
        self.mjpeg_handler.disconnect_all_clients();

        // Give clients time to receive the disconnect signal and close their connections
        tokio::time::sleep(std::time::Duration::from_millis(100)).await;

        // Stop existing capturer and wait for device release
        {
            // Take ownership of the old capturer to ensure it's dropped
            let old_capturer = self.capturer.write().await.take();
            if let Some(capturer) = old_capturer {
                info!("Stopping existing capture before applying new config...");
                if let Err(e) = capturer.stop().await {
                    warn!("Error stopping old capturer: {}", e);
                }
                // Explicitly drop the capturer to release V4L2 resources
                drop(capturer);
            }
        }

        // Update config
        {
            let mut cfg = self.config.write().await;
            cfg.device_path = Some(device.path.clone());
            cfg.format = format;
            cfg.resolution = resolution;
            cfg.fps = fps;
        }

        // Recreate capturer
        let capture_config = CaptureConfig {
            device_path: device.path.clone(),
            resolution,
            format,
            fps,
            jpeg_quality: self.config.read().await.jpeg_quality,
            ..Default::default()
        };

        let capturer = Arc::new(VideoCapturer::new(capture_config));
        *self.capturer.write().await = Some(capturer.clone());
        *self.current_device.write().await = Some(device.clone());
        *self.state.write().await = StreamerState::Ready;

        // Publish "config applied" event
        info!(
            "Publishing StreamConfigApplied event: {}x{} {:?} @ {}fps",
            resolution.width, resolution.height, format, fps
        );
        self.publish_event(SystemEvent::StreamConfigApplied {
            device: device_path.to_string(),
            resolution: (resolution.width, resolution.height),
            format: format!("{:?}", format),
            fps,
        })
        .await;

        // Note: we don't auto-start here anymore.
        // The stream will be started when an MJPEG client connects (handlers.rs:790).
        // This avoids race conditions between config change and client reconnection.
        info!("Config applied, stream will start when client connects");

        Ok(())
    }

    /// Initialize with auto-detected device
    pub async fn init_auto(self: &Arc<Self>) -> Result<()> {
        info!("Auto-detecting video device...");

        let device = find_best_device()?;
        info!("Found device: {} ({})", device.name, device.path.display());

        self.init_with_device(device).await
    }

    /// Initialize with specific device
    pub async fn init_with_device(self: &Arc<Self>, device: VideoDeviceInfo) -> Result<()> {
        info!(
            "Initializing streamer with device: {} ({})",
            device.name,
            device.path.display()
        );

        // Determine best format for this device
        let config = self.config.read().await;
        let format = self.select_format(&device, config.format)?;
        let resolution = self.select_resolution(&device, &format, config.resolution)?;
        drop(config);

        // Update config with actual values
        {
            let mut config = self.config.write().await;
            config.device_path = Some(device.path.clone());
            config.format = format;
            config.resolution = resolution;
        }

        // Store device info
        *self.current_device.write().await = Some(device.clone());

        // Create capturer
        let config = self.config.read().await;
        let capture_config = CaptureConfig {
            device_path: device.path.clone(),
            resolution: config.resolution,
            format: config.format,
            fps: config.fps,
            jpeg_quality: config.jpeg_quality,
            ..Default::default()
        };
        drop(config);

        let capturer = Arc::new(VideoCapturer::new(capture_config));
        *self.capturer.write().await = Some(capturer);

        *self.state.write().await = StreamerState::Ready;

        info!("Streamer initialized: {} @ {}", format, resolution);
        Ok(())
    }

    /// Select best format for device
    fn select_format(&self, device: &VideoDeviceInfo, preferred: PixelFormat) -> Result<PixelFormat> {
        // Check if preferred format is available
        if device.formats.iter().any(|f| f.format == preferred) {
            return Ok(preferred);
        }

        // Select best available format
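        // (assumes the device's format list is enumerated in preference order,
        // so .first() is the best fallback)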
        device
            .formats
            .first()
            .map(|f| f.format)
            .ok_or_else(|| AppError::VideoError("No supported formats found".to_string()))
    }

    /// Select best resolution for format
    fn select_resolution(
        &self,
        device: &VideoDeviceInfo,
        format: &PixelFormat,
        preferred: Resolution,
    ) -> Result<Resolution> {
        let format_info = device
            .formats
            .iter()
            .find(|f| &f.format == format)
            .ok_or_else(|| AppError::VideoError("Format not found".to_string()))?;

        // Check if preferred resolution is available
        if format_info.resolutions.is_empty()
            || format_info
                .resolutions
                .iter()
                .any(|r| r.width == preferred.width && r.height == preferred.height)
        {
            return Ok(preferred);
        }

        // Select largest available resolution
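        // (assumes the enumerated resolution list is sorted largest-first;
        // otherwise .first() is an arbitrary pick)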
        format_info
            .resolutions
            .first()
            .map(|r| r.resolution())
            .ok_or_else(|| AppError::VideoError("No resolutions available".to_string()))
    }

    /// Restart the capturer only (for recovery - doesn't spawn new monitor)
    ///
    /// This is a simpler version of start() used during device recovery.
    /// It doesn't spawn a new state monitor since the existing one is still active.
    async fn restart_capturer(&self) -> Result<()> {
        let capturer = self.capturer.read().await;
        let capturer = capturer
            .as_ref()
            .ok_or_else(|| AppError::VideoError("Capturer not initialized".to_string()))?;

        // Start capture
        capturer.start().await?;

        // Set MJPEG handler online
        self.mjpeg_handler.set_online();

        // Start frame distribution task
        let mjpeg_handler = self.mjpeg_handler.clone();
        let mut frame_rx = capturer.subscribe();

        tokio::spawn(async move {
            debug!("Recovery frame distribution task started");
            loop {
                match frame_rx.recv().await {
                    Ok(frame) => {
                        mjpeg_handler.update_frame(frame);
                    }
                    Err(tokio::sync::broadcast::error::RecvError::Lagged(n)) => {
                        trace!("Frame distribution lagged by {} frames", n);
                    }
                    Err(tokio::sync::broadcast::error::RecvError::Closed) => {
                        debug!("Frame channel closed");
                        break;
                    }
                }
            }
        });

        Ok(())
    }

    /// Start streaming.
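    ///
    /// A call-site sketch (illustrative; the stream is normally started when
    /// the first MJPEG client connects):
    ///
    /// ```no_run
    /// # use std::sync::Arc;
    /// # async fn demo(streamer: Arc<Streamer>) -> Result<()> {
    /// streamer.start().await?;
    /// assert!(streamer.is_streaming().await);
    /// # Ok(()) }
    /// ```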
    pub async fn start(self: &Arc<Self>) -> Result<()> {
        let _lock = self.start_lock.lock().await;

        let state = self.state().await;
        if state == StreamerState::Streaming {
            return Ok(());
        }

        if state == StreamerState::Uninitialized {
            // Auto-initialize if not done
            self.init_auto().await?;
        }

        let capturer = self.capturer.read().await;
        let capturer = capturer
            .as_ref()
            .ok_or_else(|| AppError::VideoError("Capturer not initialized".to_string()))?;

        // Start capture
        capturer.start().await?;

        // Set MJPEG handler online before starting frame distribution.
        // This is important after config changes, where disconnect_all_clients() set it offline.
        self.mjpeg_handler.set_online();

        // Start frame distribution task
        let mjpeg_handler = self.mjpeg_handler.clone();
        let mut frame_rx = capturer.subscribe();
        let state_ref = Arc::downgrade(self);

        tokio::spawn(async move {
            info!("Frame distribution task started");
            loop {
                match frame_rx.recv().await {
                    Ok(frame) => {
                        mjpeg_handler.update_frame(frame);
                    }
                    Err(tokio::sync::broadcast::error::RecvError::Lagged(n)) => {
                        trace!("Frame distribution lagged by {} frames", n);
                    }
                    Err(tokio::sync::broadcast::error::RecvError::Closed) => {
                        debug!("Frame channel closed");
                        break;
                    }
                }

                // Check if the streamer still exists
                if state_ref.upgrade().is_none() {
                    break;
                }
            }
            info!("Frame distribution task ended");
        });

        // Monitor capture state
        let mut state_rx = capturer.state_watch();
        let state_ref = Arc::downgrade(self);
        let mjpeg_handler = self.mjpeg_handler.clone();

        tokio::spawn(async move {
            while state_rx.changed().await.is_ok() {
                let capture_state = *state_rx.borrow();
                match capture_state {
                    CaptureState::Running => {
                        if let Some(streamer) = state_ref.upgrade() {
                            *streamer.state.write().await = StreamerState::Streaming;
                        }
                    }
                    CaptureState::NoSignal => {
                        mjpeg_handler.set_offline();
                        if let Some(streamer) = state_ref.upgrade() {
                            *streamer.state.write().await = StreamerState::NoSignal;
                        }
                    }
                    CaptureState::Stopped => {
                        mjpeg_handler.set_offline();
                        if let Some(streamer) = state_ref.upgrade() {
                            *streamer.state.write().await = StreamerState::Ready;
                        }
                    }
                    CaptureState::Error => {
                        mjpeg_handler.set_offline();
                        if let Some(streamer) = state_ref.upgrade() {
                            *streamer.state.write().await = StreamerState::Error;
                        }
                    }
                    CaptureState::DeviceLost => {
                        mjpeg_handler.set_offline();
                        if let Some(streamer) = state_ref.upgrade() {
                            *streamer.state.write().await = StreamerState::DeviceLost;
                            // Start device recovery task (fire and forget)
                            let streamer_clone = Arc::clone(&streamer);
                            tokio::spawn(async move {
                                streamer_clone.start_device_recovery_internal().await;
                            });
                        }
                    }
                    CaptureState::Starting => {
                        // Starting state - device is initializing, no action needed
                    }
                }
            }
        });

        // Start background tasks only once per Streamer instance.
        // Use compare_exchange to atomically check and set the flag.
        if self
            .background_tasks_started
            .compare_exchange(false, true, std::sync::atomic::Ordering::SeqCst, std::sync::atomic::Ordering::SeqCst)
            .is_ok()
        {
            info!("Starting background tasks (stats, cleanup, monitor)");

            // Start stats broadcast task (sends stats updates every 1 second)
            let stats_ref = Arc::downgrade(self);
            tokio::spawn(async move {
                let mut interval = tokio::time::interval(std::time::Duration::from_secs(1));
                loop {
                    interval.tick().await;

                    if let Some(streamer) = stats_ref.upgrade() {
                        let clients_stat = streamer.mjpeg_handler().get_clients_stat();
                        let clients = clients_stat.len() as u64;

                        streamer
                            .publish_event(SystemEvent::StreamStatsUpdate {
                                clients,
                                clients_stat,
                            })
                            .await;
                    } else {
                        break;
                    }
                }
            });

            // Start client cleanup task (removes stale clients every 5s)
            self.mjpeg_handler.clone().start_cleanup_task();

            // Start auto-pause monitor task (stops stream if no clients)
            let monitor_ref = Arc::downgrade(self);
            let monitor_handler = self.mjpeg_handler.clone();
            tokio::spawn(async move {
                let mut interval = tokio::time::interval(std::time::Duration::from_secs(2));
                let mut zero_since: Option<std::time::Instant> = None;

                loop {
                    interval.tick().await;

                    let Some(streamer) = monitor_ref.upgrade() else { break; };

                    // Check auto-pause configuration
                    let config = monitor_handler.auto_pause_config();
                    if !config.enabled {
                        zero_since = None;
                        continue;
                    }

                    let count = monitor_handler.client_count();

                    if count == 0 {
                        if zero_since.is_none() {
                            zero_since = Some(std::time::Instant::now());
                            info!("No clients connected, starting shutdown timer ({}s)", config.shutdown_delay_secs);
                        } else if let Some(since) = zero_since {
                            if since.elapsed().as_secs() >= config.shutdown_delay_secs {
                                info!("Auto-pausing stream (no clients for {}s)", config.shutdown_delay_secs);
                                if let Err(e) = streamer.stop().await {
                                    error!("Auto-pause failed: {}", e);
                                }
                                break;
                            }
                        }
                    } else if zero_since.is_some() {
                        info!("Clients reconnected, canceling auto-pause");
                        zero_since = None;
                    }
                }
            });
        } else {
            debug!("Background tasks already started, skipping");
        }

        *self.state.write().await = StreamerState::Streaming;

        // Publish state change event so the DeviceInfo broadcaster can update the frontend
        self.publish_event(self.current_state_event().await).await;

        info!("Streaming started");
        Ok(())
    }

    /// Stop streaming
    pub async fn stop(&self) -> Result<()> {
        if let Some(capturer) = self.capturer.read().await.as_ref() {
            capturer.stop().await?;
        }

        self.mjpeg_handler.set_offline();
        *self.state.write().await = StreamerState::Ready;

        // Publish state change event so the DeviceInfo broadcaster can update the frontend
        self.publish_event(self.current_state_event().await).await;

        info!("Streaming stopped");
        Ok(())
    }

    /// Check if streaming
    pub async fn is_streaming(&self) -> bool {
        self.state().await == StreamerState::Streaming
    }

    /// Get stream statistics
    pub async fn stats(&self) -> StreamerStats {
        let capturer = self.capturer.read().await;
        let capture_stats = if let Some(c) = capturer.as_ref() {
            Some(c.stats().await)
        } else {
            None
        };

        let config = self.config.read().await;

        StreamerStats {
            state: self.state().await,
            device: self.current_device().await.map(|d| d.name),
            format: Some(config.format.to_string()),
            resolution: Some((config.resolution.width, config.resolution.height)),
            clients: self.mjpeg_handler.client_count(),
            target_fps: config.fps,
            fps: capture_stats.as_ref().map(|s| s.current_fps).unwrap_or(0.0),
            frames_captured: capture_stats.as_ref().map(|s| s.frames_captured).unwrap_or(0),
            frames_dropped: capture_stats.as_ref().map(|s| s.frames_dropped).unwrap_or(0),
        }
    }

    /// Publish event to event bus (if configured).
    /// For StreamStateChanged events, only publishes if the state actually changed (de-duplication).
    async fn publish_event(&self, event: SystemEvent) {
        if let Some(events) = self.events.read().await.as_ref() {
            // For state change events, check if state actually changed
            if let SystemEvent::StreamStateChanged { ref state, .. } = event {
                let current_state = match state.as_str() {
                    "uninitialized" => StreamerState::Uninitialized,
                    "ready" => StreamerState::Ready,
                    "streaming" => StreamerState::Streaming,
                    "no_signal" => StreamerState::NoSignal,
                    "error" => StreamerState::Error,
                    "device_lost" => StreamerState::DeviceLost,
                    "recovering" => StreamerState::Recovering,
                    _ => StreamerState::Error,
                };

                let mut last_state = self.last_published_state.write().await;
                if *last_state == Some(current_state) {
                    // State hasn't changed, skip publishing
                    trace!("Skipping duplicate stream state event: {}", state);
                    return;
                }
                *last_state = Some(current_state);
            }

            events.publish(event);
        }
    }

    /// Start device recovery task (internal implementation)
    ///
    /// This method starts a background task that attempts to reconnect
    /// to the video device after it was lost. It retries every 1 second
    /// until the device is recovered.
    async fn start_device_recovery_internal(self: &Arc<Self>) {
        // Check if recovery is already in progress
        if self.recovery_in_progress.swap(true, std::sync::atomic::Ordering::SeqCst) {
            debug!("Device recovery already in progress, skipping");
            return;
        }

        // Get last lost-device info from the capturer. Read the current device
        // path up front: tokio's RwLock::blocking_read() panics when called
        // from an async context, so it must not be used inside the fallback
        // closure as before.
        let fallback_device = self
            .current_device
            .read()
            .await
            .as_ref()
            .map(|d| d.path.display().to_string())
            .unwrap_or_else(|| "unknown".to_string());
        let (device, reason) = {
            let capturer = self.capturer.read().await;
            if let Some(cap) = capturer.as_ref() {
                cap.last_error()
                    .unwrap_or((fallback_device, "Device lost".to_string()))
            } else {
                ("unknown".to_string(), "Device lost".to_string())
            }
        };

        // Store error info
        *self.last_lost_device.write().await = Some(device.clone());
        *self.last_lost_reason.write().await = Some(reason.clone());
        self.recovery_retry_count.store(0, std::sync::atomic::Ordering::Relaxed);

        // Publish device lost event
        self.publish_event(SystemEvent::StreamDeviceLost {
            device: device.clone(),
            reason: reason.clone(),
        })
        .await;

        // Start recovery task
        let streamer = Arc::clone(self);
        tokio::spawn(async move {
            let device_path = device.clone();

            loop {
                let attempt = streamer.recovery_retry_count.fetch_add(1, std::sync::atomic::Ordering::Relaxed) + 1;

                // Check if still in a device-lost state
                let current_state = *streamer.state.read().await;
                if current_state != StreamerState::DeviceLost && current_state != StreamerState::Recovering {
                    info!("Stream state changed during recovery, stopping recovery task");
                    break;
                }

                // Update state to Recovering
                *streamer.state.write().await = StreamerState::Recovering;

                // Publish reconnecting event (every 5 attempts, to avoid spam)
                if attempt == 1 || attempt % 5 == 0 {
                    streamer
                        .publish_event(SystemEvent::StreamReconnecting {
                            device: device_path.clone(),
                            attempt,
                        })
                        .await;
                    info!("Attempting to recover video device {} (attempt {})", device_path, attempt);
                }

                // Wait before retry (1 second)
                tokio::time::sleep(std::time::Duration::from_secs(1)).await;

                // Check if the device file exists
                if !std::path::Path::new(&device_path).exists() {
                    debug!("Device {} not present yet", device_path);
                    continue;
                }

                // Try to restart capture
                match streamer.restart_capturer().await {
                    Ok(_) => {
                        info!("Video device {} recovered after {} attempts", device_path, attempt);
                        streamer.recovery_in_progress.store(false, std::sync::atomic::Ordering::SeqCst);

                        // Publish recovered event
                        streamer
                            .publish_event(SystemEvent::StreamRecovered {
                                device: device_path.clone(),
                            })
                            .await;

                        // Clear error info
                        *streamer.last_lost_device.write().await = None;
                        *streamer.last_lost_reason.write().await = None;
                        return;
                    }
                    Err(e) => {
                        debug!("Failed to restart capture (attempt {}): {}", attempt, e);
                    }
                }
            }

            streamer.recovery_in_progress.store(false, std::sync::atomic::Ordering::SeqCst);
        });
    }
}

impl Default for Streamer {
    fn default() -> Self {
        Self {
            config: RwLock::new(StreamerConfig::default()),
            capturer: RwLock::new(None),
            mjpeg_handler: Arc::new(MjpegStreamHandler::new()),
            current_device: RwLock::new(None),
            state: RwLock::new(StreamerState::Uninitialized),
            start_lock: tokio::sync::Mutex::new(()),
            events: RwLock::new(None),
            last_published_state: RwLock::new(None),
            config_changing: std::sync::atomic::AtomicBool::new(false),
            background_tasks_started: std::sync::atomic::AtomicBool::new(false),
            recovery_retry_count: std::sync::atomic::AtomicU32::new(0),
            recovery_in_progress: std::sync::atomic::AtomicBool::new(false),
            last_lost_device: RwLock::new(None),
            last_lost_reason: RwLock::new(None),
        }
    }
}

/// Streamer statistics
#[derive(Debug, Clone, serde::Serialize)]
pub struct StreamerStats {
    pub state: StreamerState,
    pub device: Option<String>,
    pub format: Option<String>,
    pub resolution: Option<(u32, u32)>,
    pub clients: u64,
    /// Target FPS from configuration
    pub target_fps: u32,
    /// Current actual FPS
    pub fps: f32,
    pub frames_captured: u64,
    pub frames_dropped: u64,
}

impl serde::Serialize for StreamerState {
    fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        let s = match self {
            StreamerState::Uninitialized => "uninitialized",
            StreamerState::Ready => "ready",
            StreamerState::Streaming => "streaming",
            StreamerState::NoSignal => "no_signal",
            StreamerState::Error => "error",
            StreamerState::DeviceLost => "device_lost",
            StreamerState::Recovering => "recovering",
        };
        serializer.serialize_str(s)
    }
}

595
src/video/video_session.rs
Normal file
@@ -0,0 +1,595 @@
//! Video session management with multi-codec support
//!
//! This module provides session management for video streaming with:
//! - Multi-codec support (H264, H265, VP8, VP9)
//! - Session lifecycle management
//! - Dynamic codec switching
//! - Statistics and monitoring
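//!
//! A minimal end-to-end sketch (illustrative; assumes a broadcast frame
//! source is already available and module paths are abbreviated):
//!
//! ```no_run
//! # async fn demo(frames: tokio::sync::broadcast::Receiver<VideoFrame>) -> Result<()> {
//! let manager = VideoSessionManager::with_defaults();
//! manager.set_frame_source(frames).await;
//!
//! let id = manager.create_session(None).await?; // default codec
//! let mut rx = manager.start_session(&id).await?;
//! while let Ok(encoded) = rx.recv().await {
//!     // forward the encoded frame to a WebRTC track or file sink
//!     let _ = encoded;
//! }
//! manager.close_session(&id).await?;
//! # Ok(()) }
//! ```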

use std::collections::HashMap;
use std::sync::Arc;
use std::time::Instant;
use tokio::sync::{broadcast, RwLock};
use tracing::{debug, info, warn};

use super::encoder::registry::{EncoderBackend, EncoderRegistry, VideoEncoderType};
use super::format::Resolution;
use super::frame::VideoFrame;
use super::shared_video_pipeline::{
    EncodedVideoFrame, SharedVideoPipeline, SharedVideoPipelineConfig, SharedVideoPipelineStats,
};
use crate::error::{AppError, Result};

/// Maximum concurrent video sessions
const MAX_VIDEO_SESSIONS: usize = 8;

/// Video session state
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum VideoSessionState {
    /// Session created but not started
    Created,
    /// Session is active and streaming
    Active,
    /// Session is paused
    Paused,
    /// Session is closing
    Closing,
    /// Session is closed
    Closed,
}

impl std::fmt::Display for VideoSessionState {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            VideoSessionState::Created => write!(f, "Created"),
            VideoSessionState::Active => write!(f, "Active"),
            VideoSessionState::Paused => write!(f, "Paused"),
            VideoSessionState::Closing => write!(f, "Closing"),
            VideoSessionState::Closed => write!(f, "Closed"),
        }
    }
}

/// Video session information
#[derive(Debug, Clone)]
pub struct VideoSessionInfo {
    /// Session ID
    pub session_id: String,
    /// Current codec
    pub codec: VideoEncoderType,
    /// Session state
    pub state: VideoSessionState,
    /// Creation time
    pub created_at: Instant,
    /// Last activity time
    pub last_activity: Instant,
    /// Frames received
    pub frames_received: u64,
    /// Bytes received
    pub bytes_received: u64,
}

/// Individual video session
struct VideoSession {
    /// Session ID
    session_id: String,
    /// Codec for this session
    codec: VideoEncoderType,
    /// Session state
    state: VideoSessionState,
    /// Creation time
    created_at: Instant,
    /// Last activity time
    last_activity: Instant,
    /// Frame receiver
    frame_rx: Option<broadcast::Receiver<EncodedVideoFrame>>,
    /// Stats
    frames_received: u64,
    bytes_received: u64,
}

impl VideoSession {
    fn new(session_id: String, codec: VideoEncoderType) -> Self {
        let now = Instant::now();
        Self {
            session_id,
            codec,
            state: VideoSessionState::Created,
            created_at: now,
            last_activity: now,
            frame_rx: None,
            frames_received: 0,
            bytes_received: 0,
        }
    }

    fn info(&self) -> VideoSessionInfo {
        VideoSessionInfo {
            session_id: self.session_id.clone(),
            codec: self.codec,
            state: self.state,
            created_at: self.created_at,
            last_activity: self.last_activity,
            frames_received: self.frames_received,
            bytes_received: self.bytes_received,
        }
    }
}

/// Video session manager configuration
#[derive(Debug, Clone)]
pub struct VideoSessionManagerConfig {
    /// Default codec
    pub default_codec: VideoEncoderType,
    /// Default resolution
    pub resolution: Resolution,
    /// Default bitrate (kbps)
    pub bitrate_kbps: u32,
    /// Default FPS
    pub fps: u32,
    /// Session timeout (seconds)
    pub session_timeout_secs: u64,
    /// Encoder backend (None = auto-select the best available)
    pub encoder_backend: Option<EncoderBackend>,
}

impl Default for VideoSessionManagerConfig {
    fn default() -> Self {
        Self {
            default_codec: VideoEncoderType::H264,
            resolution: Resolution::HD720,
            bitrate_kbps: 8000,
            fps: 30,
            session_timeout_secs: 300,
            encoder_backend: None,
        }
    }
}

/// Video session manager
///
/// Manages video encoding sessions with multi-codec support.
/// A single encoder is shared across all sessions with the same codec.
pub struct VideoSessionManager {
    /// Configuration
    config: VideoSessionManagerConfig,
    /// Active sessions
    sessions: RwLock<HashMap<String, VideoSession>>,
    /// Current pipeline (shared across sessions with the same codec)
    pipeline: RwLock<Option<Arc<SharedVideoPipeline>>>,
    /// Current codec (active pipeline codec)
    current_codec: RwLock<Option<VideoEncoderType>>,
    /// Video frame source
    frame_source: RwLock<Option<broadcast::Receiver<VideoFrame>>>,
}

impl VideoSessionManager {
    /// Create a new video session manager.
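    ///
    /// A construction sketch with a lowered bitrate (illustrative):
    ///
    /// ```no_run
    /// let config = VideoSessionManagerConfig {
    ///     bitrate_kbps: 4000,
    ///     ..Default::default()
    /// };
    /// let manager = VideoSessionManager::new(config);
    /// ```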
    pub fn new(config: VideoSessionManagerConfig) -> Self {
        info!(
            "Creating video session manager with default codec: {}",
            config.default_codec
        );

        Self {
            config,
            sessions: RwLock::new(HashMap::new()),
            pipeline: RwLock::new(None),
            current_codec: RwLock::new(None),
            frame_source: RwLock::new(None),
        }
    }

    /// Create with default configuration
    pub fn with_defaults() -> Self {
        Self::new(VideoSessionManagerConfig::default())
    }

    /// Set the video frame source
    pub async fn set_frame_source(&self, rx: broadcast::Receiver<VideoFrame>) {
        *self.frame_source.write().await = Some(rx);
    }

    /// Get available codecs based on hardware capabilities
    pub fn available_codecs(&self) -> Vec<VideoEncoderType> {
        EncoderRegistry::global().selectable_formats()
    }

    /// Check if a codec is available
    pub fn is_codec_available(&self, codec: VideoEncoderType) -> bool {
        let hardware_only = codec.hardware_only();
        EncoderRegistry::global().is_format_available(codec, hardware_only)
    }

    /// Create a new video session
    pub async fn create_session(&self, codec: Option<VideoEncoderType>) -> Result<String> {
        // Session cap check (under a read lock; a concurrent create could
        // briefly race past this soft limit)
        let sessions = self.sessions.read().await;
        if sessions.len() >= MAX_VIDEO_SESSIONS {
            return Err(AppError::VideoError(format!(
                "Maximum video sessions ({}) reached",
                MAX_VIDEO_SESSIONS
            )));
        }
        drop(sessions);

        // Use the specified codec, or fall back to the default
        let codec = codec.unwrap_or(self.config.default_codec);

        // Verify the codec is available
        if !self.is_codec_available(codec) {
            return Err(AppError::VideoError(format!(
                "Codec {} is not available on this system",
                codec
            )));
        }

        // Generate a session ID
        let session_id = uuid::Uuid::new_v4().to_string();

        // Create and store the session
        let session = VideoSession::new(session_id.clone(), codec);
        let mut sessions = self.sessions.write().await;
        sessions.insert(session_id.clone(), session);

        info!("Video session created: {} (codec: {})", session_id, codec);

        Ok(session_id)
    }

    /// Start a video session (subscribe to encoded frames)
    pub async fn start_session(
        &self,
        session_id: &str,
    ) -> Result<broadcast::Receiver<EncodedVideoFrame>> {
        // Ensure the pipeline is running with the correct codec
        self.ensure_pipeline_for_session(session_id).await?;

        let mut sessions = self.sessions.write().await;
        let session = sessions
            .get_mut(session_id)
            .ok_or_else(|| AppError::NotFound(format!("Session not found: {}", session_id)))?;

        // Get the pipeline and subscribe
        let pipeline = self.pipeline.read().await;
        let pipeline = pipeline
            .as_ref()
            .ok_or_else(|| AppError::VideoError("Pipeline not initialized".to_string()))?;

        // One receiver is returned to the caller; a second is retained on the
        // session so it stays subscribed independently of the caller's receiver.
        let rx = pipeline.subscribe();
        session.frame_rx = Some(pipeline.subscribe());
        session.state = VideoSessionState::Active;
        session.last_activity = Instant::now();

        info!("Video session started: {}", session_id);
        Ok(rx)
    }

    /// Ensure the pipeline is running with the correct codec for a session
    async fn ensure_pipeline_for_session(&self, session_id: &str) -> Result<()> {
        let sessions = self.sessions.read().await;
        let session = sessions
            .get(session_id)
            .ok_or_else(|| AppError::NotFound(format!("Session not found: {}", session_id)))?;
        let required_codec = session.codec;
        drop(sessions);

        let current_codec = *self.current_codec.read().await;

        // Create or switch the pipeline if the codec doesn't match
        if current_codec != Some(required_codec) {
            self.switch_pipeline_codec(required_codec).await?;
        }

        // Ensure the pipeline is started. Clone the Arc so the lock isn't
        // held across the await on start().
        let pipe = self.pipeline.read().await.clone();
        if let Some(pipe) = pipe {
            if !pipe.is_running() {
                // Need a frame source to start
                let frame_rx = {
                    let source = self.frame_source.read().await;
                    source.as_ref().map(|rx| rx.resubscribe())
                };

                if let Some(rx) = frame_rx {
                    pipe.start(rx).await?;
                }
            }
        }

        Ok(())
    }

    /// Switch pipeline to a different codec
    async fn switch_pipeline_codec(&self, codec: VideoEncoderType) -> Result<()> {
        info!("Switching pipeline to codec: {}", codec);

        // Stop the existing pipeline
        {
            let pipeline = self.pipeline.read().await;
            if let Some(ref pipe) = *pipeline {
                pipe.stop();
            }
        }

        // Create the new pipeline config
        let pipeline_config = SharedVideoPipelineConfig {
            resolution: self.config.resolution,
            input_format: crate::video::format::PixelFormat::Mjpeg, // common input
            output_codec: codec,
            bitrate_kbps: self.config.bitrate_kbps,
            fps: self.config.fps,
            gop_size: 30,
            encoder_backend: self.config.encoder_backend,
        };

        // Create the new pipeline
        let new_pipeline = SharedVideoPipeline::new(pipeline_config)?;

        // Update state
        *self.pipeline.write().await = Some(new_pipeline);
        *self.current_codec.write().await = Some(codec);

        info!("Pipeline switched to codec: {}", codec);
        Ok(())
    }

    /// Get session info
    pub async fn get_session(&self, session_id: &str) -> Option<VideoSessionInfo> {
        let sessions = self.sessions.read().await;
        sessions.get(session_id).map(|s| s.info())
    }

    /// List all sessions
    pub async fn list_sessions(&self) -> Vec<VideoSessionInfo> {
        let sessions = self.sessions.read().await;
        sessions.values().map(|s| s.info()).collect()
    }

    /// Pause a session
    pub async fn pause_session(&self, session_id: &str) -> Result<()> {
        let mut sessions = self.sessions.write().await;
        let session = sessions
            .get_mut(session_id)
            .ok_or_else(|| AppError::NotFound(format!("Session not found: {}", session_id)))?;

        session.state = VideoSessionState::Paused;
        session.last_activity = Instant::now();

        debug!("Video session paused: {}", session_id);
        Ok(())
    }

    /// Resume a session
    pub async fn resume_session(&self, session_id: &str) -> Result<()> {
        let mut sessions = self.sessions.write().await;
        let session = sessions
            .get_mut(session_id)
            .ok_or_else(|| AppError::NotFound(format!("Session not found: {}", session_id)))?;

        session.state = VideoSessionState::Active;
        session.last_activity = Instant::now();

        debug!("Video session resumed: {}", session_id);
        Ok(())
    }

    /// Close a session
    pub async fn close_session(&self, session_id: &str) -> Result<()> {
        let mut sessions = self.sessions.write().await;
        if let Some(mut session) = sessions.remove(session_id) {
            session.state = VideoSessionState::Closed;
            session.frame_rx = None;
            info!("Video session closed: {}", session_id);
        }

        // If no more sessions, consider stopping the pipeline
        if sessions.is_empty() {
            drop(sessions);
            self.maybe_stop_pipeline().await;
        }

        Ok(())
    }

    /// Stop the pipeline if there are no active sessions
    async fn maybe_stop_pipeline(&self) {
        let sessions = self.sessions.read().await;
        let has_active = sessions
            .values()
            .any(|s| s.state == VideoSessionState::Active);
        drop(sessions);

        if !has_active {
            let pipeline = self.pipeline.read().await;
            if let Some(ref pipe) = *pipeline {
                pipe.stop();
                debug!("Pipeline stopped - no active sessions");
            }
        }
    }

    /// Cleanup stale/timed-out sessions.
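    ///
    /// Intended to be driven periodically, e.g. (illustrative):
    ///
    /// ```no_run
    /// # use std::sync::Arc;
    /// # fn demo(manager: Arc<VideoSessionManager>) {
    /// tokio::spawn(async move {
    ///     let mut interval = tokio::time::interval(std::time::Duration::from_secs(60));
    ///     loop {
    ///         interval.tick().await;
    ///         manager.cleanup_stale_sessions().await;
    ///     }
    /// });
    /// # }
    /// ```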
    pub async fn cleanup_stale_sessions(&self) {
        let timeout = std::time::Duration::from_secs(self.config.session_timeout_secs);
        let now = Instant::now();

        let stale_ids: Vec<String> = {
            let sessions = self.sessions.read().await;
            sessions
                .iter()
                .filter(|(_, s)| {
                    (s.state == VideoSessionState::Paused
                        || s.state == VideoSessionState::Created)
                        && now.duration_since(s.last_activity) > timeout
                })
                .map(|(id, _)| id.clone())
                .collect()
        };

        if !stale_ids.is_empty() {
            let mut sessions = self.sessions.write().await;
            for id in stale_ids {
                info!("Removing stale video session: {}", id);
                sessions.remove(&id);
            }
        }
    }

    /// Get session count
    pub async fn session_count(&self) -> usize {
        self.sessions.read().await.len()
    }

    /// Get active session count
    pub async fn active_session_count(&self) -> usize {
        self.sessions
            .read()
            .await
            .values()
            .filter(|s| s.state == VideoSessionState::Active)
            .count()
    }

    /// Get pipeline statistics
    pub async fn pipeline_stats(&self) -> Option<SharedVideoPipelineStats> {
        let pipeline = self.pipeline.read().await;
        if let Some(ref pipe) = *pipeline {
            Some(pipe.stats().await)
        } else {
            None
        }
    }

    /// Get the currently active codec
    pub async fn current_codec(&self) -> Option<VideoEncoderType> {
        *self.current_codec.read().await
    }

    /// Set bitrate for the current pipeline
    pub async fn set_bitrate(&self, bitrate_kbps: u32) -> Result<()> {
        let pipeline = self.pipeline.read().await;
        if let Some(ref pipe) = *pipeline {
            pipe.set_bitrate(bitrate_kbps).await?;
        }
        Ok(())
    }

    /// Request a keyframe for all sessions
    pub async fn request_keyframe(&self) {
        // This would be implemented if encoders support forced keyframes
        warn!("Keyframe request not yet implemented");
    }

    /// Change codec for a session (requires restart).
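    ///
    /// A sketch of the change-then-restart flow (illustrative):
    ///
    /// ```no_run
    /// # async fn demo(manager: &VideoSessionManager, id: &str) -> Result<()> {
    /// manager.change_session_codec(id, VideoEncoderType::H264).await?; // any available codec
    /// let mut rx = manager.start_session(id).await?; // resubscribe with the new codec
    /// # let _ = rx;
    /// # Ok(()) }
    /// ```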
    pub async fn change_session_codec(
        &self,
        session_id: &str,
        new_codec: VideoEncoderType,
    ) -> Result<()> {
        if !self.is_codec_available(new_codec) {
            return Err(AppError::VideoError(format!(
                "Codec {} is not available",
                new_codec
            )));
        }

        let mut sessions = self.sessions.write().await;
        let session = sessions
            .get_mut(session_id)
            .ok_or_else(|| AppError::NotFound(format!("Session not found: {}", session_id)))?;

        let old_codec = session.codec;
        session.codec = new_codec;
        session.state = VideoSessionState::Created; // requires restart
        session.frame_rx = None;
        session.last_activity = Instant::now();

        info!(
            "Session {} codec changed: {} -> {}",
            session_id, old_codec, new_codec
        );

        Ok(())
    }

    /// Get codec info
    pub fn get_codec_info(&self, codec: VideoEncoderType) -> Option<CodecInfo> {
        let registry = EncoderRegistry::global();
        let encoder = registry.best_encoder(codec, codec.hardware_only())?;

        Some(CodecInfo {
            codec_type: codec,
            codec_name: encoder.codec_name.clone(),
            backend: encoder.backend.to_string(),
            is_hardware: encoder.is_hardware,
        })
    }

    /// List all available codecs with their info
    pub fn list_codec_info(&self) -> Vec<CodecInfo> {
        self.available_codecs()
            .iter()
            .filter_map(|c| self.get_codec_info(*c))
            .collect()
    }
}

/// Codec information
#[derive(Debug, Clone)]
pub struct CodecInfo {
    /// Codec type
    pub codec_type: VideoEncoderType,
    /// FFmpeg codec name
    pub codec_name: String,
    /// Backend (VAAPI, NVENC, etc.)
    pub backend: String,
    /// Whether this codec is hardware accelerated
    pub is_hardware: bool,
}

impl Default for VideoSessionManager {
    fn default() -> Self {
        Self::with_defaults()
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_session_state_display() {
        assert_eq!(VideoSessionState::Active.to_string(), "Active");
        assert_eq!(VideoSessionState::Closed.to_string(), "Closed");
    }

    #[test]
    fn test_available_codecs() {
        let manager = VideoSessionManager::with_defaults();
        let codecs = manager.available_codecs();
        println!("Available codecs: {:?}", codecs);
        // H264 should always be available (software fallback)
        assert!(codecs.contains(&VideoEncoderType::H264));
    }

    #[test]
    fn test_codec_info() {
        let manager = VideoSessionManager::with_defaults();
        let info = manager.get_codec_info(VideoEncoderType::H264);
        if let Some(info) = info {
            println!(
                "H264: {} ({}, hardware={})",
                info.codec_name, info.backend, info.is_hardware
            );
        }
    }
}