refactor: upgrade dependency versions and optimize the build system

- Upgrade core dependencies (axum 0.8, tower-http 0.6, alsa 0.11, etc.)
- Simplify the cross-compilation setup and switch to Debian 11 for better compatibility
- Add Debian packaging support (debuerreotype template)
- Remove the standalone MJPEG decoder and simplify the video module
- Statically link libx264/libx265/libopus into the binary
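For context, static linkage of this sort is typically forced from a `build.rs` script via Cargo link directives. A minimal sketch, assuming prebuilt static archives under a `vendor/lib` directory (that path and layout are illustrative, not taken from this repository):

// build.rs — sketch only; the archive location is an assumption.
fn main() {
    // Directory holding the prebuilt archives libx264.a, libx265.a, libopus.a.
    println!("cargo:rustc-link-search=native=vendor/lib");
    // Ask the linker for the static archives rather than shared libraries.
    println!("cargo:rustc-link-lib=static=x264");
    println!("cargo:rustc-link-lib=static=x265");
    println!("cargo:rustc-link-lib=static=opus");
    // libx265 is C++, so the C++ runtime must be pulled in explicitly.
    println!("cargo:rustc-link-lib=stdc++");
}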
mofeng-git
2026-01-10 10:59:00 +08:00
parent 3fa91772f0
commit e670f1ffd1
46 changed files with 893 additions and 1156 deletions

View File

@@ -414,64 +414,6 @@ pub fn yuyv_buffer_size(resolution: Resolution) -> usize {
(resolution.width * resolution.height * 2) as usize
}
// ============================================================================
// MJPEG Decoder - Decodes JPEG to YUV420P using libyuv
// ============================================================================
/// MJPEG/JPEG decoder that outputs YUV420P using libyuv
pub struct MjpegDecoder {
/// Resolution hint (can be updated from decoded frame)
resolution: Resolution,
/// YUV420P output buffer
yuv_buffer: Yuv420pBuffer,
}
impl MjpegDecoder {
/// Create a new MJPEG decoder with expected resolution
pub fn new(resolution: Resolution) -> Result<Self> {
Ok(Self {
resolution,
yuv_buffer: Yuv420pBuffer::new(resolution),
})
}
/// Decode MJPEG/JPEG data to YUV420P using libyuv
pub fn decode(&mut self, jpeg_data: &[u8]) -> Result<&[u8]> {
// Get MJPEG dimensions
let (width, height) = libyuv::mjpeg_size(jpeg_data)
.map_err(|e| AppError::VideoError(format!("Failed to get MJPEG size: {}", e)))?;
// Check if resolution changed
if width != self.resolution.width as i32 || height != self.resolution.height as i32 {
tracing::debug!(
"MJPEG resolution changed: {}x{} -> {}x{}",
self.resolution.width,
self.resolution.height,
width,
height
);
self.resolution = Resolution::new(width as u32, height as u32);
self.yuv_buffer = Yuv420pBuffer::new(self.resolution);
}
// Decode MJPEG directly to I420 using libyuv
libyuv::mjpeg_to_i420(jpeg_data, self.yuv_buffer.as_bytes_mut(), width, height)
.map_err(|e| AppError::VideoError(format!("MJPEG decode failed: {}", e)))?;
Ok(self.yuv_buffer.as_bytes())
}
/// Get current resolution
pub fn resolution(&self) -> Resolution {
self.resolution
}
/// Get YUV420P buffer size
pub fn yuv_buffer_size(&self) -> usize {
self.yuv_buffer.len()
}
}
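// Illustrative caller sketch for the decoder above (hypothetical, not part
// of this diff): decode() reallocates its internal YUV420P buffer whenever
// the stream's resolution changes, so callers just borrow the result.
//
//     let mut dec = MjpegDecoder::new(Resolution::new(1280, 720))?;
//     let yuv: &[u8] = dec.decode(&jpeg_frame)?;     // packed YUV420P
//     assert_eq!(yuv.len(), dec.yuv_buffer_size());  // width * height * 3 / 2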
// ============================================================================
// NV12 Converter for VAAPI encoder (using libyuv)
// ============================================================================
@@ -572,34 +514,6 @@ pub fn yuyv_to_nv12(yuyv: &[u8], nv12: &mut [u8], width: usize, height: usize) {
}
}
// ============================================================================
// Extended PixelConverter for MJPEG support
// ============================================================================
/// MJPEG to YUV420P converter (wraps MjpegDecoder)
pub struct MjpegToYuv420Converter {
decoder: MjpegDecoder,
}
impl MjpegToYuv420Converter {
/// Create a new MJPEG to YUV420P converter
pub fn new(resolution: Resolution) -> Result<Self> {
Ok(Self {
decoder: MjpegDecoder::new(resolution)?,
})
}
/// Convert MJPEG data to YUV420P
pub fn convert(&mut self, mjpeg_data: &[u8]) -> Result<&[u8]> {
self.decoder.decode(mjpeg_data)
}
/// Get current resolution
pub fn resolution(&self) -> Resolution {
self.decoder.resolution()
}
}
#[cfg(test)]
mod tests {
use super::*;

View File

@@ -1,481 +0,0 @@
//! MJPEG decoder implementations
//!
//! Provides MJPEG decoding using libyuv for SIMD-accelerated decoding.
//! All decoders output to standard YUV formats suitable for encoding.
use std::sync::Once;
use tracing::{debug, info};
use crate::error::{AppError, Result};
use crate::video::format::Resolution;
static INIT_LOGGING: Once = Once::new();
/// Initialize decoder logging (only once)
fn init_decoder_logging() {
INIT_LOGGING.call_once(|| {
debug!("MJPEG decoder logging initialized");
});
}
/// MJPEG decoder configuration
#[derive(Debug, Clone)]
pub struct MjpegVaapiDecoderConfig {
/// Expected resolution (can be updated from decoded frame)
pub resolution: Resolution,
/// Use hardware acceleration (ignored, kept for API compatibility)
pub use_hwaccel: bool,
}
impl Default for MjpegVaapiDecoderConfig {
fn default() -> Self {
Self {
resolution: Resolution::HD1080,
use_hwaccel: true,
}
}
}
/// Decoded frame data in NV12 format
#[derive(Debug, Clone)]
pub struct DecodedNv12Frame {
/// Y plane data
pub y_plane: Vec<u8>,
/// UV interleaved plane data
pub uv_plane: Vec<u8>,
/// Y plane linesize (stride)
pub y_linesize: i32,
/// UV plane linesize (stride)
pub uv_linesize: i32,
/// Frame width
pub width: i32,
/// Frame height
pub height: i32,
}
/// Decoded frame data in YUV420P (I420) format
#[derive(Debug, Clone)]
pub struct DecodedYuv420pFrame {
/// Y plane data
pub y_plane: Vec<u8>,
/// U plane data
pub u_plane: Vec<u8>,
/// V plane data
pub v_plane: Vec<u8>,
/// Y plane linesize (stride)
pub y_linesize: i32,
/// U plane linesize (stride)
pub u_linesize: i32,
/// V plane linesize (stride)
pub v_linesize: i32,
/// Frame width
pub width: i32,
/// Frame height
pub height: i32,
}
impl DecodedYuv420pFrame {
/// Get packed YUV420P data (Y plane followed by the U and V planes, with stride padding removed)
pub fn to_packed_yuv420p(&self) -> Vec<u8> {
let width = self.width as usize;
let height = self.height as usize;
let y_size = width * height;
let uv_size = width * height / 4;
let mut packed = Vec::with_capacity(y_size + uv_size * 2);
// Copy Y plane, removing stride padding if any
if self.y_linesize as usize == width {
packed.extend_from_slice(&self.y_plane[..y_size]);
} else {
for row in 0..height {
let src_offset = row * self.y_linesize as usize;
packed.extend_from_slice(&self.y_plane[src_offset..src_offset + width]);
}
}
// Copy U plane
let uv_width = width / 2;
let uv_height = height / 2;
if self.u_linesize as usize == uv_width {
packed.extend_from_slice(&self.u_plane[..uv_size]);
} else {
for row in 0..uv_height {
let src_offset = row * self.u_linesize as usize;
packed.extend_from_slice(&self.u_plane[src_offset..src_offset + uv_width]);
}
}
// Copy V plane
if self.v_linesize as usize == uv_width {
packed.extend_from_slice(&self.v_plane[..uv_size]);
} else {
for row in 0..uv_height {
let src_offset = row * self.v_linesize as usize;
packed.extend_from_slice(&self.v_plane[src_offset..src_offset + uv_width]);
}
}
packed
}
/// Copy packed YUV420P data to external buffer (zero allocation)
/// Returns the number of bytes written, or None if buffer too small
pub fn copy_to_packed_yuv420p(&self, dst: &mut [u8]) -> Option<usize> {
let width = self.width as usize;
let height = self.height as usize;
let y_size = width * height;
let uv_size = width * height / 4;
let total_size = y_size + uv_size * 2;
if dst.len() < total_size {
return None;
}
// Copy Y plane
if self.y_linesize as usize == width {
dst[..y_size].copy_from_slice(&self.y_plane[..y_size]);
} else {
for row in 0..height {
let src_offset = row * self.y_linesize as usize;
let dst_offset = row * width;
dst[dst_offset..dst_offset + width]
.copy_from_slice(&self.y_plane[src_offset..src_offset + width]);
}
}
// Copy U plane
let uv_width = width / 2;
let uv_height = height / 2;
if self.u_linesize as usize == uv_width {
dst[y_size..y_size + uv_size].copy_from_slice(&self.u_plane[..uv_size]);
} else {
for row in 0..uv_height {
let src_offset = row * self.u_linesize as usize;
let dst_offset = y_size + row * uv_width;
dst[dst_offset..dst_offset + uv_width]
.copy_from_slice(&self.u_plane[src_offset..src_offset + uv_width]);
}
}
// Copy V plane
let v_offset = y_size + uv_size;
if self.v_linesize as usize == uv_width {
dst[v_offset..v_offset + uv_size].copy_from_slice(&self.v_plane[..uv_size]);
} else {
for row in 0..uv_height {
let src_offset = row * self.v_linesize as usize;
let dst_offset = v_offset + row * uv_width;
dst[dst_offset..dst_offset + uv_width]
.copy_from_slice(&self.v_plane[src_offset..src_offset + uv_width]);
}
}
Some(total_size)
}
}
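// Worked example for the stride handling above (illustrative numbers): a
// 1280x720 frame decoded with y_linesize = 1536 carries 256 padding bytes
// per Y row, so packing copies 1280 of every 1536 bytes. The packed sizes:
//   y_size  = 1280 * 720     = 921_600 bytes
//   uv_size = 1280 * 720 / 4 = 230_400 bytes each for the U and V planes
// for a total of 1_382_400 bytes, i.e. width * height * 3 / 2.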
impl DecodedNv12Frame {
/// Get packed NV12 data (Y plane followed by the interleaved UV plane, with stride padding removed)
pub fn to_packed_nv12(&self) -> Vec<u8> {
let width = self.width as usize;
let height = self.height as usize;
let y_size = width * height;
let uv_size = width * height / 2;
let mut packed = Vec::with_capacity(y_size + uv_size);
// Copy Y plane, removing stride padding if any
if self.y_linesize as usize == width {
// No padding, direct copy
packed.extend_from_slice(&self.y_plane[..y_size]);
} else {
// Has padding, copy row by row
for row in 0..height {
let src_offset = row * self.y_linesize as usize;
packed.extend_from_slice(&self.y_plane[src_offset..src_offset + width]);
}
}
// Copy UV plane, removing stride padding if any
let uv_height = height / 2;
if self.uv_linesize as usize == width {
// No padding, direct copy
packed.extend_from_slice(&self.uv_plane[..uv_size]);
} else {
// Has padding, copy row by row
for row in 0..uv_height {
let src_offset = row * self.uv_linesize as usize;
packed.extend_from_slice(&self.uv_plane[src_offset..src_offset + width]);
}
}
packed
}
/// Copy packed NV12 data to external buffer (zero allocation)
/// Returns the number of bytes written, or None if buffer too small
pub fn copy_to_packed_nv12(&self, dst: &mut [u8]) -> Option<usize> {
let width = self.width as usize;
let height = self.height as usize;
let y_size = width * height;
let uv_size = width * height / 2;
let total_size = y_size + uv_size;
if dst.len() < total_size {
return None;
}
// Copy Y plane, removing stride padding if any
if self.y_linesize as usize == width {
// No padding, direct copy
dst[..y_size].copy_from_slice(&self.y_plane[..y_size]);
} else {
// Has padding, copy row by row
for row in 0..height {
let src_offset = row * self.y_linesize as usize;
let dst_offset = row * width;
dst[dst_offset..dst_offset + width]
.copy_from_slice(&self.y_plane[src_offset..src_offset + width]);
}
}
// Copy UV plane, removing stride padding if any
let uv_height = height / 2;
if self.uv_linesize as usize == width {
// No padding, direct copy
dst[y_size..total_size].copy_from_slice(&self.uv_plane[..uv_size]);
} else {
// Has padding, copy row by row
for row in 0..uv_height {
let src_offset = row * self.uv_linesize as usize;
let dst_offset = y_size + row * width;
dst[dst_offset..dst_offset + width]
.copy_from_slice(&self.uv_plane[src_offset..src_offset + width]);
}
}
Some(total_size)
}
}
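// Note on the NV12 layout handled above (illustrative numbers): unlike
// I420, the interleaved UV plane spans the full luma width per row at half
// the height, so for 1280x720:
//   y_size  = 1280 * 720     = 921_600 bytes
//   uv_size = 1280 * 720 / 2 = 460_800 bytes (360 rows of 1280 UV bytes)
// which again totals width * height * 3 / 2 bytes when packed.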
/// MJPEG decoder with NV12 output
///
/// Uses libyuv for SIMD-accelerated MJPEG decoding to YUV420P,
/// then converts to NV12 for hardware encoder compatibility.
/// Named "VaapiDecoder" for API compatibility with existing code.
pub struct MjpegVaapiDecoder {
/// Configuration
config: MjpegVaapiDecoderConfig,
/// Frame counter
frame_count: u64,
}
impl MjpegVaapiDecoder {
/// Create a new MJPEG decoder
pub fn new(config: MjpegVaapiDecoderConfig) -> Result<Self> {
init_decoder_logging();
info!(
"Creating MJPEG decoder with libyuv (SIMD-accelerated, NV12 output)"
);
Ok(Self {
config,
frame_count: 0,
})
}
/// Create with default config
pub fn with_vaapi(resolution: Resolution) -> Result<Self> {
Self::new(MjpegVaapiDecoderConfig {
resolution,
use_hwaccel: true,
})
}
/// Create with software decoding (same as with_vaapi, kept for API compatibility)
pub fn with_software(resolution: Resolution) -> Result<Self> {
Self::new(MjpegVaapiDecoderConfig {
resolution,
use_hwaccel: false,
})
}
/// Check if hardware acceleration is active (always false, using libyuv)
pub fn is_hwaccel_active(&self) -> bool {
false
}
/// Decode MJPEG frame to NV12
///
/// Returns the decoded frame in NV12 format, or an error if decoding fails.
pub fn decode(&mut self, jpeg_data: &[u8]) -> Result<DecodedNv12Frame> {
if jpeg_data.len() < 2 {
return Err(AppError::VideoError("JPEG data too small".to_string()));
}
// Verify JPEG signature (FFD8)
if jpeg_data[0] != 0xFF || jpeg_data[1] != 0xD8 {
return Err(AppError::VideoError("Invalid JPEG signature".to_string()));
}
self.frame_count += 1;
// Get JPEG dimensions
let (width, height) = libyuv::mjpeg_size(jpeg_data)
.map_err(|e| AppError::VideoError(format!("Failed to read MJPEG size: {}", e)))?;
// Decode MJPEG to YUV420P first
let y_size = (width * height) as usize;
let uv_size = y_size / 4;
let yuv420_size = y_size + uv_size * 2;
let mut yuv_data = vec![0u8; yuv420_size];
libyuv::mjpeg_to_i420(jpeg_data, &mut yuv_data, width, height)
.map_err(|e| AppError::VideoError(format!("libyuv MJPEG→I420 failed: {}", e)))?;
// Convert I420 to NV12
let nv12_size = (width * height * 3 / 2) as usize;
let mut nv12_data = vec![0u8; nv12_size];
libyuv::i420_to_nv12(&yuv_data, &mut nv12_data, width, height)
.map_err(|e| AppError::VideoError(format!("libyuv I420→NV12 failed: {}", e)))?;
// Split into Y and UV planes
let y_plane = nv12_data[..y_size].to_vec();
let uv_plane = nv12_data[y_size..].to_vec();
Ok(DecodedNv12Frame {
y_plane,
uv_plane,
y_linesize: width,
uv_linesize: width,
width,
height,
})
}
/// Get frame count
pub fn frame_count(&self) -> u64 {
self.frame_count
}
/// Get current resolution from config
pub fn resolution(&self) -> Resolution {
self.config.resolution
}
}
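// Hypothetical usage sketch for the decoder above (not part of this diff):
// the path is MJPEG -> I420 (libyuv) -> NV12, and both output planes are
// returned tightly packed, so linesize always equals the frame width.
//
//     let mut dec = MjpegVaapiDecoder::with_vaapi(Resolution::HD1080)?;
//     let frame = dec.decode(&jpeg_bytes)?;
//     assert_eq!(frame.y_linesize, frame.width);
//     let nv12 = frame.to_packed_nv12(); // Y plane, then interleaved UV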
/// Libyuv-based MJPEG decoder for direct YUV420P output
///
/// This decoder is optimized for software encoders (libvpx, libx265) that need YUV420P input.
/// It uses libyuv's MJPGToI420 to decode directly to I420/YUV420P format.
pub struct MjpegTurboDecoder {
/// Frame counter
frame_count: u64,
}
impl MjpegTurboDecoder {
/// Create a new libyuv-based MJPEG decoder
pub fn new(resolution: Resolution) -> Result<Self> {
info!(
"Created libyuv MJPEG decoder for {}x{} (direct YUV420P output)",
resolution.width, resolution.height
);
Ok(Self {
frame_count: 0,
})
}
/// Decode MJPEG frame directly to YUV420P using libyuv
///
/// This is the optimal path for software encoders that need YUV420P input.
/// libyuv handles all JPEG subsampling formats internally.
pub fn decode_to_yuv420p(&mut self, jpeg_data: &[u8]) -> Result<DecodedYuv420pFrame> {
if jpeg_data.len() < 2 || jpeg_data[0] != 0xFF || jpeg_data[1] != 0xD8 {
return Err(AppError::VideoError("Invalid JPEG data".to_string()));
}
self.frame_count += 1;
// Get JPEG dimensions
let (width, height) = libyuv::mjpeg_size(jpeg_data)
.map_err(|e| AppError::VideoError(format!("Failed to read MJPEG size: {}", e)))?;
let y_size = (width * height) as usize;
let uv_size = y_size / 4;
let yuv420_size = y_size + uv_size * 2;
let mut yuv_data = vec![0u8; yuv420_size];
libyuv::mjpeg_to_i420(jpeg_data, &mut yuv_data, width, height)
.map_err(|e| AppError::VideoError(format!("libyuv MJPEG→I420 failed: {}", e)))?;
Ok(DecodedYuv420pFrame {
y_plane: yuv_data[..y_size].to_vec(),
u_plane: yuv_data[y_size..y_size + uv_size].to_vec(),
v_plane: yuv_data[y_size + uv_size..].to_vec(),
y_linesize: width,
u_linesize: width / 2,
v_linesize: width / 2,
width,
height,
})
}
/// Decode directly to packed YUV420P buffer using libyuv
///
/// This uses libyuv's MJPGToI420 which handles all JPEG subsampling formats
/// and converts to I420 directly.
pub fn decode_to_yuv420p_buffer(&mut self, jpeg_data: &[u8], dst: &mut [u8]) -> Result<usize> {
if jpeg_data.len() < 2 || jpeg_data[0] != 0xFF || jpeg_data[1] != 0xD8 {
return Err(AppError::VideoError("Invalid JPEG data".to_string()));
}
self.frame_count += 1;
// Get JPEG dimensions from libyuv
let (width, height) = libyuv::mjpeg_size(jpeg_data)
.map_err(|e| AppError::VideoError(format!("Failed to read MJPEG size: {}", e)))?;
let yuv420_size = (width * height * 3 / 2) as usize;
if dst.len() < yuv420_size {
return Err(AppError::VideoError(format!(
"Buffer too small: {} < {}", dst.len(), yuv420_size
)));
}
// Decode MJPEG directly to I420 using libyuv
// libyuv handles all JPEG subsampling formats (4:2:0, 4:2:2, 4:4:4) internally
libyuv::mjpeg_to_i420(jpeg_data, &mut dst[..yuv420_size], width, height)
.map_err(|e| AppError::VideoError(format!("libyuv MJPEG→I420 failed: {}", e)))?;
Ok(yuv420_size)
}
/// Get frame count
pub fn frame_count(&self) -> u64 {
self.frame_count
}
}
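// Hypothetical sketch of the zero-allocation path above (not part of this
// diff): the destination buffer is sized once for width * height * 3 / 2
// bytes and reused across frames, as the shared pipeline later does.
//
//     let mut dec = MjpegTurboDecoder::new(Resolution::HD720)?;
//     let mut buf = vec![0u8; 1280 * 720 * 3 / 2];
//     let written = dec.decode_to_yuv420p_buffer(&jpeg_bytes, &mut buf)?;
//     encoder.encode_raw(&buf[..written], pts_ms)?;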
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_decoder_creation() {
let config = MjpegVaapiDecoderConfig::default();
match MjpegVaapiDecoder::new(config) {
Ok(decoder) => {
println!("Decoder created, hwaccel: {}", decoder.is_hwaccel_active());
}
Err(e) => {
println!("Failed to create decoder: {}", e);
}
}
}
}

View File

@@ -1,11 +1,3 @@
//! Video decoder implementations
//!
//! This module provides video decoding capabilities including:
//! - MJPEG VAAPI hardware decoding (outputs NV12)
//! - MJPEG turbojpeg decoding (outputs YUV420P directly)
pub mod mjpeg;
pub use mjpeg::{
DecodedYuv420pFrame, MjpegTurboDecoder, MjpegVaapiDecoder, MjpegVaapiDecoderConfig,
};
//! This module provides video decoding capabilities.

View File

@@ -355,7 +355,7 @@ mod tests {
fn test_codec_config_default() {
let config = VideoCodecConfig::default();
assert_eq!(config.codec, VideoCodecType::H264);
assert_eq!(config.bitrate_kbps, 2000);
assert_eq!(config.bitrate_kbps, 8000);
assert_eq!(config.fps, 30);
}

View File

@@ -13,7 +13,6 @@ use tracing::{debug, error, info, warn};
use crate::error::{AppError, Result};
use crate::video::convert::Nv12Converter;
use crate::video::decoder::mjpeg::{MjpegVaapiDecoder, MjpegVaapiDecoderConfig};
use crate::video::encoder::h264::{H264Config, H264Encoder};
use crate::video::format::{PixelFormat, Resolution};
use crate::webrtc::rtp::{H264VideoTrack, H264VideoTrackConfig};
@@ -79,8 +78,6 @@ pub struct H264Pipeline {
encoder: Arc<Mutex<Option<H264Encoder>>>,
/// NV12 converter (for BGR24/RGB24/YUYV → NV12)
nv12_converter: Arc<Mutex<Option<Nv12Converter>>>,
/// MJPEG VAAPI decoder (for MJPEG input, outputs NV12)
mjpeg_decoder: Arc<Mutex<Option<MjpegVaapiDecoder>>>,
/// WebRTC video track
video_track: Arc<H264VideoTrack>,
/// Pipeline statistics
@@ -127,44 +124,38 @@ impl H264Pipeline {
encoder_input_format
);
// Create NV12 converter or MJPEG decoder based on input format
// Create NV12 converter based on input format
// All formats are converted to NV12 for VAAPI encoder
let (nv12_converter, mjpeg_decoder) = match config.input_format {
let nv12_converter = match config.input_format {
// NV12 input - direct passthrough
PixelFormat::Nv12 => {
info!("NV12 input: direct passthrough to encoder");
(None, None)
None
}
// YUYV (4:2:2 packed) → NV12
PixelFormat::Yuyv => {
info!("YUYV input: converting to NV12");
(Some(Nv12Converter::yuyv_to_nv12(config.resolution)), None)
Some(Nv12Converter::yuyv_to_nv12(config.resolution))
}
// RGB24 → NV12
PixelFormat::Rgb24 => {
info!("RGB24 input: converting to NV12");
(Some(Nv12Converter::rgb24_to_nv12(config.resolution)), None)
Some(Nv12Converter::rgb24_to_nv12(config.resolution))
}
// BGR24 → NV12
PixelFormat::Bgr24 => {
info!("BGR24 input: converting to NV12");
(Some(Nv12Converter::bgr24_to_nv12(config.resolution)), None)
Some(Nv12Converter::bgr24_to_nv12(config.resolution))
}
// MJPEG/JPEG → NV12 (via hwcodec decoder)
// MJPEG/JPEG input - not supported (requires libjpeg for decoding)
PixelFormat::Mjpeg | PixelFormat::Jpeg => {
let decoder_config = MjpegVaapiDecoderConfig {
resolution: config.resolution,
use_hwaccel: true,
};
let decoder = MjpegVaapiDecoder::new(decoder_config)?;
info!(
"MJPEG decoder created for H264 pipeline (outputs NV12)"
);
(None, Some(decoder))
return Err(AppError::VideoError(
"MJPEG input format not supported in this build".to_string()
));
}
_ => {
@@ -192,7 +183,6 @@ impl H264Pipeline {
config,
encoder: Arc::new(Mutex::new(Some(encoder))),
nv12_converter: Arc::new(Mutex::new(nv12_converter)),
mjpeg_decoder: Arc::new(Mutex::new(mjpeg_decoder)),
video_track,
stats: Arc::new(Mutex::new(H264PipelineStats::default())),
running: running_tx,
@@ -230,7 +220,6 @@ impl H264Pipeline {
let encoder = self.encoder.lock().await.take();
let nv12_converter = self.nv12_converter.lock().await.take();
let mjpeg_decoder = self.mjpeg_decoder.lock().await.take();
let video_track = self.video_track.clone();
let stats = self.stats.clone();
let encode_times = self.encode_times.clone();
@@ -248,15 +237,10 @@ impl H264Pipeline {
};
let mut nv12_converter = nv12_converter;
let mut mjpeg_decoder = mjpeg_decoder;
let mut frame_count: u64 = 0;
let mut last_fps_time = Instant::now();
let mut fps_frame_count: u64 = 0;
// Pre-allocated NV12 buffer for MJPEG decoder output (avoids per-frame allocation)
let nv12_size = (config.resolution.width * config.resolution.height * 3 / 2) as usize;
let mut nv12_buffer = vec![0u8; nv12_size];
// Flag for one-time warnings
let mut size_mismatch_warned = false;
@@ -298,7 +282,6 @@ impl H264Pipeline {
}
// Convert to NV12 for VAAPI encoder
// MJPEG -> NV12 (via VAAPI decoder)
// BGR24/RGB24/YUYV -> NV12 (via NV12 converter)
// NV12 -> pass through
//
@@ -307,36 +290,7 @@ impl H264Pipeline {
fps_frame_count += 1;
let pts_ms = (frame_count * 1000 / config.fps as u64) as i64;
let encode_result = if let Some(ref mut decoder) = mjpeg_decoder {
// MJPEG input - decode to NV12 via VAAPI
match decoder.decode(&raw_frame) {
Ok(nv12_frame) => {
// Calculate required size for this frame
let required_size = (nv12_frame.width * nv12_frame.height * 3 / 2) as usize;
// Resize buffer if needed (handles resolution changes)
if nv12_buffer.len() < required_size {
debug!(
"Resizing NV12 buffer: {} -> {} bytes (resolution: {}x{})",
nv12_buffer.len(), required_size,
nv12_frame.width, nv12_frame.height
);
nv12_buffer.resize(required_size, 0);
}
// Copy to pre-allocated buffer (guaranteed to fit after resize)
let written = nv12_frame.copy_to_packed_nv12(&mut nv12_buffer)
.expect("BUG: buffer too small after resize");
encoder.encode_raw(&nv12_buffer[..written], pts_ms)
}
Err(e) => {
error!("MJPEG VAAPI decode failed: {}", e);
let mut s = stats.lock().await;
s.errors += 1;
continue;
}
}
} else if let Some(ref mut conv) = nv12_converter {
let encode_result = if let Some(ref mut conv) = nv12_converter {
// BGR24/RGB24/YUYV input - convert to NV12
// Optimized: pass reference directly without copy
match conv.convert(&raw_frame) {
@@ -518,7 +472,7 @@ mod tests {
fn test_pipeline_config_default() {
let config = H264PipelineConfig::default();
assert_eq!(config.resolution, Resolution::HD720);
assert_eq!(config.bitrate_kbps, 2000);
assert_eq!(config.bitrate_kbps, 8000);
assert_eq!(config.fps, 30);
assert_eq!(config.gop_size, 30);
}

View File

@@ -16,8 +16,7 @@ pub mod streamer;
pub mod video_session;
pub use capture::VideoCapturer;
pub use convert::{MjpegDecoder, MjpegToYuv420Converter, PixelConverter, Yuv420pBuffer};
pub use decoder::{MjpegVaapiDecoder, MjpegVaapiDecoderConfig};
pub use convert::{PixelConverter, Yuv420pBuffer};
pub use device::{VideoDevice, VideoDeviceInfo};
pub use encoder::{JpegEncoder, H264Encoder, H264EncoderType};
pub use format::PixelFormat;

View File

@@ -28,7 +28,6 @@ const AUTO_STOP_GRACE_PERIOD_SECS: u64 = 3;
use crate::error::{AppError, Result};
use crate::video::convert::{Nv12Converter, PixelConverter};
use crate::video::decoder::mjpeg::{MjpegTurboDecoder, MjpegVaapiDecoder, MjpegVaapiDecoderConfig};
use crate::video::encoder::h264::{H264Config, H264Encoder};
use crate::video::encoder::h265::{H265Config, H265Encoder};
use crate::video::encoder::registry::{EncoderBackend, EncoderRegistry, VideoEncoderType};
@@ -298,12 +297,6 @@ pub struct SharedVideoPipeline {
encoder: Mutex<Option<Box<dyn VideoEncoderTrait + Send>>>,
nv12_converter: Mutex<Option<Nv12Converter>>,
yuv420p_converter: Mutex<Option<PixelConverter>>,
mjpeg_decoder: Mutex<Option<MjpegVaapiDecoder>>,
/// Turbojpeg decoder for direct MJPEG->YUV420P (optimized for software encoders)
mjpeg_turbo_decoder: Mutex<Option<MjpegTurboDecoder>>,
nv12_buffer: Mutex<Vec<u8>>,
/// YUV420P buffer for turbojpeg decoder output
yuv420p_buffer: Mutex<Vec<u8>>,
/// Whether the encoder needs YUV420P (true) or NV12 (false)
encoder_needs_yuv420p: AtomicBool,
/// Whether YUYV direct input is enabled (RKMPP optimization)
@@ -335,18 +328,12 @@ impl SharedVideoPipeline {
let (frame_tx, _) = broadcast::channel(16); // Reduced from 64 for lower latency
let (running_tx, running_rx) = watch::channel(false);
let nv12_size = (config.resolution.width * config.resolution.height * 3 / 2) as usize;
let yuv420p_size = nv12_size; // Same size as NV12
let pipeline = Arc::new(Self {
config: RwLock::new(config),
encoder: Mutex::new(None),
nv12_converter: Mutex::new(None),
yuv420p_converter: Mutex::new(None),
mjpeg_decoder: Mutex::new(None),
mjpeg_turbo_decoder: Mutex::new(None),
nv12_buffer: Mutex::new(vec![0u8; nv12_size]),
yuv420p_buffer: Mutex::new(vec![0u8; yuv420p_size]),
encoder_needs_yuv420p: AtomicBool::new(false),
yuyv_direct_input: AtomicBool::new(false),
frame_tx,
@@ -505,42 +492,36 @@ impl SharedVideoPipeline {
config.input_format,
if use_yuyv_direct { "YUYV422 (direct)" } else if needs_yuv420p { "YUV420P" } else { "NV12" });
let (nv12_converter, yuv420p_converter, mjpeg_decoder, mjpeg_turbo_decoder) = if use_yuyv_direct {
let (nv12_converter, yuv420p_converter) = if use_yuyv_direct {
// RKMPP with YUYV direct input - skip all conversion
info!("YUYV direct input enabled for RKMPP, skipping format conversion");
(None, None, None, None)
(None, None)
} else if needs_yuv420p {
// Software encoder needs YUV420P
match config.input_format {
PixelFormat::Yuv420 => {
info!("Using direct YUV420P input (no conversion)");
(None, None, None, None)
(None, None)
}
PixelFormat::Yuyv => {
info!("Using YUYV->YUV420P converter");
(None, Some(PixelConverter::yuyv_to_yuv420p(config.resolution)), None, None)
(None, Some(PixelConverter::yuyv_to_yuv420p(config.resolution)))
}
PixelFormat::Nv12 => {
info!("Using NV12->YUV420P converter");
(None, Some(PixelConverter::nv12_to_yuv420p(config.resolution)), None, None)
(None, Some(PixelConverter::nv12_to_yuv420p(config.resolution)))
}
PixelFormat::Rgb24 => {
info!("Using RGB24->YUV420P converter");
(None, Some(PixelConverter::rgb24_to_yuv420p(config.resolution)), None, None)
(None, Some(PixelConverter::rgb24_to_yuv420p(config.resolution)))
}
PixelFormat::Bgr24 => {
info!("Using BGR24->YUV420P converter");
(None, Some(PixelConverter::bgr24_to_yuv420p(config.resolution)), None, None)
}
PixelFormat::Mjpeg | PixelFormat::Jpeg => {
// Use turbojpeg for direct MJPEG->YUV420P (no intermediate NV12)
info!("Using turbojpeg MJPEG decoder (direct YUV420P output)");
let turbo_decoder = MjpegTurboDecoder::new(config.resolution)?;
(None, None, None, Some(turbo_decoder))
(None, Some(PixelConverter::bgr24_to_yuv420p(config.resolution)))
}
_ => {
return Err(AppError::VideoError(format!(
"Unsupported input format: {}",
"Unsupported input format for software encoding: {}",
config.input_format
)));
}
@@ -550,32 +531,23 @@ impl SharedVideoPipeline {
match config.input_format {
PixelFormat::Nv12 => {
info!("Using direct NV12 input (no conversion)");
(None, None, None, None)
(None, None)
}
PixelFormat::Yuyv => {
info!("Using YUYV->NV12 converter");
(Some(Nv12Converter::yuyv_to_nv12(config.resolution)), None, None, None)
(Some(Nv12Converter::yuyv_to_nv12(config.resolution)), None)
}
PixelFormat::Rgb24 => {
info!("Using RGB24->NV12 converter");
(Some(Nv12Converter::rgb24_to_nv12(config.resolution)), None, None, None)
(Some(Nv12Converter::rgb24_to_nv12(config.resolution)), None)
}
PixelFormat::Bgr24 => {
info!("Using BGR24->NV12 converter");
(Some(Nv12Converter::bgr24_to_nv12(config.resolution)), None, None, None)
}
PixelFormat::Mjpeg | PixelFormat::Jpeg => {
info!("Using MJPEG decoder (NV12 output)");
let decoder_config = MjpegVaapiDecoderConfig {
resolution: config.resolution,
use_hwaccel: true,
};
let decoder = MjpegVaapiDecoder::new(decoder_config)?;
(None, None, Some(decoder), None)
(Some(Nv12Converter::bgr24_to_nv12(config.resolution)), None)
}
_ => {
return Err(AppError::VideoError(format!(
"Unsupported input format: {}",
"Unsupported input format for hardware encoding: {}",
config.input_format
)));
}
@@ -585,8 +557,6 @@ impl SharedVideoPipeline {
*self.encoder.lock().await = Some(encoder);
*self.nv12_converter.lock().await = nv12_converter;
*self.yuv420p_converter.lock().await = yuv420p_converter;
*self.mjpeg_decoder.lock().await = mjpeg_decoder;
*self.mjpeg_turbo_decoder.lock().await = mjpeg_turbo_decoder;
self.encoder_needs_yuv420p.store(needs_yuv420p, Ordering::Release);
self.yuyv_direct_input.store(use_yuyv_direct, Ordering::Release);
@@ -669,8 +639,6 @@ impl SharedVideoPipeline {
*self.encoder.lock().await = None;
*self.nv12_converter.lock().await = None;
*self.yuv420p_converter.lock().await = None;
*self.mjpeg_decoder.lock().await = None;
*self.mjpeg_turbo_decoder.lock().await = None;
self.encoder_needs_yuv420p.store(false, Ordering::Release);
info!("Switched to {} codec", codec);
@@ -862,8 +830,6 @@ impl SharedVideoPipeline {
);
}
let mut mjpeg_decoder = self.mjpeg_decoder.lock().await;
let mut mjpeg_turbo_decoder = self.mjpeg_turbo_decoder.lock().await;
let mut nv12_converter = self.nv12_converter.lock().await;
let mut yuv420p_converter = self.yuv420p_converter.lock().await;
let needs_yuv420p = self.encoder_needs_yuv420p.load(Ordering::Acquire);
@@ -879,38 +845,7 @@ impl SharedVideoPipeline {
debug!("[Pipeline] Keyframe will be generated for this frame");
}
let encode_result = if mjpeg_turbo_decoder.is_some() {
// Optimized path: MJPEG -> YUV420P directly via turbojpeg (for software encoders)
let turbo = mjpeg_turbo_decoder.as_mut().unwrap();
let mut yuv420p_buffer = self.yuv420p_buffer.lock().await;
let written = turbo.decode_to_yuv420p_buffer(raw_frame, &mut yuv420p_buffer)
.map_err(|e| AppError::VideoError(format!("turbojpeg decode failed: {}", e)))?;
encoder.encode_raw(&yuv420p_buffer[..written], pts_ms)
} else if mjpeg_decoder.is_some() {
// MJPEG input: decode to NV12 (for hardware encoders)
let decoder = mjpeg_decoder.as_mut().unwrap();
let nv12_frame = decoder.decode(raw_frame)
.map_err(|e| AppError::VideoError(format!("MJPEG decode failed: {}", e)))?;
let required_size = (nv12_frame.width * nv12_frame.height * 3 / 2) as usize;
let mut nv12_buffer = self.nv12_buffer.lock().await;
if nv12_buffer.len() < required_size {
nv12_buffer.resize(required_size, 0);
}
let written = nv12_frame.copy_to_packed_nv12(&mut nv12_buffer)
.expect("Buffer too small");
// Debug log for H265 after MJPEG decode
if codec == VideoEncoderType::H265 && frame_count % 30 == 1 {
debug!(
"[Pipeline-H265] MJPEG decoded: nv12_size={}, frame_width={}, frame_height={}",
written, nv12_frame.width, nv12_frame.height
);
}
encoder.encode_raw(&nv12_buffer[..written], pts_ms)
} else if needs_yuv420p && yuv420p_converter.is_some() {
let encode_result = if needs_yuv420p && yuv420p_converter.is_some() {
// Software encoder with direct input conversion to YUV420P
let conv = yuv420p_converter.as_mut().unwrap();
let yuv420p_data = conv.convert(raw_frame)
@@ -930,8 +865,6 @@ impl SharedVideoPipeline {
drop(encoder_guard);
drop(nv12_converter);
drop(yuv420p_converter);
drop(mjpeg_decoder);
drop(mjpeg_turbo_decoder);
match encode_result {
Ok(frames) => {