feat: support RK native HDMI IN capture
@@ -318,6 +318,12 @@ impl MjpegStreamHandler {
             PixelFormat::Nv12 => encoder
                 .encode_nv12(frame.data(), sequence)
                 .map_err(|e| format!("NV12 encode failed: {}", e))?,
+            PixelFormat::Nv16 => encoder
+                .encode_nv16(frame.data(), sequence)
+                .map_err(|e| format!("NV16 encode failed: {}", e))?,
+            PixelFormat::Nv24 => encoder
+                .encode_nv24(frame.data(), sequence)
+                .map_err(|e| format!("NV24 encode failed: {}", e))?,
             PixelFormat::Rgb24 => encoder
                 .encode_rgb(frame.data(), sequence)
                 .map_err(|e| format!("RGB encode failed: {}", e))?,
@@ -190,87 +190,70 @@ pub struct PixelConverter {
     resolution: Resolution,
     /// Output buffer (reused across conversions)
     output_buffer: Yuv420pBuffer,
+    /// Scratch buffer for split chroma planes when converting semiplanar 4:2:2 / 4:4:4 input.
+    uv_split_buffer: Vec<u8>,
 }

 impl PixelConverter {
-    /// Create a new converter for YUYV → YUV420P
-    pub fn yuyv_to_yuv420p(resolution: Resolution) -> Self {
+    fn new(src_format: PixelFormat, dst_format: PixelFormat, resolution: Resolution) -> Self {
+        let max_uv_plane_size = (resolution.width * resolution.height) as usize;
         Self {
-            src_format: PixelFormat::Yuyv,
-            dst_format: PixelFormat::Yuv420,
+            src_format,
+            dst_format,
             resolution,
             output_buffer: Yuv420pBuffer::new(resolution),
+            uv_split_buffer: vec![0u8; max_uv_plane_size * 2],
         }
     }

+    /// Create a new converter for YUYV → YUV420P
+    pub fn yuyv_to_yuv420p(resolution: Resolution) -> Self {
+        Self::new(PixelFormat::Yuyv, PixelFormat::Yuv420, resolution)
+    }
+
     /// Create a new converter for UYVY → YUV420P
     pub fn uyvy_to_yuv420p(resolution: Resolution) -> Self {
-        Self {
-            src_format: PixelFormat::Uyvy,
-            dst_format: PixelFormat::Yuv420,
-            resolution,
-            output_buffer: Yuv420pBuffer::new(resolution),
-        }
+        Self::new(PixelFormat::Uyvy, PixelFormat::Yuv420, resolution)
     }

     /// Create a new converter for YVYU → YUV420P
     pub fn yvyu_to_yuv420p(resolution: Resolution) -> Self {
-        Self {
-            src_format: PixelFormat::Yvyu,
-            dst_format: PixelFormat::Yuv420,
-            resolution,
-            output_buffer: Yuv420pBuffer::new(resolution),
-        }
+        Self::new(PixelFormat::Yvyu, PixelFormat::Yuv420, resolution)
     }

     /// Create a new converter for NV12 → YUV420P
     pub fn nv12_to_yuv420p(resolution: Resolution) -> Self {
-        Self {
-            src_format: PixelFormat::Nv12,
-            dst_format: PixelFormat::Yuv420,
-            resolution,
-            output_buffer: Yuv420pBuffer::new(resolution),
-        }
+        Self::new(PixelFormat::Nv12, PixelFormat::Yuv420, resolution)
     }

     /// Create a new converter for NV21 → YUV420P
     pub fn nv21_to_yuv420p(resolution: Resolution) -> Self {
-        Self {
-            src_format: PixelFormat::Nv21,
-            dst_format: PixelFormat::Yuv420,
-            resolution,
-            output_buffer: Yuv420pBuffer::new(resolution),
-        }
+        Self::new(PixelFormat::Nv21, PixelFormat::Yuv420, resolution)
     }

+    /// Create a new converter for NV16 → YUV420P
+    pub fn nv16_to_yuv420p(resolution: Resolution) -> Self {
+        Self::new(PixelFormat::Nv16, PixelFormat::Yuv420, resolution)
+    }
+
+    /// Create a new converter for NV24 → YUV420P
+    pub fn nv24_to_yuv420p(resolution: Resolution) -> Self {
+        Self::new(PixelFormat::Nv24, PixelFormat::Yuv420, resolution)
+    }
+
     /// Create a new converter for YVU420 → YUV420P (swap U and V planes)
     pub fn yvu420_to_yuv420p(resolution: Resolution) -> Self {
-        Self {
-            src_format: PixelFormat::Yvu420,
-            dst_format: PixelFormat::Yuv420,
-            resolution,
-            output_buffer: Yuv420pBuffer::new(resolution),
-        }
+        Self::new(PixelFormat::Yvu420, PixelFormat::Yuv420, resolution)
     }

     /// Create a new converter for RGB24 → YUV420P
     pub fn rgb24_to_yuv420p(resolution: Resolution) -> Self {
-        Self {
-            src_format: PixelFormat::Rgb24,
-            dst_format: PixelFormat::Yuv420,
-            resolution,
-            output_buffer: Yuv420pBuffer::new(resolution),
-        }
+        Self::new(PixelFormat::Rgb24, PixelFormat::Yuv420, resolution)
     }

     /// Create a new converter for BGR24 → YUV420P
     pub fn bgr24_to_yuv420p(resolution: Resolution) -> Self {
-        Self {
-            src_format: PixelFormat::Bgr24,
-            dst_format: PixelFormat::Yuv420,
-            resolution,
-            output_buffer: Yuv420pBuffer::new(resolution),
-        }
+        Self::new(PixelFormat::Bgr24, PixelFormat::Yuv420, resolution)
     }

     /// Convert a frame and return reference to the output buffer
@@ -304,6 +287,12 @@ impl PixelConverter {
                     AppError::VideoError(format!("libyuv conversion failed: {}", e))
                 })?;
             }
+            (PixelFormat::Nv16, PixelFormat::Yuv420) => {
+                self.convert_nv16_to_yuv420p(input)?;
+            }
+            (PixelFormat::Nv24, PixelFormat::Yuv420) => {
+                self.convert_nv24_to_yuv420p(input)?;
+            }
             (PixelFormat::Rgb24, PixelFormat::Yuv420) => {
                 libyuv::rgb24_to_i420(input, self.output_buffer.as_bytes_mut(), width, height)
                     .map_err(|e| {
@@ -429,6 +418,102 @@ impl PixelConverter {
         Ok(())
     }

+    /// Convert NV16 (4:2:2 semiplanar) → YUV420P using libyuv split + I422 downsample
+    fn convert_nv16_to_yuv420p(&mut self, nv16: &[u8]) -> Result<()> {
+        let width = self.resolution.width as usize;
+        let height = self.resolution.height as usize;
+        let y_size = width * height;
+        let uv_size = y_size;
+
+        if nv16.len() < y_size + uv_size {
+            return Err(AppError::VideoError(format!(
+                "NV16 data too small: {} < {}",
+                nv16.len(),
+                y_size + uv_size
+            )));
+        }
+
+        let src_uv = &nv16[y_size..y_size + uv_size];
+        let chroma_plane_size = y_size / 2;
+        let (u_plane_422, rest) = self.uv_split_buffer.split_at_mut(chroma_plane_size);
+        let (v_plane_422, _) = rest.split_at_mut(chroma_plane_size);
+
+        libyuv::split_uv_plane(
+            src_uv,
+            width as i32,
+            u_plane_422,
+            (width / 2) as i32,
+            v_plane_422,
+            (width / 2) as i32,
+            (width / 2) as i32,
+            height as i32,
+        )
+        .map_err(|e| AppError::VideoError(format!("libyuv NV16 split failed: {}", e)))?;
+
+        libyuv::i422_to_i420_planar(
+            &nv16[..y_size],
+            width as i32,
+            u_plane_422,
+            (width / 2) as i32,
+            v_plane_422,
+            (width / 2) as i32,
+            self.output_buffer.as_bytes_mut(),
+            width as i32,
+            height as i32,
+        )
+        .map_err(|e| AppError::VideoError(format!("libyuv NV16→I420 failed: {}", e)))?;
+
+        Ok(())
+    }
+
+    /// Convert NV24 (4:4:4 semiplanar) → YUV420P using libyuv split + I444 downsample
+    fn convert_nv24_to_yuv420p(&mut self, nv24: &[u8]) -> Result<()> {
+        let width = self.resolution.width as usize;
+        let height = self.resolution.height as usize;
+        let y_size = width * height;
+        let uv_size = y_size * 2;
+
+        if nv24.len() < y_size + uv_size {
+            return Err(AppError::VideoError(format!(
+                "NV24 data too small: {} < {}",
+                nv24.len(),
+                y_size + uv_size
+            )));
+        }
+
+        let src_uv = &nv24[y_size..y_size + uv_size];
+        let chroma_plane_size = y_size;
+        let (u_plane_444, rest) = self.uv_split_buffer.split_at_mut(chroma_plane_size);
+        let (v_plane_444, _) = rest.split_at_mut(chroma_plane_size);
+
+        libyuv::split_uv_plane(
+            src_uv,
+            (width * 2) as i32,
+            u_plane_444,
+            width as i32,
+            v_plane_444,
+            width as i32,
+            width as i32,
+            height as i32,
+        )
+        .map_err(|e| AppError::VideoError(format!("libyuv NV24 split failed: {}", e)))?;
+
+        libyuv::i444_to_i420_planar(
+            &nv24[..y_size],
+            width as i32,
+            u_plane_444,
+            width as i32,
+            v_plane_444,
+            width as i32,
+            self.output_buffer.as_bytes_mut(),
+            width as i32,
+            height as i32,
+        )
+        .map_err(|e| AppError::VideoError(format!("libyuv NV24→I420 failed: {}", e)))?;
+
+        Ok(())
+    }
 }

 /// Calculate YUV420P buffer size for a given resolution
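Note: the NV16 path above runs in two SIMD passes: libyuv's SplitUVPlane deinterleaves the shared UV plane into separate half-width U and V planes (still full height, i.e. I422), and I422ToI420 then halves the chroma vertically. A standalone sketch of the plane arithmetic, assuming a 1920x1080 frame (illustrative only, not part of the patch):

    fn main() {
        let (w, h) = (1920usize, 1080usize);
        let y = w * h;          // luma plane, identical in every format here
        let nv16_uv = y;        // NV16: interleaved UV rows, full height
        let i422_uv = y / 2;    // after SplitUVPlane: U and V are each (w/2) x h
        let i420_uv = y / 4;    // after I422ToI420: U and V are each (w/2) x (h/2)
        assert_eq!(nv16_uv, 2 * i422_uv);
        assert_eq!(i422_uv, 2 * i420_uv);
        println!("Y={y} NV16_UV={nv16_uv} I422_UV={i422_uv} I420_UV={i420_uv}");
    }

This is also why uv_split_buffer is allocated as 2 * width * height bytes: the NV24 case needs two full-size split planes, and NV16 fits in half of that.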
@@ -519,6 +604,16 @@ impl Nv12Converter {
         }
     }

+    /// Create a new converter for NV24 → NV12
+    pub fn nv24_to_nv12(resolution: Resolution) -> Self {
+        Self {
+            src_format: PixelFormat::Nv24,
+            resolution,
+            output_buffer: Nv12Buffer::new(resolution),
+            i420_buffer: None,
+        }
+    }
+
     /// Convert a frame and return reference to the output buffer
     pub fn convert(&mut self, input: &[u8]) -> Result<&[u8]> {
         let width = self.resolution.width as i32;
@@ -553,6 +648,16 @@ impl Nv12Converter {
                 )?;
                 return Ok(self.output_buffer.as_bytes());
             }
+            PixelFormat::Nv24 => {
+                let dst = self.output_buffer.as_bytes_mut();
+                Self::convert_nv24_to_nv12_with_dims(
+                    self.resolution.width as usize,
+                    self.resolution.height as usize,
+                    input,
+                    dst,
+                )?;
+                return Ok(self.output_buffer.as_bytes());
+            }
             _ => {}
         }

@@ -635,6 +740,57 @@ impl Nv12Converter {
         Ok(())
     }

+    fn convert_nv24_to_nv12_with_dims(
+        width: usize,
+        height: usize,
+        input: &[u8],
+        dst: &mut [u8],
+    ) -> Result<()> {
+        let y_size = width * height;
+        let uv_size_nv24 = y_size * 2;
+        let uv_size_nv12 = y_size / 2;
+
+        if input.len() < y_size + uv_size_nv24 {
+            return Err(AppError::VideoError(format!(
+                "NV24 data too small: {} < {}",
+                input.len(),
+                y_size + uv_size_nv24
+            )));
+        }
+
+        dst[..y_size].copy_from_slice(&input[..y_size]);
+
+        let src_uv = &input[y_size..y_size + uv_size_nv24];
+        let dst_uv = &mut dst[y_size..y_size + uv_size_nv12];
+        let dst_rows = height / 2;
+
+        for row in 0..dst_rows {
+            let src_row0 = &src_uv[row * 2 * width * 2..row * 2 * width * 2 + width * 2];
+            let src_row1 =
+                &src_uv[(row * 2 + 1) * width * 2..(row * 2 + 1) * width * 2 + width * 2];
+            let dst_row = &mut dst_uv[row * width..row * width + width];
+
+            for pair in 0..(width / 2) {
+                let src_idx0 = pair * 4;
+                let src_idx1 = src_idx0 + 2;
+                let dst_idx = pair * 2;
+
+                dst_row[dst_idx] = ((src_row0[src_idx0] as u32
+                    + src_row0[src_idx1] as u32
+                    + src_row1[src_idx0] as u32
+                    + src_row1[src_idx1] as u32)
+                    / 4) as u8;
+                dst_row[dst_idx + 1] = ((src_row0[src_idx0 + 1] as u32
+                    + src_row0[src_idx1 + 1] as u32
+                    + src_row1[src_idx0 + 1] as u32
+                    + src_row1[src_idx1 + 1] as u32)
+                    / 4) as u8;
+            }
+        }
+
+        Ok(())
+    }
+
     /// Get output buffer length
     pub fn output_len(&self) -> usize {
         self.output_buffer.len()
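Note: convert_nv24_to_nv12_with_dims copies the luma plane verbatim and reduces the full-resolution chroma with a plain 2x2 box filter: each NV12 U (or V) byte is the mean of four NV24 samples, two from each of two adjacent rows. A tiny worked example with made-up sample values (illustrative only):

    fn main() {
        // Four co-sited U samples from a 2x2 NV24 block.
        let (u00, u01, u10, u11) = (10u32, 20u32, 30u32, 40u32);
        let nv12_u = ((u00 + u01 + u10 + u11) / 4) as u8;
        assert_eq!(nv12_u, 25);
    }

The stride arithmetic follows from the interleaving: NV24 UV rows are width * 2 bytes, src_idx steps by 4 to hop two horizontal UV pairs, and the +1 offsets select the V byte of each pair.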
@@ -6,7 +6,10 @@ use std::path::{Path, PathBuf};
 use std::sync::mpsc;
 use std::time::Duration;
 use tracing::{debug, info, warn};
-use v4l2r::bindings::{v4l2_frmivalenum, v4l2_frmsizeenum};
+use v4l2r::bindings::{
+    v4l2_bt_timings, v4l2_dv_timings, v4l2_frmivalenum, v4l2_frmsizeenum, v4l2_streamparm,
+    V4L2_DV_BT_656_1120,
+};
 use v4l2r::ioctl::{
     self, Capabilities, Capability as V4l2rCapability, FormatIterator, FrmIvalTypes, FrmSizeTypes,
 };
@@ -14,6 +17,7 @@ use v4l2r::nix::errno::Errno;
 use v4l2r::{Format as V4l2rFormat, QueueType};

 use super::format::{PixelFormat, Resolution};
+use super::is_rk_hdmirx_driver;
 use crate::error::{AppError, Result};

 const DEVICE_PROBE_TIMEOUT_MS: u64 = 400;
@@ -57,11 +61,11 @@ pub struct FormatInfo {
 pub struct ResolutionInfo {
     pub width: u32,
     pub height: u32,
-    pub fps: Vec<u32>,
+    pub fps: Vec<f64>,
 }

 impl ResolutionInfo {
-    pub fn new(width: u32, height: u32, fps: Vec<u32>) -> Self {
+    pub fn new(width: u32, height: u32, fps: Vec<f64>) -> Self {
         Self { width, height, fps }
     }

@@ -143,7 +147,11 @@ impl VideoDevice {
             read_write: flags.contains(Capabilities::READWRITE),
         };

-        let formats = self.enumerate_formats()?;
+        let formats = if is_rk_hdmirx_driver(&caps.driver, &caps.card) {
+            self.enumerate_current_format_only()?
+        } else {
+            self.enumerate_formats()?
+        };

         // Determine if this is likely an HDMI capture card
         let is_capture_card = Self::detect_capture_card(&caps.card, &caps.driver, &formats);
@@ -176,6 +184,15 @@ impl VideoDevice {
             // Try to convert FourCC to our PixelFormat
             if let Some(format) = PixelFormat::from_v4l2r(desc.pixelformat) {
                 let resolutions = self.enumerate_resolutions(desc.pixelformat)?;
+                let is_current_format = self.current_active_format() == Some(format);
+
+                if resolutions.is_empty() && !is_current_format {
+                    debug!(
+                        "Skipping format {:?} ({}): not usable for current active mode",
+                        desc.pixelformat, desc.description
+                    );
+                    continue;
+                }

                 formats.push(FormatInfo {
                     format,
@@ -196,9 +213,38 @@ impl VideoDevice {
         Ok(formats)
     }

+    fn enumerate_current_format_only(&self) -> Result<Vec<FormatInfo>> {
+        let current = self.get_format()?;
+        let Some(format) = PixelFormat::from_v4l2r(current.pixelformat) else {
+            debug!(
+                "Current active format {:?} is not supported by One-KVM, falling back to full enumeration",
+                current.pixelformat
+            );
+            return self.enumerate_formats();
+        };
+
+        let description = self
+            .format_description(current.pixelformat)
+            .unwrap_or_else(|| format.to_string());
+
+        let mut resolutions = self.enumerate_resolutions(current.pixelformat)?;
+        if resolutions.is_empty() {
+            if let Some(current_mode) = self.current_mode_resolution_info() {
+                resolutions.push(current_mode);
+            }
+        }
+
+        Ok(vec![FormatInfo {
+            format,
+            resolutions,
+            description,
+        }])
+    }
+
     /// Enumerate resolutions for a specific format
     fn enumerate_resolutions(&self, fourcc: v4l2r::PixelFormat) -> Result<Vec<ResolutionInfo>> {
         let mut resolutions = Vec::new();
+        let mut should_fallback_to_current_mode = false;

         let mut index = 0u32;
         loop {
@@ -241,7 +287,15 @@ impl VideoDevice {
                         e,
                         v4l2r::ioctl::FrameSizeError::IoctlError(err) if err == Errno::EINVAL
                     );
-                    if !is_einval {
+                    let is_unsupported = matches!(
+                        e,
+                        v4l2r::ioctl::FrameSizeError::IoctlError(err)
+                            if matches!(err, Errno::ENOTTY | Errno::ENOSYS | Errno::EOPNOTSUPP)
+                    );
+                    if is_unsupported && resolutions.is_empty() {
+                        should_fallback_to_current_mode = true;
+                    }
+                    if !is_einval && !is_unsupported {
                         debug!("Failed to enumerate frame sizes for {:?}: {}", fourcc, e);
                     }
                     break;
@@ -249,6 +303,23 @@ impl VideoDevice {
             }
         }

+        if should_fallback_to_current_mode {
+            if let Some(resolution) = self.current_mode_resolution_info() {
+                if self.format_works_for_resolution(fourcc, resolution.width, resolution.height) {
+                    debug!(
+                        "Falling back to current active mode for {:?}: {}x{} @ {:?} fps",
+                        fourcc, resolution.width, resolution.height, resolution.fps
+                    );
+                    resolutions.push(resolution);
+                } else {
+                    debug!(
+                        "Skipping current-mode fallback for {:?}: TRY_FMT rejected {}x{}",
+                        fourcc, resolution.width, resolution.height
+                    );
+                }
+            }
+        }
+
         // Sort by resolution (largest first)
         resolutions.sort_by(|a, b| (b.width * b.height).cmp(&(a.width * a.height)));
         resolutions.dedup_by(|a, b| a.width == b.width && a.height == b.height);
@@ -262,8 +333,9 @@ impl VideoDevice {
         fourcc: v4l2r::PixelFormat,
         width: u32,
         height: u32,
-    ) -> Result<Vec<u32>> {
+    ) -> Result<Vec<f64>> {
         let mut fps_list = Vec::new();
+        let mut should_fallback_to_current_mode = false;

         let mut index = 0u32;
         loop {
@@ -274,15 +346,18 @@ impl VideoDevice {
             if let Some(interval) = interval.intervals() {
                 match interval {
                     FrmIvalTypes::Discrete(fraction) => {
-                        if fraction.numerator > 0 {
-                            let fps = fraction.denominator / fraction.numerator;
+                        if fraction.numerator > 0 && fraction.denominator > 0 {
+                            let fps =
+                                fraction.denominator as f64 / fraction.numerator as f64;
                             fps_list.push(fps);
                         }
                     }
                     FrmIvalTypes::StepWise(step) => {
-                        if step.max.numerator > 0 {
-                            let min_fps = step.max.denominator / step.max.numerator;
-                            let max_fps = step.min.denominator / step.min.numerator;
+                        if step.max.numerator > 0 && step.max.denominator > 0 {
+                            let min_fps =
+                                step.max.denominator as f64 / step.max.numerator as f64;
+                            let max_fps =
+                                step.min.denominator as f64 / step.min.numerator as f64;
                             fps_list.push(min_fps);
                             if max_fps != min_fps {
                                 fps_list.push(max_fps);
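Note: moving this arithmetic to f64 is not just cosmetic. V4L2 reports frame intervals as fractions, and NTSC-family rates do not divide evenly, so the old integer division truncated them. A quick check under an assumed 1001/60000 interval (illustrative):

    fn main() {
        let (numerator, denominator) = (1001u32, 60000u32);
        assert_eq!(denominator / numerator, 59);            // old: truncated to 59
        let fps = denominator as f64 / numerator as f64;    // new: 59.94...
        assert!((fps - 59.94).abs() < 0.01);
    }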
@@ -298,7 +373,15 @@ impl VideoDevice {
                         e,
                         v4l2r::ioctl::FrameIntervalsError::IoctlError(err) if err == Errno::EINVAL
                     );
-                    if !is_einval {
+                    let is_unsupported = matches!(
+                        e,
+                        v4l2r::ioctl::FrameIntervalsError::IoctlError(err)
+                            if matches!(err, Errno::ENOTTY | Errno::ENOSYS | Errno::EOPNOTSUPP)
+                    );
+                    if is_unsupported && fps_list.is_empty() {
+                        should_fallback_to_current_mode = true;
+                    }
+                    if !is_einval && !is_unsupported {
                         debug!(
                             "Failed to enumerate frame intervals for {:?} {}x{}: {}",
                             fourcc, width, height, e
@@ -309,8 +392,11 @@ impl VideoDevice {
             }
         }

-        fps_list.sort_by(|a, b| b.cmp(a));
-        fps_list.dedup();
+        if should_fallback_to_current_mode {
+            fps_list.extend(self.current_mode_fps());
+        }
+
+        normalize_fps_list(&mut fps_list);

         Ok(fps_list)
     }
@@ -426,6 +512,105 @@ impl VideoDevice {
         &self.fd
     }

+    fn current_mode_resolution_info(&self) -> Option<ResolutionInfo> {
+        let (width, height) = self
+            .current_dv_timings_mode()
+            .map(|(width, height, _)| (width, height))
+            .or_else(|| self.current_format_resolution())?;
+        Some(ResolutionInfo::new(width, height, self.current_mode_fps()))
+    }
+
+    fn current_mode_fps(&self) -> Vec<f64> {
+        let mut fps = Vec::new();
+
+        if let Some(frame_rate) = self.current_parm_fps() {
+            fps.push(frame_rate);
+        }
+
+        if let Some((_, _, Some(frame_rate))) = self.current_dv_timings_mode() {
+            fps.push(frame_rate);
+        }
+
+        normalize_fps_list(&mut fps);
+        fps
+    }
+
+    fn current_parm_fps(&self) -> Option<f64> {
+        let queue = self.capture_queue_type().ok()?;
+        let params: v4l2_streamparm = ioctl::g_parm(&self.fd, queue).ok()?;
+        let capture = unsafe { params.parm.capture };
+        let timeperframe = capture.timeperframe;
+        if timeperframe.numerator == 0 || timeperframe.denominator == 0 {
+            return None;
+        }
+        Some(timeperframe.denominator as f64 / timeperframe.numerator as f64)
+    }
+
+    fn current_dv_timings_mode(&self) -> Option<(u32, u32, Option<f64>)> {
+        let timings = ioctl::query_dv_timings::<v4l2_dv_timings>(&self.fd)
+            .or_else(|_| ioctl::g_dv_timings::<v4l2_dv_timings>(&self.fd))
+            .ok()?;
+
+        if timings.type_ != V4L2_DV_BT_656_1120 {
+            return None;
+        }
+
+        let bt = unsafe { timings.__bindgen_anon_1.bt };
+        if bt.width == 0 || bt.height == 0 {
+            return None;
+        }
+
+        Some((bt.width, bt.height, dv_timings_fps(&bt)))
+    }
+
+    fn current_format_resolution(&self) -> Option<(u32, u32)> {
+        let format = self.get_format().ok()?;
+        if format.width == 0 || format.height == 0 {
+            return None;
+        }
+        Some((format.width, format.height))
+    }
+
+    fn current_active_format(&self) -> Option<PixelFormat> {
+        let format = self.get_format().ok()?;
+        PixelFormat::from_v4l2r(format.pixelformat)
+    }
+
+    fn format_description(&self, fourcc: v4l2r::PixelFormat) -> Option<String> {
+        let queue = self.capture_queue_type().ok()?;
+        FormatIterator::new(&self.fd, queue)
+            .find(|desc| desc.pixelformat == fourcc)
+            .map(|desc| desc.description)
+    }
+
+    fn format_works_for_resolution(
+        &self,
+        fourcc: v4l2r::PixelFormat,
+        width: u32,
+        height: u32,
+    ) -> bool {
+        let queue = match self.capture_queue_type() {
+            Ok(queue) => queue,
+            Err(_) => return false,
+        };
+
+        let mut fmt = match ioctl::g_fmt::<V4l2rFormat>(&self.fd, queue) {
+            Ok(fmt) => fmt,
+            Err(_) => return false,
+        };
+
+        fmt.width = width;
+        fmt.height = height;
+        fmt.pixelformat = fourcc;
+
+        let actual = match ioctl::try_fmt::<_, V4l2rFormat>(&self.fd, (queue, &fmt)) {
+            Ok(actual) => actual,
+            Err(_) => return false,
+        };
+
+        actual.pixelformat == fourcc && actual.width == width && actual.height == height
+    }
+
     fn capture_queue_type(&self) -> Result<QueueType> {
         let caps = self.capabilities()?;
         if caps.video_capture {
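Note: current_parm_fps inverts the fraction because v4l2_streamparm carries timeperframe, the frame interval rather than a rate, so fps = denominator / numerator, guarded against drivers that leave either field zero. A minimal standalone restatement (illustrative):

    fn fps_from_timeperframe(numerator: u32, denominator: u32) -> Option<f64> {
        if numerator == 0 || denominator == 0 {
            return None; // driver left the interval unset
        }
        Some(denominator as f64 / numerator as f64) // 1/30 s  =>  30.0 fps
    }

format_works_for_resolution relies on TRY_FMT semantics: the ioctl negotiates without committing, and drivers may silently adjust a request rather than fail it, hence the field-by-field comparison of the echoed format.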
@@ -588,6 +773,36 @@ fn extract_uevent_value(content: &str, key: &str) -> Option<String> {
     None
 }

+fn dv_timings_fps(bt: &v4l2_bt_timings) -> Option<f64> {
+    let total_width = bt.width + bt.hfrontporch + bt.hsync + bt.hbackporch;
+    let total_height = if bt.interlaced != 0 {
+        bt.height
+            + bt.vfrontporch
+            + bt.vsync
+            + bt.vbackporch
+            + bt.il_vfrontporch
+            + bt.il_vsync
+            + bt.il_vbackporch
+    } else {
+        bt.height + bt.vfrontporch + bt.vsync + bt.vbackporch
+    };
+
+    if bt.pixelclock == 0 || total_width == 0 || total_height == 0 {
+        return None;
+    }
+
+    Some(bt.pixelclock as f64 / total_width as f64 / total_height as f64)
+}
+
+fn normalize_fps_list(fps_list: &mut Vec<f64>) {
+    fps_list.retain(|fps| fps.is_finite() && *fps > 0.0);
+    for fps in fps_list.iter_mut() {
+        *fps = (*fps * 100.0).round() / 100.0;
+    }
+    fps_list.sort_by(|a, b| b.total_cmp(a));
+    fps_list.dedup_by(|a, b| (*a - *b).abs() < 0.01);
+}
+
 /// Find the best video device for KVM use
 pub fn find_best_device() -> Result<VideoDeviceInfo> {
     let devices = enumerate_devices()?;
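Note: dv_timings_fps recovers the refresh rate from BT.656/1120 timings as fps = pixelclock / (htotal x vtotal), where each total is the active size plus front porch, sync, and back porch (plus the il_* fields for the second field of interlaced modes). Checking the formula against the standard CEA-861 1080p60 timing (assumed constants, not read from a device):

    fn main() {
        let pixelclock = 148_500_000f64;                 // Hz
        let htotal = (1920 + 88 + 44 + 148) as f64;      // width + hfp + hsync + hbp = 2200
        let vtotal = (1080 + 4 + 5 + 36) as f64;         // height + vfp + vsync + vbp = 1125
        let fps = pixelclock / htotal / vtotal;
        assert!((fps - 60.0).abs() < 1e-9);
    }

normalize_fps_list then rounds to two decimals and dedups with a 0.01 tolerance, so a rate measured via G_PARM and the same rate derived from timings collapse into a single entry.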
@@ -13,7 +13,7 @@ use std::sync::Once;
 use tracing::{debug, error, info, warn};

 use hwcodec::common::{Quality, RateControl};
-use hwcodec::ffmpeg::AVPixelFormat;
+use hwcodec::ffmpeg::{resolve_pixel_format, AVPixelFormat};
 use hwcodec::ffmpeg_ram::encode::{EncodeContext, Encoder as HwEncoder};
 use hwcodec::ffmpeg_ram::CodecInfo;
@@ -195,7 +195,7 @@ pub fn get_available_encoders(width: u32, height: u32) -> Vec<CodecInfo> {
         mc_name: None,
         width: width as i32,
         height: height as i32,
-        pixfmt: AVPixelFormat::AV_PIX_FMT_YUV420P,
+        pixfmt: resolve_pixel_format("yuv420p", AVPixelFormat::AV_PIX_FMT_YUV420P),
         align: 1,
         fps: 30,
         gop: 30,
@@ -273,16 +273,17 @@ impl H264Encoder {
         let height = config.base.resolution.height;

         // Select pixel format based on config
-        let pixfmt = match config.input_format {
-            H264InputFormat::Nv12 => AVPixelFormat::AV_PIX_FMT_NV12,
-            H264InputFormat::Nv21 => AVPixelFormat::AV_PIX_FMT_NV21,
-            H264InputFormat::Nv16 => AVPixelFormat::AV_PIX_FMT_NV16,
-            H264InputFormat::Nv24 => AVPixelFormat::AV_PIX_FMT_NV24,
-            H264InputFormat::Yuv420p => AVPixelFormat::AV_PIX_FMT_YUV420P,
-            H264InputFormat::Yuyv422 => AVPixelFormat::AV_PIX_FMT_YUYV422,
-            H264InputFormat::Rgb24 => AVPixelFormat::AV_PIX_FMT_RGB24,
-            H264InputFormat::Bgr24 => AVPixelFormat::AV_PIX_FMT_BGR24,
+        let (pixfmt_name, pixfmt_fallback) = match config.input_format {
+            H264InputFormat::Nv12 => ("nv12", AVPixelFormat::AV_PIX_FMT_NV12),
+            H264InputFormat::Nv21 => ("nv21", AVPixelFormat::AV_PIX_FMT_NV21),
+            H264InputFormat::Nv16 => ("nv16", AVPixelFormat::AV_PIX_FMT_NV16),
+            H264InputFormat::Nv24 => ("nv24", AVPixelFormat::AV_PIX_FMT_NV24),
+            H264InputFormat::Yuv420p => ("yuv420p", AVPixelFormat::AV_PIX_FMT_YUV420P),
+            H264InputFormat::Yuyv422 => ("yuyv422", AVPixelFormat::AV_PIX_FMT_YUYV422),
+            H264InputFormat::Rgb24 => ("rgb24", AVPixelFormat::AV_PIX_FMT_RGB24),
+            H264InputFormat::Bgr24 => ("bgr24", AVPixelFormat::AV_PIX_FMT_BGR24),
         };
+        let pixfmt = resolve_pixel_format(pixfmt_name, pixfmt_fallback);

         info!(
             "Creating H.264 encoder: {} at {}x{} @ {} kbps (input: {:?})",
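Note: this diff does not show resolve_pixel_format itself; from the call shape, it presumably resolves the format name against the FFmpeg build actually linked at runtime and keeps the compile-time enum value as a fallback, roughly this (an assumed sketch, not hwcodec's real implementation):

    // Assumed sketch: resolve a pixel-format name at runtime, else use the
    // compile-time fallback. `lookup` stands in for FFmpeg's av_get_pix_fmt().
    fn resolve_pixel_format_sketch(name: &str, fallback: i32) -> i32 {
        fn lookup(name: &str) -> Option<i32> {
            // Hypothetical table; the real lookup would ask the linked FFmpeg.
            match name {
                "yuv420p" => Some(0),
                "nv12" => Some(23),
                _ => None,
            }
        }
        lookup(name).unwrap_or(fallback)
    }

That indirection keeps the encoders working even if the bundled AVPixelFormat numbering drifts from the FFmpeg the process loads.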
@@ -11,7 +11,7 @@ use std::sync::Once;
 use tracing::{debug, error, info, warn};

 use hwcodec::common::{DataFormat, Quality, RateControl};
-use hwcodec::ffmpeg::AVPixelFormat;
+use hwcodec::ffmpeg::{resolve_pixel_format, AVPixelFormat};
 use hwcodec::ffmpeg_ram::encode::{EncodeContext, Encoder as HwEncoder};
 use hwcodec::ffmpeg_ram::CodecInfo;
@@ -198,7 +198,7 @@ pub fn get_available_h265_encoders(width: u32, height: u32) -> Vec<CodecInfo> {
         mc_name: None,
         width: width as i32,
         height: height as i32,
-        pixfmt: AVPixelFormat::AV_PIX_FMT_NV12,
+        pixfmt: resolve_pixel_format("nv12", AVPixelFormat::AV_PIX_FMT_NV12),
         align: 1,
         fps: 30,
         gop: 30,
@@ -310,24 +310,45 @@ impl H265Encoder {
         let height = config.base.resolution.height;

         // Software encoders (libx265) require YUV420P, hardware encoders use NV12 or YUYV422
-        let (pixfmt, actual_input_format) = if is_software {
-            (AVPixelFormat::AV_PIX_FMT_YUV420P, H265InputFormat::Yuv420p)
+        let (pixfmt_name, pixfmt_fallback, actual_input_format) = if is_software {
+            (
+                "yuv420p",
+                AVPixelFormat::AV_PIX_FMT_YUV420P,
+                H265InputFormat::Yuv420p,
+            )
         } else {
             match config.input_format {
-                H265InputFormat::Nv12 => (AVPixelFormat::AV_PIX_FMT_NV12, H265InputFormat::Nv12),
-                H265InputFormat::Nv21 => (AVPixelFormat::AV_PIX_FMT_NV21, H265InputFormat::Nv21),
-                H265InputFormat::Nv16 => (AVPixelFormat::AV_PIX_FMT_NV16, H265InputFormat::Nv16),
-                H265InputFormat::Nv24 => (AVPixelFormat::AV_PIX_FMT_NV24, H265InputFormat::Nv24),
-                H265InputFormat::Yuv420p => {
-                    (AVPixelFormat::AV_PIX_FMT_YUV420P, H265InputFormat::Yuv420p)
-                }
-                H265InputFormat::Yuyv422 => {
-                    (AVPixelFormat::AV_PIX_FMT_YUYV422, H265InputFormat::Yuyv422)
-                }
-                H265InputFormat::Rgb24 => (AVPixelFormat::AV_PIX_FMT_RGB24, H265InputFormat::Rgb24),
-                H265InputFormat::Bgr24 => (AVPixelFormat::AV_PIX_FMT_BGR24, H265InputFormat::Bgr24),
+                H265InputFormat::Nv12 => {
+                    ("nv12", AVPixelFormat::AV_PIX_FMT_NV12, H265InputFormat::Nv12)
+                }
+                H265InputFormat::Nv21 => {
+                    ("nv21", AVPixelFormat::AV_PIX_FMT_NV21, H265InputFormat::Nv21)
+                }
+                H265InputFormat::Nv16 => {
+                    ("nv16", AVPixelFormat::AV_PIX_FMT_NV16, H265InputFormat::Nv16)
+                }
+                H265InputFormat::Nv24 => {
+                    ("nv24", AVPixelFormat::AV_PIX_FMT_NV24, H265InputFormat::Nv24)
+                }
+                H265InputFormat::Yuv420p => (
+                    "yuv420p",
+                    AVPixelFormat::AV_PIX_FMT_YUV420P,
+                    H265InputFormat::Yuv420p,
+                ),
+                H265InputFormat::Yuyv422 => (
+                    "yuyv422",
+                    AVPixelFormat::AV_PIX_FMT_YUYV422,
+                    H265InputFormat::Yuyv422,
+                ),
+                H265InputFormat::Rgb24 => {
+                    ("rgb24", AVPixelFormat::AV_PIX_FMT_RGB24, H265InputFormat::Rgb24)
+                }
+                H265InputFormat::Bgr24 => {
+                    ("bgr24", AVPixelFormat::AV_PIX_FMT_BGR24, H265InputFormat::Bgr24)
+                }
             }
         };
+        let pixfmt = resolve_pixel_format(pixfmt_name, pixfmt_fallback);

         info!(
             "Creating H.265 encoder: {} at {}x{} @ {} kbps (input: {:?})",
@@ -1,6 +1,6 @@
 //! JPEG encoder implementation
 //!
-//! Provides JPEG encoding for raw video frames (YUYV, NV12, RGB, BGR)
+//! Provides JPEG encoding for raw video frames (YUYV, NV12, NV16, NV24, RGB, BGR)
 //! Uses libyuv for SIMD-accelerated color space conversion to I420,
 //! then turbojpeg for direct YUV encoding (skips internal color conversion).
@@ -14,7 +14,7 @@ use crate::video::format::{PixelFormat, Resolution};
 ///
 /// Encoding pipeline (all SIMD accelerated):
 /// ```text
-/// YUYV/NV12/BGR24/RGB24 ──libyuv──> I420 ──turbojpeg──> JPEG
+/// YUYV/NV12/NV16/NV24/BGR24/RGB24 ──libyuv──> I420 ──turbojpeg──> JPEG
 /// ```
 ///
 /// Note: This encoder is NOT thread-safe due to turbojpeg limitations.
@@ -24,6 +24,10 @@ pub struct JpegEncoder {
     compressor: turbojpeg::Compressor,
     /// I420 buffer for YUV encoding (Y + U + V planes)
     i420_buffer: Vec<u8>,
+    /// Scratch buffer for split chroma planes when converting semiplanar 4:2:2 / 4:4:4 input.
+    uv_split_buffer: Vec<u8>,
+    /// BGRA buffer used when a source format needs explicit YUV matrix expansion before JPEG.
+    bgra_buffer: Vec<u8>,
 }

 impl JpegEncoder {
@@ -34,6 +38,8 @@ impl JpegEncoder {
         let height = resolution.height as usize;
         // I420: Y = width*height, U = width*height/4, V = width*height/4
         let i420_size = width * height * 3 / 2;
+        let max_uv_plane_size = width * height;
+        let bgra_size = width * height * 4;

         let mut compressor = turbojpeg::Compressor::new().map_err(|e| {
             AppError::VideoError(format!("Failed to create turbojpeg compressor: {}", e))
@@ -47,6 +53,8 @@ impl JpegEncoder {
             config,
             compressor,
             i420_buffer: vec![0u8; i420_size],
+            uv_split_buffer: vec![0u8; max_uv_plane_size * 2],
+            bgra_buffer: vec![0u8; bgra_size],
         })
     }

@@ -93,6 +101,36 @@ impl JpegEncoder {
         ))
     }

+    /// Encode BGRA buffer to JPEG using turbojpeg's RGB path.
+    #[inline]
+    fn encode_bgra_to_jpeg(&mut self, sequence: u64) -> Result<EncodedFrame> {
+        let width = self.config.resolution.width as usize;
+        let height = self.config.resolution.height as usize;
+
+        self.compressor
+            .set_subsamp(turbojpeg::Subsamp::Sub2x2)
+            .map_err(|e| AppError::VideoError(format!("Failed to set JPEG subsampling: {}", e)))?;
+
+        let image = turbojpeg::Image {
+            pixels: self.bgra_buffer.as_slice(),
+            width,
+            pitch: width * 4,
+            height,
+            format: turbojpeg::PixelFormat::BGRA,
+        };
+
+        let jpeg_data = self
+            .compressor
+            .compress_to_vec(image)
+            .map_err(|e| AppError::VideoError(format!("JPEG compression failed: {}", e)))?;
+
+        Ok(EncodedFrame::jpeg(
+            Bytes::from(jpeg_data),
+            self.config.resolution,
+            sequence,
+        ))
+    }
+
     /// Encode YUYV (YUV422) frame to JPEG
     pub fn encode_yuyv(&mut self, data: &[u8], sequence: u64) -> Result<EncodedFrame> {
         let width = self.config.resolution.width as usize;
@@ -135,6 +173,101 @@ impl JpegEncoder {
         self.encode_i420_to_jpeg(sequence)
     }

+    /// Encode NV16 frame to JPEG
+    pub fn encode_nv16(&mut self, data: &[u8], sequence: u64) -> Result<EncodedFrame> {
+        let width = self.config.resolution.width as usize;
+        let height = self.config.resolution.height as usize;
+        let y_size = width * height;
+        let uv_size = y_size;
+        let expected_size = y_size + uv_size;
+
+        if data.len() < expected_size {
+            return Err(AppError::VideoError(format!(
+                "NV16 data too small: {} < {}",
+                data.len(),
+                expected_size
+            )));
+        }
+
+        let src_uv = &data[y_size..expected_size];
+        let chroma_plane_size = y_size / 2;
+        let (u_plane_422, rest) = self.uv_split_buffer.split_at_mut(chroma_plane_size);
+        let (v_plane_422, _) = rest.split_at_mut(chroma_plane_size);
+
+        libyuv::split_uv_plane(
+            src_uv,
+            width as i32,
+            u_plane_422,
+            (width / 2) as i32,
+            v_plane_422,
+            (width / 2) as i32,
+            (width / 2) as i32,
+            height as i32,
+        )
+        .map_err(|e| AppError::VideoError(format!("libyuv NV16 split failed: {}", e)))?;
+
+        libyuv::i422_to_i420_planar(
+            &data[..y_size],
+            width as i32,
+            u_plane_422,
+            (width / 2) as i32,
+            v_plane_422,
+            (width / 2) as i32,
+            &mut self.i420_buffer,
+            width as i32,
+            height as i32,
+        )
+        .map_err(|e| AppError::VideoError(format!("libyuv NV16→I420 failed: {}", e)))?;
+
+        self.encode_i420_to_jpeg(sequence)
+    }
+
+    /// Encode NV24 frame to JPEG
+    pub fn encode_nv24(&mut self, data: &[u8], sequence: u64) -> Result<EncodedFrame> {
+        let width = self.config.resolution.width as usize;
+        let height = self.config.resolution.height as usize;
+        let y_size = width * height;
+        let uv_size = y_size * 2;
+        let expected_size = y_size + uv_size;
+
+        if data.len() < expected_size {
+            return Err(AppError::VideoError(format!(
+                "NV24 data too small: {} < {}",
+                data.len(),
+                expected_size
+            )));
+        }
+
+        let src_uv = &data[y_size..expected_size];
+        let chroma_plane_size = y_size;
+        let (u_plane_444, rest) = self.uv_split_buffer.split_at_mut(chroma_plane_size);
+        let (v_plane_444, _) = rest.split_at_mut(chroma_plane_size);
+
+        libyuv::split_uv_plane(
+            src_uv,
+            (width * 2) as i32,
+            u_plane_444,
+            width as i32,
+            v_plane_444,
+            width as i32,
+            width as i32,
+            height as i32,
+        )
+        .map_err(|e| AppError::VideoError(format!("libyuv NV24 split failed: {}", e)))?;
+
+        libyuv::h444_to_bgra(
+            &data[..y_size],
+            u_plane_444,
+            v_plane_444,
+            &mut self.bgra_buffer,
+            width as i32,
+            height as i32,
+        )
+        .map_err(|e| AppError::VideoError(format!("libyuv NV24(H444)→BGRA failed: {}", e)))?;
+
+        self.encode_bgra_to_jpeg(sequence)
+    }
+
     /// Encode RGB24 frame to JPEG
     pub fn encode_rgb(&mut self, data: &[u8], sequence: u64) -> Result<EncodedFrame> {
         let width = self.config.resolution.width as usize;
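Note: the NV24 JPEG path deliberately differs from NV16. Downsampling 4:4:4 straight to I420 would discard chroma before compression, so the encoder splits to planar 4:4:4, expands to BGRA via libyuv's H444 variant (a BT.709-style matrix, matching HDMI sources), and lets turbojpeg perform the single 2x2 subsampling pass (Sub2x2). The tradeoff is the intermediate buffer; rough per-frame sizes assuming 1920x1080 (illustrative):

    fn main() {
        let (w, h) = (1920usize, 1080usize);
        println!("NV24 input : {} bytes", w * h * 3);      // Y + full-res interleaved UV
        println!("early I420 : {} bytes", w * h * 3 / 2);  // would discard chroma first
        println!("BGRA bounce: {} bytes", w * h * 4);      // keeps 4:4:4 until turbojpeg
    }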
@@ -192,6 +325,8 @@ impl crate::video::encoder::traits::Encoder for JpegEncoder {
         match self.config.input_format {
             PixelFormat::Yuyv | PixelFormat::Yvyu => self.encode_yuyv(data, sequence),
             PixelFormat::Nv12 => self.encode_nv12(data, sequence),
+            PixelFormat::Nv16 => self.encode_nv16(data, sequence),
+            PixelFormat::Nv24 => self.encode_nv24(data, sequence),
             PixelFormat::Rgb24 => self.encode_rgb(data, sequence),
             PixelFormat::Bgr24 => self.encode_bgr(data, sequence),
             _ => Err(AppError::VideoError(format!(
@@ -211,6 +346,8 @@ impl crate::video::encoder::traits::Encoder for JpegEncoder {
             PixelFormat::Yuyv
                 | PixelFormat::Yvyu
                 | PixelFormat::Nv12
+                | PixelFormat::Nv16
+                | PixelFormat::Nv24
                 | PixelFormat::Rgb24
                 | PixelFormat::Bgr24
         )
@@ -11,7 +11,7 @@ use std::time::Duration;
 use tracing::{debug, info, warn};

 use hwcodec::common::{DataFormat, Quality, RateControl};
-use hwcodec::ffmpeg::AVPixelFormat;
+use hwcodec::ffmpeg::{resolve_pixel_format, AVPixelFormat};
 use hwcodec::ffmpeg_ram::encode::{EncodeContext, Encoder as HwEncoder};
 use hwcodec::ffmpeg_ram::CodecInfo;
@@ -309,7 +309,7 @@ impl EncoderRegistry {
         mc_name: None,
         width: width as i32,
         height: height as i32,
-        pixfmt: AVPixelFormat::AV_PIX_FMT_NV12,
+        pixfmt: resolve_pixel_format("nv12", AVPixelFormat::AV_PIX_FMT_NV12),
         align: 1,
         fps: 30,
         gop: 30,
@@ -11,7 +11,7 @@
 use tracing::{debug, error, info, warn};

 use hwcodec::common::{DataFormat, Quality, RateControl};
-use hwcodec::ffmpeg::AVPixelFormat;
+use hwcodec::ffmpeg::{resolve_pixel_format, AVPixelFormat};
 use hwcodec::ffmpeg_ram::encode::{EncodeContext, Encoder as HwEncoder};
 use hwcodec::ffmpeg_ram::CodecInfo;
@@ -133,7 +133,7 @@ pub fn get_available_vp8_encoders(width: u32, height: u32) -> Vec<CodecInfo> {
         mc_name: None,
         width: width as i32,
         height: height as i32,
-        pixfmt: AVPixelFormat::AV_PIX_FMT_NV12,
+        pixfmt: resolve_pixel_format("nv12", AVPixelFormat::AV_PIX_FMT_NV12),
         align: 1,
         fps: 30,
         gop: 30,
@@ -244,16 +244,25 @@ impl VP8Encoder {
         let height = config.base.resolution.height;

         // Software encoders (libvpx) require YUV420P, hardware (VAAPI) uses NV12
-        let (pixfmt, actual_input_format) = if is_software {
-            (AVPixelFormat::AV_PIX_FMT_YUV420P, VP8InputFormat::Yuv420p)
+        let (pixfmt_name, pixfmt_fallback, actual_input_format) = if is_software {
+            (
+                "yuv420p",
+                AVPixelFormat::AV_PIX_FMT_YUV420P,
+                VP8InputFormat::Yuv420p,
+            )
         } else {
             match config.input_format {
-                VP8InputFormat::Nv12 => (AVPixelFormat::AV_PIX_FMT_NV12, VP8InputFormat::Nv12),
-                VP8InputFormat::Yuv420p => {
-                    (AVPixelFormat::AV_PIX_FMT_YUV420P, VP8InputFormat::Yuv420p)
-                }
+                VP8InputFormat::Nv12 => {
+                    ("nv12", AVPixelFormat::AV_PIX_FMT_NV12, VP8InputFormat::Nv12)
+                }
+                VP8InputFormat::Yuv420p => (
+                    "yuv420p",
+                    AVPixelFormat::AV_PIX_FMT_YUV420P,
+                    VP8InputFormat::Yuv420p,
+                ),
             }
         };
+        let pixfmt = resolve_pixel_format(pixfmt_name, pixfmt_fallback);

         info!(
             "Creating VP8 encoder: {} at {}x{} @ {} kbps (input: {:?})",
@@ -11,7 +11,7 @@
 use tracing::{debug, error, info, warn};

 use hwcodec::common::{DataFormat, Quality, RateControl};
-use hwcodec::ffmpeg::AVPixelFormat;
+use hwcodec::ffmpeg::{resolve_pixel_format, AVPixelFormat};
 use hwcodec::ffmpeg_ram::encode::{EncodeContext, Encoder as HwEncoder};
 use hwcodec::ffmpeg_ram::CodecInfo;
@@ -133,7 +133,7 @@ pub fn get_available_vp9_encoders(width: u32, height: u32) -> Vec<CodecInfo> {
         mc_name: None,
         width: width as i32,
         height: height as i32,
-        pixfmt: AVPixelFormat::AV_PIX_FMT_NV12,
+        pixfmt: resolve_pixel_format("nv12", AVPixelFormat::AV_PIX_FMT_NV12),
         align: 1,
         fps: 30,
         gop: 30,
@@ -244,16 +244,25 @@ impl VP9Encoder {
         let height = config.base.resolution.height;

         // Software encoders (libvpx-vp9) require YUV420P, hardware (VAAPI) uses NV12
-        let (pixfmt, actual_input_format) = if is_software {
-            (AVPixelFormat::AV_PIX_FMT_YUV420P, VP9InputFormat::Yuv420p)
+        let (pixfmt_name, pixfmt_fallback, actual_input_format) = if is_software {
+            (
+                "yuv420p",
+                AVPixelFormat::AV_PIX_FMT_YUV420P,
+                VP9InputFormat::Yuv420p,
+            )
         } else {
             match config.input_format {
-                VP9InputFormat::Nv12 => (AVPixelFormat::AV_PIX_FMT_NV12, VP9InputFormat::Nv12),
-                VP9InputFormat::Yuv420p => {
-                    (AVPixelFormat::AV_PIX_FMT_YUV420P, VP9InputFormat::Yuv420p)
-                }
+                VP9InputFormat::Nv12 => {
+                    ("nv12", AVPixelFormat::AV_PIX_FMT_NV12, VP9InputFormat::Nv12)
+                }
+                VP9InputFormat::Yuv420p => (
+                    "yuv420p",
+                    AVPixelFormat::AV_PIX_FMT_YUV420P,
+                    VP9InputFormat::Yuv420p,
+                ),
             }
         };
+        let pixfmt = resolve_pixel_format(pixfmt_name, pixfmt_fallback);

         info!(
             "Creating VP9 encoder: {} at {}x{} @ {} kbps (input: {:?})",
@@ -24,3 +24,11 @@ pub use shared_video_pipeline::{
 };
 pub use stream_manager::VideoStreamManager;
 pub use streamer::{Streamer, StreamerState};
+
+pub(crate) fn is_rk_hdmirx_driver(driver: &str, card: &str) -> bool {
+    driver.eq_ignore_ascii_case("rk_hdmirx") || card.eq_ignore_ascii_case("rk_hdmirx")
+}
+
+pub(crate) fn is_rk_hdmirx_device(device: &device::VideoDeviceInfo) -> bool {
+    is_rk_hdmirx_driver(&device.driver, &device.card)
+}
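Note: both helpers key off the strings the rk_hdmirx kernel driver reports, checking driver and card case-insensitively since either field may carry the name depending on the board. For example:

    assert!(is_rk_hdmirx_driver("rk_hdmirx", "rk_hdmirx"));
    assert!(is_rk_hdmirx_driver("RK_HDMIRX", "HDMI-IN"));     // case-insensitive, either field
    assert!(!is_rk_hdmirx_driver("uvcvideo", "USB Capture"));

Every RK special case in this commit (single-format enumeration, format/resolution pinning, skipping the MJPEG auto-switch) funnels through this one predicate.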
@@ -599,6 +599,14 @@ fn converters_for_pipeline(
             info!("Using NV21->YUV420P converter");
             Ok((None, Some(PixelConverter::nv21_to_yuv420p(resolution))))
         }
+        PixelFormat::Nv16 => {
+            info!("Using NV16->YUV420P converter");
+            Ok((None, Some(PixelConverter::nv16_to_yuv420p(resolution))))
+        }
+        PixelFormat::Nv24 => {
+            info!("Using NV24->YUV420P converter");
+            Ok((None, Some(PixelConverter::nv24_to_yuv420p(resolution))))
+        }
         PixelFormat::Rgb24 => {
             info!("Using RGB24->YUV420P converter");
             Ok((None, Some(PixelConverter::rgb24_to_yuv420p(resolution))))
@@ -631,6 +639,10 @@ fn converters_for_pipeline(
             info!("Using NV16->NV12 converter");
             Ok((Some(Nv12Converter::nv16_to_nv12(resolution)), None))
         }
+        PixelFormat::Nv24 => {
+            info!("Using NV24->NV12 converter");
+            Ok((Some(Nv12Converter::nv24_to_nv12(resolution)), None))
+        }
         PixelFormat::Yuv420 => {
             info!("Using YUV420P->NV12 converter");
             Ok((Some(Nv12Converter::yuv420_to_nv12(resolution)), None))
@@ -38,6 +38,7 @@ use crate::hid::HidController;
 use crate::stream::MjpegStreamHandler;
 use crate::video::codec_constraints::StreamCodecConstraints;
 use crate::video::format::{PixelFormat, Resolution};
+use crate::video::is_rk_hdmirx_device;
 use crate::video::streamer::{Streamer, StreamerState};
 use crate::webrtc::WebRtcStreamer;
@@ -427,7 +428,8 @@ impl VideoStreamManager {
             device.formats.iter().map(|f| f.format).collect();

         // If current format is not MJPEG and device supports MJPEG, switch to it
-        if current_format != PixelFormat::Mjpeg
+        if !is_rk_hdmirx_device(&device)
+            && current_format != PixelFormat::Mjpeg
             && available_formats.contains(&PixelFormat::Mjpeg)
         {
             info!("Auto-switching to MJPEG format for MJPEG mode");
@@ -14,6 +14,7 @@ use tracing::{debug, error, info, trace, warn};
 use super::device::{enumerate_devices, find_best_device, VideoDeviceInfo};
 use super::format::{PixelFormat, Resolution};
 use super::frame::{FrameBuffer, FrameBufferPool, VideoFrame};
+use super::is_rk_hdmirx_device;
 use crate::error::{AppError, Result};
 use crate::events::{EventBus, SystemEvent};
 use crate::stream::MjpegStreamHandler;
@@ -269,24 +270,8 @@ impl Streamer {
             .find(|d| d.path.to_string_lossy() == device_path)
             .ok_or_else(|| AppError::VideoError("Video device not found".to_string()))?;

-        // Validate format
-        let fmt_info = device
-            .formats
-            .iter()
-            .find(|f| f.format == format)
-            .ok_or_else(|| AppError::VideoError("Requested format not supported".to_string()))?;
-
-        // Validate resolution
-        if !fmt_info.resolutions.is_empty()
-            && !fmt_info
-                .resolutions
-                .iter()
-                .any(|r| r.width == resolution.width && r.height == resolution.height)
-        {
-            return Err(AppError::VideoError(
-                "Requested resolution not supported".to_string(),
-            ));
-        }
+        let (format, resolution) =
+            self.resolve_capture_config(&device, format, resolution)?;

         // IMPORTANT: Disconnect all MJPEG clients FIRST before stopping capture
         // This prevents race conditions where clients try to reconnect and reopen the device
@@ -385,6 +370,14 @@ impl Streamer {
         device: &VideoDeviceInfo,
         preferred: PixelFormat,
     ) -> Result<PixelFormat> {
+        if is_rk_hdmirx_device(device) {
+            return device
+                .formats
+                .first()
+                .map(|f| f.format)
+                .ok_or_else(|| AppError::VideoError("No supported formats found".to_string()));
+        }
+
         // Check if preferred format is available
         if device.formats.iter().any(|f| f.format == preferred) {
             return Ok(preferred);
@@ -411,6 +404,14 @@ impl Streamer {
             .find(|f| &f.format == format)
             .ok_or_else(|| AppError::VideoError("Format not found".to_string()))?;

+        if is_rk_hdmirx_device(device) {
+            return Ok(format_info
+                .resolutions
+                .first()
+                .map(|r| r.resolution())
+                .unwrap_or(preferred));
+        }
+
         // Check if preferred resolution is available
         if format_info.resolutions.is_empty()
             || format_info
@@ -429,6 +430,17 @@ impl Streamer {
             .ok_or_else(|| AppError::VideoError("No resolutions available".to_string()))
     }

+    fn resolve_capture_config(
+        &self,
+        device: &VideoDeviceInfo,
+        requested_format: PixelFormat,
+        requested_resolution: Resolution,
+    ) -> Result<(PixelFormat, Resolution)> {
+        let format = self.select_format(device, requested_format)?;
+        let resolution = self.select_resolution(device, &format, requested_resolution)?;
+        Ok((format, resolution))
+    }
+
     /// Restart capture for recovery (direct capture path)
     async fn restart_capture(self: &Arc<Self>) -> Result<()> {
         self.direct_stop.store(false, Ordering::SeqCst);
@@ -14,6 +14,7 @@ use v4l2r::ioctl::{
     QBuffer, QueryBuffer, V4l2Buffer,
 };
 use v4l2r::memory::{MemoryType, MmapHandle};
+use v4l2r::nix::errno::Errno;
 use v4l2r::{Format as V4l2rFormat, PixelFormat as V4l2rPixelFormat, QueueType};

 use crate::error::{AppError, Result};
@@ -91,8 +92,11 @@ impl V4l2rCaptureStream {
         });

         if fps > 0 {
-            if let Err(e) = set_fps(&fd, queue, fps) {
-                warn!("Failed to set hardware FPS: {}", e);
+            match set_fps(&fd, queue, fps) {
+                Ok(()) => {}
+                Err(ioctl::GParmError::IoctlError(err))
+                    if matches!(err, Errno::ENOTTY | Errno::ENOSYS | Errno::EOPNOTSUPP) => {}
+                Err(e) => warn!("Failed to set hardware FPS: {}", e),
             }
         }
@@ -258,7 +262,7 @@ impl Drop for V4l2rCaptureStream {
     }
 }

-fn set_fps(fd: &File, queue: QueueType, fps: u32) -> Result<()> {
+fn set_fps(fd: &File, queue: QueueType, fps: u32) -> std::result::Result<(), ioctl::GParmError> {
     let mut params = unsafe { std::mem::zeroed::<v4l2_streamparm>() };
     params.type_ = queue as u32;
     params.parm = v4l2_streamparm__bindgen_ty_1 {
@@ -271,7 +275,6 @@ fn set_fps(fd: &File, queue: QueueType, fps: u32) -> Result<()> {
         },
     };

-    let _actual: v4l2_streamparm = ioctl::s_parm(fd, params)
-        .map_err(|e| AppError::VideoError(format!("Failed to set FPS: {}", e)))?;
+    let _actual: v4l2_streamparm = ioctl::s_parm(fd, params)?;
     Ok(())
 }
@@ -1001,7 +1001,7 @@ pub struct VideoFormat {
 pub struct VideoResolution {
     pub width: u32,
     pub height: u32,
-    pub fps: Vec<u32>,
+    pub fps: Vec<f64>,
 }

 #[derive(Serialize)]