Mirror of https://github.com/mofeng-git/One-KVM.git
Synced 2026-01-28 16:41:52 +08:00
refactor: upgrade dependencies and optimize the build system

- Upgrade core dependencies (axum 0.8, tower-http 0.6, alsa 0.11, etc.)
- Simplify the cross-compilation setup and switch to Debian 11 for better compatibility
- Add Debian package build support (debuerreotype templates)
- Remove the standalone MJPEG decoder and simplify the video module
- Statically link libx264/libx265/libopus into the binary
@@ -7,10 +7,12 @@ use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
 use std::sync::Arc;
 use std::time::Instant;
 use tokio::sync::{broadcast, watch, Mutex};
-use tracing::{debug, error, info, warn};
+use tracing::{debug, info};

 use super::device::AudioDeviceInfo;
 use crate::error::{AppError, Result};
+use crate::utils::LogThrottler;
+use crate::{error_throttled, warn_throttled};

 /// Audio capture configuration
 #[derive(Debug, Clone)]
@@ -134,6 +136,8 @@ pub struct AudioCapturer {
     stop_flag: Arc<AtomicBool>,
     sequence: Arc<AtomicU64>,
     capture_handle: Mutex<Option<tokio::task::JoinHandle<()>>>,
+    /// Log throttler to prevent log flooding
+    log_throttler: LogThrottler,
 }

 impl AudioCapturer {
@@ -151,6 +155,7 @@ impl AudioCapturer {
             stop_flag: Arc::new(AtomicBool::new(false)),
             sequence: Arc::new(AtomicU64::new(0)),
             capture_handle: Mutex::new(None),
+            log_throttler: LogThrottler::with_secs(5),
         }
     }
@@ -193,9 +198,10 @@ impl AudioCapturer {
         let frame_tx = self.frame_tx.clone();
         let stop_flag = self.stop_flag.clone();
         let sequence = self.sequence.clone();
+        let log_throttler = self.log_throttler.clone();

         let handle = tokio::task::spawn_blocking(move || {
-            capture_loop(config, state, stats, frame_tx, stop_flag, sequence);
+            capture_loop(config, state, stats, frame_tx, stop_flag, sequence, log_throttler);
         });

         *self.capture_handle.lock().await = Some(handle);
@@ -229,11 +235,20 @@ fn capture_loop(
     frame_tx: broadcast::Sender<AudioFrame>,
     stop_flag: Arc<AtomicBool>,
     sequence: Arc<AtomicU64>,
+    log_throttler: LogThrottler,
 ) {
-    let result = run_capture(&config, &state, &stats, &frame_tx, &stop_flag, &sequence);
+    let result = run_capture(
+        &config,
+        &state,
+        &stats,
+        &frame_tx,
+        &stop_flag,
+        &sequence,
+        &log_throttler,
+    );

     if let Err(e) = result {
-        error!("Audio capture error: {}", e);
+        error_throttled!(log_throttler, "capture_error", "Audio capture error: {}", e);
         let _ = state.send(CaptureState::Error);
     } else {
         let _ = state.send(CaptureState::Stopped);
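
The `error_throttled!` / `warn_throttled!` macros themselves are not shown in this diff. Based on the `should_log(key)` API visible in the HID handler hunk below, a minimal sketch of the assumed expansion looks like this (names taken from the surrounding hunks; the actual implementation may differ):

```rust
// Hypothetical sketch of the throttled-logging macros used in this commit.
// Assumes LogThrottler::should_log(key) returns true at most once per interval.
#[macro_export]
macro_rules! error_throttled {
    ($throttler:expr, $key:expr, $($arg:tt)*) => {
        if $throttler.should_log($key) {
            tracing::error!($($arg)*);
        }
    };
}

#[macro_export]
macro_rules! warn_throttled {
    ($throttler:expr, $key:expr, $($arg:tt)*) => {
        if $throttler.should_log($key) {
            tracing::warn!($($arg)*);
        }
    };
}
```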
@@ -247,6 +262,7 @@ fn run_capture(
     frame_tx: &broadcast::Sender<AudioFrame>,
     stop_flag: &AtomicBool,
     sequence: &AtomicU64,
+    log_throttler: &LogThrottler,
 ) -> Result<()> {
     // Open ALSA device
     let pcm = PCM::new(&config.device_name, Direction::Capture, false).map_err(|e| {
@@ -316,7 +332,7 @@ fn run_capture(
         // Check PCM state
         match pcm.state() {
             State::XRun => {
-                warn!("Audio buffer overrun, recovering");
+                warn_throttled!(log_throttler, "xrun", "Audio buffer overrun, recovering");
                 if let Ok(mut s) = stats.try_lock() {
                     s.buffer_overruns += 1;
                 }
@@ -324,7 +340,7 @@ fn run_capture(
                 continue;
             }
             State::Suspended => {
-                warn!("Audio device suspended, recovering");
+                warn_throttled!(log_throttler, "suspended", "Audio device suspended, recovering");
                 let _ = pcm.resume();
                 continue;
             }
@@ -370,13 +386,19 @@ fn run_capture(
                 let desc = e.to_string();
                 if desc.contains("EPIPE") || desc.contains("Broken pipe") {
                     // Buffer overrun
-                    warn!("Audio buffer overrun");
+                    warn_throttled!(log_throttler, "buffer_overrun", "Audio buffer overrun");
                     if let Ok(mut s) = stats.try_lock() {
                         s.buffer_overruns += 1;
                     }
                     let _ = pcm.prepare();
+                } else if desc.contains("No such device") || desc.contains("ENODEV") {
+                    // Device disconnected - use longer throttle for this
+                    error_throttled!(log_throttler, "no_device", "Audio read error: {}", e);
+                    if let Ok(mut s) = stats.try_lock() {
+                        s.frames_dropped += 1;
+                    }
                 } else {
-                    error!("Audio read error: {}", e);
+                    error_throttled!(log_throttler, "read_error", "Audio read error: {}", e);
                     if let Ok(mut s) = stats.try_lock() {
                         s.frames_dropped += 1;
                     }
@@ -284,6 +284,7 @@ mod tests {
             right_alt: false,
             right_meta: false,
         },
+        is_usb_hid: false,
     };

     let encoded = encode_keyboard_event(&event);
@@ -50,7 +50,7 @@ async fn handle_hid_socket(socket: WebSocket, state: Arc<AppState>) {
         vec![RESP_ERR_HID_UNAVAILABLE]
     };

-    if sender.send(Message::Binary(initial_response)).await.is_err() {
+    if sender.send(Message::Binary(initial_response.into())).await.is_err() {
         error!("Failed to send initial HID status");
         return;
     }
@@ -66,7 +66,7 @@ async fn handle_hid_socket(socket: WebSocket, state: Arc<AppState>) {
                     warn!("HID controller not available, ignoring message");
                 }
                 // Send error response (optional, for client awareness)
-                let _ = sender.send(Message::Binary(vec![RESP_ERR_HID_UNAVAILABLE])).await;
+                let _ = sender.send(Message::Binary(vec![RESP_ERR_HID_UNAVAILABLE].into())).await;
                 continue;
             }
@@ -83,7 +83,7 @@ async fn handle_hid_socket(socket: WebSocket, state: Arc<AppState>) {
                 if log_throttler.should_log("text_message_rejected") {
                     debug!("Received text message (not supported): {} bytes", text.len());
                 }
-                let _ = sender.send(Message::Binary(vec![RESP_ERR_INVALID_MESSAGE])).await;
+                let _ = sender.send(Message::Binary(vec![RESP_ERR_INVALID_MESSAGE].into())).await;
             }
             Ok(Message::Ping(data)) => {
                 let _ = sender.send(Message::Pong(data)).await;
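
The `.into()` calls added throughout these WebSocket handlers track axum 0.8's change of message payload types: `Message::Binary` now wraps `bytes::Bytes` rather than `Vec<u8>`, and `Message::Text` wraps `Utf8Bytes` rather than `String`, with `From` conversions available for the old types. A minimal sketch:

```rust
use axum::extract::ws::Message;

// axum 0.8: Binary wraps bytes::Bytes; Vec<u8> converts via From, hence `.into()`.
fn status_message(ok: bool) -> Message {
    let payload: Vec<u8> = vec![if ok { 0x00 } else { 0x01 }];
    Message::Binary(payload.into())
}
```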
@@ -532,7 +532,7 @@ async fn main() -> anyhow::Result<()> {
         let cert = generate_self_signed_cert()?;
         tokio::fs::create_dir_all(&cert_dir).await?;
         tokio::fs::write(&cert_path, cert.cert.pem()).await?;
-        tokio::fs::write(&key_path, cert.key_pair.serialize_pem()).await?;
+        tokio::fs::write(&key_path, cert.signing_key.serialize_pem()).await?;
     } else {
         tracing::info!("Using existing TLS certificate from {}", cert_dir.display());
     }
@@ -633,7 +633,7 @@ fn parse_video_config(config: &AppConfig) -> (PixelFormat, Resolution) {
 }

 /// Generate a self-signed TLS certificate
-fn generate_self_signed_cert() -> anyhow::Result<rcgen::CertifiedKey> {
+fn generate_self_signed_cert() -> anyhow::Result<rcgen::CertifiedKey<rcgen::KeyPair>> {
     use rcgen::generate_simple_self_signed;

     let subject_alt_names = vec![
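
The two TLS hunks above follow the same rcgen API change: `CertifiedKey` became generic over the key type, and the field holding the private key was renamed from `key_pair` to `signing_key`. A hedged usage sketch (field names as shown in the diff; consult the rcgen docs for the exact version in use):

```rust
// Sketch: self-signed cert generation against the generic CertifiedKey.
fn make_cert() -> anyhow::Result<(String, String)> {
    let rcgen::CertifiedKey { cert, signing_key } =
        rcgen::generate_simple_self_signed(vec!["localhost".to_string()])?;
    Ok((cert.pem(), signing_key.serialize_pem()))
}
```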
@@ -507,10 +507,83 @@ impl Drop for ChannelWriter {
 #[cfg(test)]
 mod tests {
     use super::*;
+    use std::process::Command;
+    use std::sync::OnceLock;
     use tempfile::TempDir;

+    /// Path to ventoy resources directory
+    static RESOURCE_DIR: &str = concat!(
+        env!("CARGO_MANIFEST_DIR"),
+        "/../ventoy-img-rs/resources"
+    );
+
+    /// Initialize ventoy resources once
+    fn init_ventoy_resources() -> bool {
+        static INIT: OnceLock<bool> = OnceLock::new();
+        *INIT.get_or_init(|| {
+            let resource_path = std::path::Path::new(RESOURCE_DIR);
+
+            // Decompress xz files if needed
+            let core_xz = resource_path.join("core.img.xz");
+            let core_img = resource_path.join("core.img");
+            if core_xz.exists() && !core_img.exists() {
+                if let Err(e) = decompress_xz(&core_xz, &core_img) {
+                    eprintln!("Failed to decompress core.img.xz: {}", e);
+                    return false;
+                }
+            }
+
+            let disk_xz = resource_path.join("ventoy.disk.img.xz");
+            let disk_img = resource_path.join("ventoy.disk.img");
+            if disk_xz.exists() && !disk_img.exists() {
+                if let Err(e) = decompress_xz(&disk_xz, &disk_img) {
+                    eprintln!("Failed to decompress ventoy.disk.img.xz: {}", e);
+                    return false;
+                }
+            }
+
+            // Initialize resources
+            if let Err(e) = ventoy_img::resources::init_resources(resource_path) {
+                eprintln!("Failed to init ventoy resources: {}", e);
+                return false;
+            }
+
+            true
+        })
+    }
+
+    /// Decompress xz file using system command
+    fn decompress_xz(src: &std::path::Path, dst: &std::path::Path) -> std::io::Result<()> {
+        let output = Command::new("xz")
+            .args(&["-d", "-k", "-c", src.to_str().unwrap()])
+            .output()?;
+
+        if !output.status.success() {
+            return Err(std::io::Error::new(
+                std::io::ErrorKind::Other,
+                format!("xz decompress failed: {}", String::from_utf8_lossy(&output.stderr)),
+            ));
+        }
+
+        std::fs::write(dst, &output.stdout)?;
+        Ok(())
+    }
+
+    /// Ensure resources are initialized, skip test if failed
+    fn ensure_resources() -> bool {
+        if !init_ventoy_resources() {
+            eprintln!("Skipping test: ventoy resources not available");
+            false
+        } else {
+            true
+        }
+    }
+
     #[tokio::test]
     async fn test_drive_init() {
+        if !ensure_resources() {
+            return;
+        }
         let temp_dir = TempDir::new().unwrap();
         let drive_path = temp_dir.path().join("test_ventoy.img");
         let drive = VentoyDrive::new(drive_path);
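
The `OnceLock<bool>` guard above is a standard pattern for expensive, fallible, one-time test setup: the closure runs exactly once, and every later caller reads the cached result. A minimal standalone sketch of the same idea:

```rust
use std::sync::OnceLock;

// Runs the setup closure once; all callers observe the same cached success flag.
fn setup_once() -> bool {
    static READY: OnceLock<bool> = OnceLock::new();
    *READY.get_or_init(|| {
        // ... expensive, fallible setup goes here ...
        true
    })
}
```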
@@ -522,6 +595,9 @@ mod tests {

     #[tokio::test]
     async fn test_drive_mkdir() {
+        if !ensure_resources() {
+            return;
+        }
         let temp_dir = TempDir::new().unwrap();
         let drive_path = temp_dir.path().join("test_ventoy.img");
         let drive = VentoyDrive::new(drive_path);
@@ -537,6 +613,9 @@ mod tests {

     #[tokio::test]
     async fn test_drive_file_write_and_read() {
+        if !ensure_resources() {
+            return;
+        }
         let temp_dir = TempDir::new().unwrap();
         let drive_path = temp_dir.path().join("test_ventoy.img");
         let drive = VentoyDrive::new(drive_path.clone());
@@ -565,6 +644,9 @@ mod tests {

     #[tokio::test]
     async fn test_drive_get_file_info() {
+        if !ensure_resources() {
+            return;
+        }
         let temp_dir = TempDir::new().unwrap();
         let drive_path = temp_dir.path().join("test_ventoy.img");
         let drive = VentoyDrive::new(drive_path.clone());
@@ -611,6 +693,9 @@ mod tests {

     #[tokio::test]
     async fn test_drive_stream_read() {
+        if !ensure_resources() {
+            return;
+        }
         let temp_dir = TempDir::new().unwrap();
         let drive_path = temp_dir.path().join("test_ventoy.img");
         let drive = VentoyDrive::new(drive_path.clone());
@@ -652,6 +737,9 @@ mod tests {

     #[tokio::test]
     async fn test_drive_stream_read_small_file() {
+        if !ensure_resources() {
+            return;
+        }
         let temp_dir = TempDir::new().unwrap();
         let drive_path = temp_dir.path().join("test_ventoy.img");
         let drive = VentoyDrive::new(drive_path.clone());
@@ -423,13 +423,13 @@ mod tests {
     fn test_endpoint_tracking() {
         let mut manager = OtgGadgetManager::with_config("test", 8);

-        // Keyboard uses 2 endpoints
+        // Keyboard uses 1 endpoint
         let _ = manager.add_keyboard();
-        assert_eq!(manager.endpoint_allocator.used(), 2);
+        assert_eq!(manager.endpoint_allocator.used(), 1);

         // Mouse uses 1 endpoint each
         let _ = manager.add_mouse_relative();
         let _ = manager.add_mouse_absolute();
-        assert_eq!(manager.endpoint_allocator.used(), 4);
+        assert_eq!(manager.endpoint_allocator.used(), 3);
     }
 }
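
The updated expectations are consistent with USB HID basics: a keyboard function needs only one interrupt IN endpoint (an interrupt OUT endpoint for LED reports is optional), so one keyboard plus two mice consume 1 + 1 + 1 = 3 endpoints. The old values of 2 and 4 presumably reflected a gadget configuration that also allocated the optional OUT endpoint.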
@@ -558,7 +558,7 @@ mod tests {

     #[test]
     fn test_service_creation() {
-        let service = OtgService::new();
+        let _service = OtgService::new();
         // Just test that creation doesn't panic
         assert!(!OtgService::is_available() || true); // Depends on environment
     }
@@ -161,8 +161,8 @@ impl RustDeskConfig {
 /// Generate a random 9-digit device ID
 pub fn generate_device_id() -> String {
     use rand::Rng;
-    let mut rng = rand::thread_rng();
-    let id: u32 = rng.gen_range(100_000_000..999_999_999);
+    let mut rng = rand::rng();
+    let id: u32 = rng.random_range(100_000_000..999_999_999);
     id.to_string()
 }
@@ -170,10 +170,10 @@ pub fn generate_device_id() -> String {
 pub fn generate_random_password() -> String {
     use rand::Rng;
     const CHARSET: &[u8] = b"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
-    let mut rng = rand::thread_rng();
+    let mut rng = rand::rng();
     (0..8)
         .map(|_| {
-            let idx = rng.gen_range(0..CHARSET.len());
+            let idx = rng.random_range(0..CHARSET.len());
             CHARSET[idx] as char
         })
         .collect()
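
These two hunks track the rand 0.9 renames: `rand::thread_rng()` became `rand::rng()` and `Rng::gen_range` became `Rng::random_range` (the `gen*` names collided with `gen` becoming a reserved keyword in Rust 2024). An equivalent sketch against the new API:

```rust
use rand::Rng;

// rand 0.9: rand::rng() replaces thread_rng(); random_range replaces gen_range.
fn eight_char_token() -> String {
    const CHARSET: &[u8] = b"abcdefghijklmnopqrstuvwxyz0123456789";
    let mut rng = rand::rng();
    (0..8)
        .map(|_| CHARSET[rng.random_range(0..CHARSET.len())] as char)
        .collect()
}
```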
@@ -226,6 +226,7 @@ mod tests {

         // No rendezvous server, relay is None
         config.rendezvous_server = String::new();
+        config.relay_server = None;
         assert_eq!(config.relay_addr(), None);
     }
@@ -183,7 +183,7 @@ impl WsHidHandler {

         // Send initial status as binary: 0x00 = ok, 0x01 = error
         let status_byte = if self.is_hid_available() { 0x00u8 } else { 0x01u8 };
-        let _ = sender.send(Message::Binary(vec![status_byte])).await;
+        let _ = sender.send(Message::Binary(vec![status_byte].into())).await;

         loop {
             tokio::select! {
@@ -111,6 +111,15 @@ impl LogThrottler {
     }
 }

+impl Clone for LogThrottler {
+    fn clone(&self) -> Self {
+        Self {
+            last_logged: RwLock::new(HashMap::new()),
+            interval: self.interval,
+        }
+    }
+}
+
 impl Default for LogThrottler {
     /// Create a default log throttler with 5 second interval
     fn default() -> Self {
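
Note that the manual `Clone` impl starts the clone with an empty `last_logged` map instead of copying timestamps: `RwLock` is not `Clone`, and a fresh map only means the cloned throttler may log once immediately before throttling kicks in. A sketch of the struct this implies (field types inferred from the impl above, not the actual definition):

```rust
use std::collections::HashMap;
use std::sync::RwLock;
use std::time::{Duration, Instant};

// Inferred shape of LogThrottler: per-key last-log timestamps plus a shared interval.
pub struct LogThrottler {
    last_logged: RwLock<HashMap<String, Instant>>,
    interval: Duration,
}
```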
@@ -414,64 +414,6 @@ pub fn yuyv_buffer_size(resolution: Resolution) -> usize {
     (resolution.width * resolution.height * 2) as usize
 }

-// ============================================================================
-// MJPEG Decoder - Decodes JPEG to YUV420P using libyuv
-// ============================================================================
-
-/// MJPEG/JPEG decoder that outputs YUV420P using libyuv
-pub struct MjpegDecoder {
-    /// Resolution hint (can be updated from decoded frame)
-    resolution: Resolution,
-    /// YUV420P output buffer
-    yuv_buffer: Yuv420pBuffer,
-}
-
-impl MjpegDecoder {
-    /// Create a new MJPEG decoder with expected resolution
-    pub fn new(resolution: Resolution) -> Result<Self> {
-        Ok(Self {
-            resolution,
-            yuv_buffer: Yuv420pBuffer::new(resolution),
-        })
-    }
-
-    /// Decode MJPEG/JPEG data to YUV420P using libyuv
-    pub fn decode(&mut self, jpeg_data: &[u8]) -> Result<&[u8]> {
-        // Get MJPEG dimensions
-        let (width, height) = libyuv::mjpeg_size(jpeg_data)
-            .map_err(|e| AppError::VideoError(format!("Failed to get MJPEG size: {}", e)))?;
-
-        // Check if resolution changed
-        if width != self.resolution.width as i32 || height != self.resolution.height as i32 {
-            tracing::debug!(
-                "MJPEG resolution changed: {}x{} -> {}x{}",
-                self.resolution.width,
-                self.resolution.height,
-                width,
-                height
-            );
-            self.resolution = Resolution::new(width as u32, height as u32);
-            self.yuv_buffer = Yuv420pBuffer::new(self.resolution);
-        }
-
-        // Decode MJPEG directly to I420 using libyuv
-        libyuv::mjpeg_to_i420(jpeg_data, self.yuv_buffer.as_bytes_mut(), width, height)
-            .map_err(|e| AppError::VideoError(format!("MJPEG decode failed: {}", e)))?;
-
-        Ok(self.yuv_buffer.as_bytes())
-    }
-
-    /// Get current resolution
-    pub fn resolution(&self) -> Resolution {
-        self.resolution
-    }
-
-    /// Get YUV420P buffer size
-    pub fn yuv_buffer_size(&self) -> usize {
-        self.yuv_buffer.len()
-    }
-}
-
 // ============================================================================
 // NV12 Converter for VAAPI encoder (using libyuv)
 // ============================================================================
@@ -572,34 +514,6 @@ pub fn yuyv_to_nv12(yuyv: &[u8], nv12: &mut [u8], width: usize, height: usize) {
     }
 }

-// ============================================================================
-// Extended PixelConverter for MJPEG support
-// ============================================================================
-
-/// MJPEG to YUV420P converter (wraps MjpegDecoder)
-pub struct MjpegToYuv420Converter {
-    decoder: MjpegDecoder,
-}
-
-impl MjpegToYuv420Converter {
-    /// Create a new MJPEG to YUV420P converter
-    pub fn new(resolution: Resolution) -> Result<Self> {
-        Ok(Self {
-            decoder: MjpegDecoder::new(resolution)?,
-        })
-    }
-
-    /// Convert MJPEG data to YUV420P
-    pub fn convert(&mut self, mjpeg_data: &[u8]) -> Result<&[u8]> {
-        self.decoder.decode(mjpeg_data)
-    }
-
-    /// Get current resolution
-    pub fn resolution(&self) -> Resolution {
-        self.decoder.resolution()
-    }
-}
-
 #[cfg(test)]
 mod tests {
     use super::*;
@@ -1,481 +0,0 @@
-//! MJPEG decoder implementations
-//!
-//! Provides MJPEG decoding using libyuv for SIMD-accelerated decoding.
-//! All decoders output to standard YUV formats suitable for encoding.
-
-use std::sync::Once;
-use tracing::{debug, info};
-
-use crate::error::{AppError, Result};
-use crate::video::format::Resolution;
-
-static INIT_LOGGING: Once = Once::new();
-
-/// Initialize decoder logging (only once)
-fn init_decoder_logging() {
-    INIT_LOGGING.call_once(|| {
-        debug!("MJPEG decoder logging initialized");
-    });
-}
-
-/// MJPEG decoder configuration
-#[derive(Debug, Clone)]
-pub struct MjpegVaapiDecoderConfig {
-    /// Expected resolution (can be updated from decoded frame)
-    pub resolution: Resolution,
-    /// Use hardware acceleration (ignored, kept for API compatibility)
-    pub use_hwaccel: bool,
-}
-
-impl Default for MjpegVaapiDecoderConfig {
-    fn default() -> Self {
-        Self {
-            resolution: Resolution::HD1080,
-            use_hwaccel: true,
-        }
-    }
-}
-
-/// Decoded frame data in NV12 format
-#[derive(Debug, Clone)]
-pub struct DecodedNv12Frame {
-    /// Y plane data
-    pub y_plane: Vec<u8>,
-    /// UV interleaved plane data
-    pub uv_plane: Vec<u8>,
-    /// Y plane linesize (stride)
-    pub y_linesize: i32,
-    /// UV plane linesize (stride)
-    pub uv_linesize: i32,
-    /// Frame width
-    pub width: i32,
-    /// Frame height
-    pub height: i32,
-}
-
-/// Decoded frame data in YUV420P (I420) format
-#[derive(Debug, Clone)]
-pub struct DecodedYuv420pFrame {
-    /// Y plane data
-    pub y_plane: Vec<u8>,
-    /// U plane data
-    pub u_plane: Vec<u8>,
-    /// V plane data
-    pub v_plane: Vec<u8>,
-    /// Y plane linesize (stride)
-    pub y_linesize: i32,
-    /// U plane linesize (stride)
-    pub u_linesize: i32,
-    /// V plane linesize (stride)
-    pub v_linesize: i32,
-    /// Frame width
-    pub width: i32,
-    /// Frame height
-    pub height: i32,
-}
-
-impl DecodedYuv420pFrame {
-    /// Get packed YUV420P data (Y plane followed by U and V planes, with stride removed)
-    pub fn to_packed_yuv420p(&self) -> Vec<u8> {
-        let width = self.width as usize;
-        let height = self.height as usize;
-        let y_size = width * height;
-        let uv_size = width * height / 4;
-
-        let mut packed = Vec::with_capacity(y_size + uv_size * 2);
-
-        // Copy Y plane, removing stride padding if any
-        if self.y_linesize as usize == width {
-            packed.extend_from_slice(&self.y_plane[..y_size]);
-        } else {
-            for row in 0..height {
-                let src_offset = row * self.y_linesize as usize;
-                packed.extend_from_slice(&self.y_plane[src_offset..src_offset + width]);
-            }
-        }
-
-        // Copy U plane
-        let uv_width = width / 2;
-        let uv_height = height / 2;
-        if self.u_linesize as usize == uv_width {
-            packed.extend_from_slice(&self.u_plane[..uv_size]);
-        } else {
-            for row in 0..uv_height {
-                let src_offset = row * self.u_linesize as usize;
-                packed.extend_from_slice(&self.u_plane[src_offset..src_offset + uv_width]);
-            }
-        }
-
-        // Copy V plane
-        if self.v_linesize as usize == uv_width {
-            packed.extend_from_slice(&self.v_plane[..uv_size]);
-        } else {
-            for row in 0..uv_height {
-                let src_offset = row * self.v_linesize as usize;
-                packed.extend_from_slice(&self.v_plane[src_offset..src_offset + uv_width]);
-            }
-        }
-
-        packed
-    }
-
-    /// Copy packed YUV420P data to external buffer (zero allocation)
-    /// Returns the number of bytes written, or None if buffer too small
-    pub fn copy_to_packed_yuv420p(&self, dst: &mut [u8]) -> Option<usize> {
-        let width = self.width as usize;
-        let height = self.height as usize;
-        let y_size = width * height;
-        let uv_size = width * height / 4;
-        let total_size = y_size + uv_size * 2;
-
-        if dst.len() < total_size {
-            return None;
-        }
-
-        // Copy Y plane
-        if self.y_linesize as usize == width {
-            dst[..y_size].copy_from_slice(&self.y_plane[..y_size]);
-        } else {
-            for row in 0..height {
-                let src_offset = row * self.y_linesize as usize;
-                let dst_offset = row * width;
-                dst[dst_offset..dst_offset + width]
-                    .copy_from_slice(&self.y_plane[src_offset..src_offset + width]);
-            }
-        }
-
-        // Copy U plane
-        let uv_width = width / 2;
-        let uv_height = height / 2;
-        if self.u_linesize as usize == uv_width {
-            dst[y_size..y_size + uv_size].copy_from_slice(&self.u_plane[..uv_size]);
-        } else {
-            for row in 0..uv_height {
-                let src_offset = row * self.u_linesize as usize;
-                let dst_offset = y_size + row * uv_width;
-                dst[dst_offset..dst_offset + uv_width]
-                    .copy_from_slice(&self.u_plane[src_offset..src_offset + uv_width]);
-            }
-        }
-
-        // Copy V plane
-        let v_offset = y_size + uv_size;
-        if self.v_linesize as usize == uv_width {
-            dst[v_offset..v_offset + uv_size].copy_from_slice(&self.v_plane[..uv_size]);
-        } else {
-            for row in 0..uv_height {
-                let src_offset = row * self.v_linesize as usize;
-                let dst_offset = v_offset + row * uv_width;
-                dst[dst_offset..dst_offset + uv_width]
-                    .copy_from_slice(&self.v_plane[src_offset..src_offset + uv_width]);
-            }
-        }
-
-        Some(total_size)
-    }
-}
-
-impl DecodedNv12Frame {
-    /// Get packed NV12 data (Y plane followed by UV plane, with stride removed)
-    pub fn to_packed_nv12(&self) -> Vec<u8> {
-        let width = self.width as usize;
-        let height = self.height as usize;
-        let y_size = width * height;
-        let uv_size = width * height / 2;
-
-        let mut packed = Vec::with_capacity(y_size + uv_size);
-
-        // Copy Y plane, removing stride padding if any
-        if self.y_linesize as usize == width {
-            // No padding, direct copy
-            packed.extend_from_slice(&self.y_plane[..y_size]);
-        } else {
-            // Has padding, copy row by row
-            for row in 0..height {
-                let src_offset = row * self.y_linesize as usize;
-                packed.extend_from_slice(&self.y_plane[src_offset..src_offset + width]);
-            }
-        }
-
-        // Copy UV plane, removing stride padding if any
-        let uv_height = height / 2;
-        if self.uv_linesize as usize == width {
-            // No padding, direct copy
-            packed.extend_from_slice(&self.uv_plane[..uv_size]);
-        } else {
-            // Has padding, copy row by row
-            for row in 0..uv_height {
-                let src_offset = row * self.uv_linesize as usize;
-                packed.extend_from_slice(&self.uv_plane[src_offset..src_offset + width]);
-            }
-        }
-
-        packed
-    }
-
-    /// Copy packed NV12 data to external buffer (zero allocation)
-    /// Returns the number of bytes written, or None if buffer too small
-    pub fn copy_to_packed_nv12(&self, dst: &mut [u8]) -> Option<usize> {
-        let width = self.width as usize;
-        let height = self.height as usize;
-        let y_size = width * height;
-        let uv_size = width * height / 2;
-        let total_size = y_size + uv_size;
-
-        if dst.len() < total_size {
-            return None;
-        }
-
-        // Copy Y plane, removing stride padding if any
-        if self.y_linesize as usize == width {
-            // No padding, direct copy
-            dst[..y_size].copy_from_slice(&self.y_plane[..y_size]);
-        } else {
-            // Has padding, copy row by row
-            for row in 0..height {
-                let src_offset = row * self.y_linesize as usize;
-                let dst_offset = row * width;
-                dst[dst_offset..dst_offset + width]
-                    .copy_from_slice(&self.y_plane[src_offset..src_offset + width]);
-            }
-        }
-
-        // Copy UV plane, removing stride padding if any
-        let uv_height = height / 2;
-        if self.uv_linesize as usize == width {
-            // No padding, direct copy
-            dst[y_size..total_size].copy_from_slice(&self.uv_plane[..uv_size]);
-        } else {
-            // Has padding, copy row by row
-            for row in 0..uv_height {
-                let src_offset = row * self.uv_linesize as usize;
-                let dst_offset = y_size + row * width;
-                dst[dst_offset..dst_offset + width]
-                    .copy_from_slice(&self.uv_plane[src_offset..src_offset + width]);
-            }
-        }
-
-        Some(total_size)
-    }
-}
-
-/// MJPEG decoder with NV12 output
-///
-/// Uses libyuv for SIMD-accelerated MJPEG decoding to YUV420P,
-/// then converts to NV12 for hardware encoder compatibility.
-/// Named "VaapiDecoder" for API compatibility with existing code.
-pub struct MjpegVaapiDecoder {
-    /// Configuration
-    config: MjpegVaapiDecoderConfig,
-    /// Frame counter
-    frame_count: u64,
-}
-
-impl MjpegVaapiDecoder {
-    /// Create a new MJPEG decoder
-    pub fn new(config: MjpegVaapiDecoderConfig) -> Result<Self> {
-        init_decoder_logging();
-
-        info!(
-            "Creating MJPEG decoder with libyuv (SIMD-accelerated, NV12 output)"
-        );
-
-        Ok(Self {
-            config,
-            frame_count: 0,
-        })
-    }
-
-    /// Create with default config
-    pub fn with_vaapi(resolution: Resolution) -> Result<Self> {
-        Self::new(MjpegVaapiDecoderConfig {
-            resolution,
-            use_hwaccel: true,
-        })
-    }
-
-    /// Create with software decoding (same as with_vaapi, kept for API compatibility)
-    pub fn with_software(resolution: Resolution) -> Result<Self> {
-        Self::new(MjpegVaapiDecoderConfig {
-            resolution,
-            use_hwaccel: false,
-        })
-    }
-
-    /// Check if hardware acceleration is active (always false, using libyuv)
-    pub fn is_hwaccel_active(&self) -> bool {
-        false
-    }
-
-    /// Decode MJPEG frame to NV12
-    ///
-    /// Returns the decoded frame in NV12 format, or an error if decoding fails.
-    pub fn decode(&mut self, jpeg_data: &[u8]) -> Result<DecodedNv12Frame> {
-        if jpeg_data.len() < 2 {
-            return Err(AppError::VideoError("JPEG data too small".to_string()));
-        }
-
-        // Verify JPEG signature (FFD8)
-        if jpeg_data[0] != 0xFF || jpeg_data[1] != 0xD8 {
-            return Err(AppError::VideoError("Invalid JPEG signature".to_string()));
-        }
-
-        self.frame_count += 1;
-
-        // Get JPEG dimensions
-        let (width, height) = libyuv::mjpeg_size(jpeg_data)
-            .map_err(|e| AppError::VideoError(format!("Failed to read MJPEG size: {}", e)))?;
-
-        // Decode MJPEG to YUV420P first
-        let y_size = (width * height) as usize;
-        let uv_size = y_size / 4;
-        let yuv420_size = y_size + uv_size * 2;
-        let mut yuv_data = vec![0u8; yuv420_size];
-
-        libyuv::mjpeg_to_i420(jpeg_data, &mut yuv_data, width, height)
-            .map_err(|e| AppError::VideoError(format!("libyuv MJPEG→I420 failed: {}", e)))?;
-
-        // Convert I420 to NV12
-        let nv12_size = (width * height * 3 / 2) as usize;
-        let mut nv12_data = vec![0u8; nv12_size];
-
-        libyuv::i420_to_nv12(&yuv_data, &mut nv12_data, width, height)
-            .map_err(|e| AppError::VideoError(format!("libyuv I420→NV12 failed: {}", e)))?;
-
-        // Split into Y and UV planes
-        let y_plane = nv12_data[..y_size].to_vec();
-        let uv_plane = nv12_data[y_size..].to_vec();
-
-        Ok(DecodedNv12Frame {
-            y_plane,
-            uv_plane,
-            y_linesize: width,
-            uv_linesize: width,
-            width,
-            height,
-        })
-    }
-
-    /// Get frame count
-    pub fn frame_count(&self) -> u64 {
-        self.frame_count
-    }
-
-    /// Get current resolution from config
-    pub fn resolution(&self) -> Resolution {
-        self.config.resolution
-    }
-}
-
-/// Libyuv-based MJPEG decoder for direct YUV420P output
-///
-/// This decoder is optimized for software encoders (libvpx, libx265) that need YUV420P input.
-/// It uses libyuv's MJPGToI420 to decode directly to I420/YUV420P format.
-pub struct MjpegTurboDecoder {
-    /// Frame counter
-    frame_count: u64,
-}
-
-impl MjpegTurboDecoder {
-    /// Create a new libyuv-based MJPEG decoder
-    pub fn new(resolution: Resolution) -> Result<Self> {
-        info!(
-            "Created libyuv MJPEG decoder for {}x{} (direct YUV420P output)",
-            resolution.width, resolution.height
-        );
-
-        Ok(Self {
-            frame_count: 0,
-        })
-    }
-
-    /// Decode MJPEG frame directly to YUV420P using libyuv
-    ///
-    /// This is the optimal path for software encoders that need YUV420P input.
-    /// libyuv handles all JPEG subsampling formats internally.
-    pub fn decode_to_yuv420p(&mut self, jpeg_data: &[u8]) -> Result<DecodedYuv420pFrame> {
-        if jpeg_data.len() < 2 || jpeg_data[0] != 0xFF || jpeg_data[1] != 0xD8 {
-            return Err(AppError::VideoError("Invalid JPEG data".to_string()));
-        }
-
-        self.frame_count += 1;
-
-        // Get JPEG dimensions
-        let (width, height) = libyuv::mjpeg_size(jpeg_data)
-            .map_err(|e| AppError::VideoError(format!("Failed to read MJPEG size: {}", e)))?;
-
-        let y_size = (width * height) as usize;
-        let uv_size = y_size / 4;
-        let yuv420_size = y_size + uv_size * 2;
-
-        let mut yuv_data = vec![0u8; yuv420_size];
-
-        libyuv::mjpeg_to_i420(jpeg_data, &mut yuv_data, width, height)
-            .map_err(|e| AppError::VideoError(format!("libyuv MJPEG→I420 failed: {}", e)))?;
-
-        Ok(DecodedYuv420pFrame {
-            y_plane: yuv_data[..y_size].to_vec(),
-            u_plane: yuv_data[y_size..y_size + uv_size].to_vec(),
-            v_plane: yuv_data[y_size + uv_size..].to_vec(),
-            y_linesize: width,
-            u_linesize: width / 2,
-            v_linesize: width / 2,
-            width,
-            height,
-        })
-    }
-
-    /// Decode directly to packed YUV420P buffer using libyuv
-    ///
-    /// This uses libyuv's MJPGToI420 which handles all JPEG subsampling formats
-    /// and converts to I420 directly.
-    pub fn decode_to_yuv420p_buffer(&mut self, jpeg_data: &[u8], dst: &mut [u8]) -> Result<usize> {
-        if jpeg_data.len() < 2 || jpeg_data[0] != 0xFF || jpeg_data[1] != 0xD8 {
-            return Err(AppError::VideoError("Invalid JPEG data".to_string()));
-        }
-
-        self.frame_count += 1;
-
-        // Get JPEG dimensions from libyuv
-        let (width, height) = libyuv::mjpeg_size(jpeg_data)
-            .map_err(|e| AppError::VideoError(format!("Failed to read MJPEG size: {}", e)))?;
-
-        let yuv420_size = (width * height * 3 / 2) as usize;
-
-        if dst.len() < yuv420_size {
-            return Err(AppError::VideoError(format!(
-                "Buffer too small: {} < {}", dst.len(), yuv420_size
-            )));
-        }
-
-        // Decode MJPEG directly to I420 using libyuv
-        // libyuv handles all JPEG subsampling formats (4:2:0, 4:2:2, 4:4:4) internally
-        libyuv::mjpeg_to_i420(jpeg_data, &mut dst[..yuv420_size], width, height)
-            .map_err(|e| AppError::VideoError(format!("libyuv MJPEG→I420 failed: {}", e)))?;
-
-        Ok(yuv420_size)
-    }
-
-    /// Get frame count
-    pub fn frame_count(&self) -> u64 {
-        self.frame_count
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    #[test]
-    fn test_decoder_creation() {
-        let config = MjpegVaapiDecoderConfig::default();
-        match MjpegVaapiDecoder::new(config) {
-            Ok(decoder) => {
-                println!("Decoder created, hwaccel: {}", decoder.is_hwaccel_active());
-            }
-            Err(e) => {
-                println!("Failed to create decoder: {}", e);
-            }
-        }
-    }
-}
@@ -1,11 +1,3 @@
 //! Video decoder implementations
 //!
-//! This module provides video decoding capabilities including:
-//! - MJPEG VAAPI hardware decoding (outputs NV12)
-//! - MJPEG turbojpeg decoding (outputs YUV420P directly)
-
-pub mod mjpeg;
-
-pub use mjpeg::{
-    DecodedYuv420pFrame, MjpegTurboDecoder, MjpegVaapiDecoder, MjpegVaapiDecoderConfig,
-};
+//! This module provides video decoding capabilities.
@@ -355,7 +355,7 @@ mod tests {
     fn test_codec_config_default() {
         let config = VideoCodecConfig::default();
        assert_eq!(config.codec, VideoCodecType::H264);
-        assert_eq!(config.bitrate_kbps, 2000);
+        assert_eq!(config.bitrate_kbps, 8000);
         assert_eq!(config.fps, 30);
     }
@@ -13,7 +13,6 @@ use tracing::{debug, error, info, warn};

 use crate::error::{AppError, Result};
 use crate::video::convert::Nv12Converter;
-use crate::video::decoder::mjpeg::{MjpegVaapiDecoder, MjpegVaapiDecoderConfig};
 use crate::video::encoder::h264::{H264Config, H264Encoder};
 use crate::video::format::{PixelFormat, Resolution};
 use crate::webrtc::rtp::{H264VideoTrack, H264VideoTrackConfig};
@@ -79,8 +78,6 @@ pub struct H264Pipeline {
     encoder: Arc<Mutex<Option<H264Encoder>>>,
     /// NV12 converter (for BGR24/RGB24/YUYV → NV12)
     nv12_converter: Arc<Mutex<Option<Nv12Converter>>>,
-    /// MJPEG VAAPI decoder (for MJPEG input, outputs NV12)
-    mjpeg_decoder: Arc<Mutex<Option<MjpegVaapiDecoder>>>,
     /// WebRTC video track
     video_track: Arc<H264VideoTrack>,
     /// Pipeline statistics
@@ -127,44 +124,38 @@ impl H264Pipeline {
             encoder_input_format
         );

-        // Create NV12 converter or MJPEG decoder based on input format
+        // Create NV12 converter based on input format
         // All formats are converted to NV12 for VAAPI encoder
-        let (nv12_converter, mjpeg_decoder) = match config.input_format {
+        let nv12_converter = match config.input_format {
             // NV12 input - direct passthrough
             PixelFormat::Nv12 => {
                 info!("NV12 input: direct passthrough to encoder");
-                (None, None)
+                None
             }

             // YUYV (4:2:2 packed) → NV12
             PixelFormat::Yuyv => {
                 info!("YUYV input: converting to NV12");
-                (Some(Nv12Converter::yuyv_to_nv12(config.resolution)), None)
+                Some(Nv12Converter::yuyv_to_nv12(config.resolution))
             }

             // RGB24 → NV12
             PixelFormat::Rgb24 => {
                 info!("RGB24 input: converting to NV12");
-                (Some(Nv12Converter::rgb24_to_nv12(config.resolution)), None)
+                Some(Nv12Converter::rgb24_to_nv12(config.resolution))
             }

             // BGR24 → NV12
             PixelFormat::Bgr24 => {
                 info!("BGR24 input: converting to NV12");
-                (Some(Nv12Converter::bgr24_to_nv12(config.resolution)), None)
+                Some(Nv12Converter::bgr24_to_nv12(config.resolution))
             }

-            // MJPEG/JPEG → NV12 (via hwcodec decoder)
+            // MJPEG/JPEG input - not supported (requires libjpeg for decoding)
             PixelFormat::Mjpeg | PixelFormat::Jpeg => {
-                let decoder_config = MjpegVaapiDecoderConfig {
-                    resolution: config.resolution,
-                    use_hwaccel: true,
-                };
-                let decoder = MjpegVaapiDecoder::new(decoder_config)?;
-                info!(
-                    "MJPEG decoder created for H264 pipeline (outputs NV12)"
-                );
-                (None, Some(decoder))
+                return Err(AppError::VideoError(
+                    "MJPEG input format not supported in this build".to_string()
+                ));
             }

             _ => {
@@ -192,7 +183,6 @@ impl H264Pipeline {
             config,
             encoder: Arc::new(Mutex::new(Some(encoder))),
             nv12_converter: Arc::new(Mutex::new(nv12_converter)),
-            mjpeg_decoder: Arc::new(Mutex::new(mjpeg_decoder)),
             video_track,
             stats: Arc::new(Mutex::new(H264PipelineStats::default())),
             running: running_tx,
@@ -230,7 +220,6 @@ impl H264Pipeline {

         let encoder = self.encoder.lock().await.take();
         let nv12_converter = self.nv12_converter.lock().await.take();
-        let mjpeg_decoder = self.mjpeg_decoder.lock().await.take();
         let video_track = self.video_track.clone();
         let stats = self.stats.clone();
         let encode_times = self.encode_times.clone();
@@ -248,15 +237,10 @@ impl H264Pipeline {
         };

         let mut nv12_converter = nv12_converter;
-        let mut mjpeg_decoder = mjpeg_decoder;
         let mut frame_count: u64 = 0;
         let mut last_fps_time = Instant::now();
         let mut fps_frame_count: u64 = 0;

-        // Pre-allocated NV12 buffer for MJPEG decoder output (avoids per-frame allocation)
-        let nv12_size = (config.resolution.width * config.resolution.height * 3 / 2) as usize;
-        let mut nv12_buffer = vec![0u8; nv12_size];
-
         // Flag for one-time warnings
         let mut size_mismatch_warned = false;
@@ -298,7 +282,6 @@ impl H264Pipeline {
                 }

                 // Convert to NV12 for VAAPI encoder
-                // MJPEG -> NV12 (via VAAPI decoder)
                 // BGR24/RGB24/YUYV -> NV12 (via NV12 converter)
                 // NV12 -> pass through
                 //
@@ -307,36 +290,7 @@ impl H264Pipeline {
                 fps_frame_count += 1;
                 let pts_ms = (frame_count * 1000 / config.fps as u64) as i64;

-                let encode_result = if let Some(ref mut decoder) = mjpeg_decoder {
-                    // MJPEG input - decode to NV12 via VAAPI
-                    match decoder.decode(&raw_frame) {
-                        Ok(nv12_frame) => {
-                            // Calculate required size for this frame
-                            let required_size = (nv12_frame.width * nv12_frame.height * 3 / 2) as usize;
-
-                            // Resize buffer if needed (handles resolution changes)
-                            if nv12_buffer.len() < required_size {
-                                debug!(
-                                    "Resizing NV12 buffer: {} -> {} bytes (resolution: {}x{})",
-                                    nv12_buffer.len(), required_size,
-                                    nv12_frame.width, nv12_frame.height
-                                );
-                                nv12_buffer.resize(required_size, 0);
-                            }
-
-                            // Copy to pre-allocated buffer (guaranteed to fit after resize)
-                            let written = nv12_frame.copy_to_packed_nv12(&mut nv12_buffer)
-                                .expect("BUG: buffer too small after resize");
-                            encoder.encode_raw(&nv12_buffer[..written], pts_ms)
-                        }
-                        Err(e) => {
-                            error!("MJPEG VAAPI decode failed: {}", e);
-                            let mut s = stats.lock().await;
-                            s.errors += 1;
-                            continue;
-                        }
-                    }
-                } else if let Some(ref mut conv) = nv12_converter {
+                let encode_result = if let Some(ref mut conv) = nv12_converter {
                     // BGR24/RGB24/YUYV input - convert to NV12
                     // Optimized: pass reference directly without copy
                     match conv.convert(&raw_frame) {
@@ -518,7 +472,7 @@ mod tests {
     fn test_pipeline_config_default() {
         let config = H264PipelineConfig::default();
         assert_eq!(config.resolution, Resolution::HD720);
-        assert_eq!(config.bitrate_kbps, 2000);
+        assert_eq!(config.bitrate_kbps, 8000);
         assert_eq!(config.fps, 30);
         assert_eq!(config.gop_size, 30);
     }
@@ -16,8 +16,7 @@ pub mod streamer;
 pub mod video_session;

 pub use capture::VideoCapturer;
-pub use convert::{MjpegDecoder, MjpegToYuv420Converter, PixelConverter, Yuv420pBuffer};
-pub use decoder::{MjpegVaapiDecoder, MjpegVaapiDecoderConfig};
+pub use convert::{PixelConverter, Yuv420pBuffer};
 pub use device::{VideoDevice, VideoDeviceInfo};
 pub use encoder::{JpegEncoder, H264Encoder, H264EncoderType};
 pub use format::PixelFormat;
@@ -28,7 +28,6 @@ const AUTO_STOP_GRACE_PERIOD_SECS: u64 = 3;

 use crate::error::{AppError, Result};
 use crate::video::convert::{Nv12Converter, PixelConverter};
-use crate::video::decoder::mjpeg::{MjpegTurboDecoder, MjpegVaapiDecoder, MjpegVaapiDecoderConfig};
 use crate::video::encoder::h264::{H264Config, H264Encoder};
 use crate::video::encoder::h265::{H265Config, H265Encoder};
 use crate::video::encoder::registry::{EncoderBackend, EncoderRegistry, VideoEncoderType};
@@ -298,12 +297,6 @@ pub struct SharedVideoPipeline {
     encoder: Mutex<Option<Box<dyn VideoEncoderTrait + Send>>>,
     nv12_converter: Mutex<Option<Nv12Converter>>,
     yuv420p_converter: Mutex<Option<PixelConverter>>,
-    mjpeg_decoder: Mutex<Option<MjpegVaapiDecoder>>,
-    /// Turbojpeg decoder for direct MJPEG->YUV420P (optimized for software encoders)
-    mjpeg_turbo_decoder: Mutex<Option<MjpegTurboDecoder>>,
     nv12_buffer: Mutex<Vec<u8>>,
-    /// YUV420P buffer for turbojpeg decoder output
-    yuv420p_buffer: Mutex<Vec<u8>>,
     /// Whether the encoder needs YUV420P (true) or NV12 (false)
     encoder_needs_yuv420p: AtomicBool,
     /// Whether YUYV direct input is enabled (RKMPP optimization)
@@ -335,18 +328,12 @@ impl SharedVideoPipeline {

         let (frame_tx, _) = broadcast::channel(16); // Reduced from 64 for lower latency
         let (running_tx, running_rx) = watch::channel(false);
         let nv12_size = (config.resolution.width * config.resolution.height * 3 / 2) as usize;
-        let yuv420p_size = nv12_size; // Same size as NV12

         let pipeline = Arc::new(Self {
             config: RwLock::new(config),
             encoder: Mutex::new(None),
             nv12_converter: Mutex::new(None),
             yuv420p_converter: Mutex::new(None),
-            mjpeg_decoder: Mutex::new(None),
-            mjpeg_turbo_decoder: Mutex::new(None),
             nv12_buffer: Mutex::new(vec![0u8; nv12_size]),
-            yuv420p_buffer: Mutex::new(vec![0u8; yuv420p_size]),
             encoder_needs_yuv420p: AtomicBool::new(false),
             yuyv_direct_input: AtomicBool::new(false),
             frame_tx,
@@ -505,42 +492,36 @@ impl SharedVideoPipeline {
             config.input_format,
             if use_yuyv_direct { "YUYV422 (direct)" } else if needs_yuv420p { "YUV420P" } else { "NV12" });

-        let (nv12_converter, yuv420p_converter, mjpeg_decoder, mjpeg_turbo_decoder) = if use_yuyv_direct {
+        let (nv12_converter, yuv420p_converter) = if use_yuyv_direct {
             // RKMPP with YUYV direct input - skip all conversion
             info!("YUYV direct input enabled for RKMPP, skipping format conversion");
-            (None, None, None, None)
+            (None, None)
         } else if needs_yuv420p {
             // Software encoder needs YUV420P
             match config.input_format {
                 PixelFormat::Yuv420 => {
                     info!("Using direct YUV420P input (no conversion)");
-                    (None, None, None, None)
+                    (None, None)
                 }
                 PixelFormat::Yuyv => {
                     info!("Using YUYV->YUV420P converter");
-                    (None, Some(PixelConverter::yuyv_to_yuv420p(config.resolution)), None, None)
+                    (None, Some(PixelConverter::yuyv_to_yuv420p(config.resolution)))
                 }
                 PixelFormat::Nv12 => {
                     info!("Using NV12->YUV420P converter");
-                    (None, Some(PixelConverter::nv12_to_yuv420p(config.resolution)), None, None)
+                    (None, Some(PixelConverter::nv12_to_yuv420p(config.resolution)))
                 }
                 PixelFormat::Rgb24 => {
                     info!("Using RGB24->YUV420P converter");
-                    (None, Some(PixelConverter::rgb24_to_yuv420p(config.resolution)), None, None)
+                    (None, Some(PixelConverter::rgb24_to_yuv420p(config.resolution)))
                 }
                 PixelFormat::Bgr24 => {
                     info!("Using BGR24->YUV420P converter");
-                    (None, Some(PixelConverter::bgr24_to_yuv420p(config.resolution)), None, None)
-                }
-                PixelFormat::Mjpeg | PixelFormat::Jpeg => {
-                    // Use turbojpeg for direct MJPEG->YUV420P (no intermediate NV12)
-                    info!("Using turbojpeg MJPEG decoder (direct YUV420P output)");
-                    let turbo_decoder = MjpegTurboDecoder::new(config.resolution)?;
-                    (None, None, None, Some(turbo_decoder))
+                    (None, Some(PixelConverter::bgr24_to_yuv420p(config.resolution)))
                 }
                 _ => {
                     return Err(AppError::VideoError(format!(
-                        "Unsupported input format: {}",
+                        "Unsupported input format for software encoding: {}",
                         config.input_format
                     )));
                 }
@@ -550,32 +531,23 @@ impl SharedVideoPipeline {
             match config.input_format {
                 PixelFormat::Nv12 => {
                     info!("Using direct NV12 input (no conversion)");
-                    (None, None, None, None)
+                    (None, None)
                 }
                 PixelFormat::Yuyv => {
                     info!("Using YUYV->NV12 converter");
-                    (Some(Nv12Converter::yuyv_to_nv12(config.resolution)), None, None, None)
+                    (Some(Nv12Converter::yuyv_to_nv12(config.resolution)), None)
                 }
                 PixelFormat::Rgb24 => {
                     info!("Using RGB24->NV12 converter");
-                    (Some(Nv12Converter::rgb24_to_nv12(config.resolution)), None, None, None)
+                    (Some(Nv12Converter::rgb24_to_nv12(config.resolution)), None)
                 }
                 PixelFormat::Bgr24 => {
                     info!("Using BGR24->NV12 converter");
-                    (Some(Nv12Converter::bgr24_to_nv12(config.resolution)), None, None, None)
-                }
-                PixelFormat::Mjpeg | PixelFormat::Jpeg => {
-                    info!("Using MJPEG decoder (NV12 output)");
-                    let decoder_config = MjpegVaapiDecoderConfig {
-                        resolution: config.resolution,
-                        use_hwaccel: true,
-                    };
-                    let decoder = MjpegVaapiDecoder::new(decoder_config)?;
-                    (None, None, Some(decoder), None)
+                    (Some(Nv12Converter::bgr24_to_nv12(config.resolution)), None)
                 }
                 _ => {
                     return Err(AppError::VideoError(format!(
-                        "Unsupported input format: {}",
+                        "Unsupported input format for hardware encoding: {}",
                         config.input_format
                     )));
                 }
@@ -585,8 +557,6 @@ impl SharedVideoPipeline {
         *self.encoder.lock().await = Some(encoder);
         *self.nv12_converter.lock().await = nv12_converter;
         *self.yuv420p_converter.lock().await = yuv420p_converter;
-        *self.mjpeg_decoder.lock().await = mjpeg_decoder;
-        *self.mjpeg_turbo_decoder.lock().await = mjpeg_turbo_decoder;
         self.encoder_needs_yuv420p.store(needs_yuv420p, Ordering::Release);
         self.yuyv_direct_input.store(use_yuyv_direct, Ordering::Release);
@@ -669,8 +639,6 @@ impl SharedVideoPipeline {
         *self.encoder.lock().await = None;
         *self.nv12_converter.lock().await = None;
         *self.yuv420p_converter.lock().await = None;
-        *self.mjpeg_decoder.lock().await = None;
-        *self.mjpeg_turbo_decoder.lock().await = None;
         self.encoder_needs_yuv420p.store(false, Ordering::Release);

         info!("Switched to {} codec", codec);
@@ -862,8 +830,6 @@ impl SharedVideoPipeline {
             );
         }

-        let mut mjpeg_decoder = self.mjpeg_decoder.lock().await;
-        let mut mjpeg_turbo_decoder = self.mjpeg_turbo_decoder.lock().await;
         let mut nv12_converter = self.nv12_converter.lock().await;
         let mut yuv420p_converter = self.yuv420p_converter.lock().await;
         let needs_yuv420p = self.encoder_needs_yuv420p.load(Ordering::Acquire);
@@ -879,38 +845,7 @@ impl SharedVideoPipeline {
             debug!("[Pipeline] Keyframe will be generated for this frame");
         }

-        let encode_result = if mjpeg_turbo_decoder.is_some() {
-            // Optimized path: MJPEG -> YUV420P directly via turbojpeg (for software encoders)
-            let turbo = mjpeg_turbo_decoder.as_mut().unwrap();
-            let mut yuv420p_buffer = self.yuv420p_buffer.lock().await;
-            let written = turbo.decode_to_yuv420p_buffer(raw_frame, &mut yuv420p_buffer)
-                .map_err(|e| AppError::VideoError(format!("turbojpeg decode failed: {}", e)))?;
-            encoder.encode_raw(&yuv420p_buffer[..written], pts_ms)
-        } else if mjpeg_decoder.is_some() {
-            // MJPEG input: decode to NV12 (for hardware encoders)
-            let decoder = mjpeg_decoder.as_mut().unwrap();
-            let nv12_frame = decoder.decode(raw_frame)
-                .map_err(|e| AppError::VideoError(format!("MJPEG decode failed: {}", e)))?;
-
-            let required_size = (nv12_frame.width * nv12_frame.height * 3 / 2) as usize;
-            let mut nv12_buffer = self.nv12_buffer.lock().await;
-            if nv12_buffer.len() < required_size {
-                nv12_buffer.resize(required_size, 0);
-            }
-
-            let written = nv12_frame.copy_to_packed_nv12(&mut nv12_buffer)
-                .expect("Buffer too small");
-
-            // Debug log for H265 after MJPEG decode
-            if codec == VideoEncoderType::H265 && frame_count % 30 == 1 {
-                debug!(
-                    "[Pipeline-H265] MJPEG decoded: nv12_size={}, frame_width={}, frame_height={}",
-                    written, nv12_frame.width, nv12_frame.height
-                );
-            }
-
-            encoder.encode_raw(&nv12_buffer[..written], pts_ms)
-        } else if needs_yuv420p && yuv420p_converter.is_some() {
+        let encode_result = if needs_yuv420p && yuv420p_converter.is_some() {
             // Software encoder with direct input conversion to YUV420P
             let conv = yuv420p_converter.as_mut().unwrap();
             let yuv420p_data = conv.convert(raw_frame)
@@ -930,8 +865,6 @@ impl SharedVideoPipeline {
         drop(encoder_guard);
         drop(nv12_converter);
         drop(yuv420p_converter);
-        drop(mjpeg_decoder);
-        drop(mjpeg_turbo_decoder);

         match encode_result {
             Ok(frames) => {
@@ -57,7 +57,7 @@ async fn handle_audio_socket(socket: WebSocket, state: Arc<AppState>) {
         // Send error message before closing
         let _ = sender
             .send(Message::Text(
-                r#"{"error": "Audio not streaming"}"#.to_string(),
+                r#"{"error": "Audio not streaming"}"#.to_string().into(),
             ))
             .await;
         return;
@@ -83,7 +83,7 @@ async fn handle_audio_socket(socket: WebSocket, state: Arc<AppState>) {
                     match opus_result {
                         Ok(frame) => {
                             let binary = encode_audio_packet(&frame, stream_start);
-                            if sender.send(Message::Binary(binary)).await.is_err() {
+                            if sender.send(Message::Binary(binary.into())).await.is_err() {
                                 debug!("Failed to send audio frame, client disconnected");
                                 break;
                             }
@@ -133,7 +133,7 @@ async fn handle_audio_socket(socket: WebSocket, state: Arc<AppState>) {

             // Periodic ping to keep connection alive (using interval)
             _ = ping_interval.tick() => {
-                if sender.send(Message::Ping(vec![])).await.is_err() {
+                if sender.send(Message::Ping(vec![].into())).await.is_err() {
                     warn!("Failed to send ping, disconnecting");
                     break;
                 }
@@ -82,7 +82,7 @@ async fn handle_terminal_websocket(client_ws: WebSocket, query_string: String) {
     let client_to_ttyd = tokio::spawn(async move {
         while let Some(msg) = client_rx.next().await {
             let ttyd_msg = match msg {
-                Ok(AxumMessage::Text(text)) => TungsteniteMessage::Text(text),
+                Ok(AxumMessage::Text(text)) => TungsteniteMessage::Text(text.to_string().into()),
                 Ok(AxumMessage::Binary(data)) => TungsteniteMessage::Binary(data),
                 Ok(AxumMessage::Ping(data)) => TungsteniteMessage::Ping(data),
                 Ok(AxumMessage::Pong(data)) => TungsteniteMessage::Pong(data),
@@ -103,7 +103,7 @@ async fn handle_terminal_websocket(client_ws: WebSocket, query_string: String) {
     let ttyd_to_client = tokio::spawn(async move {
         while let Some(msg) = ttyd_rx.next().await {
             let client_msg = match msg {
-                Ok(TungsteniteMessage::Text(text)) => AxumMessage::Text(text),
+                Ok(TungsteniteMessage::Text(text)) => AxumMessage::Text(text.to_string().into()),
                 Ok(TungsteniteMessage::Binary(data)) => AxumMessage::Binary(data),
                 Ok(TungsteniteMessage::Ping(data)) => AxumMessage::Ping(data),
                 Ok(TungsteniteMessage::Pong(data)) => AxumMessage::Pong(data),
@@ -70,7 +70,7 @@ pub fn create_router(state: Arc<AppState>) -> Router {
         // Audio WebSocket endpoint
         .route("/ws/audio", any(audio_ws_handler))
         // User can change their own password (handler will check ownership)
-        .route("/users/:id/password", post(handlers::change_user_password));
+        .route("/users/{id}/password", post(handlers::change_user_password));

     // Admin-only routes (require admin privileges)
     let admin_routes = Router::new()
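
All of the route edits in this file are the same mechanical migration: axum 0.8 replaced the `/:param` and `/*wildcard` matcher syntax with `/{param}` and `/{*wildcard}`. A minimal sketch (handler name hypothetical):

```rust
use axum::{extract::Path, routing::get, Router};

// Hypothetical handler: axum 0.8 uses `{id}` where 0.7 used `:id`,
// and `{*path}` where 0.7 used `*path`.
async fn show(Path(id): Path<String>) -> String {
    id
}

fn router() -> Router {
    Router::new().route("/items/{id}", get(show))
}
```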
@@ -106,8 +106,8 @@ pub fn create_router(state: Arc<AppState>) -> Router {
         .route("/msd/images", get(handlers::msd_images_list))
         .route("/msd/images/download", post(handlers::msd_image_download))
         .route("/msd/images/download/cancel", post(handlers::msd_image_download_cancel))
-        .route("/msd/images/:id", get(handlers::msd_image_get))
-        .route("/msd/images/:id", delete(handlers::msd_image_delete))
+        .route("/msd/images/{id}", get(handlers::msd_image_get))
+        .route("/msd/images/{id}", delete(handlers::msd_image_delete))
         .route("/msd/connect", post(handlers::msd_connect))
         .route("/msd/disconnect", post(handlers::msd_disconnect))
         // MSD Virtual Drive endpoints
@@ -115,9 +115,9 @@ pub fn create_router(state: Arc<AppState>) -> Router {
         .route("/msd/drive", delete(handlers::msd_drive_delete))
         .route("/msd/drive/init", post(handlers::msd_drive_init))
         .route("/msd/drive/files", get(handlers::msd_drive_files))
-        .route("/msd/drive/files/*path", get(handlers::msd_drive_download))
-        .route("/msd/drive/files/*path", delete(handlers::msd_drive_file_delete))
-        .route("/msd/drive/mkdir/*path", post(handlers::msd_drive_mkdir))
+        .route("/msd/drive/files/{*path}", get(handlers::msd_drive_download))
+        .route("/msd/drive/files/{*path}", delete(handlers::msd_drive_file_delete))
+        .route("/msd/drive/mkdir/{*path}", post(handlers::msd_drive_mkdir))
         // ATX (Power Control) endpoints
         .route("/atx/status", get(handlers::atx_status))
         .route("/atx/power", post(handlers::atx_power))
@@ -127,14 +127,14 @@ pub fn create_router(state: Arc<AppState>) -> Router {
         // User management endpoints
         .route("/users", get(handlers::list_users))
         .route("/users", post(handlers::create_user))
-        .route("/users/:id", put(handlers::update_user))
-        .route("/users/:id", delete(handlers::delete_user))
+        .route("/users/{id}", put(handlers::update_user))
+        .route("/users/{id}", delete(handlers::delete_user))
         // Extension management endpoints
         .route("/extensions", get(handlers::extensions::list_extensions))
-        .route("/extensions/:id", get(handlers::extensions::get_extension))
-        .route("/extensions/:id/start", post(handlers::extensions::start_extension))
-        .route("/extensions/:id/stop", post(handlers::extensions::stop_extension))
-        .route("/extensions/:id/logs", get(handlers::extensions::get_extension_logs))
+        .route("/extensions/{id}", get(handlers::extensions::get_extension))
+        .route("/extensions/{id}/start", post(handlers::extensions::start_extension))
+        .route("/extensions/{id}/stop", post(handlers::extensions::stop_extension))
+        .route("/extensions/{id}/logs", get(handlers::extensions::get_extension_logs))
         .route("/extensions/ttyd/config", patch(handlers::extensions::update_ttyd_config))
         .route("/extensions/ttyd/status", get(handlers::extensions::get_ttyd_status))
         .route("/extensions/gostc/config", patch(handlers::extensions::update_gostc_config))
@@ -143,7 +143,7 @@ pub fn create_router(state: Arc<AppState>) -> Router {
         .route("/terminal", get(handlers::terminal::terminal_index))
         .route("/terminal/", get(handlers::terminal::terminal_index))
         .route("/terminal/ws", get(handlers::terminal::terminal_ws))
-        .route("/terminal/*path", get(handlers::terminal::terminal_proxy))
+        .route("/terminal/{*path}", get(handlers::terminal::terminal_proxy))
         // Apply admin middleware to all admin routes
         .layer(middleware::from_fn_with_state(state.clone(), require_admin));
@@ -45,7 +45,7 @@ where
 {
     Router::new()
         .route("/", get(index_handler))
-        .route("/*path", get(static_handler))
+        .route("/{*path}", get(static_handler))
 }

 /// Serve index.html for root path
@@ -79,7 +79,7 @@ async fn handle_socket(socket: WebSocket, state: Arc<AppState>) {
             if !device_info_sent && !subscribed_topics.is_empty() {
                 let device_info = state.get_device_info().await;
                 if let Ok(json) = serialize_event(&device_info) {
-                    if sender.send(Message::Text(json)).await.is_err() {
+                    if sender.send(Message::Text(json.into())).await.is_err() {
                         warn!("Failed to send device info to client");
                         break;
                     }
@@ -113,7 +113,7 @@ async fn handle_socket(socket: WebSocket, state: Arc<AppState>) {
                         // Filter event based on subscribed topics
                         if should_send_event(&event, &subscribed_topics) {
                             if let Ok(json) = serialize_event(&event) {
-                                if sender.send(Message::Text(json)).await.is_err() {
+                                if sender.send(Message::Text(json.into())).await.is_err() {
                                     warn!("Failed to send event to client, disconnecting");
                                     break;
                                 }
@@ -127,7 +127,7 @@ async fn handle_socket(socket: WebSocket, state: Arc<AppState>) {
                             message: format!("Lagged by {} events", n),
                         };
                         if let Ok(json) = serialize_event(&error_event) {
-                            let _ = sender.send(Message::Text(json)).await;
+                            let _ = sender.send(Message::Text(json.into())).await;
                         }
                     }
                     Err(_) => {
@@ -139,7 +139,7 @@ async fn handle_socket(socket: WebSocket, state: Arc<AppState>) {

             // Heartbeat
             _ = heartbeat_interval.tick() => {
-                if sender.send(Message::Ping(vec![])).await.is_err() {
+                if sender.send(Message::Ping(vec![].into())).await.is_err() {
                     warn!("Failed to send ping, disconnecting");
                     break;
                 }
@@ -397,7 +397,7 @@ mod tests {

     #[test]
     fn test_verify_with_rtp_depacketizer() {
-        use rtp::codecs::h265::{H265Packet, H265Payload, H265FragmentationUnitPacket};
+        use rtp::codecs::h265::{H265Packet, H265Payload};
         use rtp::packetizer::Depacketizer;

         let mut payloader = H265Payloader::new();
@@ -731,7 +731,7 @@ mod tests {
     fn test_h264_track_config_default() {
         let config = H264VideoTrackConfig::default();
         assert_eq!(config.fps, 30);
-        assert_eq!(config.bitrate_kbps, 2000);
+        assert_eq!(config.bitrate_kbps, 8000);
         assert_eq!(config.resolution, Resolution::HD720);
     }
 }
@@ -1058,7 +1058,7 @@ mod tests {
         let config = WebRtcStreamerConfig::default();
         assert_eq!(config.video_codec, VideoCodecType::H264);
         assert_eq!(config.resolution, Resolution::HD720);
-        assert_eq!(config.bitrate_preset, BitratePreset::Quality);
+        assert_eq!(config.bitrate_preset, BitratePreset::Balanced);
         assert_eq!(config.fps, 30);
         assert!(!config.audio_enabled);
     }