diff --git a/Cargo.toml b/Cargo.toml index 6172656f..a95b5b8f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "one-kvm" -version = "0.1.0" +version = "0.1.1" edition = "2021" authors = ["SilentWind"] description = "A open and lightweight IP-KVM solution written in Rust" @@ -140,4 +140,4 @@ panic = "abort" # Static linking profile for musl targets [profile.release-static] inherits = "release" -opt-level = "z" # Optimize for size \ No newline at end of file +opt-level = "z" # Optimize for size diff --git a/build/cross/Dockerfile.arm64 b/build/cross/Dockerfile.arm64 index ec860e7f..d0542c1b 100644 --- a/build/cross/Dockerfile.arm64 +++ b/build/cross/Dockerfile.arm64 @@ -217,6 +217,9 @@ RUN mkdir -p /tmp/ffmpeg-build && cd /tmp/ffmpeg-build \ -Dlibrga_demo=false \ && ninja -C build \ && ninja -C build install \ + # Create static librga.a from built objects (rkrga uses shared_library) + && ar rcs /usr/aarch64-linux-gnu/lib/librga.a $(find build -name '*.o') \ + && ranlib /usr/aarch64-linux-gnu/lib/librga.a \ && sed -i 's/^Libs:.*$/& -lstdc++ -lm -lpthread/' /usr/aarch64-linux-gnu/lib/pkgconfig/librga.pc \ && cd .. \ # Create pkg-config wrapper for cross-compilation @@ -267,8 +270,10 @@ RUN mkdir -p /tmp/ffmpeg-build && cd /tmp/ffmpeg-build \ --disable-avfilter \ --disable-avdevice \ --disable-postproc \ - # Disable all decoders + # Disable all decoders (re-enable only what we need) --disable-decoders \ + --enable-decoder=mjpeg \ + --enable-decoder=mjpeg_rkmpp \ # Disable all encoders, enable only needed ones --disable-encoders \ --enable-encoder=h264_rkmpp \ @@ -292,8 +297,7 @@ RUN mkdir -p /tmp/ffmpeg-build && cd /tmp/ffmpeg-build \ --disable-bsfs \ --enable-bsf=h264_mp4toannexb \ --enable-bsf=hevc_mp4toannexb \ - # Disable hardware decoding - --disable-hwaccels \ + # Hardware decoding uses explicit rkmpp decoder (no hwaccel flag) # Disable other unused features --disable-indevs \ --disable-outdevs \ diff --git a/build/cross/Dockerfile.armv7 b/build/cross/Dockerfile.armv7 index 197a265c..3bebfc6f 100644 --- a/build/cross/Dockerfile.armv7 +++ b/build/cross/Dockerfile.armv7 @@ -206,6 +206,9 @@ RUN mkdir -p /tmp/ffmpeg-build && cd /tmp/ffmpeg-build \ -Dlibrga_demo=false \ && ninja -C build \ && ninja -C build install \ + # Create static librga.a from built objects (rkrga uses shared_library) + && ar rcs /usr/arm-linux-gnueabihf/lib/librga.a $(find build -name '*.o') \ + && ranlib /usr/arm-linux-gnueabihf/lib/librga.a \ && sed -i 's/^Libs:.*$/& -lstdc++ -lm -lpthread/' /usr/arm-linux-gnueabihf/lib/pkgconfig/librga.pc \ && cd .. 
\ # Create pkg-config wrapper for cross-compilation @@ -256,8 +259,10 @@ RUN mkdir -p /tmp/ffmpeg-build && cd /tmp/ffmpeg-build \ --disable-avfilter \ --disable-avdevice \ --disable-postproc \ - # Disable all decoders + # Disable all decoders (re-enable only what we need) --disable-decoders \ + --enable-decoder=mjpeg \ + --enable-decoder=mjpeg_rkmpp \ # Disable all encoders, enable only needed ones --disable-encoders \ --enable-encoder=h264_rkmpp \ @@ -281,8 +286,7 @@ RUN mkdir -p /tmp/ffmpeg-build && cd /tmp/ffmpeg-build \ --disable-bsfs \ --enable-bsf=h264_mp4toannexb \ --enable-bsf=hevc_mp4toannexb \ - # Disable hardware decoding - --disable-hwaccels \ + # Hardware decoding uses explicit rkmpp decoder (no hwaccel flag) # Disable other unused features --disable-indevs \ --disable-outdevs \ diff --git a/build/package-deb.sh b/build/package-deb.sh index e754bbe9..3099ce45 100755 --- a/build/package-deb.sh +++ b/build/package-deb.sh @@ -125,13 +125,20 @@ EOF chmod 755 "$PKG_DIR/DEBIAN/prerm" # Create control file + BASE_DEPS="libc6 (>= 2.31), libgcc-s1, libstdc++6, libasound2 (>= 1.1), libdrm2 (>= 2.4)" + AMD64_DEPS="libva2 (>= 2.0), libva-drm2 (>= 2.10), libva-x11-2 (>= 2.10), libmfx1 (>= 21.1), libx11-6 (>= 1.6), libxcb1 (>= 1.14)" + DEPS="$BASE_DEPS" + if [ "$DEB_ARCH" = "amd64" ]; then + DEPS="$DEPS, $AMD64_DEPS" + fi + cat > "$PKG_DIR/DEBIAN/control" <= 2.31), libgcc-s1, libstdc++6, libasound2 (>= 1.1), libva2 (>= 2.0), libdrm2 (>= 2.4), libx11-6 (>= 1.6), libxcb1 (>= 1.14) +Depends: $DEPS Maintainer: SilentWind Description: A open and lightweight IP-KVM solution Enables BIOS-level remote management of servers and workstations. diff --git a/libs/hwcodec/Cargo.toml b/libs/hwcodec/Cargo.toml index 1909c6b4..a4ae0fac 100644 --- a/libs/hwcodec/Cargo.toml +++ b/libs/hwcodec/Cargo.toml @@ -6,6 +6,7 @@ description = "Hardware video codec for IP-KVM (Windows/Linux)" [features] default = [] +rkmpp = [] [dependencies] log = "0.4" diff --git a/libs/hwcodec/build.rs b/libs/hwcodec/build.rs index 444d6a8c..791316eb 100644 --- a/libs/hwcodec/build.rs +++ b/libs/hwcodec/build.rs @@ -152,7 +152,12 @@ mod ffmpeg { } else { // RKMPP for ARM println!("cargo:rustc-link-lib=rockchip_mpp"); - println!("cargo:rustc-link-lib=rga"); + let rga_static = lib_dir.join("librga.a"); + if rga_static.exists() { + println!("cargo:rustc-link-lib=static=rga"); + } else { + println!("cargo:rustc-link-lib=rga"); + } } // Software codec dependencies (dynamic - GPL) @@ -198,15 +203,24 @@ mod ffmpeg { if let Ok(output) = Command::new("pkg-config").args(&pkg_config_args).output() { if output.status.success() { let libs_str = String::from_utf8_lossy(&output.stdout); + let mut link_paths: Vec = Vec::new(); for flag in libs_str.split_whitespace() { if flag.starts_with("-L") { - println!("cargo:rustc-link-search=native={}", &flag[2..]); + let path = flag[2..].to_string(); + println!("cargo:rustc-link-search=native={}", path); + link_paths.push(path); } else if flag.starts_with("-l") { let lib_name = &flag[2..]; if use_static { // For static linking, link FFmpeg libs statically, others dynamically if lib_name.starts_with("av") || lib_name == "swresample" { println!("cargo:rustc-link-lib=static={}", lib_name); + } else if lib_name == "rga" + && link_paths + .iter() + .any(|path| Path::new(path).join("librga.a").exists()) + { + println!("cargo:rustc-link-lib=static=rga"); } else { // Runtime libraries (va, drm, etc.) 
must be dynamic println!("cargo:rustc-link-lib={}", lib_name); @@ -343,6 +357,20 @@ mod ffmpeg { .write_to_file(Path::new(&env::var_os("OUT_DIR").unwrap()).join("ffmpeg_ram_ffi.rs")) .unwrap(); - builder.files(["ffmpeg_ram_encode.cpp"].map(|f| ffmpeg_ram_dir.join(f))); + builder.file(ffmpeg_ram_dir.join("ffmpeg_ram_encode.cpp")); + + // RKMPP decode only exists on ARM builds where FFmpeg is compiled with RKMPP support. + // Avoid compiling this file on x86/x64 where `AV_HWDEVICE_TYPE_RKMPP` doesn't exist. + let target_arch = std::env::var("CARGO_CFG_TARGET_ARCH").unwrap_or_default(); + let enable_rkmpp = matches!(target_arch.as_str(), "aarch64" | "arm") + || std::env::var_os("CARGO_FEATURE_RKMPP").is_some(); + if enable_rkmpp { + builder.file(ffmpeg_ram_dir.join("ffmpeg_ram_decode.cpp")); + } else { + println!( + "cargo:info=Skipping ffmpeg_ram_decode.cpp (RKMPP) for arch {}", + target_arch + ); + } } } diff --git a/libs/hwcodec/cpp/ffmpeg_ram/ffmpeg_ram_decode.cpp b/libs/hwcodec/cpp/ffmpeg_ram/ffmpeg_ram_decode.cpp new file mode 100644 index 00000000..52a6e2a6 --- /dev/null +++ b/libs/hwcodec/cpp/ffmpeg_ram/ffmpeg_ram_decode.cpp @@ -0,0 +1,280 @@ +// Minimal FFmpeg RAM MJPEG decoder (RKMPP only) -> NV12 in CPU memory. + +extern "C" { +#include +#include +#include +#include +#include +} + +#include +#include +#include + +#include "common.h" + +#define LOG_MODULE "FFMPEG_RAM_DEC" +#include +#include + +typedef void (*RamDecodeCallback)(const uint8_t *data, int len, int width, + int height, int pixfmt, const void *obj); + +namespace { +thread_local std::string g_last_error; + +static void set_last_error(const std::string &msg) { + g_last_error = msg; +} + +class FFmpegRamDecoder { +public: + AVCodecContext *c_ = NULL; + AVPacket *pkt_ = NULL; + AVFrame *frame_ = NULL; + AVFrame *sw_frame_ = NULL; + std::string name_; + int width_ = 0; + int height_ = 0; + AVPixelFormat sw_pixfmt_ = AV_PIX_FMT_NV12; + int thread_count_ = 1; + RamDecodeCallback callback_ = NULL; + + AVHWDeviceType hw_device_type_ = AV_HWDEVICE_TYPE_NONE; + AVPixelFormat hw_pixfmt_ = AV_PIX_FMT_NONE; + AVBufferRef *hw_device_ctx_ = NULL; + + explicit FFmpegRamDecoder(const char *name, int width, int height, int sw_pixfmt, + int thread_count, RamDecodeCallback callback) { + name_ = name ? name : ""; + width_ = width; + height_ = height; + sw_pixfmt_ = (AVPixelFormat)sw_pixfmt; + thread_count_ = thread_count > 0 ? 
thread_count : 1; + callback_ = callback; + + if (name_.find("rkmpp") != std::string::npos) { + hw_device_type_ = AV_HWDEVICE_TYPE_RKMPP; + } + } + + ~FFmpegRamDecoder() {} + + static enum AVPixelFormat get_hw_format(AVCodecContext *ctx, + const enum AVPixelFormat *pix_fmts) { + FFmpegRamDecoder *dec = reinterpret_cast(ctx->opaque); + if (dec && dec->hw_pixfmt_ != AV_PIX_FMT_NONE) { + const enum AVPixelFormat *p; + for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) { + if (*p == dec->hw_pixfmt_) { + return *p; + } + } + } + return pix_fmts[0]; + } + + bool init() { + g_last_error.clear(); + const AVCodec *codec = NULL; + int ret = 0; + + if (!(codec = avcodec_find_decoder_by_name(name_.c_str()))) { + set_last_error(std::string("Decoder not found: ") + name_); + return false; + } + + if (!(c_ = avcodec_alloc_context3(codec))) { + set_last_error(std::string("Could not allocate decoder context")); + return false; + } + + c_->width = width_; + c_->height = height_; + c_->thread_count = thread_count_; + c_->opaque = this; + + if (hw_device_type_ != AV_HWDEVICE_TYPE_NONE) { + const AVCodecHWConfig *cfg = NULL; + for (int i = 0; (cfg = avcodec_get_hw_config(codec, i)); i++) { + if (cfg->device_type == hw_device_type_) { + hw_pixfmt_ = cfg->pix_fmt; + break; + } + } + if (hw_pixfmt_ == AV_PIX_FMT_NONE) { + set_last_error(std::string("No suitable HW pixfmt for decoder")); + return false; + } + + ret = av_hwdevice_ctx_create(&hw_device_ctx_, hw_device_type_, NULL, NULL, 0); + if (ret < 0) { + set_last_error(std::string("av_hwdevice_ctx_create failed, ret = ") + av_err2str(ret)); + return false; + } + c_->hw_device_ctx = av_buffer_ref(hw_device_ctx_); + c_->get_format = get_hw_format; + + AVBufferRef *frames_ref = av_hwframe_ctx_alloc(c_->hw_device_ctx); + if (!frames_ref) { + set_last_error(std::string("av_hwframe_ctx_alloc failed")); + return false; + } + AVHWFramesContext *frames_ctx = (AVHWFramesContext *)frames_ref->data; + frames_ctx->format = hw_pixfmt_; + frames_ctx->sw_format = sw_pixfmt_; + frames_ctx->width = width_; + frames_ctx->height = height_; + frames_ctx->initial_pool_size = 8; + ret = av_hwframe_ctx_init(frames_ref); + if (ret < 0) { + av_buffer_unref(&frames_ref); + set_last_error(std::string("av_hwframe_ctx_init failed, ret = ") + av_err2str(ret)); + return false; + } + c_->hw_frames_ctx = av_buffer_ref(frames_ref); + av_buffer_unref(&frames_ref); + } + + if ((ret = avcodec_open2(c_, codec, NULL)) < 0) { + set_last_error(std::string("avcodec_open2 failed, ret = ") + av_err2str(ret)); + return false; + } + + pkt_ = av_packet_alloc(); + frame_ = av_frame_alloc(); + sw_frame_ = av_frame_alloc(); + if (!pkt_ || !frame_ || !sw_frame_) { + set_last_error(std::string("Failed to allocate packet/frame")); + return false; + } + + return true; + } + + int decode(const uint8_t *data, int length, const void *obj) { + g_last_error.clear(); + int ret = 0; + if (!c_ || !pkt_ || !frame_) { + set_last_error(std::string("Decoder not initialized")); + return -1; + } + + av_packet_unref(pkt_); + ret = av_new_packet(pkt_, length); + if (ret < 0) { + set_last_error(std::string("av_new_packet failed, ret = ") + av_err2str(ret)); + return ret; + } + memcpy(pkt_->data, data, length); + pkt_->size = length; + + ret = avcodec_send_packet(c_, pkt_); + av_packet_unref(pkt_); + if (ret < 0) { + set_last_error(std::string("avcodec_send_packet failed, ret = ") + av_err2str(ret)); + return ret; + } + + while (true) { + ret = avcodec_receive_frame(c_, frame_); + if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) { + 
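+        // EAGAIN means the decoder needs more input and AVERROR_EOF means it has been
+        // fully flushed; both are normal exits from the receive loop, not errors.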
break; + } + if (ret < 0) { + set_last_error(std::string("avcodec_receive_frame failed, ret = ") + av_err2str(ret)); + return ret; + } + + AVFrame *out = frame_; + if (frame_->format == hw_pixfmt_) { + av_frame_unref(sw_frame_); + ret = av_hwframe_transfer_data(sw_frame_, frame_, 0); + if (ret < 0) { + set_last_error(std::string("av_hwframe_transfer_data failed, ret = ") + av_err2str(ret)); + return ret; + } + out = sw_frame_; + } + + int buf_size = + av_image_get_buffer_size((AVPixelFormat)out->format, out->width, out->height, 1); + if (buf_size < 0) { + set_last_error(std::string("av_image_get_buffer_size failed, ret = ") + av_err2str(buf_size)); + return buf_size; + } + + std::vector buf(buf_size); + ret = av_image_copy_to_buffer(buf.data(), buf_size, + (const uint8_t *const *)out->data, out->linesize, + (AVPixelFormat)out->format, out->width, out->height, 1); + if (ret < 0) { + set_last_error(std::string("av_image_copy_to_buffer failed, ret = ") + av_err2str(ret)); + return ret; + } + + if (callback_) { + callback_(buf.data(), buf_size, out->width, out->height, out->format, obj); + } + + av_frame_unref(frame_); + } + + return 0; + } + + void fini() { + if (pkt_) { + av_packet_free(&pkt_); + } + if (frame_) { + av_frame_free(&frame_); + } + if (sw_frame_) { + av_frame_free(&sw_frame_); + } + if (c_) { + avcodec_free_context(&c_); + } + if (hw_device_ctx_) { + av_buffer_unref(&hw_device_ctx_); + } + } +}; +} // namespace + +extern "C" void *ffmpeg_ram_new_decoder(const char *name, int width, int height, + int sw_pixfmt, int thread_count, + RamDecodeCallback callback) { + FFmpegRamDecoder *dec = + new FFmpegRamDecoder(name, width, height, sw_pixfmt, thread_count, callback); + if (!dec->init()) { + dec->fini(); + delete dec; + return NULL; + } + return dec; +} + +extern "C" int ffmpeg_ram_decode(void *decoder, const uint8_t *data, int length, + const void *obj) { + FFmpegRamDecoder *dec = reinterpret_cast(decoder); + if (!dec) { + return -1; + } + return dec->decode(data, length, obj); +} + +extern "C" void ffmpeg_ram_free_decoder(void *decoder) { + FFmpegRamDecoder *dec = reinterpret_cast(decoder); + if (!dec) { + return; + } + dec->fini(); + delete dec; +} + +extern "C" const char *ffmpeg_ram_last_error(void) { + return g_last_error.c_str(); +} diff --git a/libs/hwcodec/cpp/ffmpeg_ram/ffmpeg_ram_ffi.h b/libs/hwcodec/cpp/ffmpeg_ram/ffmpeg_ram_ffi.h index 814c22ea..60c45b3a 100644 --- a/libs/hwcodec/cpp/ffmpeg_ram/ffmpeg_ram_ffi.h +++ b/libs/hwcodec/cpp/ffmpeg_ram/ffmpeg_ram_ffi.h @@ -7,6 +7,8 @@ typedef void (*RamEncodeCallback)(const uint8_t *data, int len, int64_t pts, int key, const void *obj); +typedef void (*RamDecodeCallback)(const uint8_t *data, int len, int width, + int height, int pixfmt, const void *obj); void *ffmpeg_ram_new_encoder(const char *name, const char *mc_name, int width, int height, int pixfmt, int align, int fps, @@ -23,4 +25,12 @@ int ffmpeg_ram_get_linesize_offset_length(int pix_fmt, int width, int height, int ffmpeg_ram_set_bitrate(void *encoder, int kbs); void ffmpeg_ram_request_keyframe(void *encoder); +void *ffmpeg_ram_new_decoder(const char *name, int width, int height, + int sw_pixfmt, int thread_count, + RamDecodeCallback callback); +int ffmpeg_ram_decode(void *decoder, const uint8_t *data, int length, + const void *obj); +void ffmpeg_ram_free_decoder(void *decoder); +const char *ffmpeg_ram_last_error(void); + #endif // FFMPEG_RAM_FFI_H diff --git a/libs/hwcodec/src/ffmpeg_ram/decode.rs b/libs/hwcodec/src/ffmpeg_ram/decode.rs new file mode 100644 index 
00000000..df0512a3
--- /dev/null
+++ b/libs/hwcodec/src/ffmpeg_ram/decode.rs
@@ -0,0 +1,127 @@
+use crate::{
+    ffmpeg::{init_av_log, AVPixelFormat},
+    ffmpeg_ram::{
+        ffmpeg_ram_decode, ffmpeg_ram_free_decoder, ffmpeg_ram_last_error,
+        ffmpeg_ram_new_decoder,
+    },
+};
+use std::{
+    ffi::{c_void, CString},
+    os::raw::c_int,
+    slice,
+};
+
+#[derive(Debug, Clone, PartialEq)]
+pub struct DecodeContext {
+    pub name: String,
+    pub width: i32,
+    pub height: i32,
+    pub sw_pixfmt: AVPixelFormat,
+    pub thread_count: i32,
+}
+
+pub struct DecodeFrame {
+    pub data: Vec<u8>,
+    pub width: i32,
+    pub height: i32,
+    pub pixfmt: AVPixelFormat,
+}
+
+pub struct Decoder {
+    codec: *mut c_void,
+    frames: *mut Vec<DecodeFrame>,
+    pub ctx: DecodeContext,
+}
+
+// Safety: Decoder is only accessed through higher-level synchronization
+// (a tokio::Mutex in the video pipeline). It is never accessed concurrently,
+// but may be moved across threads; the underlying FFmpeg RAM decoder state
+// is thread-confined per instance, so Send (but not Sync) is acceptable.
+unsafe impl Send for Decoder {}
+
+impl Decoder {
+    pub fn new(ctx: DecodeContext) -> Result<Self, ()> {
+        init_av_log();
+        unsafe {
+            let codec = ffmpeg_ram_new_decoder(
+                CString::new(ctx.name.as_str()).map_err(|_| ())?.as_ptr(),
+                ctx.width,
+                ctx.height,
+                ctx.sw_pixfmt as c_int,
+                ctx.thread_count,
+                Some(Decoder::callback),
+            );
+            if codec.is_null() {
+                let msg = last_error_message();
+                if !msg.is_empty() {
+                    log::error!("ffmpeg_ram_new_decoder failed: {}", msg);
+                }
+                return Err(());
+            }
+            Ok(Decoder {
+                codec,
+                frames: Box::into_raw(Box::new(Vec::<DecodeFrame>::new())),
+                ctx,
+            })
+        }
+    }
+
+    pub fn decode(&mut self, data: &[u8]) -> Result<&mut Vec<DecodeFrame>, i32> {
+        unsafe {
+            (&mut *self.frames).clear();
+            let ret = ffmpeg_ram_decode(
+                self.codec,
+                data.as_ptr(),
+                data.len() as c_int,
+                self.frames as *const _ as *const c_void,
+            );
+            if ret != 0 {
+                let msg = last_error_message();
+                if !msg.is_empty() {
+                    log::error!("ffmpeg_ram_decode failed: {}", msg);
+                }
+                return Err(ret);
+            }
+            Ok(&mut *self.frames)
+        }
+    }
+
+    extern "C" fn callback(
+        data: *const u8,
+        size: c_int,
+        width: c_int,
+        height: c_int,
+        pixfmt: c_int,
+        obj: *const c_void,
+    ) {
+        unsafe {
+            let frames = &mut *(obj as *mut Vec<DecodeFrame>);
+            frames.push(DecodeFrame {
+                data: slice::from_raw_parts(data, size as usize).to_vec(),
+                width,
+                height,
+                pixfmt: std::mem::transmute::<c_int, AVPixelFormat>(pixfmt),
+            });
+        }
+    }
+}
+
+impl Drop for Decoder {
+    fn drop(&mut self) {
+        unsafe {
+            ffmpeg_ram_free_decoder(self.codec);
+            drop(Box::from_raw(self.frames));
+        }
+    }
+}
+
+fn last_error_message() -> String {
+    unsafe {
+        let ptr = ffmpeg_ram_last_error();
+        if ptr.is_null() {
+            return String::new();
+        }
+        let cstr = std::ffi::CStr::from_ptr(ptr);
+        cstr.to_string_lossy().to_string()
+    }
+}
diff --git a/libs/hwcodec/src/ffmpeg_ram/mod.rs b/libs/hwcodec/src/ffmpeg_ram/mod.rs
index 1f02ed28..2ec0f758 100644
--- a/libs/hwcodec/src/ffmpeg_ram/mod.rs
+++ b/libs/hwcodec/src/ffmpeg_ram/mod.rs
@@ -12,6 +12,47 @@ use std::ffi::c_int;
 include!(concat!(env!("OUT_DIR"), "/ffmpeg_ram_ffi.rs"));
+#[cfg(any(target_arch = "aarch64", target_arch = "arm", feature = "rkmpp"))]
+pub mod decode;
+
+// Provide a small stub on non-ARM builds so dependents can still compile, but decoder
+// construction will fail (since the C++ RKMPP decoder isn't built/linked).
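Usage sketch (not part of this diff): the new `Decoder` in `libs/hwcodec/src/ffmpeg_ram/decode.rs` is driven one compressed packet at a time and hands back zero or more CPU-memory frames through the callback-filled `Vec<DecodeFrame>`. The snippet assumes the `hwcodec` crate from this repository; the 1920x1080 size and the `mjpeg_rkmpp` codec name are illustrative values only.

```rust
use hwcodec::ffmpeg::AVPixelFormat;
use hwcodec::ffmpeg_ram::decode::{DecodeContext, Decoder};

fn decode_one_packet(mjpeg: &[u8]) -> Result<(), ()> {
    // Illustrative capture size; the video pipeline passes the configured resolution.
    let ctx = DecodeContext {
        name: "mjpeg_rkmpp".to_string(),
        width: 1920,
        height: 1080,
        sw_pixfmt: AVPixelFormat::AV_PIX_FMT_NV12,
        thread_count: 1,
    };
    let mut decoder = Decoder::new(ctx)?;

    // One compressed packet in, zero or more decoded NV12 frames out.
    let frames = decoder.decode(mjpeg).map_err(|_| ())?;
    for frame in frames.iter() {
        println!(
            "decoded {}x{} frame ({} bytes, pixfmt {:?})",
            frame.width,
            frame.height,
            frame.data.len(),
            frame.pixfmt
        );
    }
    Ok(())
}
```

On non-ARM targets the stub module defined next keeps such callers compiling, but `Decoder::new` always returns `Err(())`.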
+#[cfg(not(any(target_arch = "aarch64", target_arch = "arm", feature = "rkmpp")))] +pub mod decode { + use crate::ffmpeg::AVPixelFormat; + + #[derive(Debug, Clone, PartialEq)] + pub struct DecodeContext { + pub name: String, + pub width: i32, + pub height: i32, + pub sw_pixfmt: AVPixelFormat, + pub thread_count: i32, + } + + pub struct DecodeFrame { + pub data: Vec, + pub width: i32, + pub height: i32, + pub pixfmt: AVPixelFormat, + } + + pub struct Decoder { + pub ctx: DecodeContext, + } + + impl Decoder { + pub fn new(ctx: DecodeContext) -> Result { + let _ = ctx; + Err(()) + } + + pub fn decode(&mut self, _data: &[u8]) -> Result<&mut Vec, i32> { + Err(-1) + } + } +} + pub mod encode; pub enum Priority { diff --git a/src/config/schema.rs b/src/config/schema.rs index c2b2062b..7fa53497 100644 --- a/src/config/schema.rs +++ b/src/config/schema.rs @@ -202,25 +202,37 @@ impl Default for HidConfig { pub struct MsdConfig { /// Enable MSD functionality pub enabled: bool, - /// Storage path for ISO/IMG images - pub images_path: String, - /// Path for Ventoy bootable drive file - pub drive_path: String, - /// Ventoy drive size in MB (minimum 1024 MB / 1 GB) - pub virtual_drive_size_mb: u32, + /// MSD base directory (absolute path) + pub msd_dir: String, } impl Default for MsdConfig { fn default() -> Self { Self { enabled: true, - images_path: "./data/msd/images".to_string(), - drive_path: "./data/msd/ventoy.img".to_string(), - virtual_drive_size_mb: 16 * 1024, // 16GB default + msd_dir: String::new(), } } } +impl MsdConfig { + pub fn msd_dir_path(&self) -> std::path::PathBuf { + std::path::PathBuf::from(&self.msd_dir) + } + + pub fn images_dir(&self) -> std::path::PathBuf { + self.msd_dir_path().join("images") + } + + pub fn ventoy_dir(&self) -> std::path::PathBuf { + self.msd_dir_path().join("ventoy") + } + + pub fn drive_path(&self) -> std::path::PathBuf { + self.ventoy_dir().join("ventoy.img") + } +} + // Re-export ATX types from atx module for configuration pub use crate::atx::{ActiveLevel, AtxDriverType, AtxKeyConfig, AtxLedConfig}; diff --git a/src/main.rs b/src/main.rs index 08291de1..9c2d93ea 100644 --- a/src/main.rs +++ b/src/main.rs @@ -65,7 +65,7 @@ struct CliArgs { #[arg(long, value_name = "FILE", requires = "ssl_cert")] ssl_key: Option, - /// Data directory path (default: ./data) + /// Data directory path (default: /etc/one-kvm) #[arg(short = 'd', long, value_name = "DIR")] data_dir: Option, @@ -104,6 +104,34 @@ async fn main() -> anyhow::Result<()> { let config_store = ConfigStore::new(&db_path).await?; let mut config = (*config_store.get()).clone(); + // Normalize MSD directory (absolute path under data dir if empty/relative) + let mut msd_dir_updated = false; + if config.msd.msd_dir.trim().is_empty() { + let msd_dir = data_dir.join("msd"); + config.msd.msd_dir = msd_dir.to_string_lossy().to_string(); + msd_dir_updated = true; + } else if !PathBuf::from(&config.msd.msd_dir).is_absolute() { + let msd_dir = data_dir.join(&config.msd.msd_dir); + tracing::warn!( + "MSD directory is relative, rebasing to {}", + msd_dir.display() + ); + config.msd.msd_dir = msd_dir.to_string_lossy().to_string(); + msd_dir_updated = true; + } + if msd_dir_updated { + config_store.set(config.clone()).await?; + } + + // Ensure MSD directories exist (msd/images, msd/ventoy) + let msd_dir = PathBuf::from(&config.msd.msd_dir); + if let Err(e) = tokio::fs::create_dir_all(msd_dir.join("images")).await { + tracing::warn!("Failed to create MSD images directory: {}", e); + } + if let Err(e) = 
tokio::fs::create_dir_all(msd_dir.join("ventoy")).await { + tracing::warn!("Failed to create MSD ventoy directory: {}", e); + } + // Apply CLI argument overrides to config (only if explicitly specified) if let Some(addr) = args.address { config.web.bind_address = addr; @@ -344,11 +372,7 @@ async fn main() -> anyhow::Result<()> { ); } - let controller = MsdController::new( - otg_service.clone(), - &config.msd.images_path, - &config.msd.drive_path, - ); + let controller = MsdController::new(otg_service.clone(), config.msd.msd_dir_path()); if let Err(e) = controller.init().await { tracing::warn!("Failed to initialize MSD controller: {}", e); None diff --git a/src/msd/controller.rs b/src/msd/controller.rs index 0b8dc676..5641d3c7 100644 --- a/src/msd/controller.rs +++ b/src/msd/controller.rs @@ -32,6 +32,8 @@ pub struct MsdController { state: RwLock, /// Images storage path images_path: PathBuf, + /// Ventoy directory path + ventoy_dir: PathBuf, /// Virtual drive path drive_path: PathBuf, /// Event bus for broadcasting state changes (optional) @@ -49,19 +51,22 @@ impl MsdController { /// /// # Parameters /// * `otg_service` - OTG service for gadget management - /// * `images_path` - Directory path for storing ISO/IMG files - /// * `drive_path` - File path for the virtual FAT32 drive + /// * `msd_dir` - Base directory for MSD storage pub fn new( otg_service: Arc, - images_path: impl Into, - drive_path: impl Into, + msd_dir: impl Into, ) -> Self { + let msd_dir = msd_dir.into(); + let images_path = msd_dir.join("images"); + let ventoy_dir = msd_dir.join("ventoy"); + let drive_path = ventoy_dir.join("ventoy.img"); Self { otg_service, msd_function: RwLock::new(None), state: RwLock::new(MsdState::default()), - images_path: images_path.into(), - drive_path: drive_path.into(), + images_path, + ventoy_dir, + drive_path, events: tokio::sync::RwLock::new(None), downloads: Arc::new(RwLock::new(HashMap::new())), operation_lock: Arc::new(RwLock::new(())), @@ -77,6 +82,9 @@ impl MsdController { if let Err(e) = std::fs::create_dir_all(&self.images_path) { warn!("Failed to create images directory: {}", e); } + if let Err(e) = std::fs::create_dir_all(&self.ventoy_dir) { + warn!("Failed to create ventoy directory: {}", e); + } // 2. 
Request MSD function from OtgService info!("Requesting MSD function from OtgService"); @@ -364,6 +372,11 @@ impl MsdController { &self.images_path } + /// Get ventoy directory path + pub fn ventoy_dir(&self) -> &PathBuf { + &self.ventoy_dir + } + /// Get virtual drive path pub fn drive_path(&self) -> &PathBuf { &self.drive_path @@ -588,10 +601,9 @@ mod tests { async fn test_controller_creation() { let temp_dir = TempDir::new().unwrap(); let otg_service = Arc::new(OtgService::new()); - let images_path = temp_dir.path().join("images"); - let drive_path = temp_dir.path().join("ventoy.img"); + let msd_dir = temp_dir.path().join("msd"); - let controller = MsdController::new(otg_service, &images_path, &drive_path); + let controller = MsdController::new(otg_service, &msd_dir); // Check that MSD is not initialized (msd_function is None) let state = controller.state().await; @@ -604,10 +616,9 @@ mod tests { async fn test_state_default() { let temp_dir = TempDir::new().unwrap(); let otg_service = Arc::new(OtgService::new()); - let images_path = temp_dir.path().join("images"); - let drive_path = temp_dir.path().join("ventoy.img"); + let msd_dir = temp_dir.path().join("msd"); - let controller = MsdController::new(otg_service, &images_path, &drive_path); + let controller = MsdController::new(otg_service, &msd_dir); let state = controller.state().await; assert!(!state.available); diff --git a/src/stream/mjpeg.rs b/src/stream/mjpeg.rs index 1b7607b6..7ed8de50 100644 --- a/src/stream/mjpeg.rs +++ b/src/stream/mjpeg.rs @@ -184,11 +184,22 @@ impl MjpegStreamHandler { /// Update current frame pub fn update_frame(&self, frame: VideoFrame) { - // Skip JPEG encoding if no clients are connected (optimization for WebRTC-only mode) - // This avoids unnecessary libyuv conversion when only WebRTC is active - if self.clients.read().is_empty() && !frame.format.is_compressed() { - // Still update the online status and sequence for monitoring purposes - // but skip the expensive JPEG encoding + // Fast path: if no MJPEG clients are connected, do minimal bookkeeping and avoid + // expensive work (JPEG encoding and per-frame dedup hashing). + let has_clients = !self.clients.read().is_empty(); + if !has_clients { + self.dropped_same_frames.store(0, Ordering::Relaxed); + self.sequence.fetch_add(1, Ordering::Relaxed); + self.online.store(frame.online, Ordering::SeqCst); + *self.last_frame_ts.write() = Some(Instant::now()); + + // Keep the latest compressed frame for "instant first frame" when a client connects. + // Avoid retaining large raw buffers when there are no MJPEG clients. + if frame.format.is_compressed() { + self.current_frame.store(Arc::new(Some(frame))); + } else { + self.current_frame.store(Arc::new(None)); + } return; } @@ -237,7 +248,7 @@ impl MjpegStreamHandler { self.dropped_same_frames.store(0, Ordering::Relaxed); self.sequence.fetch_add(1, Ordering::Relaxed); - self.online.store(true, Ordering::SeqCst); + self.online.store(frame.online, Ordering::SeqCst); *self.last_frame_ts.write() = Some(Instant::now()); self.current_frame.store(Arc::new(Some(frame))); @@ -535,9 +546,44 @@ fn frames_are_identical(a: &VideoFrame, b: &VideoFrame) -> bool { return false; } - // Compare hashes instead of full binary data - // Hash is computed once and cached in OnceLock for efficiency - // This is much faster than binary comparison for large frames (1080p MJPEG) + // Avoid hashing the whole frame for obviously different frames by sampling a few + // fixed-size windows first. 
If all samples match, fall back to the cached hash. + let a_data = a.data(); + let b_data = b.data(); + let len = a_data.len(); + + // Small frames: direct compare is cheap. + if len <= 256 { + return a_data == b_data; + } + + const SAMPLE: usize = 16; + debug_assert!(len == b_data.len()); + + // Head + tail. + if a_data[..SAMPLE] != b_data[..SAMPLE] { + return false; + } + if a_data[len - SAMPLE..] != b_data[len - SAMPLE..] { + return false; + } + + // Two interior samples (quarter + middle) to catch common "same header/footer" cases. + let quarter = len / 4; + let quarter_start = quarter.saturating_sub(SAMPLE / 2); + if a_data[quarter_start..quarter_start + SAMPLE] + != b_data[quarter_start..quarter_start + SAMPLE] + { + return false; + } + let mid = len / 2; + let mid_start = mid.saturating_sub(SAMPLE / 2); + if a_data[mid_start..mid_start + SAMPLE] != b_data[mid_start..mid_start + SAMPLE] { + return false; + } + + // Compare hashes instead of full binary data. + // Hash is computed once and cached in OnceLock for efficiency. a.get_hash() == b.get_hash() } diff --git a/src/video/decoder/mjpeg_rkmpp.rs b/src/video/decoder/mjpeg_rkmpp.rs new file mode 100644 index 00000000..c95ada1e --- /dev/null +++ b/src/video/decoder/mjpeg_rkmpp.rs @@ -0,0 +1,95 @@ +//! MJPEG decoder using RKMPP via hwcodec (FFmpeg RAM). + +use hwcodec::ffmpeg::AVPixelFormat; +use hwcodec::ffmpeg_ram::decode::{DecodeContext, Decoder}; +use tracing::warn; + +use crate::error::{AppError, Result}; +use crate::video::convert::Nv12Converter; +use crate::video::format::Resolution; + +pub struct MjpegRkmppDecoder { + decoder: Decoder, + resolution: Resolution, + nv16_to_nv12: Option, + last_pixfmt: Option, +} + +impl MjpegRkmppDecoder { + pub fn new(resolution: Resolution) -> Result { + let ctx = DecodeContext { + name: "mjpeg_rkmpp".to_string(), + width: resolution.width as i32, + height: resolution.height as i32, + sw_pixfmt: AVPixelFormat::AV_PIX_FMT_NV12, + thread_count: 1, + }; + let decoder = Decoder::new(ctx).map_err(|_| { + AppError::VideoError("Failed to create mjpeg_rkmpp decoder".to_string()) + })?; + Ok(Self { + decoder, + resolution, + nv16_to_nv12: None, + last_pixfmt: None, + }) + } + + pub fn decode_to_nv12(&mut self, mjpeg: &[u8]) -> Result> { + let frames = self + .decoder + .decode(mjpeg) + .map_err(|e| AppError::VideoError(format!("mjpeg_rkmpp decode failed: {}", e)))?; + if frames.is_empty() { + return Err(AppError::VideoError( + "mjpeg_rkmpp decode returned no frames".to_string(), + )); + } + if frames.len() > 1 { + warn!( + "mjpeg_rkmpp decode returned {} frames, using last", + frames.len() + ); + } + let frame = frames + .pop() + .ok_or_else(|| AppError::VideoError("mjpeg_rkmpp decode returned empty".to_string()))?; + + if frame.width as u32 != self.resolution.width + || frame.height as u32 != self.resolution.height + { + warn!( + "mjpeg_rkmpp output size {}x{} differs from expected {}x{}", + frame.width, frame.height, self.resolution.width, self.resolution.height + ); + } + + if let Some(last) = self.last_pixfmt { + if frame.pixfmt != last { + warn!( + "mjpeg_rkmpp output pixfmt changed from {:?} to {:?}", + last, frame.pixfmt + ); + } + } else { + self.last_pixfmt = Some(frame.pixfmt); + } + + let pixfmt = self.last_pixfmt.unwrap_or(frame.pixfmt); + match pixfmt { + AVPixelFormat::AV_PIX_FMT_NV12 => Ok(frame.data), + AVPixelFormat::AV_PIX_FMT_NV16 => { + if self.nv16_to_nv12.is_none() { + self.nv16_to_nv12 = Some(Nv12Converter::nv16_to_nv12(self.resolution)); + } + let conv = 
self.nv16_to_nv12.as_mut().unwrap();
+                let nv12 = conv.convert(&frame.data)?;
+                Ok(nv12.to_vec())
+            }
+            other => Err(AppError::VideoError(format!(
+                "mjpeg_rkmpp output pixfmt {:?} (expected NV12/NV16)",
+                other
+            ))),
+        }
+    }
+}
diff --git a/src/video/decoder/mjpeg_turbo.rs b/src/video/decoder/mjpeg_turbo.rs
new file mode 100644
index 00000000..9c8359d1
--- /dev/null
+++ b/src/video/decoder/mjpeg_turbo.rs
@@ -0,0 +1,54 @@
+//! MJPEG decoder using TurboJPEG (software) -> RGB24.
+
+use turbojpeg::{Decompressor, Image, PixelFormat as TJPixelFormat};
+
+use crate::error::{AppError, Result};
+use crate::video::format::Resolution;
+
+pub struct MjpegTurboDecoder {
+    decompressor: Decompressor,
+    resolution: Resolution,
+}
+
+impl MjpegTurboDecoder {
+    pub fn new(resolution: Resolution) -> Result<Self> {
+        let decompressor = Decompressor::new().map_err(|e| {
+            AppError::VideoError(format!("Failed to create turbojpeg decoder: {}", e))
+        })?;
+        Ok(Self {
+            decompressor,
+            resolution,
+        })
+    }
+
+    pub fn decode_to_rgb(&mut self, mjpeg: &[u8]) -> Result<Vec<u8>> {
+        let header = self
+            .decompressor
+            .read_header(mjpeg)
+            .map_err(|e| AppError::VideoError(format!("turbojpeg read_header failed: {}", e)))?;
+
+        if header.width as u32 != self.resolution.width
+            || header.height as u32 != self.resolution.height
+        {
+            return Err(AppError::VideoError(format!(
+                "turbojpeg size mismatch: {}x{} (expected {}x{})",
+                header.width, header.height, self.resolution.width, self.resolution.height
+            )));
+        }
+
+        let pitch = header.width * 3;
+        let mut image = Image {
+            pixels: vec![0u8; header.height * pitch],
+            width: header.width,
+            pitch,
+            height: header.height,
+            format: TJPixelFormat::RGB,
+        };
+
+        self.decompressor
+            .decompress(mjpeg, image.as_deref_mut())
+            .map_err(|e| AppError::VideoError(format!("turbojpeg decode failed: {}", e)))?;
+
+        Ok(image.pixels)
+    }
+}
diff --git a/src/video/decoder/mod.rs b/src/video/decoder/mod.rs
index 928feec9..55a1569f 100644
--- a/src/video/decoder/mod.rs
+++ b/src/video/decoder/mod.rs
@@ -1,3 +1,11 @@
 //! Video decoder implementations
 //!
 //! This module provides video decoding capabilities.
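Usage note for the TurboJPEG path above (a sketch, not part of the diff): the decoder is bound to one capture resolution and returns a tightly packed RGB24 buffer (pitch = width * 3). The 640x480 size is illustrative, and constructing `Resolution` from public `width`/`height` fields is an assumption about that type.

```rust
use crate::error::Result;
use crate::video::decoder::MjpegTurboDecoder;
use crate::video::format::Resolution;

fn decode_preview(mjpeg: &[u8]) -> Result<Vec<u8>> {
    // Assumed construction; the pipeline uses the configured capture resolution.
    let (width, height) = (640u32, 480u32);
    let resolution = Resolution { width, height };

    let mut decoder = MjpegTurboDecoder::new(resolution)?;
    let rgb = decoder.decode_to_rgb(mjpeg)?;

    // RGB24 output is 3 bytes per pixel with no row padding.
    debug_assert_eq!(rgb.len(), (width * height * 3) as usize);
    Ok(rgb)
}
```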
+ +#[cfg(any(target_arch = "aarch64", target_arch = "arm"))] +pub mod mjpeg_rkmpp; +pub mod mjpeg_turbo; + +#[cfg(any(target_arch = "aarch64", target_arch = "arm"))] +pub use mjpeg_rkmpp::MjpegRkmppDecoder; +pub use mjpeg_turbo::MjpegTurboDecoder; diff --git a/src/video/shared_video_pipeline.rs b/src/video/shared_video_pipeline.rs index 2ed46296..928709e4 100644 --- a/src/video/shared_video_pipeline.rs +++ b/src/video/shared_video_pipeline.rs @@ -28,14 +28,17 @@ const AUTO_STOP_GRACE_PERIOD_SECS: u64 = 3; use crate::error::{AppError, Result}; use crate::video::convert::{Nv12Converter, PixelConverter}; +#[cfg(any(target_arch = "aarch64", target_arch = "arm"))] +use crate::video::decoder::MjpegRkmppDecoder; +use crate::video::decoder::MjpegTurboDecoder; use crate::video::encoder::h264::{detect_best_encoder, H264Config, H264Encoder, H264InputFormat}; use crate::video::encoder::h265::{ detect_best_h265_encoder, H265Config, H265Encoder, H265InputFormat, }; use crate::video::encoder::registry::{EncoderBackend, EncoderRegistry, VideoEncoderType}; use crate::video::encoder::traits::EncoderConfig; -use crate::video::encoder::vp8::{VP8Config, VP8Encoder}; -use crate::video::encoder::vp9::{VP9Config, VP9Encoder}; +use crate::video::encoder::vp8::{detect_best_vp8_encoder, VP8Config, VP8Encoder}; +use crate::video::encoder::vp9::{detect_best_vp9_encoder, VP9Config, VP9Encoder}; use crate::video::format::{PixelFormat, Resolution}; use crate::video::frame::VideoFrame; @@ -292,10 +295,27 @@ impl VideoEncoderTrait for VP9EncoderWrapper { } } +enum MjpegDecoderKind { + #[cfg(any(target_arch = "aarch64", target_arch = "arm"))] + Rkmpp(MjpegRkmppDecoder), + Turbo(MjpegTurboDecoder), +} + +impl MjpegDecoderKind { + fn decode(&mut self, data: &[u8]) -> Result> { + match self { + #[cfg(any(target_arch = "aarch64", target_arch = "arm"))] + MjpegDecoderKind::Rkmpp(decoder) => decoder.decode_to_nv12(data), + MjpegDecoderKind::Turbo(decoder) => decoder.decode_to_rgb(data), + } + } +} + /// Universal shared video pipeline pub struct SharedVideoPipeline { config: RwLock, encoder: Mutex>>, + mjpeg_decoder: Mutex>, nv12_converter: Mutex>, yuv420p_converter: Mutex>, /// Whether the encoder needs YUV420P (true) or NV12 (false) @@ -333,6 +353,7 @@ impl SharedVideoPipeline { let pipeline = Arc::new(Self { config: RwLock::new(config), encoder: Mutex::new(None), + mjpeg_decoder: Mutex::new(None), nv12_converter: Mutex::new(None), yuv420p_converter: Mutex::new(None), encoder_needs_yuv420p: AtomicBool::new(false), @@ -367,12 +388,16 @@ impl SharedVideoPipeline { } }; + let needs_mjpeg_decode = config.input_format.is_compressed(); + // Check if RKMPP backend is available for direct input optimization let is_rkmpp_available = registry .encoder_with_backend(VideoEncoderType::H264, EncoderBackend::Rkmpp) .is_some(); - let use_yuyv_direct = is_rkmpp_available && config.input_format == PixelFormat::Yuyv; + let use_yuyv_direct = + is_rkmpp_available && !needs_mjpeg_decode && config.input_format == PixelFormat::Yuyv; let use_rkmpp_direct = is_rkmpp_available + && !needs_mjpeg_decode && matches!( config.input_format, PixelFormat::Yuyv @@ -396,10 +421,9 @@ impl SharedVideoPipeline { ); } - // Create encoder based on codec type - let encoder: Box = match config.output_codec { + let selected_codec_name = match config.output_codec { VideoEncoderType::H264 => { - let codec_name = if use_rkmpp_direct { + if use_rkmpp_direct { // Force RKMPP backend for direct input get_codec_name(VideoEncoderType::H264, 
Some(EncoderBackend::Rkmpp)).ok_or_else( || { @@ -423,11 +447,109 @@ impl SharedVideoPipeline { detected.ok_or_else(|| { AppError::VideoError("No H.264 encoder available".to_string()) })? - }; + } + } + VideoEncoderType::H265 => { + if use_rkmpp_direct { + get_codec_name(VideoEncoderType::H265, Some(EncoderBackend::Rkmpp)).ok_or_else( + || { + AppError::VideoError( + "RKMPP backend not available for H.265".to_string(), + ) + }, + )? + } else if let Some(ref backend) = config.encoder_backend { + get_codec_name(VideoEncoderType::H265, Some(*backend)).ok_or_else(|| { + AppError::VideoError(format!( + "Backend {:?} does not support H.265", + backend + )) + })? + } else { + let (_encoder_type, detected) = + detect_best_h265_encoder(config.resolution.width, config.resolution.height); + detected.ok_or_else(|| { + AppError::VideoError("No H.265 encoder available".to_string()) + })? + } + } + VideoEncoderType::VP8 => { + if let Some(ref backend) = config.encoder_backend { + get_codec_name(VideoEncoderType::VP8, Some(*backend)).ok_or_else(|| { + AppError::VideoError(format!("Backend {:?} does not support VP8", backend)) + })? + } else { + let (_encoder_type, detected) = + detect_best_vp8_encoder(config.resolution.width, config.resolution.height); + detected.ok_or_else(|| { + AppError::VideoError("No VP8 encoder available".to_string()) + })? + } + } + VideoEncoderType::VP9 => { + if let Some(ref backend) = config.encoder_backend { + get_codec_name(VideoEncoderType::VP9, Some(*backend)).ok_or_else(|| { + AppError::VideoError(format!("Backend {:?} does not support VP9", backend)) + })? + } else { + let (_encoder_type, detected) = + detect_best_vp9_encoder(config.resolution.width, config.resolution.height); + detected.ok_or_else(|| { + AppError::VideoError("No VP9 encoder available".to_string()) + })? 
+ } + } + }; + + let is_rkmpp_encoder = selected_codec_name.contains("rkmpp"); + let is_software_encoder = selected_codec_name.contains("libx264") + || selected_codec_name.contains("libx265") + || selected_codec_name.contains("libvpx"); + + let pipeline_input_format = if needs_mjpeg_decode { + if is_rkmpp_encoder { + info!( + "MJPEG input detected, using RKMPP decoder ({} -> NV12 with NV16 fallback)", + config.input_format + ); + #[cfg(any(target_arch = "aarch64", target_arch = "arm"))] + { + let decoder = MjpegRkmppDecoder::new(config.resolution)?; + *self.mjpeg_decoder.lock().await = Some(MjpegDecoderKind::Rkmpp(decoder)); + PixelFormat::Nv12 + } + #[cfg(not(any(target_arch = "aarch64", target_arch = "arm")))] + { + return Err(AppError::VideoError( + "RKMPP MJPEG decode is only supported on ARM builds".to_string(), + )); + } + } else if is_software_encoder { + info!( + "MJPEG input detected, using TurboJPEG decoder ({} -> RGB24)", + config.input_format + ); + let decoder = MjpegTurboDecoder::new(config.resolution)?; + *self.mjpeg_decoder.lock().await = Some(MjpegDecoderKind::Turbo(decoder)); + PixelFormat::Rgb24 + } else { + return Err(AppError::VideoError( + "MJPEG input requires RKMPP or software encoder".to_string(), + )); + } + } else { + *self.mjpeg_decoder.lock().await = None; + config.input_format + }; + + // Create encoder based on codec type + let encoder: Box = match config.output_codec { + VideoEncoderType::H264 => { + let codec_name = selected_codec_name.clone(); let is_rkmpp = codec_name.contains("rkmpp"); let direct_input_format = if is_rkmpp { - match config.input_format { + match pipeline_input_format { PixelFormat::Yuyv => Some(H264InputFormat::Yuyv422), PixelFormat::Yuv420 => Some(H264InputFormat::Yuv420p), PixelFormat::Rgb24 => Some(H264InputFormat::Rgb24), @@ -439,7 +561,7 @@ impl SharedVideoPipeline { _ => None, } } else if codec_name.contains("libx264") { - match config.input_format { + match pipeline_input_format { PixelFormat::Nv12 => Some(H264InputFormat::Nv12), PixelFormat::Nv16 => Some(H264InputFormat::Nv16), PixelFormat::Nv21 => Some(H264InputFormat::Nv21), @@ -485,32 +607,11 @@ impl SharedVideoPipeline { Box::new(H264EncoderWrapper(encoder)) } VideoEncoderType::H265 => { - let codec_name = if use_rkmpp_direct { - get_codec_name(VideoEncoderType::H265, Some(EncoderBackend::Rkmpp)).ok_or_else( - || { - AppError::VideoError( - "RKMPP backend not available for H.265".to_string(), - ) - }, - )? - } else if let Some(ref backend) = config.encoder_backend { - get_codec_name(VideoEncoderType::H265, Some(*backend)).ok_or_else(|| { - AppError::VideoError(format!( - "Backend {:?} does not support H.265", - backend - )) - })? - } else { - let (_encoder_type, detected) = - detect_best_h265_encoder(config.resolution.width, config.resolution.height); - detected.ok_or_else(|| { - AppError::VideoError("No H.265 encoder available".to_string()) - })? 
- }; + let codec_name = selected_codec_name.clone(); let is_rkmpp = codec_name.contains("rkmpp"); let direct_input_format = if is_rkmpp { - match config.input_format { + match pipeline_input_format { PixelFormat::Yuyv => Some(H265InputFormat::Yuyv422), PixelFormat::Yuv420 => Some(H265InputFormat::Yuv420p), PixelFormat::Rgb24 => Some(H265InputFormat::Rgb24), @@ -522,7 +623,7 @@ impl SharedVideoPipeline { _ => None, } } else if codec_name.contains("libx265") { - match config.input_format { + match pipeline_input_format { PixelFormat::Yuv420 => Some(H265InputFormat::Yuv420p), _ => None, } @@ -572,23 +673,14 @@ impl SharedVideoPipeline { VideoEncoderType::VP8 => { let encoder_config = VP8Config::low_latency(config.resolution, config.bitrate_kbps()); - - let encoder = if let Some(ref backend) = config.encoder_backend { - let codec_name = get_codec_name(VideoEncoderType::VP8, Some(*backend)) - .ok_or_else(|| { - AppError::VideoError(format!( - "Backend {:?} does not support VP8", - backend - )) - })?; + let codec_name = selected_codec_name.clone(); + if let Some(ref backend) = config.encoder_backend { info!( "Creating VP8 encoder with backend {:?} (codec: {})", backend, codec_name ); - VP8Encoder::with_codec(encoder_config, &codec_name)? - } else { - VP8Encoder::new(encoder_config)? - }; + } + let encoder = VP8Encoder::with_codec(encoder_config, &codec_name)?; info!("Created VP8 encoder: {}", encoder.codec_name()); Box::new(VP8EncoderWrapper(encoder)) @@ -596,23 +688,14 @@ impl SharedVideoPipeline { VideoEncoderType::VP9 => { let encoder_config = VP9Config::low_latency(config.resolution, config.bitrate_kbps()); - - let encoder = if let Some(ref backend) = config.encoder_backend { - let codec_name = get_codec_name(VideoEncoderType::VP9, Some(*backend)) - .ok_or_else(|| { - AppError::VideoError(format!( - "Backend {:?} does not support VP9", - backend - )) - })?; + let codec_name = selected_codec_name.clone(); + if let Some(ref backend) = config.encoder_backend { info!( "Creating VP9 encoder with backend {:?} (codec: {})", backend, codec_name ); - VP9Encoder::with_codec(encoder_config, &codec_name)? - } else { - VP9Encoder::new(encoder_config)? 
- }; + } + let encoder = VP9Encoder::with_codec(encoder_config, &codec_name)?; info!("Created VP9 encoder: {}", encoder.codec_name()); Box::new(VP9EncoderWrapper(encoder)) @@ -623,7 +706,7 @@ impl SharedVideoPipeline { let codec_name = encoder.codec_name(); let use_direct_input = if codec_name.contains("rkmpp") { matches!( - config.input_format, + pipeline_input_format, PixelFormat::Yuyv | PixelFormat::Yuv420 | PixelFormat::Rgb24 @@ -635,7 +718,7 @@ impl SharedVideoPipeline { ) } else if codec_name.contains("libx264") { matches!( - config.input_format, + pipeline_input_format, PixelFormat::Nv12 | PixelFormat::Nv16 | PixelFormat::Nv21 | PixelFormat::Yuv420 ) } else { @@ -645,7 +728,7 @@ impl SharedVideoPipeline { // Determine if encoder needs YUV420P (software encoders) or NV12 (hardware encoders) let needs_yuv420p = if codec_name.contains("libx264") { !matches!( - config.input_format, + pipeline_input_format, PixelFormat::Nv12 | PixelFormat::Nv16 | PixelFormat::Nv21 | PixelFormat::Yuv420 ) } else { @@ -667,7 +750,7 @@ impl SharedVideoPipeline { // Create converter or decoder based on input format and encoder needs info!( "Initializing input format handler for: {} -> {}", - config.input_format, + pipeline_input_format, if use_direct_input { "direct" } else if needs_yuv420p { @@ -686,7 +769,7 @@ impl SharedVideoPipeline { (None, None) } else if needs_yuv420p { // Software encoder needs YUV420P - match config.input_format { + match pipeline_input_format { PixelFormat::Yuv420 => { info!("Using direct YUV420P input (no conversion)"); (None, None) @@ -729,13 +812,13 @@ impl SharedVideoPipeline { _ => { return Err(AppError::VideoError(format!( "Unsupported input format for software encoding: {}", - config.input_format + pipeline_input_format ))); } } } else { // Hardware encoder needs NV12 - match config.input_format { + match pipeline_input_format { PixelFormat::Nv12 => { info!("Using direct NV12 input (no conversion)"); (None, None) @@ -767,7 +850,7 @@ impl SharedVideoPipeline { _ => { return Err(AppError::VideoError(format!( "Unsupported input format for hardware encoding: {}", - config.input_format + pipeline_input_format ))); } } @@ -857,6 +940,7 @@ impl SharedVideoPipeline { // Clear encoder state *self.encoder.lock().await = None; + *self.mjpeg_decoder.lock().await = None; *self.nv12_converter.lock().await = None; *self.yuv420p_converter.lock().await = None; self.encoder_needs_yuv420p.store(false, Ordering::Release); @@ -973,8 +1057,10 @@ impl SharedVideoPipeline { } // Batch update stats every second (reduces lock contention) - if last_fps_time.elapsed() >= Duration::from_secs(1) { - let current_fps = fps_frame_count as f32 / last_fps_time.elapsed().as_secs_f32(); + let fps_elapsed = last_fps_time.elapsed(); + if fps_elapsed >= Duration::from_secs(1) { + let current_fps = + fps_frame_count as f32 / fps_elapsed.as_secs_f32(); fps_frame_count = 0; last_fps_time = Instant::now(); @@ -1020,11 +1106,25 @@ impl SharedVideoPipeline { frame: &VideoFrame, frame_count: u64, ) -> Result> { - let config = self.config.read().await; + let (fps, codec, input_format) = { + let config = self.config.read().await; + (config.fps, config.output_codec, config.input_format) + }; + let raw_frame = frame.data(); - let fps = config.fps; - let codec = config.output_codec; - drop(config); + let decoded_buf = if input_format.is_compressed() { + let decoded = { + let mut decoder_guard = self.mjpeg_decoder.lock().await; + let decoder = decoder_guard.as_mut().ok_or_else(|| { + AppError::VideoError("MJPEG decoder not 
initialized".to_string()) + })?; + decoder.decode(raw_frame)? + }; + Some(decoded) + } else { + None + }; + let raw_frame = decoded_buf.as_deref().unwrap_or(raw_frame); // Calculate PTS from real capture timestamp (lock-free using AtomicI64) // This ensures smooth playback even when capture timing varies diff --git a/src/web/handlers/config/apply.rs b/src/web/handlers/config/apply.rs index 6dac7862..e80c8683 100644 --- a/src/web/handlers/config/apply.rs +++ b/src/web/handlers/config/apply.rs @@ -220,11 +220,8 @@ pub async fn apply_hid_config( // Get MSD config from store let config = state.config.get(); - let msd = crate::msd::MsdController::new( - state.otg_service.clone(), - &config.msd.images_path, - &config.msd.drive_path, - ); + let msd = + crate::msd::MsdController::new(state.otg_service.clone(), config.msd.msd_dir_path()); if let Err(e) = msd.init().await { tracing::warn!("Failed to auto-initialize MSD for OTG: {}", e); @@ -253,51 +250,73 @@ pub async fn apply_msd_config( // Check if MSD enabled state changed let old_msd_enabled = old_config.enabled; let new_msd_enabled = new_config.enabled; + let msd_dir_changed = old_config.msd_dir != new_config.msd_dir; tracing::info!( "MSD enabled: old={}, new={}", old_msd_enabled, new_msd_enabled ); + if msd_dir_changed { + tracing::info!("MSD directory changed: {}", new_config.msd_dir); + } - if old_msd_enabled != new_msd_enabled { - if new_msd_enabled { - // MSD was disabled, now enabled - need to initialize - tracing::info!("MSD enabled in config, initializing..."); + // Ensure MSD directories exist (msd/images, msd/ventoy) + let msd_dir = new_config.msd_dir_path(); + if let Err(e) = std::fs::create_dir_all(msd_dir.join("images")) { + tracing::warn!("Failed to create MSD images directory: {}", e); + } + if let Err(e) = std::fs::create_dir_all(msd_dir.join("ventoy")) { + tracing::warn!("Failed to create MSD ventoy directory: {}", e); + } - let msd = crate::msd::MsdController::new( - state.otg_service.clone(), - &new_config.images_path, - &new_config.drive_path, - ); - msd.init() - .await - .map_err(|e| AppError::Config(format!("MSD initialization failed: {}", e)))?; - - // Set event bus - let events = state.events.clone(); - msd.set_event_bus(events).await; - - // Store the initialized controller - *state.msd.write().await = Some(msd); - tracing::info!("MSD initialized successfully"); - } else { - // MSD was enabled, now disabled - shutdown - tracing::info!("MSD disabled in config, shutting down..."); - - if let Some(msd) = state.msd.write().await.as_mut() { - if let Err(e) = msd.shutdown().await { - tracing::warn!("MSD shutdown failed: {}", e); - } - } - *state.msd.write().await = None; - tracing::info!("MSD shutdown complete"); - } - } else { + let needs_reload = old_msd_enabled != new_msd_enabled || msd_dir_changed; + if !needs_reload { tracing::info!( - "MSD enabled state unchanged ({}), no reload needed", + "MSD enabled state unchanged ({}) and directory unchanged, no reload needed", new_msd_enabled ); + return Ok(()); + } + + if new_msd_enabled { + tracing::info!("(Re)initializing MSD..."); + + // Shutdown existing controller if present + let mut msd_guard = state.msd.write().await; + if let Some(msd) = msd_guard.as_mut() { + if let Err(e) = msd.shutdown().await { + tracing::warn!("MSD shutdown failed: {}", e); + } + } + *msd_guard = None; + drop(msd_guard); + + let msd = + crate::msd::MsdController::new(state.otg_service.clone(), new_config.msd_dir_path()); + msd.init() + .await + .map_err(|e| AppError::Config(format!("MSD 
initialization failed: {}", e)))?; + + // Set event bus + let events = state.events.clone(); + msd.set_event_bus(events).await; + + // Store the initialized controller + *state.msd.write().await = Some(msd); + tracing::info!("MSD initialized successfully"); + } else { + // MSD disabled - shutdown + tracing::info!("MSD disabled in config, shutting down..."); + + let mut msd_guard = state.msd.write().await; + if let Some(msd) = msd_guard.as_mut() { + if let Err(e) = msd.shutdown().await { + tracing::warn!("MSD shutdown failed: {}", e); + } + } + *msd_guard = None; + tracing::info!("MSD shutdown complete"); } Ok(()) diff --git a/src/web/handlers/config/types.rs b/src/web/handlers/config/types.rs index b23bdb49..a3b0e049 100644 --- a/src/web/handlers/config/types.rs +++ b/src/web/handlers/config/types.rs @@ -3,6 +3,7 @@ use crate::error::AppError; use crate::rustdesk::config::RustDeskConfig; use crate::video::encoder::BitratePreset; use serde::Deserialize; +use std::path::Path; use typeshare::typeshare; // ===== Video Config ===== @@ -305,16 +306,20 @@ impl HidConfigUpdate { #[derive(Debug, Deserialize)] pub struct MsdConfigUpdate { pub enabled: Option, - pub images_path: Option, - pub drive_path: Option, - pub virtual_drive_size_mb: Option, + pub msd_dir: Option, } impl MsdConfigUpdate { pub fn validate(&self) -> crate::error::Result<()> { - if let Some(size) = self.virtual_drive_size_mb { - if !(1..=10240).contains(&size) { - return Err(AppError::BadRequest("Drive size must be 1-10240 MB".into())); + if let Some(ref dir) = self.msd_dir { + let trimmed = dir.trim(); + if trimmed.is_empty() { + return Err(AppError::BadRequest("MSD directory cannot be empty".into())); + } + if !Path::new(trimmed).is_absolute() { + return Err(AppError::BadRequest( + "MSD directory must be an absolute path".into(), + )); } } Ok(()) @@ -324,14 +329,8 @@ impl MsdConfigUpdate { if let Some(enabled) = self.enabled { config.enabled = enabled; } - if let Some(ref path) = self.images_path { - config.images_path = path.clone(); - } - if let Some(ref path) = self.drive_path { - config.drive_path = path.clone(); - } - if let Some(size) = self.virtual_drive_size_mb { - config.virtual_drive_size_mb = size; + if let Some(ref dir) = self.msd_dir { + config.msd_dir = dir.trim().to_string(); } } } diff --git a/src/web/handlers/mod.rs b/src/web/handlers/mod.rs index a8bbf379..787697bf 100644 --- a/src/web/handlers/mod.rs +++ b/src/web/handlers/mod.rs @@ -90,12 +90,13 @@ pub struct CapabilityInfo { pub async fn system_info(State(state): State>) -> Json { let config = state.config.get(); - // Get disk space information for MSD images directory + // Get disk space information for MSD base directory let disk_space = { - if let Some(ref msd_controller) = *state.msd.read().await { - get_disk_space(msd_controller.images_path()).ok() - } else { + let msd_dir = config.msd.msd_dir_path(); + if msd_dir.as_os_str().is_empty() { None + } else { + get_disk_space(&msd_dir).ok() } }; @@ -933,66 +934,85 @@ pub async fn update_config( } } - // MSD config processing - reload if enabled state changed + // MSD config processing - reload if enabled state or directory changed if has_msd { tracing::info!("MSD config sent, checking if reload needed..."); tracing::debug!("Old MSD config: {:?}", old_config.msd); tracing::debug!("New MSD config: {:?}", new_config.msd); - // Check if MSD enabled state changed let old_msd_enabled = old_config.msd.enabled; let new_msd_enabled = new_config.msd.enabled; + let msd_dir_changed = old_config.msd.msd_dir != 
new_config.msd.msd_dir; tracing::info!( "MSD enabled: old={}, new={}", old_msd_enabled, new_msd_enabled ); + if msd_dir_changed { + tracing::info!("MSD directory changed: {}", new_config.msd.msd_dir); + } - if old_msd_enabled != new_msd_enabled { - if new_msd_enabled { - // MSD was disabled, now enabled - need to initialize - tracing::info!("MSD enabled in config, initializing..."); + // Ensure MSD directories exist (msd/images, msd/ventoy) + let msd_dir = new_config.msd.msd_dir_path(); + if let Err(e) = std::fs::create_dir_all(msd_dir.join("images")) { + tracing::warn!("Failed to create MSD images directory: {}", e); + } + if let Err(e) = std::fs::create_dir_all(msd_dir.join("ventoy")) { + tracing::warn!("Failed to create MSD ventoy directory: {}", e); + } - let msd = crate::msd::MsdController::new( - state.otg_service.clone(), - &new_config.msd.images_path, - &new_config.msd.drive_path, - ); - if let Err(e) = msd.init().await { - tracing::error!("MSD initialization failed: {}", e); - // Rollback config on failure - state.config.set((*old_config).clone()).await?; - return Ok(Json(LoginResponse { - success: false, - message: Some(format!("MSD initialization failed: {}", e)), - })); - } - - // Set event bus - let events = state.events.clone(); - msd.set_event_bus(events).await; - - // Store the initialized controller - *state.msd.write().await = Some(msd); - tracing::info!("MSD initialized successfully"); - } else { - // MSD was enabled, now disabled - shutdown - tracing::info!("MSD disabled in config, shutting down..."); - - if let Some(msd) = state.msd.write().await.as_mut() { - if let Err(e) = msd.shutdown().await { - tracing::warn!("MSD shutdown failed: {}", e); - } - } - *state.msd.write().await = None; - tracing::info!("MSD shutdown complete"); - } - } else { + let needs_reload = old_msd_enabled != new_msd_enabled || msd_dir_changed; + if !needs_reload { tracing::info!( - "MSD enabled state unchanged ({}), no reload needed", + "MSD enabled state unchanged ({}) and directory unchanged, no reload needed", new_msd_enabled ); + } else if new_msd_enabled { + tracing::info!("(Re)initializing MSD..."); + + // Shutdown existing controller if present + let mut msd_guard = state.msd.write().await; + if let Some(msd) = msd_guard.as_mut() { + if let Err(e) = msd.shutdown().await { + tracing::warn!("MSD shutdown failed: {}", e); + } + } + *msd_guard = None; + drop(msd_guard); + + let msd = crate::msd::MsdController::new( + state.otg_service.clone(), + new_config.msd.msd_dir_path(), + ); + if let Err(e) = msd.init().await { + tracing::error!("MSD initialization failed: {}", e); + // Rollback config on failure + state.config.set((*old_config).clone()).await?; + return Ok(Json(LoginResponse { + success: false, + message: Some(format!("MSD initialization failed: {}", e)), + })); + } + + // Set event bus + let events = state.events.clone(); + msd.set_event_bus(events).await; + + // Store the initialized controller + *state.msd.write().await = Some(msd); + tracing::info!("MSD initialized successfully"); + } else { + tracing::info!("MSD disabled in config, shutting down..."); + + let mut msd_guard = state.msd.write().await; + if let Some(msd) = msd_guard.as_mut() { + if let Err(e) = msd.shutdown().await { + tracing::warn!("MSD shutdown failed: {}", e); + } + } + *msd_guard = None; + tracing::info!("MSD shutdown complete"); } } @@ -2069,7 +2089,7 @@ pub async fn msd_status(State(state): State>) -> Result>) -> Result>> { let config = state.config.get(); - let images_path = 
std::path::PathBuf::from(&config.msd.images_path); + let images_path = config.msd.images_dir(); let manager = ImageManager::new(images_path); let images = manager.list()?; @@ -2082,7 +2102,7 @@ pub async fn msd_image_upload( mut multipart: Multipart, ) -> Result> { let config = state.config.get(); - let images_path = std::path::PathBuf::from(&config.msd.images_path); + let images_path = config.msd.images_dir(); let manager = ImageManager::new(images_path); while let Some(field) = multipart @@ -2115,7 +2135,7 @@ pub async fn msd_image_get( AxumPath(id): AxumPath, ) -> Result> { let config = state.config.get(); - let images_path = std::path::PathBuf::from(&config.msd.images_path); + let images_path = config.msd.images_dir(); let manager = ImageManager::new(images_path); let image = manager.get(&id)?; @@ -2128,7 +2148,7 @@ pub async fn msd_image_delete( AxumPath(id): AxumPath, ) -> Result> { let config = state.config.get(); - let images_path = std::path::PathBuf::from(&config.msd.images_path); + let images_path = config.msd.images_dir(); let manager = ImageManager::new(images_path); manager.delete(&id)?; @@ -2194,7 +2214,7 @@ pub async fn msd_connect( })?; // Get image info from ImageManager - let images_path = std::path::PathBuf::from(&config.msd.images_path); + let images_path = config.msd.images_dir(); let manager = ImageManager::new(images_path); let image = manager.get(&image_id)?; @@ -2240,7 +2260,7 @@ pub async fn msd_disconnect(State(state): State>) -> Result>) -> Result> { let config = state.config.get(); - let drive_path = std::path::PathBuf::from(&config.msd.drive_path); + let drive_path = config.msd.drive_path(); let drive = VentoyDrive::new(drive_path); if !drive.exists() { @@ -2257,7 +2277,7 @@ pub async fn msd_drive_init( Json(req): Json, ) -> Result> { let config = state.config.get(); - let drive_path = std::path::PathBuf::from(&config.msd.drive_path); + let drive_path = config.msd.drive_path(); let drive = VentoyDrive::new(drive_path); let info = drive.init(req.size_mb).await?; @@ -2281,7 +2301,7 @@ pub async fn msd_drive_delete(State(state): State>) -> Result>, ) -> Result>> { let config = state.config.get(); - let drive_path = std::path::PathBuf::from(&config.msd.drive_path); + let drive_path = config.msd.drive_path(); let drive = VentoyDrive::new(drive_path); let dir_path = params.get("path").map(|s| s.as_str()).unwrap_or("/"); @@ -2314,7 +2334,7 @@ pub async fn msd_drive_upload( mut multipart: Multipart, ) -> Result> { let config = state.config.get(); - let drive_path = std::path::PathBuf::from(&config.msd.drive_path); + let drive_path = config.msd.drive_path(); let drive = VentoyDrive::new(drive_path); let target_dir = params.get("path").map(|s| s.as_str()).unwrap_or("/"); @@ -2359,7 +2379,7 @@ pub async fn msd_drive_download( AxumPath(file_path): AxumPath, ) -> Result { let config = state.config.get(); - let drive_path = std::path::PathBuf::from(&config.msd.drive_path); + let drive_path = config.msd.drive_path(); let drive = VentoyDrive::new(drive_path); // Get file stream (returns file size and channel receiver) @@ -2393,7 +2413,7 @@ pub async fn msd_drive_file_delete( AxumPath(file_path): AxumPath, ) -> Result> { let config = state.config.get(); - let drive_path = std::path::PathBuf::from(&config.msd.drive_path); + let drive_path = config.msd.drive_path(); let drive = VentoyDrive::new(drive_path); drive.delete(&file_path).await?; @@ -2410,7 +2430,7 @@ pub async fn msd_drive_mkdir( AxumPath(dir_path): AxumPath, ) -> Result> { let config = state.config.get(); - let 
drive_path = std::path::PathBuf::from(&config.msd.drive_path); + let drive_path = config.msd.drive_path(); let drive = VentoyDrive::new(drive_path); drive.mkdir(&dir_path).await?; diff --git a/src/webrtc/universal_session.rs b/src/webrtc/universal_session.rs index f091408d..81f7e34c 100644 --- a/src/webrtc/universal_session.rs +++ b/src/webrtc/universal_session.rs @@ -586,7 +586,10 @@ impl UniversalSession { // Send encoded frame via RTP if let Err(e) = video_track - .write_frame(&encoded_frame.data, encoded_frame.is_keyframe) + .write_frame_bytes( + encoded_frame.data.clone(), + encoded_frame.is_keyframe, + ) .await { if frames_sent % 100 == 0 { diff --git a/src/webrtc/video_track.rs b/src/webrtc/video_track.rs index 7705dc74..ef4614cd 100644 --- a/src/webrtc/video_track.rs +++ b/src/webrtc/video_track.rs @@ -310,7 +310,7 @@ impl UniversalVideoTrack { /// Handles codec-specific processing: /// - H264/H265: NAL unit parsing, parameter caching /// - VP8/VP9: Direct frame sending - pub async fn write_frame(&self, data: &[u8], is_keyframe: bool) -> Result<()> { + pub async fn write_frame_bytes(&self, data: Bytes, is_keyframe: bool) -> Result<()> { if data.is_empty() { return Ok(()); } @@ -323,11 +323,16 @@ impl UniversalVideoTrack { } } + pub async fn write_frame(&self, data: &[u8], is_keyframe: bool) -> Result<()> { + self.write_frame_bytes(Bytes::copy_from_slice(data), is_keyframe) + .await + } + /// Write H264 frame (Annex B format) /// /// Sends the entire Annex B frame as a single Sample to allow the /// H264Payloader to aggregate SPS+PPS into STAP-A packets. - async fn write_h264_frame(&self, data: &[u8], is_keyframe: bool) -> Result<()> { + async fn write_h264_frame(&self, data: Bytes, is_keyframe: bool) -> Result<()> { // Send entire Annex B frame as one Sample // The H264Payloader in rtp crate will: // 1. Parse NAL units from Annex B format @@ -335,8 +340,9 @@ impl UniversalVideoTrack { // 3. Aggregate SPS+PPS+IDR into STAP-A when possible // 4. Fragment large NALs using FU-A let frame_duration = Duration::from_micros(1_000_000 / self.config.fps.max(1) as u64); + let data_len = data.len(); let sample = Sample { - data: Bytes::copy_from_slice(data), + data, duration: frame_duration, ..Default::default() }; @@ -355,7 +361,7 @@ impl UniversalVideoTrack { // Update stats let mut stats = self.stats.lock().await; stats.frames_sent += 1; - stats.bytes_sent += data.len() as u64; + stats.bytes_sent += data_len as u64; if is_keyframe { stats.keyframes_sent += 1; } @@ -367,18 +373,19 @@ impl UniversalVideoTrack { /// /// Pass raw Annex B data directly to the official HevcPayloader. /// The payloader handles NAL parsing, VPS/SPS/PPS caching, AP generation, and FU fragmentation. 
- async fn write_h265_frame(&self, data: &[u8], is_keyframe: bool) -> Result<()> { + async fn write_h265_frame(&self, data: Bytes, is_keyframe: bool) -> Result<()> { // Pass raw Annex B data directly to the official HevcPayloader self.send_h265_rtp(data, is_keyframe).await } /// Write VP8 frame - async fn write_vp8_frame(&self, data: &[u8], is_keyframe: bool) -> Result<()> { + async fn write_vp8_frame(&self, data: Bytes, is_keyframe: bool) -> Result<()> { // VP8 frames are sent directly without NAL parsing // Calculate frame duration based on configured FPS let frame_duration = Duration::from_micros(1_000_000 / self.config.fps.max(1) as u64); + let data_len = data.len(); let sample = Sample { - data: Bytes::copy_from_slice(data), + data, duration: frame_duration, ..Default::default() }; @@ -397,7 +404,7 @@ impl UniversalVideoTrack { // Update stats let mut stats = self.stats.lock().await; stats.frames_sent += 1; - stats.bytes_sent += data.len() as u64; + stats.bytes_sent += data_len as u64; if is_keyframe { stats.keyframes_sent += 1; } @@ -406,12 +413,13 @@ impl UniversalVideoTrack { } /// Write VP9 frame - async fn write_vp9_frame(&self, data: &[u8], is_keyframe: bool) -> Result<()> { + async fn write_vp9_frame(&self, data: Bytes, is_keyframe: bool) -> Result<()> { // VP9 frames are sent directly without NAL parsing // Calculate frame duration based on configured FPS let frame_duration = Duration::from_micros(1_000_000 / self.config.fps.max(1) as u64); + let data_len = data.len(); let sample = Sample { - data: Bytes::copy_from_slice(data), + data, duration: frame_duration, ..Default::default() }; @@ -430,7 +438,7 @@ impl UniversalVideoTrack { // Update stats let mut stats = self.stats.lock().await; stats.frames_sent += 1; - stats.bytes_sent += data.len() as u64; + stats.bytes_sent += data_len as u64; if is_keyframe { stats.keyframes_sent += 1; } @@ -439,7 +447,7 @@ impl UniversalVideoTrack { } /// Send H265 NAL units via custom H265Payloader - async fn send_h265_rtp(&self, data: &[u8], is_keyframe: bool) -> Result<()> { + async fn send_h265_rtp(&self, payload: Bytes, is_keyframe: bool) -> Result<()> { let rtp_track = match &self.track { TrackType::Rtp(t) => t, TrackType::Sample(_) => { @@ -459,7 +467,6 @@ impl UniversalVideoTrack { // Minimize lock hold time: only hold lock during payload generation and state update let (payloads, timestamp, seq_start, num_payloads) = { let mut state = h265_state.lock().await; - let payload = Bytes::copy_from_slice(data); // Use custom H265Payloader to fragment the data let payloads = state.payloader.payload(RTP_MTU, &payload); diff --git a/web/package.json b/web/package.json index 62c70a1b..8f474a59 100644 --- a/web/package.json +++ b/web/package.json @@ -1,7 +1,7 @@ { "name": "web", "private": true, - "version": "0.0.0", + "version": "0.1.1", "type": "module", "scripts": { "dev": "vite", diff --git a/web/src/components/ActionBar.vue b/web/src/components/ActionBar.vue index 09e8fd2a..d600f52b 100644 --- a/web/src/components/ActionBar.vue +++ b/web/src/components/ActionBar.vue @@ -49,8 +49,10 @@ const systemStore = useSystemStore() const overflowMenuOpen = ref(false) // MSD is only available when HID backend is not CH9329 (CH9329 is serial-only, no USB gadget) +const hidBackend = computed(() => (systemStore.hid?.backend ?? 
'').toLowerCase()) +const isCh9329Backend = computed(() => hidBackend.value.includes('ch9329')) const showMsd = computed(() => { - return props.isAdmin && systemStore.hid?.backend !== 'ch9329' + return props.isAdmin && !isCh9329Backend.value }) const props = defineProps<{ @@ -310,5 +312,5 @@ const extensionOpen = ref(false) - + diff --git a/web/src/components/VideoConfigPopover.vue b/web/src/components/VideoConfigPopover.vue index 9d1529bd..d5d08b53 100644 --- a/web/src/components/VideoConfigPopover.vue +++ b/web/src/components/VideoConfigPopover.vue @@ -114,6 +114,7 @@ function detectBrowserCodecSupport() { // Check if a codec is supported by browser const isBrowserSupported = (codecId: string): boolean => { + if (codecId === 'mjpeg') return true return browserSupportedCodecs.value.has(codecId) } @@ -704,7 +705,7 @@ watch(currentConfig, () => { v-for="format in availableFormats" :key="format.format" :value="format.format" - :class="['text-xs', { 'opacity-50': isFormatNotRecommended(format.format) }]" + class="text-xs" >
{{ format.description }} diff --git a/web/src/i18n/en-US.ts b/web/src/i18n/en-US.ts index 0b4a877a..03951874 100644 --- a/web/src/i18n/en-US.ts +++ b/web/src/i18n/en-US.ts @@ -502,6 +502,11 @@ export default { notAvailable: 'Not available', msdEnable: 'Enable MSD', msdEnableDesc: 'Enable to mount ISO images and virtual drives to the target machine', + msdCh9329Warning: 'HID backend is CH9329, MSD is unavailable', + msdCh9329WarningDesc: 'CH9329 is a serial HID backend and does not support USB Gadget MSD', + msdDir: 'MSD directory', + msdDirDesc: 'MSD base directory containing images/ and ventoy/ subfolders', + msdDirHint: 'Changing this rebuilds MSD and updates console capacity stats', willBeEnabledAfterSave: 'Will be enabled after save', disabled: 'Disabled', msdDesc: 'Mass Storage Device allows you to mount ISO images and virtual drives to the target machine. Use the MSD panel on the main page to manage images.', diff --git a/web/src/i18n/zh-CN.ts b/web/src/i18n/zh-CN.ts index d2081995..c7464423 100644 --- a/web/src/i18n/zh-CN.ts +++ b/web/src/i18n/zh-CN.ts @@ -502,6 +502,11 @@ export default { notAvailable: '不可用', msdEnable: '启用 MSD', msdEnableDesc: '启用后可以挂载 ISO 镜像和虚拟驱动器到目标机器', + msdCh9329Warning: '当前 HID 后端为 CH9329,MSD 功能不可用', + msdCh9329WarningDesc: 'CH9329 为串口 HID 方案,不支持 USB Gadget 的 MSD 功能', + msdDir: 'MSD 目录', + msdDirDesc: 'MSD 根目录,内部包含 images/ 和 ventoy/ 两个子目录', + msdDirHint: '修改后会重建 MSD,控制台容量统计以该目录为准', willBeEnabledAfterSave: '保存后生效', disabled: '已禁用', msdDesc: '虚拟存储设备允许您将 ISO 镜像和虚拟驱动器挂载到目标机器。请在主页面的 MSD 面板中管理镜像。', diff --git a/web/src/types/generated.ts b/web/src/types/generated.ts index ff4e168b..0e5f5215 100644 --- a/web/src/types/generated.ts +++ b/web/src/types/generated.ts @@ -76,12 +76,8 @@ export interface HidConfig { export interface MsdConfig { /** Enable MSD functionality */ enabled: boolean; - /** Storage path for ISO/IMG images */ - images_path: string; - /** Path for Ventoy bootable drive file */ - drive_path: string; - /** Ventoy drive size in MB (minimum 1024 MB / 1 GB) */ - virtual_drive_size_mb: number; + /** MSD base directory (absolute path) */ + msd_dir: string; } /** Driver type for ATX key operations */ @@ -511,9 +507,7 @@ export interface HidConfigUpdate { export interface MsdConfigUpdate { enabled?: boolean; - images_path?: string; - drive_path?: string; - virtual_drive_size_mb?: number; + msd_dir?: string; } export interface RustDeskConfigUpdate { diff --git a/web/src/views/SettingsView.vue b/web/src/views/SettingsView.vue index b0d13853..98e6fa40 100644 --- a/web/src/views/SettingsView.vue +++ b/web/src/views/SettingsView.vue @@ -233,6 +233,7 @@ const config = ref({ hid_serial_device: '', hid_serial_baudrate: 9600, msd_enabled: false, + msd_dir: '', network_port: 8080, encoder_backend: 'auto', // STUN/TURN settings @@ -297,6 +298,8 @@ const selectedBackendFormats = computed(() => { return backend?.supported_formats || [] }) +const isCh9329Backend = computed(() => config.value.hid_backend === 'ch9329') + // Video selection computed properties import { watch } from 'vue' @@ -536,6 +539,7 @@ async function loadConfig() { hid_serial_device: hid.ch9329_port || '', hid_serial_baudrate: hid.ch9329_baudrate || 9600, msd_enabled: msd.enabled || false, + msd_dir: msd.msd_dir || '', network_port: 8080, // 从旧 API 加载 encoder_backend: stream.encoder || 'auto', // STUN/TURN settings @@ -1499,6 +1503,10 @@ onMounted(async () => { {{ t('settings.msdDesc') }} +
[The remainder of this SettingsView.vue template diff was garbled in extraction: the Vue markup was stripped, leaving only interpolation fragments, so the exact added markup cannot be reconstructed. Recoverable content: the hunk above (@@ -1499,6 +1503,10 @@) adds a warning block, shown when isCh9329Backend is true, that renders t('settings.msdCh9329Warning') and t('settings.msdCh9329WarningDesc'); a second hunk (@@ -1506,11 +1514,21 @@ onMounted(async () => {) adds an MSD directory text input bound to config.msd_dir, presumably labelled with the new t('settings.msdDir') string, with t('settings.msdDirDesc') and t('settings.msdDirHint') shown as helper text; the hunk's trailing context is the existing t('settings.msdStatus') row.]
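
The handler hunks above resolve every MSD location through helpers on MsdConfig (msd_dir_path(), images_dir(), drive_path()) instead of the removed images_path/drive_path fields. Those helper definitions are not part of this excerpt; the sketch below shows one plausible shape for them, assuming the images/ and ventoy/ subdirectory layout that update_config creates and a hypothetical ventoy.img file name for the virtual drive.

```rust
use std::path::PathBuf;

// Sketch only: the real MsdConfig lives in the config module and is not
// shown in this diff. Method names match the calls in the handlers above;
// the ventoy.img file name is an assumption.
pub struct MsdConfig {
    pub enabled: bool,
    /// MSD base directory (absolute path)
    pub msd_dir: String,
}

impl MsdConfig {
    /// Base directory as a PathBuf.
    pub fn msd_dir_path(&self) -> PathBuf {
        PathBuf::from(&self.msd_dir)
    }

    /// Storage location for uploaded ISO/IMG images.
    pub fn images_dir(&self) -> PathBuf {
        self.msd_dir_path().join("images")
    }

    /// Backing file for the Ventoy virtual drive.
    pub fn drive_path(&self) -> PathBuf {
        self.msd_dir_path().join("ventoy").join("ventoy.img")
    }
}
```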
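
The new MsdConfigUpdate::validate in types.rs trims the submitted msd_dir and rejects empty or relative values. A hedged test sketch (not part of the change set, with illustrative paths) exercising those rules:

```rust
// Hypothetical test module for MsdConfigUpdate::validate as defined in
// types.rs above; the assertions mirror the rules in the diff.
#[cfg(test)]
mod msd_dir_validation {
    use super::MsdConfigUpdate;

    #[test]
    fn rejects_empty_and_relative_paths() {
        // Whitespace-only values are trimmed and rejected as empty.
        let empty = MsdConfigUpdate { enabled: None, msd_dir: Some("   ".into()) };
        assert!(empty.validate().is_err());

        // Relative paths are rejected.
        let relative = MsdConfigUpdate { enabled: None, msd_dir: Some("msd/images".into()) };
        assert!(relative.validate().is_err());

        // Absolute paths pass validation.
        let absolute = MsdConfigUpdate { enabled: None, msd_dir: Some("/var/lib/one-kvm/msd".into()) };
        assert!(absolute.validate().is_ok());
    }
}
```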
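
On the video_track.rs change above: write_frame_bytes takes ownership of a bytes::Bytes payload so the RTP Sample can reuse the buffer without a memcpy, while the old &[u8] write_frame remains as a copying wrapper. The simplified sketch below (types reduced to the essentials) illustrates why the caller in universal_session.rs can pass encoded_frame.data.clone(), assuming that field is already a Bytes as the new call signature implies: cloning Bytes only bumps a reference count.

```rust
use bytes::Bytes;

// Simplified stand-in for UniversalVideoTrack; only the ownership flow is shown.
struct VideoTrack;

impl VideoTrack {
    async fn write_frame_bytes(&self, data: Bytes, _is_keyframe: bool) {
        // The Sample takes ownership of `data` directly - no copy on the hot path.
        let _sample_payload = data;
    }

    async fn write_frame(&self, data: &[u8], is_keyframe: bool) {
        // Compatibility wrapper for &[u8] callers: one explicit copy.
        self.write_frame_bytes(Bytes::copy_from_slice(data), is_keyframe).await;
    }
}

async fn forward(track: &VideoTrack, encoded: Bytes, is_keyframe: bool) {
    // Cloning Bytes is a cheap reference-count increment, not a data copy.
    track.write_frame_bytes(encoded.clone(), is_keyframe).await;
}
```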