feat: 适配 RK 原生 HDMI IN 采集

This commit is contained in:
mofeng-git
2026-04-01 21:28:15 +08:00
parent 51d7d8b8be
commit abb319068b
36 changed files with 1382 additions and 406 deletions

View File

@@ -25,6 +25,7 @@ enum AVPixelFormat {
AV_PIX_FMT_NV24 = 188,
};
int av_get_pix_fmt(const char *name);
int av_log_get_level(void);
void av_log_set_level(int level);
void hwcodec_set_av_log_callback();

View File

@@ -30,9 +30,15 @@ static int calculate_offset_length(int pix_fmt, int height, const int *linesize,
*length = offset[1] + linesize[2] * height / 2;
break;
case AV_PIX_FMT_NV12:
case AV_PIX_FMT_NV21:
offset[0] = linesize[0] * height;
*length = offset[0] + linesize[1] * height / 2;
break;
case AV_PIX_FMT_NV16:
case AV_PIX_FMT_NV24:
offset[0] = linesize[0] * height;
*length = offset[0] + linesize[1] * height;
break;
case AV_PIX_FMT_YUYV422:
case AV_PIX_FMT_YVYU422:
case AV_PIX_FMT_UYVY422:
@@ -41,6 +47,11 @@ static int calculate_offset_length(int pix_fmt, int height, const int *linesize,
offset[0] = 0; // Only one plane
*length = linesize[0] * height;
break;
case AV_PIX_FMT_RGB24:
case AV_PIX_FMT_BGR24:
offset[0] = 0; // Only one plane
*length = linesize[0] * height;
break;
default:
LOG_ERROR(std::string("unsupported pixfmt") + std::to_string(pix_fmt));
return -1;
@@ -397,9 +408,23 @@ private:
const int *const offset) {
switch (frame->format) {
case AV_PIX_FMT_NV12:
case AV_PIX_FMT_NV21:
if (data_length <
frame->height * (frame->linesize[0] + frame->linesize[1] / 2)) {
LOG_ERROR(std::string("fill_frame: NV12 data length error. data_length:") +
LOG_ERROR(std::string("fill_frame: NV12/NV21 data length error. data_length:") +
std::to_string(data_length) +
", linesize[0]:" + std::to_string(frame->linesize[0]) +
", linesize[1]:" + std::to_string(frame->linesize[1]));
return -1;
}
frame->data[0] = data;
frame->data[1] = data + offset[0];
break;
case AV_PIX_FMT_NV16:
case AV_PIX_FMT_NV24:
if (data_length <
frame->height * (frame->linesize[0] + frame->linesize[1])) {
LOG_ERROR(std::string("fill_frame: NV16/NV24 data length error. data_length:") +
std::to_string(data_length) +
", linesize[0]:" + std::to_string(frame->linesize[0]) +
", linesize[1]:" + std::to_string(frame->linesize[1]));
@@ -436,6 +461,17 @@ private:
}
frame->data[0] = data;
break;
case AV_PIX_FMT_RGB24:
case AV_PIX_FMT_BGR24:
if (data_length < frame->height * frame->linesize[0]) {
LOG_ERROR(std::string("fill_frame: RGB24/BGR24 data length error. data_length:") +
std::to_string(data_length) +
", linesize[0]:" + std::to_string(frame->linesize[0]) +
", height:" + std::to_string(frame->height));
return -1;
}
frame->data[0] = data;
break;
default:
LOG_ERROR(std::string("fill_frame: unsupported format, ") +
std::to_string(frame->format));

View File

@@ -6,7 +6,7 @@
include!(concat!(env!("OUT_DIR"), "/ffmpeg_ffi.rs"));
use serde_derive::{Deserialize, Serialize};
use std::env;
use std::{env, ffi::CString};
#[derive(Debug, Eq, PartialEq, Clone, Copy, Serialize, Deserialize)]
pub enum AVHWDeviceType {
@@ -59,6 +59,22 @@ pub(crate) fn init_av_log() {
});
}
/// Look up an FFmpeg pixel format by name via `av_get_pix_fmt`.
///
/// Returns the resolved format id, or `fallback` (as `i32`) when the name
/// contains an interior NUL byte or is unknown to FFmpeg (negative result).
pub fn resolve_pixel_format(name: &str, fallback: AVPixelFormat) -> i32 {
    match CString::new(name) {
        Ok(c_name) => {
            // SAFETY: `c_name` is a valid NUL-terminated C string for the
            // duration of the FFI call.
            let resolved = unsafe { av_get_pix_fmt(c_name.as_ptr()) };
            if resolved >= 0 {
                resolved
            } else {
                fallback as i32
            }
        }
        // Interior NUL: the name cannot be passed to C, use the fallback.
        Err(_) => fallback as i32,
    }
}
fn parse_ffmpeg_log_level() -> i32 {
let raw = match env::var("ONE_KVM_FFMPEG_LOG") {
Ok(value) => value,

View File

@@ -243,7 +243,8 @@ fn enumerate_candidate_codecs(ctx: &EncodeContext) -> Vec<CodecInfo> {
}
codecs.retain(|codec| {
!(ctx.pixfmt == AVPixelFormat::AV_PIX_FMT_YUV420P && codec.name.contains("qsv"))
!(ctx.pixfmt == AVPixelFormat::AV_PIX_FMT_YUV420P as i32
&& codec.name.contains("qsv"))
});
codecs
}
@@ -428,7 +429,7 @@ pub struct EncodeContext {
pub mc_name: Option<String>,
pub width: i32,
pub height: i32,
pub pixfmt: AVPixelFormat,
pub pixfmt: i32,
pub align: i32,
pub fps: i32,
pub gop: i32,
@@ -483,7 +484,7 @@ impl Encoder {
CString::new(mc_name.as_str()).map_err(|_| ())?.as_ptr(),
ctx.width,
ctx.height,
ctx.pixfmt as c_int,
ctx.pixfmt,
ctx.align,
ctx.fps,
ctx.gop,

View File

@@ -5,7 +5,6 @@
use crate::common::DataFormat::{self, *};
use crate::ffmpeg::{
AVHWDeviceType::{self, *},
AVPixelFormat,
};
use serde_derive::{Deserialize, Serialize};
use std::ffi::c_int;
@@ -234,7 +233,7 @@ impl CodecInfos {
}
pub fn ffmpeg_linesize_offset_length(
pixfmt: AVPixelFormat,
pixfmt: i32,
width: usize,
height: usize,
align: usize,
@@ -247,7 +246,7 @@ pub fn ffmpeg_linesize_offset_length(
length.resize(1, 0);
unsafe {
if ffmpeg_ram_get_linesize_offset_length(
pixfmt as _,
pixfmt,
width as _,
height as _,
align as _,

View File

@@ -34,10 +34,12 @@ fn generate_bindings(cpp_dir: &Path) {
.allowlist_function("I420Copy")
// I422 conversions
.allowlist_function("I422ToI420")
.allowlist_function("I444ToI420")
// NV12/NV21 conversions
.allowlist_function("NV12ToI420")
.allowlist_function("NV21ToI420")
.allowlist_function("NV12Copy")
.allowlist_function("SplitUVPlane")
// ARGB/BGRA conversions
.allowlist_function("ARGBToI420")
.allowlist_function("ARGBToNV12")
@@ -53,6 +55,7 @@ fn generate_bindings(cpp_dir: &Path) {
// YUV to RGB conversions
.allowlist_function("I420ToRGB24")
.allowlist_function("I420ToARGB")
.allowlist_function("H444ToARGB")
.allowlist_function("NV12ToRGB24")
.allowlist_function("NV12ToARGB")
.allowlist_function("YUY2ToARGB")

View File

@@ -58,6 +58,15 @@ int I422ToI420(const uint8_t* src_y, int src_stride_y,
uint8_t* dst_v, int dst_stride_v,
int width, int height);
// I444 (YUV444P) -> I420 (YUV420P) with horizontal and vertical chroma downsampling
int I444ToI420(const uint8_t* src_y, int src_stride_y,
const uint8_t* src_u, int src_stride_u,
const uint8_t* src_v, int src_stride_v,
uint8_t* dst_y, int dst_stride_y,
uint8_t* dst_u, int dst_stride_u,
uint8_t* dst_v, int dst_stride_v,
int width, int height);
// I420 -> NV12
int I420ToNV12(const uint8_t* src_y, int src_stride_y,
const uint8_t* src_u, int src_stride_u,
@@ -94,6 +103,12 @@ int NV21ToI420(const uint8_t* src_y, int src_stride_y,
uint8_t* dst_v, int dst_stride_v,
int width, int height);
// Split interleaved UV plane into separate U and V planes
void SplitUVPlane(const uint8_t* src_uv, int src_stride_uv,
uint8_t* dst_u, int dst_stride_u,
uint8_t* dst_v, int dst_stride_v,
int width, int height);
// ----------------------------------------------------------------------------
// ARGB/BGRA conversions (32-bit RGB)
// Note: libyuv uses ARGB to mean BGRA in memory (little-endian)
@@ -180,6 +195,13 @@ int I420ToARGB(const uint8_t* src_y, int src_stride_y,
uint8_t* dst_argb, int dst_stride_argb,
int width, int height);
// H444 (BT.709 limited-range YUV444P) -> ARGB (BGRA)
int H444ToARGB(const uint8_t* src_y, int src_stride_y,
const uint8_t* src_u, int src_stride_u,
const uint8_t* src_v, int src_stride_v,
uint8_t* dst_argb, int dst_stride_argb,
int width, int height);
// NV12 -> RGB24
int NV12ToRGB24(const uint8_t* src_y, int src_stride_y,
const uint8_t* src_uv, int src_stride_uv,

View File

@@ -297,6 +297,94 @@ pub fn i422_to_i420_planar(
))
}
/// Convert I444 (YUV444P) to I420 (YUV420P) with separate planes and explicit strides.
/// This performs horizontal and vertical chroma downsampling using SIMD.
///
/// `dst` receives a tightly packed I420 image (Y plane, then U, then V).
/// Returns `InvalidDimensions` for non-positive/odd dimensions or strides
/// smaller than a row, and `BufferTooSmall` when any source plane or `dst`
/// is too short for the declared geometry.
pub fn i444_to_i420_planar(
    src_y: &[u8],
    src_y_stride: i32,
    src_u: &[u8],
    src_u_stride: i32,
    src_v: &[u8],
    src_v_stride: i32,
    dst: &mut [u8],
    width: i32,
    height: i32,
) -> Result<()> {
    // Reject non-positive dims before the unsigned casts below, and odd dims
    // because I420 chroma is subsampled 2x in both directions.
    if width <= 0 || height <= 0 || width % 2 != 0 || height % 2 != 0 {
        return Err(YuvError::InvalidDimensions);
    }
    // I444 planes are full resolution, so every stride must cover one row.
    if src_y_stride < width || src_u_stride < width || src_v_stride < width {
        return Err(YuvError::InvalidDimensions);
    }
    let w = width as usize;
    let h = height as usize;
    let y_size = w * h;
    let uv_size = (w / 2) * (h / 2);
    // Validate the source planes against their declared strides so the FFI
    // call cannot read past the end of a caller-provided buffer. The last
    // row only needs `w` bytes, not a full stride.
    let required = |stride: i32| (stride as usize).saturating_mul(h - 1).saturating_add(w);
    if src_y.len() < required(src_y_stride)
        || src_u.len() < required(src_u_stride)
        || src_v.len() < required(src_v_stride)
    {
        return Err(YuvError::BufferTooSmall);
    }
    if dst.len() < i420_size(w, h) {
        return Err(YuvError::BufferTooSmall);
    }
    call_yuv!(I444ToI420(
        src_y.as_ptr(),
        src_y_stride,
        src_u.as_ptr(),
        src_u_stride,
        src_v.as_ptr(),
        src_v_stride,
        dst.as_mut_ptr(),
        width,
        dst[y_size..].as_mut_ptr(),
        width / 2,
        dst[y_size + uv_size..].as_mut_ptr(),
        width / 2,
        width,
        height,
    ))
}
/// Split an interleaved UV plane into separate U and V planes using libyuv SIMD helpers.
///
/// `width` is the number of chroma samples per row, not the number of source
/// bytes; each source row therefore holds `2 * width` bytes.
pub fn split_uv_plane(
    src_uv: &[u8],
    src_stride_uv: i32,
    dst_u: &mut [u8],
    dst_stride_u: i32,
    dst_v: &mut [u8],
    dst_stride_v: i32,
    width: i32,
    height: i32,
) -> Result<()> {
    if width <= 0 || height <= 0 {
        return Err(YuvError::InvalidDimensions);
    }
    // Each source row carries `2 * width` interleaved bytes and each
    // destination row `width` bytes. A stride smaller than a row would make
    // libyuv read/write outside the region validated below, so reject it.
    // (i64 math avoids overflow on `2 * width`.)
    if (src_stride_uv as i64) < 2 * width as i64
        || (dst_stride_u as i64) < width as i64
        || (dst_stride_v as i64) < width as i64
    {
        return Err(YuvError::InvalidDimensions);
    }
    let width = width as usize;
    let height = height as usize;
    let src_required = (src_stride_uv as usize).saturating_mul(height);
    let dst_u_required = (dst_stride_u as usize).saturating_mul(height);
    let dst_v_required = (dst_stride_v as usize).saturating_mul(height);
    if src_uv.len() < src_required || dst_u.len() < dst_u_required || dst_v.len() < dst_v_required
    {
        return Err(YuvError::BufferTooSmall);
    }
    // SAFETY: all buffers were validated above to cover `stride * height`
    // bytes with strides no smaller than one row.
    unsafe {
        SplitUVPlane(
            src_uv.as_ptr(),
            src_stride_uv,
            dst_u.as_mut_ptr(),
            dst_stride_u,
            dst_v.as_mut_ptr(),
            dst_stride_v,
            width as i32,
            height as i32,
        );
    }
    Ok(())
}
// ============================================================================
// I420 <-> NV12 conversions
// ============================================================================
@@ -761,6 +849,41 @@ pub fn i420_to_bgra(src: &[u8], dst: &mut [u8], width: i32, height: i32) -> Resu
))
}
/// Convert H444 (BT.709 limited-range YUV444P) to BGRA.
///
/// All three source planes are assumed tightly packed (stride == width).
/// Returns `InvalidDimensions` for non-positive dimensions and
/// `BufferTooSmall` when a source plane or `dst` is too short.
pub fn h444_to_bgra(
    src_y: &[u8],
    src_u: &[u8],
    src_v: &[u8],
    dst: &mut [u8],
    width: i32,
    height: i32,
) -> Result<()> {
    // Reject non-positive dimensions before the unsigned casts below, which
    // would otherwise wrap a negative value into a huge plane size.
    if width <= 0 || height <= 0 {
        return Err(YuvError::InvalidDimensions);
    }
    let w = width as usize;
    let h = height as usize;
    // 4:4:4 means every plane (Y, U, V) is full resolution.
    let plane_size = w * h;
    if src_y.len() < plane_size || src_u.len() < plane_size || src_v.len() < plane_size {
        return Err(YuvError::BufferTooSmall);
    }
    if dst.len() < argb_size(w, h) {
        return Err(YuvError::BufferTooSmall);
    }
    call_yuv!(H444ToARGB(
        src_y.as_ptr(),
        width,
        src_u.as_ptr(),
        width,
        src_v.as_ptr(),
        width,
        dst.as_mut_ptr(),
        width * 4,
        width,
        height,
    ))
}
/// Convert NV12 to RGB24
pub fn nv12_to_rgb24(src: &[u8], dst: &mut [u8], width: i32, height: i32) -> Result<()> {
if width % 2 != 0 || height % 2 != 0 {

341
scripts/build-update-site.sh Executable file
View File

@@ -0,0 +1,341 @@
#!/usr/bin/env bash
#
# Build the One-KVM online-update static site and package it as a deployable tar.gz.
# Output layout:
# <site_name>/v1/channels.json
# <site_name>/v1/releases.json
# <site_name>/v1/bin/<version>/one-kvm-<triple>
#
set -euo pipefail
# Resolve the repository root relative to this script's location.
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
PROJECT_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)"
# Release identity and channel pointers (filled in from CLI flags below).
VERSION=""
RELEASE_CHANNEL="stable"
STABLE_VERSION=""
BETA_VERSION=""
PUBLISHED_AT="$(date -u +"%Y-%m-%dT%H:%M:%SZ")"
# Artifact inputs: a directory scan and/or explicit per-arch binary paths.
ARTIFACTS_DIR=""
X86_64_BIN=""
AARCH64_BIN=""
ARMV7_BIN=""
# *_SET=1 means the path was given explicitly, so its existence is enforced.
X86_64_SET=0
AARCH64_SET=0
ARMV7_SET=0
# Packaging options.
SITE_NAME="one-kvm-update"
OUTPUT_FILE=""
OUTPUT_DIR="${PROJECT_ROOT}/dist"
# Release notes, one entry per --note flag.
declare -a NOTES=()
# Print command-line help. The heredoc delimiter is quoted ('EOF') so the
# text is emitted verbatim with no variable expansion.
usage() {
cat <<'EOF'
Usage:
./scripts/build-update-site.sh --version <x.x.x> [options]
Required:
--version <x.x.x> Release 版本号(如 0.1.10)
Artifact input (二选一,可混用):
--artifacts-dir <dir> 自动扫描目录中的标准文件名:
one-kvm-x86_64-unknown-linux-gnu
one-kvm-aarch64-unknown-linux-gnu
one-kvm-armv7-unknown-linux-gnueabihf
--x86_64 <file> 指定 x86_64 二进制路径
--aarch64 <file> 指定 aarch64 二进制路径
--armv7 <file> 指定 armv7 二进制路径
Manifest options:
--release-channel <stable|beta> releases.json 里该版本所属渠道,默认 stable
--stable <x.x.x> channels.json 的 stable 指针,默认等于 --version
--beta <x.x.x> channels.json 的 beta 指针,默认等于 --version
--published-at <RFC3339> 发布时间,默认当前 UTC 时间
--note <text> 发布说明,可重复传入多次
Output options:
--site-name <name> 打包根目录名,默认 one-kvm-update
--output-dir <dir> 输出目录(默认 <repo>/dist)
--output <file.tar.gz> 输出包完整路径(优先级高于 --output-dir)
Other:
-h, --help 显示帮助
Example:
./scripts/build-update-site.sh \
--version 0.1.10 \
--artifacts-dir ./target/release \
--release-channel stable \
--stable 0.1.10 \
--beta 0.1.11 \
--note "修复 WebRTC 断流问题" \
--note "优化 HID 输入延迟"
EOF
}
# Print an error to stderr and abort the script.
fail() {
echo "Error: $*" >&2
exit 1
}
# Abort unless the given external command is available on PATH.
require_cmd() {
local cmd="$1"
command -v "$cmd" >/dev/null 2>&1 || fail "Missing required command: ${cmd}"
}
# Escape a string for embedding in a JSON string literal
# (backslash, double quote, newline, carriage return, tab).
json_escape() {
local s="$1"
s=${s//\\/\\\\}
s=${s//\"/\\\"}
s=${s//$'\n'/\\n}
s=${s//$'\r'/\\r}
s=${s//$'\t'/\\t}
printf '%s' "$s"
}
# Succeeds when the argument looks like a semantic version x.y.z.
is_valid_version() {
local v="$1"
[[ "$v" =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]
}
# Succeeds for the two supported release channels.
is_valid_channel() {
local c="$1"
[[ "$c" == "stable" || "$c" == "beta" ]]
}
# ---- Command-line parsing ----
# Every option takes a value; "${2:-}" avoids an unbound-variable error
# under `set -u` when the value is missing (validation happens later).
while [[ $# -gt 0 ]]; do
case "$1" in
--version)
VERSION="${2:-}"
shift 2
;;
--release-channel)
RELEASE_CHANNEL="${2:-}"
shift 2
;;
--stable)
STABLE_VERSION="${2:-}"
shift 2
;;
--beta)
BETA_VERSION="${2:-}"
shift 2
;;
--published-at)
PUBLISHED_AT="${2:-}"
shift 2
;;
--note)
# Repeatable: each occurrence appends one release note.
NOTES+=("${2:-}")
shift 2
;;
--artifacts-dir)
ARTIFACTS_DIR="${2:-}"
shift 2
;;
--x86_64)
X86_64_BIN="${2:-}"
X86_64_SET=1
shift 2
;;
--aarch64)
AARCH64_BIN="${2:-}"
AARCH64_SET=1
shift 2
;;
--armv7)
ARMV7_BIN="${2:-}"
ARMV7_SET=1
shift 2
;;
--site-name)
SITE_NAME="${2:-}"
shift 2
;;
--output-dir)
OUTPUT_DIR="${2:-}"
shift 2
;;
--output)
OUTPUT_FILE="${2:-}"
shift 2
;;
-h | --help)
usage
exit 0
;;
*)
fail "Unknown argument: $1 (use --help)"
;;
esac
done
# External tools needed for hashing, sizing, staging and packaging.
require_cmd sha256sum
require_cmd stat
require_cmd tar
require_cmd mktemp
[[ -n "$VERSION" ]] || fail "--version is required"
is_valid_version "$VERSION" || fail "Invalid --version: ${VERSION} (expected x.x.x)"
is_valid_channel "$RELEASE_CHANNEL" || fail "Invalid --release-channel: ${RELEASE_CHANNEL}"
# Channel pointers default to the version being released.
if [[ -z "$STABLE_VERSION" ]]; then
STABLE_VERSION="$VERSION"
fi
if [[ -z "$BETA_VERSION" ]]; then
BETA_VERSION="$VERSION"
fi
is_valid_version "$STABLE_VERSION" || fail "Invalid --stable: ${STABLE_VERSION}"
is_valid_version "$BETA_VERSION" || fail "Invalid --beta: ${BETA_VERSION}"
# --artifacts-dir fills in any per-arch path that was not given explicitly.
if [[ -n "$ARTIFACTS_DIR" ]]; then
[[ -d "$ARTIFACTS_DIR" ]] || fail "--artifacts-dir not found: ${ARTIFACTS_DIR}"
[[ -n "$X86_64_BIN" ]] || X86_64_BIN="${ARTIFACTS_DIR}/one-kvm-x86_64-unknown-linux-gnu"
[[ -n "$AARCH64_BIN" ]] || AARCH64_BIN="${ARTIFACTS_DIR}/one-kvm-aarch64-unknown-linux-gnu"
[[ -n "$ARMV7_BIN" ]] || ARMV7_BIN="${ARTIFACTS_DIR}/one-kvm-armv7-unknown-linux-gnueabihf"
fi
# Explicitly passed paths must exist; paths inferred from --artifacts-dir
# are allowed to be absent (the triple is simply skipped).
if [[ "$X86_64_SET" -eq 1 && ! -f "$X86_64_BIN" ]]; then
fail "--x86_64 file not found: ${X86_64_BIN}"
fi
if [[ "$AARCH64_SET" -eq 1 && ! -f "$AARCH64_BIN" ]]; then
fail "--aarch64 file not found: ${AARCH64_BIN}"
fi
if [[ "$ARMV7_SET" -eq 1 && ! -f "$ARMV7_BIN" ]]; then
fail "--armv7 file not found: ${ARMV7_BIN}"
fi
# Collect the artifacts that actually exist on disk, keyed by target triple.
declare -A SRC_BY_TRIPLE=()
if [[ -n "$X86_64_BIN" && -f "$X86_64_BIN" ]]; then
SRC_BY_TRIPLE["x86_64-unknown-linux-gnu"]="$X86_64_BIN"
fi
if [[ -n "$AARCH64_BIN" && -f "$AARCH64_BIN" ]]; then
SRC_BY_TRIPLE["aarch64-unknown-linux-gnu"]="$AARCH64_BIN"
fi
if [[ -n "$ARMV7_BIN" && -f "$ARMV7_BIN" ]]; then
SRC_BY_TRIPLE["armv7-unknown-linux-gnueabihf"]="$ARMV7_BIN"
fi
if [[ ${#SRC_BY_TRIPLE[@]} -eq 0 ]]; then
fail "No artifact found. Provide --artifacts-dir or at least one of --x86_64/--aarch64/--armv7."
fi
# Stage the site tree in a temp directory, removed automatically on exit.
BUILD_DIR="$(mktemp -d)"
trap 'rm -rf "$BUILD_DIR"' EXIT
SITE_DIR="${BUILD_DIR}/${SITE_NAME}"
V1_DIR="${SITE_DIR}/v1"
BIN_DIR="${V1_DIR}/bin/${VERSION}"
mkdir -p "$BIN_DIR"
# Copy each binary into the site and record its sha256 + size for the manifest.
declare -A SHA_BY_TRIPLE=()
declare -A SIZE_BY_TRIPLE=()
TRIPLES=(
"x86_64-unknown-linux-gnu"
"aarch64-unknown-linux-gnu"
"armv7-unknown-linux-gnueabihf"
)
for triple in "${TRIPLES[@]}"; do
src="${SRC_BY_TRIPLE[$triple]:-}"
if [[ -z "$src" ]]; then
continue
fi
[[ -f "$src" ]] || fail "Artifact not found for ${triple}: ${src}"
dest_name="one-kvm-${triple}"
dest_path="${BIN_DIR}/${dest_name}"
cp "$src" "$dest_path"
sha="$(sha256sum "$dest_path" | awk '{print $1}')"
# NOTE(review): `stat -c%s` is GNU stat (Linux-only) — size in bytes.
size="$(stat -c%s "$dest_path")"
SHA_BY_TRIPLE["$triple"]="$sha"
SIZE_BY_TRIPLE["$triple"]="$size"
done
# channels.json: channel-name -> version pointers (unquoted EOF expands vars).
cat >"${V1_DIR}/channels.json" <<EOF
{
"stable": "${STABLE_VERSION}",
"beta": "${BETA_VERSION}"
}
EOF
# releases.json: single-release manifest assembled by hand (no jq dependency).
RELEASES_FILE="${V1_DIR}/releases.json"
{
echo '{'
echo ' "releases": ['
echo ' {'
echo " \"version\": \"${VERSION}\","
echo " \"channel\": \"${RELEASE_CHANNEL}\","
echo " \"published_at\": \"${PUBLISHED_AT}\","
if [[ ${#NOTES[@]} -eq 0 ]]; then
echo ' "notes": [],'
else
echo ' "notes": ['
# Emit each note JSON-escaped; the last entry gets no trailing comma.
for i in "${!NOTES[@]}"; do
esc_note="$(json_escape "${NOTES[$i]}")"
if [[ "$i" -lt $((${#NOTES[@]} - 1)) ]]; then
echo " \"${esc_note}\","
else
echo " \"${esc_note}\""
fi
done
echo ' ],'
fi
echo ' "artifacts": {'
# `written` tracks whether a comma separator is needed before the next entry.
written=0
for triple in "${TRIPLES[@]}"; do
if [[ -z "${SHA_BY_TRIPLE[$triple]:-}" ]]; then
continue
fi
url="/v1/bin/${VERSION}/one-kvm-${triple}"
if [[ $written -eq 1 ]]; then
echo ','
fi
cat <<EOF
"${triple}": {
"url": "${url}",
"sha256": "${SHA_BY_TRIPLE[$triple]}",
"size": ${SIZE_BY_TRIPLE[$triple]}
}
EOF
written=1
done
echo
echo ' }'
echo ' }'
echo ' ]'
echo '}'
} >"$RELEASES_FILE"
# Resolve the final package path: explicit --output (made absolute relative to
# the repo root) wins over --output-dir + default name.
if [[ -n "$OUTPUT_FILE" ]]; then
if [[ "$OUTPUT_FILE" != /* ]]; then
OUTPUT_FILE="${PROJECT_ROOT}/${OUTPUT_FILE}"
fi
else
mkdir -p "$OUTPUT_DIR"
OUTPUT_FILE="${OUTPUT_DIR}/${SITE_NAME}-${VERSION}.tar.gz"
fi
mkdir -p "$(dirname "$OUTPUT_FILE")"
# Archive from BUILD_DIR so the tar root is exactly <site_name>/.
tar -C "$BUILD_DIR" -czf "$OUTPUT_FILE" "$SITE_NAME"
# Human-readable summary of what was produced.
echo "Build complete:"
echo " package: ${OUTPUT_FILE}"
echo " site root in tar: ${SITE_NAME}/"
echo " release version: ${VERSION}"
echo " release channel: ${RELEASE_CHANNEL}"
echo " channels: stable=${STABLE_VERSION}, beta=${BETA_VERSION}"
echo " artifacts:"
for triple in "${TRIPLES[@]}"; do
if [[ -n "${SHA_BY_TRIPLE[$triple]:-}" ]]; then
echo " - ${triple}: size=${SIZE_BY_TRIPLE[$triple]} sha256=${SHA_BY_TRIPLE[$triple]}"
fi
done
echo
echo "Deploy example:"
echo " tar -xzf \"${OUTPUT_FILE}\" -C /var/www/"
echo " # then ensure nginx root points to /var/www/${SITE_NAME}"

View File

@@ -318,6 +318,12 @@ impl MjpegStreamHandler {
PixelFormat::Nv12 => encoder
.encode_nv12(frame.data(), sequence)
.map_err(|e| format!("NV12 encode failed: {}", e))?,
PixelFormat::Nv16 => encoder
.encode_nv16(frame.data(), sequence)
.map_err(|e| format!("NV16 encode failed: {}", e))?,
PixelFormat::Nv24 => encoder
.encode_nv24(frame.data(), sequence)
.map_err(|e| format!("NV24 encode failed: {}", e))?,
PixelFormat::Rgb24 => encoder
.encode_rgb(frame.data(), sequence)
.map_err(|e| format!("RGB encode failed: {}", e))?,

View File

@@ -190,87 +190,70 @@ pub struct PixelConverter {
resolution: Resolution,
/// Output buffer (reused across conversions)
output_buffer: Yuv420pBuffer,
/// Scratch buffer for split chroma planes when converting semiplanar 4:2:2 / 4:4:4 input.
uv_split_buffer: Vec<u8>,
}
impl PixelConverter {
/// Create a new converter for YUYV → YUV420P
pub fn yuyv_to_yuv420p(resolution: Resolution) -> Self {
fn new(src_format: PixelFormat, dst_format: PixelFormat, resolution: Resolution) -> Self {
let max_uv_plane_size = (resolution.width * resolution.height) as usize;
Self {
src_format: PixelFormat::Yuyv,
dst_format: PixelFormat::Yuv420,
src_format,
dst_format,
resolution,
output_buffer: Yuv420pBuffer::new(resolution),
uv_split_buffer: vec![0u8; max_uv_plane_size * 2],
}
}
/// Create a new converter for YUYV → YUV420P
pub fn yuyv_to_yuv420p(resolution: Resolution) -> Self {
Self::new(PixelFormat::Yuyv, PixelFormat::Yuv420, resolution)
}
/// Create a new converter for UYVY → YUV420P
pub fn uyvy_to_yuv420p(resolution: Resolution) -> Self {
Self {
src_format: PixelFormat::Uyvy,
dst_format: PixelFormat::Yuv420,
resolution,
output_buffer: Yuv420pBuffer::new(resolution),
}
Self::new(PixelFormat::Uyvy, PixelFormat::Yuv420, resolution)
}
/// Create a new converter for YVYU → YUV420P
pub fn yvyu_to_yuv420p(resolution: Resolution) -> Self {
Self {
src_format: PixelFormat::Yvyu,
dst_format: PixelFormat::Yuv420,
resolution,
output_buffer: Yuv420pBuffer::new(resolution),
}
Self::new(PixelFormat::Yvyu, PixelFormat::Yuv420, resolution)
}
/// Create a new converter for NV12 → YUV420P
pub fn nv12_to_yuv420p(resolution: Resolution) -> Self {
Self {
src_format: PixelFormat::Nv12,
dst_format: PixelFormat::Yuv420,
resolution,
output_buffer: Yuv420pBuffer::new(resolution),
}
Self::new(PixelFormat::Nv12, PixelFormat::Yuv420, resolution)
}
/// Create a new converter for NV21 → YUV420P
pub fn nv21_to_yuv420p(resolution: Resolution) -> Self {
Self {
src_format: PixelFormat::Nv21,
dst_format: PixelFormat::Yuv420,
resolution,
output_buffer: Yuv420pBuffer::new(resolution),
Self::new(PixelFormat::Nv21, PixelFormat::Yuv420, resolution)
}
/// Create a new converter for NV16 → YUV420P
pub fn nv16_to_yuv420p(resolution: Resolution) -> Self {
Self::new(PixelFormat::Nv16, PixelFormat::Yuv420, resolution)
}
/// Create a new converter for NV24 → YUV420P
pub fn nv24_to_yuv420p(resolution: Resolution) -> Self {
Self::new(PixelFormat::Nv24, PixelFormat::Yuv420, resolution)
}
/// Create a new converter for YVU420 → YUV420P (swap U and V planes)
pub fn yvu420_to_yuv420p(resolution: Resolution) -> Self {
Self {
src_format: PixelFormat::Yvu420,
dst_format: PixelFormat::Yuv420,
resolution,
output_buffer: Yuv420pBuffer::new(resolution),
}
Self::new(PixelFormat::Yvu420, PixelFormat::Yuv420, resolution)
}
/// Create a new converter for RGB24 → YUV420P
pub fn rgb24_to_yuv420p(resolution: Resolution) -> Self {
Self {
src_format: PixelFormat::Rgb24,
dst_format: PixelFormat::Yuv420,
resolution,
output_buffer: Yuv420pBuffer::new(resolution),
}
Self::new(PixelFormat::Rgb24, PixelFormat::Yuv420, resolution)
}
/// Create a new converter for BGR24 → YUV420P
pub fn bgr24_to_yuv420p(resolution: Resolution) -> Self {
Self {
src_format: PixelFormat::Bgr24,
dst_format: PixelFormat::Yuv420,
resolution,
output_buffer: Yuv420pBuffer::new(resolution),
}
Self::new(PixelFormat::Bgr24, PixelFormat::Yuv420, resolution)
}
/// Convert a frame and return reference to the output buffer
@@ -304,6 +287,12 @@ impl PixelConverter {
AppError::VideoError(format!("libyuv conversion failed: {}", e))
})?;
}
(PixelFormat::Nv16, PixelFormat::Yuv420) => {
self.convert_nv16_to_yuv420p(input)?;
}
(PixelFormat::Nv24, PixelFormat::Yuv420) => {
self.convert_nv24_to_yuv420p(input)?;
}
(PixelFormat::Rgb24, PixelFormat::Yuv420) => {
libyuv::rgb24_to_i420(input, self.output_buffer.as_bytes_mut(), width, height)
.map_err(|e| {
@@ -429,6 +418,102 @@ impl PixelConverter {
Ok(())
}
    /// Convert NV16 (4:2:2 semiplanar) → YUV420P using libyuv split + I422 downsample
    ///
    /// NV16 layout: a full-resolution Y plane (`width * height` bytes) followed by
    /// an interleaved UV plane at half horizontal resolution — each row holds
    /// `width / 2` U/V byte pairs, i.e. `width` bytes, so the UV plane is also
    /// `width * height` bytes in total.
    fn convert_nv16_to_yuv420p(&mut self, nv16: &[u8]) -> Result<()> {
        let width = self.resolution.width as usize;
        let height = self.resolution.height as usize;
        let y_size = width * height;
        // NV16's UV plane is the same size as its Y plane (see layout note above).
        let uv_size = y_size;
        if nv16.len() < y_size + uv_size {
            return Err(AppError::VideoError(format!(
                "NV16 data too small: {} < {}",
                nv16.len(),
                y_size + uv_size
            )));
        }
        let src_uv = &nv16[y_size..y_size + uv_size];
        // Each deinterleaved 4:2:2 chroma plane is (width/2) * height bytes.
        let chroma_plane_size = y_size / 2;
        // Carve the U and V scratch planes out of the shared split buffer.
        let (u_plane_422, rest) = self.uv_split_buffer.split_at_mut(chroma_plane_size);
        let (v_plane_422, _) = rest.split_at_mut(chroma_plane_size);
        // Deinterleave UV: `width` source bytes per row → two (width/2)-byte rows.
        libyuv::split_uv_plane(
            src_uv,
            width as i32,
            u_plane_422,
            (width / 2) as i32,
            v_plane_422,
            (width / 2) as i32,
            (width / 2) as i32,
            height as i32,
        )
        .map_err(|e| AppError::VideoError(format!("libyuv NV16 split failed: {}", e)))?;
        // Downsample chroma vertically (I422 → I420) directly into the output buffer.
        libyuv::i422_to_i420_planar(
            &nv16[..y_size],
            width as i32,
            u_plane_422,
            (width / 2) as i32,
            v_plane_422,
            (width / 2) as i32,
            self.output_buffer.as_bytes_mut(),
            width as i32,
            height as i32,
        )
        .map_err(|e| AppError::VideoError(format!("libyuv NV16→I420 failed: {}", e)))?;
        Ok(())
    }
    /// Convert NV24 (4:4:4 semiplanar) → YUV420P using libyuv split + I444 downsample
    ///
    /// NV24 layout: a full-resolution Y plane (`width * height` bytes) followed by
    /// a full-resolution interleaved UV plane (`2 * width * height` bytes: every
    /// pixel carries one U and one V byte).
    fn convert_nv24_to_yuv420p(&mut self, nv24: &[u8]) -> Result<()> {
        let width = self.resolution.width as usize;
        let height = self.resolution.height as usize;
        let y_size = width * height;
        // NV24's interleaved UV plane is twice the Y plane size (full-res chroma).
        let uv_size = y_size * 2;
        if nv24.len() < y_size + uv_size {
            return Err(AppError::VideoError(format!(
                "NV24 data too small: {} < {}",
                nv24.len(),
                y_size + uv_size
            )));
        }
        let src_uv = &nv24[y_size..y_size + uv_size];
        // Each deinterleaved 4:4:4 chroma plane is width * height bytes; the shared
        // scratch buffer was sized for exactly two such planes.
        let chroma_plane_size = y_size;
        let (u_plane_444, rest) = self.uv_split_buffer.split_at_mut(chroma_plane_size);
        let (v_plane_444, _) = rest.split_at_mut(chroma_plane_size);
        // Deinterleave UV: 2*width source bytes per row → two width-byte rows.
        libyuv::split_uv_plane(
            src_uv,
            (width * 2) as i32,
            u_plane_444,
            width as i32,
            v_plane_444,
            width as i32,
            width as i32,
            height as i32,
        )
        .map_err(|e| AppError::VideoError(format!("libyuv NV24 split failed: {}", e)))?;
        // Downsample chroma 2x in both directions (I444 → I420) into the output.
        libyuv::i444_to_i420_planar(
            &nv24[..y_size],
            width as i32,
            u_plane_444,
            width as i32,
            v_plane_444,
            width as i32,
            self.output_buffer.as_bytes_mut(),
            width as i32,
            height as i32,
        )
        .map_err(|e| AppError::VideoError(format!("libyuv NV24→I420 failed: {}", e)))?;
        Ok(())
    }
}
/// Calculate YUV420P buffer size for a given resolution
@@ -519,6 +604,16 @@ impl Nv12Converter {
}
}
/// Create a new converter for NV24 → NV12
pub fn nv24_to_nv12(resolution: Resolution) -> Self {
Self {
src_format: PixelFormat::Nv24,
resolution,
output_buffer: Nv12Buffer::new(resolution),
i420_buffer: None,
}
}
/// Convert a frame and return reference to the output buffer
pub fn convert(&mut self, input: &[u8]) -> Result<&[u8]> {
let width = self.resolution.width as i32;
@@ -553,6 +648,16 @@ impl Nv12Converter {
)?;
return Ok(self.output_buffer.as_bytes());
}
PixelFormat::Nv24 => {
let dst = self.output_buffer.as_bytes_mut();
Self::convert_nv24_to_nv12_with_dims(
self.resolution.width as usize,
self.resolution.height as usize,
input,
dst,
)?;
return Ok(self.output_buffer.as_bytes());
}
_ => {}
}
@@ -635,6 +740,57 @@ impl Nv12Converter {
Ok(())
}
fn convert_nv24_to_nv12_with_dims(
width: usize,
height: usize,
input: &[u8],
dst: &mut [u8],
) -> Result<()> {
let y_size = width * height;
let uv_size_nv24 = y_size * 2;
let uv_size_nv12 = y_size / 2;
if input.len() < y_size + uv_size_nv24 {
return Err(AppError::VideoError(format!(
"NV24 data too small: {} < {}",
input.len(),
y_size + uv_size_nv24
)));
}
dst[..y_size].copy_from_slice(&input[..y_size]);
let src_uv = &input[y_size..y_size + uv_size_nv24];
let dst_uv = &mut dst[y_size..y_size + uv_size_nv12];
let dst_rows = height / 2;
for row in 0..dst_rows {
let src_row0 = &src_uv[row * 2 * width * 2..row * 2 * width * 2 + width * 2];
let src_row1 =
&src_uv[(row * 2 + 1) * width * 2..(row * 2 + 1) * width * 2 + width * 2];
let dst_row = &mut dst_uv[row * width..row * width + width];
for pair in 0..(width / 2) {
let src_idx0 = pair * 4;
let src_idx1 = src_idx0 + 2;
let dst_idx = pair * 2;
dst_row[dst_idx] = ((src_row0[src_idx0] as u32
+ src_row0[src_idx1] as u32
+ src_row1[src_idx0] as u32
+ src_row1[src_idx1] as u32)
/ 4) as u8;
dst_row[dst_idx + 1] = ((src_row0[src_idx0 + 1] as u32
+ src_row0[src_idx1 + 1] as u32
+ src_row1[src_idx0 + 1] as u32
+ src_row1[src_idx1 + 1] as u32)
/ 4) as u8;
}
}
Ok(())
}
/// Get output buffer length
pub fn output_len(&self) -> usize {
self.output_buffer.len()

View File

@@ -6,7 +6,10 @@ use std::path::{Path, PathBuf};
use std::sync::mpsc;
use std::time::Duration;
use tracing::{debug, info, warn};
use v4l2r::bindings::{v4l2_frmivalenum, v4l2_frmsizeenum};
use v4l2r::bindings::{
v4l2_bt_timings, v4l2_dv_timings, v4l2_frmivalenum, v4l2_frmsizeenum, v4l2_streamparm,
V4L2_DV_BT_656_1120,
};
use v4l2r::ioctl::{
self, Capabilities, Capability as V4l2rCapability, FormatIterator, FrmIvalTypes, FrmSizeTypes,
};
@@ -14,6 +17,7 @@ use v4l2r::nix::errno::Errno;
use v4l2r::{Format as V4l2rFormat, QueueType};
use super::format::{PixelFormat, Resolution};
use super::is_rk_hdmirx_driver;
use crate::error::{AppError, Result};
const DEVICE_PROBE_TIMEOUT_MS: u64 = 400;
@@ -57,11 +61,11 @@ pub struct FormatInfo {
pub struct ResolutionInfo {
pub width: u32,
pub height: u32,
pub fps: Vec<u32>,
pub fps: Vec<f64>,
}
impl ResolutionInfo {
pub fn new(width: u32, height: u32, fps: Vec<u32>) -> Self {
pub fn new(width: u32, height: u32, fps: Vec<f64>) -> Self {
Self { width, height, fps }
}
@@ -143,7 +147,11 @@ impl VideoDevice {
read_write: flags.contains(Capabilities::READWRITE),
};
let formats = self.enumerate_formats()?;
let formats = if is_rk_hdmirx_driver(&caps.driver, &caps.card) {
self.enumerate_current_format_only()?
} else {
self.enumerate_formats()?
};
// Determine if this is likely an HDMI capture card
let is_capture_card = Self::detect_capture_card(&caps.card, &caps.driver, &formats);
@@ -176,6 +184,15 @@ impl VideoDevice {
// Try to convert FourCC to our PixelFormat
if let Some(format) = PixelFormat::from_v4l2r(desc.pixelformat) {
let resolutions = self.enumerate_resolutions(desc.pixelformat)?;
let is_current_format = self.current_active_format() == Some(format);
if resolutions.is_empty() && !is_current_format {
debug!(
"Skipping format {:?} ({}): not usable for current active mode",
desc.pixelformat, desc.description
);
continue;
}
formats.push(FormatInfo {
format,
@@ -196,9 +213,38 @@ impl VideoDevice {
Ok(formats)
}
    /// Report a single format entry built from the device's currently active
    /// format, instead of enumerating everything. Used for drivers (gated on
    /// `is_rk_hdmirx_driver` by the caller) that only expose their
    /// currently-locked input mode.
    ///
    /// Falls back to full enumeration when the active pixel format is not one
    /// One-KVM understands.
    fn enumerate_current_format_only(&self) -> Result<Vec<FormatInfo>> {
        let current = self.get_format()?;
        let Some(format) = PixelFormat::from_v4l2r(current.pixelformat) else {
            debug!(
                "Current active format {:?} is not supported by One-KVM, falling back to full enumeration",
                current.pixelformat
            );
            return self.enumerate_formats();
        };
        // Prefer the driver-provided description; fall back to our own name.
        let description = self
            .format_description(current.pixelformat)
            .unwrap_or_else(|| format.to_string());
        let mut resolutions = self.enumerate_resolutions(current.pixelformat)?;
        if resolutions.is_empty() {
            // No enumerable frame sizes: describe the currently active mode instead.
            if let Some(current_mode) = self.current_mode_resolution_info() {
                resolutions.push(current_mode);
            }
        }
        Ok(vec![FormatInfo {
            format,
            resolutions,
            description,
        }])
    }
/// Enumerate resolutions for a specific format
fn enumerate_resolutions(&self, fourcc: v4l2r::PixelFormat) -> Result<Vec<ResolutionInfo>> {
let mut resolutions = Vec::new();
let mut should_fallback_to_current_mode = false;
let mut index = 0u32;
loop {
@@ -241,7 +287,15 @@ impl VideoDevice {
e,
v4l2r::ioctl::FrameSizeError::IoctlError(err) if err == Errno::EINVAL
);
if !is_einval {
let is_unsupported = matches!(
e,
v4l2r::ioctl::FrameSizeError::IoctlError(err)
if matches!(err, Errno::ENOTTY | Errno::ENOSYS | Errno::EOPNOTSUPP)
);
if is_unsupported && resolutions.is_empty() {
should_fallback_to_current_mode = true;
}
if !is_einval && !is_unsupported {
debug!("Failed to enumerate frame sizes for {:?}: {}", fourcc, e);
}
break;
@@ -249,6 +303,23 @@ impl VideoDevice {
}
}
if should_fallback_to_current_mode {
if let Some(resolution) = self.current_mode_resolution_info() {
if self.format_works_for_resolution(fourcc, resolution.width, resolution.height) {
debug!(
"Falling back to current active mode for {:?}: {}x{} @ {:?} fps",
fourcc, resolution.width, resolution.height, resolution.fps
);
resolutions.push(resolution);
} else {
debug!(
"Skipping current-mode fallback for {:?}: TRY_FMT rejected {}x{}",
fourcc, resolution.width, resolution.height
);
}
}
}
// Sort by resolution (largest first)
resolutions.sort_by(|a, b| (b.width * b.height).cmp(&(a.width * a.height)));
resolutions.dedup_by(|a, b| a.width == b.width && a.height == b.height);
@@ -262,8 +333,9 @@ impl VideoDevice {
fourcc: v4l2r::PixelFormat,
width: u32,
height: u32,
) -> Result<Vec<u32>> {
) -> Result<Vec<f64>> {
let mut fps_list = Vec::new();
let mut should_fallback_to_current_mode = false;
let mut index = 0u32;
loop {
@@ -274,15 +346,18 @@ impl VideoDevice {
if let Some(interval) = interval.intervals() {
match interval {
FrmIvalTypes::Discrete(fraction) => {
if fraction.numerator > 0 {
let fps = fraction.denominator / fraction.numerator;
if fraction.numerator > 0 && fraction.denominator > 0 {
let fps =
fraction.denominator as f64 / fraction.numerator as f64;
fps_list.push(fps);
}
}
FrmIvalTypes::StepWise(step) => {
if step.max.numerator > 0 {
let min_fps = step.max.denominator / step.max.numerator;
let max_fps = step.min.denominator / step.min.numerator;
if step.max.numerator > 0 && step.max.denominator > 0 {
let min_fps =
step.max.denominator as f64 / step.max.numerator as f64;
let max_fps =
step.min.denominator as f64 / step.min.numerator as f64;
fps_list.push(min_fps);
if max_fps != min_fps {
fps_list.push(max_fps);
@@ -298,7 +373,15 @@ impl VideoDevice {
e,
v4l2r::ioctl::FrameIntervalsError::IoctlError(err) if err == Errno::EINVAL
);
if !is_einval {
let is_unsupported = matches!(
e,
v4l2r::ioctl::FrameIntervalsError::IoctlError(err)
if matches!(err, Errno::ENOTTY | Errno::ENOSYS | Errno::EOPNOTSUPP)
);
if is_unsupported && fps_list.is_empty() {
should_fallback_to_current_mode = true;
}
if !is_einval && !is_unsupported {
debug!(
"Failed to enumerate frame intervals for {:?} {}x{}: {}",
fourcc, width, height, e
@@ -309,8 +392,11 @@ impl VideoDevice {
}
}
fps_list.sort_by(|a, b| b.cmp(a));
fps_list.dedup();
if should_fallback_to_current_mode {
fps_list.extend(self.current_mode_fps());
}
normalize_fps_list(&mut fps_list);
Ok(fps_list)
}
@@ -426,6 +512,105 @@ impl VideoDevice {
&self.fd
}
fn current_mode_resolution_info(&self) -> Option<ResolutionInfo> {
    // Prefer the geometry reported by DV timings; otherwise fall back to
    // whatever resolution the driver currently has configured.
    let dims = match self.current_dv_timings_mode() {
        Some((w, h, _fps)) => Some((w, h)),
        None => self.current_format_resolution(),
    }?;
    Some(ResolutionInfo::new(dims.0, dims.1, self.current_mode_fps()))
}
fn current_mode_fps(&self) -> Vec<f64> {
    // Collect candidate frame rates from G_PARM and from the DV timings,
    // then normalize (filter / round / sort / dedup) before returning.
    let mut rates: Vec<f64> = self.current_parm_fps().into_iter().collect();
    if let Some((_, _, Some(rate))) = self.current_dv_timings_mode() {
        rates.push(rate);
    }
    normalize_fps_list(&mut rates);
    rates
}
fn current_parm_fps(&self) -> Option<f64> {
    // Ask the driver for its current streaming parameters (VIDIOC_G_PARM)
    // and derive fps from the time-per-frame fraction.
    let queue = self.capture_queue_type().ok()?;
    let params: v4l2_streamparm = ioctl::g_parm(&self.fd, queue).ok()?;
    // SAFETY: for a capture queue the driver fills the `capture` union arm.
    let tpf = unsafe { params.parm.capture }.timeperframe;
    match (tpf.numerator, tpf.denominator) {
        (0, _) | (_, 0) => None, // unset or degenerate fraction
        (num, den) => Some(den as f64 / num as f64),
    }
}
fn current_dv_timings_mode(&self) -> Option<(u32, u32, Option<f64>)> {
    // QUERY_DV_TIMINGS reflects the live signal; G_DV_TIMINGS is the
    // configured fallback when querying fails or is unsupported.
    let timings = ioctl::query_dv_timings::<v4l2_dv_timings>(&self.fd)
        .or_else(|_| ioctl::g_dv_timings::<v4l2_dv_timings>(&self.fd))
        .ok()?;
    // Only BT.656/1120 timings carry the `bt` payload we know how to read.
    if timings.type_ != V4L2_DV_BT_656_1120 {
        return None;
    }
    // SAFETY: guarded by the type check above, so the `bt` union arm is valid.
    let bt = unsafe { timings.__bindgen_anon_1.bt };
    match (bt.width, bt.height) {
        (0, _) | (_, 0) => None,
        (w, h) => Some((w, h, dv_timings_fps(&bt))),
    }
}
fn current_format_resolution(&self) -> Option<(u32, u32)> {
    // A zero dimension means the driver has no usable format configured.
    let fmt = self.get_format().ok()?;
    (fmt.width > 0 && fmt.height > 0).then_some((fmt.width, fmt.height))
}
fn current_active_format(&self) -> Option<PixelFormat> {
    // Map the driver's current fourcc to our internal pixel-format enum.
    self.get_format()
        .ok()
        .and_then(|fmt| PixelFormat::from_v4l2r(fmt.pixelformat))
}
fn format_description(&self, fourcc: v4l2r::PixelFormat) -> Option<String> {
    // Walk the driver's ENUM_FMT list and return the human-readable
    // description of the entry matching `fourcc`, if any.
    let queue = self.capture_queue_type().ok()?;
    FormatIterator::new(&self.fd, queue)
        .find_map(|desc| (desc.pixelformat == fourcc).then_some(desc.description))
}
fn format_works_for_resolution(
    &self,
    fourcc: v4l2r::PixelFormat,
    width: u32,
    height: u32,
) -> bool {
    // Probe with TRY_FMT: start from the current format, request the exact
    // fourcc/width/height, and require the driver to accept it unchanged.
    // Any ioctl failure along the way counts as "does not work".
    let probe = || -> Option<bool> {
        let queue = self.capture_queue_type().ok()?;
        let mut fmt = ioctl::g_fmt::<V4l2rFormat>(&self.fd, queue).ok()?;
        fmt.width = width;
        fmt.height = height;
        fmt.pixelformat = fourcc;
        let actual = ioctl::try_fmt::<_, V4l2rFormat>(&self.fd, (queue, &fmt)).ok()?;
        Some(actual.pixelformat == fourcc && actual.width == width && actual.height == height)
    };
    probe().unwrap_or(false)
}
fn capture_queue_type(&self) -> Result<QueueType> {
let caps = self.capabilities()?;
if caps.video_capture {
@@ -588,6 +773,36 @@ fn extract_uevent_value(content: &str, key: &str) -> Option<String> {
None
}
fn dv_timings_fps(bt: &v4l2_bt_timings) -> Option<f64> {
    // Total frame size includes blanking (porches + sync) on both axes.
    let htotal = bt.width + bt.hfrontporch + bt.hsync + bt.hbackporch;
    let mut vtotal = bt.height + bt.vfrontporch + bt.vsync + bt.vbackporch;
    if bt.interlaced != 0 {
        // Interlaced signals carry extra blanking for the second field.
        vtotal += bt.il_vfrontporch + bt.il_vsync + bt.il_vbackporch;
    }
    if bt.pixelclock == 0 || htotal == 0 || vtotal == 0 {
        return None;
    }
    // fps = pixel clock / total pixels per frame.
    Some(bt.pixelclock as f64 / htotal as f64 / vtotal as f64)
}
fn normalize_fps_list(fps_list: &mut Vec<f64>) {
    // Keep only finite, strictly positive frame rates.
    fps_list.retain(|&rate| rate.is_finite() && rate > 0.0);
    // Round every entry to two decimal places.
    fps_list
        .iter_mut()
        .for_each(|rate| *rate = (*rate * 100.0).round() / 100.0);
    // Highest rate first; collapse neighbours closer than 0.01 fps.
    fps_list.sort_by(|a, b| b.total_cmp(a));
    fps_list.dedup_by(|a, b| (*a - *b).abs() < 0.01);
}
/// Find the best video device for KVM use
pub fn find_best_device() -> Result<VideoDeviceInfo> {
let devices = enumerate_devices()?;

View File

@@ -13,7 +13,7 @@ use std::sync::Once;
use tracing::{debug, error, info, warn};
use hwcodec::common::{Quality, RateControl};
use hwcodec::ffmpeg::AVPixelFormat;
use hwcodec::ffmpeg::{resolve_pixel_format, AVPixelFormat};
use hwcodec::ffmpeg_ram::encode::{EncodeContext, Encoder as HwEncoder};
use hwcodec::ffmpeg_ram::CodecInfo;
@@ -195,7 +195,7 @@ pub fn get_available_encoders(width: u32, height: u32) -> Vec<CodecInfo> {
mc_name: None,
width: width as i32,
height: height as i32,
pixfmt: AVPixelFormat::AV_PIX_FMT_YUV420P,
pixfmt: resolve_pixel_format("yuv420p", AVPixelFormat::AV_PIX_FMT_YUV420P),
align: 1,
fps: 30,
gop: 30,
@@ -273,16 +273,17 @@ impl H264Encoder {
let height = config.base.resolution.height;
// Select pixel format based on config
let pixfmt = match config.input_format {
H264InputFormat::Nv12 => AVPixelFormat::AV_PIX_FMT_NV12,
H264InputFormat::Nv21 => AVPixelFormat::AV_PIX_FMT_NV21,
H264InputFormat::Nv16 => AVPixelFormat::AV_PIX_FMT_NV16,
H264InputFormat::Nv24 => AVPixelFormat::AV_PIX_FMT_NV24,
H264InputFormat::Yuv420p => AVPixelFormat::AV_PIX_FMT_YUV420P,
H264InputFormat::Yuyv422 => AVPixelFormat::AV_PIX_FMT_YUYV422,
H264InputFormat::Rgb24 => AVPixelFormat::AV_PIX_FMT_RGB24,
H264InputFormat::Bgr24 => AVPixelFormat::AV_PIX_FMT_BGR24,
let (pixfmt_name, pixfmt_fallback) = match config.input_format {
H264InputFormat::Nv12 => ("nv12", AVPixelFormat::AV_PIX_FMT_NV12),
H264InputFormat::Nv21 => ("nv21", AVPixelFormat::AV_PIX_FMT_NV21),
H264InputFormat::Nv16 => ("nv16", AVPixelFormat::AV_PIX_FMT_NV16),
H264InputFormat::Nv24 => ("nv24", AVPixelFormat::AV_PIX_FMT_NV24),
H264InputFormat::Yuv420p => ("yuv420p", AVPixelFormat::AV_PIX_FMT_YUV420P),
H264InputFormat::Yuyv422 => ("yuyv422", AVPixelFormat::AV_PIX_FMT_YUYV422),
H264InputFormat::Rgb24 => ("rgb24", AVPixelFormat::AV_PIX_FMT_RGB24),
H264InputFormat::Bgr24 => ("bgr24", AVPixelFormat::AV_PIX_FMT_BGR24),
};
let pixfmt = resolve_pixel_format(pixfmt_name, pixfmt_fallback);
info!(
"Creating H.264 encoder: {} at {}x{} @ {} kbps (input: {:?})",

View File

@@ -11,7 +11,7 @@ use std::sync::Once;
use tracing::{debug, error, info, warn};
use hwcodec::common::{DataFormat, Quality, RateControl};
use hwcodec::ffmpeg::AVPixelFormat;
use hwcodec::ffmpeg::{resolve_pixel_format, AVPixelFormat};
use hwcodec::ffmpeg_ram::encode::{EncodeContext, Encoder as HwEncoder};
use hwcodec::ffmpeg_ram::CodecInfo;
@@ -198,7 +198,7 @@ pub fn get_available_h265_encoders(width: u32, height: u32) -> Vec<CodecInfo> {
mc_name: None,
width: width as i32,
height: height as i32,
pixfmt: AVPixelFormat::AV_PIX_FMT_NV12,
pixfmt: resolve_pixel_format("nv12", AVPixelFormat::AV_PIX_FMT_NV12),
align: 1,
fps: 30,
gop: 30,
@@ -310,24 +310,45 @@ impl H265Encoder {
let height = config.base.resolution.height;
// Software encoders (libx265) require YUV420P, hardware encoders use NV12 or YUYV422
let (pixfmt, actual_input_format) = if is_software {
(AVPixelFormat::AV_PIX_FMT_YUV420P, H265InputFormat::Yuv420p)
let (pixfmt_name, pixfmt_fallback, actual_input_format) = if is_software {
(
"yuv420p",
AVPixelFormat::AV_PIX_FMT_YUV420P,
H265InputFormat::Yuv420p,
)
} else {
match config.input_format {
H265InputFormat::Nv12 => (AVPixelFormat::AV_PIX_FMT_NV12, H265InputFormat::Nv12),
H265InputFormat::Nv21 => (AVPixelFormat::AV_PIX_FMT_NV21, H265InputFormat::Nv21),
H265InputFormat::Nv16 => (AVPixelFormat::AV_PIX_FMT_NV16, H265InputFormat::Nv16),
H265InputFormat::Nv24 => (AVPixelFormat::AV_PIX_FMT_NV24, H265InputFormat::Nv24),
H265InputFormat::Yuv420p => {
(AVPixelFormat::AV_PIX_FMT_YUV420P, H265InputFormat::Yuv420p)
H265InputFormat::Nv12 => {
("nv12", AVPixelFormat::AV_PIX_FMT_NV12, H265InputFormat::Nv12)
}
H265InputFormat::Yuyv422 => {
(AVPixelFormat::AV_PIX_FMT_YUYV422, H265InputFormat::Yuyv422)
H265InputFormat::Nv21 => {
("nv21", AVPixelFormat::AV_PIX_FMT_NV21, H265InputFormat::Nv21)
}
H265InputFormat::Nv16 => {
("nv16", AVPixelFormat::AV_PIX_FMT_NV16, H265InputFormat::Nv16)
}
H265InputFormat::Nv24 => {
("nv24", AVPixelFormat::AV_PIX_FMT_NV24, H265InputFormat::Nv24)
}
H265InputFormat::Yuv420p => (
"yuv420p",
AVPixelFormat::AV_PIX_FMT_YUV420P,
H265InputFormat::Yuv420p,
),
H265InputFormat::Yuyv422 => (
"yuyv422",
AVPixelFormat::AV_PIX_FMT_YUYV422,
H265InputFormat::Yuyv422,
),
H265InputFormat::Rgb24 => {
("rgb24", AVPixelFormat::AV_PIX_FMT_RGB24, H265InputFormat::Rgb24)
}
H265InputFormat::Bgr24 => {
("bgr24", AVPixelFormat::AV_PIX_FMT_BGR24, H265InputFormat::Bgr24)
}
H265InputFormat::Rgb24 => (AVPixelFormat::AV_PIX_FMT_RGB24, H265InputFormat::Rgb24),
H265InputFormat::Bgr24 => (AVPixelFormat::AV_PIX_FMT_BGR24, H265InputFormat::Bgr24),
}
};
let pixfmt = resolve_pixel_format(pixfmt_name, pixfmt_fallback);
info!(
"Creating H.265 encoder: {} at {}x{} @ {} kbps (input: {:?})",

View File

@@ -1,6 +1,6 @@
//! JPEG encoder implementation
//!
//! Provides JPEG encoding for raw video frames (YUYV, NV12, RGB, BGR)
//! Provides JPEG encoding for raw video frames (YUYV, NV12, NV16, NV24, RGB, BGR)
//! Uses libyuv for SIMD-accelerated color space conversion to I420,
//! then turbojpeg for direct YUV encoding (skips internal color conversion).
@@ -14,7 +14,7 @@ use crate::video::format::{PixelFormat, Resolution};
///
/// Encoding pipeline (all SIMD accelerated):
/// ```text
/// YUYV/NV12/BGR24/RGB24 ──libyuv──> I420 ──turbojpeg──> JPEG
/// YUYV/NV12/NV16/NV24/BGR24/RGB24 ──libyuv──> I420 ──turbojpeg──> JPEG
/// ```
///
/// Note: This encoder is NOT thread-safe due to turbojpeg limitations.
@@ -24,6 +24,10 @@ pub struct JpegEncoder {
compressor: turbojpeg::Compressor,
/// I420 buffer for YUV encoding (Y + U + V planes)
i420_buffer: Vec<u8>,
/// Scratch buffer for split chroma planes when converting semiplanar 4:2:2 / 4:4:4 input.
uv_split_buffer: Vec<u8>,
/// BGRA buffer used when a source format needs explicit YUV matrix expansion before JPEG.
bgra_buffer: Vec<u8>,
}
impl JpegEncoder {
@@ -34,6 +38,8 @@ impl JpegEncoder {
let height = resolution.height as usize;
// I420: Y = width*height, U = width*height/4, V = width*height/4
let i420_size = width * height * 3 / 2;
let max_uv_plane_size = width * height;
let bgra_size = width * height * 4;
let mut compressor = turbojpeg::Compressor::new().map_err(|e| {
AppError::VideoError(format!("Failed to create turbojpeg compressor: {}", e))
@@ -47,6 +53,8 @@ impl JpegEncoder {
config,
compressor,
i420_buffer: vec![0u8; i420_size],
uv_split_buffer: vec![0u8; max_uv_plane_size * 2],
bgra_buffer: vec![0u8; bgra_size],
})
}
@@ -93,6 +101,36 @@ impl JpegEncoder {
))
}
/// Encode the current contents of `self.bgra_buffer` to JPEG via
/// turbojpeg's RGB-family input path.
///
/// The caller must have filled `bgra_buffer` with a full
/// `width * height * 4` BGRA image before calling this.
/// Output uses 4:2:0 chroma subsampling (`Subsamp::Sub2x2`).
#[inline]
fn encode_bgra_to_jpeg(&mut self, sequence: u64) -> Result<EncodedFrame> {
    let width = self.config.resolution.width as usize;
    let height = self.config.resolution.height as usize;
    // Select 4:2:0 subsampling for the compressed output.
    self.compressor
        .set_subsamp(turbojpeg::Subsamp::Sub2x2)
        .map_err(|e| AppError::VideoError(format!("Failed to set JPEG subsampling: {}", e)))?;
    // Borrow the scratch buffer as a tightly-packed image (pitch = 4 bytes/px).
    let image = turbojpeg::Image {
        pixels: self.bgra_buffer.as_slice(),
        width,
        pitch: width * 4,
        height,
        format: turbojpeg::PixelFormat::BGRA,
    };
    let jpeg_data = self
        .compressor
        .compress_to_vec(image)
        .map_err(|e| AppError::VideoError(format!("JPEG compression failed: {}", e)))?;
    Ok(EncodedFrame::jpeg(
        Bytes::from(jpeg_data),
        self.config.resolution,
        sequence,
    ))
}
/// Encode YUYV (YUV422) frame to JPEG
pub fn encode_yuyv(&mut self, data: &[u8], sequence: u64) -> Result<EncodedFrame> {
let width = self.config.resolution.width as usize;
@@ -135,6 +173,101 @@ impl JpegEncoder {
self.encode_i420_to_jpeg(sequence)
}
/// Encode an NV16 (4:2:2 semi-planar) frame to JPEG.
///
/// NV16 layout: a full-resolution Y plane (`width * height` bytes)
/// followed by an interleaved UV plane of the same size (U and V each at
/// half horizontal, full vertical resolution).
///
/// Pipeline: split UV -> planar 4:2:2 -> I420 -> turbojpeg.
/// Returns an error if `data` is shorter than one full NV16 frame.
pub fn encode_nv16(&mut self, data: &[u8], sequence: u64) -> Result<EncodedFrame> {
    let width = self.config.resolution.width as usize;
    let height = self.config.resolution.height as usize;
    let y_size = width * height;
    // 4:2:2 semi-planar: the interleaved UV plane is as large as Y.
    let uv_size = y_size;
    let expected_size = y_size + uv_size;
    if data.len() < expected_size {
        return Err(AppError::VideoError(format!(
            "NV16 data too small: {} < {}",
            data.len(),
            expected_size
        )));
    }
    let src_uv = &data[y_size..expected_size];
    // Each deinterleaved chroma plane holds (width/2) * height samples.
    let chroma_plane_size = y_size / 2;
    // Carve U and V scratch planes out of the preallocated split buffer.
    let (u_plane_422, rest) = self.uv_split_buffer.split_at_mut(chroma_plane_size);
    let (v_plane_422, _) = rest.split_at_mut(chroma_plane_size);
    // Deinterleave UVUV... into separate half-width U and V planes.
    libyuv::split_uv_plane(
        src_uv,
        width as i32,
        u_plane_422,
        (width / 2) as i32,
        v_plane_422,
        (width / 2) as i32,
        (width / 2) as i32,
        height as i32,
    )
    .map_err(|e| AppError::VideoError(format!("libyuv NV16 split failed: {}", e)))?;
    // Downsample 4:2:2 planar to I420 (halves vertical chroma resolution).
    libyuv::i422_to_i420_planar(
        &data[..y_size],
        width as i32,
        u_plane_422,
        (width / 2) as i32,
        v_plane_422,
        (width / 2) as i32,
        &mut self.i420_buffer,
        width as i32,
        height as i32,
    )
    .map_err(|e| AppError::VideoError(format!("libyuv NV16→I420 failed: {}", e)))?;
    self.encode_i420_to_jpeg(sequence)
}
/// Encode an NV24 (4:4:4 semi-planar) frame to JPEG.
///
/// NV24 layout: a full-resolution Y plane (`width * height` bytes)
/// followed by an interleaved full-resolution UV plane
/// (`width * height * 2` bytes).
///
/// Pipeline: split UV -> planar 4:4:4 -> BGRA -> turbojpeg. The detour
/// through BGRA (instead of I420) applies an explicit YUV matrix during
/// expansion; NOTE(review): the "H" in `h444_to_bgra` presumably selects
/// the BT.709 matrix per libyuv naming — confirm against the wrapper.
/// Returns an error if `data` is shorter than one full NV24 frame.
pub fn encode_nv24(&mut self, data: &[u8], sequence: u64) -> Result<EncodedFrame> {
    let width = self.config.resolution.width as usize;
    let height = self.config.resolution.height as usize;
    let y_size = width * height;
    // 4:4:4 semi-planar: interleaved UV plane is twice the Y plane size.
    let uv_size = y_size * 2;
    let expected_size = y_size + uv_size;
    if data.len() < expected_size {
        return Err(AppError::VideoError(format!(
            "NV24 data too small: {} < {}",
            data.len(),
            expected_size
        )));
    }
    let src_uv = &data[y_size..expected_size];
    // Full-resolution chroma: each split plane is as large as Y.
    let chroma_plane_size = y_size;
    // Carve U and V scratch planes out of the preallocated split buffer.
    let (u_plane_444, rest) = self.uv_split_buffer.split_at_mut(chroma_plane_size);
    let (v_plane_444, _) = rest.split_at_mut(chroma_plane_size);
    // Deinterleave UVUV... into separate full-resolution U and V planes.
    libyuv::split_uv_plane(
        src_uv,
        (width * 2) as i32,
        u_plane_444,
        width as i32,
        v_plane_444,
        width as i32,
        width as i32,
        height as i32,
    )
    .map_err(|e| AppError::VideoError(format!("libyuv NV24 split failed: {}", e)))?;
    // Expand planar 4:4:4 YUV into the BGRA scratch buffer.
    libyuv::h444_to_bgra(
        &data[..y_size],
        u_plane_444,
        v_plane_444,
        &mut self.bgra_buffer,
        width as i32,
        height as i32,
    )
    .map_err(|e| AppError::VideoError(format!("libyuv NV24(H444)→BGRA failed: {}", e)))?;
    self.encode_bgra_to_jpeg(sequence)
}
/// Encode RGB24 frame to JPEG
pub fn encode_rgb(&mut self, data: &[u8], sequence: u64) -> Result<EncodedFrame> {
let width = self.config.resolution.width as usize;
@@ -192,6 +325,8 @@ impl crate::video::encoder::traits::Encoder for JpegEncoder {
match self.config.input_format {
PixelFormat::Yuyv | PixelFormat::Yvyu => self.encode_yuyv(data, sequence),
PixelFormat::Nv12 => self.encode_nv12(data, sequence),
PixelFormat::Nv16 => self.encode_nv16(data, sequence),
PixelFormat::Nv24 => self.encode_nv24(data, sequence),
PixelFormat::Rgb24 => self.encode_rgb(data, sequence),
PixelFormat::Bgr24 => self.encode_bgr(data, sequence),
_ => Err(AppError::VideoError(format!(
@@ -211,6 +346,8 @@ impl crate::video::encoder::traits::Encoder for JpegEncoder {
PixelFormat::Yuyv
| PixelFormat::Yvyu
| PixelFormat::Nv12
| PixelFormat::Nv16
| PixelFormat::Nv24
| PixelFormat::Rgb24
| PixelFormat::Bgr24
)

View File

@@ -11,7 +11,7 @@ use std::time::Duration;
use tracing::{debug, info, warn};
use hwcodec::common::{DataFormat, Quality, RateControl};
use hwcodec::ffmpeg::AVPixelFormat;
use hwcodec::ffmpeg::{resolve_pixel_format, AVPixelFormat};
use hwcodec::ffmpeg_ram::encode::{EncodeContext, Encoder as HwEncoder};
use hwcodec::ffmpeg_ram::CodecInfo;
@@ -309,7 +309,7 @@ impl EncoderRegistry {
mc_name: None,
width: width as i32,
height: height as i32,
pixfmt: AVPixelFormat::AV_PIX_FMT_NV12,
pixfmt: resolve_pixel_format("nv12", AVPixelFormat::AV_PIX_FMT_NV12),
align: 1,
fps: 30,
gop: 30,

View File

@@ -11,7 +11,7 @@ use std::sync::Once;
use tracing::{debug, error, info, warn};
use hwcodec::common::{DataFormat, Quality, RateControl};
use hwcodec::ffmpeg::AVPixelFormat;
use hwcodec::ffmpeg::{resolve_pixel_format, AVPixelFormat};
use hwcodec::ffmpeg_ram::encode::{EncodeContext, Encoder as HwEncoder};
use hwcodec::ffmpeg_ram::CodecInfo;
@@ -133,7 +133,7 @@ pub fn get_available_vp8_encoders(width: u32, height: u32) -> Vec<CodecInfo> {
mc_name: None,
width: width as i32,
height: height as i32,
pixfmt: AVPixelFormat::AV_PIX_FMT_NV12,
pixfmt: resolve_pixel_format("nv12", AVPixelFormat::AV_PIX_FMT_NV12),
align: 1,
fps: 30,
gop: 30,
@@ -244,16 +244,25 @@ impl VP8Encoder {
let height = config.base.resolution.height;
// Software encoders (libvpx) require YUV420P, hardware (VAAPI) uses NV12
let (pixfmt, actual_input_format) = if is_software {
(AVPixelFormat::AV_PIX_FMT_YUV420P, VP8InputFormat::Yuv420p)
let (pixfmt_name, pixfmt_fallback, actual_input_format) = if is_software {
(
"yuv420p",
AVPixelFormat::AV_PIX_FMT_YUV420P,
VP8InputFormat::Yuv420p,
)
} else {
match config.input_format {
VP8InputFormat::Nv12 => (AVPixelFormat::AV_PIX_FMT_NV12, VP8InputFormat::Nv12),
VP8InputFormat::Yuv420p => {
(AVPixelFormat::AV_PIX_FMT_YUV420P, VP8InputFormat::Yuv420p)
VP8InputFormat::Nv12 => {
("nv12", AVPixelFormat::AV_PIX_FMT_NV12, VP8InputFormat::Nv12)
}
VP8InputFormat::Yuv420p => (
"yuv420p",
AVPixelFormat::AV_PIX_FMT_YUV420P,
VP8InputFormat::Yuv420p,
),
}
};
let pixfmt = resolve_pixel_format(pixfmt_name, pixfmt_fallback);
info!(
"Creating VP8 encoder: {} at {}x{} @ {} kbps (input: {:?})",

View File

@@ -11,7 +11,7 @@ use std::sync::Once;
use tracing::{debug, error, info, warn};
use hwcodec::common::{DataFormat, Quality, RateControl};
use hwcodec::ffmpeg::AVPixelFormat;
use hwcodec::ffmpeg::{resolve_pixel_format, AVPixelFormat};
use hwcodec::ffmpeg_ram::encode::{EncodeContext, Encoder as HwEncoder};
use hwcodec::ffmpeg_ram::CodecInfo;
@@ -133,7 +133,7 @@ pub fn get_available_vp9_encoders(width: u32, height: u32) -> Vec<CodecInfo> {
mc_name: None,
width: width as i32,
height: height as i32,
pixfmt: AVPixelFormat::AV_PIX_FMT_NV12,
pixfmt: resolve_pixel_format("nv12", AVPixelFormat::AV_PIX_FMT_NV12),
align: 1,
fps: 30,
gop: 30,
@@ -244,16 +244,25 @@ impl VP9Encoder {
let height = config.base.resolution.height;
// Software encoders (libvpx-vp9) require YUV420P, hardware (VAAPI) uses NV12
let (pixfmt, actual_input_format) = if is_software {
(AVPixelFormat::AV_PIX_FMT_YUV420P, VP9InputFormat::Yuv420p)
let (pixfmt_name, pixfmt_fallback, actual_input_format) = if is_software {
(
"yuv420p",
AVPixelFormat::AV_PIX_FMT_YUV420P,
VP9InputFormat::Yuv420p,
)
} else {
match config.input_format {
VP9InputFormat::Nv12 => (AVPixelFormat::AV_PIX_FMT_NV12, VP9InputFormat::Nv12),
VP9InputFormat::Yuv420p => {
(AVPixelFormat::AV_PIX_FMT_YUV420P, VP9InputFormat::Yuv420p)
VP9InputFormat::Nv12 => {
("nv12", AVPixelFormat::AV_PIX_FMT_NV12, VP9InputFormat::Nv12)
}
VP9InputFormat::Yuv420p => (
"yuv420p",
AVPixelFormat::AV_PIX_FMT_YUV420P,
VP9InputFormat::Yuv420p,
),
}
};
let pixfmt = resolve_pixel_format(pixfmt_name, pixfmt_fallback);
info!(
"Creating VP9 encoder: {} at {}x{} @ {} kbps (input: {:?})",

View File

@@ -24,3 +24,11 @@ pub use shared_video_pipeline::{
};
pub use stream_manager::VideoStreamManager;
pub use streamer::{Streamer, StreamerState};
pub(crate) fn is_rk_hdmirx_driver(driver: &str, card: &str) -> bool {
    // The Rockchip HDMI-RX capture node may report "rk_hdmirx" as either
    // the V4L2 driver name or the card name; match either, case-insensitively.
    const RK_HDMIRX: &str = "rk_hdmirx";
    [driver, card]
        .iter()
        .any(|name| name.eq_ignore_ascii_case(RK_HDMIRX))
}
pub(crate) fn is_rk_hdmirx_device(device: &device::VideoDeviceInfo) -> bool {
    // Convenience wrapper: apply the driver/card name check to the
    // enumerated device's metadata.
    is_rk_hdmirx_driver(device.driver.as_str(), device.card.as_str())
}

View File

@@ -599,6 +599,14 @@ fn converters_for_pipeline(
info!("Using NV21->YUV420P converter");
Ok((None, Some(PixelConverter::nv21_to_yuv420p(resolution))))
}
PixelFormat::Nv16 => {
info!("Using NV16->YUV420P converter");
Ok((None, Some(PixelConverter::nv16_to_yuv420p(resolution))))
}
PixelFormat::Nv24 => {
info!("Using NV24->YUV420P converter");
Ok((None, Some(PixelConverter::nv24_to_yuv420p(resolution))))
}
PixelFormat::Rgb24 => {
info!("Using RGB24->YUV420P converter");
Ok((None, Some(PixelConverter::rgb24_to_yuv420p(resolution))))
@@ -631,6 +639,10 @@ fn converters_for_pipeline(
info!("Using NV16->NV12 converter");
Ok((Some(Nv12Converter::nv16_to_nv12(resolution)), None))
}
PixelFormat::Nv24 => {
info!("Using NV24->NV12 converter");
Ok((Some(Nv12Converter::nv24_to_nv12(resolution)), None))
}
PixelFormat::Yuv420 => {
info!("Using YUV420P->NV12 converter");
Ok((Some(Nv12Converter::yuv420_to_nv12(resolution)), None))

View File

@@ -38,6 +38,7 @@ use crate::hid::HidController;
use crate::stream::MjpegStreamHandler;
use crate::video::codec_constraints::StreamCodecConstraints;
use crate::video::format::{PixelFormat, Resolution};
use crate::video::is_rk_hdmirx_device;
use crate::video::streamer::{Streamer, StreamerState};
use crate::webrtc::WebRtcStreamer;
@@ -427,7 +428,8 @@ impl VideoStreamManager {
device.formats.iter().map(|f| f.format).collect();
// If current format is not MJPEG and device supports MJPEG, switch to it
if current_format != PixelFormat::Mjpeg
if !is_rk_hdmirx_device(&device)
&& current_format != PixelFormat::Mjpeg
&& available_formats.contains(&PixelFormat::Mjpeg)
{
info!("Auto-switching to MJPEG format for MJPEG mode");

View File

@@ -14,6 +14,7 @@ use tracing::{debug, error, info, trace, warn};
use super::device::{enumerate_devices, find_best_device, VideoDeviceInfo};
use super::format::{PixelFormat, Resolution};
use super::frame::{FrameBuffer, FrameBufferPool, VideoFrame};
use super::is_rk_hdmirx_device;
use crate::error::{AppError, Result};
use crate::events::{EventBus, SystemEvent};
use crate::stream::MjpegStreamHandler;
@@ -269,24 +270,8 @@ impl Streamer {
.find(|d| d.path.to_string_lossy() == device_path)
.ok_or_else(|| AppError::VideoError("Video device not found".to_string()))?;
// Validate format
let fmt_info = device
.formats
.iter()
.find(|f| f.format == format)
.ok_or_else(|| AppError::VideoError("Requested format not supported".to_string()))?;
// Validate resolution
if !fmt_info.resolutions.is_empty()
&& !fmt_info
.resolutions
.iter()
.any(|r| r.width == resolution.width && r.height == resolution.height)
{
return Err(AppError::VideoError(
"Requested resolution not supported".to_string(),
));
}
let (format, resolution) =
self.resolve_capture_config(&device, format, resolution)?;
// IMPORTANT: Disconnect all MJPEG clients FIRST before stopping capture
// This prevents race conditions where clients try to reconnect and reopen the device
@@ -385,6 +370,14 @@ impl Streamer {
device: &VideoDeviceInfo,
preferred: PixelFormat,
) -> Result<PixelFormat> {
if is_rk_hdmirx_device(device) {
return device
.formats
.first()
.map(|f| f.format)
.ok_or_else(|| AppError::VideoError("No supported formats found".to_string()));
}
// Check if preferred format is available
if device.formats.iter().any(|f| f.format == preferred) {
return Ok(preferred);
@@ -411,6 +404,14 @@ impl Streamer {
.find(|f| &f.format == format)
.ok_or_else(|| AppError::VideoError("Format not found".to_string()))?;
if is_rk_hdmirx_device(device) {
return Ok(format_info
.resolutions
.first()
.map(|r| r.resolution())
.unwrap_or(preferred));
}
// Check if preferred resolution is available
if format_info.resolutions.is_empty()
|| format_info
@@ -429,6 +430,17 @@ impl Streamer {
.ok_or_else(|| AppError::VideoError("No resolutions available".to_string()))
}
fn resolve_capture_config(
    &self,
    device: &VideoDeviceInfo,
    requested_format: PixelFormat,
    requested_resolution: Resolution,
) -> Result<(PixelFormat, Resolution)> {
    // Resolve the format first: the set of valid resolutions depends on it.
    let chosen_format = self.select_format(device, requested_format)?;
    let chosen_resolution =
        self.select_resolution(device, &chosen_format, requested_resolution)?;
    Ok((chosen_format, chosen_resolution))
}
/// Restart capture for recovery (direct capture path)
async fn restart_capture(self: &Arc<Self>) -> Result<()> {
self.direct_stop.store(false, Ordering::SeqCst);

View File

@@ -14,6 +14,7 @@ use v4l2r::ioctl::{
QBuffer, QueryBuffer, V4l2Buffer,
};
use v4l2r::memory::{MemoryType, MmapHandle};
use v4l2r::nix::errno::Errno;
use v4l2r::{Format as V4l2rFormat, PixelFormat as V4l2rPixelFormat, QueueType};
use crate::error::{AppError, Result};
@@ -91,8 +92,11 @@ impl V4l2rCaptureStream {
});
if fps > 0 {
if let Err(e) = set_fps(&fd, queue, fps) {
warn!("Failed to set hardware FPS: {}", e);
match set_fps(&fd, queue, fps) {
Ok(()) => {}
Err(ioctl::GParmError::IoctlError(err))
if matches!(err, Errno::ENOTTY | Errno::ENOSYS | Errno::EOPNOTSUPP) => {}
Err(e) => warn!("Failed to set hardware FPS: {}", e),
}
}
@@ -258,7 +262,7 @@ impl Drop for V4l2rCaptureStream {
}
}
fn set_fps(fd: &File, queue: QueueType, fps: u32) -> Result<()> {
fn set_fps(fd: &File, queue: QueueType, fps: u32) -> std::result::Result<(), ioctl::GParmError> {
let mut params = unsafe { std::mem::zeroed::<v4l2_streamparm>() };
params.type_ = queue as u32;
params.parm = v4l2_streamparm__bindgen_ty_1 {
@@ -271,7 +275,6 @@ fn set_fps(fd: &File, queue: QueueType, fps: u32) -> Result<()> {
},
};
let _actual: v4l2_streamparm = ioctl::s_parm(fd, params)
.map_err(|e| AppError::VideoError(format!("Failed to set FPS: {}", e)))?;
let _actual: v4l2_streamparm = ioctl::s_parm(fd, params)?;
Ok(())
}

View File

@@ -1001,7 +1001,7 @@ pub struct VideoFormat {
pub struct VideoResolution {
pub width: u32,
pub height: u32,
pub fps: Vec<u32>,
pub fps: Vec<f64>,
}
#[derive(Serialize)]

View File

@@ -4,6 +4,7 @@ import { RouterLink, useRoute, useRouter } from 'vue-router'
import { useI18n } from 'vue-i18n'
import { useAuthStore } from '@/stores/auth'
import { useSystemStore } from '@/stores/system'
import LanguageToggleButton from '@/components/LanguageToggleButton.vue'
import { Button } from '@/components/ui/button'
import {
DropdownMenu,
@@ -18,12 +19,10 @@ import {
LogOut,
Sun,
Moon,
Languages,
Menu,
} from 'lucide-vue-next'
import { setLanguage } from '@/i18n'
const { t, locale } = useI18n()
const { t } = useI18n()
const route = useRoute()
const router = useRouter()
const authStore = useAuthStore()
@@ -40,11 +39,6 @@ function toggleTheme() {
localStorage.setItem('theme', isDark ? 'light' : 'dark')
}
function toggleLanguage() {
const newLang = locale.value === 'zh-CN' ? 'en-US' : 'zh-CN'
setLanguage(newLang)
}
async function handleLogout() {
await authStore.logout()
router.push('/login')
@@ -93,10 +87,7 @@ async function handleLogout() {
</Button>
<!-- Language Toggle -->
<Button variant="ghost" size="icon" :aria-label="t('common.toggleLanguage')" @click="toggleLanguage">
<Languages class="h-4 w-4" />
<span class="sr-only">{{ t('common.toggleLanguage') }}</span>
</Button>
<LanguageToggleButton />
<!-- Mobile Menu -->
<DropdownMenu>

View File

@@ -0,0 +1,50 @@
<script setup lang="ts">
// Shared language-toggle button: flips the active i18n locale between
// Simplified Chinese (zh-CN) and English (en-US) on click.
import { computed } from 'vue'
import { useI18n } from 'vue-i18n'
import type { HTMLAttributes } from 'vue'
import type { ButtonVariants } from '@/components/ui/button'
import { Button } from '@/components/ui/button'
import { cn } from '@/lib/utils'
import { setLanguage } from '@/i18n'
import { Languages } from 'lucide-vue-next'

interface Props {
  // Extra classes merged into the button's class list via `cn`.
  class?: HTMLAttributes['class']
  // Size/variant forwarded to the underlying UI Button.
  size?: ButtonVariants['size']
  variant?: ButtonVariants['variant']
  // 'hidden'  -> icon only (label is screen-reader-only)
  // 'current' -> show the active language's label
  // 'next'    -> show the language a click would switch to
  labelMode?: 'hidden' | 'current' | 'next'
}

const props = withDefaults(defineProps<Props>(), {
  size: 'icon',
  variant: 'ghost',
  labelMode: 'hidden',
})

const { t, locale } = useI18n()

const currentLanguageLabel = computed(() => (locale.value === 'zh-CN' ? '中文' : 'English'))
const nextLanguageLabel = computed(() => (locale.value === 'zh-CN' ? 'English' : '中文'))
// Any labelMode other than 'current' falls through to the "next language"
// text; 'hidden' never renders the label so the fallthrough is harmless.
const buttonLabel = computed(() => (
  props.labelMode === 'current' ? currentLanguageLabel.value : nextLanguageLabel.value
))

// Toggle between the two supported locales.
function toggleLanguage() {
  const newLang = locale.value === 'zh-CN' ? 'en-US' : 'zh-CN'
  setLanguage(newLang)
}
</script>

<template>
  <Button
    :variant="variant"
    :size="size"
    :class="cn(props.labelMode !== 'hidden' && 'gap-2', props.class)"
    :aria-label="t('common.toggleLanguage')"
    @click="toggleLanguage"
  >
    <Languages class="h-4 w-4" />
    <!-- Visible text label only when a labelMode requests one -->
    <span v-if="props.labelMode !== 'hidden'">{{ buttonLabel }}</span>
    <span class="sr-only">{{ t('common.toggleLanguage') }}</span>
  </Button>
</template>

View File

@@ -11,6 +11,7 @@ import {
} from '@/components/ui/sheet'
import { ScrollArea } from '@/components/ui/scroll-area'
import type { WebRTCStats } from '@/composables/useWebRTC'
import { formatFpsValue } from '@/lib/fps'
const { t } = useI18n()
@@ -547,7 +548,7 @@ onUnmounted(() => {
<div class="flex items-center justify-between">
<h4 class="text-sm font-medium">{{ t('stats.frameRate') }}</h4>
<span class="text-xs text-muted-foreground">
{{ currentStats.fps }} fps
{{ formatFpsValue(currentStats.fps) }} fps
</span>
</div>
<p class="text-xs text-muted-foreground">

View File

@@ -28,6 +28,7 @@ import {
type StreamConstraintsResponse,
} from '@/api'
import { getVideoFormatState, isVideoFormatSelectable } from '@/lib/video-format-support'
import { formatFpsLabel, toConfigFps } from '@/lib/fps'
import { useConfigStore } from '@/stores/config'
import { useRouter } from 'vue-router'
@@ -549,7 +550,7 @@ async function applyVideoConfig() {
format: selectedFormat.value,
width,
height,
fps: selectedFps.value,
fps: toConfigFps(selectedFps.value),
})
toast.success(t('config.applied'))
@@ -926,7 +927,7 @@ watch(
:value="String(fps)"
class="text-xs"
>
{{ fps }} FPS
{{ formatFpsLabel(fps) }}
</SelectItem>
</SelectContent>
</Select>

View File

@@ -268,9 +268,6 @@ export default {
// Help tooltips
ch9329Help: 'CH9329 is a serial-to-HID chip connected via serial port. Works with most hardware configurations.',
otgHelp: 'USB OTG mode emulates HID devices directly through USB Device Controller. Requires hardware OTG support.',
otgAdvanced: 'Advanced: OTG Preset',
otgProfile: 'Initial HID Preset',
otgProfileDesc: 'Choose the initial OTG HID preset. You can change this later in Settings.',
otgLowEndpointHint: 'Detected low-endpoint UDC; Consumer Control Keyboard will be disabled automatically.',
videoDeviceHelp: 'Select the video capture device for capturing the remote host display. Usually an HDMI capture card.',
videoFormatHelp: 'MJPEG has best compatibility. H.264/H.265 uses less bandwidth but requires encoding support.',

View File

@@ -268,9 +268,6 @@ export default {
// Help tooltips
ch9329Help: 'CH9329 是一款串口转 HID 芯片,通过串口连接到主机。适用于大多数硬件配置。',
otgHelp: 'USB OTG 模式通过 USB 设备控制器直接模拟 HID 设备。需要硬件支持 USB OTG 功能。',
otgAdvanced: '高级OTG 预设',
otgProfile: '初始 HID 预设',
otgProfileDesc: '选择 OTG HID 的初始预设,后续可在设置中修改。',
otgLowEndpointHint: '检测到低端点 UDC将自动禁用多媒体键盘。',
videoDeviceHelp: '选择用于捕获远程主机画面的视频采集设备。通常是 HDMI 采集卡。',
videoFormatHelp: 'MJPEG 格式兼容性最好H.264/H.265 带宽占用更低但需要编码支持。',

15
web/src/lib/fps.ts Normal file
View File

@@ -0,0 +1,15 @@
export function formatFpsValue(fps: number): string {
  // Render an fps value for display: round to 2 decimals and strip
  // trailing zeros. Non-finite input (NaN/Infinity) renders as '0'.
  if (!Number.isFinite(fps)) return '0'
  const rounded = Math.round(fps * 100) / 100
  if (Number.isInteger(rounded)) return String(rounded)
  return rounded.toFixed(2).replace(/\.?0+$/, '')
}
export function formatFpsLabel(fps: number): string {
  // Display label for selects/menus, e.g. "29.97 FPS".
  return formatFpsValue(fps) + ' FPS'
}
export function toConfigFps(fps: number): number {
  // Backend video config expects an integer fps; fall back to 30 when the
  // input is NaN/Infinity.
  return Number.isFinite(fps) ? Math.round(fps) : 30
}

View File

@@ -8,6 +8,8 @@ const MJPEG_MODE_SUPPORTED_FORMATS = new Set([
'YUYV',
'YVYU',
'NV12',
'NV16',
'NV24',
'RGB24',
'BGR24',
])
@@ -20,6 +22,7 @@ const CONFIG_SUPPORTED_FORMATS = new Set([
'NV12',
'NV21',
'NV16',
'NV24',
'YUV420',
'RGB24',
'BGR24',
@@ -32,6 +35,7 @@ const WEBRTC_SUPPORTED_FORMATS = new Set([
'NV12',
'NV21',
'NV16',
'NV24',
'YUV420',
'RGB24',
'BGR24',
@@ -45,14 +49,10 @@ function isCompressedFormat(formatName: string): boolean {
return formatName === 'MJPEG' || formatName === 'JPEG'
}
function isRkmppBackend(backendId?: string): boolean {
return backendId?.toLowerCase() === 'rkmpp'
}
export function getVideoFormatState(
formatName: string,
context: VideoFormatSupportContext,
encoderBackend = 'auto',
_encoderBackend = 'auto',
): VideoFormatState {
const normalizedFormat = normalizeFormat(formatName)
@@ -64,12 +64,6 @@ export function getVideoFormatState(
if (CONFIG_SUPPORTED_FORMATS.has(normalizedFormat)) {
return 'supported'
}
if (
normalizedFormat === 'NV24'
&& isRkmppBackend(encoderBackend)
) {
return 'supported'
}
return 'unsupported'
}
@@ -77,14 +71,6 @@ export function getVideoFormatState(
return isCompressedFormat(normalizedFormat) ? 'not_recommended' : 'supported'
}
if (
normalizedFormat === 'NV24'
&& isRkmppBackend(encoderBackend)
&& (context === 'h264' || context === 'h265')
) {
return 'supported'
}
return 'unsupported'
}

View File

@@ -17,6 +17,7 @@ import type { HidKeyboardEvent, HidMouseEvent } from '@/types/hid'
import { keyboardEventToCanonicalKey, updateModifierMaskForKey } from '@/lib/keyboardMappings'
import { toast } from 'vue-sonner'
import { generateUUID } from '@/lib/utils'
import { formatFpsValue } from '@/lib/fps'
import type { VideoMode } from '@/components/VideoConfigPopover.vue'
// Components
@@ -25,6 +26,7 @@ import ActionBar from '@/components/ActionBar.vue'
import InfoBar from '@/components/InfoBar.vue'
import VirtualKeyboard from '@/components/VirtualKeyboard.vue'
import StatsSheet from '@/components/StatsSheet.vue'
import LanguageToggleButton from '@/components/LanguageToggleButton.vue'
import { Button } from '@/components/ui/button'
import { Spinner } from '@/components/ui/spinner'
import {
@@ -50,16 +52,14 @@ import {
LogOut,
Sun,
Moon,
Languages,
ChevronDown,
Terminal,
ExternalLink,
KeyRound,
Loader2,
} from 'lucide-vue-next'
import { setLanguage } from '@/i18n'
const { t, locale } = useI18n()
const { t } = useI18n()
const router = useRouter()
const systemStore = useSystemStore()
const configStore = useConfigStore()
@@ -212,7 +212,7 @@ const videoQuickInfo = computed(() => {
const stream = systemStore.stream
if (!stream?.resolution) return ''
const resShort = getResolutionShortName(stream.resolution[0], stream.resolution[1])
return `${resShort} ${backendFps.value}fps`
return `${resShort} ${formatFpsValue(backendFps.value)}fps`
})
const videoDetails = computed<StatusDetail[]>(() => {
@@ -227,8 +227,8 @@ const videoDetails = computed<StatusDetail[]>(() => {
{ label: t('statusCard.mode'), value: modeDisplay, status: 'ok' },
{ label: t('statusCard.format'), value: stream.format || 'MJPEG' },
{ label: t('statusCard.resolution'), value: stream.resolution ? `${stream.resolution[0]}x${stream.resolution[1]}` : '-' },
{ label: t('statusCard.targetFps'), value: String(stream.targetFps ?? 0) },
{ label: t('statusCard.fps'), value: String(receivedFps), status: receivedFps > 5 ? 'ok' : receivedFps > 0 ? 'warning' : undefined },
{ label: t('statusCard.targetFps'), value: formatFpsValue(stream.targetFps ?? 0) },
{ label: t('statusCard.fps'), value: formatFpsValue(receivedFps), status: receivedFps > 5 ? 'ok' : receivedFps > 0 ? 'warning' : undefined },
]
// Show network error if WebSocket has network issue
@@ -875,7 +875,7 @@ async function handleStreamConfigApplied(data: any) {
videoRestarting.value = false
toast.success(t('console.videoRestarted'), {
description: `${data.device} - ${data.resolution[0]}x${data.resolution[1]} @ ${data.fps}fps`,
description: `${data.device} - ${data.resolution[0]}x${data.resolution[1]} @ ${formatFpsValue(data.fps)}fps`,
duration: 3000,
})
}
@@ -1458,12 +1458,6 @@ function toggleTheme() {
localStorage.setItem('theme', isDark.value ? 'dark' : 'light')
}
// Language toggle
function toggleLanguage() {
const newLang = locale.value === 'zh-CN' ? 'en-US' : 'zh-CN'
setLanguage(newLang)
}
// Logout
async function logout() {
await authStore.logout()
@@ -2306,9 +2300,7 @@ onUnmounted(() => {
</Button>
<!-- Language Toggle -->
<Button variant="ghost" size="icon" class="h-8 w-8 hidden md:flex" :aria-label="t('common.toggleLanguage')" @click="toggleLanguage">
<Languages class="h-4 w-4" />
</Button>
<LanguageToggleButton class="h-8 w-8 hidden md:flex" />
<!-- User Menu -->
<DropdownMenu>
@@ -2324,9 +2316,13 @@ onUnmounted(() => {
<Moon v-else class="h-4 w-4 mr-2" />
{{ isDark ? t('settings.lightMode') : t('settings.darkMode') }}
</DropdownMenuItem>
<DropdownMenuItem class="md:hidden" @click="toggleLanguage">
<Languages class="h-4 w-4 mr-2" />
{{ locale === 'zh-CN' ? 'English' : '中文' }}
<DropdownMenuItem as-child class="md:hidden p-0">
<LanguageToggleButton
label-mode="next"
size="sm"
variant="ghost"
class="w-full justify-start rounded-sm px-2 py-1.5 font-normal shadow-none"
/>
</DropdownMenuItem>
<DropdownMenuSeparator class="md:hidden" />
<DropdownMenuItem @click="changePasswordDialogOpen = true">

View File

@@ -3,15 +3,11 @@ import { ref } from 'vue'
import { useRouter, useRoute } from 'vue-router'
import { useI18n } from 'vue-i18n'
import { useAuthStore } from '@/stores/auth'
import {
setLanguage,
getCurrentLanguage,
type SupportedLocale,
} from '@/i18n'
import { Button } from '@/components/ui/button'
import { Input } from '@/components/ui/input'
import { Label } from '@/components/ui/label'
import { Card, CardContent, CardDescription, CardHeader, CardTitle } from '@/components/ui/card'
import LanguageToggleButton from '@/components/LanguageToggleButton.vue'
import { Monitor, Lock, Eye, EyeOff, User } from 'lucide-vue-next'
const { t } = useI18n()
@@ -19,18 +15,12 @@ const router = useRouter()
const route = useRoute()
const authStore = useAuthStore()
const currentLanguage = ref<SupportedLocale>(getCurrentLanguage())
const username = ref('')
const password = ref('')
const showPassword = ref(false)
const loading = ref(false)
const error = ref('')
function handleLanguageChange(lang: SupportedLocale) {
currentLanguage.value = lang
setLanguage(lang)
}
async function handleLogin() {
if (!username.value) {
error.value = t('auth.enterUsername')
@@ -60,21 +50,8 @@ async function handleLogin() {
<template>
<div class="min-h-screen min-h-dvh flex items-center justify-center bg-background p-4">
<Card class="relative w-full max-w-sm">
<div class="absolute top-4 right-4 flex gap-2">
<Button
:variant="currentLanguage === 'zh-CN' ? 'default' : 'outline'"
size="sm"
@click="handleLanguageChange('zh-CN')"
>
中文
</Button>
<Button
:variant="currentLanguage === 'en-US' ? 'default' : 'outline'"
size="sm"
@click="handleLanguageChange('en-US')"
>
English
</Button>
<div class="absolute top-4 right-4">
<LanguageToggleButton />
</div>
<CardHeader class="space-y-2 pt-10 text-center sm:pt-12">

View File

@@ -37,10 +37,11 @@ import type {
OtgHidProfile,
OtgHidFunctions,
} from '@/types/generated'
import { setLanguage } from '@/i18n'
import { formatFpsLabel, toConfigFps } from '@/lib/fps'
import { useClipboard } from '@/composables/useClipboard'
import { getVideoFormatState } from '@/lib/video-format-support'
import AppLayout from '@/components/AppLayout.vue'
import LanguageToggleButton from '@/components/LanguageToggleButton.vue'
import { Button } from '@/components/ui/button'
import { Card, CardContent, CardDescription, CardHeader, CardTitle } from '@/components/ui/card'
import { Input } from '@/components/ui/input'
@@ -85,7 +86,7 @@ import {
Radio,
} from 'lucide-vue-next'
const { t, te, locale } = useI18n()
const { t, te } = useI18n()
const route = useRoute()
const systemStore = useSystemStore()
const configStore = useConfigStore()
@@ -902,13 +903,6 @@ function setTheme(newTheme: 'light' | 'dark' | 'system') {
}
}
// Language handling
function handleLanguageChange(lang: string) {
if (lang === 'zh-CN' || lang === 'en-US') {
setLanguage(lang)
}
}
// Account updates
async function changeUsername() {
usernameError.value = ''
@@ -991,7 +985,7 @@ async function saveConfig() {
format: config.value.video_format || undefined,
width: config.value.video_width,
height: config.value.video_height,
fps: config.value.video_fps,
fps: toConfigFps(config.value.video_fps),
})
)
// Save Stream/Encoder and STUN/TURN config together
@@ -2022,9 +2016,8 @@ watch(() => route.query.tab, (tab) => {
<CardDescription>{{ t('settings.languageDesc') }}</CardDescription>
</CardHeader>
<CardContent>
<div class="flex gap-2">
<Button :variant="locale === 'zh-CN' ? 'default' : 'outline'" size="sm" @click="handleLanguageChange('zh-CN')">中文</Button>
<Button :variant="locale === 'en-US' ? 'default' : 'outline'" size="sm" @click="handleLanguageChange('en-US')">English</Button>
<div class="flex">
<LanguageToggleButton variant="outline" size="sm" label-mode="current" />
</div>
</CardContent>
</Card>
@@ -2132,8 +2125,8 @@ watch(() => route.query.tab, (tab) => {
<div class="space-y-2">
<Label for="video-fps">{{ t('settings.frameRate') }}</Label>
<select id="video-fps" v-model.number="config.video_fps" class="w-full h-9 px-3 rounded-md border border-input bg-background text-sm" :disabled="!config.video_format">
<option v-for="fps in availableFps" :key="fps" :value="fps">{{ fps }} FPS</option>
<option v-if="!availableFps.includes(config.video_fps)" :value="config.video_fps">{{ config.video_fps }} FPS</option>
<option v-for="fps in availableFps" :key="fps" :value="fps">{{ formatFpsLabel(fps) }}</option>
<option v-if="!availableFps.includes(config.video_fps)" :value="config.video_fps">{{ formatFpsLabel(config.video_fps) }}</option>
</select>
</div>
</div>

View File

@@ -4,12 +4,8 @@ import { useRouter } from 'vue-router'
import { useI18n } from 'vue-i18n'
import { useAuthStore } from '@/stores/auth'
import { configApi, streamApi, type EncoderBackendInfo } from '@/api'
import {
supportedLanguages,
setLanguage,
getCurrentLanguage,
type SupportedLocale,
} from '@/i18n'
import { formatFpsLabel, toConfigFps } from '@/lib/fps'
import LanguageToggleButton from '@/components/LanguageToggleButton.vue'
import { Button } from '@/components/ui/button'
import { Input } from '@/components/ui/input'
import { Label } from '@/components/ui/label'
@@ -21,12 +17,6 @@ import {
SelectTrigger,
SelectValue,
} from '@/components/ui/select'
import {
DropdownMenu,
DropdownMenuContent,
DropdownMenuItem,
DropdownMenuTrigger,
} from '@/components/ui/dropdown-menu'
import {
HoverCard,
HoverCardContent,
@@ -44,7 +34,6 @@ import {
Keyboard,
Check,
HelpCircle,
Languages,
Puzzle,
} from 'lucide-vue-next'
@@ -52,14 +41,6 @@ const { t } = useI18n()
const router = useRouter()
const authStore = useAuthStore()
// Language switcher
const currentLanguage = ref<SupportedLocale>(getCurrentLanguage())
function switchLanguage(lang: SupportedLocale) {
currentLanguage.value = lang
setLanguage(lang)
}
// Steps: 1 = Account, 2 = Audio/Video, 3 = HID, 4 = Extensions
const step = ref(1)
const totalSteps = 4
@@ -96,14 +77,10 @@ const hidBackend = ref('ch9329')
const ch9329Port = ref('')
const ch9329Baudrate = ref(9600)
const otgUdc = ref('')
const hidOtgProfile = ref('full')
const hidOtgProfile = ref('full_no_consumer')
const otgMsdEnabled = ref(true)
const otgEndpointBudget = ref<'five' | 'six' | 'unlimited'>('six')
const otgKeyboardLeds = ref(true)
const otgProfileTouched = ref(false)
const otgEndpointBudgetTouched = ref(false)
const otgKeyboardLedsTouched = ref(false)
const showAdvancedOtg = ref(false)
// Extension settings
const ttydEnabled = ref(false)
@@ -237,57 +214,17 @@ const otgRequiredEndpoints = computed(() => {
return endpoints
})
const otgProfileHasKeyboard = computed(() =>
hidOtgProfile.value === 'full'
|| hidOtgProfile.value === 'full_no_consumer'
|| hidOtgProfile.value === 'legacy_keyboard'
)
const isOtgEndpointBudgetValid = computed(() => {
const limit = endpointLimitForBudget(otgEndpointBudget.value)
return limit === null || otgRequiredEndpoints.value <= limit
})
const otgEndpointUsageText = computed(() => {
const limit = endpointLimitForBudget(otgEndpointBudget.value)
if (limit === null) {
return t('settings.otgEndpointUsageUnlimited', { used: otgRequiredEndpoints.value })
}
return t('settings.otgEndpointUsage', { used: otgRequiredEndpoints.value, limit })
})
function applyOtgDefaults() {
if (hidBackend.value !== 'otg') return
const recommendedBudget = defaultOtgEndpointBudgetForUdc(otgUdc.value)
if (!otgEndpointBudgetTouched.value) {
otgEndpointBudget.value = recommendedBudget
}
if (!otgProfileTouched.value) {
otgEndpointBudget.value = defaultOtgEndpointBudgetForUdc(otgUdc.value)
hidOtgProfile.value = 'full_no_consumer'
}
if (!otgKeyboardLedsTouched.value) {
otgKeyboardLeds.value = otgEndpointBudget.value !== 'five'
}
}
function onOtgProfileChange(value: unknown) {
hidOtgProfile.value = typeof value === 'string' ? value : 'full'
otgProfileTouched.value = true
}
function onOtgEndpointBudgetChange(value: unknown) {
otgEndpointBudget.value =
value === 'five' || value === 'six' || value === 'unlimited' ? value : 'six'
otgEndpointBudgetTouched.value = true
if (!otgKeyboardLedsTouched.value) {
otgKeyboardLeds.value = otgEndpointBudget.value !== 'five'
}
}
function onOtgKeyboardLedsChange(value: boolean) {
otgKeyboardLeds.value = value
otgKeyboardLedsTouched.value = true
}
// Common baud rates for CH9329
@@ -412,12 +349,6 @@ watch(otgUdc, () => {
applyOtgDefaults()
})
watch(showAdvancedOtg, (open) => {
if (open) {
applyOtgDefaults()
}
})
onMounted(async () => {
try {
const result = await configApi.listDevices()
@@ -585,7 +516,7 @@ async function handleSetup() {
setupData.video_height = height
}
if (videoFps.value) {
setupData.video_fps = videoFps.value
setupData.video_fps = toConfigFps(videoFps.value)
}
// HID settings
@@ -637,27 +568,7 @@ const stepIcons = [User, Video, Keyboard, Puzzle]
<Card class="w-full max-w-lg relative">
<!-- Language Switcher -->
<div class="absolute top-4 right-4">
<DropdownMenu>
<DropdownMenuTrigger as-child>
<Button variant="ghost" size="sm" class="gap-2">
<Languages class="w-4 h-4" />
<span class="text-sm">
{{ supportedLanguages.find((l) => l.code === currentLanguage)?.name }}
</span>
</Button>
</DropdownMenuTrigger>
<DropdownMenuContent align="end">
<DropdownMenuItem
v-for="lang in supportedLanguages"
:key="lang.code"
:class="{ 'bg-accent': lang.code === currentLanguage }"
@click="switchLanguage(lang.code)"
>
<span class="mr-2">{{ lang.flag }}</span>
{{ lang.name }}
</DropdownMenuItem>
</DropdownMenuContent>
</DropdownMenu>
<LanguageToggleButton />
</div>
<CardHeader class="text-center space-y-2 pt-10 sm:pt-12">
@@ -879,7 +790,7 @@ const stepIcons = [User, Video, Keyboard, Puzzle]
</SelectTrigger>
<SelectContent>
<SelectItem v-for="fps in availableFps" :key="fps" :value="fps">
{{ fps }} FPS
{{ formatFpsLabel(fps) }}
</SelectItem>
</SelectContent>
</Select>
@@ -1039,78 +950,6 @@ const stepIcons = [User, Video, Keyboard, Puzzle]
{{ t('setup.noUdcDevices') }}
</p>
</div>
<div class="mt-2 border rounded-lg">
<button
type="button"
class="w-full flex items-center justify-between p-3 text-left hover:bg-muted/50 rounded-lg transition-colors"
:aria-label="t('setup.advancedOtg')"
@click="showAdvancedOtg = !showAdvancedOtg"
>
<span class="text-sm font-medium">
{{ t('setup.otgAdvanced') }} ({{ t('common.optional') }})
</span>
<ChevronRight
class="h-4 w-4 transition-transform duration-200"
:class="{ 'rotate-90': showAdvancedOtg }"
/>
</button>
<div v-if="showAdvancedOtg" class="px-3 pb-3 space-y-3">
<p class="text-xs text-muted-foreground">
{{ t('setup.otgProfileDesc') }}
</p>
<div class="space-y-2">
<Label for="otgProfile">{{ t('setup.otgProfile') }}</Label>
<Select :model-value="hidOtgProfile" @update:modelValue="onOtgProfileChange">
<SelectTrigger>
<SelectValue />
</SelectTrigger>
<SelectContent>
<SelectItem value="full">{{ t('settings.otgProfileFull') }}</SelectItem>
<SelectItem value="full_no_consumer">{{ t('settings.otgProfileFullNoConsumer') }}</SelectItem>
<SelectItem value="legacy_keyboard">{{ t('settings.otgProfileLegacyKeyboard') }}</SelectItem>
<SelectItem value="legacy_mouse_relative">{{ t('settings.otgProfileLegacyMouseRelative') }}</SelectItem>
</SelectContent>
</Select>
</div>
<div class="space-y-2">
<Label for="otgEndpointBudget">{{ t('settings.otgEndpointBudget') }}</Label>
<Select :model-value="otgEndpointBudget" @update:modelValue="onOtgEndpointBudgetChange">
<SelectTrigger>
<SelectValue />
</SelectTrigger>
<SelectContent>
<SelectItem value="five">5</SelectItem>
<SelectItem value="six">6</SelectItem>
<SelectItem value="unlimited">{{ t('settings.otgEndpointBudgetUnlimited') }}</SelectItem>
</SelectContent>
</Select>
<p class="text-xs text-muted-foreground">
{{ otgEndpointUsageText }}
</p>
</div>
<div class="flex items-center justify-between rounded-md border border-border/60 p-3">
<div>
<Label>{{ t('settings.otgKeyboardLeds') }}</Label>
<p class="text-xs text-muted-foreground">{{ t('settings.otgKeyboardLedsDesc') }}</p>
</div>
<Switch :model-value="otgKeyboardLeds" :disabled="!otgProfileHasKeyboard" @update:model-value="onOtgKeyboardLedsChange" />
</div>
<div class="flex items-center justify-between rounded-md border border-border/60 p-3">
<div>
<Label>{{ t('settings.otgFunctionMsd') }}</Label>
<p class="text-xs text-muted-foreground">{{ t('settings.otgFunctionMsdDesc') }}</p>
</div>
<Switch v-model="otgMsdEnabled" />
</div>
<p class="text-xs text-muted-foreground">
{{ t('settings.otgEndpointBudgetHint') }}
</p>
<p v-if="!isOtgEndpointBudgetValid" class="text-xs text-amber-600 dark:text-amber-400">
{{ t('settings.otgEndpointExceeded', { used: otgRequiredEndpoints, limit: otgEndpointBudget === 'unlimited' ? t('settings.otgEndpointBudgetUnlimited') : otgEndpointBudget === 'five' ? '5' : '6' }) }}
</p>
</div>
</div>
</div>
</div>