mirror of
https://github.com/mofeng-git/One-KVM.git
synced 2026-01-28 16:41:52 +08:00
Remove feature modules not needed for the IP-KVM use case:
- Remove the VRAM module (direct encode/decode in GPU memory)
- Remove the Mux module (video stream muxing)
- Remove macOS/Android platform support
- Remove the external SDK dependency (~9 MB)
- Remove development tools and sample programs
Simplify the decoder to support only MJPEG (the format output by capture cards)
Simplify the NVIDIA detection code (use dlopen instead of the SDK)
Bump the version to 0.8.0
Update the related technical documentation
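For context on the "dlopen instead of the SDK" point, here is a minimal sketch of what SDK-free NVIDIA detection can look like. It assumes the `libc` crate and that probing for `libnvcuvid.so.1` (the NVDEC runtime library) is a sufficient check; neither the crate usage nor the library name is taken from this repository.

// Hypothetical sketch: probe for the NVDEC runtime with dlopen instead of
// linking against the NVIDIA SDK. The library name is an assumption.
use std::ffi::CString;

fn nvidia_decoder_present() -> bool {
    let name = CString::new("libnvcuvid.so.1").expect("no interior NUL");
    // RTLD_LAZY: resolve symbols only when first used.
    let handle = unsafe { libc::dlopen(name.as_ptr(), libc::RTLD_LAZY) };
    if handle.is_null() {
        return false; // library not installed; assume no NVIDIA decode support
    }
    unsafe { libc::dlclose(handle) };
    true
}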
203 lines
6.1 KiB
Rust
use crate::ffmpeg::{init_av_log, AVHWDeviceType::*};
use crate::{
    common::DataFormat::*,
    ffmpeg::{AVHWDeviceType, AVPixelFormat},
    ffmpeg_ram::{
        ffmpeg_ram_decode, ffmpeg_ram_free_decoder, ffmpeg_ram_new_decoder, CodecInfo,
        AV_NUM_DATA_POINTERS, Priority,
    },
};
use log::error;
use std::{
    ffi::{c_void, CString},
    os::raw::c_int,
    slice::from_raw_parts,
    vec,
};

/// Parameters for creating a RAM (system-memory) decoder.
#[derive(Debug, Clone)]
pub struct DecodeContext {
    pub name: String,
    pub device_type: AVHWDeviceType,
    pub thread_count: i32,
}

/// One decoded frame with its pixel data copied into owned buffers,
/// one `Vec<u8>` per plane plus the matching linesizes.
pub struct DecodeFrame {
    pub pixfmt: AVPixelFormat,
    pub width: i32,
    pub height: i32,
    pub data: Vec<Vec<u8>>,
    pub linesize: Vec<i32>,
    pub key: bool,
}

impl std::fmt::Display for DecodeFrame {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut s = String::from("data:");
        for data in self.data.iter() {
            s.push_str(format!("{} ", data.len()).as_str());
        }
        s.push_str(", linesize:");
        for linesize in self.linesize.iter() {
            s.push_str(format!("{} ", linesize).as_str());
        }

        write!(
            f,
            "pixfmt:{}, width:{}, height:{}, key:{}, {}",
            self.pixfmt as i32, self.width, self.height, self.key, s,
        )
    }
}

/// Decoder backed by the C `ffmpeg_ram` wrapper; `codec` and `frames`
/// are raw pointers owned by this struct and released in `Drop`.
pub struct Decoder {
    codec: *mut c_void,
    frames: *mut Vec<DecodeFrame>,
    pub ctx: DecodeContext,
}

unsafe impl Send for Decoder {}
unsafe impl Sync for Decoder {}

impl Decoder {
    /// Creates a decoder for the codec named in `ctx` (e.g. "mjpeg").
    /// Returns `Err(())` if the underlying C decoder could not be created.
    pub fn new(ctx: DecodeContext) -> Result<Self, ()> {
        init_av_log();
        unsafe {
            let codec = ffmpeg_ram_new_decoder(
                CString::new(ctx.name.as_str()).map_err(|_| ())?.as_ptr(),
                ctx.device_type as _,
                ctx.thread_count,
                Some(Decoder::callback),
            );

            if codec.is_null() {
                return Err(());
            }

            Ok(Decoder {
                codec,
                frames: Box::into_raw(Box::new(Vec::<DecodeFrame>::new())),
                ctx,
            })
        }
    }

    /// Feeds one encoded packet to the decoder and returns the frames
    /// collected by the C callback. On failure the negative error code
    /// from the C wrapper is returned.
    pub fn decode(&mut self, packet: &[u8]) -> Result<&mut Vec<DecodeFrame>, i32> {
        unsafe {
            (&mut *self.frames).clear();
            let ret = ffmpeg_ram_decode(
                self.codec,
                packet.as_ptr(),
                packet.len() as c_int,
                self.frames as *const _ as *const c_void,
            );

            if ret < 0 {
                Err(ret)
            } else {
                Ok(&mut *self.frames)
            }
        }
    }

unsafe extern "C" fn callback(
|
|
obj: *const c_void,
|
|
width: c_int,
|
|
height: c_int,
|
|
pixfmt: c_int,
|
|
linesizes: *mut c_int,
|
|
datas: *mut *mut u8,
|
|
key: c_int,
|
|
) {
|
|
let frames = &mut *(obj as *mut Vec<DecodeFrame>);
|
|
let datas = from_raw_parts(datas, AV_NUM_DATA_POINTERS as _);
|
|
let linesizes = from_raw_parts(linesizes, AV_NUM_DATA_POINTERS as _);
|
|
|
|
let mut frame = DecodeFrame {
|
|
pixfmt: std::mem::transmute(pixfmt),
|
|
width,
|
|
height,
|
|
data: vec![],
|
|
linesize: vec![],
|
|
key: key != 0,
|
|
};
|
|
|
|
// Handle YUV420P and YUVJ420P (JPEG full-range) - same memory layout
|
|
if pixfmt == AVPixelFormat::AV_PIX_FMT_YUV420P as c_int
|
|
|| pixfmt == AVPixelFormat::AV_PIX_FMT_YUVJ420P as c_int
|
|
{
|
|
let y = from_raw_parts(datas[0], (linesizes[0] * height) as usize).to_vec();
|
|
let u = from_raw_parts(datas[1], (linesizes[1] * height / 2) as usize).to_vec();
|
|
let v = from_raw_parts(datas[2], (linesizes[2] * height / 2) as usize).to_vec();
|
|
|
|
frame.data.push(y);
|
|
frame.data.push(u);
|
|
frame.data.push(v);
|
|
|
|
frame.linesize.push(linesizes[0]);
|
|
frame.linesize.push(linesizes[1]);
|
|
frame.linesize.push(linesizes[2]);
|
|
|
|
frames.push(frame);
|
|
} else if pixfmt == AVPixelFormat::AV_PIX_FMT_YUV422P as c_int
|
|
|| pixfmt == AVPixelFormat::AV_PIX_FMT_YUVJ422P as c_int
|
|
{
|
|
// YUV422P: U and V planes have same height as Y (not half)
|
|
let y = from_raw_parts(datas[0], (linesizes[0] * height) as usize).to_vec();
|
|
let u = from_raw_parts(datas[1], (linesizes[1] * height) as usize).to_vec();
|
|
let v = from_raw_parts(datas[2], (linesizes[2] * height) as usize).to_vec();
|
|
|
|
frame.data.push(y);
|
|
frame.data.push(u);
|
|
frame.data.push(v);
|
|
|
|
frame.linesize.push(linesizes[0]);
|
|
frame.linesize.push(linesizes[1]);
|
|
frame.linesize.push(linesizes[2]);
|
|
|
|
frames.push(frame);
|
|
} else if pixfmt == AVPixelFormat::AV_PIX_FMT_NV12 as c_int
|
|
|| pixfmt == AVPixelFormat::AV_PIX_FMT_NV21 as c_int
|
|
{
|
|
let y = from_raw_parts(datas[0], (linesizes[0] * height) as usize).to_vec();
|
|
let uv = from_raw_parts(datas[1], (linesizes[1] * height / 2) as usize).to_vec();
|
|
|
|
frame.data.push(y);
|
|
frame.data.push(uv);
|
|
|
|
frame.linesize.push(linesizes[0]);
|
|
frame.linesize.push(linesizes[1]);
|
|
|
|
frames.push(frame);
|
|
} else {
|
|
error!("unsupported pixfmt {}", pixfmt as i32);
|
|
}
|
|
}
|
|
|
|
    /// Returns the available decoders for the IP-KVM scenario.
    /// Only the MJPEG software decoder is supported, as IP-KVM captures from
    /// video capture cards that output MJPEG streams.
    pub fn available_decoders() -> Vec<CodecInfo> {
        // The IP-KVM scenario only needs MJPEG decoding;
        // MJPEG comes from video capture cards, so software decoding is sufficient.
        vec![CodecInfo {
            name: "mjpeg".to_owned(),
            format: MJPEG,
            hwdevice: AV_HWDEVICE_TYPE_NONE,
            priority: Priority::Best as _,
            ..Default::default()
        }]
    }
}

impl Drop for Decoder {
    fn drop(&mut self) {
        unsafe {
            // Free the C decoder, then reclaim and drop the boxed frame vector.
            ffmpeg_ram_free_decoder(self.codec);
            self.codec = std::ptr::null_mut();
            let _ = Box::from_raw(self.frames);
        }
    }
}
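A hypothetical usage sketch (not part of this file): a caller picks the MJPEG entry from available_decoders(), builds a DecodeContext from it, and feeds capture-card packets to decode(). The function name `decode_one`, the thread count, and creating a fresh decoder per packet are illustrative only; in practice the decoder would be reused across packets.

// Usage sketch: decode one MJPEG packet captured from the video capture card.
fn decode_one(packet: &[u8]) -> Result<(), i32> {
    let info = Decoder::available_decoders().remove(0); // the "mjpeg" software decoder
    let mut decoder = Decoder::new(DecodeContext {
        name: info.name.clone(),
        device_type: info.hwdevice,
        thread_count: 4, // illustrative value
    })
    .map_err(|_| -1)?;

    for frame in decoder.decode(packet)? {
        // Each frame carries owned plane buffers (Y/U/V or Y/UV) plus linesizes.
        println!("{}", frame);
    }
    Ok(())
}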