Mirror of https://github.com/mofeng-git/One-KVM.git, synced 2026-01-28 16:41:52 +08:00
refactor(hwcodec): streamline the FFmpeg build configuration and remove the decoders

- Trim the FFmpeg build options, disabling libraries that are no longer needed (avformat, swscale, swresample, avfilter, etc.)
- Disable all decoders and most encoders, keeping only the H264/H265/VP8/VP9 encoders that are actually used
- Remove the hwcodec decoder module; MJPEG decoding now goes through libyuv (a rough sketch of this path follows below)
- Remove MJPEG encoder support
- Add libmfx on x86_64 to support the QSV encoder
- Fix the H265 RKMPP encoder so it accepts YUYV input directly
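The decoder removal means MJPEG frames from the capture card are now converted with libyuv instead of being routed through FFmpeg. The following is a minimal sketch of what that path can look like, not the project's actual code: it declares libyuv's `MJPGToI420` by hand, assumes the crate links against libyuv, and the `DecodedI420` struct and function name are illustrative only.

```rust
// Hedged sketch: decoding one MJPEG capture frame to I420 with libyuv,
// replacing the removed FFmpeg-based MJPEG decoder. Assumes the build links
// libyuv; `DecodedI420` and all names here are illustrative.
use std::os::raw::{c_int, c_uchar};

extern "C" {
    // Declared by hand from libyuv/convert.h; returns 0 on success.
    fn MJPGToI420(
        sample: *const c_uchar, sample_size: usize,
        dst_y: *mut c_uchar, dst_stride_y: c_int,
        dst_u: *mut c_uchar, dst_stride_u: c_int,
        dst_v: *mut c_uchar, dst_stride_v: c_int,
        src_width: c_int, src_height: c_int,
        dst_width: c_int, dst_height: c_int,
    ) -> c_int;
}

pub struct DecodedI420 {
    pub y: Vec<u8>,
    pub u: Vec<u8>,
    pub v: Vec<u8>,
    pub width: i32,
    pub height: i32,
}

/// Decode one MJPEG frame into freshly allocated I420 planes.
/// Assumes even width/height and that `width`/`height` match the JPEG.
pub fn mjpeg_to_i420(jpeg: &[u8], width: i32, height: i32) -> Result<DecodedI420, ()> {
    let (w, h) = (width as usize, height as usize);
    let mut y = vec![0u8; w * h];
    let mut u = vec![0u8; (w / 2) * (h / 2)];
    let mut v = vec![0u8; (w / 2) * (h / 2)];
    let ret = unsafe {
        MJPGToI420(
            jpeg.as_ptr(), jpeg.len(),
            y.as_mut_ptr(), width,
            u.as_mut_ptr(), width / 2,
            v.as_mut_ptr(), width / 2,
            width, height, // source dimensions, taken from the capture format
            width, height, // destination dimensions (no scaling)
        )
    };
    if ret == 0 {
        Ok(DecodedI420 { y, u, v, width, height })
    } else {
        Err(())
    }
}
```

A production path would first read the JPEG's own dimensions (e.g. via libyuv's `MJPGSize`) before allocating the planes, rather than trusting the configured capture resolution.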
@@ -101,7 +101,8 @@ mod ffmpeg {
    fn link_system_ffmpeg(builder: &mut Build) {
        use std::process::Command;

-        let libs = ["libavcodec", "libavutil", "libavformat", "libswscale"];
+        // Only need libavcodec and libavutil for encoding
+        let libs = ["libavcodec", "libavutil"];

        for lib in &libs {
            // Get cflags
@@ -134,7 +135,7 @@ mod ffmpeg {
                    }
                }
            } else {
-                panic!("pkg-config failed for {}. Install FFmpeg development libraries: sudo apt install libavcodec-dev libavformat-dev libavutil-dev libswscale-dev", lib);
+                panic!("pkg-config failed for {}. Install FFmpeg development libraries: sudo apt install libavcodec-dev libavutil-dev", lib);
            }
        } else {
            panic!("pkg-config not found. Install pkg-config and FFmpeg development libraries.");
@@ -178,7 +179,8 @@ mod ffmpeg {
        )
    );
    {
-        let mut static_libs = vec!["avcodec", "avutil", "avformat"];
+        // Only need avcodec and avutil for encoding
+        let mut static_libs = vec!["avcodec", "avutil"];
        if target_os == "windows" {
            static_libs.push("libmfx");
        }
@@ -251,7 +253,7 @@ mod ffmpeg {
            .unwrap();

        builder.files(
-            ["ffmpeg_ram_encode.cpp", "ffmpeg_ram_decode.cpp"].map(|f| ffmpeg_ram_dir.join(f)),
+            ["ffmpeg_ram_encode.cpp"].map(|f| ffmpeg_ram_dir.join(f)),
        );
    }
}

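The `link_system_ffmpeg` change above shrinks the pkg-config probe to the two libraries the encoder still needs. As an illustration only (the helper name and error handling below are assumptions, not the repository's build script), a trimmed build.rs probe can look like this:

```rust
// Minimal sketch of probing only libavcodec and libavutil via pkg-config,
// mirroring the trimmed `libs` list above. Names here are illustrative.
use std::process::Command;

fn emit_ffmpeg_link_flags() {
    // Only avcodec and avutil are needed once decoding moves out of FFmpeg.
    for lib in ["libavcodec", "libavutil"] {
        let output = Command::new("pkg-config")
            .args(["--libs", lib])
            .output()
            .unwrap_or_else(|_| panic!("pkg-config not found while probing {lib}"));
        if !output.status.success() {
            panic!("pkg-config failed for {lib}; install the FFmpeg development package");
        }
        // Forward each `-L<dir>` / `-l<name>` flag to rustc.
        for flag in String::from_utf8_lossy(&output.stdout).split_whitespace() {
            if let Some(dir) = flag.strip_prefix("-L") {
                println!("cargo:rustc-link-search=native={dir}");
            } else if let Some(name) = flag.strip_prefix("-l") {
                println!("cargo:rustc-link-lib={name}");
            }
        }
    }
}

fn main() {
    emit_ffmpeg_link_flags();
}
```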
@@ -28,7 +28,6 @@ enum DataFormat {
  VP8,
  VP9,
  AV1,
-  MJPEG,
};

// same as Driver

@@ -1,328 +0,0 @@
// https://github.com/FFmpeg/FFmpeg/blob/master/doc/examples/hw_decode.c
// https://github.com/FFmpeg/FFmpeg/blob/master/doc/examples/decode_video.c

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavutil/log.h>
#include <libavutil/opt.h>
#include <libavutil/pixdesc.h>
}

#include <memory>
#include <stdbool.h>

#define LOG_MODULE "FFMPEG_RAM_DEC"
#include <log.h>
#include <util.h>

#ifdef _WIN32
#include <libavutil/hwcontext_d3d11va.h>
#endif

#include "common.h"
#include "system.h"

// #define CFG_PKG_TRACE

namespace {
typedef void (*RamDecodeCallback)(const void *obj, int width, int height,
                                  enum AVPixelFormat pixfmt,
                                  int linesize[AV_NUM_DATA_POINTERS],
                                  uint8_t *data[AV_NUM_DATA_POINTERS], int key);

class FFmpegRamDecoder {
public:
  AVCodecContext *c_ = NULL;
  AVBufferRef *hw_device_ctx_ = NULL;
  AVFrame *sw_frame_ = NULL;
  AVFrame *frame_ = NULL;
  AVPacket *pkt_ = NULL;
  bool hwaccel_ = true;

  std::string name_;
  AVHWDeviceType device_type_ = AV_HWDEVICE_TYPE_NONE;
  int thread_count_ = 1;
  RamDecodeCallback callback_ = NULL;
  DataFormat data_format_;

#ifdef CFG_PKG_TRACE
  int in_ = 0;
  int out_ = 0;
#endif

  FFmpegRamDecoder(const char *name, int device_type, int thread_count,
                   RamDecodeCallback callback) {
    this->name_ = name;
    this->device_type_ = (AVHWDeviceType)device_type;
    this->thread_count_ = thread_count;
    this->callback_ = callback;
  }

  ~FFmpegRamDecoder() {}

  void free_decoder() {
    if (frame_)
      av_frame_free(&frame_);
    if (pkt_)
      av_packet_free(&pkt_);
    if (sw_frame_)
      av_frame_free(&sw_frame_);
    if (c_)
      avcodec_free_context(&c_);
    if (hw_device_ctx_)
      av_buffer_unref(&hw_device_ctx_);

    frame_ = NULL;
    pkt_ = NULL;
    sw_frame_ = NULL;
    c_ = NULL;
    hw_device_ctx_ = NULL;
  }
  int reset() {
    if (name_.find("h264") != std::string::npos) {
      data_format_ = DataFormat::H264;
    } else if (name_.find("hevc") != std::string::npos) {
      data_format_ = DataFormat::H265;
    } else if (name_.find("mjpeg") != std::string::npos) {
      data_format_ = DataFormat::MJPEG;
    } else {
      LOG_ERROR(std::string("unsupported data format:") + name_);
      return -1;
    }
    free_decoder();
    const AVCodec *codec = NULL;
    hwaccel_ = device_type_ != AV_HWDEVICE_TYPE_NONE;
    int ret;
    if (!(codec = avcodec_find_decoder_by_name(name_.c_str()))) {
      LOG_ERROR(std::string("avcodec_find_decoder_by_name ") + name_ + " failed");
      return -1;
    }
    if (!(c_ = avcodec_alloc_context3(codec))) {
      LOG_ERROR(std::string("Could not allocate video codec context"));
      return -1;
    }

    c_->flags |= AV_CODEC_FLAG_LOW_DELAY;
    c_->thread_count =
        device_type_ != AV_HWDEVICE_TYPE_NONE ? 1 : thread_count_;
    c_->thread_type = FF_THREAD_SLICE;

    if (name_.find("qsv") != std::string::npos) {
      if ((ret = av_opt_set(c_->priv_data, "async_depth", "1", 0)) < 0) {
        LOG_ERROR(std::string("qsv set opt async_depth 1 failed"));
        return -1;
      }
      // https://github.com/FFmpeg/FFmpeg/blob/c6364b711bad1fe2fbd90e5b2798f87080ddf5ea/libavcodec/qsvdec.c#L932
      // for disable warning
      c_->pkt_timebase = av_make_q(1, 30);
    }

    if (hwaccel_) {
      ret =
          av_hwdevice_ctx_create(&hw_device_ctx_, device_type_, NULL, NULL, 0);
      if (ret < 0) {
        LOG_ERROR(std::string("av_hwdevice_ctx_create failed, ret = ") + av_err2str(ret));
        return -1;
      }
      c_->hw_device_ctx = av_buffer_ref(hw_device_ctx_);
      if (!check_support()) {
        LOG_ERROR(std::string("check_support failed"));
        return -1;
      }
      if (!(sw_frame_ = av_frame_alloc())) {
        LOG_ERROR(std::string("av_frame_alloc failed"));
        return -1;
      }
    }

    if (!(pkt_ = av_packet_alloc())) {
      LOG_ERROR(std::string("av_packet_alloc failed"));
      return -1;
    }

    if (!(frame_ = av_frame_alloc())) {
      LOG_ERROR(std::string("av_frame_alloc failed"));
      return -1;
    }

    if ((ret = avcodec_open2(c_, codec, NULL)) != 0) {
      LOG_ERROR(std::string("avcodec_open2 failed, ret = ") + av_err2str(ret));
      return -1;
    }
#ifdef CFG_PKG_TRACE
    in_ = 0;
    out_ = 0;
#endif

    return 0;
  }

  int decode(const uint8_t *data, int length, const void *obj) {
    int ret = -1;
#ifdef CFG_PKG_TRACE
    in_++;
    LOG_DEBUG(std::string("delay DI: in:") + in_ + " out:" + out_);
#endif

    if (!data || !length) {
      LOG_ERROR(std::string("illegal decode parameter"));
      return -1;
    }
    pkt_->data = (uint8_t *)data;
    pkt_->size = length;
    ret = do_decode(obj);
    return ret;
  }

private:
  int do_decode(const void *obj) {
    int ret;
    AVFrame *tmp_frame = NULL;
    bool decoded = false;

    ret = avcodec_send_packet(c_, pkt_);
    if (ret < 0) {
      LOG_ERROR(std::string("avcodec_send_packet failed, ret = ") + av_err2str(ret));
      return ret;
    }
    auto start = util::now();
    while (ret >= 0 && util::elapsed_ms(start) < ENCODE_TIMEOUT_MS) {
      if ((ret = avcodec_receive_frame(c_, frame_)) != 0) {
        if (ret != AVERROR(EAGAIN)) {
          LOG_ERROR(std::string("avcodec_receive_frame failed, ret = ") + av_err2str(ret));
        }
        goto _exit;
      }

      if (hwaccel_) {
        if (!frame_->hw_frames_ctx) {
          LOG_ERROR(std::string("hw_frames_ctx is NULL"));
          goto _exit;
        }
        if ((ret = av_hwframe_transfer_data(sw_frame_, frame_, 0)) < 0) {
          LOG_ERROR(std::string("av_hwframe_transfer_data failed, ret = ") +
                    av_err2str(ret));
          goto _exit;
        }

        tmp_frame = sw_frame_;
      } else {
        tmp_frame = frame_;
      }
      decoded = true;
#ifdef CFG_PKG_TRACE
      out_++;
      LOG_DEBUG(std::string("delay DO: in:") + in_ + " out:" + out_);
#endif
#if FF_API_FRAME_KEY
      int key_frame = frame_->flags & AV_FRAME_FLAG_KEY;
#else
      int key_frame = frame_->key_frame;
#endif

      callback_(obj, tmp_frame->width, tmp_frame->height,
                (AVPixelFormat)tmp_frame->format, tmp_frame->linesize,
                tmp_frame->data, key_frame);
    }
  _exit:
    av_packet_unref(pkt_);
    return decoded ? 0 : -1;
  }

  bool check_support() {
#ifdef _WIN32
    if (device_type_ == AV_HWDEVICE_TYPE_D3D11VA) {
      if (!c_->hw_device_ctx) {
        LOG_ERROR(std::string("hw_device_ctx is NULL"));
        return false;
      }
      AVHWDeviceContext *deviceContext =
          (AVHWDeviceContext *)hw_device_ctx_->data;
      if (!deviceContext) {
        LOG_ERROR(std::string("deviceContext is NULL"));
        return false;
      }
      AVD3D11VADeviceContext *d3d11vaDeviceContext =
          (AVD3D11VADeviceContext *)deviceContext->hwctx;
      if (!d3d11vaDeviceContext) {
        LOG_ERROR(std::string("d3d11vaDeviceContext is NULL"));
        return false;
      }
      ID3D11Device *device = d3d11vaDeviceContext->device;
      if (!device) {
        LOG_ERROR(std::string("device is NULL"));
        return false;
      }
      std::unique_ptr<NativeDevice> native_ = std::make_unique<NativeDevice>();
      if (!native_) {
        LOG_ERROR(std::string("Failed to create native device"));
        return false;
      }
      if (!native_->Init(0, (ID3D11Device *)device, 0)) {
        LOG_ERROR(std::string("Failed to init native device"));
        return false;
      }
      if (!native_->support_decode(data_format_)) {
        LOG_ERROR(std::string("Failed to check support ") + name_);
        return false;
      }
      return true;
    } else {
      return true;
    }
#else
    return true;
#endif
  }
};

} // namespace

extern "C" void ffmpeg_ram_free_decoder(FFmpegRamDecoder *decoder) {
  try {
    if (!decoder)
      return;
    decoder->free_decoder();
    delete decoder;
    decoder = NULL;
  } catch (const std::exception &e) {
    LOG_ERROR(std::string("ffmpeg_ram_free_decoder exception:") + e.what());
  }
}

extern "C" FFmpegRamDecoder *
ffmpeg_ram_new_decoder(const char *name, int device_type, int thread_count,
                       RamDecodeCallback callback) {
  FFmpegRamDecoder *decoder = NULL;
  try {
    decoder = new FFmpegRamDecoder(name, device_type, thread_count, callback);
    if (decoder) {
      if (decoder->reset() == 0) {
        return decoder;
      }
    }
  } catch (std::exception &e) {
    LOG_ERROR(std::string("new decoder exception:") + e.what());
  }
  if (decoder) {
    decoder->free_decoder();
    delete decoder;
    decoder = NULL;
  }
  return NULL;
}

extern "C" int ffmpeg_ram_decode(FFmpegRamDecoder *decoder, const uint8_t *data,
                                 int length, const void *obj) {
  try {
    int ret = decoder->decode(data, length, obj);
    if (DataFormat::H265 == decoder->data_format_ && util_decode::has_flag_could_not_find_ref_with_poc()) {
      return HWCODEC_ERR_HEVC_COULD_NOT_FIND_POC;
    } else {
      return ret == 0 ? HWCODEC_SUCCESS : HWCODEC_ERR_COMMON;
    }
  } catch (const std::exception &e) {
    LOG_ERROR(std::string("ffmpeg_ram_decode exception:") + e.what());
  }
  return HWCODEC_ERR_COMMON;
}
@@ -5,10 +5,6 @@

#define AV_NUM_DATA_POINTERS 8

-typedef void (*RamDecodeCallback)(const void *obj, int width, int height,
-                                  int pixfmt,
-                                  int linesize[AV_NUM_DATA_POINTERS],
-                                  uint8_t *data[AV_NUM_DATA_POINTERS], int key);
typedef void (*RamEncodeCallback)(const uint8_t *data, int len, int64_t pts,
                                  int key, const void *obj);

@@ -18,18 +14,13 @@ void *ffmpeg_ram_new_encoder(const char *name, const char *mc_name, int width,
                             int thread_count, int gpu, int *linesize,
                             int *offset, int *length,
                             RamEncodeCallback callback);
-void *ffmpeg_ram_new_decoder(const char *name, int device_type,
-                             int thread_count, RamDecodeCallback callback);
int ffmpeg_ram_encode(void *encoder, const uint8_t *data, int length,
                      const void *obj, int64_t ms);
-int ffmpeg_ram_decode(void *decoder, const uint8_t *data, int length,
-                      const void *obj);
void ffmpeg_ram_free_encoder(void *encoder);
-void ffmpeg_ram_free_decoder(void *decoder);
int ffmpeg_ram_get_linesize_offset_length(int pix_fmt, int width, int height,
                                          int align, int *linesize, int *offset,
                                          int *length);
int ffmpeg_ram_set_bitrate(void *encoder, int kbs);
void ffmpeg_ram_request_keyframe(void *encoder);

-#endif // FFMPEG_RAM_FFI_H
+#endif // FFMPEG_RAM_FFI_H

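With the decoder declarations gone, the header keeps only the encoder entry points, which hand results back through `RamEncodeCallback`. Below is a hedged sketch of how a Rust caller can collect packets through that callback by passing a `&mut Vec` as the opaque `obj` pointer, the same trick the removed decode.rs used for frames; `EncodedPacket` and the function name are illustrative, not the crate's real API.

```rust
// Hedged sketch: collecting encoder output through the C callback kept in
// ffmpeg_ram_ffi.h. The `obj` pointer carries a &mut Vec<EncodedPacket>;
// `EncodedPacket` and this function name are illustrative only.
use std::os::raw::{c_int, c_void};
use std::slice::from_raw_parts;

pub struct EncodedPacket {
    pub data: Vec<u8>,
    pub pts: i64,
    pub key: bool,
}

// Matches: typedef void (*RamEncodeCallback)(const uint8_t *data, int len,
//                                            int64_t pts, int key, const void *obj);
pub unsafe extern "C" fn on_encoded(
    data: *const u8,
    len: c_int,
    pts: i64,
    key: c_int,
    obj: *const c_void,
) {
    // Cast the opaque pointer back to the Vec supplied by the caller.
    let packets = &mut *(obj as *mut Vec<EncodedPacket>);
    packets.push(EncodedPacket {
        data: from_raw_parts(data, len as usize).to_vec(),
        pts,
        key: key != 0,
    });
}
```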
@@ -1,202 +0,0 @@
use crate::ffmpeg::{init_av_log, AVHWDeviceType::*};

use crate::{
    common::DataFormat::*,
    ffmpeg::{AVHWDeviceType, AVPixelFormat},
    ffmpeg_ram::{
        ffmpeg_ram_decode, ffmpeg_ram_free_decoder, ffmpeg_ram_new_decoder, CodecInfo,
        AV_NUM_DATA_POINTERS, Priority,
    },
};
use log::error;
use std::{
    ffi::{c_void, CString},
    os::raw::c_int,
    slice::from_raw_parts,
    vec,
};

#[derive(Debug, Clone)]
pub struct DecodeContext {
    pub name: String,
    pub device_type: AVHWDeviceType,
    pub thread_count: i32,
}

pub struct DecodeFrame {
    pub pixfmt: AVPixelFormat,
    pub width: i32,
    pub height: i32,
    pub data: Vec<Vec<u8>>,
    pub linesize: Vec<i32>,
    pub key: bool,
}

impl std::fmt::Display for DecodeFrame {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut s = String::from("data:");
        for data in self.data.iter() {
            s.push_str(format!("{} ", data.len()).as_str());
        }
        s.push_str(", linesize:");
        for linesize in self.linesize.iter() {
            s.push_str(format!("{} ", linesize).as_str());
        }

        write!(
            f,
            "fixfmt:{}, width:{}, height:{},key:{}, {}",
            self.pixfmt as i32, self.width, self.height, self.key, s,
        )
    }
}

pub struct Decoder {
    codec: *mut c_void,
    frames: *mut Vec<DecodeFrame>,
    pub ctx: DecodeContext,
}

unsafe impl Send for Decoder {}
unsafe impl Sync for Decoder {}

impl Decoder {
    pub fn new(ctx: DecodeContext) -> Result<Self, ()> {
        init_av_log();
        unsafe {
            let codec = ffmpeg_ram_new_decoder(
                CString::new(ctx.name.as_str()).map_err(|_| ())?.as_ptr(),
                ctx.device_type as _,
                ctx.thread_count,
                Some(Decoder::callback),
            );

            if codec.is_null() {
                return Err(());
            }

            Ok(Decoder {
                codec,
                frames: Box::into_raw(Box::new(Vec::<DecodeFrame>::new())),
                ctx,
            })
        }
    }

    pub fn decode(&mut self, packet: &[u8]) -> Result<&mut Vec<DecodeFrame>, i32> {
        unsafe {
            (&mut *self.frames).clear();
            let ret = ffmpeg_ram_decode(
                self.codec,
                packet.as_ptr(),
                packet.len() as c_int,
                self.frames as *const _ as *const c_void,
            );

            if ret < 0 {
                Err(ret)
            } else {
                Ok(&mut *self.frames)
            }
        }
    }

    unsafe extern "C" fn callback(
        obj: *const c_void,
        width: c_int,
        height: c_int,
        pixfmt: c_int,
        linesizes: *mut c_int,
        datas: *mut *mut u8,
        key: c_int,
    ) {
        let frames = &mut *(obj as *mut Vec<DecodeFrame>);
        let datas = from_raw_parts(datas, AV_NUM_DATA_POINTERS as _);
        let linesizes = from_raw_parts(linesizes, AV_NUM_DATA_POINTERS as _);

        let mut frame = DecodeFrame {
            pixfmt: std::mem::transmute(pixfmt),
            width,
            height,
            data: vec![],
            linesize: vec![],
            key: key != 0,
        };

        // Handle YUV420P and YUVJ420P (JPEG full-range) - same memory layout
        if pixfmt == AVPixelFormat::AV_PIX_FMT_YUV420P as c_int
            || pixfmt == AVPixelFormat::AV_PIX_FMT_YUVJ420P as c_int
        {
            let y = from_raw_parts(datas[0], (linesizes[0] * height) as usize).to_vec();
            let u = from_raw_parts(datas[1], (linesizes[1] * height / 2) as usize).to_vec();
            let v = from_raw_parts(datas[2], (linesizes[2] * height / 2) as usize).to_vec();

            frame.data.push(y);
            frame.data.push(u);
            frame.data.push(v);

            frame.linesize.push(linesizes[0]);
            frame.linesize.push(linesizes[1]);
            frame.linesize.push(linesizes[2]);

            frames.push(frame);
        } else if pixfmt == AVPixelFormat::AV_PIX_FMT_YUV422P as c_int
            || pixfmt == AVPixelFormat::AV_PIX_FMT_YUVJ422P as c_int
        {
            // YUV422P: U and V planes have same height as Y (not half)
            let y = from_raw_parts(datas[0], (linesizes[0] * height) as usize).to_vec();
            let u = from_raw_parts(datas[1], (linesizes[1] * height) as usize).to_vec();
            let v = from_raw_parts(datas[2], (linesizes[2] * height) as usize).to_vec();

            frame.data.push(y);
            frame.data.push(u);
            frame.data.push(v);

            frame.linesize.push(linesizes[0]);
            frame.linesize.push(linesizes[1]);
            frame.linesize.push(linesizes[2]);

            frames.push(frame);
        } else if pixfmt == AVPixelFormat::AV_PIX_FMT_NV12 as c_int
            || pixfmt == AVPixelFormat::AV_PIX_FMT_NV21 as c_int
        {
            let y = from_raw_parts(datas[0], (linesizes[0] * height) as usize).to_vec();
            let uv = from_raw_parts(datas[1], (linesizes[1] * height / 2) as usize).to_vec();

            frame.data.push(y);
            frame.data.push(uv);

            frame.linesize.push(linesizes[0]);
            frame.linesize.push(linesizes[1]);

            frames.push(frame);
        } else {
            error!("unsupported pixfmt {}", pixfmt as i32);
        }
    }

    /// Returns available decoders for IP-KVM scenario.
    /// Only MJPEG software decoder is supported as IP-KVM captures from video capture cards
    /// that output MJPEG streams.
    pub fn available_decoders() -> Vec<CodecInfo> {
        // IP-KVM scenario only needs MJPEG decoding
        // MJPEG comes from video capture cards, software decoding is sufficient
        vec![CodecInfo {
            name: "mjpeg".to_owned(),
            format: MJPEG,
            hwdevice: AV_HWDEVICE_TYPE_NONE,
            priority: Priority::Best as _,
            ..Default::default()
        }]
    }
}

impl Drop for Decoder {
    fn drop(&mut self) {
        unsafe {
            ffmpeg_ram_free_decoder(self.codec);
            self.codec = std::ptr::null_mut();
            let _ = Box::from_raw(self.frames);
        }
    }
}
@@ -447,14 +447,6 @@ impl Encoder {
            }
        }

-        // Add MJPEG software encoder if not already present
-        if !res.iter().any(|c| c.format == MJPEG) {
-            if let Some(mjpeg_soft) = soft_codecs.mjpeg {
-                debug!("Adding software MJPEG encoder: {}", mjpeg_soft.name);
-                res.push(mjpeg_soft);
-            }
-        }
-
        res
    }

@@ -12,7 +12,6 @@ use std::ffi::c_int;

include!(concat!(env!("OUT_DIR"), "/ffmpeg_ram_ffi.rs"));

-pub mod decode;
pub mod encode;

pub enum Priority {
@@ -52,7 +51,6 @@ impl CodecInfo {
        let mut vp8: Option<CodecInfo> = None;
        let mut vp9: Option<CodecInfo> = None;
        let mut av1: Option<CodecInfo> = None;
-        let mut mjpeg: Option<CodecInfo> = None;

        for coder in coders {
            match coder.format {
@@ -96,14 +94,6 @@ impl CodecInfo {
                    }
                    None => av1 = Some(coder),
                },
-                DataFormat::MJPEG => match &mjpeg {
-                    Some(old) => {
-                        if old.priority > coder.priority {
-                            mjpeg = Some(coder)
-                        }
-                    }
-                    None => mjpeg = Some(coder),
-                },
            }
        }
        CodecInfos {
@@ -112,7 +102,6 @@ impl CodecInfo {
            vp8,
            vp9,
            av1,
-            mjpeg,
        }
    }

@@ -147,13 +136,6 @@ impl CodecInfo {
            priority: Priority::Soft as _,
        }),
        av1: None,
-        mjpeg: Some(CodecInfo {
-            name: "mjpeg".to_owned(),
-            mc_name: Default::default(),
-            format: MJPEG,
-            hwdevice: AV_HWDEVICE_TYPE_NONE,
-            priority: Priority::Soft as _,
-        }),
    }
}
}
@@ -165,7 +147,6 @@ pub struct CodecInfos {
    pub vp8: Option<CodecInfo>,
    pub vp9: Option<CodecInfo>,
    pub av1: Option<CodecInfo>,
-    pub mjpeg: Option<CodecInfo>,
}

impl CodecInfos {