feat: 完善架构,优化性能

- 调整音视频架构,提升 RKMPP 编码 MJPEG-->H264 性能,同时解决丢帧马赛克问题;
- 删除多用户逻辑,只保留单用户,支持设置 web 单会话;
- 修复删除体验不好的回退逻辑,前端页面菜单位置微调;
- 增加 OTG USB 设备动态调整功能;
- 修复 mdns 问题,webrtc 视频切换更顺畅。
This commit is contained in:
mofeng
2026-01-25 16:04:29 +08:00
parent 01e01430da
commit 1786b7689d
66 changed files with 4225 additions and 2936 deletions

View File

@@ -117,21 +117,11 @@ pub enum CaptureState {
Error,
}
/// Audio capture statistics
#[derive(Debug, Clone, Default)]
pub struct AudioStats {
pub frames_captured: u64,
pub frames_dropped: u64,
pub buffer_overruns: u64,
pub current_latency_ms: f32,
}
/// ALSA audio capturer
pub struct AudioCapturer {
config: AudioConfig,
state: Arc<watch::Sender<CaptureState>>,
state_rx: watch::Receiver<CaptureState>,
stats: Arc<Mutex<AudioStats>>,
frame_tx: broadcast::Sender<AudioFrame>,
stop_flag: Arc<AtomicBool>,
sequence: Arc<AtomicU64>,
@@ -150,7 +140,6 @@ impl AudioCapturer {
config,
state: Arc::new(state_tx),
state_rx,
stats: Arc::new(Mutex::new(AudioStats::default())),
frame_tx,
stop_flag: Arc::new(AtomicBool::new(false)),
sequence: Arc::new(AtomicU64::new(0)),
@@ -174,11 +163,6 @@ impl AudioCapturer {
self.frame_tx.subscribe()
}
/// Get statistics
pub async fn stats(&self) -> AudioStats {
self.stats.lock().await.clone()
}
/// Start capturing
pub async fn start(&self) -> Result<()> {
if self.state() == CaptureState::Running {
@@ -194,7 +178,6 @@ impl AudioCapturer {
let config = self.config.clone();
let state = self.state.clone();
let stats = self.stats.clone();
let frame_tx = self.frame_tx.clone();
let stop_flag = self.stop_flag.clone();
let sequence = self.sequence.clone();
@@ -204,7 +187,6 @@ impl AudioCapturer {
capture_loop(
config,
state,
stats,
frame_tx,
stop_flag,
sequence,
@@ -239,7 +221,6 @@ impl AudioCapturer {
fn capture_loop(
config: AudioConfig,
state: Arc<watch::Sender<CaptureState>>,
stats: Arc<Mutex<AudioStats>>,
frame_tx: broadcast::Sender<AudioFrame>,
stop_flag: Arc<AtomicBool>,
sequence: Arc<AtomicU64>,
@@ -248,7 +229,6 @@ fn capture_loop(
let result = run_capture(
&config,
&state,
&stats,
&frame_tx,
&stop_flag,
&sequence,
@@ -266,7 +246,6 @@ fn capture_loop(
fn run_capture(
config: &AudioConfig,
state: &watch::Sender<CaptureState>,
stats: &Arc<Mutex<AudioStats>>,
frame_tx: &broadcast::Sender<AudioFrame>,
stop_flag: &AtomicBool,
sequence: &AtomicU64,
@@ -334,9 +313,6 @@ fn run_capture(
match pcm.state() {
State::XRun => {
warn_throttled!(log_throttler, "xrun", "Audio buffer overrun, recovering");
if let Ok(mut s) = stats.try_lock() {
s.buffer_overruns += 1;
}
let _ = pcm.prepare();
continue;
}
@@ -377,11 +353,6 @@ fn run_capture(
debug!("No audio receivers: {}", e);
}
}
// Update stats
if let Ok(mut s) = stats.try_lock() {
s.frames_captured += 1;
}
}
Err(e) => {
// Check for buffer overrun (EPIPE = 32 on Linux)
@@ -389,21 +360,12 @@ fn run_capture(
if desc.contains("EPIPE") || desc.contains("Broken pipe") {
// Buffer overrun
warn_throttled!(log_throttler, "buffer_overrun", "Audio buffer overrun");
if let Ok(mut s) = stats.try_lock() {
s.buffer_overruns += 1;
}
let _ = pcm.prepare();
} else if desc.contains("No such device") || desc.contains("ENODEV") {
// Device disconnected - use longer throttle for this
error_throttled!(log_throttler, "no_device", "Audio read error: {}", e);
if let Ok(mut s) = stats.try_lock() {
s.frames_dropped += 1;
}
} else {
error_throttled!(log_throttler, "read_error", "Audio read error: {}", e);
if let Ok(mut s) = stats.try_lock() {
s.frames_dropped += 1;
}
}
}
}

View File

@@ -4,7 +4,7 @@
use serde::{Deserialize, Serialize};
use std::sync::Arc;
use tokio::sync::{broadcast, RwLock};
use tokio::sync::RwLock;
use tracing::info;
use super::capture::AudioConfig;
@@ -104,10 +104,6 @@ pub struct AudioStatus {
pub quality: AudioQuality,
/// Number of connected subscribers
pub subscriber_count: usize,
/// Frames encoded
pub frames_encoded: u64,
/// Bytes output
pub bytes_output: u64,
/// Error message if any
pub error: Option<String>,
}
@@ -352,17 +348,11 @@ impl AudioController {
let streaming = self.is_streaming().await;
let error = self.last_error.read().await.clone();
let (subscriber_count, frames_encoded, bytes_output) =
if let Some(ref streamer) = *self.streamer.read().await {
let stats = streamer.stats().await;
(
stats.subscriber_count,
stats.frames_encoded,
stats.bytes_output,
)
} else {
(0, 0, 0)
};
let subscriber_count = if let Some(ref streamer) = *self.streamer.read().await {
streamer.stats().await.subscriber_count
} else {
0
};
AudioStatus {
enabled: config.enabled,
@@ -374,14 +364,12 @@ impl AudioController {
},
quality: config.quality,
subscriber_count,
frames_encoded,
bytes_output,
error,
}
}
/// Subscribe to Opus frames (for WebSocket clients)
pub fn subscribe_opus(&self) -> Option<broadcast::Receiver<OpusFrame>> {
pub fn subscribe_opus(&self) -> Option<tokio::sync::watch::Receiver<Option<Arc<OpusFrame>>>> {
// Use try_read to avoid blocking - this is called from sync context sometimes
if let Ok(guard) = self.streamer.try_read() {
guard.as_ref().map(|s| s.subscribe_opus())
@@ -391,7 +379,9 @@ impl AudioController {
}
/// Subscribe to Opus frames (async version)
pub async fn subscribe_opus_async(&self) -> Option<broadcast::Receiver<OpusFrame>> {
pub async fn subscribe_opus_async(
&self,
) -> Option<tokio::sync::watch::Receiver<Option<Arc<OpusFrame>>>> {
self.streamer
.read()
.await

View File

@@ -6,7 +6,6 @@
//! - Audio device enumeration
//! - Audio streaming pipeline
//! - High-level audio controller
//! - Shared audio pipeline for WebRTC multi-session support
//! - Device health monitoring
pub mod capture;
@@ -14,7 +13,6 @@ pub mod controller;
pub mod device;
pub mod encoder;
pub mod monitor;
pub mod shared_pipeline;
pub mod streamer;
pub use capture::{AudioCapturer, AudioConfig, AudioFrame};
@@ -22,7 +20,4 @@ pub use controller::{AudioController, AudioControllerConfig, AudioQuality, Audio
pub use device::{enumerate_audio_devices, enumerate_audio_devices_with_current, AudioDeviceInfo};
pub use encoder::{OpusConfig, OpusEncoder, OpusFrame};
pub use monitor::{AudioHealthMonitor, AudioHealthStatus, AudioMonitorConfig};
pub use shared_pipeline::{
SharedAudioPipeline, SharedAudioPipelineConfig, SharedAudioPipelineStats,
};
pub use streamer::{AudioStreamState, AudioStreamer, AudioStreamerConfig};

View File

@@ -1,450 +0,0 @@
//! Shared Audio Pipeline for WebRTC
//!
//! This module provides a shared audio encoding pipeline that can serve
//! multiple WebRTC sessions with a single encoder instance.
//!
//! # Architecture
//!
//! ```text
//! AudioCapturer (ALSA)
//! |
//! v (broadcast::Receiver<AudioFrame>)
//! SharedAudioPipeline (single Opus encoder)
//! |
//! v (broadcast::Sender<OpusFrame>)
//! ┌────┴────┬────────┬────────┐
//! v v v v
//! Session1 Session2 Session3 ...
//! (RTP) (RTP) (RTP) (RTP)
//! ```
//!
//! # Key Features
//!
//! - **Single encoder**: All sessions share one Opus encoder
//! - **Broadcast distribution**: Encoded frames are broadcast to all subscribers
//! - **Dynamic bitrate**: Bitrate can be changed at runtime
//! - **Statistics**: Tracks encoding performance metrics
use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
use std::sync::Arc;
use std::time::Instant;
use tokio::sync::{broadcast, Mutex, RwLock};
use tracing::{debug, error, info, trace, warn};
use super::capture::AudioFrame;
use super::encoder::{OpusConfig, OpusEncoder, OpusFrame};
use crate::error::{AppError, Result};
/// Shared audio pipeline configuration
///
/// Cloned into the pipeline at `start()`; changing it afterwards requires
/// `update_config` (which demands a stopped pipeline) or `set_bitrate`.
#[derive(Debug, Clone)]
pub struct SharedAudioPipelineConfig {
    /// Sample rate in Hz (must match audio capture)
    pub sample_rate: u32,
    /// Number of channels (1 or 2)
    pub channels: u32,
    /// Target bitrate in bps
    pub bitrate: u32,
    /// Opus application mode
    pub application: OpusApplicationMode,
    /// Enable forward error correction
    pub fec: bool,
    /// Broadcast channel capacity, in encoded frames; subscribers that fall
    /// further behind than this observe `RecvError::Lagged`
    pub channel_capacity: usize,
}
impl Default for SharedAudioPipelineConfig {
fn default() -> Self {
Self {
sample_rate: 48000,
channels: 2,
bitrate: 64000,
application: OpusApplicationMode::Audio,
fec: true,
channel_capacity: 16, // Reduced from 64 for lower latency
}
}
}
impl SharedAudioPipelineConfig {
    /// Preset tuned for speech: lower bitrate, VoIP encoder mode.
    pub fn voice() -> Self {
        let mut cfg = Self::default();
        cfg.bitrate = 32000;
        cfg.application = OpusApplicationMode::Voip;
        cfg
    }

    /// Preset tuned for music / high fidelity: double the default bitrate,
    /// general-audio encoder mode.
    pub fn high_quality() -> Self {
        let mut cfg = Self::default();
        cfg.bitrate = 128000;
        cfg.application = OpusApplicationMode::Audio;
        cfg
    }

    /// Translate this pipeline-level config into the encoder-level `OpusConfig`.
    pub fn to_opus_config(&self) -> OpusConfig {
        use super::encoder::OpusApplication;

        // Map the pipeline's application mode onto the encoder's enum.
        let application = match self.application {
            OpusApplicationMode::Voip => OpusApplication::Voip,
            OpusApplicationMode::Audio => OpusApplication::Audio,
            OpusApplicationMode::LowDelay => OpusApplication::LowDelay,
        };

        OpusConfig {
            sample_rate: self.sample_rate,
            channels: self.channels,
            bitrate: self.bitrate,
            application,
            fec: self.fec,
        }
    }
}
/// Opus application mode
///
/// Mirrors the encoder-level `super::encoder::OpusApplication`; the mapping
/// is performed in `SharedAudioPipelineConfig::to_opus_config`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum OpusApplicationMode {
    /// Voice over IP - optimized for speech
    Voip,
    /// General audio - balanced quality
    Audio,
    /// Low delay mode - minimal latency
    LowDelay,
}
/// Shared audio pipeline statistics
///
/// Snapshot returned by `SharedAudioPipeline::stats()`; counters are
/// cumulative since the last `start()` (stats are reset there).
#[derive(Debug, Clone, Default)]
pub struct SharedAudioPipelineStats {
    /// Frames received from audio capture
    pub frames_received: u64,
    /// Frames successfully encoded
    pub frames_encoded: u64,
    /// Frames dropped — incremented on encode errors and on broadcast lag
    pub frames_dropped: u64,
    /// Total bytes encoded
    pub bytes_encoded: u64,
    /// Number of active subscribers (sampled from the broadcast channel
    /// when the snapshot is taken, not tracked incrementally)
    pub subscribers: u64,
    /// Average encode time in milliseconds (derived from atomic accumulators)
    pub avg_encode_time_ms: f32,
    /// Current bitrate in bps
    pub current_bitrate: u32,
    /// Pipeline running time in seconds
    pub running_time_secs: f64,
}
/// Shared Audio Pipeline
///
/// Provides a single Opus encoder that serves multiple WebRTC sessions.
/// All sessions receive the same encoded audio stream via broadcast channel.
pub struct SharedAudioPipeline {
    /// Configuration
    config: RwLock<SharedAudioPipelineConfig>,
    /// Opus encoder (protected by mutex for encoding; `None` when stopped)
    encoder: Mutex<Option<OpusEncoder>>,
    /// Broadcast sender for encoded Opus frames
    opus_tx: broadcast::Sender<OpusFrame>,
    /// Running state
    running: AtomicBool,
    /// Statistics
    stats: Mutex<SharedAudioPipelineStats>,
    /// Start time for running time calculation
    start_time: RwLock<Option<Instant>>,
    /// Encode time accumulator for averaging (microseconds; lock-free)
    encode_time_sum_us: AtomicU64,
    /// Encode count for averaging
    encode_count: AtomicU64,
    /// Stop signal (atomic for lock-free checking from the encoding task)
    stop_flag: AtomicBool,
    /// Encoding task handle (stored at start; the task cleans up after itself
    /// rather than being awaited/aborted here)
    task_handle: Mutex<Option<tokio::task::JoinHandle<()>>>,
}
impl SharedAudioPipeline {
/// Create a new shared audio pipeline
pub fn new(config: SharedAudioPipelineConfig) -> Result<Arc<Self>> {
let (opus_tx, _) = broadcast::channel(config.channel_capacity);
Ok(Arc::new(Self {
config: RwLock::new(config),
encoder: Mutex::new(None),
opus_tx,
running: AtomicBool::new(false),
stats: Mutex::new(SharedAudioPipelineStats::default()),
start_time: RwLock::new(None),
encode_time_sum_us: AtomicU64::new(0),
encode_count: AtomicU64::new(0),
stop_flag: AtomicBool::new(false),
task_handle: Mutex::new(None),
}))
}
/// Create with default configuration
pub fn default_config() -> Result<Arc<Self>> {
Self::new(SharedAudioPipelineConfig::default())
}
/// Start the audio encoding pipeline
///
/// # Arguments
/// * `audio_rx` - Receiver for raw audio frames from AudioCapturer
pub async fn start(self: &Arc<Self>, audio_rx: broadcast::Receiver<AudioFrame>) -> Result<()> {
if self.running.load(Ordering::SeqCst) {
return Ok(());
}
let config = self.config.read().await.clone();
info!(
"Starting shared audio pipeline: {}Hz {}ch {}bps",
config.sample_rate, config.channels, config.bitrate
);
// Create encoder
let opus_config = config.to_opus_config();
let encoder = OpusEncoder::new(opus_config)?;
*self.encoder.lock().await = Some(encoder);
// Reset stats
{
let mut stats = self.stats.lock().await;
*stats = SharedAudioPipelineStats::default();
stats.current_bitrate = config.bitrate;
}
// Reset counters
self.encode_time_sum_us.store(0, Ordering::SeqCst);
self.encode_count.store(0, Ordering::SeqCst);
*self.start_time.write().await = Some(Instant::now());
self.stop_flag.store(false, Ordering::SeqCst);
self.running.store(true, Ordering::SeqCst);
// Start encoding task
let pipeline = self.clone();
let handle = tokio::spawn(async move {
pipeline.encoding_task(audio_rx).await;
});
*self.task_handle.lock().await = Some(handle);
info!("Shared audio pipeline started");
Ok(())
}
/// Stop the audio encoding pipeline
pub fn stop(&self) {
if !self.running.load(Ordering::SeqCst) {
return;
}
info!("Stopping shared audio pipeline");
// Signal stop (atomic, no lock needed)
self.stop_flag.store(true, Ordering::SeqCst);
self.running.store(false, Ordering::SeqCst);
}
/// Check if pipeline is running
pub fn is_running(&self) -> bool {
self.running.load(Ordering::SeqCst)
}
/// Subscribe to encoded Opus frames
pub fn subscribe(&self) -> broadcast::Receiver<OpusFrame> {
self.opus_tx.subscribe()
}
/// Get number of active subscribers
pub fn subscriber_count(&self) -> usize {
self.opus_tx.receiver_count()
}
/// Get current statistics
pub async fn stats(&self) -> SharedAudioPipelineStats {
let mut stats = self.stats.lock().await.clone();
stats.subscribers = self.subscriber_count() as u64;
// Calculate average encode time
let count = self.encode_count.load(Ordering::SeqCst);
if count > 0 {
let sum_us = self.encode_time_sum_us.load(Ordering::SeqCst);
stats.avg_encode_time_ms = (sum_us as f64 / count as f64 / 1000.0) as f32;
}
// Calculate running time
if let Some(start) = *self.start_time.read().await {
stats.running_time_secs = start.elapsed().as_secs_f64();
}
stats
}
/// Set bitrate dynamically
pub async fn set_bitrate(&self, bitrate: u32) -> Result<()> {
// Update config
self.config.write().await.bitrate = bitrate;
// Update encoder if running
if let Some(ref mut encoder) = *self.encoder.lock().await {
encoder.set_bitrate(bitrate)?;
}
// Update stats
self.stats.lock().await.current_bitrate = bitrate;
info!("Shared audio pipeline bitrate changed to {}bps", bitrate);
Ok(())
}
/// Update configuration (requires restart)
pub async fn update_config(&self, config: SharedAudioPipelineConfig) -> Result<()> {
if self.is_running() {
return Err(AppError::AudioError(
"Cannot update config while pipeline is running".to_string(),
));
}
*self.config.write().await = config;
Ok(())
}
/// Internal encoding task
async fn encoding_task(self: Arc<Self>, mut audio_rx: broadcast::Receiver<AudioFrame>) {
info!("Audio encoding task started");
loop {
// Check stop flag (atomic, no async lock needed)
if self.stop_flag.load(Ordering::Relaxed) {
break;
}
// Receive audio frame with timeout
let recv_result =
tokio::time::timeout(std::time::Duration::from_secs(2), audio_rx.recv()).await;
match recv_result {
Ok(Ok(audio_frame)) => {
// Update received count
{
let mut stats = self.stats.lock().await;
stats.frames_received += 1;
}
// Encode frame
let encode_start = Instant::now();
let encode_result = {
let mut encoder_guard = self.encoder.lock().await;
if let Some(ref mut encoder) = *encoder_guard {
Some(encoder.encode_frame(&audio_frame))
} else {
None
}
};
let encode_time = encode_start.elapsed();
// Update encode time stats
self.encode_time_sum_us
.fetch_add(encode_time.as_micros() as u64, Ordering::SeqCst);
self.encode_count.fetch_add(1, Ordering::SeqCst);
match encode_result {
Some(Ok(opus_frame)) => {
// Update stats
{
let mut stats = self.stats.lock().await;
stats.frames_encoded += 1;
stats.bytes_encoded += opus_frame.data.len() as u64;
}
// Broadcast to subscribers
if self.opus_tx.receiver_count() > 0 {
if let Err(e) = self.opus_tx.send(opus_frame) {
trace!("No audio subscribers: {}", e);
}
}
}
Some(Err(e)) => {
error!("Opus encode error: {}", e);
let mut stats = self.stats.lock().await;
stats.frames_dropped += 1;
}
None => {
warn!("Encoder not available");
break;
}
}
}
Ok(Err(broadcast::error::RecvError::Closed)) => {
info!("Audio source channel closed");
break;
}
Ok(Err(broadcast::error::RecvError::Lagged(n))) => {
warn!("Audio pipeline lagged by {} frames", n);
let mut stats = self.stats.lock().await;
stats.frames_dropped += n;
}
Err(_) => {
// Timeout - check if still running
if !self.running.load(Ordering::SeqCst) {
break;
}
debug!("Audio receive timeout, continuing...");
}
}
}
// Cleanup
self.running.store(false, Ordering::SeqCst);
*self.encoder.lock().await = None;
let stats = self.stats().await;
info!(
"Audio encoding task ended: {} frames encoded, {} dropped, {:.1}s runtime",
stats.frames_encoded, stats.frames_dropped, stats.running_time_secs
);
}
}
impl Drop for SharedAudioPipeline {
    fn drop(&mut self) {
        // Best-effort shutdown: `stop()` only raises the atomic stop/running
        // flags; the spawned encoding task notices them on its next loop
        // iteration (within its 2s receive timeout) and cleans up itself.
        self.stop();
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Default config is stereo, 48 kHz, 64 kbps.
    #[test]
    fn test_config_default() {
        let cfg = SharedAudioPipelineConfig::default();
        assert_eq!(
            (cfg.sample_rate, cfg.channels, cfg.bitrate),
            (48000, 2, 64000)
        );
    }

    /// Voice preset lowers the bitrate and switches to VoIP mode.
    #[test]
    fn test_config_voice() {
        let cfg = SharedAudioPipelineConfig::voice();
        assert_eq!(cfg.bitrate, 32000);
        assert!(matches!(cfg.application, OpusApplicationMode::Voip));
    }

    /// High-quality preset doubles the default bitrate.
    #[test]
    fn test_config_high_quality() {
        assert_eq!(SharedAudioPipelineConfig::high_quality().bitrate, 128000);
    }

    /// A freshly constructed pipeline is stopped and has no subscribers.
    #[tokio::test]
    async fn test_pipeline_creation() {
        let pipeline = SharedAudioPipeline::new(SharedAudioPipelineConfig::default())
            .expect("pipeline creation should succeed");
        assert!(!pipeline.is_running());
        assert_eq!(pipeline.subscriber_count(), 0);
    }
}

View File

@@ -7,7 +7,7 @@ use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
use std::sync::Arc;
use std::time::Instant;
use tokio::sync::{broadcast, watch, Mutex, RwLock};
use tracing::{error, info, trace, warn};
use tracing::{error, info, warn};
use super::capture::{AudioCapturer, AudioConfig, CaptureState};
use super::encoder::{OpusConfig, OpusEncoder, OpusFrame};
@@ -72,18 +72,9 @@ impl AudioStreamerConfig {
/// Audio stream statistics
#[derive(Debug, Clone, Default)]
pub struct AudioStreamStats {
/// Frames captured from ALSA
pub frames_captured: u64,
/// Frames encoded to Opus
pub frames_encoded: u64,
/// Total bytes output (Opus)
pub bytes_output: u64,
/// Current encoding bitrate
pub current_bitrate: u32,
/// Number of active subscribers
pub subscriber_count: usize,
/// Buffer overruns
pub buffer_overruns: u64,
}
/// Audio streamer
@@ -95,7 +86,7 @@ pub struct AudioStreamer {
state_rx: watch::Receiver<AudioStreamState>,
capturer: RwLock<Option<Arc<AudioCapturer>>>,
encoder: Arc<Mutex<Option<OpusEncoder>>>,
opus_tx: broadcast::Sender<OpusFrame>,
opus_tx: watch::Sender<Option<Arc<OpusFrame>>>,
stats: Arc<Mutex<AudioStreamStats>>,
sequence: AtomicU64,
stream_start_time: RwLock<Option<Instant>>,
@@ -111,7 +102,7 @@ impl AudioStreamer {
/// Create a new audio streamer with specified configuration
pub fn with_config(config: AudioStreamerConfig) -> Self {
let (state_tx, state_rx) = watch::channel(AudioStreamState::Stopped);
let (opus_tx, _) = broadcast::channel(16); // Buffer size 16 for low latency
let (opus_tx, _opus_rx) = watch::channel(None);
Self {
config: RwLock::new(config),
@@ -138,7 +129,7 @@ impl AudioStreamer {
}
/// Subscribe to Opus frames
pub fn subscribe_opus(&self) -> broadcast::Receiver<OpusFrame> {
pub fn subscribe_opus(&self) -> watch::Receiver<Option<Arc<OpusFrame>>> {
self.opus_tx.subscribe()
}
@@ -175,9 +166,6 @@ impl AudioStreamer {
encoder.set_bitrate(bitrate)?;
}
// Update stats
self.stats.lock().await.current_bitrate = bitrate;
info!("Audio bitrate changed to {}bps", bitrate);
Ok(())
}
@@ -216,7 +204,6 @@ impl AudioStreamer {
{
let mut stats = self.stats.lock().await;
*stats = AudioStreamStats::default();
stats.current_bitrate = config.opus.bitrate;
}
// Record start time
@@ -227,12 +214,11 @@ impl AudioStreamer {
let capturer_for_task = capturer.clone();
let encoder = self.encoder.clone();
let opus_tx = self.opus_tx.clone();
let stats = self.stats.clone();
let state = self.state.clone();
let stop_flag = self.stop_flag.clone();
tokio::spawn(async move {
Self::stream_task(capturer_for_task, encoder, opus_tx, stats, state, stop_flag).await;
Self::stream_task(capturer_for_task, encoder, opus_tx, state, stop_flag).await;
});
Ok(())
@@ -273,8 +259,7 @@ impl AudioStreamer {
async fn stream_task(
capturer: Arc<AudioCapturer>,
encoder: Arc<Mutex<Option<OpusEncoder>>>,
opus_tx: broadcast::Sender<OpusFrame>,
stats: Arc<Mutex<AudioStreamStats>>,
opus_tx: watch::Sender<Option<Arc<OpusFrame>>>,
state: watch::Sender<AudioStreamState>,
stop_flag: Arc<AtomicBool>,
) {
@@ -302,12 +287,6 @@ impl AudioStreamer {
match recv_result {
Ok(Ok(audio_frame)) => {
// Update capture stats
{
let mut s = stats.lock().await;
s.frames_captured += 1;
}
// Encode to Opus
let opus_result = {
let mut enc_guard = encoder.lock().await;
@@ -320,18 +299,9 @@ impl AudioStreamer {
match opus_result {
Some(Ok(opus_frame)) => {
// Update stats
{
let mut s = stats.lock().await;
s.frames_encoded += 1;
s.bytes_output += opus_frame.data.len() as u64;
}
// Broadcast to subscribers
// Publish latest frame to subscribers
if opus_tx.receiver_count() > 0 {
if let Err(e) = opus_tx.send(opus_frame) {
trace!("No audio subscribers: {}", e);
}
let _ = opus_tx.send(Some(Arc::new(opus_frame)));
}
}
Some(Err(e)) => {
@@ -349,8 +319,6 @@ impl AudioStreamer {
}
Ok(Err(broadcast::error::RecvError::Lagged(n))) => {
warn!("Audio receiver lagged by {} frames", n);
let mut s = stats.lock().await;
s.buffer_overruns += n;
}
Err(_) => {
// Timeout - check if still capturing