X-Git-Url: https://git.nihav.org/?a=blobdiff_plain;f=videoplayer%2Fsrc%2Faudiodec.rs;h=1f5c52ecc3142b016b743b12f772346c1f5c69a2;hb=HEAD;hp=bddec5745a6dce2df1eae5cfffb84e8d6fde039b;hpb=54ede18180302b14e7de9cc033d1be66592db796;p=nihav-player.git

diff --git a/videoplayer/src/audiodec.rs b/videoplayer/src/audiodec.rs
index bddec57..1f5c52e 100644
--- a/videoplayer/src/audiodec.rs
+++ b/videoplayer/src/audiodec.rs
@@ -78,7 +78,7 @@ impl AudioQueue {
         if self.end + src.len() > self.queue.len() {
             self.queue.resize(self.end + src.len(), 0);
         }
-        self.queue[self.end..][..src.len()].copy_from_slice(&src);
+        self.queue[self.end..][..src.len()].copy_from_slice(src);
         self.end += src.len();
         self.spos = samplepos;
         self.set_time();
@@ -155,20 +155,21 @@ fn dummy_audio_thread(aprecv: Receiver<PktSendEvent>) -> JoinHandle<()> {
 
 type AudioPlaybackType = Option<AudioDevice<AudioOutput>>;
 
-fn start_audio_decoding(asystem: &AudioSubsystem, ainfo: NAAudioInfo, mut audio_dec: DecoderStuff, aprecv: Receiver<PktSendEvent>) -> (AudioPlaybackType, JoinHandle<()>) {
+fn start_audio_decoding(asystem: &AudioSubsystem, ainfo: NAAudioInfo, sbr_hack: bool, mut audio_dec: DecoderStuff, aprecv: Receiver<PktSendEvent>) -> (AudioPlaybackType, JoinHandle<()>) {
     let ch = ainfo.channels.max(2);
+    let sample_rate = if !sbr_hack { ainfo.sample_rate } else { ainfo.sample_rate * 2 };
     let desired_spec = AudioSpecDesired {
-            freq: Some(ainfo.sample_rate as i32),
+            freq: Some(sample_rate as i32),
             channels: Some(ch),
             samples: None
         };
     let dst_info = NAAudioInfo {
-            sample_rate: ainfo.sample_rate,
+            sample_rate,
             channels: ch,
             format: SND_S16_FORMAT,
             block_len: 0,
         };
-    let queue = Arc::new(Mutex::new(AudioQueue::new(ainfo.sample_rate as usize, ch as usize)));
+    let queue = Arc::new(Mutex::new(AudioQueue::new(sample_rate as usize, ch as usize)));
     let qclone = queue.clone();
     let ret = asystem.open_playback(None, &desired_spec, |_spec| {
             AudioOutput {
@@ -203,6 +204,9 @@ fn start_audio_decoding(asystem: &AudioSubsystem, ainfo: NAAudioInfo, mut audio_
                     let buf = frm.get_buffer();
                     if let Some(pts) = frm.get_pts() {
                         samplepos = NATimeInfo::ts_to_time(pts, u64::from(dst_info.sample_rate), frm.ts.tb_num, frm.ts.tb_den) as usize;
+                        if sbr_hack {
+                            samplepos >>= 2;
+                        }
                     }
                     samplepos += buf.get_audio_length();
                     if let Ok(out_buf) = convert_audio_frame(&buf, &dst_info, &dst_chmap) {
@@ -260,10 +264,10 @@ pub struct AudioControl {
 }
 
 impl AudioControl {
-    pub fn new(audio_dec: Option<DecoderStuff>, ainfo: Option<NAAudioInfo>, asystem: &AudioSubsystem) -> Self {
+    pub fn new(audio_dec: Option<DecoderStuff>, ainfo: Option<NAAudioInfo>, sbr_hack: bool, asystem: &AudioSubsystem) -> Self {
         let (apsend, aprecv) = std::sync::mpsc::sync_channel::<PktSendEvent>(20);
         let (adevice, athread) = if let Some(audio_dec) = audio_dec {
-                start_audio_decoding(asystem, ainfo.expect("audio info should be present"), audio_dec, aprecv)
+                start_audio_decoding(asystem, ainfo.expect("audio info should be present"), sbr_hack, audio_dec, aprecv)
             } else {
                 (None, dummy_audio_thread(aprecv))
             };
@@ -317,7 +321,7 @@ impl AudioControl {
     pub fn get_queue_size(&self) -> usize { self.aqueue.len() }
 
     pub fn try_send_audio(&mut self, evt: PktSendEvent) -> bool {
-        if self.aqueue.len() > 0 {
+        if !self.aqueue.is_empty() {
             self.aqueue.push(evt);
             false
         } else {
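
Note: the sketch below is not part of nihav-player; it is a minimal standalone illustration of the two adjustments the patch makes when the new `sbr_hack` flag is set (presumably for SBR/HE-AAC-style streams, where the decoder outputs twice the nominal sample rate). The helper names `playback_rate` and `scale_samplepos` are invented for illustration.

    // Illustrative sketch only; these helpers do not exist in nihav-player.
    // They mirror the two changes in the diff above.

    /// Sample rate handed to the audio backend: doubled when the SBR hack is
    /// active, as in `if !sbr_hack { ainfo.sample_rate } else { ainfo.sample_rate * 2 }`.
    fn playback_rate(nominal: u32, sbr_hack: bool) -> u32 {
        if sbr_hack { nominal * 2 } else { nominal }
    }

    /// PTS-derived sample position, rescaled the way the decoding loop does
    /// (`samplepos >>= 2;`) when the SBR hack is active.
    fn scale_samplepos(samplepos: usize, sbr_hack: bool) -> usize {
        if sbr_hack { samplepos >> 2 } else { samplepos }
    }

    fn main() {
        // With the hack on, a nominal 24000 Hz stream plays back at 48000 Hz,
        // and a position computed against the doubled rate is shifted down.
        assert_eq!(playback_rate(24000, true), 48000);
        assert_eq!(scale_samplepos(192_000, true), 48_000);
        println!("rate/position adjustments match the patch");
    }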