X-Git-Url: https://git.nihav.org/?p=nihav-encoder.git;a=blobdiff_plain;f=src%2Facvt.rs;h=254bed692c0056371bc116b7c31da08b307832eb;hp=5343039d1fb62e8fc8249ebf77d88660fe8cdd81;hb=HEAD;hpb=b0481c9e4084c2946507754bc194a64306a278f8

diff --git a/src/acvt.rs b/src/acvt.rs
index 5343039..254bed6 100644
--- a/src/acvt.rs
+++ b/src/acvt.rs
@@ -10,6 +10,37 @@ struct AudioQueue<T> {
     ileaved: bool,
 }
 
+fn copy_audio<T: Copy>(dst: &mut [T], dstride: usize,
+                       src: &[T], sstride: usize,
+                       len: usize, channels: usize)
+{
+    match (sstride == 1, dstride == 1) {
+        (false, false) => {
+            for (dchan, schan) in dst.chunks_mut(dstride).zip(
+                    src.chunks(sstride)).take(channels) {
+                dchan[..len].copy_from_slice(&schan[..len]);
+            }
+        },
+        (false, true) => {
+            for (ch, schan) in src.chunks(sstride).take(channels).enumerate() {
+                for (dchunk, &samp) in dst[ch..].chunks_mut(channels).zip(schan.iter()).take(len / channels) {
+                    dchunk[0] = samp;
+                }
+            }
+        },
+        (true, false) => {
+            for (i, frame) in src.chunks_exact(channels).take(len).enumerate() {
+                for (&samp, dchan) in frame.iter().zip(dst[i..].chunks_mut(dstride)) {
+                    dchan[0] = samp;
+                }
+            }
+        },
+        (true, true) => {
+            dst[..len].copy_from_slice(&src[..len]);
+        },
+    }
+}
+
 impl<T: Copy> AudioQueue<T> {
     fn new(channels: usize, rec_size: usize, ileaved: bool) -> Self {
         Self {
@@ -22,6 +53,14 @@ impl<T: Copy> AudioQueue<T> {
         }
     }
     fn get_cur_size(&self) -> usize { self.end - self.start }
+    fn get_length(&self) -> usize {
+        let size = self.get_cur_size();
+        if !self.ileaved {
+            size
+        } else {
+            size / self.channels
+        }
+    }
     fn get_cur_avail(&self) -> usize { self.stride - self.end }
     fn get_potentially_avail(&self) -> usize { self.stride - self.get_cur_size() }
     fn read(&mut self, src: &NAAudioBuffer<T>) {
@@ -39,11 +78,20 @@ impl<T: Copy> AudioQueue<T> {
                 let old_len = self.get_cur_size();
                 let new_len = src.get_length();
-                for (dst, (old, new)) in new_buf.chunks_exact_mut(new_stride).zip(
-                        self.data.chunks_exact(self.stride).zip(
-                        src.get_data().chunks(src.get_stride()))) {
-                    dst[..old_len].copy_from_slice(&old[self.start..self.end]);
-                    dst[old_len..][..new_len].copy_from_slice(&new[..new_len]);
+                if old_len > 0 {
+                    if !self.ileaved {
+                        for (dst, (old, new)) in new_buf.chunks_exact_mut(new_stride).zip(
+                                self.data.chunks_exact(self.stride).zip(
+                                src.get_data().chunks(src.get_stride()))) {
+                            dst[..old_len].copy_from_slice(&old[self.start..self.end]);
+                            dst[old_len..][..new_len].copy_from_slice(&new[..new_len]);
+                        }
+                    } else {
+                        new_buf[..old_len].copy_from_slice(&self.data[self.start..self.end]);
+                        copy_audio(&mut new_buf[old_len..], 1, src.get_data(), src.get_stride(), new_len, self.channels);
+                    }
+                } else {
+                    copy_audio(&mut new_buf, if !self.ileaved { new_stride } else { 1 }, src.get_data(), src.get_stride(), new_len, self.channels);
                 }
                 self.data = new_buf;
                 self.stride = new_stride;
@@ -52,19 +100,20 @@ impl<T: Copy> AudioQueue<T> {
                 return;
             }
         }
-        for (dst, src) in self.data.chunks_exact_mut(self.stride).zip(src.get_data().chunks_exact(src.get_stride())) {
-            dst[self.end..][..to_copy].copy_from_slice(&src[..to_copy]);
-        }
+        copy_audio(&mut self.data[self.end..], if !self.ileaved { self.stride } else { 1 }, src.get_data(), src.get_stride(),
+                   to_copy, self.channels);
         self.end += to_copy;
     }
     fn write(&mut self, dbuf: &mut NAAudioBuffer<T>) {
-        let dst_len = dbuf.get_length();
+        let mut dst_len = dbuf.get_length();
         let dst_stride = dbuf.get_stride();
         let dst = dbuf.get_data_mut().unwrap();
-        for (dst, src) in dst.chunks_mut(dst_stride).zip(self.data.chunks_exact(self.stride)) {
-            dst[..dst_len].copy_from_slice(&src[self.start..][..dst_len]);
+        if dst_stride == 1 {
+            dst_len *= self.channels;
         }
+        copy_audio(dst, dst_stride, &self.data[self.start..], if !self.ileaved { self.stride } else { 1 },
+                   dst_len, self.channels);
         self.start += dst_len;
     }
     fn renorm(&mut self) {
@@ -96,11 +145,11 @@ enum AudioDataType {
 impl AudioDataType {
     fn get_length(&self) -> usize {
         match self {
-            AudioDataType::U8(ref queue) => queue.get_cur_size(),
-            AudioDataType::I16(ref queue) => queue.get_cur_size(),
-            AudioDataType::I32(ref queue) => queue.get_cur_size(),
-            AudioDataType::F32(ref queue) => queue.get_cur_size(),
-            AudioDataType::Packed(ref queue) => queue.get_cur_size(),
+            AudioDataType::U8(ref queue) => queue.get_length(),
+            AudioDataType::I16(ref queue) => queue.get_length(),
+            AudioDataType::I32(ref queue) => queue.get_length(),
+            AudioDataType::F32(ref queue) => queue.get_length(),
+            AudioDataType::Packed(ref queue) => queue.get_length(),
         }
     }
 }
@@ -110,10 +159,11 @@ pub struct AudioConverter {
     dst_fmt: NAAudioInfo,
     dst_chmap: NAChannelMap,
    apts: Option<u64>,
+    resampler: NAResample,
 }
 
 impl AudioConverter {
-    pub fn new(_sinfo: &NAAudioInfo, dinfo: &NAAudioInfo, dst_chmap: NAChannelMap) -> Self {
+    pub fn new(sinfo: &NAAudioInfo, dinfo: &NAAudioInfo, dst_chmap: NAChannelMap) -> Self {
         let ch = usize::from(dinfo.channels);
         let size = dinfo.block_len * 2;
         let il = !dinfo.format.planar;
@@ -124,15 +174,18 @@ impl AudioConverter {
             (32, true, _) => AudioDataType::F32(AudioQueue::new(ch, size, il)),
             _ => AudioDataType::Packed(AudioQueue::new(ch, size, il)),
         };
+        const RESAMPLE_FILTER_ORDER: usize = 16;
+        let resampler = NAResample::new(sinfo.sample_rate, dinfo, &dst_chmap, RESAMPLE_FILTER_ORDER);
         Self {
             queue,
             dst_fmt: *dinfo,
             dst_chmap,
             apts: None,
+            resampler,
         }
     }
     pub fn queue_frame(&mut self, buf: NABufferType, tinfo: NATimeInfo) -> bool {
-        let ret = convert_audio_frame(&buf, &self.dst_fmt, &self.dst_chmap);
+        let ret = self.resampler.convert_audio_frame(&buf);
         if let Ok(dbuf) = ret {
             if self.apts.is_none() && tinfo.get_pts().is_some() {
                 self.apts = tinfo.get_pts();
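
The copy_audio() helper introduced in the first hunk encodes the convention used
throughout this change: a stride of 1 marks a buffer as interleaved (samples of
all channels alternate within one plane), while a larger stride marks it as
planar (each channel owns its own stride-sized plane), and the helper dispatches
on that flag for the source and destination independently. Below is a minimal
standalone sketch of just the planar-to-interleaved arm; the function name, the
i16 sample type and the buffer sizes are illustrative only and not part of
nihav's API.

    // Planar-to-interleaved copy, mirroring the (sstride != 1, dstride == 1)
    // arm of copy_audio() above. Here `len` counts interleaved samples,
    // i.e. frames * channels.
    fn planar_to_interleaved(dst: &mut [i16], src: &[i16], sstride: usize,
                             len: usize, channels: usize) {
        for (ch, schan) in src.chunks(sstride).take(channels).enumerate() {
            // Walk the destination one frame at a time, starting at this
            // channel's slot, and drop one source sample into each frame.
            for (dchunk, &samp) in dst[ch..].chunks_mut(channels)
                    .zip(schan.iter()).take(len / channels) {
                dchunk[0] = samp;
            }
        }
    }

    fn main() {
        // Two planar channels, four samples each (plane stride = 4).
        let src: [i16; 8] = [10, 11, 12, 13, 20, 21, 22, 23];
        let mut dst = [0i16; 8];
        planar_to_interleaved(&mut dst, &src, 4, 8, 2);
        assert_eq!(dst, [10, 20, 11, 21, 12, 22, 13, 23]);
    }

The same convention is why write() multiplies dst_len by the channel count
before calling copy_audio() when the destination stride is 1: the
interleaved-destination arms expect len in total samples rather than in
per-channel samples.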
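
The write() path hits the opposite arm when the queue itself is interleaved and
the destination frame is planar; there len stays in per-channel samples
(frames), since dst_len is only scaled when the destination stride is 1. A
matching sketch under the same assumptions (hypothetical name, i16 samples):

    // Interleaved-to-planar copy, mirroring the (sstride == 1, dstride != 1)
    // arm of copy_audio(). Here `len` is the number of frames to copy.
    fn interleaved_to_planar(dst: &mut [i16], dstride: usize, src: &[i16],
                             len: usize, channels: usize) {
        for (i, frame) in src.chunks_exact(channels).take(len).enumerate() {
            // Frame i contributes sample i to every channel plane.
            for (&samp, dchan) in frame.iter().zip(dst[i..].chunks_mut(dstride)) {
                dchan[0] = samp;
            }
        }
    }

    fn main() {
        let src: [i16; 8] = [10, 20, 11, 21, 12, 22, 13, 23]; // L/R interleaved
        let mut dst = [0i16; 8];                               // two planes of 4
        interleaved_to_planar(&mut dst, 4, &src, 4, 2);
        assert_eq!(dst, [10, 11, 12, 13, 20, 21, 22, 23]);
    }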