From 98c6f2f05967e79b7d946c908d1198c108704293 Mon Sep 17 00:00:00 2001
From: Kostya Shishkov
Date: Fri, 6 Mar 2020 19:12:21 +0100
Subject: [PATCH] support interleaved audio properly

Now NAAudioBuffer has a step field which tells the distance to the next
sample in the channel. This can be used to work with interleaved audio
stored as native samples (instead of a packed buffer like before).
---
 nihav-codec-support/src/test/wavwriter.rs | 14 +++++--
 nihav-core/src/frame.rs                   | 45 ++++++++++++++++++-----
 nihav-core/src/soundcvt/mod.rs            | 22 ++++++++---
 3 files changed, 62 insertions(+), 19 deletions(-)

diff --git a/nihav-codec-support/src/test/wavwriter.rs b/nihav-codec-support/src/test/wavwriter.rs
index 982fbc3..061abfa 100644
--- a/nihav-codec-support/src/test/wavwriter.rs
+++ b/nihav-codec-support/src/test/wavwriter.rs
@@ -36,11 +36,19 @@ macro_rules! write_data {
         let nch = ainfo.get_channels() as usize;
         let mut offs: Vec<usize> = Vec::with_capacity(nch);
         for ch in 0..nch { offs.push($buf.get_offset(ch)); }
+        let is_planar = $buf.get_step() == 1;
         let data = $buf.get_data();
-        for i in 0..len {
-            for ch in 0..nch {
-                let sample = data[offs[ch] + i];
+        if is_planar {
+            for i in 0..len {
+                for ch in 0..nch {
+                    let sample = data[offs[ch] + i];
+                    $write($wr, sample)?;
+                }
+            }
+        } else {
+            for i in 0..len*nch {
+                let sample = data[i];
                 $write($wr, sample)?;
             }
         }
diff --git a/nihav-core/src/frame.rs b/nihav-core/src/frame.rs
index 95bad26..145bd2a 100644
--- a/nihav-core/src/frame.rs
+++ b/nihav-core/src/frame.rs
@@ -197,6 +197,7 @@ pub struct NAAudioBuffer<T> {
     data:   NABufferRef<Vec<T>>,
     offs:   Vec<usize>,
     stride: usize,
+    step:   usize,
     chmap:  NAChannelMap,
     len:    usize,
 }
@@ -209,6 +210,8 @@ impl<T: Clone> NAAudioBuffer<T> {
     }
     /// Returns the distance between the start of one channel and the next one.
     pub fn get_stride(&self) -> usize { self.stride }
+    /// Returns the distance between the samples in one channel.
+    pub fn get_step(&self) -> usize { self.step }
     /// Returns audio format information.
     pub fn get_info(&self) -> NAAudioInfo { self.info }
     /// Returns channel map.
@@ -223,7 +226,7 @@ impl<T: Clone> NAAudioBuffer<T> {
         data.clone_from(self.data.as_ref());
         let mut offs: Vec<usize> = Vec::with_capacity(self.offs.len());
         offs.clone_from(&self.offs);
-        NAAudioBuffer { info: self.info, data: NABufferRef::new(data), offs, chmap: self.get_chmap().clone(), len: self.len, stride: self.stride }
+        NAAudioBuffer { info: self.info, data: NABufferRef::new(data), offs, chmap: self.get_chmap().clone(), len: self.len, stride: self.stride, step: self.step }
     }
     /// Return the length of frame in samples.
     pub fn get_length(&self) -> usize { self.len }
@@ -233,7 +236,7 @@ impl NAAudioBuffer<u8> {
     /// Constructs a new `NAAudioBuffer` instance.
     pub fn new_from_buf(info: NAAudioInfo, data: NABufferRef<Vec<u8>>, chmap: NAChannelMap) -> Self {
         let len = data.len();
-        NAAudioBuffer { info, data, chmap, offs: Vec::new(), len, stride: 0 }
+        NAAudioBuffer { info, data, chmap, offs: Vec::new(), len, stride: 0, step: 0 }
     }
 }
@@ -355,6 +358,17 @@ impl NABufferType {
             _ => 0,
         }
     }
+    /// Returns the distance between two samples in one channel.
+    pub fn get_audio_step(&self) -> usize {
+        match *self {
+            NABufferType::AudioU8(ref ab)     => ab.get_step(),
+            NABufferType::AudioI16(ref ab)    => ab.get_step(),
+            NABufferType::AudioI32(ref ab)    => ab.get_step(),
+            NABufferType::AudioF32(ref ab)    => ab.get_step(),
+            NABufferType::AudioPacked(ref ab) => ab.get_step(),
+            _ => 0,
+        }
+    }
     /// Returns reference to 8-bit (or packed) audio buffer.
     pub fn get_abuf_u8(&self) -> Option<NAAudioBuffer<u8>> {
         match *self {
@@ -562,18 +576,29 @@ pub fn alloc_video_buffer(vinfo: NAVideoInfo, align: u8) -> Result
 pub fn alloc_audio_buffer(ainfo: NAAudioInfo, nsamples: usize, chmap: NAChannelMap) -> Result<NABufferType, AllocatorError> {
     let mut offs: Vec<usize> = Vec::new();
-    if ainfo.format.is_planar() || (ainfo.channels == 1 && (ainfo.format.get_bits() % 8) == 0) {
+    if ainfo.format.is_planar() || ((ainfo.format.get_bits() % 8) == 0) {
         let len = nsamples.checked_mul(ainfo.channels as usize);
         if len == None { return Err(AllocatorError::TooLargeDimensions); }
         let length = len.unwrap();
-        let stride = nsamples;
-        for i in 0..ainfo.channels {
-            offs.push((i as usize) * stride);
+        let stride;
+        let step;
+        if ainfo.format.is_planar() {
+            stride = nsamples;
+            step   = 1;
+            for i in 0..ainfo.channels {
+                offs.push((i as usize) * stride);
+            }
+        } else {
+            stride = 1;
+            step   = ainfo.channels as usize;
+            for i in 0..ainfo.channels {
+                offs.push(i as usize);
+            }
         }
         if ainfo.format.is_float() {
             if ainfo.format.get_bits() == 32 {
                 let data: Vec<f32> = vec![0.0; length];
-                let buf: NAAudioBuffer<f32> = NAAudioBuffer { data: NABufferRef::new(data), info: ainfo, offs, chmap, len: nsamples, stride };
+                let buf: NAAudioBuffer<f32> = NAAudioBuffer { data: NABufferRef::new(data), info: ainfo, offs, chmap, len: nsamples, stride, step };
                 Ok(NABufferType::AudioF32(buf))
             } else {
                 Err(AllocatorError::TooLargeDimensions)
@@ -581,11 +606,11 @@ pub fn alloc_audio_buffer(ainfo: NAAudioInfo, nsamples: usize, chmap: NAChannelM
     } else {
         if ainfo.format.get_bits() == 8 && !ainfo.format.is_signed() {
             let data: Vec<u8> = vec![0; length];
-            let buf: NAAudioBuffer<u8> = NAAudioBuffer { data: NABufferRef::new(data), info: ainfo, offs, chmap, len: nsamples, stride };
+            let buf: NAAudioBuffer<u8> = NAAudioBuffer { data: NABufferRef::new(data), info: ainfo, offs, chmap, len: nsamples, stride, step };
             Ok(NABufferType::AudioU8(buf))
         } else if ainfo.format.get_bits() == 16 && ainfo.format.is_signed() {
             let data: Vec<i16> = vec![0; length];
-            let buf: NAAudioBuffer<i16> = NAAudioBuffer { data: NABufferRef::new(data), info: ainfo, offs, chmap, len: nsamples, stride };
+            let buf: NAAudioBuffer<i16> = NAAudioBuffer { data: NABufferRef::new(data), info: ainfo, offs, chmap, len: nsamples, stride, step };
             Ok(NABufferType::AudioI16(buf))
         } else {
             Err(AllocatorError::TooLargeDimensions)
@@ -596,7 +621,7 @@ pub fn alloc_audio_buffer(ainfo: NAAudioInfo, nsamples: usize, chmap: NAChannelM
         if len == None { return Err(AllocatorError::TooLargeDimensions); }
         let length = ainfo.format.get_audio_size(len.unwrap() as u64);
         let data: Vec<u8> = vec![0; length];
-        let buf: NAAudioBuffer<u8> = NAAudioBuffer { data: NABufferRef::new(data), info: ainfo, offs, chmap, len: nsamples, stride: 0 };
+        let buf: NAAudioBuffer<u8> = NAAudioBuffer { data: NABufferRef::new(data), info: ainfo, offs, chmap, len: nsamples, stride: 0, step: 0 };
         Ok(NABufferType::AudioPacked(buf))
     }
 }
diff --git a/nihav-core/src/soundcvt/mod.rs b/nihav-core/src/soundcvt/mod.rs
index 4f9d81c..1b7b030 100644
--- a/nihav-core/src/soundcvt/mod.rs
+++ b/nihav-core/src/soundcvt/mod.rs
@@ -368,6 +368,8 @@ Result<NABufferType, SoundConvertError> {
     }
     let mut dst_buf = ret.unwrap();
+    let sstep = src.get_audio_step();
+    let dstep = dst_buf.get_audio_step();
     let sr: Box<dyn SampleReader> = match src {
         NABufferType::AudioU8(ref ab) => {
             let stride = ab.get_stride();
@@ -427,26 +429,34 @@ Result<NABufferType, SoundConvertError> {
     if !into_float {
         let mut svec = vec![0; src_chmap.num_channels()];
         let mut dvec = vec![0; dst_chmap.num_channels()];
-        for i in 0..nsamples {
-            sr.get_samples_i32(i, &mut svec);
+        let mut spos = 0;
+        let mut dpos = 0;
+        for _ in 0..nsamples {
+            sr.get_samples_i32(spos, &mut svec);
             if !channel_op.is_remix() {
                 apply_channel_op(&channel_op, &svec, &mut dvec);
             } else {
                 remix_i32(&channel_op, &svec, &mut dvec);
             }
-            sw.store_samples_i32(i, &dvec);
+            sw.store_samples_i32(dpos, &dvec);
+            spos += sstep;
+            dpos += dstep;
         }
     } else {
         let mut svec = vec![0.0; src_chmap.num_channels()];
         let mut dvec = vec![0.0; dst_chmap.num_channels()];
-        for i in 0..nsamples {
-            sr.get_samples_f32(i, &mut svec);
+        let mut spos = 0;
+        let mut dpos = 0;
+        for _ in 0..nsamples {
+            sr.get_samples_f32(spos, &mut svec);
             if !channel_op.is_remix() {
                 apply_channel_op(&channel_op, &svec, &mut dvec);
             } else {
                 remix_f32(&channel_op, &svec, &mut dvec);
             }
-            sw.store_samples_f32(i, &dvec);
+            sw.store_samples_f32(dpos, &dvec);
+            spos += sstep;
+            dpos += dstep;
         }
     }
     drop(sw);
-- 
2.30.2
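
Note on the layout convention the new field encodes: sample i of channel ch
lives at data[offs[ch] + i * step], with get_stride() giving the distance
between channel starts (planar: step == 1, stride == nsamples; interleaved:
step == number of channels, stride == 1). A minimal self-contained sketch of
that addressing, using a hypothetical nth_sample helper rather than the
NihAV types:

// Sketch of the addressing implied by the new step field:
// sample i of channel ch lives at data[offs[ch] + i * step].
// Planar layout:      step == 1,   stride == nsamples, offs[ch] == ch * stride.
// Interleaved layout: step == nch, stride == 1,        offs[ch] == ch.
fn nth_sample<T: Copy>(data: &[T], offs: &[usize], step: usize, ch: usize, i: usize) -> T {
    data[offs[ch] + i * step]
}

fn main() {
    // Two channels, three samples per channel, stored both ways.
    let planar      = [0i16, 1, 2, 10, 11, 12];  // L L L R R R
    let interleaved = [0i16, 10, 1, 11, 2, 12];  // L R L R L R
    let planar_offs      = [0usize, 3];          // offs[ch] = ch * stride, stride = 3
    let interleaved_offs = [0usize, 1];          // offs[ch] = ch, stride = 1
    for ch in 0..2 {
        for i in 0..3 {
            assert_eq!(nth_sample(&planar,      &planar_offs,      1, ch, i),
                       nth_sample(&interleaved, &interleaved_offs, 2, ch, i));
        }
    }
}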
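
The revised loop in soundcvt/mod.rs advances the source and destination
positions by their own step values instead of sharing one sample index, so
planar and interleaved buffers can appear on either side of the conversion.
A standalone sketch of the same stepping pattern (hypothetical copy_frames
helper, not the crate's API):

// Sketch of the stepping pattern from the updated conversion loop:
// read a frame of samples at spos, store it at dpos, then advance each
// position by that buffer's own step (1 for planar, nch for interleaved).
fn copy_frames(src: &[i32], src_offs: &[usize], sstep: usize,
               dst: &mut [i32], dst_offs: &[usize], dstep: usize,
               nsamples: usize) {
    let mut spos = 0;
    let mut dpos = 0;
    for _ in 0..nsamples {
        for ch in 0..src_offs.len() {
            dst[dst_offs[ch] + dpos] = src[src_offs[ch] + spos];
        }
        spos += sstep;
        dpos += dstep;
    }
}

fn main() {
    // Interleaved stereo source (step 2) copied into a planar destination (step 1).
    let src = [0, 10, 1, 11, 2, 12];  // L R L R L R
    let mut dst = [0i32; 6];          // becomes L L L R R R
    copy_frames(&src, &[0, 1], 2, &mut dst, &[0, 3], 1, 3);
    assert_eq!(dst, [0, 1, 2, 10, 11, 12]);
}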