Now NAAudioBuffer has a step field which tells the distance to the next
sample in the same channel. This can be used to work with interleaved audio
stored as native samples (instead of a packed byte buffer as before).
let nch = ainfo.get_channels() as usize;
let mut offs: Vec<usize> = Vec::with_capacity(nch);
for ch in 0..nch { offs.push($buf.get_offset(ch)); }
let nch = ainfo.get_channels() as usize;
let mut offs: Vec<usize> = Vec::with_capacity(nch);
for ch in 0..nch { offs.push($buf.get_offset(ch)); }
+ let is_planar = $buf.get_step() == 1;
let data = $buf.get_data();
let data = $buf.get_data();
- for i in 0..len {
- for ch in 0..nch {
- let sample = data[offs[ch] + i];
+ if is_planar {
+ for i in 0..len {
+ for ch in 0..nch {
+ let sample = data[offs[ch] + i];
+ $write($wr, sample)?;
+ }
+ }
+ } else {
+ for i in 0..len*nch {
+ let sample = data[i];
$write($wr, sample)?;
}
}
$write($wr, sample)?;
}
}
data: NABufferRef<Vec<T>>,
offs: Vec<usize>,
stride: usize,
data: NABufferRef<Vec<T>>,
offs: Vec<usize>,
stride: usize,
chmap: NAChannelMap,
len: usize,
}
chmap: NAChannelMap,
len: usize,
}
}
/// Returns the distance (in samples) between the start of one channel's data and the start of the next one.
/// NOTE(review): packed buffers are constructed with `stride: 0` — confirm callers treat 0 as "no per-channel layout".
pub fn get_stride(&self) -> usize { self.stride }
}
/// Returns the distance (in samples) between the start of one channel's data and the start of the next one.
/// NOTE(review): packed buffers are constructed with `stride: 0` — confirm callers treat 0 as "no per-channel layout".
pub fn get_stride(&self) -> usize { self.stride }
+ /// Returns the distance between the samples in one channel.
+ pub fn get_step(&self) -> usize { self.step }
/// Returns audio format information (the `NAAudioInfo` this buffer was created with, returned by value).
pub fn get_info(&self) -> NAAudioInfo { self.info }
/// Returns channel map.
/// Returns audio format information (the `NAAudioInfo` this buffer was created with, returned by value).
pub fn get_info(&self) -> NAAudioInfo { self.info }
/// Returns channel map.
data.clone_from(self.data.as_ref());
let mut offs: Vec<usize> = Vec::with_capacity(self.offs.len());
offs.clone_from(&self.offs);
data.clone_from(self.data.as_ref());
let mut offs: Vec<usize> = Vec::with_capacity(self.offs.len());
offs.clone_from(&self.offs);
- NAAudioBuffer { info: self.info, data: NABufferRef::new(data), offs, chmap: self.get_chmap().clone(), len: self.len, stride: self.stride }
+ NAAudioBuffer { info: self.info, data: NABufferRef::new(data), offs, chmap: self.get_chmap().clone(), len: self.len, stride: self.stride, step: self.step }
}
/// Returns the length of the frame in samples.
/// NOTE(review): for buffers built via `new_from_buf` this is set to the raw data length, not a sample count — verify with callers.
pub fn get_length(&self) -> usize { self.len }
}
/// Returns the length of the frame in samples.
/// NOTE(review): for buffers built via `new_from_buf` this is set to the raw data length, not a sample count — verify with callers.
pub fn get_length(&self) -> usize { self.len }
/// Constructs a new `NAAudioBuffer` instance.
pub fn new_from_buf(info: NAAudioInfo, data: NABufferRef<Vec<u8>>, chmap: NAChannelMap) -> Self {
let len = data.len();
/// Constructs a new `NAAudioBuffer` instance.
pub fn new_from_buf(info: NAAudioInfo, data: NABufferRef<Vec<u8>>, chmap: NAChannelMap) -> Self {
let len = data.len();
- NAAudioBuffer { info, data, chmap, offs: Vec::new(), len, stride: 0 }
+ NAAudioBuffer { info, data, chmap, offs: Vec::new(), len, stride: 0, step: 0 }
+ /// Returns the distance between two samples in one channel.
+ pub fn get_audio_step(&self) -> usize {
+ match *self {
+ NABufferType::AudioU8(ref ab) => ab.get_step(),
+ NABufferType::AudioI16(ref ab) => ab.get_step(),
+ NABufferType::AudioI32(ref ab) => ab.get_step(),
+ NABufferType::AudioF32(ref ab) => ab.get_step(),
+ NABufferType::AudioPacked(ref ab) => ab.get_step(),
+ _ => 0,
+ }
+ }
/// Returns reference to 8-bit (or packed) audio buffer.
pub fn get_abuf_u8(&self) -> Option<NAAudioBuffer<u8>> {
match *self {
/// Returns reference to 8-bit (or packed) audio buffer.
pub fn get_abuf_u8(&self) -> Option<NAAudioBuffer<u8>> {
match *self {
#[allow(clippy::collapsible_if)]
pub fn alloc_audio_buffer(ainfo: NAAudioInfo, nsamples: usize, chmap: NAChannelMap) -> Result<NABufferType, AllocatorError> {
let mut offs: Vec<usize> = Vec::new();
#[allow(clippy::collapsible_if)]
pub fn alloc_audio_buffer(ainfo: NAAudioInfo, nsamples: usize, chmap: NAChannelMap) -> Result<NABufferType, AllocatorError> {
let mut offs: Vec<usize> = Vec::new();
- if ainfo.format.is_planar() || (ainfo.channels == 1 && (ainfo.format.get_bits() % 8) == 0) {
+ if ainfo.format.is_planar() || ((ainfo.format.get_bits() % 8) == 0) {
let len = nsamples.checked_mul(ainfo.channels as usize);
if len == None { return Err(AllocatorError::TooLargeDimensions); }
let length = len.unwrap();
let len = nsamples.checked_mul(ainfo.channels as usize);
if len == None { return Err(AllocatorError::TooLargeDimensions); }
let length = len.unwrap();
- let stride = nsamples;
- for i in 0..ainfo.channels {
- offs.push((i as usize) * stride);
+ let stride;
+ let step;
+ if ainfo.format.is_planar() {
+ stride = nsamples;
+ step = 1;
+ for i in 0..ainfo.channels {
+ offs.push((i as usize) * stride);
+ }
+ } else {
+ stride = 1;
+ step = ainfo.channels as usize;
+ for i in 0..ainfo.channels {
+ offs.push(i as usize);
+ }
}
if ainfo.format.is_float() {
if ainfo.format.get_bits() == 32 {
let data: Vec<f32> = vec![0.0; length];
}
if ainfo.format.is_float() {
if ainfo.format.get_bits() == 32 {
let data: Vec<f32> = vec![0.0; length];
- let buf: NAAudioBuffer<f32> = NAAudioBuffer { data: NABufferRef::new(data), info: ainfo, offs, chmap, len: nsamples, stride };
+ let buf: NAAudioBuffer<f32> = NAAudioBuffer { data: NABufferRef::new(data), info: ainfo, offs, chmap, len: nsamples, stride, step };
Ok(NABufferType::AudioF32(buf))
} else {
Err(AllocatorError::TooLargeDimensions)
Ok(NABufferType::AudioF32(buf))
} else {
Err(AllocatorError::TooLargeDimensions)
} else {
if ainfo.format.get_bits() == 8 && !ainfo.format.is_signed() {
let data: Vec<u8> = vec![0; length];
} else {
if ainfo.format.get_bits() == 8 && !ainfo.format.is_signed() {
let data: Vec<u8> = vec![0; length];
- let buf: NAAudioBuffer<u8> = NAAudioBuffer { data: NABufferRef::new(data), info: ainfo, offs, chmap, len: nsamples, stride };
+ let buf: NAAudioBuffer<u8> = NAAudioBuffer { data: NABufferRef::new(data), info: ainfo, offs, chmap, len: nsamples, stride, step };
Ok(NABufferType::AudioU8(buf))
} else if ainfo.format.get_bits() == 16 && ainfo.format.is_signed() {
let data: Vec<i16> = vec![0; length];
Ok(NABufferType::AudioU8(buf))
} else if ainfo.format.get_bits() == 16 && ainfo.format.is_signed() {
let data: Vec<i16> = vec![0; length];
- let buf: NAAudioBuffer<i16> = NAAudioBuffer { data: NABufferRef::new(data), info: ainfo, offs, chmap, len: nsamples, stride };
+ let buf: NAAudioBuffer<i16> = NAAudioBuffer { data: NABufferRef::new(data), info: ainfo, offs, chmap, len: nsamples, stride, step };
Ok(NABufferType::AudioI16(buf))
} else {
Err(AllocatorError::TooLargeDimensions)
Ok(NABufferType::AudioI16(buf))
} else {
Err(AllocatorError::TooLargeDimensions)
if len == None { return Err(AllocatorError::TooLargeDimensions); }
let length = ainfo.format.get_audio_size(len.unwrap() as u64);
let data: Vec<u8> = vec![0; length];
if len == None { return Err(AllocatorError::TooLargeDimensions); }
let length = ainfo.format.get_audio_size(len.unwrap() as u64);
let data: Vec<u8> = vec![0; length];
- let buf: NAAudioBuffer<u8> = NAAudioBuffer { data: NABufferRef::new(data), info: ainfo, offs, chmap, len: nsamples, stride: 0 };
+ let buf: NAAudioBuffer<u8> = NAAudioBuffer { data: NABufferRef::new(data), info: ainfo, offs, chmap, len: nsamples, stride: 0, step: 0 };
Ok(NABufferType::AudioPacked(buf))
}
}
Ok(NABufferType::AudioPacked(buf))
}
}
}
let mut dst_buf = ret.unwrap();
}
let mut dst_buf = ret.unwrap();
+ let sstep = src.get_audio_step();
+ let dstep = dst_buf.get_audio_step();
let sr: Box<dyn SampleReader> = match src {
NABufferType::AudioU8(ref ab) => {
let stride = ab.get_stride();
let sr: Box<dyn SampleReader> = match src {
NABufferType::AudioU8(ref ab) => {
let stride = ab.get_stride();
if !into_float {
let mut svec = vec![0; src_chmap.num_channels()];
let mut dvec = vec![0; dst_chmap.num_channels()];
if !into_float {
let mut svec = vec![0; src_chmap.num_channels()];
let mut dvec = vec![0; dst_chmap.num_channels()];
- for i in 0..nsamples {
- sr.get_samples_i32(i, &mut svec);
+ let mut spos = 0;
+ let mut dpos = 0;
+ for _ in 0..nsamples {
+ sr.get_samples_i32(spos, &mut svec);
if !channel_op.is_remix() {
apply_channel_op(&channel_op, &svec, &mut dvec);
} else {
remix_i32(&channel_op, &svec, &mut dvec);
}
if !channel_op.is_remix() {
apply_channel_op(&channel_op, &svec, &mut dvec);
} else {
remix_i32(&channel_op, &svec, &mut dvec);
}
- sw.store_samples_i32(i, &dvec);
+ sw.store_samples_i32(dpos, &dvec);
+ spos += sstep;
+ dpos += dstep;
}
} else {
let mut svec = vec![0.0; src_chmap.num_channels()];
let mut dvec = vec![0.0; dst_chmap.num_channels()];
}
} else {
let mut svec = vec![0.0; src_chmap.num_channels()];
let mut dvec = vec![0.0; dst_chmap.num_channels()];
- for i in 0..nsamples {
- sr.get_samples_f32(i, &mut svec);
+ let mut spos = 0;
+ let mut dpos = 0;
+ for _ in 0..nsamples {
+ sr.get_samples_f32(spos, &mut svec);
if !channel_op.is_remix() {
apply_channel_op(&channel_op, &svec, &mut dvec);
} else {
remix_f32(&channel_op, &svec, &mut dvec);
}
if !channel_op.is_remix() {
apply_channel_op(&channel_op, &svec, &mut dvec);
} else {
remix_f32(&channel_op, &svec, &mut dvec);
}
- sw.store_samples_f32(i, &dvec);
+ sw.store_samples_f32(dpos, &dvec);
+ spos += sstep;
+ dpos += dstep;