+
+ let mut offset = pos * self.bpp * dst.len();
+
+ for el in dst.iter_mut() {
+ let src = &self.data[offset..];
+ *el = if !self.fmt.float {
+ match (self.bpp, self.fmt.be) {
+ (1, _) => if !self.fmt.signed { src[0].cvt_into() } else { (src[0] ^ 0x80).cvt_into() },
+ (2, true) => (read_u16be(src).unwrap() as i16).cvt_into(),
+ (2, false) => (read_u16le(src).unwrap() as i16).cvt_into(),
+ (3, true) => ((read_u24be(src).unwrap() << 8) as i32).cvt_into(),
+ (3, false) => ((read_u24be(src).unwrap() << 8) as i32).cvt_into(),
+ (4, true) => (read_u32be(src).unwrap() as i32).cvt_into(),
+ (4, false) => (read_u32be(src).unwrap() as i32).cvt_into(),
+ _ => unreachable!(),
+ }
+ } else {
+ match (self.bpp, self.fmt.be) {
+ (4, true) => read_f32be(src).unwrap().cvt_into(),
+ (4, false) => read_f32le(src).unwrap().cvt_into(),
+ (8, true) => (read_f64be(src).unwrap() as f32).cvt_into(),
+ (8, false) => (read_f64le(src).unwrap() as f32).cvt_into(),
+ (_, _) => unreachable!(),
+ }
+ };
+ offset += self.bpp;
+ }
+ }
+}
+
+// `SampleReader` facade over the packed byte reader: both the integer and
+// the float entry points delegate to the same generic `get_samples`
+// (defined earlier in this file), which dispatches on the stored sample
+// format (`self.fmt`) and converts into the destination element type.
+impl SampleReader for PackedSampleReader<'_> {
+    fn get_samples_i32(&self, pos: usize, dst: &mut [i32]) {
+        self.get_samples(pos, dst);
+    }
+    fn get_samples_f32(&self, pos: usize, dst: &mut [f32]) {
+        self.get_samples(pos, dst);
+    }
+}
+
+/// Common interface for storing converted samples into an output buffer.
+///
+/// `pos` is the starting sample position and `src` holds the values to
+/// store from that position onwards (presumably one value per channel —
+/// NOTE(review): verify the addressing convention against the callers;
+/// the packed implementation scales `pos` by `src.len()`, the generic one
+/// does not).
+trait SampleWriter {
+    fn store_samples_i32(&mut self, pos: usize, src: &[i32]);
+    fn store_samples_f32(&mut self, pos: usize, src: &[f32]);
+}
+
+/// Sample writer for destination buffers of a native (unpacked) sample
+/// type `T`; values are converted via `FromFmt` in the trait impl below.
+struct GenericSampleWriter<'a, T:Copy> {
+    data: &'a mut [T],  // destination buffer of typed samples
+    stride: usize,      // element distance between consecutive stores
+}
+
+impl<'a, T: Copy + FromFmt<i32> + FromFmt<f32>> SampleWriter for GenericSampleWriter<'a, T> {
+    // Convert each input value and scatter it into the typed destination
+    // buffer, `stride` elements apart, starting at element `pos`.
+    // Out-of-range positions panic via the slice index, as before.
+    fn store_samples_i32(&mut self, pos: usize, src: &[i32]) {
+        for (no, &smp) in src.iter().enumerate() {
+            self.data[pos + no * self.stride] = smp.cvt_into();
+        }
+    }
+    fn store_samples_f32(&mut self, pos: usize, src: &[f32]) {
+        for (no, &smp) in src.iter().enumerate() {
+            self.data[pos + no * self.stride] = smp.cvt_into();
+        }
+    }
+}
+
+/// Sample writer that packs converted samples into a raw byte buffer
+/// according to the target sample format.
+struct PackedSampleWriter<'a> {
+    data: &'a mut [u8], // destination byte buffer
+    fmt: NASoniton,     // target format; `bits`, `be`, `signed` and `float` are consulted
+    bpp: usize,         // bytes per sample, derived from `fmt.bits` in `new()`
+}
+
+impl<'a> PackedSampleWriter<'a> {
+    /// Creates a writer that packs samples into `data` as described by `fmt`.
+    ///
+    /// Only byte-aligned sample widths are supported; any other bit width
+    /// aborts via `unimplemented!()`, matching the reader counterpart.
+    fn new(data: &'a mut [u8], fmt: NASoniton) -> Self {
+        if (fmt.bits & 7) != 0 { unimplemented!(); }
+        Self { data, fmt, bpp: (fmt.bits >> 3) as usize }
+    }
+
+    /// Converts every value in `src` and packs it into the byte buffer.
+    ///
+    /// The starting byte offset is `pos * bpp * src.len()`, i.e. `pos` is
+    /// scaled by the group size, so the destination layout is interleaved
+    /// with `src.len()` values per position.
+    fn store_samples<T: Copy>(&mut self, pos: usize, src: &[T]) where u8: FromFmt<T>, i16: FromFmt<T>, i32: FromFmt<T>, f32: FromFmt<T> {
+        let base = pos * self.bpp * src.len();
+        for (no, &smp) in src.iter().enumerate() {
+            let dst = &mut self.data[base + no * self.bpp..];
+            if self.fmt.float {
+                match (self.bpp, self.fmt.be) {
+                    (4, true)  => write_f32be(dst, f32::cvt_from(smp)).unwrap(),
+                    (4, false) => write_f32le(dst, f32::cvt_from(smp)).unwrap(),
+                    // 64-bit output is widened from the 32-bit intermediate.
+                    (8, true)  => write_f64be(dst, f64::from(f32::cvt_from(smp))).unwrap(),
+                    (8, false) => write_f64le(dst, f64::from(f32::cvt_from(smp))).unwrap(),
+                    (_, _) => unreachable!(),
+                };
+            } else {
+                match (self.bpp, self.fmt.be) {
+                    (1, _) => {
+                        // Signed 8-bit output gets its sign bit flipped,
+                        // mirroring the reader's bias conversion.
+                        let mut b = u8::cvt_from(smp);
+                        if self.fmt.signed {
+                            b ^= 0x80;
+                        }
+                        dst[0] = b;
+                    },
+                    (2, true)  => write_u16be(dst, i16::cvt_from(smp) as u16).unwrap(),
+                    (2, false) => write_u16le(dst, i16::cvt_from(smp) as u16).unwrap(),
+                    // 24-bit output keeps the top three bytes of the i32 value.
+                    (3, true)  => write_u24be(dst, (i32::cvt_from(smp) >> 8) as u32).unwrap(),
+                    (3, false) => write_u24le(dst, (i32::cvt_from(smp) >> 8) as u32).unwrap(),
+                    (4, true)  => write_u32be(dst, i32::cvt_from(smp) as u32).unwrap(),
+                    (4, false) => write_u32le(dst, i32::cvt_from(smp) as u32).unwrap(),
+                    _ => unreachable!(),
+                };
+            }
+        }
+    }
+}
+
+impl SampleWriter for PackedSampleWriter<'_> {
+ fn store_samples_i32(&mut self, pos: usize, src: &[i32]) {
+ self.store_samples(pos, src);
+ }
+ fn store_samples_f32(&mut self, pos: usize, src: &[f32]) {
+ self.store_samples(pos, src);