improve audio conversion
[nihav-encoder.git] / src/acvt.rs
use nihav_core::frame::*;
use nihav_core::soundcvt::*;

// Queue of converted audio samples, stored either as per-channel planes
// (one stride-sized chunk per channel) or as a single interleaved chunk.
struct AudioQueue<T> {
    start: usize,
    end: usize,
    stride: usize,
    channels: usize,
    data: Vec<T>,
    ileaved: bool,
}

impl<T:Clone+Copy+From<u8>> AudioQueue<T> {
    fn new(channels: usize, rec_size: usize, ileaved: bool) -> Self {
        Self {
            start: 0,
            end: 0,
            stride: if ileaved { rec_size * channels } else { rec_size },
            channels,
            ileaved,
            data: vec![0.into(); rec_size * channels],
        }
    }
    fn get_cur_size(&self) -> usize { self.end - self.start }
    fn get_cur_avail(&self) -> usize { self.stride - self.end }
    fn get_potentially_avail(&self) -> usize { self.stride - self.get_cur_size() }
    // Appends samples from the source buffer, renormalising or growing
    // the internal storage when the tail has no room left.
    fn read(&mut self, src: &NAAudioBuffer<T>) {
        let mut to_copy = src.get_length();
        if self.ileaved {
            to_copy *= self.channels;
        }
        if self.get_cur_avail() < to_copy {
            if self.get_potentially_avail() >= to_copy {
                self.renorm();
            } else {
                let new_len = (self.stride * 2).max(self.get_cur_size() + src.get_length());
                let mut new_buf = vec![0.into(); new_len * self.channels];
                let new_stride = if !self.ileaved { new_len } else { new_len * self.channels };

                let old_len = self.get_cur_size();
                // from here on new_len refers to the length of the incoming data
                let new_len = src.get_length();
                if old_len > 0 {
                    for (dst, (old, new)) in new_buf.chunks_exact_mut(new_stride).zip(
                            self.data.chunks_exact(self.stride).zip(
                                src.get_data().chunks(src.get_stride()))) {
                        dst[..old_len].copy_from_slice(&old[self.start..self.end]);
                        dst[old_len..][..new_len].copy_from_slice(&new[..new_len]);
                    }
                } else {
                    for (dst, new) in new_buf.chunks_exact_mut(new_stride).zip(
                            src.get_data().chunks(src.get_stride())) {
                        dst[..new_len].copy_from_slice(&new[..new_len]);
                    }
                }
                self.data = new_buf;
                self.stride = new_stride;
                self.start = 0;
                self.end = old_len + new_len;
                return;
            }
        }
        for (dst, src) in self.data.chunks_exact_mut(self.stride).zip(src.get_data().chunks_exact(src.get_stride())) {
            dst[self.end..][..to_copy].copy_from_slice(&src[..to_copy]);
        }
        self.end += to_copy;
    }
    // Copies one block of queued samples into the destination buffer and
    // advances the read position.
    fn write(&mut self, dbuf: &mut NAAudioBuffer<T>) {
        let dst_len = dbuf.get_length();
        let dst_stride = dbuf.get_stride();
        let dst = dbuf.get_data_mut().unwrap();

        for (dst, src) in dst.chunks_mut(dst_stride).zip(self.data.chunks_exact(self.stride)) {
            dst[..dst_len].copy_from_slice(&src[self.start..][..dst_len]);
        }
        self.start += dst_len;
    }
    // Moves the remaining samples back to the start of the buffer so the
    // freed tail space can be reused without reallocation.
    fn renorm(&mut self) {
        if self.start == 0 {
            return;
        }

        let move_size = self.end - self.start;
        if move_size > 0 {
            for chan in self.data.chunks_exact_mut(self.stride) {
                for i in 0..move_size {
                    chan[i] = chan[self.start + i];
                }
            }
        }
        self.end -= self.start;
        self.start = 0;
    }
}
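
// Illustrative note (not from the original source): for a planar queue created
// as AudioQueue::<i16>::new(2, 1024, false), two read() calls of 512 samples
// each fill the per-channel stride (end == 1024), a write() of a full
// 1024-sample block advances start to 1024, and the next read() finds no tail
// room, so renorm() resets start and end to 0 before copying. The reallocation
// branch in read() is taken only when even a renormalised buffer cannot hold
// the incoming data.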

// One sample queue per supported destination sample format.
enum AudioDataType {
    U8(AudioQueue<u8>),
    I16(AudioQueue<i16>),
    I32(AudioQueue<i32>),
    F32(AudioQueue<f32>),
    Packed(AudioQueue<u8>),
}

impl AudioDataType {
    fn get_length(&self) -> usize {
        match self {
            AudioDataType::U8(ref queue) => queue.get_cur_size(),
            AudioDataType::I16(ref queue) => queue.get_cur_size(),
            AudioDataType::I32(ref queue) => queue.get_cur_size(),
            AudioDataType::F32(ref queue) => queue.get_cur_size(),
            AudioDataType::Packed(ref queue) => queue.get_cur_size(),
        }
    }
}

// Converts incoming audio into the destination format and assembles
// fixed-size blocks for the encoder.
pub struct AudioConverter {
    queue: AudioDataType,
    dst_fmt: NAAudioInfo,
    dst_chmap: NAChannelMap,
    apts: Option<u64>,
    resampler: NAResample,
}

impl AudioConverter {
    pub fn new(sinfo: &NAAudioInfo, dinfo: &NAAudioInfo, dst_chmap: NAChannelMap) -> Self {
        let ch = usize::from(dinfo.channels);
        let size = dinfo.block_len * 2;
        let il = !dinfo.format.planar;
        let queue = match (dinfo.format.bits, dinfo.format.float, dinfo.format.signed) {
            ( 8, false, false) => AudioDataType::U8(AudioQueue::new(ch, size, il)),
            (16, false, true) => AudioDataType::I16(AudioQueue::new(ch, size, il)),
            (32, false, true) => AudioDataType::I32(AudioQueue::new(ch, size, il)),
            (32, true, _) => AudioDataType::F32(AudioQueue::new(ch, size, il)),
            _ => AudioDataType::Packed(AudioQueue::new(ch, size, il)),
        };
        const RESAMPLE_FILTER_ORDER: usize = 16;
        let resampler = NAResample::new(sinfo.sample_rate, dinfo, &dst_chmap, RESAMPLE_FILTER_ORDER);
        Self {
            queue,
            dst_fmt: *dinfo,
            dst_chmap,
            apts: None,
            resampler,
        }
    }
    // Converts and resamples an input frame and appends the result to the
    // internal queue; returns false if the input could not be converted.
    pub fn queue_frame(&mut self, buf: NABufferType, tinfo: NATimeInfo) -> bool {
        let ret = self.resampler.convert_audio_frame(&buf);
        if let Ok(dbuf) = ret {
            if self.apts.is_none() && tinfo.get_pts().is_some() {
                self.apts = tinfo.get_pts();
            }
            match (&mut self.queue, dbuf) {
                (AudioDataType::U8(ref mut queue), NABufferType::AudioU8(ref buf)) => queue.read(buf),
                (AudioDataType::I16(ref mut queue), NABufferType::AudioI16(ref buf)) => queue.read(buf),
                (AudioDataType::I32(ref mut queue), NABufferType::AudioI32(ref buf)) => queue.read(buf),
                (AudioDataType::F32(ref mut queue), NABufferType::AudioF32(ref buf)) => queue.read(buf),
                (AudioDataType::Packed(ref mut queue), NABufferType::AudioPacked(ref buf)) => queue.read(buf),
                _ => unimplemented!(),
            };
            true
        } else {
            false
        }
    }
    // Produces the next fixed-size audio frame once enough samples have been
    // queued, advancing the output timestamp accordingly.
    pub fn get_frame(&mut self, info: NACodecInfoRef) -> Option<NAFrame> {
        if self.queue.get_length() >= self.dst_fmt.block_len {
            if let Ok(mut abuf) = alloc_audio_buffer(self.dst_fmt, self.dst_fmt.block_len, self.dst_chmap.clone()) {
                match (&mut self.queue, &mut abuf) {
                    (AudioDataType::U8(ref mut queue), NABufferType::AudioU8(ref mut buf)) => queue.write(buf),
                    (AudioDataType::I16(ref mut queue), NABufferType::AudioI16(ref mut buf)) => queue.write(buf),
                    (AudioDataType::I32(ref mut queue), NABufferType::AudioI32(ref mut buf)) => queue.write(buf),
                    (AudioDataType::F32(ref mut queue), NABufferType::AudioF32(ref mut buf)) => queue.write(buf),
                    (AudioDataType::Packed(ref mut queue), NABufferType::AudioPacked(ref mut buf)) => queue.write(buf),
                    _ => unimplemented!(),
                };
                let tinfo = NATimeInfo::new(self.apts, None, Some(self.dst_fmt.block_len as u64), 1, self.dst_fmt.sample_rate);
                if let Some(ref mut val) = self.apts {
                    *val += self.dst_fmt.block_len as u64;
                }
                Some(NAFrame::new(tinfo, FrameType::I, true, info, abuf))
            } else {
                println!(" failed to allocate audio frame");
                None
            }
        } else {
            None
        }
    }
}
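
A minimal usage sketch (assumed, not part of the commit) of how an encoder loop might drive AudioConverter. Here src_info and dst_info (NAAudioInfo), dst_chmap, the decoded buffer dec_buf with its timing dec_ts, and the destination codec info enc_info are hypothetical values obtained elsewhere; only the AudioConverter calls mirror the API defined above.

    let mut acvt = AudioConverter::new(&src_info, &dst_info, dst_chmap);
    // feed each decoded buffer together with its timing information
    if acvt.queue_frame(dec_buf, dec_ts) {
        // drain complete blocks of dst_info.block_len samples as they accumulate
        while let Some(frame) = acvt.get_frame(enc_info.clone()) {
            // pass `frame` on to the encoder ...
        }
    }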