ef4191820b71fb10de261b89fde715a93e381d8c
[nihav-encoder.git] / src / acvt.rs
1 use nihav_core::frame::*;
2 use nihav_core::soundcvt::*;
3
/// Queue accumulating converted audio samples until a full output block
/// can be emitted.
///
/// Layouts:
/// * planar (`ileaved == false`): `data` holds `channels` consecutive lanes
///   of `stride` samples each; `start`/`end` index within one lane and the
///   same offsets apply to every lane.
/// * interleaved (`ileaved == true`): `data` is a single lane of `stride`
///   total samples (`rec_size * channels`); `start`/`end` count interleaved
///   samples across all channels.
struct AudioQueue<T> {
    start: usize,   // read position (per-lane when planar, absolute when interleaved)
    end: usize,     // write position, same unit as `start`
    stride: usize,  // capacity of one lane inside `data`
    channels: usize,
    data: Vec<T>,
    ileaved: bool,  // true = interleaved layout, false = planar
}
12
impl<T:Clone+Copy+From<u8>> AudioQueue<T> {
    /// Creates an empty queue sized for `rec_size` samples per channel.
    fn new(channels: usize, rec_size: usize, ileaved: bool) -> Self {
        Self {
            start: 0,
            end: 0,
            // one lane per channel when planar, a single combined lane otherwise
            stride: if ileaved { rec_size * channels } else { rec_size },
            channels,
            ileaved,
            data: vec![0.into(); rec_size * channels],
        }
    }
    /// Returns the number of queued samples per channel.
    fn get_cur_size(&self) -> usize {
        let size = self.end - self.start;
        if !self.ileaved {
            size
        } else {
            // interleaved positions count all channels' samples together
            size / self.channels
        }
    }
    /// Free room after `end` without moving queued data (queue units).
    fn get_cur_avail(&self) -> usize { self.stride - self.end }
    /// Free room obtainable after compacting the queue with `renorm()`.
    /// NOTE(review): for the interleaved case this subtracts a per-channel
    /// count from a total-sample capacity — units look mixed; confirm.
    fn get_potentially_avail(&self) -> usize { self.stride - self.get_cur_size() }
    /// Appends all samples from `src` to the queue, compacting or growing
    /// the backing store when the tail has no room.
    fn read(&mut self, src: &NAAudioBuffer<T>) {
        let mut to_copy = src.get_length();
        if self.ileaved {
            // interleaved positions are counted in total samples
            to_copy *= self.channels;
        }
        if self.get_cur_avail() < to_copy {
            if self.get_potentially_avail() >= to_copy {
                // enough room once already-consumed samples are dropped
                self.renorm();
            } else {
                // Grow the backing buffer and splice pending + fresh data into it.
                // NOTE(review): for interleaved queues `stride` is in total
                // samples while get_cur_size()/get_length() are per-channel,
                // so `new_len` mixes units and the copy lengths below stay
                // per-channel; `copy_from_slice` would panic on the length
                // mismatch when `ileaved && channels > 1` — this grow path
                // looks exercised only for planar data, confirm with callers.
                let new_len = (self.stride * 2).max(self.get_cur_size() + src.get_length());
                let mut new_buf = vec![0.into(); new_len * self.channels];
                let new_stride = if !self.ileaved { new_len } else { new_len * self.channels };

                let old_len = self.get_cur_size();
                let new_len = src.get_length(); // deliberately shadows the capacity above
                if old_len > 0 {
                    // lane by lane: pending samples first, fresh samples right after
                    for (dst, (old, new)) in new_buf.chunks_exact_mut(new_stride).zip(
                            self.data.chunks_exact(self.stride).zip(
                            src.get_data().chunks(src.get_stride()))) {
                        dst[..old_len].copy_from_slice(&old[self.start..self.end]);
                        dst[old_len..][..new_len].copy_from_slice(&new[..new_len]);
                    }
                } else {
                    // queue empty — copy only the fresh samples.
                    // NOTE(review): unlike the match below, this copies the source
                    // verbatim and so assumes src.get_step() == 1 — confirm.
                    for (dst, new) in new_buf.chunks_exact_mut(new_stride).zip(
                            src.get_data().chunks(src.get_stride())) {
                        dst[..new_len].copy_from_slice(&new[..new_len]);
                    }
                }
                self.data = new_buf;
                self.stride = new_stride;
                self.start = 0;
                self.end = old_len + new_len;
                return;
            }
        }
        // append in place; dispatch on (source is strided, queue is interleaved)
        match (src.get_step() != 1, self.ileaved) {
            (false, false) => {
                // planar source into planar queue: lane-by-lane block copy
                for (dst, src) in self.data.chunks_exact_mut(self.stride).zip(src.get_data().chunks_exact(src.get_stride())) {
                    dst[self.end..][..to_copy].copy_from_slice(&src[..to_copy]);
                }
            },
            (true, false) => {
                // interleaved source into planar queue: de-interleave one sample at a time
                for (i, chunk) in src.get_data().chunks_exact(src.get_step()).enumerate() {
                    for (ch, &samp) in chunk.iter().enumerate() {
                        self.data[self.stride * ch + self.end + i] = samp;
                    }
                }
            }
            (true, true) => {
                // interleaved source into interleaved queue: flat copy
                let sdata = src.get_data();
                self.data[self.end..][..to_copy].copy_from_slice(&sdata[..to_copy]);
            },
            // planar source into interleaved queue — not produced upstream
            _ => unimplemented!(),
        };
        self.end += to_copy;
    }
    /// Moves one output block worth of samples from the queue into `dbuf`.
    /// Panics (via `unwrap`) if `dbuf` exposes no writable data.
    fn write(&mut self, dbuf: &mut NAAudioBuffer<T>) {
        let mut dst_len = dbuf.get_length();
        let dst_stride = dbuf.get_stride();
        let dst_step = dbuf.get_step();
        let dst = dbuf.get_data_mut().unwrap();

        match (self.ileaved, dst_step != 1) {
            (false, false) => {
                // planar queue into planar buffer: lane-by-lane copy
                for (dst, src) in dst.chunks_mut(dst_stride).zip(self.data.chunks_exact(self.stride)) {
                    dst[..dst_len].copy_from_slice(&src[self.start..][..dst_len]);
                }
            },
            (true, true) => {
                // interleaved queue into interleaved buffer: flat copy;
                // scale to total samples so the `start` update below matches
                dst_len *= self.channels;
                dst[..dst_len].copy_from_slice(&self.data[self.start..][..dst_len]);
            },
            // mixed layouts are never requested by AudioConverter
            _ => unimplemented!(),
        };
        self.start += dst_len;
    }
    /// Compacts the queue by moving every lane's pending samples to the
    /// front, freeing room at the tail.
    fn renorm(&mut self) {
        if self.start == 0 {
            return;
        }

        let move_size = self.end - self.start;
        if move_size > 0 {
            for chan in self.data.chunks_exact_mut(self.stride) {
                // forward element-wise copy is safe: dst index is always behind src
                for i in 0..move_size {
                    chan[i] = chan[self.start + i];
                }
            }
        }
        self.end -= self.start;
        self.start = 0;
    }
}
127
/// Sample-format-erased wrapper around `AudioQueue` so `AudioConverter`
/// can hold a single queue whatever the destination sample format is.
enum AudioDataType {
    U8(AudioQueue<u8>),
    I16(AudioQueue<i16>),
    I32(AudioQueue<i32>),
    F32(AudioQueue<f32>),
    // catch-all for formats with no dedicated sample type; queues raw bytes
    Packed(AudioQueue<u8>),
}
135
136 impl AudioDataType {
137 fn get_length(&self) -> usize {
138 match self {
139 AudioDataType::U8(ref queue) => queue.get_cur_size(),
140 AudioDataType::I16(ref queue) => queue.get_cur_size(),
141 AudioDataType::I32(ref queue) => queue.get_cur_size(),
142 AudioDataType::F32(ref queue) => queue.get_cur_size(),
143 AudioDataType::Packed(ref queue) => queue.get_cur_size(),
144 }
145 }
146 }
147
/// Converts incoming audio frames into fixed-size blocks in the destination
/// format, buffering leftover samples between calls.
pub struct AudioConverter {
    queue: AudioDataType,     // samples already converted to the destination format
    dst_fmt: NAAudioInfo,     // destination audio parameters (format, rate, block length)
    dst_chmap: NAChannelMap,  // destination channel map
    apts: Option<u64>,        // PTS for the next output frame, advanced per emitted block
    resampler: NAResample,    // performs the rate/format/channel conversion
}
155
156 impl AudioConverter {
157 pub fn new(sinfo: &NAAudioInfo, dinfo: &NAAudioInfo, dst_chmap: NAChannelMap) -> Self {
158 let ch = usize::from(dinfo.channels);
159 let size = dinfo.block_len * 2;
160 let il = !dinfo.format.planar;
161 let queue = match (dinfo.format.bits, dinfo.format.float, dinfo.format.signed) {
162 ( 8, false, false) => AudioDataType::U8(AudioQueue::new(ch, size, il)),
163 (16, false, true) => AudioDataType::I16(AudioQueue::new(ch, size, il)),
164 (32, false, true) => AudioDataType::I32(AudioQueue::new(ch, size, il)),
165 (32, true, _) => AudioDataType::F32(AudioQueue::new(ch, size, il)),
166 _ => AudioDataType::Packed(AudioQueue::new(ch, size, il)),
167 };
168 const RESAMPLE_FILTER_ORDER: usize = 16;
169 let resampler = NAResample::new(sinfo.sample_rate, dinfo, &dst_chmap, RESAMPLE_FILTER_ORDER);
170 Self {
171 queue,
172 dst_fmt: *dinfo,
173 dst_chmap,
174 apts: None,
175 resampler,
176 }
177 }
178 pub fn queue_frame(&mut self, buf: NABufferType, tinfo: NATimeInfo) -> bool {
179 let ret = self.resampler.convert_audio_frame(&buf);
180 if let Ok(dbuf) = ret {
181 if self.apts.is_none() && tinfo.get_pts().is_some() {
182 self.apts = tinfo.get_pts();
183 }
184 match (&mut self.queue, dbuf) {
185 (AudioDataType::U8(ref mut queue), NABufferType::AudioU8(ref buf)) => queue.read(buf),
186 (AudioDataType::I16(ref mut queue), NABufferType::AudioI16(ref buf)) => queue.read(buf),
187 (AudioDataType::I32(ref mut queue), NABufferType::AudioI32(ref buf)) => queue.read(buf),
188 (AudioDataType::F32(ref mut queue), NABufferType::AudioF32(ref buf)) => queue.read(buf),
189 (AudioDataType::Packed(ref mut queue), NABufferType::AudioPacked(ref buf)) => queue.read(buf),
190 _ => unimplemented!(),
191 };
192 true
193 } else {
194 false
195 }
196 }
197 pub fn get_frame(&mut self, info: NACodecInfoRef) -> Option<NAFrame> {
198 if self.queue.get_length() >= self.dst_fmt.block_len {
199 if let Ok(mut abuf) = alloc_audio_buffer(self.dst_fmt, self.dst_fmt.block_len, self.dst_chmap.clone()) {
200 match (&mut self.queue, &mut abuf) {
201 (AudioDataType::U8(ref mut queue), NABufferType::AudioU8(ref mut buf)) => queue.write(buf),
202 (AudioDataType::I16(ref mut queue), NABufferType::AudioI16(ref mut buf)) => queue.write(buf),
203 (AudioDataType::I32(ref mut queue), NABufferType::AudioI32(ref mut buf)) => queue.write(buf),
204 (AudioDataType::F32(ref mut queue), NABufferType::AudioF32(ref mut buf)) => queue.write(buf),
205 (AudioDataType::Packed(ref mut queue), NABufferType::AudioPacked(ref mut buf)) => queue.write(buf),
206 _ => unimplemented!(),
207 };
208 let tinfo = NATimeInfo::new(self.apts, None, Some(self.dst_fmt.block_len as u64), 1, self.dst_fmt.sample_rate);
209 if let Some(ref mut val) = self.apts {
210 *val += self.dst_fmt.block_len as u64;
211 }
212 Some(NAFrame::new(tinfo, FrameType::I, true, info, abuf))
213 } else {
214 println!(" failed to allocate audio frame");
215 None
216 }
217 } else {
218 None
219 }
220 }
221 }