improve audio processing pipeline
[nihav-encoder.git] / src / acvt.rs
CommitLineData
b0481c9e
KS
1use nihav_core::frame::*;
2use nihav_core::soundcvt::*;
3
/// Accumulation buffer for audio samples of type `T`, used to re-block
/// converted audio into fixed-size output frames.
///
/// Storage is plane-oriented: `data.chunks_exact(stride)` yields one chunk
/// per channel plane, or a single chunk covering all channels when the
/// samples are interleaved (then `stride` already includes the channel count).
struct AudioQueue<T> {
    start: usize,    // index of the first queued element within each plane
    end: usize,      // one past the last queued element within each plane
    stride: usize,   // plane length in elements (samples * channels when interleaved)
    channels: usize, // number of audio channels
    data: Vec<T>,    // backing storage; initially `rec_size * channels` elements
    ileaved: bool,   // true when samples are interleaved rather than planar
}
12
impl<T:Clone+Copy+From<u8>> AudioQueue<T> {
    /// Creates an empty queue able to hold `rec_size` samples per channel.
    fn new(channels: usize, rec_size: usize, ileaved: bool) -> Self {
        Self {
            start: 0,
            end: 0,
            // For interleaved data all channels share one plane, so the
            // plane stride covers every channel's samples.
            stride: if ileaved { rec_size * channels } else { rec_size },
            channels,
            ileaved,
            data: vec![0.into(); rec_size * channels],
        }
    }
    /// Number of elements currently queued in each plane.
    fn get_cur_size(&self) -> usize { self.end - self.start }
    /// Free elements left at the tail of each plane without compacting.
    fn get_cur_avail(&self) -> usize { self.stride - self.end }
    /// Free elements obtainable after compacting (see `renorm()`).
    fn get_potentially_avail(&self) -> usize { self.stride - self.get_cur_size() }
    /// Appends the contents of `src` to the queue, compacting or growing the
    /// backing buffer as needed.
    fn read(&mut self, src: &NAAudioBuffer<T>) {
        // `get_length()` counts samples per channel; interleaved planes hold
        // all channels, so scale the copy size accordingly.
        let mut to_copy = src.get_length();
        if self.ileaved {
            to_copy *= self.channels;
        }
        if self.get_cur_avail() < to_copy {
            if self.get_potentially_avail() >= to_copy {
                // Enough room once the queued data is shifted to the front.
                self.renorm();
            } else {
                // Grow: at least double the plane size, or enough to hold
                // old data plus the new frame.
                // NOTE(review): `get_cur_size() + src.get_length()` and the
                // shadowed `new_len`/`self.end` below use the per-channel
                // sample count without the `* channels` scaling applied to
                // `to_copy` above — for interleaved input this looks like it
                // undersizes/under-copies; confirm against upstream.
                let new_len = (self.stride * 2).max(self.get_cur_size() + src.get_length());
                let mut new_buf = vec![0.into(); new_len * self.channels];
                let new_stride = if !self.ileaved { new_len } else { new_len * self.channels };

                let old_len = self.get_cur_size();
                let new_len = src.get_length();
                // Copy queued data and the new frame plane by plane into the
                // freshly allocated buffer.
                for (dst, (old, new)) in new_buf.chunks_exact_mut(new_stride).zip(
                        self.data.chunks_exact(self.stride).zip(
                            src.get_data().chunks(src.get_stride()))) {
                    dst[..old_len].copy_from_slice(&old[self.start..self.end]);
                    dst[old_len..][..new_len].copy_from_slice(&new[..new_len]);
                }
                self.data = new_buf;
                self.stride = new_stride;
                self.start = 0;
                self.end = old_len + new_len;
                return;
            }
        }
        // Fast path: append the new samples at the tail of each plane.
        for (dst, src) in self.data.chunks_exact_mut(self.stride).zip(src.get_data().chunks_exact(src.get_stride())) {
            dst[self.end..][..to_copy].copy_from_slice(&src[..to_copy]);
        }
        self.end += to_copy;
    }
    /// Moves `dbuf.get_length()` elements per plane from the head of the
    /// queue into the destination buffer.
    /// NOTE(review): assumes the caller checked enough data is queued and
    /// that `dbuf`'s stride layout matches the queue's — confirm at call sites.
    fn write(&mut self, dbuf: &mut NAAudioBuffer<T>) {
        let dst_len = dbuf.get_length();
        let dst_stride = dbuf.get_stride();
        let dst = dbuf.get_data_mut().unwrap();

        for (dst, src) in dst.chunks_mut(dst_stride).zip(self.data.chunks_exact(self.stride)) {
            dst[..dst_len].copy_from_slice(&src[self.start..][..dst_len]);
        }
        self.start += dst_len;
    }
    /// Compacts the queue by shifting the queued region of every plane back
    /// to offset zero, freeing tail space for new data.
    fn renorm(&mut self) {
        if self.start == 0 {
            return;
        }

        let move_size = self.end - self.start;
        if move_size > 0 {
            // In-place forward copy; source and destination may overlap,
            // which is safe because we copy front-to-back.
            for chan in self.data.chunks_exact_mut(self.stride) {
                for i in 0..move_size {
                    chan[i] = chan[self.start + i];
                }
            }
        }
        self.end -= self.start;
        self.start = 0;
    }
}
87
/// Sample-format-dispatched wrapper around `AudioQueue`, covering every
/// destination format the converter can produce.
enum AudioDataType {
    U8(AudioQueue<u8>),     // unsigned 8-bit samples
    I16(AudioQueue<i16>),   // signed 16-bit samples
    I32(AudioQueue<i32>),   // signed 32-bit samples
    F32(AudioQueue<f32>),   // 32-bit floating-point samples
    Packed(AudioQueue<u8>), // any other format, stored as raw packed bytes
}
95
96impl AudioDataType {
97 fn get_length(&self) -> usize {
98 match self {
99 AudioDataType::U8(ref queue) => queue.get_cur_size(),
100 AudioDataType::I16(ref queue) => queue.get_cur_size(),
101 AudioDataType::I32(ref queue) => queue.get_cur_size(),
102 AudioDataType::F32(ref queue) => queue.get_cur_size(),
103 AudioDataType::Packed(ref queue) => queue.get_cur_size(),
104 }
105 }
106}
107
/// Converts incoming audio frames to a fixed destination format and channel
/// map, and re-blocks them into frames of exactly `dst_fmt.block_len` samples.
pub struct AudioConverter {
    queue: AudioDataType,    // pending converted samples, typed per destination format
    dst_fmt: NAAudioInfo,    // target audio parameters
    dst_chmap: NAChannelMap, // target channel layout
    apts: Option<u64>,       // PTS for the next output frame, advanced by block_len
}
114
115impl AudioConverter {
116 pub fn new(_sinfo: &NAAudioInfo, dinfo: &NAAudioInfo, dst_chmap: NAChannelMap) -> Self {
117 let ch = usize::from(dinfo.channels);
118 let size = dinfo.block_len * 2;
119 let il = !dinfo.format.planar;
120 let queue = match (dinfo.format.bits, dinfo.format.float, dinfo.format.signed) {
121 ( 8, false, false) => AudioDataType::U8(AudioQueue::new(ch, size, il)),
122 (16, false, true) => AudioDataType::I16(AudioQueue::new(ch, size, il)),
123 (32, false, true) => AudioDataType::I32(AudioQueue::new(ch, size, il)),
124 (32, true, _) => AudioDataType::F32(AudioQueue::new(ch, size, il)),
125 _ => AudioDataType::Packed(AudioQueue::new(ch, size, il)),
126 };
127 Self {
128 queue,
129 dst_fmt: *dinfo,
130 dst_chmap,
131 apts: None,
132 }
133 }
134 pub fn queue_frame(&mut self, buf: NABufferType, tinfo: NATimeInfo) -> bool {
135 let ret = convert_audio_frame(&buf, &self.dst_fmt, &self.dst_chmap);
136 if let Ok(dbuf) = ret {
137 if self.apts.is_none() && tinfo.get_pts().is_some() {
138 self.apts = tinfo.get_pts();
139 }
140 match (&mut self.queue, dbuf) {
141 (AudioDataType::U8(ref mut queue), NABufferType::AudioU8(ref buf)) => queue.read(buf),
142 (AudioDataType::I16(ref mut queue), NABufferType::AudioI16(ref buf)) => queue.read(buf),
143 (AudioDataType::I32(ref mut queue), NABufferType::AudioI32(ref buf)) => queue.read(buf),
144 (AudioDataType::F32(ref mut queue), NABufferType::AudioF32(ref buf)) => queue.read(buf),
145 (AudioDataType::Packed(ref mut queue), NABufferType::AudioPacked(ref buf)) => queue.read(buf),
146 _ => unimplemented!(),
147 };
148 true
149 } else {
150 false
151 }
152 }
153 pub fn get_frame(&mut self, info: NACodecInfoRef) -> Option<NAFrame> {
154 if self.queue.get_length() >= self.dst_fmt.block_len {
155 if let Ok(mut abuf) = alloc_audio_buffer(self.dst_fmt, self.dst_fmt.block_len, self.dst_chmap.clone()) {
156 match (&mut self.queue, &mut abuf) {
157 (AudioDataType::U8(ref mut queue), NABufferType::AudioU8(ref mut buf)) => queue.write(buf),
158 (AudioDataType::I16(ref mut queue), NABufferType::AudioI16(ref mut buf)) => queue.write(buf),
159 (AudioDataType::I32(ref mut queue), NABufferType::AudioI32(ref mut buf)) => queue.write(buf),
160 (AudioDataType::F32(ref mut queue), NABufferType::AudioF32(ref mut buf)) => queue.write(buf),
161 (AudioDataType::Packed(ref mut queue), NABufferType::AudioPacked(ref mut buf)) => queue.write(buf),
162 _ => unimplemented!(),
163 };
164 let tinfo = NATimeInfo::new(self.apts, None, Some(self.dst_fmt.block_len as u64), 1, self.dst_fmt.sample_rate);
165 if let Some(ref mut val) = self.apts {
166 *val += self.dst_fmt.block_len as u64;
167 }
168 Some(NAFrame::new(tinfo, FrameType::I, true, info, abuf))
169 } else {
170 println!(" failed to allocate audio frame");
171 None
172 }
173 } else {
174 None
175 }
176 }
177}