use nihav_core::codecs::*;
use nihav_core::io::byteio::*;
use nihav_codec_support::vq::*;

#[derive(Default,Clone,Copy,PartialEq,Debug)]
struct YUVCode {
    y: [u8; 4],
    u: u8,
    v: u8,
}
impl VQElement for YUVCode {
    fn dist(&self, rval: Self) -> u32 {
        let mut ysum = 0;
        for (y0, y1) in self.y.iter().zip(rval.y.iter()) {
            let yd = i32::from(*y0) - i32::from(*y1);
            ysum += yd * yd;
        }
        let ud = i32::from(self.u) - i32::from(rval.u);
        let vd = i32::from(self.v) - i32::from(rval.v);
        (ysum + ud * ud + vd * vd) as u32
    }
    fn min_cw() -> Self { YUVCode { y: [0; 4], u: 0, v: 0 } }
    fn max_cw() -> Self { YUVCode { y: [255; 4], u: 255, v: 255 } }
    fn min(&self, rval: Self) -> Self {
        let mut ycode = YUVCode::default();
        for i in 0..4 {
            ycode.y[i] = self.y[i].min(rval.y[i]);
        }
        ycode.u = self.u.min(rval.u);
        ycode.v = self.v.min(rval.v);
        ycode
    }
    fn max(&self, rval: Self) -> Self {
        let mut ycode = YUVCode::default();
        for i in 0..4 {
            ycode.y[i] = self.y[i].max(rval.y[i]);
        }
        ycode.u = self.u.max(rval.u);
        ycode.v = self.v.max(rval.v);
        ycode
    }
    fn num_components() -> usize { 6 }
    fn sort_by_component(arr: &mut [Self], component: usize) {
        let mut counts = [0; 256];
        for entry in arr.iter() {
            let idx = match component {
                0 | 1 | 2 | 3 => entry.y[component],
                4 => entry.u,
                _ => entry.v,
            } as usize;
            counts[idx] += 1;
        }
        let mut offs = [0; 256];
        for i in 0..255 {
            offs[i + 1] = offs[i] + counts[i];
        }
        let mut dst = vec![YUVCode::default(); arr.len()];
        for entry in arr.iter() {
            let idx = match component {
                0 | 1 | 2 | 3 => entry.y[component],
                4 => entry.u,
                _ => entry.v,
            } as usize;
            dst[offs[idx]] = *entry;
            offs[idx] += 1;
        }
        arr.copy_from_slice(dst.as_slice());
    }
    fn max_dist_component(min: &Self, max: &Self) -> usize {
        let mut comp = 0;
        let mut diff = 0;
        for i in 0..4 {
            let d = u32::from(max.y[i]) - u32::from(min.y[i]);
            if d > diff {
                diff = d;
                comp = i;
            }
        }
        let ud = u32::from(max.u) - u32::from(min.u);
        if ud > diff {
            diff = ud;
            comp = 4;
        }
        let vd = u32::from(max.v) - u32::from(min.v);
        if vd > diff {
            comp = 5;
        }
        comp
    }
}

#[derive(Default)]
struct YUVCodeSum {
    ysum: [u64; 4],
    usum: u64,
    vsum: u64,
    count: u64,
}

impl VQElementSum<YUVCode> for YUVCodeSum {
    fn zero() -> Self { Self::default() }
    fn add(&mut self, rval: YUVCode, count: u64) {
        for i in 0..4 {
            self.ysum[i] += u64::from(rval.y[i]) * count;
        }
        self.usum += u64::from(rval.u) * count;
        self.vsum += u64::from(rval.v) * count;
        self.count += count;
    }
    fn get_centroid(&self) -> YUVCode {
        if self.count != 0 {
            let mut ycode = YUVCode::default();
            for i in 0..4 {
                ycode.y[i] = ((self.ysum[i] + self.count / 2) / self.count) as u8;
            }
            ycode.u = ((self.usum + self.count / 2) / self.count) as u8;
            ycode.v = ((self.vsum + self.count / 2) / self.count) as u8;
            ycode
        } else {
            YUVCode::default()
        }
    }
}

struct RNG {
    seed: u32,
}

impl RNG {
    fn new() -> Self { Self { seed: 0x12345678 } }
    fn next(&mut self) -> u8 {
        let mut x = self.seed;
        x ^= x.wrapping_shl(13);
        x ^= x >> 17;
        self.seed = x;
        (self.seed >> 24) as u8
    }
    fn fill_entry(&mut self, entry: &mut YUVCode) {
        for y in entry.y.iter_mut() {
            *y = self.next();
        }
        entry.u = self.next();
        entry.v = self.next();
    }
}

const GRAY_FORMAT: NAPixelFormaton = NAPixelFormaton {
    model: ColorModel::YUV(YUVSubmodel::YUVJ),
    components: 1,
    comp_info: [Some(NAPixelChromaton{h_ss: 0, v_ss: 0, packed: false, depth: 8, shift: 0, comp_offs: 0, next_elem: 1}), None, None, None, None],
    elem_size: 1,
    be: true,
    alpha: false,
    palette: false,
};

struct MaskWriter {
    masks: Vec<u32>,
    mask: u32,
    pos: u8,
}

impl MaskWriter {
    fn new() -> Self {
        Self {
            masks: Vec::new(),
            mask: 0,
            pos: 0,
        }
    }
    fn reset(&mut self) {
        self.masks.truncate(0);
        self.mask = 0;
        self.pos = 0;
    }
    fn put_v1(&mut self) {
        self.mask <<= 1;
        self.pos += 1;
        if self.pos == 32 {
            self.flush();
        }
    }
    fn put_v4(&mut self) {
        self.mask <<= 1;
        self.mask |= 1;
        self.pos += 1;
        if self.pos == 32 {
            self.flush();
        }
    }
    fn put_inter(&mut self, skip: bool) {
        self.mask <<= 1;
        self.mask |= !skip as u32;
        self.pos += 1;
        if self.pos == 32 {
            self.flush();
        }
    }
    fn flush(&mut self) {
        self.masks.push(self.mask);
        self.mask = 0;
        self.pos = 0;
    }
    fn end(&mut self) {
        if self.pos == 0 { return; }
        while self.pos < 32 {
            self.mask <<= 1;
            self.pos += 1;
        }
        self.flush();
    }
}

#[derive(Clone,Copy,PartialEq)]
enum QuantMode {
    ELBG,
    Hybrid,
    MedianCut,
}

impl std::string::ToString for QuantMode {
    fn to_string(&self) -> String {
        match *self {
            QuantMode::ELBG => "elbg".to_string(),
            QuantMode::Hybrid => "hybrid".to_string(),
            QuantMode::MedianCut => "mediancut".to_string(),
        }
    }
}

struct CinepakEncoder {
    stream: Option<NAStreamRef>,
    lastfrm: Option<NAVideoBufferRef<u8>>,
    pkt: Option<NAPacket>,
    frmcount: u8,
    key_int: u8,
    qmode: QuantMode,
    quality: u8,
    nstrips: usize,
    v1_entries: Vec<YUVCode>,
    v4_entries: Vec<YUVCode>,
    v1_cb: [YUVCode; 256],
    v4_cb: [YUVCode; 256],
    v1_cur_cb: [YUVCode; 256],
    v4_cur_cb: [YUVCode; 256],
    v1_len: usize,
    v4_len: usize,
    v1_idx: Vec<u8>,
    v4_idx: Vec<u8>,
    grayscale: bool,
    rng: RNG,
    masks: MaskWriter,
    skip_dist: Vec<u32>,
}

fn avg4(a: u8, b: u8, c: u8, d: u8) -> u8 {
    ((u16::from(a) + u16::from(b) + u16::from(c) + u16::from(d) + 3) >> 2) as u8
}

fn patch_size(bw: &mut ByteWriter, pos: u64) -> EncoderResult<()> {
    let size = bw.tell() - pos;
    bw.seek(SeekFrom::Current(-((size + 3) as i64)))?;
    bw.write_u24be((size + 4) as u32)?;
    bw.seek(SeekFrom::End(0))?;
    Ok(())
}

impl CinepakEncoder {
    fn new() -> Self {
        Self {
            stream: None,
            pkt: None,
            lastfrm: None,
            frmcount: 0,
            qmode: QuantMode::MedianCut,
            key_int: 25,
            quality: 0,
            nstrips: 2,
            v1_entries: Vec::new(),
            v4_entries: Vec::new(),
            v1_cb: [YUVCode::default(); 256],
            v4_cb: [YUVCode::default(); 256],
            v1_cur_cb: [YUVCode::default(); 256],
            v4_cur_cb: [YUVCode::default(); 256],
            v1_len: 0,
            v4_len: 0,
            grayscale: false,
            rng: RNG::new(),
            v1_idx: Vec::new(),
            v4_idx: Vec::new(),
            masks: MaskWriter::new(),
            skip_dist: Vec::new(),
        }
    }
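    /// Splits one strip of the input frame into vectors: one V1 entry per 4x4 block
    /// (luma averaged down to 2x2) and four V4 entries, one per 2x2 sub-block.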
    fn read_strip(&mut self, in_frm: &NAVideoBuffer<u8>, start: usize, end: usize) {
        let ystride = in_frm.get_stride(0);
        let mut yoff = in_frm.get_offset(0) + start * ystride;
        let ustride = in_frm.get_stride(1);
        let mut uoff = in_frm.get_offset(1) + start / 2 * ustride;
        let vstride = in_frm.get_stride(2);
        let mut voff = in_frm.get_offset(2) + start / 2 * vstride;
        let (width, _) = in_frm.get_dimensions(0);
        let data = in_frm.get_data();
        self.v1_entries.truncate(0);
        self.v4_entries.truncate(0);
        for _ in (start..end).step_by(4) {
            for x in (0..width).step_by(4) {
                let mut yblk = [0; 16];
                let mut ublk = [128; 4];
                let mut vblk = [128; 4];
                for j in 0..4 {
                    for i in 0..4 {
                        yblk[i + j * 4] = data[yoff + x + i + j * ystride];
                    }
                }
                if !self.grayscale {
                    for j in 0..2 {
                        for i in 0..2 {
                            ublk[i + j * 2] = data[uoff + x / 2 + i + j * ustride];
                            vblk[i + j * 2] = data[voff + x / 2 + i + j * vstride];
                        }
                    }
                }
                self.v1_entries.push(YUVCode {
                    y: [avg4(yblk[ 0], yblk[ 1], yblk[ 4], yblk[ 5]),
                        avg4(yblk[ 2], yblk[ 3], yblk[ 6], yblk[ 7]),
                        avg4(yblk[ 8], yblk[ 9], yblk[12], yblk[13]),
                        avg4(yblk[10], yblk[11], yblk[14], yblk[15])],
                    u: avg4(ublk[0], ublk[1], ublk[2], ublk[3]),
                    v: avg4(vblk[0], vblk[1], vblk[2], vblk[3]),
                });
                for i in 0..4 {
                    let yidx = (i & 1) * 2 + (i & 2) * 4;
                    self.v4_entries.push(YUVCode {
                        y: [ yblk[yidx], yblk[yidx + 1], yblk[yidx + 4], yblk[yidx + 5] ],
                        u: ublk[i],
                        v: vblk[i],
                    });
                }
            }
            yoff += ystride * 4;
            uoff += ustride * 2;
            voff += vstride * 2;
        }
    }
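    /// Linear search for the codebook entry closest to `code`; returns its index
    /// and the squared distance.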
    fn find_nearest(codebook: &[YUVCode], code: YUVCode) -> (u8, u32) {
        let mut min_dist = std::u32::MAX;
        let mut idx = 0;
        for (i, cw) in codebook.iter().enumerate() {
            let dist = cw.dist(code);
            if dist < min_dist {
                min_dist = dist;
                idx = i;
                if dist == 0 {
                    break;
                }
            }
        }
        (idx as u8, min_dist)
    }
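    /// Estimates whether transmitting only the changed codebook entries (plus update
    /// masks) is cheaper than transmitting the full codebook.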
    fn can_update_cb(new_cb: &[YUVCode; 256], old_cb: &[YUVCode; 256], cb_size: usize) -> bool {
        let mut skip_count = 0;
        for (new, old) in new_cb.iter().zip(old_cb.iter()) {
            if new == old {
                skip_count += 1;
            }
        }
        let full_size = cb_size * 256;
        let upd_size = cb_size * (256 - skip_count) + 64;
        upd_size < full_size
    }
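    /// Writes a codebook chunk, either as a full codebook or as a masked update
    /// containing only the entries that differ from the previous codebook.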
    fn write_cb(bw: &mut ByteWriter, mut id: u8, new_cb: &[YUVCode; 256], old_cb: &[YUVCode; 256], grayscale: bool, update: bool) -> EncoderResult<()> {
        if grayscale {
            id |= 4;
        }
        if update {
            id |= 1;
        }
        bw.write_byte(id)?;
        bw.write_u24be(0)?;
        let chunk_pos = bw.tell();
        if !update {
            for entry in new_cb.iter() {
                bw.write_buf(&entry.y)?;
                if !grayscale {
                    bw.write_byte(entry.u ^ 0x80)?;
                    bw.write_byte(entry.v ^ 0x80)?;
                }
            }
        } else {
            let mut end = 256;
            // determine where the trailing run of unchanged entries starts so it
            // does not have to be signalled (reversed index i maps to entry 255 - i)
            for (i, (ncw, ocw)) in new_cb.iter().rev().zip(old_cb.iter().rev()).enumerate() {
                if ncw == ocw {
                    end = 255 - i;
                } else {
                    break;
                }
            }
            for i in (0..end).step_by(32) {
                let mut mask = 0;
                for j in 0..32 {
                    mask <<= 1;
                    if new_cb[i + j] != old_cb[i + j] {
                        mask |= 1;
                    }
                }
                bw.write_u32be(mask)?;
                for j in 0..32 {
                    if new_cb[i + j] == old_cb[i + j] { continue; }
                    bw.write_buf(&new_cb[i + j].y)?;
                    if !grayscale {
                        bw.write_byte(new_cb[i + j].u ^ 0x80)?;
                        bw.write_byte(new_cb[i + j].v ^ 0x80)?;
                    }
                }
            }
        }
        patch_size(bw, chunk_pos)?;
        Ok(())
    }
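    /// Reconstructs the just-encoded strip into the stored reference frame the same
    /// way a decoder would, so later inter frames can be predicted from it.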
    fn render_stripe(&mut self, intra: bool, start: usize, end: usize) {
        if let Some(ref mut dst_frm) = self.lastfrm {
            let ystride = dst_frm.get_stride(0);
            let mut yoff = dst_frm.get_offset(0) + start * ystride;
            let ustride = dst_frm.get_stride(1);
            let mut uoff = dst_frm.get_offset(1) + start / 2 * ustride;
            let vstride = dst_frm.get_stride(2);
            let mut voff = dst_frm.get_offset(2) + start / 2 * vstride;
            let (width, _) = dst_frm.get_dimensions(0);
            let data = dst_frm.get_data_mut().unwrap();
            let mut miter = self.masks.masks.iter();
            let mut v1_iter = self.v1_idx.iter();
            let mut v4_iter = self.v4_idx.iter();
            let mut cur_mask = 0;
            let mut cur_bit = 0;
            for _ in (start..end).step_by(4) {
                for x in (0..width).step_by(4) {
                    if cur_bit == 0 {
                        if !intra || !self.v1_idx.is_empty() {
                            cur_mask = *miter.next().unwrap();
                        } else {
                            cur_mask = 0xFFFFFFFF;
                        }
                        cur_bit = 1 << 31;
                    }
                    if !intra {
                        if (cur_mask & cur_bit) == 0 {
                            cur_bit >>= 1;
                            continue;
                        }
                        cur_bit >>= 1;
                        if cur_bit == 0 {
                            cur_mask = *miter.next().unwrap();
                            cur_bit = 1 << 31;
                        }
                    }
                    if (cur_mask & cur_bit) == 0 {
                        let idx = *v1_iter.next().unwrap() as usize;
                        let cb = &self.v1_cur_cb[idx];

                        let mut coff = yoff + x;
                        data[coff] = cb.y[0]; data[coff + 1] = cb.y[0];
                        data[coff + 2] = cb.y[1]; data[coff + 3] = cb.y[1];
                        coff += ystride;
                        data[coff] = cb.y[0]; data[coff + 1] = cb.y[0];
                        data[coff + 2] = cb.y[1]; data[coff + 3] = cb.y[1];
                        coff += ystride;
                        data[coff] = cb.y[2]; data[coff + 1] = cb.y[2];
                        data[coff + 2] = cb.y[3]; data[coff + 3] = cb.y[3];
                        coff += ystride;
                        data[coff] = cb.y[2]; data[coff + 1] = cb.y[2];
                        data[coff + 2] = cb.y[3]; data[coff + 3] = cb.y[3];

                        if !self.grayscale {
                            let mut coff = uoff + x / 2;
                            data[coff] = cb.u; data[coff + 1] = cb.u;
                            coff += ustride;
                            data[coff] = cb.u; data[coff + 1] = cb.u;

                            let mut coff = voff + x / 2;
                            data[coff] = cb.v; data[coff + 1] = cb.v;
                            coff += vstride;
                            data[coff] = cb.v; data[coff + 1] = cb.v;
                        }
                    } else {
                        let idx0 = *v4_iter.next().unwrap() as usize;
                        let cb0 = &self.v4_cur_cb[idx0];
                        let idx1 = *v4_iter.next().unwrap() as usize;
                        let cb1 = &self.v4_cur_cb[idx1];
                        let idx2 = *v4_iter.next().unwrap() as usize;
                        let cb2 = &self.v4_cur_cb[idx2];
                        let idx3 = *v4_iter.next().unwrap() as usize;
                        let cb3 = &self.v4_cur_cb[idx3];

                        let mut coff = yoff + x;
                        data[coff] = cb0.y[0]; data[coff + 1] = cb0.y[1];
                        data[coff + 2] = cb1.y[0]; data[coff + 3] = cb1.y[1];
                        coff += ystride;
                        data[coff] = cb0.y[2]; data[coff + 1] = cb0.y[3];
                        data[coff + 2] = cb1.y[2]; data[coff + 3] = cb1.y[3];
                        coff += ystride;
                        data[coff] = cb2.y[0]; data[coff + 1] = cb2.y[1];
                        data[coff + 2] = cb3.y[0]; data[coff + 3] = cb3.y[1];
                        coff += ystride;
                        data[coff] = cb2.y[2]; data[coff + 1] = cb2.y[3];
                        data[coff + 2] = cb3.y[2]; data[coff + 3] = cb3.y[3];

                        if !self.grayscale {
                            let mut coff = uoff + x / 2;
                            data[coff] = cb0.u; data[coff + 1] = cb1.u;
                            coff += ustride;
                            data[coff] = cb2.u; data[coff + 1] = cb3.u;

                            let mut coff = voff + x / 2;
                            data[coff] = cb0.v; data[coff + 1] = cb1.v;
                            coff += vstride;
                            data[coff] = cb2.v; data[coff + 1] = cb3.v;
                        }
                    }
                    cur_bit >>= 1;
                }
                yoff += ystride * 4;
                uoff += ustride * 2;
                voff += vstride * 2;
            }
        } else {
            unreachable!();
        }
    }
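    /// Computes, for every 4x4 block in the strip, the squared difference against the
    /// reconstructed previous frame; used to decide whether a block can be skipped.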
    fn calc_skip_dist(&mut self, in_frm: &NAVideoBuffer<u8>, start: usize, end: usize) {
        self.skip_dist.truncate(0);
        if let Some(ref ref_frm) = self.lastfrm {
            let rystride = ref_frm.get_stride(0);
            let mut ryoff = ref_frm.get_offset(0) + start * rystride;
            let rustride = ref_frm.get_stride(1);
            let mut ruoff = ref_frm.get_offset(1) + start / 2 * rustride;
            let rvstride = ref_frm.get_stride(2);
            let mut rvoff = ref_frm.get_offset(2) + start / 2 * rvstride;
            let (width, _) = ref_frm.get_dimensions(0);
            let rdata = ref_frm.get_data();

            let iystride = in_frm.get_stride(0);
            let mut iyoff = in_frm.get_offset(0) + start * iystride;
            let iustride = in_frm.get_stride(1);
            let mut iuoff = in_frm.get_offset(1) + start / 2 * iustride;
            let ivstride = in_frm.get_stride(2);
            let mut ivoff = in_frm.get_offset(2) + start / 2 * ivstride;
            let idata = in_frm.get_data();

            for _ in (start..end).step_by(4) {
                for x in (0..width).step_by(4) {
                    let mut dist = 0;
                    let mut roff = ryoff + x;
                    let mut ioff = iyoff + x;
                    for _ in 0..4 {
                        for i in 0..4 {
                            let d = i32::from(rdata[roff + i]) - i32::from(idata[ioff + i]);
                            dist += d * d;
                        }
                        roff += rystride;
                        ioff += iystride;
                    }
                    if !self.grayscale {
                        let mut roff = ruoff + x / 2;
                        let mut ioff = iuoff + x / 2;
                        let ud = i32::from(rdata[roff]) - i32::from(idata[ioff]);
                        dist += ud * ud;
                        let ud = i32::from(rdata[roff + 1]) - i32::from(idata[ioff + 1]);
                        dist += ud * ud;
                        roff += rustride; ioff += iustride;
                        let ud = i32::from(rdata[roff]) - i32::from(idata[ioff]);
                        dist += ud * ud;
                        let ud = i32::from(rdata[roff + 1]) - i32::from(idata[ioff + 1]);
                        dist += ud * ud;

                        let mut roff = rvoff + x / 2;
                        let mut ioff = ivoff + x / 2;
                        let vd = i32::from(rdata[roff]) - i32::from(idata[ioff]);
                        dist += vd * vd;
                        let vd = i32::from(rdata[roff + 1]) - i32::from(idata[ioff + 1]);
                        dist += vd * vd;
                        roff += rvstride; ioff += ivstride;
                        let vd = i32::from(rdata[roff]) - i32::from(idata[ioff]);
                        dist += vd * vd;
                        let vd = i32::from(rdata[roff + 1]) - i32::from(idata[ioff + 1]);
                        dist += vd * vd;
                    }
                    self.skip_dist.push(dist as u32);
                }

                iyoff += iystride * 4;
                iuoff += iustride * 2;
                ivoff += ivstride * 2;
                ryoff += rystride * 4;
                ruoff += rustride * 2;
                rvoff += rvstride * 2;
            }
        } else {
            unreachable!();
        }
    }
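    /// Builds the current V1 and V4 codebooks from the collected vectors using the
    /// selected quantisation mode and clears the unused codebook slots.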
    fn quant_vectors(&mut self) {
        match self.qmode {
            QuantMode::ELBG => {
                let mut elbg_v1: ELBG<YUVCode, YUVCodeSum> = ELBG::new(&self.v1_cb);
                let mut elbg_v4: ELBG<YUVCode, YUVCodeSum> = ELBG::new(&self.v4_cb);

                for entry in self.v1_cb.iter_mut().skip(self.v1_len) {
                    self.rng.fill_entry(entry);
                }
                for entry in self.v4_cb.iter_mut().skip(self.v4_len) {
                    self.rng.fill_entry(entry);
                }

                self.v1_len = elbg_v1.quantise(&self.v1_entries, &mut self.v1_cur_cb);
                self.v4_len = elbg_v4.quantise(&self.v4_entries, &mut self.v4_cur_cb);
            },
            QuantMode::Hybrid => {
                quantise_median_cut::<YUVCode, YUVCodeSum>(&self.v1_entries, &mut self.v1_cur_cb);
                quantise_median_cut::<YUVCode, YUVCodeSum>(&self.v4_entries, &mut self.v4_cur_cb);
                let mut elbg_v1: ELBG<YUVCode, YUVCodeSum> = ELBG::new(&self.v1_cur_cb);
                let mut elbg_v4: ELBG<YUVCode, YUVCodeSum> = ELBG::new(&self.v4_cur_cb);
                self.v1_len = elbg_v1.quantise(&self.v1_entries, &mut self.v1_cur_cb);
                self.v4_len = elbg_v4.quantise(&self.v4_entries, &mut self.v4_cur_cb);
            },
            QuantMode::MedianCut => {
                self.v1_len = quantise_median_cut::<YUVCode, YUVCodeSum>(&self.v1_entries, &mut self.v1_cur_cb);
                self.v4_len = quantise_median_cut::<YUVCode, YUVCodeSum>(&self.v4_entries, &mut self.v4_cur_cb);
            },
        };

        for e in self.v1_cur_cb.iter_mut().skip(self.v1_len) { *e = YUVCode::default(); }
        for e in self.v4_cur_cb.iter_mut().skip(self.v4_len) { *e = YUVCode::default(); }
    }
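    /// Encodes a key frame: each strip gets fresh codebooks, every 4x4 block is coded
    /// either as one V1 index or four V4 indices, and the strip is reconstructed
    /// afterwards to serve as the prediction reference.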
    fn encode_intra(&mut self, bw: &mut ByteWriter, in_frm: &NAVideoBuffer<u8>) -> EncoderResult<bool> {
        let (width, height) = in_frm.get_dimensions(0);
        let mut strip_h = (height / self.nstrips + 3) & !3;
        if strip_h == 0 {
            self.nstrips = 1;
            strip_h = height;
        }
        let mut start_line = 0;
        let mut end_line = strip_h;

        bw.write_byte(0)?; // intra flag
        bw.write_u24be(0)?; // frame size
        let frame_data_pos = bw.tell();
        bw.write_u16be(width as u16)?;
        bw.write_u16be(height as u16)?;
        bw.write_u16be(self.nstrips as u16)?;

        for entry in self.v1_cb.iter_mut() {
            self.rng.fill_entry(entry);
        }
        for entry in self.v4_cb.iter_mut() {
            self.rng.fill_entry(entry);
        }
        while start_line < height {
            self.read_strip(in_frm, start_line, end_line);

            self.quant_vectors();
            if self.grayscale {
                for cw in self.v1_cur_cb.iter_mut() {
                    cw.u = 128;
                    cw.v = 128;
                }
                for cw in self.v4_cur_cb.iter_mut() {
                    cw.u = 128;
                    cw.v = 128;
                }
            }

            self.v1_idx.truncate(0);
            self.v4_idx.truncate(0);
            self.masks.reset();

            for (v1_entry, v4_entries) in self.v1_entries.iter().zip(self.v4_entries.chunks(4)) {
                let (v1_idx, v1_dist) = Self::find_nearest(&self.v1_cur_cb[..self.v1_len], *v1_entry);
                if v1_dist == 0 {
                    self.masks.put_v1();
                    self.v1_idx.push(v1_idx);
                    continue;
                }
                let (v40_idx, v40_dist) = Self::find_nearest(&self.v4_cur_cb[..self.v4_len], v4_entries[0]);
                let (v41_idx, v41_dist) = Self::find_nearest(&self.v4_cur_cb[..self.v4_len], v4_entries[1]);
                let (v42_idx, v42_dist) = Self::find_nearest(&self.v4_cur_cb[..self.v4_len], v4_entries[2]);
                let (v43_idx, v43_dist) = Self::find_nearest(&self.v4_cur_cb[..self.v4_len], v4_entries[3]);
                if v40_dist + v41_dist + v42_dist + v43_dist > v1_dist {
                    self.masks.put_v4();
                    self.v4_idx.push(v40_idx);
                    self.v4_idx.push(v41_idx);
                    self.v4_idx.push(v42_idx);
                    self.v4_idx.push(v43_idx);
                } else {
                    self.masks.put_v1();
                    self.v1_idx.push(v1_idx);
                }
            }
            self.masks.end();

            let mut is_intra_strip = start_line == 0;
            let (upd_v1, upd_v4) = if !is_intra_strip {
                    let cb_size = if self.grayscale { 4 } else { 6 };
                    (Self::can_update_cb(&self.v1_cur_cb, &self.v1_cb, cb_size),
                     Self::can_update_cb(&self.v4_cur_cb, &self.v4_cb, cb_size))
                } else {
                    (false, false)
                };
            if !is_intra_strip && !upd_v1 && !upd_v4 {
                is_intra_strip = true;
            }
            bw.write_byte(if is_intra_strip { 0x10 } else { 0x11 })?;
            bw.write_u24be(0)?; // strip size
            let strip_data_pos = bw.tell();
            bw.write_u16be(0)?; // yoff
            bw.write_u16be(0)?; // xoff
            bw.write_u16be((end_line - start_line) as u16)?;
            bw.write_u16be(width as u16)?;

            Self::write_cb(bw, 0x20, &self.v4_cur_cb, &self.v4_cb, self.grayscale, upd_v4)?;
            Self::write_cb(bw, 0x22, &self.v1_cur_cb, &self.v1_cb, self.grayscale, upd_v1)?;

            self.render_stripe(true, start_line, end_line);

            if self.v4_idx.is_empty() {
                bw.write_byte(0x32)?;
                bw.write_u24be((self.v1_idx.len() + 4) as u32)?;
                bw.write_buf(self.v1_idx.as_slice())?;
            } else {
                bw.write_byte(0x30)?;
                bw.write_u24be(0)?;
                let chunk_pos = bw.tell();
                let mut v1_pos = 0;
                let mut v4_pos = 0;
                for _ in 0..32 {
                    self.v1_idx.push(0);
                    self.v4_idx.push(0);
                    self.v4_idx.push(0);
                    self.v4_idx.push(0);
                    self.v4_idx.push(0);
                }
                for mask in self.masks.masks.iter() {
                    bw.write_u32be(*mask)?;
                    for j in (0..32).rev() {
                        if (mask & (1 << j)) == 0 {
                            bw.write_byte(self.v1_idx[v1_pos])?;
                            v1_pos += 1;
                        } else {
                            bw.write_byte(self.v4_idx[v4_pos])?;
                            bw.write_byte(self.v4_idx[v4_pos + 1])?;
                            bw.write_byte(self.v4_idx[v4_pos + 2])?;
                            bw.write_byte(self.v4_idx[v4_pos + 3])?;
                            v4_pos += 4;
                        }
                    }
                }
                patch_size(bw, chunk_pos)?;
            }

            patch_size(bw, strip_data_pos)?;

            self.v1_cb.copy_from_slice(&self.v1_cur_cb);
            self.v4_cb.copy_from_slice(&self.v4_cur_cb);
            start_line = end_line;
            end_line = (end_line + strip_h).min(height);
        }
        patch_size(bw, frame_data_pos)?;
        Ok(true)
    }
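    /// Encodes an inter frame: each 4x4 block is either skipped (copied from the
    /// previous frame), coded with one V1 index or coded with four V4 indices,
    /// based on the distortion estimates.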
    fn encode_inter(&mut self, bw: &mut ByteWriter, in_frm: &NAVideoBuffer<u8>) -> EncoderResult<bool> {
        let (width, height) = in_frm.get_dimensions(0);
        let mut strip_h = (height / self.nstrips + 3) & !3;
        if strip_h == 0 {
            self.nstrips = 1;
            strip_h = height;
        }
        let mut start_line = 0;
        let mut end_line = strip_h;

        bw.write_byte(1)?; // intra flag
        bw.write_u24be(0)?; // frame size
        let frame_data_pos = bw.tell();
        bw.write_u16be(width as u16)?;
        bw.write_u16be(height as u16)?;
        bw.write_u16be(self.nstrips as u16)?;

        while start_line < height {
            self.read_strip(in_frm, start_line, end_line);
            self.calc_skip_dist(in_frm, start_line, end_line);

            self.quant_vectors();
            if self.grayscale {
                for cw in self.v1_cur_cb.iter_mut() {
                    cw.u = 128;
                    cw.v = 128;
                }
                for cw in self.v4_cur_cb.iter_mut() {
                    cw.u = 128;
                    cw.v = 128;
                }
            }

            self.v1_idx.truncate(0);
            self.v4_idx.truncate(0);
            self.masks.reset();

            let mut skip_iter = self.skip_dist.iter();
            for (v1_entry, v4_entries) in self.v1_entries.iter().zip(self.v4_entries.chunks(4)) {
                let skip_dist = *skip_iter.next().unwrap();
                if skip_dist == 0 {
                    self.masks.put_inter(true);
                    continue;
                }
                let (v1_idx, v1_dist) = Self::find_nearest(&self.v1_cur_cb[..self.v1_len], *v1_entry);
                if skip_dist < v1_dist {
                    self.masks.put_inter(true);
                    continue;
                } else {
                    self.masks.put_inter(false);
                }
                if v1_dist == 0 {
                    self.masks.put_v1();
                    self.v1_idx.push(v1_idx);
                    continue;
                }
                let (v40_idx, v40_dist) = Self::find_nearest(&self.v4_cur_cb[..self.v4_len], v4_entries[0]);
                let (v41_idx, v41_dist) = Self::find_nearest(&self.v4_cur_cb[..self.v4_len], v4_entries[1]);
                let (v42_idx, v42_dist) = Self::find_nearest(&self.v4_cur_cb[..self.v4_len], v4_entries[2]);
                let (v43_idx, v43_dist) = Self::find_nearest(&self.v4_cur_cb[..self.v4_len], v4_entries[3]);
                if v40_dist + v41_dist + v42_dist + v43_dist > v1_dist {
                    self.masks.put_v4();
                    self.v4_idx.push(v40_idx);
                    self.v4_idx.push(v41_idx);
                    self.v4_idx.push(v42_idx);
                    self.v4_idx.push(v43_idx);
                } else {
                    self.masks.put_v1();
                    self.v1_idx.push(v1_idx);
                }
            }
            self.masks.end();

            let (upd_v1, upd_v4) = {
                    let cb_size = if self.grayscale { 4 } else { 6 };
                    (Self::can_update_cb(&self.v1_cur_cb, &self.v1_cb, cb_size),
                     Self::can_update_cb(&self.v4_cur_cb, &self.v4_cb, cb_size))
                };
            bw.write_byte(0x11)?;
            bw.write_u24be(0)?; // strip size
            let strip_data_pos = bw.tell();
            bw.write_u16be(0)?; // yoff
            bw.write_u16be(0)?; // xoff
            bw.write_u16be((end_line - start_line) as u16)?;
            bw.write_u16be(width as u16)?;

            Self::write_cb(bw, 0x20, &self.v4_cur_cb, &self.v4_cb, self.grayscale, upd_v4)?;
            Self::write_cb(bw, 0x22, &self.v1_cur_cb, &self.v1_cb, self.grayscale, upd_v1)?;

            self.render_stripe(false, start_line, end_line);

            bw.write_byte(0x31)?;
            bw.write_u24be(0)?;
            let chunk_pos = bw.tell();
            let mut v1_pos = 0;
            let mut v4_pos = 0;
            for _ in 0..32 {
                self.v1_idx.push(0);
                self.v4_idx.push(0);
                self.v4_idx.push(0);
                self.v4_idx.push(0);
                self.v4_idx.push(0);
            }
            let mut skip = true;
            for mask in self.masks.masks.iter() {
                bw.write_u32be(*mask)?;
                if *mask == 0 { continue; }
                let mut bit = 1 << 31;
                while bit > 0 {
                    if skip {
                        skip = (mask & bit) == 0;
                        bit >>= 1;
                    } else {
                        if (mask & bit) == 0 {
                            bw.write_byte(self.v1_idx[v1_pos])?;
                            v1_pos += 1;
                        } else {
                            bw.write_byte(self.v4_idx[v4_pos])?;
                            bw.write_byte(self.v4_idx[v4_pos + 1])?;
                            bw.write_byte(self.v4_idx[v4_pos + 2])?;
                            bw.write_byte(self.v4_idx[v4_pos + 3])?;
                            v4_pos += 4;
                        }
                        bit >>= 1;
                        skip = true;
                    }
                }
            }
            patch_size(bw, chunk_pos)?;

            patch_size(bw, strip_data_pos)?;

            self.v1_cb.copy_from_slice(&self.v1_cur_cb);
            self.v4_cb.copy_from_slice(&self.v4_cur_cb);
            start_line = end_line;
            end_line = (end_line + strip_h).min(height);
        }
        patch_size(bw, frame_data_pos)?;
        Ok(true)
    }
}

impl NAEncoder for CinepakEncoder {
    fn negotiate_format(&self, encinfo: &EncodeParameters) -> EncoderResult<EncodeParameters> {
        match encinfo.format {
            NACodecTypeInfo::None => {
                let mut ofmt = EncodeParameters::default();
                ofmt.format = NACodecTypeInfo::Video(NAVideoInfo::new(0, 0, true, YUV420_FORMAT));
                Ok(ofmt)
            },
            NACodecTypeInfo::Audio(_) => Err(EncoderError::FormatError),
            NACodecTypeInfo::Video(vinfo) => {
                let pix_fmt = if vinfo.format == GRAY_FORMAT { GRAY_FORMAT } else { YUV420_FORMAT };
                let outinfo = NAVideoInfo::new((vinfo.width + 3) & !3, (vinfo.height + 3) & !3, false, pix_fmt);
                let mut ofmt = *encinfo;
                ofmt.format = NACodecTypeInfo::Video(outinfo);
                Ok(ofmt)
            }
        }
    }
    fn init(&mut self, stream_id: u32, encinfo: EncodeParameters) -> EncoderResult<NAStreamRef> {
        match encinfo.format {
            NACodecTypeInfo::None => Err(EncoderError::FormatError),
            NACodecTypeInfo::Audio(_) => Err(EncoderError::FormatError),
            NACodecTypeInfo::Video(vinfo) => {
                if vinfo.format != YUV420_FORMAT && vinfo.format != GRAY_FORMAT {
                    return Err(EncoderError::FormatError);
                }
                if ((vinfo.width | vinfo.height) & 3) != 0 {
                    return Err(EncoderError::FormatError);
                }
                if (vinfo.width | vinfo.height) >= (1 << 16) {
                    return Err(EncoderError::FormatError);
                }

                let out_info = NAVideoInfo::new(vinfo.width, vinfo.height, false, vinfo.format);
                let info = NACodecInfo::new("cinepak", NACodecTypeInfo::Video(out_info), None);
                let mut stream = NAStream::new(StreamType::Video, stream_id, info, encinfo.tb_num, encinfo.tb_den);
                stream.set_num(stream_id as usize);
                let stream = stream.into_ref();

                self.stream = Some(stream.clone());
                self.quality = encinfo.quality;
                self.grayscale = vinfo.format != YUV420_FORMAT;
                let num_blocks = vinfo.width / 2 * vinfo.height / 2;
                self.v1_entries = Vec::with_capacity(num_blocks);
                self.v4_entries = Vec::with_capacity(num_blocks * 4);
                self.v1_idx = Vec::with_capacity(num_blocks);
                self.v4_idx = Vec::with_capacity(num_blocks * 4);
                self.skip_dist = Vec::with_capacity(vinfo.width / 4 * vinfo.height / 4);

                let buf = alloc_video_buffer(out_info, 2)?;
                self.lastfrm = Some(buf.get_vbuf().unwrap());

                Ok(stream)
            },
        }
    }
    fn encode(&mut self, frm: &NAFrame) -> EncoderResult<()> {
        let buf = frm.get_buffer();
        if let Some(ref vbuf) = buf.get_vbuf() {
            let mut dbuf = Vec::with_capacity(4);
            let mut gw = GrowableMemoryWriter::new_write(&mut dbuf);
            let mut bw = ByteWriter::new(&mut gw);
            let is_intra = if self.frmcount == 0 {
                    self.encode_intra(&mut bw, vbuf)?
                } else {
                    self.encode_inter(&mut bw, vbuf)?
                };
            self.pkt = Some(NAPacket::new(self.stream.clone().unwrap(), frm.ts, is_intra, dbuf));
            self.frmcount += 1;
            if self.frmcount == self.key_int {
                self.frmcount = 0;
            }
            Ok(())
        } else {
            Err(EncoderError::InvalidParameters)
        }
    }
    fn get_packet(&mut self) -> EncoderResult<Option<NAPacket>> {
        let mut npkt = None;
        std::mem::swap(&mut self.pkt, &mut npkt);
        Ok(npkt)
    }
    fn flush(&mut self) -> EncoderResult<()> {
        self.frmcount = 0;
        Ok(())
    }
}
999 | ||
3c406629 KS |
1000 | const ENCODER_OPTS: &[NAOptionDefinition] = &[ |
1001 | NAOptionDefinition { | |
ee0ca773 | 1002 | name: KEYFRAME_OPTION, description: KEYFRAME_OPTION_DESC, |
3c406629 KS |
1003 | opt_type: NAOptionDefinitionType::Int(Some(0), Some(128)) }, |
1004 | NAOptionDefinition { | |
1005 | name: "nstrips", description: "Number of strips per frame (0 - automatic)", | |
1006 | opt_type: NAOptionDefinitionType::Int(Some(0), Some(16)) }, | |
1007 | NAOptionDefinition { | |
1008 | name: "quant_mode", description: "Quantisation mode", | |
1009 | opt_type: NAOptionDefinitionType::String(Some(&["elbg", "hybrid", "mediancut"])) }, | |
1010 | ]; | |
1011 | ||
a178c22c | 1012 | impl NAOptionHandler for CinepakEncoder { |
3c406629 KS |
1013 | fn get_supported_options(&self) -> &[NAOptionDefinition] { ENCODER_OPTS } |
1014 | fn set_options(&mut self, options: &[NAOption]) { | |
1015 | for option in options.iter() { | |
3c406629 KS |
1016 | for opt_def in ENCODER_OPTS.iter() { |
1017 | if opt_def.check(option).is_ok() { | |
1018 | match option.name { | |
ee0ca773 | 1019 | KEYFRAME_OPTION => { |
3c406629 KS |
1020 | if let NAValue::Int(intval) = option.value { |
1021 | self.key_int = intval as u8; | |
1022 | } | |
1023 | }, | |
1024 | "nstrips" => { | |
1025 | if let NAValue::Int(intval) = option.value { | |
1026 | self.nstrips = intval as usize; | |
1027 | } | |
1028 | }, | |
1029 | "quant_mode" => { | |
1030 | if let NAValue::String(ref str) = option.value { | |
1031 | match str.as_str() { | |
1032 | "elbg" => self.qmode = QuantMode::ELBG, | |
1033 | "hybrid" => self.qmode = QuantMode::Hybrid, | |
1034 | "mediancut" => self.qmode = QuantMode::MedianCut, | |
1035 | _ => {}, | |
1036 | }; | |
1037 | } | |
1038 | }, | |
1039 | _ => {}, | |
1040 | }; | |
1041 | } | |
1042 | } | |
1043 | } | |
1044 | } | |
1045 | fn query_option_value(&self, name: &str) -> Option<NAValue> { | |
1046 | match name { | |
8211e0aa | 1047 | KEYFRAME_OPTION => Some(NAValue::Int(i64::from(self.key_int))), |
3c406629 KS |
1048 | "nstrips" => Some(NAValue::Int(self.nstrips as i64)), |
1049 | "quant_mode" => Some(NAValue::String(self.qmode.to_string())), | |
1050 | _ => None, | |
1051 | } | |
1052 | } | |
a178c22c KS |
1053 | } |
1054 | ||
1055 | pub fn get_encoder() -> Box<dyn NAEncoder + Send> { | |
1056 | Box::new(CinepakEncoder::new()) | |
1057 | } | |
1058 | ||
1059 | #[cfg(test)] | |
1060 | mod test { | |
1061 | use nihav_core::codecs::*; | |
1062 | use nihav_core::demuxers::*; | |
1063 | use nihav_core::muxers::*; | |
1064 | use crate::*; | |
1065 | use nihav_codec_support::test::enc_video::*; | |
1066 | ||
1067 | #[test] | |
1068 | fn test_cinepak_encoder() { | |
1069 | let mut dmx_reg = RegisteredDemuxers::new(); | |
1070 | generic_register_all_demuxers(&mut dmx_reg); | |
1071 | let mut dec_reg = RegisteredDecoders::new(); | |
78fb6560 | 1072 | generic_register_all_decoders(&mut dec_reg); |
a178c22c KS |
1073 | let mut mux_reg = RegisteredMuxers::new(); |
1074 | generic_register_all_muxers(&mut mux_reg); | |
1075 | let mut enc_reg = RegisteredEncoders::new(); | |
1076 | generic_register_all_encoders(&mut enc_reg); | |
1077 | ||
1078 | let dec_config = DecoderTestParams { | |
1079 | demuxer: "avi", | |
1080 | in_name: "assets/Misc/TalkingHead_352x288.avi", | |
1081 | stream_type: StreamType::Video, | |
1082 | limit: Some(2), | |
1083 | dmx_reg, dec_reg, | |
1084 | }; | |
1085 | let enc_config = EncoderTestParams { | |
1086 | muxer: "avi", | |
1087 | enc_name: "cinepak", | |
1088 | out_name: "cinepak.avi", | |
1089 | mux_reg, enc_reg, | |
1090 | }; | |
1091 | let dst_vinfo = NAVideoInfo { | |
1092 | width: 0, | |
1093 | height: 0, | |
1094 | format: YUV420_FORMAT, | |
1095 | flipped: true, | |
6bc499a0 | 1096 | bits: 12, |
a178c22c KS |
1097 | }; |
1098 | let enc_params = EncodeParameters { | |
1099 | format: NACodecTypeInfo::Video(dst_vinfo), | |
1100 | quality: 0, | |
1101 | bitrate: 0, | |
1102 | tb_num: 0, | |
1103 | tb_den: 0, | |
1104 | flags: 0, | |
1105 | }; | |
1106 | test_encoding_to_file(&dec_config, &enc_config, enc_params); | |
1107 | } | |
1108 | } |