// nihav-commonfmt/src/codecs/cinepakenc.rs
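//! Cinepak video encoder: frames are split into strips, each 4x4 block is coded
//! either with one V1 codebook index (downsampled block) or four V4 indices
//! (one per 2x2 sub-block); inter frames may additionally skip unchanged blocks.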
use nihav_core::codecs::*;
use nihav_core::io::byteio::*;
use nihav_codec_support::vq::*;

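/// Vector quantisation element: four luma samples plus one U and one V sample.
/// V1 entries describe a downsampled 4x4 block, V4 entries describe a 2x2 sub-block.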
#[derive(Default,Clone,Copy,PartialEq,Debug)]
struct YUVCode {
    y: [u8; 4],
    u: u8,
    v: u8,
}
impl VQElement for YUVCode {
    fn dist(&self, rval: Self) -> u32 {
        let mut ysum = 0;
        for (y0, y1) in self.y.iter().zip(rval.y.iter()) {
            let yd = i32::from(*y0) - i32::from(*y1);
            ysum += yd * yd;
        }
        let ud = i32::from(self.u) - i32::from(rval.u);
        let vd = i32::from(self.v) - i32::from(rval.v);
        (ysum + ud * ud + vd * vd) as u32
    }
    fn min_cw() -> Self { YUVCode { y: [0; 4], u: 0, v: 0 } }
    fn max_cw() -> Self { YUVCode { y: [255; 4], u: 255, v: 255 } }
    fn min(&self, rval: Self) -> Self {
        let mut ycode = YUVCode::default();
        for i in 0..4 {
            ycode.y[i] = self.y[i].min(rval.y[i]);
        }
        ycode.u = self.u.min(rval.u);
        ycode.v = self.v.min(rval.v);
        ycode
    }
    fn max(&self, rval: Self) -> Self {
        let mut ycode = YUVCode::default();
        for i in 0..4 {
            ycode.y[i] = self.y[i].max(rval.y[i]);
        }
        ycode.u = self.u.max(rval.u);
        ycode.v = self.v.max(rval.v);
        ycode
    }
    fn num_components() -> usize { 6 }
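    /// Stable counting sort of entries by a single component, used by the median-cut quantiser.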
    fn sort_by_component(arr: &mut [Self], component: usize) {
        let mut counts = [0; 256];
        for entry in arr.iter() {
            let idx = match component {
                0 | 1 | 2 | 3 => entry.y[component],
                4 => entry.u,
                _ => entry.v,
            } as usize;
            counts[idx] += 1;
        }
        let mut offs = [0; 256];
        for i in 0..255 {
            offs[i + 1] = offs[i] + counts[i];
        }
        let mut dst = vec![YUVCode::default(); arr.len()];
        for entry in arr.iter() {
            let idx = match component {
                0 | 1 | 2 | 3 => entry.y[component],
                4 => entry.u,
                _ => entry.v,
            } as usize;
            dst[offs[idx]] = *entry;
            offs[idx] += 1;
        }
        arr.copy_from_slice(dst.as_slice());
    }
    fn max_dist_component(min: &Self, max: &Self) -> usize {
        let mut comp = 0;
        let mut diff = 0;
        for i in 0..4 {
            let d = u32::from(max.y[i]) - u32::from(min.y[i]);
            if d > diff {
                diff = d;
                comp = i;
            }
        }
        let ud = u32::from(max.u) - u32::from(min.u);
        if ud > diff {
            diff = ud;
            comp = 4;
        }
        let vd = u32::from(max.v) - u32::from(min.v);
        if vd > diff {
            comp = 5;
        }
        comp
    }
}

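/// Running sums of element components, used by the quantiser to compute cluster centroids.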
#[derive(Default)]
struct YUVCodeSum {
    ysum: [u64; 4],
    usum: u64,
    vsum: u64,
    count: u64,
}

impl VQElementSum<YUVCode> for YUVCodeSum {
    fn zero() -> Self { Self::default() }
    fn add(&mut self, rval: YUVCode, count: u64) {
        for i in 0..4 {
            self.ysum[i] += u64::from(rval.y[i]) * count;
        }
        self.usum += u64::from(rval.u) * count;
        self.vsum += u64::from(rval.v) * count;
        self.count += count;
    }
    fn get_centroid(&self) -> YUVCode {
        if self.count != 0 {
            let mut ycode = YUVCode::default();
            for i in 0..4 {
                ycode.y[i] = ((self.ysum[i] + self.count / 2) / self.count) as u8;
            }
            ycode.u = ((self.usum + self.count / 2) / self.count) as u8;
            ycode.v = ((self.vsum + self.count / 2) / self.count) as u8;
            ycode
        } else {
            YUVCode::default()
        }
    }
}

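/// Simple xorshift-style pseudo-random generator used to seed the initial codebooks.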
struct RNG {
    seed: u32,
}

impl RNG {
    fn new() -> Self { Self { seed: 0x12345678 } }
    fn next(&mut self) -> u8 {
        let mut x = self.seed;
        x ^= x.wrapping_shl(13);
        x ^= x >> 17;
        self.seed = x;
        (self.seed >> 24) as u8
    }
    fn fill_entry(&mut self, entry: &mut YUVCode) {
        for y in entry.y.iter_mut() {
            *y = self.next();
        }
        entry.u = self.next();
        entry.v = self.next();
    }
}

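/// Pixel format description for greyscale (luma-only) input.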
const GRAY_FORMAT: NAPixelFormaton = NAPixelFormaton {
    model: ColorModel::YUV(YUVSubmodel::YUVJ),
    components: 1,
    comp_info: [Some(NAPixelChromaton{h_ss: 0, v_ss: 0, packed: false, depth: 8, shift: 0, comp_offs: 0, next_elem: 1}), None, None, None, None],
    elem_size: 1,
    be: true,
    alpha: false,
    palette: false,
};

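/// Accumulates per-block decision bits (V1/V4 mode and skip/code flags) into 32-bit masks.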
struct MaskWriter {
    masks: Vec<u32>,
    mask: u32,
    pos: u8,
}

impl MaskWriter {
    fn new() -> Self {
        Self {
            masks: Vec::new(),
            mask: 0,
            pos: 0,
        }
    }
    fn reset(&mut self) {
        self.masks.truncate(0);
        self.mask = 0;
        self.pos = 0;
    }
    fn put_v1(&mut self) {
        self.mask <<= 1;
        self.pos += 1;
        if self.pos == 32 {
            self.flush();
        }
    }
    fn put_v4(&mut self) {
        self.mask <<= 1;
        self.mask |= 1;
        self.pos += 1;
        if self.pos == 32 {
            self.flush();
        }
    }
    fn put_inter(&mut self, skip: bool) {
        self.mask <<= 1;
        self.mask |= !skip as u32;
        self.pos += 1;
        if self.pos == 32 {
            self.flush();
        }
    }
    fn flush(&mut self) {
        self.masks.push(self.mask);
        self.mask = 0;
        self.pos = 0;
    }
    fn end(&mut self) {
        if self.pos == 0 { return; }
        while self.pos < 32 {
            self.mask <<= 1;
            self.pos += 1;
        }
        self.flush();
    }
}

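/// Selectable codebook generation strategies.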
#[derive(Clone,Copy,PartialEq)]
enum QuantMode {
    ELBG,
    Hybrid,
    MedianCut,
}

impl QuantMode {
    fn to_string(&self) -> String {
        match *self {
            QuantMode::ELBG => "elbg".to_string(),
            QuantMode::Hybrid => "hybrid".to_string(),
            QuantMode::MedianCut => "mediancut".to_string(),
        }
    }
}

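/// Encoder state: previous and current V1/V4 codebooks, per-strip entry vectors,
/// chosen block indices, decision masks and the reconstructed reference frame.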
struct CinepakEncoder {
    stream: Option<NAStreamRef>,
    lastfrm: Option<NAVideoBufferRef<u8>>,
    pkt: Option<NAPacket>,
    frmcount: u8,
    key_int: u8,
    qmode: QuantMode,
    quality: u8,
    nstrips: usize,
    v1_entries: Vec<YUVCode>,
    v4_entries: Vec<YUVCode>,
    v1_cb: [YUVCode; 256],
    v4_cb: [YUVCode; 256],
    v1_cur_cb: [YUVCode; 256],
    v4_cur_cb: [YUVCode; 256],
    v1_idx: Vec<u8>,
    v4_idx: Vec<u8>,
    grayscale: bool,
    rng: RNG,
    masks: MaskWriter,
    skip_dist: Vec<u32>,
}

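/// Averages four samples with rounding; used to downsample luma 4x4 blocks and chroma 2x2 blocks.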
fn avg4(a: u8, b: u8, c: u8, d: u8) -> u8 {
    ((u16::from(a) + u16::from(b) + u16::from(c) + u16::from(d) + 3) >> 2) as u8
}

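/// Rewrites the 24-bit size field of a chunk once its payload has been written
/// (`pos` is the writer position right after the 4-byte chunk header).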
fn patch_size(bw: &mut ByteWriter, pos: u64) -> EncoderResult<()> {
    let size = bw.tell() - pos;
    bw.seek(SeekFrom::Current(-((size + 3) as i64)))?;
    bw.write_u24be((size + 4) as u32)?;
    bw.seek(SeekFrom::End(0))?;
    Ok(())
}

impl CinepakEncoder {
    fn new() -> Self {
        Self {
            stream: None,
            pkt: None,
            lastfrm: None,
            frmcount: 0,
            qmode: QuantMode::ELBG,
            key_int: 25,
            quality: 0,
            nstrips: 2,
            v1_entries: Vec::new(),
            v4_entries: Vec::new(),
            v1_cb: [YUVCode::default(); 256],
            v4_cb: [YUVCode::default(); 256],
            v1_cur_cb: [YUVCode::default(); 256],
            v4_cur_cb: [YUVCode::default(); 256],
            grayscale: false,
            rng: RNG::new(),
            v1_idx: Vec::new(),
            v4_idx: Vec::new(),
            masks: MaskWriter::new(),
            skip_dist: Vec::new(),
        }
    }
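    /// Gathers V1 (downsampled 4x4) and V4 (2x2) entries for one strip of the input frame.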
    fn read_strip(&mut self, in_frm: &NAVideoBuffer<u8>, start: usize, end: usize) {
        let ystride = in_frm.get_stride(0);
        let mut yoff = in_frm.get_offset(0) + start * ystride;
        let ustride = in_frm.get_stride(1);
        let mut uoff = in_frm.get_offset(1) + start / 2 * ustride;
        let vstride = in_frm.get_stride(2);
        let mut voff = in_frm.get_offset(2) + start / 2 * vstride;
        let (width, _) = in_frm.get_dimensions(0);
        let data = in_frm.get_data();
        self.v1_entries.truncate(0);
        self.v4_entries.truncate(0);
        for _ in (start..end).step_by(4) {
            for x in (0..width).step_by(4) {
                let mut yblk = [0; 16];
                let mut ublk = [128; 4];
                let mut vblk = [128; 4];
                for j in 0..4 {
                    for i in 0..4 {
                        yblk[i + j * 4] = data[yoff + x + i + j * ystride];
                    }
                }
                if !self.grayscale {
                    for j in 0..2 {
                        for i in 0..2 {
                            ublk[i + j * 2] = data[uoff + x / 2 + i + j * ustride];
                            vblk[i + j * 2] = data[voff + x / 2 + i + j * vstride];
                        }
                    }
                }
                self.v1_entries.push(YUVCode {
                    y: [avg4(yblk[ 0], yblk[ 1], yblk[ 4], yblk[ 5]),
                        avg4(yblk[ 2], yblk[ 3], yblk[ 6], yblk[ 7]),
                        avg4(yblk[ 8], yblk[ 9], yblk[12], yblk[13]),
                        avg4(yblk[10], yblk[11], yblk[14], yblk[15])],
                    u: avg4(ublk[0], ublk[1], ublk[2], ublk[3]),
                    v: avg4(vblk[0], vblk[1], vblk[2], vblk[3]),
                });
                for i in 0..4 {
                    let yidx = (i & 1) * 2 + (i & 2) * 4;
                    self.v4_entries.push(YUVCode {
                        y: [ yblk[yidx], yblk[yidx + 1], yblk[yidx + 4], yblk[yidx + 5] ],
                        u: ublk[i],
                        v: vblk[i],
                    });
                }
            }
            yoff += ystride * 4;
            uoff += ustride * 2;
            voff += vstride * 2;
        }
    }
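    /// Returns the index of the closest codeword and its squared distance, exiting early on an exact match.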
    fn find_nearest(codebook: &[YUVCode; 256], code: YUVCode) -> (u8, u32) {
        let mut min_dist = std::u32::MAX;
        let mut idx = 0;
        for (i, cw) in codebook.iter().enumerate() {
            let dist = cw.dist(code);
            if dist < min_dist {
                min_dist = dist;
                idx = i;
                if dist == 0 {
                    break;
                }
            }
        }
        (idx as u8, min_dist)
    }
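    /// Decides whether sending only the changed codebook entries is smaller than a full codebook.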
    fn can_update_cb(new_cb: &[YUVCode; 256], old_cb: &[YUVCode; 256], cb_size: usize) -> bool {
        let mut skip_count = 0;
        for (new, old) in new_cb.iter().zip(old_cb.iter()) {
            if new == old {
                skip_count += 1;
            }
        }
        let full_size = cb_size * 256;
        let upd_size = cb_size * (256 - skip_count) + 64;
        upd_size < full_size
    }
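    /// Writes a codebook chunk: either all 256 entries, or (for updates) update bitmasks
    /// followed by the changed entries only.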
    fn write_cb(bw: &mut ByteWriter, mut id: u8, new_cb: &[YUVCode; 256], old_cb: &[YUVCode; 256], grayscale: bool, update: bool) -> EncoderResult<()> {
        if grayscale {
            id |= 4;
        }
        if update {
            id |= 1;
        }
        bw.write_byte(id)?;
        bw.write_u24be(0)?;
        let chunk_pos = bw.tell();
        if !update {
            for entry in new_cb.iter() {
                bw.write_buf(&entry.y)?;
                if !grayscale {
                    bw.write_byte(entry.u ^ 0x80)?;
                    bw.write_byte(entry.v ^ 0x80)?;
                }
            }
        } else {
            // find the first index after which all entries are unchanged,
            // so the trailing mask words can be omitted entirely
            let mut end = 256;
            for (i, (ncw, ocw)) in new_cb.iter().rev().zip(old_cb.iter().rev()).enumerate() {
                if ncw == ocw {
                    end = 255 - i;
                } else {
                    break;
                }
            }
            for i in (0..end).step_by(32) {
                let mut mask = 0;
                for j in 0..32 {
                    mask <<= 1;
                    if new_cb[i + j] != old_cb[i + j] {
                        mask |= 1;
                    }
                }
                bw.write_u32be(mask)?;
                for j in 0..32 {
                    if new_cb[i + j] == old_cb[i + j] { continue; }
                    bw.write_buf(&new_cb[i + j].y)?;
                    if !grayscale {
                        bw.write_byte(new_cb[i + j].u ^ 0x80)?;
                        bw.write_byte(new_cb[i + j].v ^ 0x80)?;
                    }
                }
            }
        }
        patch_size(bw, chunk_pos)?;
        Ok(())
    }
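    /// Reconstructs the strip into the reference frame the same way a decoder would,
    /// so that inter-frame skip decisions are made against decoded output.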
    fn render_stripe(&mut self, intra: bool, start: usize, end: usize) {
        if let Some(ref mut dst_frm) = self.lastfrm {
            let ystride = dst_frm.get_stride(0);
            let mut yoff = dst_frm.get_offset(0) + start * ystride;
            let ustride = dst_frm.get_stride(1);
            let mut uoff = dst_frm.get_offset(1) + start / 2 * ustride;
            let vstride = dst_frm.get_stride(2);
            let mut voff = dst_frm.get_offset(2) + start / 2 * vstride;
            let (width, _) = dst_frm.get_dimensions(0);
            let data = dst_frm.get_data_mut().unwrap();
            let mut miter = self.masks.masks.iter();
            let mut v1_iter = self.v1_idx.iter();
            let mut v4_iter = self.v4_idx.iter();
            let mut cur_mask = 0;
            let mut cur_bit = 0;
            for _ in (start..end).step_by(4) {
                for x in (0..width).step_by(4) {
                    if cur_bit == 0 {
                        if !intra || self.v1_idx.len() > 0 {
                            cur_mask = *miter.next().unwrap();
                        } else {
                            cur_mask = 0xFFFFFFFF;
                        }
                        cur_bit = 1 << 31;
                    }
                    if !intra {
                        if (cur_mask & cur_bit) == 0 {
                            cur_bit >>= 1;
                            continue;
                        }
                        cur_bit >>= 1;
                        if cur_bit == 0 {
                            cur_mask = *miter.next().unwrap();
                            cur_bit = 1 << 31;
                        }
                    }
                    if (cur_mask & cur_bit) == 0 {
                        let idx = *v1_iter.next().unwrap() as usize;
                        let cb = &self.v1_cur_cb[idx];

                        let mut coff = yoff + x;
                        data[coff] = cb.y[0]; data[coff + 1] = cb.y[0];
                        data[coff + 2] = cb.y[1]; data[coff + 3] = cb.y[1];
                        coff += ystride;
                        data[coff] = cb.y[0]; data[coff + 1] = cb.y[0];
                        data[coff + 2] = cb.y[1]; data[coff + 3] = cb.y[1];
                        coff += ystride;
                        data[coff] = cb.y[2]; data[coff + 1] = cb.y[2];
                        data[coff + 2] = cb.y[3]; data[coff + 3] = cb.y[3];
                        coff += ystride;
                        data[coff] = cb.y[2]; data[coff + 1] = cb.y[2];
                        data[coff + 2] = cb.y[3]; data[coff + 3] = cb.y[3];

                        if !self.grayscale {
                            let mut coff = uoff + x / 2;
                            data[coff] = cb.u; data[coff + 1] = cb.u;
                            coff += ustride;
                            data[coff] = cb.u; data[coff + 1] = cb.u;

                            let mut coff = voff + x / 2;
                            data[coff] = cb.v; data[coff + 1] = cb.v;
                            coff += vstride;
                            data[coff] = cb.v; data[coff + 1] = cb.v;
                        }
                    } else {
                        let idx0 = *v4_iter.next().unwrap() as usize;
                        let cb0 = &self.v4_cur_cb[idx0];
                        let idx1 = *v4_iter.next().unwrap() as usize;
                        let cb1 = &self.v4_cur_cb[idx1];
                        let idx2 = *v4_iter.next().unwrap() as usize;
                        let cb2 = &self.v4_cur_cb[idx2];
                        let idx3 = *v4_iter.next().unwrap() as usize;
                        let cb3 = &self.v4_cur_cb[idx3];

                        let mut coff = yoff + x;
                        data[coff] = cb0.y[0]; data[coff + 1] = cb0.y[1];
                        data[coff + 2] = cb1.y[0]; data[coff + 3] = cb1.y[1];
                        coff += ystride;
                        data[coff] = cb0.y[2]; data[coff + 1] = cb0.y[3];
                        data[coff + 2] = cb1.y[2]; data[coff + 3] = cb1.y[3];
                        coff += ystride;
                        data[coff] = cb2.y[0]; data[coff + 1] = cb2.y[1];
                        data[coff + 2] = cb3.y[0]; data[coff + 3] = cb3.y[1];
                        coff += ystride;
                        data[coff] = cb2.y[2]; data[coff + 1] = cb2.y[3];
                        data[coff + 2] = cb3.y[2]; data[coff + 3] = cb3.y[3];

                        if !self.grayscale {
                            let mut coff = uoff + x / 2;
                            data[coff] = cb0.u; data[coff + 1] = cb1.u;
                            coff += ustride;
                            data[coff] = cb2.u; data[coff + 1] = cb3.u;

                            let mut coff = voff + x / 2;
                            data[coff] = cb0.v; data[coff + 1] = cb1.v;
                            coff += vstride;
                            data[coff] = cb2.v; data[coff + 1] = cb3.v;
                        }
                    }
                    cur_bit >>= 1;
                }
                yoff += ystride * 4;
                uoff += ustride * 2;
                voff += vstride * 2;
            }
        } else {
            unreachable!();
        }
    }
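    /// Computes, per 4x4 block, the squared difference between the input frame and the
    /// reconstructed previous frame; small values allow the block to be skipped in inter frames.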
    fn calc_skip_dist(&mut self, in_frm: &NAVideoBuffer<u8>, start: usize, end: usize) {
        self.skip_dist.truncate(0);
        if let Some(ref ref_frm) = self.lastfrm {
            let rystride = ref_frm.get_stride(0);
            let mut ryoff = ref_frm.get_offset(0) + start * rystride;
            let rustride = ref_frm.get_stride(1);
            let mut ruoff = ref_frm.get_offset(1) + start / 2 * rustride;
            let rvstride = ref_frm.get_stride(2);
            let mut rvoff = ref_frm.get_offset(2) + start / 2 * rvstride;
            let (width, _) = ref_frm.get_dimensions(0);
            let rdata = ref_frm.get_data();

            let iystride = in_frm.get_stride(0);
            let mut iyoff = in_frm.get_offset(0) + start * iystride;
            let iustride = in_frm.get_stride(1);
            let mut iuoff = in_frm.get_offset(1) + start / 2 * iustride;
            let ivstride = in_frm.get_stride(2);
            let mut ivoff = in_frm.get_offset(2) + start / 2 * ivstride;
            let idata = in_frm.get_data();

            for _ in (start..end).step_by(4) {
                for x in (0..width).step_by(4) {
                    let mut dist = 0;
                    let mut roff = ryoff + x;
                    let mut ioff = iyoff + x;
                    for _ in 0..4 {
                        for i in 0..4 {
                            let d = i32::from(rdata[roff + i]) - i32::from(idata[ioff + i]);
                            dist += d * d;
                        }
                        roff += rystride;
                        ioff += iystride;
                    }
                    if !self.grayscale {
                        let mut roff = ruoff + x / 2;
                        let mut ioff = iuoff + x / 2;
                        let ud = i32::from(rdata[roff]) - i32::from(idata[ioff]);
                        dist += ud * ud;
                        let ud = i32::from(rdata[roff + 1]) - i32::from(idata[ioff + 1]);
                        dist += ud * ud;
                        roff += rustride; ioff += iustride;
                        let ud = i32::from(rdata[roff]) - i32::from(idata[ioff]);
                        dist += ud * ud;
                        let ud = i32::from(rdata[roff + 1]) - i32::from(idata[ioff + 1]);
                        dist += ud * ud;

                        let mut roff = rvoff + x / 2;
                        let mut ioff = ivoff + x / 2;
                        let vd = i32::from(rdata[roff]) - i32::from(idata[ioff]);
                        dist += vd * vd;
                        let vd = i32::from(rdata[roff + 1]) - i32::from(idata[ioff + 1]);
                        dist += vd * vd;
                        roff += rvstride; ioff += ivstride;
                        let vd = i32::from(rdata[roff]) - i32::from(idata[ioff]);
                        dist += vd * vd;
                        let vd = i32::from(rdata[roff + 1]) - i32::from(idata[ioff + 1]);
                        dist += vd * vd;
                    }
                    self.skip_dist.push(dist as u32);
                }

                iyoff += iystride * 4;
                iuoff += iustride * 2;
                ivoff += ivstride * 2;
                ryoff += rystride * 4;
                ruoff += rustride * 2;
                rvoff += rvstride * 2;
            }
        } else {
            unreachable!();
        }
    }
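    /// Builds the current V1 and V4 codebooks from the gathered entries using the selected quantiser.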
    fn quant_vectors(&mut self) {
        match self.qmode {
            QuantMode::ELBG => {
                let mut elbg_v1: ELBG<YUVCode, YUVCodeSum> = ELBG::new(&self.v1_cb);
                let mut elbg_v4: ELBG<YUVCode, YUVCodeSum> = ELBG::new(&self.v4_cb);
                elbg_v1.quantise(&self.v1_entries, &mut self.v1_cur_cb);
                elbg_v4.quantise(&self.v4_entries, &mut self.v4_cur_cb);
            },
            QuantMode::Hybrid => {
                quantise_median_cut::<YUVCode, YUVCodeSum>(&self.v1_entries, &mut self.v1_cur_cb);
                quantise_median_cut::<YUVCode, YUVCodeSum>(&self.v4_entries, &mut self.v4_cur_cb);
                let mut elbg_v1: ELBG<YUVCode, YUVCodeSum> = ELBG::new(&self.v1_cur_cb);
                let mut elbg_v4: ELBG<YUVCode, YUVCodeSum> = ELBG::new(&self.v4_cur_cb);
                elbg_v1.quantise(&self.v1_entries, &mut self.v1_cur_cb);
                elbg_v4.quantise(&self.v4_entries, &mut self.v4_cur_cb);
            },
            QuantMode::MedianCut => {
                quantise_median_cut::<YUVCode, YUVCodeSum>(&self.v1_entries, &mut self.v1_cur_cb);
                quantise_median_cut::<YUVCode, YUVCodeSum>(&self.v4_entries, &mut self.v4_cur_cb);
            },
        };
    }
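    /// Encodes a keyframe: every strip gets freshly quantised codebooks and every block
    /// is coded in either V1 (one index per 4x4 block) or V4 (four indices) mode.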
    fn encode_intra(&mut self, bw: &mut ByteWriter, in_frm: &NAVideoBuffer<u8>) -> EncoderResult<bool> {
        let (width, height) = in_frm.get_dimensions(0);
        let mut strip_h = (height / self.nstrips + 3) & !3;
        if strip_h == 0 {
            self.nstrips = 1;
            strip_h = height;
        }
        let mut start_line = 0;
        let mut end_line = strip_h;

        bw.write_byte(0)?; // intra flag
        bw.write_u24be(0)?; // frame size
        let frame_data_pos = bw.tell();
        bw.write_u16be(width as u16)?;
        bw.write_u16be(height as u16)?;
        bw.write_u16be(self.nstrips as u16)?;

        for entry in self.v1_cb.iter_mut() {
            self.rng.fill_entry(entry);
        }
        for entry in self.v4_cb.iter_mut() {
            self.rng.fill_entry(entry);
        }
        while start_line < height {
            self.read_strip(in_frm, start_line, end_line);

            self.quant_vectors();
            if self.grayscale {
                for cw in self.v1_cur_cb.iter_mut() {
                    cw.u = 128;
                    cw.v = 128;
                }
                for cw in self.v4_cur_cb.iter_mut() {
                    cw.u = 128;
                    cw.v = 128;
                }
            }

            self.v1_idx.truncate(0);
            self.v4_idx.truncate(0);
            self.masks.reset();

            for (v1_entry, v4_entries) in self.v1_entries.iter().zip(self.v4_entries.chunks(4)) {
                let (v1_idx, v1_dist) = Self::find_nearest(&self.v1_cur_cb, *v1_entry);
                if v1_dist == 0 {
                    self.masks.put_v1();
                    self.v1_idx.push(v1_idx);
                    continue;
                }
                let (v40_idx, v40_dist) = Self::find_nearest(&self.v4_cur_cb, v4_entries[0]);
                let (v41_idx, v41_dist) = Self::find_nearest(&self.v4_cur_cb, v4_entries[1]);
                let (v42_idx, v42_dist) = Self::find_nearest(&self.v4_cur_cb, v4_entries[2]);
                let (v43_idx, v43_dist) = Self::find_nearest(&self.v4_cur_cb, v4_entries[3]);
                if v40_dist + v41_dist + v42_dist + v43_dist > v1_dist {
                    self.masks.put_v4();
                    self.v4_idx.push(v40_idx);
                    self.v4_idx.push(v41_idx);
                    self.v4_idx.push(v42_idx);
                    self.v4_idx.push(v43_idx);
                } else {
                    self.masks.put_v1();
                    self.v1_idx.push(v1_idx);
                }
            }
            self.masks.end();

            let mut is_intra_strip = start_line == 0;
            let (upd_v1, upd_v4) = if !is_intra_strip {
                let cb_size = if self.grayscale { 4 } else { 6 };
                (Self::can_update_cb(&self.v1_cur_cb, &self.v1_cb, cb_size),
                 Self::can_update_cb(&self.v4_cur_cb, &self.v4_cb, cb_size))
            } else {
                (false, false)
            };
            if !is_intra_strip && !upd_v1 && !upd_v4 {
                is_intra_strip = true;
            }
            bw.write_byte(if is_intra_strip { 0x10 } else { 0x11 })?;
            bw.write_u24be(0)?; // strip size
            let strip_data_pos = bw.tell();
            bw.write_u16be(0)?; // yoff
            bw.write_u16be(0)?; // xoff
            bw.write_u16be((end_line - start_line) as u16)?;
            bw.write_u16be(width as u16)?;

            Self::write_cb(bw, 0x20, &self.v4_cur_cb, &self.v4_cb, self.grayscale, upd_v4)?;
            Self::write_cb(bw, 0x22, &self.v1_cur_cb, &self.v1_cb, self.grayscale, upd_v1)?;

            self.render_stripe(true, start_line, end_line);

            if self.v1_idx.len() == 0 {
                bw.write_byte(0x32)?;
                bw.write_u24be((self.v4_idx.len() + 4) as u32)?;
                bw.write_buf(self.v4_idx.as_slice())?;
            } else {
                bw.write_byte(0x30)?;
                bw.write_u24be(0)?;
                let chunk_pos = bw.tell();
                let mut v1_pos = 0;
                let mut v4_pos = 0;
                for _ in 0..32 {
                    self.v1_idx.push(0);
                    self.v4_idx.push(0);
                    self.v4_idx.push(0);
                    self.v4_idx.push(0);
                    self.v4_idx.push(0);
                }
                for mask in self.masks.masks.iter() {
                    bw.write_u32be(*mask)?;
                    for j in (0..32).rev() {
                        if (mask & (1 << j)) == 0 {
                            bw.write_byte(self.v1_idx[v1_pos])?;
                            v1_pos += 1;
                        } else {
                            bw.write_byte(self.v4_idx[v4_pos])?;
                            bw.write_byte(self.v4_idx[v4_pos + 1])?;
                            bw.write_byte(self.v4_idx[v4_pos + 2])?;
                            bw.write_byte(self.v4_idx[v4_pos + 3])?;
                            v4_pos += 4;
                        }
                    }
                }
                patch_size(bw, chunk_pos)?;
            }

            patch_size(bw, strip_data_pos)?;

            self.v1_cb.copy_from_slice(&self.v1_cur_cb);
            self.v4_cb.copy_from_slice(&self.v4_cur_cb);
            start_line = end_line;
            end_line = (end_line + strip_h).min(height);
        }
        patch_size(bw, frame_data_pos)?;
        Ok(true)
    }
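    /// Encodes an inter frame: blocks close enough to the reconstructed previous frame are
    /// skipped, the rest are coded in V1 or V4 mode as in keyframes.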
    fn encode_inter(&mut self, bw: &mut ByteWriter, in_frm: &NAVideoBuffer<u8>) -> EncoderResult<bool> {
        let (width, height) = in_frm.get_dimensions(0);
        let mut strip_h = (height / self.nstrips + 3) & !3;
        if strip_h == 0 {
            self.nstrips = 1;
            strip_h = height;
        }
        let mut start_line = 0;
        let mut end_line = strip_h;

        bw.write_byte(1)?; // intra flag
        bw.write_u24be(0)?; // frame size
        let frame_data_pos = bw.tell();
        bw.write_u16be(width as u16)?;
        bw.write_u16be(height as u16)?;
        bw.write_u16be(self.nstrips as u16)?;

        while start_line < height {
            self.read_strip(in_frm, start_line, end_line);
            self.calc_skip_dist(in_frm, start_line, end_line);

            self.quant_vectors();
            if self.grayscale {
                for cw in self.v1_cur_cb.iter_mut() {
                    cw.u = 128;
                    cw.v = 128;
                }
                for cw in self.v4_cur_cb.iter_mut() {
                    cw.u = 128;
                    cw.v = 128;
                }
            }

            self.v1_idx.truncate(0);
            self.v4_idx.truncate(0);
            self.masks.reset();

            let mut skip_iter = self.skip_dist.iter();
            for (v1_entry, v4_entries) in self.v1_entries.iter().zip(self.v4_entries.chunks(4)) {
                let skip_dist = *skip_iter.next().unwrap();
                if skip_dist == 0 {
                    self.masks.put_inter(true);
                    continue;
                }
                let (v1_idx, v1_dist) = Self::find_nearest(&self.v1_cur_cb, *v1_entry);
                if skip_dist < v1_dist {
                    self.masks.put_inter(true);
                    continue;
                } else {
                    self.masks.put_inter(false);
                }
                if v1_dist == 0 {
                    self.masks.put_v1();
                    self.v1_idx.push(v1_idx);
                    continue;
                }
                let (v40_idx, v40_dist) = Self::find_nearest(&self.v4_cur_cb, v4_entries[0]);
                let (v41_idx, v41_dist) = Self::find_nearest(&self.v4_cur_cb, v4_entries[1]);
                let (v42_idx, v42_dist) = Self::find_nearest(&self.v4_cur_cb, v4_entries[2]);
                let (v43_idx, v43_dist) = Self::find_nearest(&self.v4_cur_cb, v4_entries[3]);
                if v40_dist + v41_dist + v42_dist + v43_dist > v1_dist {
                    self.masks.put_v4();
                    self.v4_idx.push(v40_idx);
                    self.v4_idx.push(v41_idx);
                    self.v4_idx.push(v42_idx);
                    self.v4_idx.push(v43_idx);
                } else {
                    self.masks.put_v1();
                    self.v1_idx.push(v1_idx);
                }
            }
            self.masks.end();

            let (upd_v1, upd_v4) = {
                let cb_size = if self.grayscale { 4 } else { 6 };
                (Self::can_update_cb(&self.v1_cur_cb, &self.v1_cb, cb_size),
                 Self::can_update_cb(&self.v4_cur_cb, &self.v4_cb, cb_size))
            };
            bw.write_byte(0x11)?;
            bw.write_u24be(0)?; // strip size
            let strip_data_pos = bw.tell();
            bw.write_u16be(0)?; // yoff
            bw.write_u16be(0)?; // xoff
            bw.write_u16be((end_line - start_line) as u16)?;
            bw.write_u16be(width as u16)?;

            Self::write_cb(bw, 0x20, &self.v4_cur_cb, &self.v4_cb, self.grayscale, upd_v4)?;
            Self::write_cb(bw, 0x22, &self.v1_cur_cb, &self.v1_cb, self.grayscale, upd_v1)?;

            self.render_stripe(false, start_line, end_line);

            bw.write_byte(0x31)?;
            bw.write_u24be(0)?;
            let chunk_pos = bw.tell();
            let mut v1_pos = 0;
            let mut v4_pos = 0;
            for _ in 0..32 {
                self.v1_idx.push(0);
                self.v4_idx.push(0);
                self.v4_idx.push(0);
                self.v4_idx.push(0);
                self.v4_idx.push(0);
            }
            let mut skip = true;
            for mask in self.masks.masks.iter() {
                bw.write_u32be(*mask)?;
                if *mask == 0 { continue; }
                let mut bit = 1 << 31;
                while bit > 0 {
                    if skip {
                        skip = (mask & bit) == 0;
                        bit >>= 1;
                    } else {
                        if (mask & bit) == 0 {
                            bw.write_byte(self.v1_idx[v1_pos])?;
                            v1_pos += 1;
                        } else {
                            bw.write_byte(self.v4_idx[v4_pos])?;
                            bw.write_byte(self.v4_idx[v4_pos + 1])?;
                            bw.write_byte(self.v4_idx[v4_pos + 2])?;
                            bw.write_byte(self.v4_idx[v4_pos + 3])?;
                            v4_pos += 4;
                        }
                        bit >>= 1;
                        skip = true;
                    }
                }
            }
            patch_size(bw, chunk_pos)?;

            patch_size(bw, strip_data_pos)?;

            self.v1_cb.copy_from_slice(&self.v1_cur_cb);
            self.v4_cb.copy_from_slice(&self.v4_cur_cb);
            start_line = end_line;
            end_line = (end_line + strip_h).min(height);
        }
        patch_size(bw, frame_data_pos)?;
        Ok(true)
    }
}

impl NAEncoder for CinepakEncoder {
    fn negotiate_format(&self, encinfo: &EncodeParameters) -> EncoderResult<EncodeParameters> {
        match encinfo.format {
            NACodecTypeInfo::None => {
                let mut ofmt = EncodeParameters::default();
                ofmt.format = NACodecTypeInfo::Video(NAVideoInfo::new(0, 0, true, YUV420_FORMAT));
                Ok(ofmt)
            },
            NACodecTypeInfo::Audio(_) => return Err(EncoderError::FormatError),
            NACodecTypeInfo::Video(vinfo) => {
                let pix_fmt = if vinfo.format == GRAY_FORMAT { GRAY_FORMAT } else { YUV420_FORMAT };
                let outinfo = NAVideoInfo::new((vinfo.width + 3) & !3, (vinfo.height + 3) & !3, true, pix_fmt);
                let mut ofmt = *encinfo;
                ofmt.format = NACodecTypeInfo::Video(outinfo);
                Ok(ofmt)
            }
        }
    }
    fn init(&mut self, stream_id: u32, encinfo: EncodeParameters) -> EncoderResult<NAStreamRef> {
        match encinfo.format {
            NACodecTypeInfo::None => Err(EncoderError::FormatError),
            NACodecTypeInfo::Audio(_) => Err(EncoderError::FormatError),
            NACodecTypeInfo::Video(vinfo) => {
                if vinfo.format != YUV420_FORMAT && vinfo.format != GRAY_FORMAT {
                    return Err(EncoderError::FormatError);
                }
                if ((vinfo.width | vinfo.height) & 3) != 0 {
                    return Err(EncoderError::FormatError);
                }
                if (vinfo.width | vinfo.height) >= (1 << 16) {
                    return Err(EncoderError::FormatError);
                }

                let out_info = NAVideoInfo::new(vinfo.width, vinfo.height, false, vinfo.format);
                let info = NACodecInfo::new("cinepak", NACodecTypeInfo::Video(out_info.clone()), None);
                let mut stream = NAStream::new(StreamType::Video, stream_id, info, encinfo.tb_num, encinfo.tb_den);
                stream.set_num(stream_id as usize);
                let stream = stream.into_ref();

                self.stream = Some(stream.clone());
                self.quality = encinfo.quality;
                self.grayscale = vinfo.format != YUV420_FORMAT;
                let num_blocks = vinfo.width / 2 * vinfo.height / 2;
                self.v1_entries = Vec::with_capacity(num_blocks);
                self.v4_entries = Vec::with_capacity(num_blocks * 4);
                self.v1_idx = Vec::with_capacity(num_blocks);
                self.v4_idx = Vec::with_capacity(num_blocks * 4);
                self.skip_dist = Vec::with_capacity(vinfo.width / 4 * vinfo.height / 4);

                let buf = alloc_video_buffer(out_info, 2)?;
                self.lastfrm = Some(buf.get_vbuf().unwrap());

                Ok(stream)
            },
        }
    }
    fn encode(&mut self, frm: &NAFrame) -> EncoderResult<()> {
        let buf = frm.get_buffer();
        if let Some(ref vbuf) = buf.get_vbuf() {
            let mut dbuf = Vec::with_capacity(4);
            let mut gw = GrowableMemoryWriter::new_write(&mut dbuf);
            let mut bw = ByteWriter::new(&mut gw);
            let is_intra = if self.frmcount == 0 {
                self.encode_intra(&mut bw, vbuf)?
            } else {
                self.encode_inter(&mut bw, vbuf)?
            };
            self.pkt = Some(NAPacket::new(self.stream.clone().unwrap(), frm.ts, is_intra, dbuf));
            self.frmcount += 1;
            if self.frmcount == self.key_int {
                self.frmcount = 0;
            }
            Ok(())
        } else {
            Err(EncoderError::InvalidParameters)
        }
    }
    fn get_packet(&mut self) -> EncoderResult<Option<NAPacket>> {
        let mut npkt = None;
        std::mem::swap(&mut self.pkt, &mut npkt);
        Ok(npkt)
    }
    fn flush(&mut self) -> EncoderResult<()> {
        self.frmcount = 0;
        Ok(())
    }
}

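/// Options recognised by the Cinepak encoder.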
const ENCODER_OPTS: &[NAOptionDefinition] = &[
    NAOptionDefinition {
        name: KEYFRAME_OPTION, description: KEYFRAME_OPTION_DESC,
        opt_type: NAOptionDefinitionType::Int(Some(0), Some(128)) },
    NAOptionDefinition {
        name: "nstrips", description: "Number of strips per frame (0 - automatic)",
        opt_type: NAOptionDefinitionType::Int(Some(0), Some(16)) },
    NAOptionDefinition {
        name: "quant_mode", description: "Quantisation mode",
        opt_type: NAOptionDefinitionType::String(Some(&["elbg", "hybrid", "mediancut"])) },
];

impl NAOptionHandler for CinepakEncoder {
    fn get_supported_options(&self) -> &[NAOptionDefinition] { ENCODER_OPTS }
    fn set_options(&mut self, options: &[NAOption]) {
        for option in options.iter() {
            for opt_def in ENCODER_OPTS.iter() {
                if opt_def.check(option).is_ok() {
                    match option.name {
                        KEYFRAME_OPTION => {
                            if let NAValue::Int(intval) = option.value {
                                self.key_int = intval as u8;
                            }
                        },
                        "nstrips" => {
                            if let NAValue::Int(intval) = option.value {
                                self.nstrips = intval as usize;
                            }
                        },
                        "quant_mode" => {
                            if let NAValue::String(ref str) = option.value {
                                match str.as_str() {
                                    "elbg" => self.qmode = QuantMode::ELBG,
                                    "hybrid" => self.qmode = QuantMode::Hybrid,
                                    "mediancut" => self.qmode = QuantMode::MedianCut,
                                    _ => {},
                                };
                            }
                        },
                        _ => {},
                    };
                }
            }
        }
    }
    fn query_option_value(&self, name: &str) -> Option<NAValue> {
        match name {
            "key_int" => Some(NAValue::Int(i64::from(self.key_int))),
            "nstrips" => Some(NAValue::Int(self.nstrips as i64)),
            "quant_mode" => Some(NAValue::String(self.qmode.to_string())),
            _ => None,
        }
    }
}

pub fn get_encoder() -> Box<dyn NAEncoder + Send> {
    Box::new(CinepakEncoder::new())
}

#[cfg(test)]
mod test {
    use nihav_core::codecs::*;
    use nihav_core::demuxers::*;
    use nihav_core::muxers::*;
    use crate::*;
    use nihav_codec_support::test::enc_video::*;

    #[test]
    fn test_cinepak_encoder() {
        let mut dmx_reg = RegisteredDemuxers::new();
        generic_register_all_demuxers(&mut dmx_reg);
        let mut dec_reg = RegisteredDecoders::new();
        generic_register_all_codecs(&mut dec_reg);
        let mut mux_reg = RegisteredMuxers::new();
        generic_register_all_muxers(&mut mux_reg);
        let mut enc_reg = RegisteredEncoders::new();
        generic_register_all_encoders(&mut enc_reg);

        let dec_config = DecoderTestParams {
            demuxer: "avi",
            in_name: "assets/Misc/TalkingHead_352x288.avi",
            stream_type: StreamType::Video,
            limit: Some(2),
            dmx_reg, dec_reg,
        };
        let enc_config = EncoderTestParams {
            muxer: "avi",
            enc_name: "cinepak",
            out_name: "cinepak.avi",
            mux_reg, enc_reg,
        };
        let dst_vinfo = NAVideoInfo {
            width: 0,
            height: 0,
            format: YUV420_FORMAT,
            flipped: true,
        };
        let enc_params = EncodeParameters {
            format: NACodecTypeInfo::Video(dst_vinfo),
            quality: 0,
            bitrate: 0,
            tb_num: 0,
            tb_den: 0,
            flags: 0,
        };
        test_encoding_to_file(&dec_config, &enc_config, enc_params);
    }
}