19, 19, 19
];
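+// 0.707107 is a truncation of 1/sqrt(2) (std::f32::consts::FRAC_1_SQRT_2), which clippy::approx_constant would otherwise flag in the dither table below.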
+#[allow(clippy::approx_constant)]
const COOK_DITHER_TAB: [f32; 9] = [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.176777, 0.25, 0.707107, 1.0 ];
const COOK_QUANT_CENTROID: [[f32; 14]; 7] = [
#[cfg(any(feature="decoder_realvideo3", feature="decoder_realvideo4"))]
mod rv3040;
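+// Several of the RV3/4/6 modules below spell out multiply-by-zero terms (e.g. `4*0` indices) for symmetry, which trips clippy::erasing_op; allow it at the module level.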
#[cfg(any(feature="decoder_realvideo3", feature="decoder_realvideo4"))]
+#[allow(clippy::erasing_op)]
mod rv34codes;
#[cfg(any(feature="decoder_realvideo3", feature="decoder_realvideo4"))]
+#[allow(clippy::erasing_op)]
mod rv34dsp;
#[cfg(feature="decoder_realvideo1")]
#[cfg(feature="decoder_realvideo3")]
pub mod rv30;
#[cfg(feature="decoder_realvideo3")]
+#[allow(clippy::erasing_op)]
pub mod rv30dsp;
#[cfg(feature="decoder_realvideo4")]
pub mod rv40;
#[cfg(feature="decoder_realvideo4")]
+#[allow(clippy::erasing_op)]
pub mod rv40dsp;
#[cfg(feature="decoder_realvideo6")]
pub mod rv60;
#[cfg(feature="decoder_realvideo6")]
pub mod rv60codes;
#[cfg(feature="decoder_realvideo6")]
+#[allow(clippy::erasing_op)]
pub mod rv60dsp;
#[cfg(feature="decoder_realaudio144")]
];
pub fn realmedia_register_all_codecs(rd: &mut RegisteredDecoders) {
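+// Iterate the decoder table by reference; clippy flags .into_iter() here because it resolves to by-reference iteration anyway, so .iter() states the intent.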
- for decoder in RM_CODECS.into_iter() {
+ for decoder in RM_CODECS.iter() {
rd.add_decoder(decoder.clone());
}
}
}
fn predict(&self, src: &[i32], bits: u8) -> i32 {
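+// Adaptive FIR prediction: dot product of the filter coefficients with the newest `length` history samples, taken in reverse order.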
let mut acc: i32 = 0;
- for (f, s) in src.iter().rev().take(self.length).zip(self.coeffs.into_iter()) {
+ for (f, s) in src.iter().rev().take(self.length).zip(self.coeffs.iter()) {
acc += *f * *s;
}
let bias = 1 << (self.bits - 1);
pub ipred16x16: [fn(buf: &mut [u8], idx: usize, stride: usize); 7],
}
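+// Row pass of the 4x4 coefficient transform; indices like `4*0` are written out to mirror the other rows, hence the erasing_op allow.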
+#[allow(clippy::erasing_op)]
fn row_transform(src: &[i16], dst: &mut [i32]) {
for i in 0..4 {
let z0 = 13 * ((src[i + 4*0] as i32) + (src[i + 4*2] as i32));
else { a as u8 }
}
+#[allow(clippy::erasing_op)]
impl RV34CommonDSP {
pub fn new() -> Self {
Self {
lim_p0q0: i16, lim_p1: i16, lim_q1: i16) {
rv40_weak_loop_filter4(pix, off, stride, 1, filter_p1, filter_q1, alpha, beta, lim_p0q0, lim_p1, lim_q1);
}
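+// The vertical filter variants below apparently trip clippy::eq_op in their internal expressions; silence the lint rather than restructure the arithmetic.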
+#[allow(clippy::eq_op)]
fn rv40_weak_loop_filter4_v(pix: &mut [u8], off: usize, stride: usize,
filter_p1: bool, filter_q1: bool, alpha: i16, beta: i16,
lim_p0q0: i16, lim_p1: i16, lim_q1: i16) {
rv40_loop_strength(pix, off, stride, 1, beta, beta2, edge)
}
+#[allow(clippy::eq_op)]
fn rv40_loop_strength_v(pix: &[u8], off: usize, stride: usize,
beta: i16, beta2: i16, edge: bool) -> (bool, bool, bool) {
let src = &pix[off - 3..][..stride * 3 + 3 + 3];
fn add(&mut self, cand: A) {
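+// Once the list is full, further candidates are ignored; otherwise check whether the value is already present before storing it.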
if self.fill == self.max_size { return; }
let mut unique = true;
- for el in self.list.into_iter().take(self.fill) {
+ for el in self.list.iter().take(self.fill) {
if *el == cand {
unique = false;
break;
}
}
}
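+// Add the default intra prediction angles as further candidates.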
- for el in RV60_CANDIDATE_INTRA_ANGLES.into_iter() {
+ for el in RV60_CANDIDATE_INTRA_ANGLES.iter() {
ipm_cand.add(*el);
}
// actually decode prediction mode
let mut imode = mode;
let mut ipm_cs: [u8; 3] = [ipm_cand.list[0], ipm_cand.list[1], ipm_cand.list[2]];
ipm_cs.sort();
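+// The coded index skips the three (sorted) candidate modes, so advance imode past every candidate it reaches to recover the actual mode.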
- for ic in ipm_cs.into_iter() {
+ for ic in ipm_cs.iter() {
if imode >= *ic {
imode += 1;
}