use nihav_core::muxers::*;
use nihav_core::reorder::*;
use nihav_core::scale::*;
-use nihav_core::soundcvt::*;
use nihav_registry::detect;
use nihav_registry::register;
use std::env;
mod demux;
use crate::demux::*;
mod null;
+mod acvt;
+use crate::acvt::*;
fn format_time(ms: u64) -> String {
let s = ms / 1000;
}
}
+/// Parses a command-line bitrate string into bits per second.
+///
+/// Accepts a run of decimal digits optionally followed by a single
+/// multiplier suffix: 'k'/'K' (×1000) or 'm'/'M' (×1000000), e.g.
+/// "1500", "128k", "4M". Any character after the suffix — including
+/// further digits — is rejected. Returns Err(()) on malformed input
+/// or when the result could overflow u32.
+fn parse_bitrate(strval: &str) -> Result<u32, ()> {
+ let mut val = 0;
+ let mut has_suffix = false;
+ for ch in strval.chars() {
+ match ch {
+ // a suffix must be the final character; anything after it is an error
+ _ if has_suffix => return Err(()),
+ '0'..='9' => {
+ // conservative guard: keeps val small enough that
+ // val * 10 + digit below cannot exceed u32::MAX
+ if val >= std::u32::MAX / 100 {
+ return Err(());
+ }
+ // unwrap_or(0) is unreachable here — ch is known to be a decimal digit
+ val = val * 10 + ch.to_digit(10).unwrap_or(0);
+ },
+ 'k' | 'K' => {
+ // guard so val * 1000 cannot overflow u32
+ if val >= std::u32::MAX / 1000 {
+ return Err(());
+ }
+ val *= 1000;
+ has_suffix = true;
+ },
+ 'm' | 'M' => {
+ // guard so val * 1000000 cannot overflow u32
+ if val >= std::u32::MAX / 1000000 {
+ return Err(());
+ }
+ val *= 1000000;
+ has_suffix = true;
+ },
+ // any other character is invalid in a bitrate spec
+ _ => return Err(()),
+ };
+ }
+ Ok(val)
+}
+
struct OptionArgs {
name: String,
value: Option<String>,
+// Describes how a decoded frame must be converted before it is handed
+// to the encoder of a given output stream.
enum OutputConvert {
+ // video path: scaler plus the destination buffer it renders into
Video(NAScale, NABufferType),
- Audio(NAAudioInfo, NAChannelMap),
+ // audio path: converter object owning queueing and format-conversion state
+ Audio(AudioConverter),
+ // stream is copied/encoded without conversion
None,
}
}
},*/
"bitrate" => {
- let ret = oval[1].parse::<u32>();
+ let ret = parse_bitrate(oval[1]);
if let Ok(val) = ret {
ostr.enc_params.bitrate = val;
} else {
return false;
},
};
+ let acvt = AudioConverter::new(sainfo, dainfo, dchmap);
//todo channelmap
- OutputConvert::Audio(*dainfo, dchmap)
+ OutputConvert::Audio(acvt)
}
},
_ => OutputConvert::None,
},
};
//todo channelmap
- OutputConvert::Audio(*dainfo, dchmap)
+ let acvt = AudioConverter::new(sainfo, dainfo, dchmap);
+ OutputConvert::Audio(acvt)
}
},
_ => OutputConvert::None,
let buf = frm.get_buffer();
let cbuf = if let NABufferType::None = buf {
if (encoder.get_capabilities() & ENC_CAPS_SKIPFRAME) == 0 {
- println!("encoder does not support skip frames, skipping");
- return true;
+ match cvt {
+ OutputConvert::Video(_, ref mut dbuf) => dbuf.clone(),
+ _ => {
+ println!("encoder does not support skip frames, skipping");
+ return true;
+ },
+ }
+ } else {
+ buf
}
- buf
} else {
match cvt {
OutputConvert::None => buf,
}
dbuf.clone()
},
- OutputConvert::Audio(ref dinfo, ref dchmap) => {
- let ret = convert_audio_frame(&buf, dinfo, dchmap);
- if ret.is_err() {
+ OutputConvert::Audio(ref mut acvt) => {
+ if !acvt.queue_frame(buf, frm.get_time_information()) {
println!("error converting audio for stream {}", dst_id);
return false;
}
- ret.unwrap()
+ return true;
},
}
};
break;
}
let frm = ret.unwrap();
+ let tinfo = frm.get_info();
reorderer.add_frame(frm);
while let Some(frm) = reorderer.get_frame() {
if !encode_frame(dst_id, encoder, cvt, frm, &transcoder.scale_opts) {
mux.mux_frame(pkt).unwrap();
}
}
+ if let OutputConvert::Audio(ref mut acvt) = cvt {
+ while let Some(ofrm) = acvt.get_frame(tinfo.clone()) {
+ if encoder.encode(&ofrm).is_err() {
+ break;
+ }
+ while let Ok(Some(pkt)) = encoder.get_packet() {
+ if transcoder.end != NATimePoint::None && !pkt.ts.less_than(transcoder.end) { break 'main_loop; }
+ let pkt_size = pkt.get_buffer().len();
+ adata_size += pkt_size;
+ mux.mux_frame(pkt).unwrap();
+ }
+ }
+ }
} else {
println!("no decoder for stream {}", src_id);
break;