diff --git a/src/backend/cynthion.rs b/src/backend/cynthion.rs index 308a678a..76a6264e 100644 --- a/src/backend/cynthion.rs +++ b/src/backend/cynthion.rs @@ -29,7 +29,7 @@ const PID: u16 = 0x615b; const CLASS: u8 = 0xff; const SUBCLASS: u8 = 0x10; -const PROTOCOL: u8 = 0x00; +const PROTOCOL: u8 = 0x01; const ENDPOINT: u8 = 0x81; @@ -142,6 +142,8 @@ pub struct CynthionQueue { pub struct CynthionStream { receiver: mpsc::Receiver>, buffer: VecDeque, + padding_due: bool, + total_clk_cycles: u64, } pub struct CynthionStop { @@ -149,6 +151,19 @@ pub struct CynthionStop { worker: JoinHandle::<()>, } +pub struct CynthionPacket { + pub timestamp_ns: u64, + pub bytes: Vec, +} + +/// Convert 60MHz clock cycles to nanoseconds, rounding down. +fn clk_to_ns(clk_cycles: u64) -> u64 { + const TABLE: [u64; 3] = [0, 16, 33]; + let quotient = clk_cycles / 3; + let remainder = clk_cycles % 3; + quotient * 50 + TABLE[remainder as usize] +} + /// Check whether a Cynthion device has an accessible analyzer interface. fn check_device(device_info: &DeviceInfo) -> Result<(InterfaceSelection, Vec), Error> @@ -302,6 +317,8 @@ impl CynthionHandle { CynthionStream { receiver: rx, buffer: VecDeque::new(), + padding_due: false, + total_clk_cycles: 0, }, CynthionStop { stop_request: stop_tx, @@ -437,9 +454,9 @@ impl CynthionQueue { } impl Iterator for CynthionStream { - type Item = Vec; + type Item = CynthionPacket; - fn next(&mut self) -> Option> { + fn next(&mut self) -> Option { loop { // Do we have another packet already in the buffer? match self.next_buffered_packet() { @@ -458,25 +475,71 @@ impl Iterator for CynthionStream { } impl CynthionStream { - fn next_buffered_packet(&mut self) -> Option> { - // Do we have the length header for the next packet? - let buffer_len = self.buffer.len(); - if buffer_len <= 2 { - return None; + fn next_buffered_packet(&mut self) -> Option { + // Are we waiting for a padding byte? 
+ if self.padding_due { + if self.buffer.is_empty() { + return None; + } else { + self.buffer.pop_front(); + self.padding_due = false; + } + } + + // Loop over any non-packet events, until we get to a packet. + loop { + // Do we have the length and timestamp for the next packet/event? + if self.buffer.len() < 4 { + return None; + } + + if self.buffer[0] == 0xFF { + // This is an event. + let _event_code = self.buffer[1]; + + // Update our cycle count. + self.update_cycle_count(); + + // Remove event from buffer. + self.buffer.drain(0..4); + } else { + // This is a packet, handle it below. + break; + } + } } // Do we have all the data for the next packet? let packet_len = u16::from_be_bytes( [self.buffer[0], self.buffer[1]]) as usize; - if buffer_len <= 2 + packet_len { + if self.buffer.len() <= 4 + packet_len { return None; } - // Remove the length header from the buffer. - self.buffer.drain(0..2); + // Update our cycle count. + self.update_cycle_count(); + + // Remove the length and timestamp from the buffer. + self.buffer.drain(0..4); + + // If packet length is odd, we will need to skip a padding byte after. + if packet_len % 2 == 1 { + self.padding_due = true; + } + + // Remove the rest of the packet from the buffer and return it. + Some(CynthionPacket { + timestamp_ns: clk_to_ns(self.total_clk_cycles), + bytes: self.buffer.drain(0..packet_len).collect() + }) + } + + fn update_cycle_count(&mut self) { + // Decode the cycle count. + let clk_cycles = u16::from_be_bytes( + [self.buffer[2], self.buffer[3]]); - // Remove the packet from the buffer and return it. - Some(self.buffer.drain(0..packet_len).collect()) + // Update our running total. 
+ self.total_clk_cycles += clk_cycles as u64; } } diff --git a/src/capture.rs b/src/capture.rs index 0553303a..33559b4e 100644 --- a/src/capture.rs +++ b/src/capture.rs @@ -35,6 +35,7 @@ pub struct CaptureWriter { pub shared: Arc, pub packet_data: DataWriter, pub packet_index: CompactWriter, + pub packet_times: CompactWriter, pub transaction_index: CompactWriter, pub transfer_index: DataWriter, pub item_index: CompactWriter, @@ -53,6 +54,7 @@ pub struct CaptureReader { endpoint_readers: VecMap, pub packet_data: DataReader, pub packet_index: CompactReader, + pub packet_times: CompactReader, pub transaction_index: CompactReader, pub transfer_index: DataReader, pub item_index: CompactReader, @@ -72,6 +74,7 @@ pub fn create_capture() let (data_writer, data_reader) = data_stream_with_block_size::<_, PACKET_DATA_BLOCK_SIZE>()?; let (packets_writer, packets_reader) = compact_index()?; + let (timestamp_writer, timestamp_reader) = compact_index()?; let (transactions_writer, transactions_reader) = compact_index()?; let (transfers_writer, transfers_reader) = data_stream()?; let (items_writer, items_reader) = compact_index()?; @@ -93,6 +96,7 @@ pub fn create_capture() shared: shared.clone(), packet_data: data_writer, packet_index: packets_writer, + packet_times: timestamp_writer, transaction_index: transactions_writer, transfer_index: transfers_writer, item_index: items_writer, @@ -109,6 +113,7 @@ pub fn create_capture() endpoint_readers: VecMap::new(), packet_data: data_reader, packet_index: packets_reader, + packet_times: timestamp_reader, transaction_index: transactions_reader, transfer_index: transfers_reader, item_index: items_reader, @@ -194,6 +199,7 @@ pub fn create_endpoint() pub type PacketByteId = Id; pub type PacketId = Id; +pub type Timestamp = u64; pub type TransactionId = Id; pub type TransferId = Id; pub type EndpointTransactionId = Id; @@ -515,7 +521,7 @@ pub struct Transaction { start_pid: PID, end_pid: PID, split: Option<(SplitFields, PID)>, - 
packet_id_range: Range, + pub packet_id_range: Range, data_packet_id: Option, payload_byte_range: Option>>, } @@ -862,6 +868,12 @@ impl CaptureReader { self.packet_data.get_range(&range) } + pub fn packet_time(&mut self, id: PacketId) + -> Result + { + self.packet_times.get(id) + } + fn packet_pid(&mut self, id: PacketId) -> Result { @@ -869,7 +881,7 @@ impl CaptureReader { Ok(PID::from(self.packet_data.get(offset)?)) } - fn transaction(&mut self, id: TransactionId) + pub fn transaction(&mut self, id: TransactionId) -> Result { let packet_id_range = self.transaction_index.target_range( @@ -1056,6 +1068,7 @@ pub trait ItemSource { -> Result<(CompletionStatus, u64), Error>; fn summary(&mut self, item: &Item) -> Result; fn connectors(&mut self, item: &Item) -> Result; + fn timestamp(&mut self, item: &Item) -> Result; } impl ItemSource for CaptureReader { @@ -1361,6 +1374,27 @@ impl ItemSource for CaptureReader { ); Ok(connectors) } + + fn timestamp(&mut self, item: &TrafficItem) + -> Result + { + use TrafficItem::*; + let packet_id = match item { + Transfer(transfer_id) => { + let entry = self.transfer_index.get(*transfer_id)?; + let ep_traf = self.endpoint_traffic(entry.endpoint_id())?; + let ep_transaction_id = + ep_traf.transfer_index.get(entry.transfer_id())?; + let transaction_id = + ep_traf.transaction_ids.get(ep_transaction_id)?; + self.transaction_index.get(transaction_id)? 
+ }, + Transaction(.., transaction_id) => + self.transaction_index.get(*transaction_id)?, + Packet(.., packet_id) => *packet_id, + }; + self.packet_time(packet_id) + } } impl ItemSource for CaptureReader { @@ -1579,6 +1613,12 @@ impl ItemSource for CaptureReader { }; Ok(" ".repeat(depth)) } + + fn timestamp(&mut self, _item: &DeviceItem) + -> Result + { + unreachable!() + } } #[cfg(test)] @@ -1588,8 +1628,8 @@ mod tests { use std::io::{BufReader, BufWriter, BufRead, Write}; use std::path::PathBuf; use crate::decoder::Decoder; + use crate::loader::Loader; use itertools::Itertools; - use pcap_file::pcap::PcapReader; fn summarize_item(cap: &mut CaptureReader, item: &TrafficItem, depth: usize) -> String @@ -1643,13 +1683,14 @@ mod tests { ref_path.push("reference.txt"); out_path.push("output.txt"); { - let pcap_file = File::open(cap_path).unwrap(); - let mut pcap_reader = PcapReader::new(pcap_file).unwrap(); + let mut loader = Loader::open(cap_path).unwrap(); let (writer, mut reader) = create_capture().unwrap(); let mut decoder = Decoder::new(writer).unwrap(); - while let Some(result) = pcap_reader.next_raw_packet() { - let packet = result.unwrap().data; - decoder.handle_raw_packet(&packet).unwrap(); + while let Some(result) = loader.next() { + let (packet, timestamp_ns) = result.unwrap(); + decoder + .handle_raw_packet(&packet.data, timestamp_ns) + .unwrap(); } decoder.finish().unwrap(); let out_file = File::create(out_path.clone()).unwrap(); diff --git a/src/decoder.rs b/src/decoder.rs index a1f5bfb9..ef5a5742 100644 --- a/src/decoder.rs +++ b/src/decoder.rs @@ -578,11 +578,12 @@ impl Decoder { Ok(decoder) } - pub fn handle_raw_packet(&mut self, packet: &[u8]) + pub fn handle_raw_packet(&mut self, packet: &[u8], timestamp_ns: u64) -> Result<(), Error> { let data_range = self.capture.packet_data.append(packet)?; let packet_id = self.capture.packet_index.push(data_range.start)?; + self.capture.packet_times.push(timestamp_ns)?; self.transaction_update(packet_id, 
packet)?; Ok(()) } diff --git a/src/loader.rs b/src/loader.rs new file mode 100644 index 00000000..e3b91fd3 --- /dev/null +++ b/src/loader.rs @@ -0,0 +1,57 @@ +use std::fs::File; +use std::io::BufReader; +use std::mem::size_of; +use std::path::PathBuf; + +use pcap_file::{ + pcap::{PcapReader, PcapHeader, RawPcapPacket}, + TsResolution, +}; + +use anyhow::Error; + +pub struct Loader { + pcap: PcapReader>, + pub file_size: u64, + pub bytes_read: u64, + frac_ns: u64, + start_time: Option, +} + +impl Loader { + pub fn open(path: PathBuf) -> Result { + let file = File::open(path)?; + let file_size = file.metadata()?.len(); + let reader = BufReader::new(file); + let pcap = PcapReader::new(reader)?; + let header = pcap.header(); + let bytes_read = size_of::() as u64; + let frac_ns = match header.ts_resolution { + TsResolution::MicroSecond => 1_000, + TsResolution::NanoSecond => 1, + }; + let start_time = None; + Ok(Loader{pcap, file_size, bytes_read, frac_ns, start_time}) + } + + pub fn next(&mut self) -> Option> { + match self.pcap.next_raw_packet() { + None => None, + Some(Err(e)) => Some(Err(Error::from(e))), + Some(Ok(packet)) => { + let raw_timestamp = + packet.ts_sec as u64 * 1_000_000_000 + + packet.ts_frac as u64 * self.frac_ns; + let timestamp = if let Some(start) = self.start_time { + raw_timestamp - start + } else { + self.start_time = Some(raw_timestamp); + 0 + }; + let size = 16 + packet.data.len(); + self.bytes_read += size as u64; + Some(Ok((packet, timestamp))) + } + } + } +} diff --git a/src/main.rs b/src/main.rs index 1794039f..e6f5ca43 100644 --- a/src/main.rs +++ b/src/main.rs @@ -20,6 +20,7 @@ mod decoder; mod expander; mod id; mod index_stream; +mod loader; mod model; mod rcu; mod row_data; diff --git a/src/model/mod.rs b/src/model/mod.rs index 114afcfd..5b25a66b 100644 --- a/src/model/mod.rs +++ b/src/model/mod.rs @@ -25,6 +25,7 @@ glib::wrapper! 
{ } pub trait GenericModel where Self: Sized { + const HAS_TIMES: bool; fn new(capture: CaptureReader, #[cfg(any(test, feature="record-ui-test"))] on_item_update: Rc>) @@ -36,10 +37,13 @@ pub trait GenericModel where Self: Sized { -> Result<(), Error>; fn update(&self) -> Result; fn summary(&self, item: &Item) -> String; + fn timestamp(&self, item: &Item) -> u64; fn connectors(&self, item: &Item) -> String; } impl GenericModel for TrafficModel { + const HAS_TIMES: bool = true; + fn new(capture: CaptureReader, #[cfg(any(test, feature="record-ui-test"))] on_item_update: Rc>) @@ -77,6 +81,12 @@ impl GenericModel for TrafficModel { tree.summary(item) } + fn timestamp(&self, item: &TrafficItem) -> u64 { + let tree_opt = self.imp().tree.borrow(); + let tree = tree_opt.as_ref().unwrap(); + tree.timestamp(item) + } + fn connectors(&self, item: &TrafficItem) -> String { let tree_opt = self.imp().tree.borrow(); let tree = tree_opt.as_ref().unwrap(); @@ -85,6 +95,8 @@ impl GenericModel for TrafficModel { } impl GenericModel for DeviceModel { + const HAS_TIMES: bool = false; + fn new(capture: CaptureReader, #[cfg(any(test, feature="record-ui-test"))] on_item_update: Rc>) @@ -122,6 +134,10 @@ impl GenericModel for DeviceModel { tree.summary(item) } + fn timestamp(&self, _item: &DeviceItem) -> u64 { + unreachable!(); + } + fn connectors(&self, item: &DeviceItem) -> String { let tree_opt = self.imp().tree.borrow(); let tree = tree_opt.as_ref().unwrap(); diff --git a/src/test_cynthion.rs b/src/test_cynthion.rs index 71ea4337..5fd754c4 100644 --- a/src/test_cynthion.rs +++ b/src/test_cynthion.rs @@ -1,5 +1,12 @@ use crate::backend::cynthion::{CynthionDevice, CynthionUsability, Speed}; -use crate::capture::{create_capture, CaptureReader, DeviceId, EndpointId, EndpointTransferId}; +use crate::capture::{ + create_capture, + CaptureReader, + DeviceId, + EndpointId, + EndpointTransferId, + PacketId, +}; use crate::decoder::Decoder; use anyhow::{Context, Error}; @@ -10,16 +17,21 @@ use 
std::thread::sleep; use std::time::Duration; pub fn run_test() { - for (speed, ep_addr, length) in [ - (Speed::High, 0x81, 4096), - (Speed::Full, 0x82, 512), - (Speed::Low, 0x83, 64)] + for (speed, ep_addr, length, sof) in [ + (Speed::High, 0x81, 4096, Some((124500, 125500, 500))), + (Speed::Full, 0x82, 512, Some((995000, 1005000, 50))), + (Speed::Low, 0x83, 64, None)] { - test(speed, ep_addr, length).unwrap(); + test(speed, ep_addr, length, sof).unwrap(); } } -fn test(speed: Speed, ep_addr: u8, length: usize) -> Result<(), Error> { +fn test(speed: Speed, + ep_addr: u8, + length: usize, + sof: Option<(u64, u64, u64)>) + -> Result<(), Error> +{ let desc = speed.description(); println!("\nTesting at {desc}:\n"); @@ -80,7 +92,7 @@ fn test(speed: Speed, ep_addr: u8, length: usize) -> Result<(), Error> { // Decode all packets that were received. for packet in packets { - decoder.handle_raw_packet(&packet) + decoder.handle_raw_packet(&packet.bytes, packet.timestamp_ns) .context("Error decoding packet")?; } @@ -95,8 +107,50 @@ fn test(speed: Speed, ep_addr: u8, length: usize) -> Result<(), Error> { .context("Error counting captured bytes on endpoint")?; println!("Captured {}/{} bytes of data read from test device", bytes_captured.len(), length); - assert_eq!(bytes_captured, completion.data[0..bytes_captured.len()], - "Captured data did not match received data"); + assert_eq!(bytes_captured.len(), length, + "Not all data was captured"); + assert_eq!(bytes_captured, completion.data, + "Captured data did not match received data"); + + if let Some((min_interval, max_interval, min_count)) = sof { + println!("Checking SOF timestamp intervals"); + // Check SOF timestamps have the expected spacing. + // SOF packets are assigned to endpoint ID 1. + // We're looking for the first and only transfer on the endpoint. 
+ let endpoint_id = EndpointId::from(1); + let ep_transfer_id = EndpointTransferId::from(0); + let ep_traf = reader.endpoint_traffic(endpoint_id)?; + let ep_transaction_ids = ep_traf.transfer_index + .target_range(ep_transfer_id, ep_traf.transaction_ids.len())?; + let mut sof_count = 0; + let mut last = None; + for transaction_id in ep_traf.transaction_ids + .get_range(&ep_transaction_ids)? + { + let range = reader.transaction_index + .target_range(transaction_id, reader.packet_index.len())?; + for id in range.start.value..range.end.value { + let packet_id = PacketId::from(id); + let timestamp = reader.packet_times.get(packet_id)?; + if let Some(prev) = last { + let interval = timestamp - prev; + if !(interval > min_interval && interval < max_interval) { + if interval > 10000000 { + // More than 10ms gap, assume host stopped sending. + continue + } else { + panic!("SOF interval of {}ns is out of range", + interval); + } + } + } + sof_count += 1; + last = Some(timestamp); + } + } + println!("Found {} SOF packets with expected interval range", sof_count); + assert!(sof_count > min_count, "Not enough SOF packets captured"); + } Ok(()) } diff --git a/src/test_replay.rs b/src/test_replay.rs index 4b03616f..2889d0e7 100644 --- a/src/test_replay.rs +++ b/src/test_replay.rs @@ -5,10 +5,10 @@ use std::path::PathBuf; use gtk::prelude::*; use itertools::assert_equal; -use pcap_file::pcap::PcapReader; use serde_json::Deserializer; use crate::decoder::Decoder; +use crate::loader::Loader; use crate::model::GenericModel; use crate::row_data::{GenericRowData, TrafficRowData, DeviceRowData}; use crate::record_ui::UiAction; @@ -112,18 +112,15 @@ fn check_replays() { Ok(()) }).unwrap(); if let Some(capture) = capture { - let file = File::open(path) + let loader = Loader::open(path) .expect("Failed to open pcap file"); - let reader = BufReader::new(file); - let pcap = PcapReader::new(reader) - .expect("Failed to read pcap file"); let decoder = Decoder::new(writer) .expect("Failed to 
create decoder"); - replay = Some((pcap, decoder, capture)); + replay = Some((loader, decoder, capture)); } }, (Update(count), - Some((pcap, decoder, capture))) => { + Some((loader, decoder, capture))) => { with_ui(|ui| { ui.recording .borrow_mut() @@ -131,12 +128,11 @@ fn check_replays() { Ok(()) }).unwrap(); while capture.packet_index.len() < count { - let packet = pcap - .next_raw_packet() + let (packet, timestamp_ns) = loader.next() .expect("No next pcap packet") .expect("Error in pcap reader"); decoder - .handle_raw_packet(&packet.data) + .handle_raw_packet(&packet.data, timestamp_ns) .expect("Failed to decode packet"); } update_view() diff --git a/src/tree_list_model.rs b/src/tree_list_model.rs index 93a02512..0c10fb91 100644 --- a/src/tree_list_model.rs +++ b/src/tree_list_model.rs @@ -943,6 +943,11 @@ where Item: 'static + Copy + Debug, Ok(node_rc) } + pub fn timestamp(&self, item: &Item) -> u64 { + let mut cap = self.capture.borrow_mut(); + cap.timestamp(item).unwrap_or(0) + } + pub fn summary(&self, item: &Item) -> String { let mut cap = self.capture.borrow_mut(); match cap.summary(item) { diff --git a/src/ui.rs b/src/ui.rs index 2c492e96..b89887c2 100644 --- a/src/ui.rs +++ b/src/ui.rs @@ -1,8 +1,7 @@ use std::borrow::Cow; use std::cell::RefCell; use std::fs::File; -use std::io::{BufReader, BufWriter, Write}; -use std::mem::size_of; +use std::io::{BufWriter, Write}; use std::path::PathBuf; use std::sync::atomic::{AtomicBool, AtomicU64, Ordering}; use std::time::Duration; @@ -49,7 +48,8 @@ use gtk::{ use pcap_file::{ DataLink, - pcap::{PcapReader, PcapWriter, PcapHeader, RawPcapPacket}, + TsResolution, + pcap::{PcapWriter, PcapHeader, RawPcapPacket}, }; use crate::backend::cynthion::{ @@ -70,6 +70,7 @@ use crate::capture::{ }; use crate::decoder::Decoder; use crate::expander::ExpanderWrapper; +use crate::loader::Loader; use crate::model::{GenericModel, TrafficModel, DeviceModel}; use crate::row_data::{ GenericRowData, @@ -615,6 +616,46 @@ fn create_view( 
view.append_column(&column); view.add_css_class("data-table"); + if Model::HAS_TIMES { + let model = model.clone(); + let factory = SignalListItemFactory::new(); + factory.connect_setup(move |_, list_item| { + let label = Label::new(None); + list_item.set_child(Some(&label)); + }); + let bind = move |list_item: &ListItem| { + let row = list_item + .item() + .context("ListItem has no item")? + .downcast::() + .or_else(|_| bail!("Item is not RowData"))?; + let label = list_item + .child() + .context("ListItem has no child widget")? + .downcast::