//! Per-torrent state: connected peers, piece bookkeeping, tracker references
//! and the outgoing message queue for a single torrent.

use crate::tracker_manager::{TrackerId, TrackerManager};
use crate::MAX_OPEN_REQUESTS;
use bytes::Bytes;
use ring::digest::{digest, SHA1_FOR_LEGACY_USE_ONLY};
use std::cmp::max;
use std::collections::{HashMap, HashSet, VecDeque};
use std::net::SocketAddr;
use std::ops::Add;
use std::option::Option::Some;
use std::path::PathBuf;
use std::time::{Duration, Instant};
use torment_core::infohash::v1::U160;
use torment_core::metainfo::{MetaInfo, Torrent};
use torment_core::utils::EphemeralSet;
use torment_core::{Bitfield, REQUEST_SIZE};
use torment_peer::message::{Handshake, Message, PieceMessage};
use torment_peer::{Peer, PeerProtocol};
use torment_storage::{StorageMap, ToStorageMap};

#[derive(Debug)]
pub enum Source {
    Torrent(TorrentSource),
    MetaInfo(MetaInfoSource),
}

#[derive(Debug)]
pub struct MetaInfoSource {
    info_hash: U160,
}

#[derive(Debug)]
pub struct TorrentSource {
    // Type parameter assumed: path of the .torrent file this source was loaded from.
    file: Option<PathBuf>,
    torrent: Option<Torrent>,
}

#[derive(Debug)]
pub struct TorrentTarget {
    pub path: PathBuf,
    pub is_base_path: bool,
}

#[derive(Debug)]
pub struct TorrentManager {
    info_hash: U160,
    bitfield: Bitfield,
    /// Tracker ids grouped per announce tier.
    trackers: Vec<Vec<TrackerId>>,
    source: Source,
    target: TorrentTarget,
    peers: HashMap<U160, Peer>,
    /// Outgoing wire messages, drained by the caller via `dump_queue`/`next`.
    queue: VecDeque<(U160, Message)>,
    storage_map: StorageMap,
    /// Missing pieces to request, ordered rarest-first by `house_keeping`.
    requests_queue: VecDeque<u32>,
    /// Piece index -> block offsets currently in flight (entries carry an expiry).
    open_requests: HashMap<u32, EphemeralSet<u32>>,
    uploaded: usize,
    downloaded: usize,
}

impl TorrentManager {
    pub fn peer_count(&self) -> usize {
        self.peers.len()
    }

    pub fn trackers(&self) -> &Vec<Vec<TrackerId>> {
        &self.trackers
    }

    pub fn info_hash(&self) -> U160 {
        self.info_hash
    }

    pub fn is_done(&self) -> bool {
        self.bitfield.all()
    }

    pub fn uploaded(&self) -> usize {
        self.uploaded
    }

    pub fn downloaded(&self) -> usize {
        self.downloaded
    }

    pub fn bytes_left(&self) -> i64 {
        let mut size = self.storage_map.size() as i64;
        let piece_length = self.storage_map.get_piece_length(0) as i64;
        for i in 0..self.bitfield.size() as u32 {
            if self.bitfield.get(i) {
                size -= piece_length;
            }
        }

        // The last piece is usually shorter than piece_length, so clamp at zero.
        max(0, size)
    }

    pub fn dump_queue(&mut self) -> VecDeque<(U160, Message)> {
        std::mem::take(&mut self.queue)
    }

    pub fn from_torrent(
        torrent: Torrent,
        target: TorrentTarget,
        tracker_manager: Option<&mut TrackerManager>,
    ) -> TorrentManager {
        // Resolve every announce URL to a tracker id, keeping the tier structure.
        let trackers = tracker_manager
            .map(|manager| {
                torrent
                    .announce_list()
                    .iter()
                    .map(|tier_list| {
                        tier_list
                            .iter()
                            .filter_map(|tracker| manager.get_tracker_id(tracker))
                            .collect::<Vec<_>>()
                    })
                    .collect::<Vec<_>>()
            })
            .unwrap_or(vec![]);

        TorrentManager {
            info_hash: torrent.info_hash(),
            bitfield: Bitfield::with_size(torrent.meta_info().pieces()),
            trackers,
            source: Source::Torrent(TorrentSource {
                file: None,
                torrent: Some(torrent.clone()),
            }),
            storage_map: torrent.to_storage_map(&target.path, target.is_base_path),
            requests_queue: Default::default(),
            target,
            peers: HashMap::new(),
            queue: Default::default(),
            open_requests: Default::default(),
            uploaded: 0,
            downloaded: 0,
        }
    }

    pub fn handshake(
        &mut self,
        handshake: Handshake,
        addr: SocketAddr,
        protocol: PeerProtocol,
    ) -> bool {
        if handshake.info_hash() != self.info_hash {
            return false;
        }

        // Reject a known peer id that suddenly shows up from a different address.
        if let Some(peer) = self.peers.get(&handshake.peer_id()) {
            if peer.addr() != addr {
                return false;
            }
        }

        let meta_info = self.meta_info();
        let mut peer = Peer::new(addr, protocol, handshake, meta_info);
        peer.send_bitfield(&self.bitfield);
        self.peers.insert(peer.id(), peer);

        true
    }

    fn meta_info(&mut self) -> &MetaInfo {
        if let Source::Torrent(torrent) = &self.source {
            return torrent
                .torrent
                .as_ref()
                .expect("torrent metadata is not loaded yet")
                .meta_info();
        }

        panic!("Can't resolve MetaInfo for torrent")
    }
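    /// Runs a single message from `peer_id` through the peer state machine and acts on
    /// the results: completed blocks are written to storage, incoming block requests
    /// are answered from disk, and any piece finished by this message is SHA-1
    /// verified before being announced to every peer with a `Have`. Returns `false`
    /// if the peer is unknown or did not accept the message.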
    pub fn process(&mut self, peer_id: U160, message: Message) -> bool {
        let mut queue = vec![];

        if let Message::Piece(piece) = &message {
            self.downloaded += piece.piece.len()
        }

        let ok = if let Some(peer) = self.peers.get_mut(&peer_id) {
            if peer.process(message) {
                // Write every block the peer completed for us, remembering which
                // pieces might now be finished.
                while let Some(piece) = peer.next_piece() {
                    if self.storage_map.has_piece(piece.index() as usize) {
                        continue;
                    }

                    if self
                        .storage_map
                        .write(
                            piece.index() as usize,
                            piece.offset() as usize,
                            piece.piece(),
                        )
                        .unwrap()
                    {
                        queue.push(piece.index() as usize);
                    }
                }

                while peer.next_have().is_some() {
                    // Nothing to do here yet; piece availability is re-checked during house_keeping.
                }

                // Serve the blocks the peer asked for, but only for pieces we own.
                while let Some(piece_request) = peer.next_request() {
                    if !self.bitfield.get(piece_request.index()) {
                        continue;
                    }

                    let mut buffer = vec![0u8; piece_request.length() as usize];
                    self.storage_map
                        .read(
                            piece_request.index() as usize,
                            piece_request.offset() as usize,
                            &mut buffer,
                        )
                        .unwrap();

                    self.queue.push_back((
                        peer_id,
                        Message::Piece(PieceMessage {
                            index: piece_request.index(),
                            offset: piece_request.offset(),
                            piece: Bytes::from(buffer),
                        }),
                    ))
                }

                self.queue_requests(peer_id);
                let msgs = self.peers.get_mut(&peer_id).unwrap().dump_queue();
                self.queue_messages(peer_id, msgs);

                true
            } else {
                false
            }
        } else {
            false
        };

        // offload to io thread?
        for have in queue {
            let piece_hash = self.meta_info().hash(have);
            let piece_data = self.storage_map.read_piece(have).unwrap();
            let res = digest(&SHA1_FOR_LEGACY_USE_ONLY, &piece_data);
            if piece_hash != res.as_ref() {
                println!("=> Piece#{} failed verification", have);
                self.storage_map.wipe_piece(have);
                continue;
            } else {
                println!("=> Piece#{} verified", have);
            }

            self.bitfield.set(have as u32);

            // Announce the new piece and unchoke peers that still need it.
            let keys = self.peers.keys().copied().collect::<Vec<_>>();
            for key in keys {
                self.queue.push_back((key, Message::Have(have as u32)));

                if !self.peers[&key].has_piece(have as u32) && self.peers[&key].we_choked() {
                    self.peers.get_mut(&key).unwrap().set_we_choked(false);
                }
            }
        }

        ok
    }

    fn queue_messages(&mut self, peer_id: U160, messages: VecDeque<Message>) {
        for msg in messages {
            if let Message::Piece(piece) = &msg {
                self.uploaded += piece.piece.len();
            }

            self.queue.push_back((peer_id, msg));
        }
    }

    pub fn next(&mut self) -> Option<(U160, Message)> {
        self.queue.pop_front()
    }

    /// Picks the next (piece, offset, length) block to request from `peer`, walking the
    /// rarest-first queue and skipping pieces the peer does not have. A chosen block is
    /// reserved for ten seconds so it is not handed out to another peer in the meantime.
    fn get_next_request_piece_for_peer(&mut self, peer: &U160) -> Option<(u32, u32, u32)> {
        let mut queue_index = 0;
        while let Some(index) = self.requests_queue.get(queue_index).copied() {
            if !self.peers[peer].has_piece(index) {
                queue_index += 1;
                continue;
            }

            let bits = self.storage_map.get_bits_in_pieces(index as usize);
            if self.open_requests.get(&index).map_or(0, |set| set.len()) >= bits {
                // Every block of this piece is already in flight; drop it from the
                // queue and look at the next candidate.
                self.requests_queue.remove(queue_index);
                continue;
            }

            let set = self
                .open_requests
                .entry(index)
                .or_insert_with(EphemeralSet::new);

            for i in 0..bits as u32 {
                let bit_offset = i * REQUEST_SIZE as u32;
                if set.contains(&bit_offset) {
                    continue;
                }

                set.insert(bit_offset, Instant::now().add(Duration::from_secs(10)));

                return Some((
                    index,
                    bit_offset,
                    self.storage_map
                        .get_bit_length(index as usize, bit_offset as usize),
                ));
            }

            // No free block in this piece right now; move on.
            queue_index += 1;
        }

        None
    }
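    /// Periodic maintenance: rebuilds the request queue so the rarest missing pieces
    /// are asked for first, updates interest towards every peer, and tops up the
    /// outstanding block requests of peers that are not choking us.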
    pub fn house_keeping(&mut self) {
        // Piece index -> number of peers that have it (only for pieces we still miss).
        let mut map: HashMap<u32, usize> = HashMap::new();
        let mut interested_peers = HashSet::new();

        if self.peers.is_empty() {
            return;
        }

        let mut done = 0;
        for i in 0..self.meta_info().pieces() as u32 {
            if self.bitfield.get(i) {
                done += 1;
                // Don't need to queue anything
                continue;
            }

            let mut entries = 0;
            for peer in self.peers.values_mut() {
                if peer.has_piece(i) {
                    entries += 1;
                    interested_peers.insert(peer.id());
                }
            }

            if entries == 0 {
                // Can't queue entries without peers
                continue;
            }

            map.insert(i, entries);
        }

        println!(
            "{}%",
            (done as f64 / self.meta_info().pieces() as f64) * 100f64
        );

        // Rarest first: the fewer peers have a piece, the earlier we request it.
        let mut pieces: Vec<_> = map.keys().copied().collect();
        pieces.sort_by_key(|piece| map[piece]);

        self.requests_queue = VecDeque::from(pieces);

        for peer in self.peers.values_mut() {
            if !interested_peers.contains(&peer.id()) || self.requests_queue.is_empty() {
                peer.lost_interest();
                continue;
            }

            peer.interested();

            if peer.is_choked() {
                // Don't send requests if they're choked
                continue;
            }
        }

        let peer_ids = self.peers.keys().copied().collect::<Vec<_>>();
        for peer_id in peer_ids {
            self.queue_requests(peer_id);
            let message_queue = self.peers.get_mut(&peer_id).unwrap().dump_queue();
            self.queue_messages(peer_id, message_queue);
        }
    }

    /// Fills the peer's pipeline up to MAX_OPEN_REQUESTS outstanding block requests,
    /// unless the peer is currently choking us.
    fn queue_requests(&mut self, peer_id: U160) {
        if self.peers[&peer_id].is_choked() {
            return;
        }

        let amount = MAX_OPEN_REQUESTS - self.peers[&peer_id].count_open_requests();
        for _ in 0..amount {
            if let Some((index, offset, length)) = self.get_next_request_piece_for_peer(&peer_id) {
                self.peers
                    .get_mut(&peer_id)
                    .unwrap()
                    .request(index, offset, length);
            } else {
                break;
            }
        }
    }
}