diff --git a/components/net/blob_loader.rs b/components/net/blob_loader.rs index 50c5455808d..64d918224a6 100644 --- a/components/net/blob_loader.rs +++ b/components/net/blob_loader.rs @@ -13,9 +13,9 @@ use mime::{self, Mime}; use net_traits::blob_url_store::parse_blob_url; use net_traits::filemanager_thread::ReadFileProgress; use net_traits::response::{Response, ResponseBody}; -use net_traits::{http_percent_encode, NetworkError}; +use net_traits::{http_percent_encode, NetworkError, ResourceFetchTiming, ResourceTimingType}; use servo_url::ServoUrl; -use std::sync::mpsc::channel; +use servo_channel::channel; // TODO: Check on GET // https://w3c.github.io/FileAPI/#requestResponseModel @@ -24,22 +24,21 @@ use std::sync::mpsc::channel; pub fn load_blob_async( url: ServoUrl, filemanager: FileManager, - response: &Response, done_chan: &mut DoneChannel -) -> Result<(), NetworkError> { +) -> Response { let (id, origin) = match parse_blob_url(&url) { Ok((id, origin)) => (id, origin), Err(()) => { - let e = format!("Invalid blob URL format {:?}", url); - return Err(NetworkError::Internal(e)); - }, + return Response::network_error(NetworkError::Internal("Invalid blob url".into())); + }, }; + let mut response = Response::new(url, ResourceFetchTiming::new(ResourceTimingType::Resource)); let (sender, receiver) = channel(); *done_chan = Some((sender.clone(), receiver)); *response.body.lock().unwrap() = ResponseBody::Receiving(vec![]); let check_url_validity = true; - filemanager.fetch_file(sender, id, check_url_validity, origin, response); + filemanager.fetch_file(sender, id, check_url_validity, origin, &mut response); - Ok(()) + response } diff --git a/components/net/fetch/methods.rs b/components/net/fetch/methods.rs index 67a02775369..ddd22fcda34 100644 --- a/components/net/fetch/methods.rs +++ b/components/net/fetch/methods.rs @@ -657,11 +657,7 @@ fn scheme_fetch( )); } - let mut response = Response::new(url, ResourceFetchTiming::new(request.timing_type())); - - load_blob_async(url.clone(), 
context.filemanager.clone(), &response, done_chan); - - response + load_blob_async(url.clone(), context.filemanager.clone(), done_chan) }, "ftp" => { diff --git a/components/net/filemanager_thread.rs b/components/net/filemanager_thread.rs index 0be8de815b4..6d4fdb3f2d5 100644 --- a/components/net/filemanager_thread.rs +++ b/components/net/filemanager_thread.rs @@ -16,6 +16,7 @@ use net_traits::filemanager_thread::{ FileManagerThreadError, ReadFileProgress, RelativePos, SelectedFile, }; use net_traits::response::{Response, ResponseBody}; +use servo_channel; use servo_config::prefs::PREFS; use std::collections::HashMap; use std::fs::File; @@ -23,7 +24,7 @@ use std::io::{Read, Seek, SeekFrom}; use std::ops::Index; use std::path::{Path, PathBuf}; use std::sync::atomic::{self, AtomicBool, AtomicUsize, Ordering}; -use std::sync::{Arc, RwLock, Mutex, mpsc}; +use std::sync::{Arc, RwLock}; use std::thread; use url::Url; use uuid::Uuid; @@ -98,15 +99,15 @@ impl FileManager { } pub fn fetch_file(&self, - sender: mpsc::Sender<Data>, + sender: servo_channel::Sender<Data>, id: Uuid, check_url_validity: bool, origin: FileOrigin, - response: &Response) { + response: &mut Response) { let store = self.store.clone(); - let mut res_body = response.body.clone(); + let mut r2 = response.clone(); thread::Builder::new().name("read file".to_owned()).spawn(move || { - store.try_fetch_file(&sender, id, check_url_validity, origin, response, res_body) + store.try_fetch_file(&sender, id, check_url_validity, origin, &mut r2) }).expect("Thread spawning failed"); } @@ -508,9 +509,9 @@ impl FileManagerStore { ) } - fn fetch_blob_buf(&self, sender: &mpsc::Sender<Data>, + fn fetch_blob_buf(&self, sender: &servo_channel::Sender<Data>, id: &Uuid, origin_in: &FileOrigin, rel_pos: RelativePos, - check_url_validity: bool, response: &Response, res_body: Arc<Mutex<ResponseBody>>) -> Result<(), BlobURLStoreError> { + check_url_validity: bool, response: &mut Response) -> Result<(), BlobURLStoreError> { let mut bytes = vec![]; let file_impl = 
self.get_impl(id, origin_in, check_url_validity)?; match file_impl { @@ -544,7 +545,7 @@ impl FileManagerStore { bytes.extend_from_slice(&blob_buf.bytes); response.headers = headers; - *res_body.lock().unwrap() = ResponseBody::Done(bytes); + *response.body.lock().unwrap() = ResponseBody::Done(bytes); let _ = sender.send(Data::Done); Ok(()) } @@ -574,7 +575,7 @@ impl FileManagerStore { }; chunked_fetch(sender, &mut file, range.len(), opt_filename, - type_string, response, res_body, &mut bytes); + type_string, response, &mut bytes); Ok(()) } else { Err(BlobURLStoreError::InvalidEntry) } @@ -584,16 +585,15 @@ // Next time we don't need to check validity since // we have already done that for requesting URL if necessary self.fetch_blob_buf(sender, &parent_id, origin_in, - rel_pos.slice_inner(&inner_rel_pos), false, response, res_body) + rel_pos.slice_inner(&inner_rel_pos), false, response) } } } - fn try_fetch_file(&self, sender: &mpsc::Sender<Data>, id: Uuid, check_url_validity: bool, - origin_in: FileOrigin, response: &Response, res_body: Arc<Mutex<ResponseBody>>) + fn try_fetch_file(&self, sender: &servo_channel::Sender<Data>, id: Uuid, check_url_validity: bool, + origin_in: FileOrigin, response: &mut Response) -> Result<(), BlobURLStoreError> { - self.fetch_blob_buf(sender, &id, &origin_in, RelativePos::full_range(), check_url_validity, - response, res_body) + self.fetch_blob_buf(sender, &id, &origin_in, RelativePos::full_range(), check_url_validity, response) } fn dec_ref(&self, id: &Uuid, origin_in: &FileOrigin) -> Result<(), BlobURLStoreError> { @@ -764,9 +764,9 @@ fn chunked_read( } } -fn chunked_fetch(sender: &mpsc::Sender<Data>, +fn chunked_fetch(sender: &servo_channel::Sender<Data>, file: &mut File, size: usize, opt_filename: Option<String>, - type_string: String, response: &Response, res_body: Arc<Mutex<ResponseBody>>, bytes: &mut Vec<u8>) { + type_string: String, response: &mut Response, bytes: &mut Vec<u8>) { // First chunk let mut buf = vec![0; CHUNK_SIZE]; match file.read(&mut buf) { @@ -778,7 +778,9 @@ fn 
chunked_fetch(sender: &mpsc::Sender<Data>, size: size as u64, bytes: buf, }; + bytes.extend_from_slice(&blob_buf.bytes); + let _ = sender.send(Data::Payload(blob_buf.bytes)); } Err(_) => { @@ -792,7 +794,7 @@ fn chunked_fetch(sender: &mpsc::Sender<Data>, let mut buf = vec![0; CHUNK_SIZE]; match file.read(&mut buf) { Ok(0) => { - *res_body.lock().unwrap() = ResponseBody::Done(bytes.to_vec()); + *response.body.lock().unwrap() = ResponseBody::Done(bytes.to_vec()); let _ = sender.send(Data::Done); return; } @@ -801,7 +803,7 @@ fn chunked_fetch(sender: &mpsc::Sender<Data>, bytes.extend_from_slice(&buf); let _ = sender.send(Data::Payload(buf)); } - Err(e) => { + Err(_) => { *response = Response::network_error(NetworkError::Internal("Opening file failed".into())); return; }