handle caching of a response with a body of ResponseBody::Receiving

Gregory Terzian 2017-11-26 21:31:26 +08:00
parent 609e975c50
commit 993e2f55ed
5 changed files with 80 additions and 43 deletions

View file

@@ -408,6 +408,12 @@ pub fn main_fetch(request: &mut Request,
// Step 24.
target.process_response_eof(&response);
if !response.is_network_error() {
if let Ok(mut http_cache) = context.state.http_cache.write() {
http_cache.update_awaiting_consumers(&request, &response);
}
}
// Steps 25-27.
// TODO: remove this line when only asynchronous fetches are used
response
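Note: the notification added above rides on the fetch machinery's existing done-channel plumbing. As a rough sketch of the shapes this commit appears to rely on (names taken from the fetch::methods imports later in this diff; the exact definitions may differ):

use std::sync::mpsc::{Receiver, Sender};

// Assumed shapes, inferred from how the diff uses them (not copied from fetch/methods.rs):
pub enum Data {
    Payload(Vec<u8>), // a chunk of response body bytes
    Done,             // the body has finished downloading
    Cancelled,        // the originating fetch was aborted
}

// One (sender, receiver) pair per fetch that still has body data in flight; None otherwise.
pub type DoneChannel = Option<(Sender<Data>, Receiver<Data>)>;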

View file

@@ -7,7 +7,7 @@
//! A memory cache implementing the logic specified in http://tools.ietf.org/html/rfc7234
//! and <http://tools.ietf.org/html/rfc7232>.
use fetch::methods::DoneChannel;
use fetch::methods::{Data, DoneChannel};
use hyper::header;
use hyper::header::ContentType;
use hyper::header::Headers;
@@ -23,6 +23,7 @@ use std::collections::HashMap;
use std::str;
use std::sync::{Arc, Mutex};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{channel, Sender};
use time;
use time::{Duration, Tm};
@@ -66,6 +67,7 @@ struct CachedResource {
expires: Duration,
last_validated: Tm,
aborted: Arc<AtomicBool>,
awaiting_body: Arc<Mutex<Vec<Sender<Data>>>>
}
/// Metadata about a loaded resource, such as is obtained from HTTP headers.
@@ -271,11 +273,19 @@ fn get_expiry_adjustment_from_request_headers(request: &Request, expires: Durati
}
/// Create a CachedResponse from a request and a CachedResource.
fn create_cached_response(request: &Request, cached_resource: &CachedResource, cached_headers: &Headers)
fn create_cached_response(request: &Request,
cached_resource: &CachedResource,
cached_headers: &Headers,
done_chan: &mut DoneChannel)
-> CachedResponse {
let mut response = Response::new(cached_resource.metadata.final_url.clone());
response.headers = cached_headers.clone();
response.body = cached_resource.body.clone();
if let ResponseBody::Receiving(_) = *cached_resource.body.lock().unwrap() {
let (done_sender, done_receiver) = channel();
*done_chan = Some((done_sender.clone(), done_receiver));
cached_resource.awaiting_body.lock().unwrap().push(done_sender);
}
response.location_url = cached_resource.location_url.clone();
response.status = cached_resource.status.clone();
response.raw_status = cached_resource.raw_status.clone();
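The branch above hinges on the state of the shared body. A minimal sketch of the ResponseBody states as this diff treats them (assumed from net_traits; Receiving means the original fetch is still appending bytes):

// Assumed variants, inferred from the matches in this file:
pub enum ResponseBody {
    Empty,              // no body
    Receiving(Vec<u8>), // partially downloaded; still being filled by the original fetch
    Done(Vec<u8>),      // fully downloaded
}

Assuming the cached body is an Arc<Mutex<ResponseBody>> (the .lock() calls here suggest it), the clone assigned to response.body shares the same allocation, so a consumer handed a Receiving body sees bytes arrive through that shared handle; the done_sender pushed into awaiting_body only has to signal completion.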
@@ -313,11 +323,15 @@ fn create_resource_with_bytes_from_resource(bytes: &[u8], resource: &CachedResou
expires: resource.expires.clone(),
last_validated: resource.last_validated.clone(),
aborted: Arc::new(AtomicBool::new(false)),
awaiting_body: Arc::new(Mutex::new(vec![]))
}
}
/// Support for range requests <https://tools.ietf.org/html/rfc7233>.
fn handle_range_request(request: &Request, candidates: Vec<&CachedResource>, range_spec: &[header::ByteRangeSpec])
fn handle_range_request(request: &Request,
candidates: Vec<&CachedResource>,
range_spec: &[header::ByteRangeSpec],
done_chan: &mut DoneChannel)
-> Option<CachedResponse> {
let mut complete_cached_resources = candidates.iter().filter(|resource| {
match resource.raw_status {
@@ -348,7 +362,7 @@ fn handle_range_request(request: &Request, candidates: Vec<&CachedResource>, ran
if let Some(bytes) = requested {
let new_resource = create_resource_with_bytes_from_resource(bytes, complete_resource);
let cached_headers = new_resource.metadata.headers.lock().unwrap();
let cached_response = create_cached_response(request, &new_resource, &*cached_headers);
let cached_response = create_cached_response(request, &new_resource, &*cached_headers, done_chan);
return Some(cached_response);
}
}
@@ -375,7 +389,7 @@ fn handle_range_request(request: &Request, candidates: Vec<&CachedResource>, ran
};
if let Some(bytes) = requested {
let new_resource = create_resource_with_bytes_from_resource(&bytes, partial_resource);
let cached_response = create_cached_response(request, &new_resource, &*headers);
let cached_response = create_cached_response(request, &new_resource, &*headers, done_chan);
return Some(cached_response);
}
}
@@ -388,7 +402,7 @@ fn handle_range_request(request: &Request, candidates: Vec<&CachedResource>, ran
if let Some(bytes) = requested {
let new_resource = create_resource_with_bytes_from_resource(bytes, complete_resource);
let cached_headers = new_resource.metadata.headers.lock().unwrap();
let cached_response = create_cached_response(request, &new_resource, &*cached_headers);
let cached_response = create_cached_response(request, &new_resource, &*cached_headers, done_chan);
return Some(cached_response);
}
}
@@ -415,7 +429,7 @@ fn handle_range_request(request: &Request, candidates: Vec<&CachedResource>, ran
};
if let Some(bytes) = requested {
let new_resource = create_resource_with_bytes_from_resource(&bytes, partial_resource);
let cached_response = create_cached_response(request, &new_resource, &*headers);
let cached_response = create_cached_response(request, &new_resource, &*headers, done_chan);
return Some(cached_response);
}
}
@@ -428,7 +442,7 @@ fn handle_range_request(request: &Request, candidates: Vec<&CachedResource>, ran
if let Some(bytes) = requested {
let new_resource = create_resource_with_bytes_from_resource(bytes, complete_resource);
let cached_headers = new_resource.metadata.headers.lock().unwrap();
let cached_response = create_cached_response(request, &new_resource, &*cached_headers);
let cached_response = create_cached_response(request, &new_resource, &*cached_headers, done_chan);
return Some(cached_response);
}
}
@@ -455,7 +469,7 @@ fn handle_range_request(request: &Request, candidates: Vec<&CachedResource>, ran
};
if let Some(bytes) = requested {
let new_resource = create_resource_with_bytes_from_resource(&bytes, partial_resource);
let cached_response = create_cached_response(request, &new_resource, &*headers);
let cached_response = create_cached_response(request, &new_resource, &*headers, done_chan);
return Some(cached_response);
}
}
@@ -476,22 +490,14 @@ impl HttpCache {
/// Constructing Responses from Caches.
/// <https://tools.ietf.org/html/rfc7234#section-4>
pub fn construct_response(&self, request: &Request) -> Option<CachedResponse> {
pub fn construct_response(&self, request: &Request, done_chan: &mut DoneChannel) -> Option<CachedResponse> {
// TODO: generate warning headers as appropriate <https://tools.ietf.org/html/rfc7234#section-5.5>
if request.method != Method::Get {
// Only GET requests are cached; avoid a URL-based match for others.
return None;
}
let entry_key = CacheKey::new(request.clone());
let resources = self.entries.get(&entry_key)?.into_iter().filter(|r| {
match *r.body.lock().unwrap() {
ResponseBody::Done(_) => !r.aborted.load(Ordering::Relaxed),
// TODO: use fetch::methods::DoneChannel, in order to be able to
// construct a response with a body in ResponseBody::Receiving mode.
ResponseBody::Receiving(_) => false,
ResponseBody::Empty => true
}
});
let resources = self.entries.get(&entry_key)?.into_iter().filter(|r| { !r.aborted.load(Ordering::Relaxed) });
let mut candidates = vec![];
for cached_resource in resources {
let mut can_be_constructed = true;
@@ -541,7 +547,7 @@ impl HttpCache {
}
// Support for range requests
if let Some(&header::Range::Bytes(ref range_spec)) = request.headers.get::<header::Range>() {
return handle_range_request(request, candidates, &range_spec);
return handle_range_request(request, candidates, &range_spec, done_chan);
} else {
// Not a Range request.
if let Some(ref cached_resource) = candidates.first() {
@@ -549,13 +555,33 @@ impl HttpCache {
// TODO: select the most appropriate one, using a known mechanism from a selecting header field,
// or using the Date header to return the most recent one.
let cached_headers = cached_resource.metadata.headers.lock().unwrap();
let cached_response = create_cached_response(request, cached_resource, &*cached_headers);
let cached_response = create_cached_response(request, cached_resource, &*cached_headers, done_chan);
return Some(cached_response);
}
}
None
}
/// Updating consumers who received a response constructed with a ResponseBody::Receiving.
pub fn update_awaiting_consumers(&mut self, request: &Request, response: &Response) {
if let ResponseBody::Done(ref completed_body) = *response.body.lock().unwrap() {
let entry_key = CacheKey::new(request.clone());
if let Some(cached_resources) = self.entries.get(&entry_key) {
for cached_resource in cached_resources.iter() {
let mut awaiting_consumers = cached_resource.awaiting_body.lock().unwrap();
for done_sender in awaiting_consumers.drain(..) {
if cached_resource.aborted.load(Ordering::Relaxed) {
let _ = done_sender.send(Data::Cancelled);
} else {
let _ = done_sender.send(Data::Payload(completed_body.clone()));
let _ = done_sender.send(Data::Done);
}
};
}
}
}
}
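Taken together with create_cached_response above, this is a register-then-notify pattern: consumers that hit a still-downloading entry park a Sender in awaiting_body, and the fetch that owns the entry drains those senders at EOF. A stripped-down, self-contained sketch of the same idea (hypothetical stand-in types, not Servo's actual API):

use std::sync::{Arc, Mutex};
use std::sync::mpsc::{channel, Receiver, Sender};

enum Data { Payload(Vec<u8>), Done, Cancelled }

struct CacheEntry {
    body: Arc<Mutex<Vec<u8>>>,                    // shared, still-growing body
    awaiting_body: Arc<Mutex<Vec<Sender<Data>>>>, // consumers waiting for completion
    aborted: bool,
}

// Analogue of create_cached_response: hand out the shared body and register a waiter.
fn subscribe(entry: &CacheEntry) -> Receiver<Data> {
    let (tx, rx) = channel();
    entry.awaiting_body.lock().unwrap().push(tx);
    rx
}

// Analogue of update_awaiting_consumers: drain the waiters once the body is complete.
fn notify(entry: &CacheEntry) {
    let body = entry.body.lock().unwrap().clone();
    for tx in entry.awaiting_body.lock().unwrap().drain(..) {
        if entry.aborted {
            let _ = tx.send(Data::Cancelled);
        } else {
            let _ = tx.send(Data::Payload(body.clone()));
            let _ = tx.send(Data::Done);
        }
    }
}

fn main() {
    let entry = CacheEntry {
        body: Arc::new(Mutex::new(b"partial".to_vec())),
        awaiting_body: Arc::new(Mutex::new(vec![])),
        aborted: false,
    };
    let rx = subscribe(&entry);                              // a second fetch hits the cache mid-download
    entry.body.lock().unwrap().extend_from_slice(b" body");  // the original fetch finishes the body
    notify(&entry);                                          // response EOF: wake the waiting consumer
    loop {
        match rx.recv().expect("notify always sends a terminal message") {
            Data::Payload(bytes) => println!("payload: {} bytes", bytes.len()),
            Data::Done => { println!("done"); break; }
            Data::Cancelled => { println!("cancelled"); break; }
        }
    }
}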
/// Freshening Stored Responses upon Validation.
/// <https://tools.ietf.org/html/rfc7234#section-4.3.4>
pub fn refresh(&mut self, request: &Request, response: Response, done_chan: &mut DoneChannel) -> Option<Response> {
@@ -655,7 +681,8 @@ impl HttpCache {
url_list: response.url_list.clone(),
expires: expiry,
last_validated: time::now(),
aborted: response.aborted.clone()
aborted: response.aborted.clone(),
awaiting_body: Arc::new(Mutex::new(vec![]))
};
let entry = self.entries.entry(entry_key).or_insert(vec![]);
entry.push(entry_resource);

View file

@@ -872,7 +872,7 @@ fn http_network_or_cache_fetch(request: &mut Request,
// Step 21
if let Ok(http_cache) = context.state.http_cache.read() {
if let Some(response_from_cache) = http_cache.construct_response(&http_request) {
if let Some(response_from_cache) = http_cache.construct_response(&http_request, done_chan) {
let response_headers = response_from_cache.response.headers.clone();
// Substep 1, 2, 3, 4
let (cached_response, needs_revalidation) = match (http_request.cache_mode, &http_request.mode) {
@@ -903,6 +903,27 @@ fn http_network_or_cache_fetch(request: &mut Request,
}
}
if let Some(ref ch) = *done_chan {
// The cache constructed a response with a body of ResponseBody::Receiving.
// We wait for the in-flight response in the cache to finish,
// which the cache signals over the channel with either Done or Cancelled.
loop {
match ch.1.recv()
.expect("HTTP cache should always send Done or Cancelled") {
Data::Payload(_) => {},
Data::Done => break, // Return the full response as if it was initially cached as such.
Data::Cancelled => {
// The response was cancelled while the fetch was ongoing.
// Set response to None, which will trigger a network fetch below.
response = None;
break;
}
}
}
}
// Set done_chan back to None; its cache-related usefulness ends here.
*done_chan = None;
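The loop above is the consumer half of that handshake. Payload chunks can be ignored here because the body handed out by construct_response is a clone of the cache's shared body handle; only the terminal Done or Cancelled decides whether the cached response is usable or a network fetch is needed. A minimal, self-contained sketch of that wait (hypothetical names, not the actual http_loader code):

use std::sync::mpsc::{channel, Receiver};
use std::thread;

#[allow(dead_code)] // Cancelled is part of the protocol even though this demo never sends it
enum Data { Payload(Vec<u8>), Done, Cancelled }

// Block until the in-flight cached response either completes or is cancelled.
// Returns true to keep the cached response, false to fall back to a network fetch.
fn wait_for_cached_body(rx: &Receiver<Data>) -> bool {
    loop {
        match rx.recv().expect("HTTP cache should always send Done or Cancelled") {
            Data::Payload(_) => {} // bytes already visible through the shared body
            Data::Done => return true,
            Data::Cancelled => return false,
        }
    }
}

fn main() {
    let (tx, rx) = channel();
    // Simulate the original fetch finishing its body on another thread.
    thread::spawn(move || {
        let _ = tx.send(Data::Payload(vec![1, 2, 3]));
        let _ = tx.send(Data::Done);
    });
    println!("use cached response: {}", wait_for_cached_body(&rx));
}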
// Step 22
if response.is_none() {
// Substep 1

View file

@@ -66360,7 +66360,7 @@
"testharness"
],
"mozilla/http-cache-xhr.html": [
"0f8e347991f25a08ba5f8bdff0ef99055e33e7a1",
"d4747fdc9ec6acc50718c13a668451987a44d219",
"testharness"
],
"mozilla/http-cache.html": [

View file

@@ -17,14 +17,6 @@
],
response_body: '12'
},
{
response_status: [200, 'OK'],
"response_headers": [
['Expires', http_date(100000)],
['Last-Modified', http_date(0)]
],
response_body: '12'
},
{
response_status: [200, 'OK'],
"response_headers": [
@@ -45,20 +37,11 @@
assert_equals(state.length, 2);
// The empty, aborted response.
assert_equals(client.response, "");
// The complete, non-aborted response.
assert_equals(client2.response, "12");
}));
var client3 = new XMLHttpRequest();
client3.onloadend = test.step_func(function(event) {
server_state(uuid).then(test.step_func(function(state) {
// No server hits, the response from client2 was cached and re-used.
assert_equals(state.length, 0)
assert_equals(client3.response, '12');
test.done();
}));
});
client3.open("GET", url);
client3.send();
});
client.onabort = test.step_func(function() {
client2.open("GET", url);
client2.send();