Auto merge of #24664 - gterzian:fix_port_transfer, r=jdm

Fix loophole in messageport transfer

Make the message-port transfer protocol robust against a port being transferred again while the completion of a previous transfer is still in flight: the constellation now waits for the receiving global to confirm a transfer, and in-flight tasks are buffered, returned, and re-queued when a completion fails.

---
- [ ] `./mach build -d` does not report any errors
- [ ] `./mach test-tidy` does not report any errors
- [ ] These changes fix #24600

- [ ] There are tests for these changes OR
- [ ] These changes do not require tests because ___

Merged by bors-servo on 2019-11-18 11:35:25 -05:00 (commit 0d2c2045cc).
7 changed files with 468 additions and 52 deletions.

---

@@ -184,6 +184,18 @@ enum TransferState {
     /// The port is currently in-transfer,
     /// and incoming tasks should be buffered until it becomes managed again.
     TransferInProgress(VecDeque<PortMessageTask>),
+    /// A global has requested the transfer to be completed;
+    /// it is pending a confirmation of either failure or success to complete the transfer.
+    CompletionInProgress(MessagePortRouterId),
+    /// While completion of a transfer was in progress, the port was shipped again,
+    /// hence the transfer failed to complete.
+    /// We start buffering incoming messages,
+    /// while awaiting the return of the previous buffer from the global
+    /// that failed to complete the transfer.
+    CompletionFailed(VecDeque<PortMessageTask>),
+    /// While a completion had failed, another global requested to complete the transfer.
+    /// We are still buffering messages, and awaiting the return of the buffer from the global that failed.
+    CompletionRequested(MessagePortRouterId, VecDeque<PortMessageTask>),
     /// The entangled port has been removed while the port was in-transfer;
     /// the current port should be removed as well once it is managed again.
     EntangledRemoved,
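
For orientation, here is a minimal, self-contained sketch of the state machine these variants encode. The types (`RouterId`, `Task`) are simplified stand-ins for `MessagePortRouterId` and `PortMessageTask`, and the two transition methods are hypothetical condensations of the constellation handlers shown below, not Servo's actual API:

```rust
use std::collections::VecDeque;

type RouterId = u32;
type Task = String;

#[derive(Debug)]
enum TransferState {
    Managed(RouterId),
    TransferInProgress(VecDeque<Task>),
    CompletionInProgress(RouterId),
    CompletionFailed(VecDeque<Task>),
    CompletionRequested(RouterId, VecDeque<Task>),
}

impl TransferState {
    /// The port was shipped (transferred) again by some global.
    fn on_shipped(self) -> TransferState {
        match self {
            // Shipped while managed: a normal transfer starts.
            TransferState::Managed(_) => TransferState::TransferInProgress(VecDeque::new()),
            // Shipped while a completion was pending: that completion has failed,
            // so buffer new messages until the previous buffer is returned.
            TransferState::CompletionInProgress(_) => {
                TransferState::CompletionFailed(VecDeque::new())
            },
            // Anything else is unexpected; the real code logs a warning.
            other => other,
        }
    }

    /// A global asks the constellation to complete a transfer to `router`.
    fn on_completion_requested(self, router: RouterId) -> TransferState {
        match self {
            // Normal case: await confirmation from the requesting router
            // (the real code also forwards the buffered tasks to it).
            TransferState::TransferInProgress(_) => TransferState::CompletionInProgress(router),
            // Still waiting for the old buffer to come back: remember the
            // new target, keep buffering.
            TransferState::CompletionFailed(buffer) |
            TransferState::CompletionRequested(_, buffer) => {
                TransferState::CompletionRequested(router, buffer)
            },
            other => other,
        }
    }
}

fn main() {
    let state = TransferState::Managed(1).on_shipped().on_completion_requested(2);
    println!("{:?}", state); // CompletionInProgress(2)
}
```
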
@@ -1549,6 +1561,13 @@ where
         };
         match content {
+            FromScriptMsg::CompleteMessagePortTransfer(router_id, ports) => {
+                self.handle_complete_message_port_transfer(router_id, ports);
+            },
+            FromScriptMsg::MessagePortTransferResult(router_id, succeeded, failed) => {
+                self.handle_message_port_transfer_completed(router_id, succeeded);
+                self.handle_message_port_transfer_failed(failed);
+            },
             FromScriptMsg::RerouteMessagePort(port_id, task) => {
                 self.handle_reroute_messageport(port_id, task);
             },
@@ -1775,6 +1794,224 @@ where
         }
     }
+
+    fn handle_message_port_transfer_completed(
+        &mut self,
+        router_id: Option<MessagePortRouterId>,
+        ports: Vec<MessagePortId>,
+    ) {
+        let router_id = match router_id {
+            Some(router_id) => router_id,
+            None => {
+                if !ports.is_empty() {
+                    warn!("Constellation unable to process port transfer successes, since no router id was received");
+                }
+                return;
+            },
+        };
+        for port_id in ports.into_iter() {
+            let mut entry = match self.message_ports.entry(port_id) {
+                Entry::Vacant(_) => {
+                    warn!(
+                        "Constellation received a port transfer completed msg for unknown messageport {:?}",
+                        port_id
+                    );
+                    continue;
+                },
+                Entry::Occupied(entry) => entry,
+            };
+            match entry.get().state {
+                TransferState::EntangledRemoved => {
+                    // If the entangled port has been removed while this one was in-transfer,
+                    // remove it now.
+                    if let Some(sender) = self.message_port_routers.get(&router_id) {
+                        let _ = sender.send(MessagePortMsg::RemoveMessagePort(port_id));
+                    } else {
+                        warn!("No message-port sender for {:?}", router_id);
+                    }
+                    entry.remove_entry();
+                    continue;
+                },
+                TransferState::CompletionInProgress(expected_router_id) => {
+                    // Here, the transfer completed normally.
+                    if expected_router_id != router_id {
+                        return warn!(
+                            "Transfer completed by an unexpected router: {:?}",
+                            router_id
+                        );
+                    }
+                    // Update the state to managed.
+                    let new_info = MessagePortInfo {
+                        state: TransferState::Managed(router_id),
+                        entangled_with: entry.get().entangled_with,
+                    };
+                    entry.insert(new_info);
+                },
+                _ => warn!("Constellation received unexpected port transfer completed message"),
+            }
+        }
+    }
+
+    fn handle_message_port_transfer_failed(
+        &mut self,
+        ports: HashMap<MessagePortId, VecDeque<PortMessageTask>>,
+    ) {
+        for (port_id, mut previous_buffer) in ports.into_iter() {
+            let entry = match self.message_ports.remove(&port_id) {
+                None => {
+                    warn!(
+                        "Constellation received a port transfer failed msg for unknown messageport {:?}",
+                        port_id
+                    );
+                    continue;
+                },
+                Some(entry) => entry,
+            };
+            let new_info = match entry.state {
+                TransferState::EntangledRemoved => {
+                    // If the entangled port has been removed while this one was in-transfer,
+                    // just drop it.
+                    continue;
+                },
+                TransferState::CompletionFailed(mut current_buffer) => {
+                    // The transfer failed,
+                    // and now the global has returned us the buffer we previously sent.
+                    // So the next update is back to a "normal" transfer in progress.
+                    // Tasks in the previous buffer are older,
+                    // hence they need to be added to the front of the current one.
+                    while let Some(task) = previous_buffer.pop_back() {
+                        current_buffer.push_front(task);
+                    }
+                    // Update the state to transfer-in-progress.
+                    MessagePortInfo {
+                        state: TransferState::TransferInProgress(current_buffer),
+                        entangled_with: entry.entangled_with,
+                    }
+                },
+                TransferState::CompletionRequested(target_router_id, mut current_buffer) => {
+                    // Here, before the global that failed the last transfer could return us the buffer,
+                    // another global already sent us a request to complete a new transfer.
+                    // So we use the returned buffer to update
+                    // the current buffer (of new incoming messages),
+                    // and we send everything to the global
+                    // that is waiting for completion of the current transfer.
+                    // Tasks in the previous buffer are older,
+                    // hence they need to be added to the front of the current one.
+                    while let Some(task) = previous_buffer.pop_back() {
+                        current_buffer.push_front(task);
+                    }
+                    // Forward the buffered message-queue to complete the current transfer.
+                    if let Some(sender) = self.message_port_routers.get(&target_router_id) {
+                        if sender
+                            .send(MessagePortMsg::CompletePendingTransfer(
+                                port_id,
+                                current_buffer,
+                            ))
+                            .is_err()
+                        {
+                            warn!("Constellation failed to send complete port transfer response.");
+                        }
+                    } else {
+                        warn!("No message-port sender for {:?}", target_router_id);
+                    }
+                    // Update the state to completion-in-progress.
+                    MessagePortInfo {
+                        state: TransferState::CompletionInProgress(target_router_id),
+                        entangled_with: entry.entangled_with,
+                    }
+                },
+                _ => {
+                    warn!("Unexpected port transfer failed message received");
+                    continue;
+                },
+            };
+            self.message_ports.insert(port_id, new_info);
+        }
+    }
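
The front-merge above preserves delivery order: tasks buffered before the failed completion must come out ahead of tasks that arrived after the failure. A minimal sketch of that invariant, with integers standing in for `PortMessageTask`:

```rust
use std::collections::VecDeque;

fn main() {
    // Tasks buffered during the failed completion attempt (older).
    let mut previous: VecDeque<u32> = VecDeque::from(vec![1, 2]);
    // Tasks that arrived after the failure was detected (newer).
    let mut current: VecDeque<u32> = VecDeque::from(vec![3, 4]);

    // Pop from the back of the older buffer and push onto the front of the
    // newer one, so the combined queue keeps arrival order.
    while let Some(task) = previous.pop_back() {
        current.push_front(task);
    }

    assert_eq!(current, VecDeque::from(vec![1, 2, 3, 4]));
}
```
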
+
+    fn handle_complete_message_port_transfer(
+        &mut self,
+        router_id: MessagePortRouterId,
+        ports: Vec<MessagePortId>,
+    ) {
+        let mut response = HashMap::new();
+        for port_id in ports.into_iter() {
+            let entry = match self.message_ports.remove(&port_id) {
+                None => {
+                    warn!(
+                        "Constellation asked to complete transfer for unknown messageport {:?}",
+                        port_id
+                    );
+                    continue;
+                },
+                Some(entry) => entry,
+            };
+            let new_info = match entry.state {
+                TransferState::EntangledRemoved => {
+                    // If the entangled port has been removed while this one was in-transfer,
+                    // remove it now.
+                    if let Some(sender) = self.message_port_routers.get(&router_id) {
+                        let _ = sender.send(MessagePortMsg::RemoveMessagePort(port_id));
+                    } else {
+                        warn!("No message-port sender for {:?}", router_id);
+                    }
+                    continue;
+                },
+                TransferState::TransferInProgress(buffer) => {
+                    response.insert(port_id, buffer);
+                    // The port was in-transfer, and a global is requesting completion,
+                    // so we note the start of the completion.
+                    MessagePortInfo {
+                        state: TransferState::CompletionInProgress(router_id),
+                        entangled_with: entry.entangled_with,
+                    }
+                },
+                TransferState::CompletionFailed(buffer) |
+                TransferState::CompletionRequested(_, buffer) => {
+                    // If the completion had already failed,
+                    // this is a request coming from a global to complete a new transfer,
+                    // but we're still awaiting the return of the buffer
+                    // from the first global that failed.
+                    //
+                    // So we note the request from the new global,
+                    // and continue to buffer incoming messages
+                    // while we wait for the buffer used in the previous transfer to be returned.
+                    //
+                    // If another global requests completion in the CompletionRequested state,
+                    // we simply swap the target router-id for the new one,
+                    // keeping the buffer.
+                    MessagePortInfo {
+                        state: TransferState::CompletionRequested(router_id, buffer),
+                        entangled_with: entry.entangled_with,
+                    }
+                },
+                _ => {
+                    warn!("Unexpected complete port transfer message received");
+                    continue;
+                },
+            };
+            self.message_ports.insert(port_id, new_info);
+        }
+        if !response.is_empty() {
+            // Forward the buffered message-queues.
+            if let Some(sender) = self.message_port_routers.get(&router_id) {
+                if sender
+                    .send(MessagePortMsg::CompleteTransfer(response))
+                    .is_err()
+                {
+                    warn!("Constellation failed to send complete port transfer response.");
+                }
+            } else {
+                warn!("No message-port sender for {:?}", router_id);
+            }
+        }
+    }
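
Per port, this handler answers a completion request in one of three ways: drop the port (its entangled end is gone), hand its buffer back right away (the normal case), or park the request until the previous failed transfer has been rolled back. A reduced sketch of how the batched response is assembled, using hypothetical simplified types:

```rust
use std::collections::{HashMap, VecDeque};

type PortId = u32;
type Task = String;

enum State {
    TransferInProgress(VecDeque<Task>),
    CompletionFailed(VecDeque<Task>),
}

// Collect the buffers of ports whose transfer can complete right away.
// Ports still rolling back a failed completion stay out of the response;
// they are answered later with a CompletePendingTransfer message.
fn build_response(ports: Vec<(PortId, State)>) -> HashMap<PortId, VecDeque<Task>> {
    let mut response = HashMap::new();
    for (id, state) in ports {
        if let State::TransferInProgress(buffer) = state {
            response.insert(id, buffer);
        }
    }
    response
}

fn main() {
    let ports = vec![
        (1, State::TransferInProgress(VecDeque::new())),
        (2, State::CompletionFailed(VecDeque::new())),
    ];
    let response = build_response(ports);
    assert!(response.contains_key(&1) && !response.contains_key(&2));
}
```
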
     fn handle_reroute_messageport(&mut self, port_id: MessagePortId, task: PortMessageTask) {
         let info = match self.message_ports.get_mut(&port_id) {
             Some(info) => info,
@@ -1786,7 +2023,10 @@ where
             },
         };
         match &mut info.state {
-            TransferState::Managed(router_id) => {
+            TransferState::Managed(router_id) | TransferState::CompletionInProgress(router_id) => {
+                // In both the managed case and the completion-of-a-transfer case, we forward the message.
+                // Note that in both cases, if the port is transferred before the message is handled,
+                // it will be sent back here and buffered while the transfer is ongoing.
                 if let Some(sender) = self.message_port_routers.get(&router_id) {
                     let _ = sender.send(MessagePortMsg::NewTask(port_id, task));
                 } else {
@@ -1794,6 +2034,8 @@ where
                 }
             },
             TransferState::TransferInProgress(queue) => queue.push_back(task),
+            TransferState::CompletionFailed(queue) => queue.push_back(task),
+            TransferState::CompletionRequested(_, queue) => queue.push_back(task),
             TransferState::EntangledRemoved => warn!(
                 "Messageport received a message, but its entangled port has already been removed {:?}",
                 port_id
@@ -1803,8 +2045,19 @@ where
     fn handle_messageport_shipped(&mut self, port_id: MessagePortId) {
         if let Some(info) = self.message_ports.get_mut(&port_id) {
-            if let TransferState::Managed(_) = info.state {
+            match info.state {
+                TransferState::Managed(_) => {
+                    // If shipped while managed, note the start of a transfer.
                     info.state = TransferState::TransferInProgress(VecDeque::new());
+                },
+                TransferState::CompletionInProgress(_) => {
+                    // If shipped while completion of a transfer was in progress,
+                    // the completion failed.
+                    // This will be followed by a MessagePortTransferResult message
+                    // listing this port as failed, returning the buffer we previously sent.
+                    info.state = TransferState::CompletionFailed(VecDeque::new());
+                },
+                _ => warn!("Unexpected messageport-shipped message received"),
             }
         } else {
             warn!(
@@ -1828,37 +2081,11 @@ where
     fn handle_new_messageport(&mut self, router_id: MessagePortRouterId, port_id: MessagePortId) {
         match self.message_ports.entry(port_id) {
-            // If we know about this port, it means it was transferred.
-            Entry::Occupied(mut entry) => {
-                if let TransferState::EntangledRemoved = entry.get().state {
-                    // If the entangled port has been removed while this one was in-transfer,
-                    // remove it now.
-                    if let Some(sender) = self.message_port_routers.get(&router_id) {
-                        let _ = sender.send(MessagePortMsg::RemoveMessagePort(port_id));
-                    } else {
-                        warn!("No message-port sender for {:?}", router_id);
-                    }
-                    entry.remove_entry();
-                    return;
-                }
-                let new_info = MessagePortInfo {
-                    state: TransferState::Managed(router_id),
-                    entangled_with: entry.get().entangled_with.clone(),
-                };
-                let old_info = entry.insert(new_info);
-                let buffer = match old_info.state {
-                    TransferState::TransferInProgress(buffer) => buffer,
-                    _ => {
-                        return warn!("Completing transfer of a port that did not have a transfer in progress.");
-                    },
-                };
-                // Forward the buffered message-queue.
-                if let Some(sender) = self.message_port_routers.get(&router_id) {
-                    let _ = sender.send(MessagePortMsg::CompleteTransfer(port_id.clone(), buffer));
-                } else {
-                    warn!("No message-port sender for {:?}", router_id);
-                }
-            },
+            // If it's a new port, we should not know about it.
+            Entry::Occupied(_) => warn!(
+                "Constellation asked to start tracking an existing messageport {:?}",
+                port_id
+            ),
             Entry::Vacant(entry) => {
                 let info = MessagePortInfo {
                     state: TransferState::Managed(router_id),
@@ -1897,7 +2124,10 @@ where
                 "Constellation asked to remove entangled messageport by a port that was already removed {:?}",
                 port_id
             ),
-            TransferState::TransferInProgress(_) => {
+            TransferState::TransferInProgress(_) |
+            TransferState::CompletionInProgress(_) |
+            TransferState::CompletionFailed(_) |
+            TransferState::CompletionRequested(_, _) => {
                 // Note: since the port is in-transfer, we don't have a router to send it a message
                 // to let it know that its entangled port has been removed.
                 // Hence we mark it so that it will be messaged and removed once the transfer completes.

---

@@ -218,12 +218,48 @@ impl MessageListener {
     /// and we can only access the root from the event-loop.
     fn notify(&self, msg: MessagePortMsg) {
         match msg {
-            MessagePortMsg::CompleteTransfer(port_id, tasks) => {
+            MessagePortMsg::CompleteTransfer(ports) => {
                 let context = self.context.clone();
                 let _ = self.task_source.queue_with_canceller(
                     task!(process_complete_transfer: move || {
                         let global = context.root();
-                        global.complete_port_transfer(port_id, tasks);
+                        let router_id = match global.port_router_id() {
+                            Some(router_id) => router_id,
+                            None => {
+                                // If not managing any ports, no transfer can succeed,
+                                // so just send back everything.
+                                let _ = global.script_to_constellation_chan().send(
+                                    ScriptMsg::MessagePortTransferResult(None, vec![], ports),
+                                );
+                                return;
+                            }
+                        };
+                        let mut succeeded = vec![];
+                        let mut failed = HashMap::new();
+                        for (id, buffer) in ports.into_iter() {
+                            if global.is_managing_port(&id) {
+                                succeeded.push(id.clone());
+                                global.complete_port_transfer(id, buffer);
+                            } else {
+                                failed.insert(id, buffer);
+                            }
+                        }
+                        let _ = global.script_to_constellation_chan().send(
+                            ScriptMsg::MessagePortTransferResult(Some(router_id), succeeded, failed),
+                        );
+                    }),
+                    &self.canceller,
+                );
+            },
+            MessagePortMsg::CompletePendingTransfer(port_id, buffer) => {
+                let context = self.context.clone();
+                let _ = self.task_source.queue_with_canceller(
+                    task!(complete_pending: move || {
+                        let global = context.root();
+                        global.complete_port_transfer(port_id, buffer);
                     }),
                     &self.canceller,
                 );
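
The `CompleteTransfer` handler above answers one batched message with one batched result: ports the global still manages complete locally, while ports that have since been transferred onward are returned together with their buffers. A reduced sketch of that partitioning, with a `HashSet` standing in for the global's port table:

```rust
use std::collections::{HashMap, HashSet, VecDeque};

type PortId = u32;
type Task = String;

fn partition(
    managed: &HashSet<PortId>,
    ports: HashMap<PortId, VecDeque<Task>>,
) -> (Vec<PortId>, HashMap<PortId, VecDeque<Task>>) {
    let mut succeeded = vec![];
    let mut failed = HashMap::new();
    for (id, buffer) in ports {
        if managed.contains(&id) {
            // Still ours: the transfer completes locally.
            succeeded.push(id);
        } else {
            // Shipped onward in the meantime: return its buffer.
            failed.insert(id, buffer);
        }
    }
    (succeeded, failed)
}

fn main() {
    let managed: HashSet<PortId> = [1].into_iter().collect();
    let mut ports = HashMap::new();
    ports.insert(1, VecDeque::new());
    ports.insert(2, VecDeque::new());
    let (succeeded, failed) = partition(&managed, ports);
    assert_eq!(succeeded, vec![1]);
    assert!(failed.contains_key(&2));
}
```
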
@@ -294,6 +330,25 @@ impl GlobalScope {
         }
     }
+
+    /// The message-port router id of the global, if any.
+    fn port_router_id(&self) -> Option<MessagePortRouterId> {
+        if let MessagePortState::Managed(id, _message_ports) = &*self.message_port_state.borrow() {
+            Some(id.clone())
+        } else {
+            None
+        }
+    }
+
+    /// Is this global managing a given port?
+    fn is_managing_port(&self, port_id: &MessagePortId) -> bool {
+        if let MessagePortState::Managed(_router_id, message_ports) =
+            &*self.message_port_state.borrow()
+        {
+            return message_ports.contains_key(port_id);
+        }
+        false
+    }
+
     /// Complete the transfer of a message-port.
     fn complete_port_transfer(&self, port_id: MessagePortId, tasks: VecDeque<PortMessageTask>) {
         let should_start = if let MessagePortState::Managed(_id, message_ports) =
@@ -301,7 +356,7 @@ impl GlobalScope {
         {
             match message_ports.get_mut(&port_id) {
                 None => {
-                    panic!("CompleteTransfer msg received in a global not managing the port.");
+                    panic!("complete_port_transfer called for an unknown port.");
                 },
                 Some(ManagedMessagePort::Pending(_, _)) => {
                     panic!("CompleteTransfer msg received for a pending port.");
@@ -312,7 +367,7 @@ impl GlobalScope {
                 },
             }
         } else {
-            return warn!("CompleteTransfer msg received in a global not managing any ports.");
+            panic!("complete_port_transfer called for an unknown port.");
         };
         if should_start {
             self.start_message_port(&port_id);
@@ -554,22 +609,25 @@ impl GlobalScope {
                     _ => None,
                 })
                 .collect();
-            for id in to_be_added {
+            for id in to_be_added.iter() {
                 let (id, port_info) = message_ports
                     .remove_entry(&id)
                     .expect("Collected port-id to match an entry");
-                if let ManagedMessagePort::Pending(port_impl, dom_port) = port_info {
-                    let _ = self
-                        .script_to_constellation_chan()
-                        .send(ScriptMsg::NewMessagePort(
-                            router_id.clone(),
-                            port_impl.message_port_id().clone(),
-                        ));
+                match port_info {
+                    ManagedMessagePort::Pending(port_impl, dom_port) => {
                         let new_port_info = ManagedMessagePort::Added(port_impl, dom_port);
                         let present = message_ports.insert(id, new_port_info);
                         assert!(present.is_none());
+                    },
+                    _ => panic!("Only pending ports should be found in to_be_added"),
                 }
             }
+            let _ =
+                self.script_to_constellation_chan()
+                    .send(ScriptMsg::CompleteMessagePortTransfer(
+                        router_id.clone(),
+                        to_be_added,
+                    ));
         } else {
             warn!("maybe_add_pending_ports called on a global not managing any ports.");
         }
} }

---

@@ -1042,8 +1042,12 @@ pub struct PortMessageTask {
 /// Messages for communication between the constellation and a global managing ports.
 #[derive(Debug, Deserialize, Serialize)]
 pub enum MessagePortMsg {
-    /// Enables a port to catch-up on messages that were sent while the transfer was ongoing.
-    CompleteTransfer(MessagePortId, VecDeque<PortMessageTask>),
+    /// Complete the transfer for a batch of ports.
+    CompleteTransfer(HashMap<MessagePortId, VecDeque<PortMessageTask>>),
+    /// Complete the transfer of a single port,
+    /// whose transfer was pending because it had been requested
+    /// while a previous failed transfer was being rolled back.
+    CompletePendingTransfer(MessagePortId, VecDeque<PortMessageTask>),
     /// Remove a port; the entangled one doesn't exist anymore.
     RemoveMessagePort(MessagePortId),
     /// Handle a new port-message-task.

---

@@ -30,6 +30,7 @@ use net_traits::storage_thread::StorageType;
 use net_traits::CoreResourceMsg;
 use servo_url::ImmutableOrigin;
 use servo_url::ServoUrl;
+use std::collections::{HashMap, VecDeque};
 use std::fmt;
 use style_traits::viewport::ViewportConstraints;
 use style_traits::CSSPixel;
@@ -114,6 +115,17 @@ pub enum HistoryEntryReplacement {
 /// Messages from the script to the constellation.
 #[derive(Deserialize, Serialize)]
 pub enum ScriptMsg {
+    /// Request to complete the transfer of a set of ports to a router.
+    CompleteMessagePortTransfer(MessagePortRouterId, Vec<MessagePortId>),
+    /// The results of attempting to complete the transfer of a batch of ports.
+    MessagePortTransferResult(
+        /* The router whose transfer of ports succeeded, if any */
+        Option<MessagePortRouterId>,
+        /* The ids of ports transferred successfully */
+        Vec<MessagePortId>,
+        /* The ids, and buffers, of ports whose transfer failed */
+        HashMap<MessagePortId, VecDeque<PortMessageTask>>,
+    ),
     /// A new message-port was created or transferred, with corresponding control-sender.
     NewMessagePort(MessagePortRouterId, MessagePortId),
     /// A global has started managing message-ports.
@@ -248,6 +260,8 @@ impl fmt::Debug for ScriptMsg {
     fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
         use self::ScriptMsg::*;
         let variant = match *self {
+            CompleteMessagePortTransfer(..) => "CompleteMessagePortTransfer",
+            MessagePortTransferResult(..) => "MessagePortTransferResult",
             NewMessagePortRouter(..) => "NewMessagePortRouter",
             RemoveMessagePortRouter(..) => "RemoveMessagePortRouter",
             NewMessagePort(..) => "NewMessagePort",
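
Taken together, `CompleteMessagePortTransfer` and `MessagePortTransferResult` form a two-message handshake between a global and the constellation. A toy model of the round trip over std channels, with plain enums standing in for the real IPC types:

```rust
use std::collections::{HashMap, VecDeque};
use std::sync::mpsc::channel;

type RouterId = u32;
type PortId = u32;
type Task = String;

enum ScriptMsg {
    // Global -> constellation: please complete the transfer of these ports.
    CompleteMessagePortTransfer(RouterId, Vec<PortId>),
    // Global -> constellation: which ports succeeded, and which failed (with buffers).
    MessagePortTransferResult(Option<RouterId>, Vec<PortId>, HashMap<PortId, VecDeque<Task>>),
}

fn main() {
    let (to_constellation, from_script) = channel::<ScriptMsg>();

    // The global announces the ports it received in a transfer...
    to_constellation
        .send(ScriptMsg::CompleteMessagePortTransfer(7, vec![1, 2]))
        .unwrap();

    // ...and later reports that port 1 completed, while port 2 had been
    // shipped onward again, so its buffered tasks are returned.
    let mut failed = HashMap::new();
    failed.insert(2, VecDeque::from(vec!["task".to_string()]));
    to_constellation
        .send(ScriptMsg::MessagePortTransferResult(Some(7), vec![1], failed))
        .unwrap();

    // The constellation drains its event queue.
    while let Ok(msg) = from_script.try_recv() {
        match msg {
            ScriptMsg::CompleteMessagePortTransfer(router, ports) => {
                println!("router {} asks to complete {:?}", router, ports);
            },
            ScriptMsg::MessagePortTransferResult(router, ok, failed) => {
                println!("router {:?}: ok={:?}, failed={}", router, ok, failed.len());
            },
        }
    }
}
```
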

---

@@ -11296,6 +11296,32 @@
    {}
   ]
  ],
+ "mozilla/Channel_postMessage_with_second_transfer_in_timeout.window.js": [
+  [
+   "mozilla/Channel_postMessage_with_second_transfer_in_timeout.window.html",
+   {
+    "script_metadata": [
+     [
+      "script",
+      "/common/get-host-info.sub.js"
+     ]
+    ]
+   }
+  ]
+ ],
+ "mozilla/Channel_postMessage_with_second_transfer_in_timeout_with_delay.window.js": [
+  [
+   "mozilla/Channel_postMessage_with_second_transfer_in_timeout_with_delay.window.html",
+   {
+    "script_metadata": [
+     [
+      "script",
+      "/common/get-host-info.sub.js"
+     ]
+    ]
+   }
+  ]
+ ],
 "mozilla/DOMParser.html": [
  [
   "mozilla/DOMParser.html",
@@ -18432,6 +18458,14 @@
   "276791c4348ada7e1da71041f2ccd383305e209c",
   "support"
  ],
+ "mozilla/Channel_postMessage_with_second_transfer_in_timeout.window.js": [
+  "4ee3f64beb095963f06fc53c1d53dad2244109f9",
+  "testharness"
+ ],
+ "mozilla/Channel_postMessage_with_second_transfer_in_timeout_with_delay.window.js": [
+  "939995678895c07047709f6e265d0f6b7b705eb5",
+  "testharness"
+ ],
 "mozilla/DOMParser.html": [
  "f386a3e0191af2c70dcb05790ce7db15dd5ccbf1",
  "testharness"

---

@@ -0,0 +1,33 @@
+// META: script=/common/get-host-info.sub.js
+
+async_test(function(t) {
+  var channel1 = new MessageChannel();
+  var channel2 = new MessageChannel();
+  var host = get_host_info();
+  let iframe = document.createElement('iframe');
+  iframe.src = host.HTTP_NOTSAMESITE_ORIGIN + "/webmessaging/support/ChildWindowPostMessage.htm";
+  document.body.appendChild(iframe);
+  var TARGET = document.querySelector("iframe").contentWindow;
+  iframe.onload = t.step_func(function() {
+    // Send a message, expecting it to be received in the iframe.
+    channel1.port2.postMessage(1);
+    // First, transfer the port into the same realm.
+    channel2.port2.postMessage(0, [channel1.port1]);
+    channel2.port1.onmessage = t.step_func(function (evt) {
+      assert_equals(Number(evt.data), 0);
+      t.step_timeout(function () {
+        // Transfer the port to the iframe.
+        TARGET.postMessage("ports", "*", evt.ports);
+      }, 0);
+    });
+    channel1.port2.onmessage = t.step_func(function (evt) {
+      assert_equals(Number(evt.data), 1);
+      t.done();
+    });
+  });
+}, `A port transferred outside of an onmessage handler does not lose messages along the way.`);

---

@@ -0,0 +1,43 @@
+// META: script=/common/get-host-info.sub.js
+
+async_test(function(t) {
+  var channel1 = new MessageChannel();
+  var channel2 = new MessageChannel();
+  var host = get_host_info();
+  let iframe = document.createElement('iframe');
+  iframe.src = host.HTTP_NOTSAMESITE_ORIGIN + "/webmessaging/support/ChildWindowPostMessage.htm";
+  document.body.appendChild(iframe);
+  var TARGET = document.querySelector("iframe").contentWindow;
+  iframe.onload = t.step_func(function() {
+    // Send a message, expecting it to be received in the iframe.
+    channel1.port2.postMessage(1);
+    // First, transfer the port into the same realm.
+    channel2.port2.postMessage(0, [channel1.port1]);
+    channel2.port1.onmessage = t.step_func(function (evt) {
+      assert_equals(Number(evt.data), 0);
+      t.step_timeout(function () {
+        // Transfer the port to the iframe.
+        TARGET.postMessage("ports", "*", evt.ports);
+        // Keep the event-loop busy for one second,
+        // which will result in the iframe
+        // starting the "complete port transfer" flow
+        // before the window global could finish its own.
+        var request = new XMLHttpRequest();
+        request.open('GET', 'blank.html?pipe=trickle(d1)', false);
+        request.send(null);
+      }, 0);
+    });
+    channel1.port2.onmessage = t.step_func(function (evt) {
+      assert_equals(Number(evt.data), 1);
+      t.done();
+    });
+  });
+}, `A port transferred outside of an onmessage handler,
+followed by a delay in returning the buffer caused by blocking the event-loop,
+does not lose messages along the way.`);