layout: Add a safe way for layout to perform random access on child flows.

This uses caching to ensure that we perform no more than O(n) pointer
loads.
This commit is contained in:
Patrick Walton 2016-10-25 17:20:01 -07:00
parent d990a7eb84
commit 92b7189f31
2 changed files with 55 additions and 28 deletions

View file

@ -411,8 +411,9 @@ impl FlexFlow {
let mut margin_count = 0; let mut margin_count = 0;
let items = &mut self.items[start..]; let items = &mut self.items[start..];
let mut children = self.block_flow.base.children.random_access_mut();
for mut item in items { for mut item in items {
let kid = &mut self.block_flow.base.children[item.index]; let kid = children.get(item.index);
item.init_sizes(kid, container_size, self.main_mode); item.init_sizes(kid, container_size, self.main_mode);
let outer_main_size = item.outer_main_size(kid, self.main_mode); let outer_main_size = item.outer_main_size(kid, self.main_mode);
if total_line_size + outer_main_size > container_size && end != start && self.is_wrappable { if total_line_size + outer_main_size > container_size && end != start && self.is_wrappable {
@ -504,8 +505,10 @@ impl FlexFlow {
AxisSize::MinMax(ref constraint) => constraint.clamp(content_inline_size), AxisSize::MinMax(ref constraint) => constraint.clamp(content_inline_size),
AxisSize::Infinite => content_inline_size AxisSize::Infinite => content_inline_size
}; };
let mut children = self.block_flow.base.children.random_access_mut();
for kid in &mut self.items { for kid in &mut self.items {
let kid_base = flow::mut_base(&mut self.block_flow.base.children[kid.index]); let kid_base = flow::mut_base(children.get(kid.index));
kid_base.block_container_explicit_block_size = container_block_size; kid_base.block_container_explicit_block_size = container_block_size;
if kid_base.flags.contains(INLINE_POSITION_IS_STATIC) { if kid_base.flags.contains(INLINE_POSITION_IS_STATIC) {
// The inline-start margin edge of the child flow is at our inline-start content // The inline-start margin edge of the child flow is at our inline-start content
@ -598,8 +601,9 @@ impl FlexFlow {
_ => {} _ => {}
} }
let mut children = self.block_flow.base.children.random_access_mut();
for item in items.iter_mut() { for item in items.iter_mut() {
let mut block = self.block_flow.base.children[item.index].as_mut_block(); let mut block = children.get(item.index).as_mut_block();
block.base.block_container_writing_mode = container_mode; block.base.block_container_writing_mode = container_mode;
block.base.block_container_inline_size = inline_size; block.base.block_container_inline_size = inline_size;
@ -650,8 +654,10 @@ impl FlexFlow {
} else { } else {
self.block_flow.fragment.border_box.size.block self.block_flow.fragment.border_box.size.block
}; };
let mut children = self.block_flow.base.children.random_access_mut();
for item in &mut self.items { for item in &mut self.items {
let mut base = flow::mut_base(&mut self.block_flow.base.children[item.index]); let mut base = flow::mut_base(children.get(item.index));
if !self.main_reverse { if !self.main_reverse {
base.position.start.b = cur_b; base.position.start.b = cur_b;
cur_b = cur_b + base.position.size.block; cur_b = cur_b + base.position.size.block;
@ -672,15 +678,18 @@ impl FlexFlow {
let mut total_cross_size = Au(0); let mut total_cross_size = Au(0);
let mut line_interval = Au(0); let mut line_interval = Au(0);
{
let mut children = self.block_flow.base.children.random_access_mut();
for line in self.lines.iter_mut() { for line in self.lines.iter_mut() {
for item in &self.items[line.range.clone()] { for item in &self.items[line.range.clone()] {
let fragment = &self.block_flow.base.children[item.index].as_block().fragment; let fragment = &children.get(item.index).as_block().fragment;
line.cross_size = max(line.cross_size, line.cross_size = max(line.cross_size,
fragment.border_box.size.block + fragment.border_box.size.block +
fragment.margin.block_start_end()); fragment.margin.block_start_end());
} }
total_cross_size += line.cross_size; total_cross_size += line.cross_size;
} }
}
let box_border = self.block_flow.fragment.box_sizing_boundary(Direction::Block); let box_border = self.block_flow.fragment.box_sizing_boundary(Direction::Block);
let parent_container_size = let parent_container_size =
@ -726,9 +735,10 @@ impl FlexFlow {
} }
} }
let mut children = self.block_flow.base.children.random_access_mut();
for line in &self.lines { for line in &self.lines {
for item in self.items[line.range.clone()].iter_mut() { for item in self.items[line.range.clone()].iter_mut() {
let block = self.block_flow.base.children[item.index].as_mut_block(); let block = children.get(item.index).as_mut_block();
let auto_margin_count = item.auto_margin_count(block, Direction::Block); let auto_margin_count = item.auto_margin_count(block, Direction::Block);
let margin = block.fragment.style().logical_margin(); let margin = block.fragment.style().logical_margin();

View file

@ -5,7 +5,6 @@
use flow::Flow; use flow::Flow;
use flow_ref::{self, FlowRef}; use flow_ref::{self, FlowRef};
use std::collections::{LinkedList, linked_list}; use std::collections::{LinkedList, linked_list};
use std::ops::{Index, IndexMut};
/// This needs to be reworked now that we have dynamically-sized types in Rust. /// This needs to be reworked now that we have dynamically-sized types in Rust.
/// Until then, it's just a wrapper around LinkedList. /// Until then, it's just a wrapper around LinkedList.
@ -79,6 +78,20 @@ impl FlowList {
} }
} }
/// Provides a caching random-access iterator that yields mutable references. This is
/// guaranteed to perform no more than O(n) pointer chases.
///
/// SECURITY-NOTE(pcwalton): This does not hand out `FlowRef`s by design. Do not add a method
/// to do so! See the comment above in `FlowList`.
#[inline]
pub fn random_access_mut(&mut self) -> FlowListRandomAccessMut {
    // Reserve room for every child up front so the cache never reallocates
    // while flows are being memoized. The length must be read before
    // `iter_mut` takes the mutable borrow of `self.flows`.
    let cache = Vec::with_capacity(self.flows.len());
    FlowListRandomAccessMut {
        iterator: self.flows.iter_mut(),
        cache: cache,
    }
}
/// O(1) /// O(1)
#[inline] #[inline]
pub fn is_empty(&self) -> bool { pub fn is_empty(&self) -> bool {
@ -99,21 +112,6 @@ impl FlowList {
} }
} }
impl Index<usize> for FlowList {
/// FIXME(pcwalton): O(n)!
type Output = Flow;
fn index(&self, index: usize) -> &Flow {
&**self.flows.iter().nth(index).unwrap()
}
}
impl IndexMut<usize> for FlowList {
/// FIXME(pcwalton): O(n)!
fn index_mut(&mut self, index: usize) -> &mut Flow {
self.iter_mut().nth(index).unwrap()
}
}
impl<'a> DoubleEndedIterator for MutFlowListIterator<'a> { impl<'a> DoubleEndedIterator for MutFlowListIterator<'a> {
fn next_back(&mut self) -> Option<&'a mut Flow> { fn next_back(&mut self) -> Option<&'a mut Flow> {
self.it.next_back().map(flow_ref::deref_mut) self.it.next_back().map(flow_ref::deref_mut)
@ -132,3 +130,22 @@ impl<'a> Iterator for MutFlowListIterator<'a> {
self.it.size_hint() self.it.size_hint()
} }
} }
/// A caching random-access iterator that yields mutable references. This is guaranteed to perform
/// no more than O(n) pointer chases.
pub struct FlowListRandomAccessMut<'a> {
    // The underlying list iterator; advanced lazily as higher indices are requested.
    iterator: linked_list::IterMut<'a, FlowRef>,
    // Clones of every `FlowRef` yielded so far, indexed by position, so that
    // repeated lookups of already-visited flows need no further pointer chases.
    cache: Vec<FlowRef>,
}
impl<'a> FlowListRandomAccessMut<'a> {
    /// Returns a mutable reference to the flow at `index`, advancing the
    /// underlying iterator and memoizing each flow encountered along the way.
    ///
    /// Panics if `index` is beyond the end of the list.
    pub fn get<'b>(&'b mut self, index: usize) -> &'b mut Flow {
        // Pull flows off the iterator until the cache covers `index`.
        while self.cache.len() <= index {
            match self.iterator.next() {
                Some(flow) => self.cache.push(flow.clone()),
                None => panic!("Flow index out of range!"),
            }
        }
        flow_ref::deref_mut(&mut self.cache[index])
    }
}