mirror of
https://github.com/servo/servo.git
synced 2025-08-06 14:10:11 +01:00
Auto merge of #17142 - kenan-rhoton:LRUCacheArrayVecDeque, r=emilio
Move LRUCache to ArrayDeque crate We move LRUCache from using VecDeque to ArrayDeque to avoid using heap allocations. This relies on the fix in goandylok/arraydeque#4. --- <!-- Thank you for contributing to Servo! Please replace each `[ ]` by `[X]` when the step is complete, and replace `__` with appropriate data: --> - [X] `./mach build -d` does not report any errors - [X] `./mach test-tidy` does not report any errors - [X] These changes fix #17054 (github issue number if applicable). <!-- Either: --> - [ ] There are tests for these changes OR - [X] These changes do not require tests because the use cases are the same, only minimal implementation changes have been made <!-- Also, please make sure that "Allow edits from maintainers" checkbox is checked, so that we can help you if you get stuck somewhere along the way.--> <!-- Pull requests that do not address these steps are welcome, but they will require additional verification as part of the review process. --> --- I additionally ran test-unit, because I'm paranoid. <!-- Reviewable:start --> --- This change is [<img src="https://reviewable.io/review_button.svg" height="34" align="absmiddle" alt="Reviewable"/>](https://reviewable.io/reviews/servo/servo/17142) <!-- Reviewable:end -->
This commit is contained in:
commit
2dd67a9497
5 changed files with 31 additions and 20 deletions
|
@ -32,6 +32,7 @@ gecko_debug = ["nsstring_vendor/gecko_debug"]
|
|||
[dependencies]
|
||||
app_units = "0.4.1"
|
||||
arrayvec = "0.3.20"
|
||||
arraydeque = "0.2.3"
|
||||
atomic_refcell = "0.1"
|
||||
bitflags = "0.7"
|
||||
bit-vec = "0.4.3"
|
||||
|
|
|
@ -6,29 +6,28 @@
|
|||
|
||||
#![deny(missing_docs)]
|
||||
|
||||
use std::collections::VecDeque;
|
||||
use std::collections::vec_deque;
|
||||
extern crate arraydeque;
|
||||
use self::arraydeque::Array;
|
||||
use self::arraydeque::ArrayDeque;
|
||||
|
||||
/// An LRU cache used to store a set of at most `n` elements at the same time.
|
||||
///
|
||||
/// The most-recently-used entry is at index zero.
|
||||
pub struct LRUCache<K> {
|
||||
entries: VecDeque<K>,
|
||||
cache_size: usize,
|
||||
pub struct LRUCache <K: Array>{
|
||||
entries: ArrayDeque<K>,
|
||||
}
|
||||
|
||||
/// An iterator over the items of the LRU cache.
|
||||
pub type LRUCacheIterator<'a, K> = vec_deque::Iter<'a, K>;
|
||||
pub type LRUCacheIterator<'a, K> = arraydeque::Iter<'a, K>;
|
||||
|
||||
/// An iterator over the mutable items of the LRU cache.
|
||||
pub type LRUCacheMutIterator<'a, K> = vec_deque::IterMut<'a, K>;
|
||||
pub type LRUCacheMutIterator<'a, K> = arraydeque::IterMut<'a, K>;
|
||||
|
||||
impl<K: PartialEq> LRUCache<K> {
|
||||
impl<K: Array> LRUCache<K> {
|
||||
/// Create a new LRU cache with `size` elements at most.
|
||||
pub fn new(size: usize) -> Self {
|
||||
pub fn new() -> Self {
|
||||
LRUCache {
|
||||
entries: VecDeque::with_capacity(size),
|
||||
cache_size: size,
|
||||
entries: ArrayDeque::new(),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -49,22 +48,22 @@ impl<K: PartialEq> LRUCache<K> {
|
|||
|
||||
/// Iterate over the contents of this cache, from more to less recently
|
||||
/// used.
|
||||
pub fn iter(&self) -> vec_deque::Iter<K> {
|
||||
pub fn iter(&self) -> arraydeque::Iter<K::Item> {
|
||||
self.entries.iter()
|
||||
}
|
||||
|
||||
/// Iterate mutably over the contents of this cache.
|
||||
pub fn iter_mut(&mut self) -> vec_deque::IterMut<K> {
|
||||
pub fn iter_mut(&mut self) -> arraydeque::IterMut<K::Item> {
|
||||
self.entries.iter_mut()
|
||||
}
|
||||
|
||||
/// Insert a given key in the cache.
|
||||
pub fn insert(&mut self, key: K) {
|
||||
if self.entries.len() == self.cache_size {
|
||||
pub fn insert(&mut self, key: K::Item) {
|
||||
if self.entries.len() == self.entries.capacity() {
|
||||
self.entries.pop_back();
|
||||
}
|
||||
self.entries.push_front(key);
|
||||
debug_assert!(self.entries.len() <= self.cache_size);
|
||||
debug_assert!(self.entries.len() <= self.entries.capacity());
|
||||
}
|
||||
|
||||
/// Evict all elements from the cache.
|
||||
|
|
|
@ -349,7 +349,7 @@ pub struct SelectorFlagsMap<E: TElement> {
|
|||
map: FnvHashMap<SendElement<E>, ElementSelectorFlags>,
|
||||
/// An LRU cache to avoid hashmap lookups, which can be slow if the map
|
||||
/// gets big.
|
||||
cache: LRUCache<(SendElement<E>, ElementSelectorFlags)>,
|
||||
cache: LRUCache<[(SendElement<E>, ElementSelectorFlags); 4 + 1]>,
|
||||
}
|
||||
|
||||
#[cfg(debug_assertions)]
|
||||
|
@ -364,7 +364,7 @@ impl<E: TElement> SelectorFlagsMap<E> {
|
|||
pub fn new() -> Self {
|
||||
SelectorFlagsMap {
|
||||
map: FnvHashMap::default(),
|
||||
cache: LRUCache::new(4),
|
||||
cache: LRUCache::new(),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -309,14 +309,14 @@ pub enum StyleSharingResult {
|
|||
/// Note that this cache is flushed every time we steal work from the queue, so
|
||||
/// storing nodes here temporarily is safe.
|
||||
pub struct StyleSharingCandidateCache<E: TElement> {
|
||||
cache: LRUCache<StyleSharingCandidate<E>>,
|
||||
cache: LRUCache<[StyleSharingCandidate<E>; STYLE_SHARING_CANDIDATE_CACHE_SIZE + 1]>,
|
||||
}
|
||||
|
||||
impl<E: TElement> StyleSharingCandidateCache<E> {
|
||||
/// Create a new style sharing candidate cache.
|
||||
pub fn new() -> Self {
|
||||
StyleSharingCandidateCache {
|
||||
cache: LRUCache::new(STYLE_SHARING_CANDIDATE_CACHE_SIZE),
|
||||
cache: LRUCache::new(),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue