wasmtime/runtime/vm/instance/allocator/pooling/
index_allocator.rs

1//! Index/slot allocator policies for the pooling allocator.
2
3use crate::hash_map::{Entry, HashMap};
4use crate::prelude::*;
5use crate::runtime::vm::CompiledModuleId;
6use std::mem;
7use std::sync::Mutex;
8use wasmtime_environ::DefinedMemoryIndex;
9
/// Identifies a single slot managed by the pooling allocator.
///
/// A thin newtype over the raw `u32` slot number; use [`SlotId::index`] to
/// obtain a `usize` suitable for indexing slot-state tables.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct SlotId(pub u32);

impl SlotId {
    /// Returns this slot's position as a `usize` for container indexing.
    pub fn index(self) -> usize {
        self.0 as usize
    }
}
20
21/// A simple index allocator.
22///
23/// This index allocator doesn't do any module affinity or anything like that,
24/// however it is built on top of the `ModuleAffinityIndexAllocator` to save
25/// code (and code size).
26#[derive(Debug)]
27pub struct SimpleIndexAllocator(ModuleAffinityIndexAllocator);
28
29impl SimpleIndexAllocator {
30    pub fn new(capacity: u32) -> Self {
31        SimpleIndexAllocator(ModuleAffinityIndexAllocator::new(capacity, 0))
32    }
33
34    #[allow(unused)] // some cfgs don't use this
35    pub fn is_empty(&self) -> bool {
36        self.0.is_empty()
37    }
38
39    pub fn alloc(&self) -> Option<SlotId> {
40        self.0.alloc(None)
41    }
42
43    pub(crate) fn free(&self, index: SlotId) {
44        self.0.free(index);
45    }
46
47    #[cfg(test)]
48    #[allow(unused)]
49    pub(crate) fn testing_freelist(&self) -> Vec<SlotId> {
50        self.0.testing_freelist()
51    }
52}
53
/// A particular defined memory within a particular module.
///
/// Pairs the owning module's `CompiledModuleId` with the index of a defined
/// memory inside that module; used as the affinity key for slots.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct MemoryInModule(pub CompiledModuleId, pub DefinedMemoryIndex);
57
/// An index allocator that has configurable affinity between slots and modules
/// so that slots are often reused for the same module again.
///
/// All bookkeeping lives behind a `Mutex` so the allocator can be shared
/// across threads; see `Inner` for the actual state.
#[derive(Debug)]
pub struct ModuleAffinityIndexAllocator(Mutex<Inner>);
62
#[derive(Debug)]
struct Inner {
    /// Maximum number of "unused warm slots" which will be allowed during
    /// allocation.
    ///
    /// This is a user-configurable knob which can be used to influence the
    /// maximum number of unused slots at any one point in time. A "warm slot"
    /// is one that's considered having been previously allocated.
    max_unused_warm_slots: u32,

    /// Current count of "warm slots", or those that were previously allocated
    /// which are now no longer in use.
    ///
    /// This is the size of the `warm` list.
    unused_warm_slots: u32,

    /// A linked list (via indices) which enumerates all "warm and unused"
    /// slots, or those which have previously been allocated and then free'd.
    warm: List,

    /// Last slot that was allocated for the first time ever.
    ///
    /// This is initially 0 and is incremented during `pick_cold`. If this
    /// matches the total number of slots (`slot_state.len()`), there are no
    /// more cold slots left.
    last_cold: u32,

    /// The state of any given slot.
    ///
    /// Records indices in the above list (empty) or two lists (with affinity),
    /// and these indices are kept up-to-date to allow fast removal.
    slot_state: Vec<SlotState>,

    /// Affine slot management which tracks which slots are free and were last
    /// used with the specified `CompiledModuleId`.
    ///
    /// The `List` here is appended to during deallocation and removal happens
    /// from the tail during allocation.
    ///
    /// Invariant: lists stored here are never empty; once a module's last
    /// affine slot is taken its entry is removed entirely (see `Inner::remove`).
    module_affine: HashMap<MemoryInModule, List>,
}
102
/// A helper "linked list" data structure which is based on indices.
///
/// Both `head` and `tail` are `None` for an empty list. The per-node
/// prev/next pointers live in the `Link` values stored inside each slot's
/// `SlotState::UnusedWarm` metadata, making the list intrusive.
#[derive(Default, Debug)]
struct List {
    head: Option<SlotId>,
    tail: Option<SlotId>,
}
109
/// A helper data structure for an intrusive linked list, coupled with the
/// `List` type.
///
/// One `Link` is stored per slot per list it participates in; `None` at
/// either end marks the list boundary.
#[derive(Default, Debug, Copy, Clone)]
struct Link {
    prev: Option<SlotId>,
    next: Option<SlotId>,
}
117
/// The lifecycle state of a single slot.
#[derive(Clone, Debug)]
enum SlotState {
    /// This slot is currently in use and is affine to the specified module's memory.
    ///
    /// `None` means the allocation recorded no affinity (e.g. the slot was
    /// taken via `alloc_affine_and_clear_affinity`).
    Used(Option<MemoryInModule>),

    /// This slot is not currently used, and has never been used.
    UnusedCold,

    /// This slot is not currently used, but was previously allocated.
    ///
    /// The payload here is metadata about the lists that this slot is contained
    /// within.
    UnusedWarm(Unused),
}
132
133impl SlotState {
134    fn unwrap_unused(&mut self) -> &mut Unused {
135        match self {
136            SlotState::UnusedWarm(u) => u,
137            _ => unreachable!(),
138        }
139    }
140}
141
/// Free-list metadata for a slot in the `UnusedWarm` state.
#[derive(Default, Copy, Clone, Debug)]
struct Unused {
    /// Which module this slot was historically affine to, if any.
    affinity: Option<MemoryInModule>,

    /// Metadata about the linked list for all slots affine to `affinity`.
    ///
    /// Only meaningful when `affinity` is `Some`.
    affine_list_link: Link,

    /// Metadata within the `warm` list of the main allocator.
    unused_list_link: Link,
}
153
/// Strategy used by `_alloc` when choosing a slot.
enum AllocMode {
    /// Only pick a slot already affine to the requested module/memory, and
    /// record no affinity on the resulting allocation (used when clearing out
    /// a module's slots during teardown).
    ForceAffineAndClear,
    /// Any free slot will do; affinity is used as a preference only.
    AnySlot,
}
158
impl ModuleAffinityIndexAllocator {
    /// Create the default state for this strategy.
    ///
    /// `capacity` slots are managed in total, all initially "cold" (never
    /// allocated). `max_unused_warm_slots` caps how many freed slots may sit
    /// idle while retaining their module affinity.
    pub fn new(capacity: u32, max_unused_warm_slots: u32) -> Self {
        ModuleAffinityIndexAllocator(Mutex::new(Inner {
            last_cold: 0,
            max_unused_warm_slots,
            unused_warm_slots: 0,
            module_affine: HashMap::new(),
            slot_state: (0..capacity).map(|_| SlotState::UnusedCold).collect(),
            warm: List::default(),
        }))
    }

    /// How many slots can this allocator allocate?
    ///
    /// Note: this is the total capacity, not the number of live allocations.
    pub fn len(&self) -> usize {
        let inner = self.0.lock().unwrap();
        inner.slot_state.len()
    }

    /// Are zero slots in use right now?
    ///
    /// Note: this is an O(n) scan over all slot states.
    #[allow(unused)] // some cfgs don't use this
    pub fn is_empty(&self) -> bool {
        let inner = self.0.lock().unwrap();
        !inner
            .slot_state
            .iter()
            .any(|s| matches!(s, SlotState::Used(_)))
    }

    /// Allocate a new index from this allocator optionally using `id` as an
    /// affinity request if the allocation strategy supports it.
    ///
    /// Returns `None` if no more slots are available.
    pub fn alloc(&self, for_memory: Option<MemoryInModule>) -> Option<SlotId> {
        self._alloc(for_memory, AllocMode::AnySlot)
    }

    /// Attempts to allocate a guaranteed-affine slot to the module `id`
    /// specified.
    ///
    /// Returns `None` if there are no slots affine to `id`. The allocation of
    /// this slot will not record the affinity to `id`, instead simply listing
    /// it as taken. This is intended to be used for clearing out all affine
    /// slots to a module.
    pub fn alloc_affine_and_clear_affinity(
        &self,
        module_id: CompiledModuleId,
        memory_index: DefinedMemoryIndex,
    ) -> Option<SlotId> {
        self._alloc(
            Some(MemoryInModule(module_id, memory_index)),
            AllocMode::ForceAffineAndClear,
        )
    }

    /// Shared allocation path for `alloc` and
    /// `alloc_affine_and_clear_affinity`; see `AllocMode` for the difference.
    fn _alloc(&self, for_memory: Option<MemoryInModule>, mode: AllocMode) -> Option<SlotId> {
        let mut inner = self.0.lock().unwrap();
        let inner = &mut *inner;

        // As a first-pass always attempt an affine allocation. This will
        // succeed if any slots are considered affine to `module_id` (if it's
        // specified). Failing that something else is attempted to be chosen.
        let slot_id = inner.pick_affine(for_memory).or_else(|| {
            match mode {
                // If any slot is requested then this is a normal instantiation
                // looking for an index. Without any affine candidates there are
                // two options here:
                //
                // 1. Pick a slot amongst previously allocated slots
                // 2. Pick a slot that's never been used before
                //
                // The choice here is guided by the initial configuration of
                // `max_unused_warm_slots`. If our unused warm slots, which are
                // likely all affine, is below this threshold then the affinity
                // of the warm slots isn't tampered with and first a cold slot
                // is chosen. If the cold slot allocation fails, however, a warm
                // slot is evicted.
                //
                // The opposite happens when we're above our threshold for the
                // maximum number of warm slots, meaning that a warm slot is
                // attempted to be picked from first with a cold slot following
                // that. Note that the warm slot allocation in this case should
                // only fail if `max_unused_warm_slots` is 0, otherwise
                // `pick_warm` will always succeed.
                AllocMode::AnySlot => {
                    if inner.unused_warm_slots < inner.max_unused_warm_slots {
                        inner.pick_cold().or_else(|| inner.pick_warm())
                    } else {
                        inner.pick_warm().or_else(|| {
                            debug_assert!(inner.max_unused_warm_slots == 0);
                            inner.pick_cold()
                        })
                    }
                }

                // In this mode an affinity-based allocation is always performed
                // as the purpose here is to clear out slots relevant to
                // `module_id` during module teardown. This means that there's
                // no consulting non-affine slots in this path.
                AllocMode::ForceAffineAndClear => None,
            }
        })?;

        inner.slot_state[slot_id.index()] = SlotState::Used(match mode {
            AllocMode::ForceAffineAndClear => None,
            AllocMode::AnySlot => for_memory,
        });

        Some(slot_id)
    }

    /// Returns `index` to the free pool, marking it "warm" and recording its
    /// previous module affinity (if any) for future affine allocations.
    ///
    /// Panics if `index` is not currently in the `Used` state.
    pub(crate) fn free(&self, index: SlotId) {
        let mut inner = self.0.lock().unwrap();
        let inner = &mut *inner;
        let module_memory = match inner.slot_state[index.index()] {
            SlotState::Used(module_memory) => module_memory,
            _ => unreachable!(),
        };

        // Bump the number of warm slots since this slot is now considered
        // previously used. Afterwards append it to the linked list of all
        // unused and warm slots.
        inner.unused_warm_slots += 1;
        let unused_list_link = inner
            .warm
            .append(index, &mut inner.slot_state, |s| &mut s.unused_list_link);

        let affine_list_link = match module_memory {
            // If this slot is affine to a particular module then append this
            // index to the linked list for the affine module. Otherwise insert
            // a new one-element linked list.
            Some(module) => match inner.module_affine.entry(module) {
                Entry::Occupied(mut e) => e
                    .get_mut()
                    .append(index, &mut inner.slot_state, |s| &mut s.affine_list_link),
                Entry::Vacant(v) => {
                    v.insert(List::new(index));
                    Link::default()
                }
            },

            // If this slot has no affinity then the affine link is empty.
            None => Link::default(),
        };

        inner.slot_state[index.index()] = SlotState::UnusedWarm(Unused {
            affinity: module_memory,
            affine_list_link,
            unused_list_link,
        });
    }

    /// Return the number of empty slots available in this allocator.
    #[cfg(test)]
    pub fn num_empty_slots(&self) -> usize {
        let inner = self.0.lock().unwrap();
        let total_slots = inner.slot_state.len();
        // Empty = cold slots never handed out plus warm (freed) slots.
        (total_slots - inner.last_cold as usize) + inner.unused_warm_slots as usize
    }

    /// For testing only, we want to be able to assert what is on the single
    /// freelist, for the policies that keep just one.
    #[cfg(test)]
    #[allow(unused)]
    pub(crate) fn testing_freelist(&self) -> Vec<SlotId> {
        let inner = self.0.lock().unwrap();
        inner
            .warm
            .iter(&inner.slot_state, |s| &s.unused_list_link)
            .collect()
    }

    /// For testing only, get the list of all modules with at least one slot
    /// with affinity for that module.
    #[cfg(test)]
    pub(crate) fn testing_module_affinity_list(&self) -> Vec<MemoryInModule> {
        let inner = self.0.lock().unwrap();
        inner.module_affine.keys().copied().collect()
    }
}
339
impl Inner {
    /// Attempts to allocate a slot already affine to `id`, returning `None` if
    /// `id` is `None` or if there are no affine slots.
    fn pick_affine(&mut self, for_memory: Option<MemoryInModule>) -> Option<SlotId> {
        // Note that the `tail` is chosen here of the affine list as it's the
        // most recently used, which for affine allocations is what we want --
        // maximizing temporal reuse.
        let ret = self.module_affine.get(&for_memory?)?.tail?;
        self.remove(ret);
        Some(ret)
    }

    /// Picks the least-recently-used warm slot, if any, unlinking it from all
    /// free lists in the process.
    fn pick_warm(&mut self) -> Option<SlotId> {
        // Insertions into the `unused` list happen at the `tail`, so the
        // least-recently-used item will be at the head. That's our goal here,
        // pick the least-recently-used slot since something "warm" is being
        // evicted anyway.
        let head = self.warm.head?;
        self.remove(head);
        Some(head)
    }

    /// Unlinks `slot` from the `warm` list and, if it carries affinity, from
    /// its module's affine list, in preparation for marking the slot used.
    ///
    /// Panics (via `unwrap_unused`) if `slot` is not currently `UnusedWarm`.
    fn remove(&mut self, slot: SlotId) {
        // Decrement the size of the warm list, and additionally remove it from
        // the `warm` linked list.
        self.unused_warm_slots -= 1;
        self.warm
            .remove(slot, &mut self.slot_state, |u| &mut u.unused_list_link);

        // If this slot is affine to a module then additionally remove it from
        // that module's affinity linked list. Note that if the module's affine
        // list is empty then the module's entry in the map is completely
        // removed as well.
        let module = self.slot_state[slot.index()].unwrap_unused().affinity;
        if let Some(module) = module {
            let mut list = match self.module_affine.entry(module) {
                Entry::Occupied(e) => e,
                Entry::Vacant(_) => unreachable!(),
            };
            list.get_mut()
                .remove(slot, &mut self.slot_state, |u| &mut u.affine_list_link);

            if list.get_mut().head.is_none() {
                list.remove();
            }
        }
    }

    /// Hands out the next never-before-allocated slot in increasing index
    /// order, or `None` once every slot has been allocated at least once.
    fn pick_cold(&mut self) -> Option<SlotId> {
        if (self.last_cold as usize) == self.slot_state.len() {
            None
        } else {
            let ret = Some(SlotId(self.last_cold));
            self.last_cold += 1;
            ret
        }
    }
}
398
impl List {
    /// Creates a new one-element list pointing at `id`.
    fn new(id: SlotId) -> List {
        List {
            head: Some(id),
            tail: Some(id),
        }
    }

    /// Appends the `id` to this list whose links are determined by `link`.
    ///
    /// Returns the `Link` metadata for the newly appended node; the caller is
    /// responsible for storing it into `id`'s slot state.
    fn append(
        &mut self,
        id: SlotId,
        states: &mut [SlotState],
        link: fn(&mut Unused) -> &mut Link,
    ) -> Link {
        // This `id` is the new tail...
        let tail = mem::replace(&mut self.tail, Some(id));

        // If the tail was present, then update its `next` field to ourselves as
        // we've been appended, otherwise update the `head` since the list was
        // previously empty.
        match tail {
            Some(tail) => link(states[tail.index()].unwrap_unused()).next = Some(id),
            None => self.head = Some(id),
        }
        Link {
            prev: tail,
            next: None,
        }
    }

    /// Removes `id` from this list whose links are determined by `link`.
    ///
    /// Runs in O(1) by patching up the neighboring nodes' links (or the list's
    /// `head`/`tail` at the boundaries). Returns a copy of the removed slot's
    /// `Unused` metadata.
    fn remove(
        &mut self,
        id: SlotId,
        slot_state: &mut [SlotState],
        link: fn(&mut Unused) -> &mut Link,
    ) -> Unused {
        // Copy out the node's metadata first; the neighbor updates below
        // mutably borrow `slot_state` again.
        let mut state = *slot_state[id.index()].unwrap_unused();
        let next = link(&mut state).next;
        let prev = link(&mut state).prev;

        // If a `next` node is present for this link, then its previous was our
        // own previous now. Otherwise we are the tail so the new tail is our
        // previous.
        match next {
            Some(next) => link(slot_state[next.index()].unwrap_unused()).prev = prev,
            None => self.tail = prev,
        }

        // Same as the `next` node, except everything is in reverse.
        match prev {
            Some(prev) => link(slot_state[prev.index()].unwrap_unused()).next = next,
            None => self.head = next,
        }
        state
    }

    /// Walks the list from `head` to `tail`, yielding each `SlotId`.
    ///
    /// Also asserts the integrity of the doubly-linked structure (each node's
    /// `prev` pointer, and that the walk ends at `tail`) while iterating.
    #[cfg(test)]
    #[allow(unused)]
    fn iter<'a>(
        &'a self,
        states: &'a [SlotState],
        link: fn(&Unused) -> &Link,
    ) -> impl Iterator<Item = SlotId> + 'a {
        let mut cur = self.head;
        let mut prev = None;
        std::iter::from_fn(move || {
            if cur.is_none() {
                assert_eq!(prev, self.tail);
            }
            let ret = cur?;
            match &states[ret.index()] {
                SlotState::UnusedWarm(u) => {
                    assert_eq!(link(u).prev, prev);
                    prev = Some(ret);
                    cur = link(u).next
                }
                _ => unreachable!(),
            }
            Some(ret)
        })
    }
}
484
#[cfg(test)]
mod test {
    use super::*;
    // `EntityRef` brings `DefinedMemoryIndex::new` into scope.
    use wasmtime_environ::EntityRef;

    #[test]
    fn test_next_available_allocation_strategy() {
        // With zero warm slots allowed, cold slots are handed out in
        // increasing index order until capacity is exhausted.
        for size in 0..20 {
            let state = ModuleAffinityIndexAllocator::new(size, 0);
            assert_eq!(state.num_empty_slots(), usize::try_from(size).unwrap());
            for i in 0..size {
                assert_eq!(state.num_empty_slots(), usize::try_from(size - i).unwrap());
                assert_eq!(state.alloc(None).unwrap().index(), i as usize);
            }
            assert!(state.alloc(None).is_none());
        }
    }

    #[test]
    fn test_affinity_allocation_strategy() {
        let id1 = MemoryInModule(CompiledModuleId::new(), DefinedMemoryIndex::new(0));
        let id2 = MemoryInModule(CompiledModuleId::new(), DefinedMemoryIndex::new(0));
        let state = ModuleAffinityIndexAllocator::new(100, 100);

        let index1 = state.alloc(Some(id1)).unwrap();
        assert_eq!(index1.index(), 0);
        let index2 = state.alloc(Some(id2)).unwrap();
        assert_eq!(index2.index(), 1);
        assert_ne!(index1, index2);

        state.free(index1);
        assert_eq!(state.num_empty_slots(), 99);

        // Allocate to the same `index1` slot again.
        let index3 = state.alloc(Some(id1)).unwrap();
        assert_eq!(index3, index1);
        state.free(index3);

        state.free(index2);

        // Both id1 and id2 should have some slots with affinity.
        let affinity_modules = state.testing_module_affinity_list();
        assert_eq!(2, affinity_modules.len());
        assert!(affinity_modules.contains(&id1));
        assert!(affinity_modules.contains(&id2));

        // Now there is 1 free instance for id2 and 1 free instance
        // for id1, and 98 empty. Allocate 100 for id2. The first
        // should be equal to the one we know was previously used for
        // id2. The next 99 are arbitrary.
        assert_eq!(state.num_empty_slots(), 100);
        let mut indices = vec![];
        for _ in 0..100 {
            indices.push(state.alloc(Some(id2)).unwrap());
        }
        assert!(state.alloc(None).is_none());
        assert_eq!(indices[0], index2);
        assert_eq!(state.num_empty_slots(), 0);

        for i in indices {
            state.free(i);
        }

        // Now there should be no slots left with affinity for id1.
        let affinity_modules = state.testing_module_affinity_list();
        assert_eq!(1, affinity_modules.len());
        assert!(affinity_modules.contains(&id2));

        // Allocate an index we know previously had an instance but
        // now does not (list ran empty).
        let index = state.alloc(Some(id1)).unwrap();
        state.free(index);
    }

    #[test]
    fn clear_affine() {
        let id = CompiledModuleId::new();
        let memory_index = DefinedMemoryIndex::new(0);

        for max_unused_warm_slots in [0, 1, 2] {
            let state = ModuleAffinityIndexAllocator::new(100, max_unused_warm_slots);

            let index1 = state.alloc(Some(MemoryInModule(id, memory_index))).unwrap();
            let index2 = state.alloc(Some(MemoryInModule(id, memory_index))).unwrap();
            state.free(index2);
            state.free(index1);
            // Exactly two slots were affine to `id`; draining them both
            // succeeds and a third attempt finds nothing.
            assert!(state
                .alloc_affine_and_clear_affinity(id, memory_index)
                .is_some());
            assert!(state
                .alloc_affine_and_clear_affinity(id, memory_index)
                .is_some());
            assert_eq!(
                state.alloc_affine_and_clear_affinity(id, memory_index),
                None
            );
        }
    }

    #[test]
    fn test_affinity_allocation_strategy_random() {
        use rand::Rng;
        let mut rng = rand::thread_rng();

        let ids = std::iter::repeat_with(|| {
            MemoryInModule(CompiledModuleId::new(), DefinedMemoryIndex::new(0))
        })
        .take(10)
        .collect::<Vec<_>>();
        let state = ModuleAffinityIndexAllocator::new(1000, 1000);
        let mut allocated: Vec<SlotId> = vec![];
        let mut last_id = vec![None; 1000];

        let mut hits = 0;
        let amt = if cfg!(miri) { 100 } else { 100_000 };
        for _ in 0..amt {
            // Each iteration randomly either frees one allocation or makes a
            // new one; retry (via `continue`) when allocation comes up empty.
            loop {
                if !allocated.is_empty() && rng.gen_bool(0.5) {
                    let i = rng.gen_range(0..allocated.len());
                    let to_free_idx = allocated.swap_remove(i);
                    state.free(to_free_idx);
                } else {
                    let id = ids[rng.gen_range(0..ids.len())];
                    let index = match state.alloc(Some(id)) {
                        Some(id) => id,
                        None => continue,
                    };
                    if last_id[index.index()] == Some(id) {
                        hits += 1;
                    }
                    last_id[index.index()] = Some(id);
                    allocated.push(index);
                }
                break;
            }
        }

        // 10% reuse would be random chance (because we have 10 module
        // IDs). Check for at least double that to ensure some sort of
        // affinity is occurring.
        assert!(
            hits > (amt / 5),
            "expected at least 20000 (20%) ID-reuses but got {hits}"
        );
    }

    #[test]
    fn test_affinity_threshold() {
        let id1 = MemoryInModule(CompiledModuleId::new(), DefinedMemoryIndex::new(0));
        let id2 = MemoryInModule(CompiledModuleId::new(), DefinedMemoryIndex::new(0));
        let id3 = MemoryInModule(CompiledModuleId::new(), DefinedMemoryIndex::new(0));
        let state = ModuleAffinityIndexAllocator::new(10, 2);

        // Set some slot affinities
        assert_eq!(state.alloc(Some(id1)), Some(SlotId(0)));
        state.free(SlotId(0));
        assert_eq!(state.alloc(Some(id2)), Some(SlotId(1)));
        state.free(SlotId(1));

        // Only 2 slots are allowed to be unused and warm, so we're at our
        // threshold, meaning one must now be evicted.
        assert_eq!(state.alloc(Some(id3)), Some(SlotId(0)));
        state.free(SlotId(0));

        // pickup `id2` again, it should be affine.
        assert_eq!(state.alloc(Some(id2)), Some(SlotId(1)));

        // with only one warm slot available allocation for `id1` should pick a
        // fresh slot
        assert_eq!(state.alloc(Some(id1)), Some(SlotId(2)));

        state.free(SlotId(1));
        state.free(SlotId(2));

        // ensure everything stays affine
        assert_eq!(state.alloc(Some(id1)), Some(SlotId(2)));
        assert_eq!(state.alloc(Some(id2)), Some(SlotId(1)));
        assert_eq!(state.alloc(Some(id3)), Some(SlotId(0)));

        state.free(SlotId(1));
        state.free(SlotId(2));
        state.free(SlotId(0));

        // LRU is 1, so that should be picked
        assert_eq!(
            state.alloc(Some(MemoryInModule(
                CompiledModuleId::new(),
                DefinedMemoryIndex::new(0)
            ))),
            Some(SlotId(1))
        );

        // Pick another LRU entry, this time 2
        assert_eq!(
            state.alloc(Some(MemoryInModule(
                CompiledModuleId::new(),
                DefinedMemoryIndex::new(0)
            ))),
            Some(SlotId(2))
        );

        // This should preserve slot `0` and pick up something new
        assert_eq!(
            state.alloc(Some(MemoryInModule(
                CompiledModuleId::new(),
                DefinedMemoryIndex::new(0)
            ))),
            Some(SlotId(3))
        );

        state.free(SlotId(1));
        state.free(SlotId(2));
        state.free(SlotId(3));

        // for good measure make sure id3 is still affine
        assert_eq!(state.alloc(Some(id3)), Some(SlotId(0)));
    }
}