wasmtime/runtime/vm/instance/allocator/pooling/index_allocator.rs

//! Index/slot allocator policies for the pooling allocator.

use crate::hash_map::{Entry, HashMap};
use crate::prelude::*;
use crate::runtime::vm::CompiledModuleId;
use std::mem;
use std::sync::Mutex;
use wasmtime_environ::DefinedMemoryIndex;

/// A slot index.
#[derive(Hash, Clone, Copy, Debug, PartialEq, Eq)]
pub struct SlotId(pub u32);

impl SlotId {
    /// The index of this slot.
    pub fn index(self) -> usize {
        self.0 as usize
    }
}

/// A simple index allocator.
///
/// This index allocator doesn't do any module affinity or anything like that;
/// however, it is built on top of the `ModuleAffinityIndexAllocator` to save
/// code (and code size).
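///
/// A minimal usage sketch (illustrative only; the example is `ignore`d since
/// this type is crate-internal and not runnable as a doctest):
///
/// ```ignore
/// let allocator = SimpleIndexAllocator::new(2);
/// let a = allocator.alloc().unwrap(); // SlotId(0)
/// let _b = allocator.alloc().unwrap(); // SlotId(1)
/// assert!(allocator.alloc().is_none()); // capacity exhausted
/// allocator.free(a, 0);
/// assert_eq!(allocator.alloc(), Some(a)); // freed slots get reused
/// ```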
#[derive(Debug)]
pub struct SimpleIndexAllocator(ModuleAffinityIndexAllocator);

impl SimpleIndexAllocator {
    pub fn new(capacity: u32) -> Self {
        SimpleIndexAllocator(ModuleAffinityIndexAllocator::new(capacity, 0))
    }

    pub fn is_empty(&self) -> bool {
        self.0.is_empty()
    }

    pub fn alloc(&self) -> Option<SlotId> {
        self.0.alloc(None)
    }

    /// Frees the `index` slot to be available for allocation elsewhere.
    ///
    /// The `bytes_resident` argument is a counter to keep track of how many
    /// bytes are still resident in this slot, if any, for reporting later via
    /// the [`Self::unused_bytes_resident`] method.
    pub(crate) fn free(&self, index: SlotId, bytes_resident: usize) {
        self.0.free(index, bytes_resident);
    }

    /// Returns the number of previously-used slots in this allocator which are
    /// not currently in use.
    ///
    /// Note that this acquires a `Mutex` for synchronization at this time to
    /// read the internal counter information.
    pub fn unused_warm_slots(&self) -> u32 {
        self.0.unused_warm_slots()
    }

    /// Returns the number of bytes that are resident in previously-used slots
    /// in this allocator which are not currently in use.
    ///
    /// Note that this acquires a `Mutex` for synchronization at this time to
    /// read the internal counter information.
    pub fn unused_bytes_resident(&self) -> usize {
        self.0.unused_bytes_resident()
    }

    #[cfg(test)]
    pub(crate) fn testing_freelist(&self) -> Vec<SlotId> {
        self.0.testing_freelist()
    }
}

/// A particular defined memory within a particular module.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct MemoryInModule(pub CompiledModuleId, pub DefinedMemoryIndex);

/// An index allocator that has configurable affinity between slots and modules
/// so that slots are often reused for the same module again.
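///
/// A minimal illustrative sketch (not a runnable doctest; `module_id` below
/// stands for some pre-existing `CompiledModuleId`):
///
/// ```ignore
/// let state = ModuleAffinityIndexAllocator::new(100, 100);
/// let mem = MemoryInModule(module_id, DefinedMemoryIndex::new(0));
/// let slot = state.alloc(Some(mem)).unwrap();
/// state.free(slot, 0);
/// // A later allocation for the same module prefers the slot it last used.
/// assert_eq!(state.alloc(Some(mem)), Some(slot));
/// ```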
#[derive(Debug)]
pub struct ModuleAffinityIndexAllocator(Mutex<Inner>);

#[derive(Debug)]
struct Inner {
    /// Maximum number of "unused warm slots" which will be allowed during
    /// allocation.
    ///
    /// This is a user-configurable knob which can be used to influence the
    /// maximum number of unused slots at any one point in time. A "warm slot"
    /// is one that's considered to have been previously allocated.
    max_unused_warm_slots: u32,

    /// Current count of "warm slots", or those that were previously allocated
    /// which are now no longer in use.
    ///
    /// This is the size of the `warm` list.
    unused_warm_slots: u32,

    /// A linked list (via indices) which enumerates all "warm and unused"
    /// slots, or those which have previously been allocated and then free'd.
    warm: List,

    /// The index of the next slot to be allocated for the first time ever.
    ///
    /// This is initially 0 and is incremented during `pick_cold`. Once this
    /// reaches the total number of slots, there are no more cold slots left.
    last_cold: u32,

    /// The state of any given slot.
    ///
    /// For unused slots this records their position in the `warm` list (and,
    /// for slots with affinity, in a per-module list as well); these indices
    /// are kept up-to-date to allow fast removal.
    slot_state: Vec<SlotState>,

    /// Affine slot management which tracks which slots are free and were last
    /// used with the specified `CompiledModuleId`.
    ///
    /// The `List` here is appended to during deallocation and removal happens
    /// from the tail during allocation.
    module_affine: HashMap<MemoryInModule, List>,

    /// Cache for the sum of the `bytes_resident` of all `UnusedWarm` slots.
    unused_bytes_resident: usize,
}

/// A helper "linked list" data structure which is based on indices.
#[derive(Default, Debug)]
struct List {
    head: Option<SlotId>,
    tail: Option<SlotId>,
}

/// A helper data structure for an intrusive linked list, coupled with the
/// `List` type.
#[derive(Default, Debug, Copy, Clone)]
struct Link {
    prev: Option<SlotId>,
    next: Option<SlotId>,
}

#[derive(Clone, Debug)]
enum SlotState {
    /// This slot is currently in use and is affine to the specified module's memory.
    Used(Option<MemoryInModule>),

    /// This slot is not currently used, and has never been used.
    UnusedCold,

    /// This slot is not currently used, but was previously allocated.
    ///
    /// The payload here is metadata about the lists that this slot is contained
    /// within.
    UnusedWarm(Unused),
}

impl SlotState {
    fn unwrap_unused(&mut self) -> &mut Unused {
        match self {
            SlotState::UnusedWarm(u) => u,
            _ => unreachable!(),
        }
    }
}

#[derive(Default, Copy, Clone, Debug)]
struct Unused {
    /// Which module this slot was historically affine to, if any.
    affinity: Option<MemoryInModule>,

    /// Number of bytes in this slot which are currently kept resident (vs
    /// paged out).
    bytes_resident: usize,

    /// Metadata about the linked list for all slots affine to `affinity`.
    affine_list_link: Link,

    /// Metadata within the `warm` list of the main allocator.
    unused_list_link: Link,
}

enum AllocMode {
    /// Allocate only a slot that is already affine to the requested module's
    /// memory, clearing that affinity once the slot is picked. Used when
    /// clearing out a module's slots during teardown.
    ForceAffineAndClear,
    /// Allocate any available slot, preferring affine slots, then falling
    /// back to cold or warm slots per `max_unused_warm_slots`.
    AnySlot,
}

impl ModuleAffinityIndexAllocator {
    /// Create the default state for this strategy.
    pub fn new(capacity: u32, max_unused_warm_slots: u32) -> Self {
        ModuleAffinityIndexAllocator(Mutex::new(Inner {
            last_cold: 0,
            max_unused_warm_slots,
            unused_warm_slots: 0,
            module_affine: HashMap::new(),
            slot_state: (0..capacity).map(|_| SlotState::UnusedCold).collect(),
            warm: List::default(),
            unused_bytes_resident: 0,
        }))
    }

    /// How many slots can this allocator allocate?
    pub fn len(&self) -> usize {
        let inner = self.0.lock().unwrap();
        inner.slot_state.len()
    }

    /// Are zero slots in use right now?
    pub fn is_empty(&self) -> bool {
        let inner = self.0.lock().unwrap();
        !inner
            .slot_state
            .iter()
            .any(|s| matches!(s, SlotState::Used(_)))
    }

    /// Allocate a new index from this allocator, optionally using `for_memory`
    /// as an affinity request if the allocation strategy supports it.
    ///
    /// Returns `None` if no more slots are available.
    pub fn alloc(&self, for_memory: Option<MemoryInModule>) -> Option<SlotId> {
        self._alloc(for_memory, AllocMode::AnySlot)
    }

    /// Attempts to allocate a guaranteed-affine slot to the `module_id` and
    /// `memory_index` specified.
    ///
    /// Returns `None` if there are no slots affine to them. The allocation of
    /// this slot will not record the affinity, instead simply listing it as
    /// taken. This is intended to be used for clearing out all affine slots
    /// to a module.
    pub fn alloc_affine_and_clear_affinity(
        &self,
        module_id: CompiledModuleId,
        memory_index: DefinedMemoryIndex,
    ) -> Option<SlotId> {
        self._alloc(
            Some(MemoryInModule(module_id, memory_index)),
            AllocMode::ForceAffineAndClear,
        )
    }

    fn _alloc(&self, for_memory: Option<MemoryInModule>, mode: AllocMode) -> Option<SlotId> {
        let mut inner = self.0.lock().unwrap();
        let inner = &mut *inner;

        // As a first pass, always attempt an affine allocation. This will
        // succeed if any slots are considered affine to `for_memory` (if it's
        // specified). Failing that, something else is chosen.
        let slot_id = inner.pick_affine(for_memory).or_else(|| {
            match mode {
                // If any slot is requested then this is a normal instantiation
                // looking for an index. Without any affine candidates there are
                // two options here:
                //
                // 1. Pick a slot amongst previously allocated slots
                // 2. Pick a slot that's never been used before
                //
                // The choice here is guided by the initial configuration of
                // `max_unused_warm_slots`. If our count of unused warm slots,
                // which are likely all affine, is below this threshold then the
                // affinity of the warm slots isn't tampered with and a cold
                // slot is chosen first. If the cold slot allocation fails,
                // however, a warm slot is evicted.
                //
                // The opposite happens when we're above our threshold for the
                // maximum number of warm slots, meaning that a warm slot is
                // picked first with a cold slot following that. Note that the
                // warm slot allocation in this case should only fail if
                // `max_unused_warm_slots` is 0, otherwise `pick_warm` will
                // always succeed.
                AllocMode::AnySlot => {
                    if inner.unused_warm_slots < inner.max_unused_warm_slots {
                        inner.pick_cold().or_else(|| inner.pick_warm())
                    } else {
                        inner.pick_warm().or_else(|| {
                            debug_assert!(inner.max_unused_warm_slots == 0);
                            inner.pick_cold()
                        })
                    }
                }

                // In this mode an affinity-based allocation is always performed
                // as the purpose here is to clear out slots relevant to
                // `module_id` during module teardown. This means that
                // non-affine slots are never consulted in this path.
                AllocMode::ForceAffineAndClear => None,
            }
        })?;

        let slot = &mut inner.slot_state[slot_id.index()];
        if let SlotState::UnusedWarm(Unused { bytes_resident, .. }) = slot {
            inner.unused_bytes_resident -= *bytes_resident;
        }
        *slot = SlotState::Used(match mode {
            AllocMode::ForceAffineAndClear => None,
            AllocMode::AnySlot => for_memory,
        });

        Some(slot_id)
    }

    pub(crate) fn free(&self, index: SlotId, bytes_resident: usize) {
        let mut inner = self.0.lock().unwrap();
        let inner = &mut *inner;
        let module_memory = match inner.slot_state[index.index()] {
            SlotState::Used(module_memory) => module_memory,
            _ => unreachable!(),
        };

        // Bump the number of warm slots since this slot is now considered
        // previously used. Afterwards append it to the linked list of all
        // unused and warm slots.
        inner.unused_warm_slots += 1;
        let unused_list_link = inner
            .warm
            .append(index, &mut inner.slot_state, |s| &mut s.unused_list_link);

        let affine_list_link = match module_memory {
            // If this slot is affine to a particular module then append this
            // index to the linked list for the affine module. Otherwise insert
            // a new one-element linked list.
            Some(module) => match inner.module_affine.entry(module) {
                Entry::Occupied(mut e) => e
                    .get_mut()
                    .append(index, &mut inner.slot_state, |s| &mut s.affine_list_link),
                Entry::Vacant(v) => {
                    v.insert(List::new(index));
                    Link::default()
                }
            },

            // If this slot has no affinity then the affine link is empty.
            None => Link::default(),
        };

        inner.unused_bytes_resident += bytes_resident;
        inner.slot_state[index.index()] = SlotState::UnusedWarm(Unused {
            affinity: module_memory,
            bytes_resident,
            affine_list_link,
            unused_list_link,
        });
    }

    /// Return the number of empty slots available in this allocator.
    #[cfg(test)]
    pub fn num_empty_slots(&self) -> usize {
        let inner = self.0.lock().unwrap();
        let total_slots = inner.slot_state.len();
        (total_slots - inner.last_cold as usize) + inner.unused_warm_slots as usize
    }

    /// For testing only, we want to be able to assert what is on the single
    /// freelist, for the policies that keep just one.
    #[cfg(test)]
    pub(crate) fn testing_freelist(&self) -> Vec<SlotId> {
        let inner = self.0.lock().unwrap();
        inner
            .warm
            .iter(&inner.slot_state, |s| &s.unused_list_link)
            .collect()
    }

    /// For testing only, get the list of all modules with at least one slot
    /// with affinity for that module.
    #[cfg(test)]
    pub(crate) fn testing_module_affinity_list(&self) -> Vec<MemoryInModule> {
        let inner = self.0.lock().unwrap();
        inner.module_affine.keys().copied().collect()
    }

    /// Returns the number of previously-used slots in this allocator which are
    /// not currently in use.
    ///
    /// Note that this acquires a `Mutex` for synchronization at this time to
    /// read the internal counter information.
    pub fn unused_warm_slots(&self) -> u32 {
        self.0.lock().unwrap().unused_warm_slots
    }

    /// Returns the number of bytes that are resident in previously-used slots
    /// in this allocator which are not currently in use.
    ///
    /// Note that this acquires a `Mutex` for synchronization at this time to
    /// read the internal counter information.
    pub fn unused_bytes_resident(&self) -> usize {
        self.0.lock().unwrap().unused_bytes_resident
    }
}

impl Inner {
    /// Attempts to allocate a slot already affine to `for_memory`, returning
    /// `None` if `for_memory` is `None` or if there are no affine slots.
    fn pick_affine(&mut self, for_memory: Option<MemoryInModule>) -> Option<SlotId> {
        // Note that the `tail` of the affine list is chosen here as it's the
        // most recently used, which for affine allocations is what we want --
        // maximizing temporal reuse.
        let ret = self.module_affine.get(&for_memory?)?.tail?;
        self.remove(ret);
        Some(ret)
    }

    fn pick_warm(&mut self) -> Option<SlotId> {
        // Insertions into the `warm` list happen at the `tail`, so the
        // least-recently-used item will be at the head. That's our goal here:
        // pick the least-recently-used slot since something "warm" is being
        // evicted anyway.
        let head = self.warm.head?;
        self.remove(head);
        Some(head)
    }

    fn remove(&mut self, slot: SlotId) {
        // Decrement the count of warm slots, and additionally remove this slot
        // from the `warm` linked list.
        self.unused_warm_slots -= 1;
        self.warm
            .remove(slot, &mut self.slot_state, |u| &mut u.unused_list_link);

        // If this slot is affine to a module then additionally remove it from
        // that module's affinity linked list. Note that if the module's affine
        // list is empty then the module's entry in the map is completely
        // removed as well.
        let module = self.slot_state[slot.index()].unwrap_unused().affinity;
        if let Some(module) = module {
            let mut list = match self.module_affine.entry(module) {
                Entry::Occupied(e) => e,
                Entry::Vacant(_) => unreachable!(),
            };
            list.get_mut()
                .remove(slot, &mut self.slot_state, |u| &mut u.affine_list_link);

            if list.get_mut().head.is_none() {
                list.remove();
            }
        }
    }

    fn pick_cold(&mut self) -> Option<SlotId> {
        if (self.last_cold as usize) == self.slot_state.len() {
            None
        } else {
            let ret = Some(SlotId(self.last_cold));
            self.last_cold += 1;
            ret
        }
    }
}

impl List {
    /// Creates a new one-element list pointing at `id`.
    fn new(id: SlotId) -> List {
        List {
            head: Some(id),
            tail: Some(id),
        }
    }

    /// Appends the `id` to this list whose links are determined by `link`.
    fn append(
        &mut self,
        id: SlotId,
        states: &mut [SlotState],
        link: fn(&mut Unused) -> &mut Link,
    ) -> Link {
        // This `id` is the new tail...
        let tail = mem::replace(&mut self.tail, Some(id));

        // If the tail was present, then update its `next` field to point at
        // `id` since we've been appended; otherwise update the `head` since
        // the list was previously empty.
        match tail {
            Some(tail) => link(states[tail.index()].unwrap_unused()).next = Some(id),
            None => self.head = Some(id),
        }
        Link {
            prev: tail,
            next: None,
        }
    }

    /// Removes `id` from this list whose links are determined by `link`.
    fn remove(
        &mut self,
        id: SlotId,
        slot_state: &mut [SlotState],
        link: fn(&mut Unused) -> &mut Link,
    ) -> Unused {
        let mut state = *slot_state[id.index()].unwrap_unused();
        let next = link(&mut state).next;
        let prev = link(&mut state).prev;

        // If a `next` node is present for this link, then its `prev` now
        // becomes our own `prev`. Otherwise we were the tail, so the new tail
        // is our `prev`.
        match next {
            Some(next) => link(slot_state[next.index()].unwrap_unused()).prev = prev,
            None => self.tail = prev,
        }

        // Same as the `next` node, except everything is in reverse.
        match prev {
            Some(prev) => link(slot_state[prev.index()].unwrap_unused()).next = next,
            None => self.head = next,
        }
        state
    }

    #[cfg(test)]
    fn iter<'a>(
        &'a self,
        states: &'a [SlotState],
        link: fn(&Unused) -> &Link,
    ) -> impl Iterator<Item = SlotId> + 'a {
        let mut cur = self.head;
        let mut prev = None;
        std::iter::from_fn(move || {
            if cur.is_none() {
                assert_eq!(prev, self.tail);
            }
            let ret = cur?;
            match &states[ret.index()] {
                SlotState::UnusedWarm(u) => {
                    assert_eq!(link(u).prev, prev);
                    prev = Some(ret);
                    cur = link(u).next
                }
                _ => unreachable!(),
            }
            Some(ret)
        })
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use wasmtime_environ::EntityRef;

    #[test]
    fn test_next_available_allocation_strategy() {
        for size in 0..20 {
            let state = ModuleAffinityIndexAllocator::new(size, 0);
            assert_eq!(state.num_empty_slots(), usize::try_from(size).unwrap());
            for i in 0..size {
                assert_eq!(state.num_empty_slots(), usize::try_from(size - i).unwrap());
                assert_eq!(state.alloc(None).unwrap().index(), i as usize);
            }
            assert!(state.alloc(None).is_none());
        }
    }

    #[test]
    fn test_affinity_allocation_strategy() {
        let id1 = MemoryInModule(CompiledModuleId::new(), DefinedMemoryIndex::new(0));
        let id2 = MemoryInModule(CompiledModuleId::new(), DefinedMemoryIndex::new(0));
        let state = ModuleAffinityIndexAllocator::new(100, 100);

        let index1 = state.alloc(Some(id1)).unwrap();
        assert_eq!(index1.index(), 0);
        let index2 = state.alloc(Some(id2)).unwrap();
        assert_eq!(index2.index(), 1);
        assert_ne!(index1, index2);

        state.free(index1, 0);
        assert_eq!(state.num_empty_slots(), 99);

        // Allocate to the same `index1` slot again.
        let index3 = state.alloc(Some(id1)).unwrap();
        assert_eq!(index3, index1);
        state.free(index3, 0);

        state.free(index2, 0);

        // Both id1 and id2 should have some slots with affinity.
        let affinity_modules = state.testing_module_affinity_list();
        assert_eq!(2, affinity_modules.len());
        assert!(affinity_modules.contains(&id1));
        assert!(affinity_modules.contains(&id2));

        // Now there is 1 free instance for id2 and 1 free instance
        // for id1, and 98 empty. Allocate 100 for id2. The first
        // should be equal to the one we know was previously used for
        // id2. The next 99 are arbitrary.
        assert_eq!(state.num_empty_slots(), 100);
        let mut indices = vec![];
        for _ in 0..100 {
            indices.push(state.alloc(Some(id2)).unwrap());
        }
        assert!(state.alloc(None).is_none());
        assert_eq!(indices[0], index2);
        assert_eq!(state.num_empty_slots(), 0);

        for i in indices {
            state.free(i, 0);
        }

        // Now there should be no slots left with affinity for id1.
        let affinity_modules = state.testing_module_affinity_list();
        assert_eq!(1, affinity_modules.len());
        assert!(affinity_modules.contains(&id2));

        // Allocate an index we know previously had an instance but
        // now does not (list ran empty).
        let index = state.alloc(Some(id1)).unwrap();
        state.free(index, 0);
    }

    #[test]
    fn clear_affine() {
        let id = CompiledModuleId::new();
        let memory_index = DefinedMemoryIndex::new(0);

        for max_unused_warm_slots in [0, 1, 2] {
            let state = ModuleAffinityIndexAllocator::new(100, max_unused_warm_slots);

            let index1 = state.alloc(Some(MemoryInModule(id, memory_index))).unwrap();
            let index2 = state.alloc(Some(MemoryInModule(id, memory_index))).unwrap();
            state.free(index2, 0);
            state.free(index1, 0);
            assert!(
                state
                    .alloc_affine_and_clear_affinity(id, memory_index)
                    .is_some()
            );
            assert!(
                state
                    .alloc_affine_and_clear_affinity(id, memory_index)
                    .is_some()
            );
            assert_eq!(
                state.alloc_affine_and_clear_affinity(id, memory_index),
                None
            );
        }
    }

    #[test]
    fn test_affinity_allocation_strategy_random() {
        use rand::Rng;
        let mut rng = rand::rng();

        let ids = std::iter::repeat_with(|| {
            MemoryInModule(CompiledModuleId::new(), DefinedMemoryIndex::new(0))
        })
        .take(10)
        .collect::<Vec<_>>();
        let state = ModuleAffinityIndexAllocator::new(1000, 1000);
        let mut allocated: Vec<SlotId> = vec![];
        let mut last_id = vec![None; 1000];

        let mut hits = 0;
        let amt = if cfg!(miri) { 100 } else { 100_000 };
        for _ in 0..amt {
            loop {
                if !allocated.is_empty() && rng.random_bool(0.5) {
                    let i = rng.random_range(0..allocated.len());
                    let to_free_idx = allocated.swap_remove(i);
                    state.free(to_free_idx, 0);
                } else {
                    let id = ids[rng.random_range(0..ids.len())];
                    let index = match state.alloc(Some(id)) {
                        Some(id) => id,
                        None => continue,
                    };
                    if last_id[index.index()] == Some(id) {
                        hits += 1;
                    }
                    last_id[index.index()] = Some(id);
                    allocated.push(index);
                }
                break;
            }
        }

        // 10% reuse would be random chance (because we have 10 module
        // IDs). Check for at least double that to ensure some sort of
        // affinity is occurring.
        assert!(
            hits > (amt / 5),
            "expected at least 20000 (20%) ID-reuses but got {hits}"
        );
    }

    #[test]
    fn test_affinity_threshold() {
        let id1 = MemoryInModule(CompiledModuleId::new(), DefinedMemoryIndex::new(0));
        let id2 = MemoryInModule(CompiledModuleId::new(), DefinedMemoryIndex::new(0));
        let id3 = MemoryInModule(CompiledModuleId::new(), DefinedMemoryIndex::new(0));
        let state = ModuleAffinityIndexAllocator::new(10, 2);

        // Set some slot affinities.
        assert_eq!(state.alloc(Some(id1)), Some(SlotId(0)));
        state.free(SlotId(0), 0);
        assert_eq!(state.alloc(Some(id2)), Some(SlotId(1)));
        state.free(SlotId(1), 0);

        // Only 2 slots are allowed to be unused and warm, so we're at our
        // threshold, meaning one must now be evicted.
        assert_eq!(state.alloc(Some(id3)), Some(SlotId(0)));
        state.free(SlotId(0), 0);

        // Pick up `id2` again; it should be affine.
        assert_eq!(state.alloc(Some(id2)), Some(SlotId(1)));

        // With only one warm slot available, allocation for `id1` should pick
        // a fresh slot.
        assert_eq!(state.alloc(Some(id1)), Some(SlotId(2)));

        state.free(SlotId(1), 0);
        state.free(SlotId(2), 0);

        // Ensure everything stays affine.
        assert_eq!(state.alloc(Some(id1)), Some(SlotId(2)));
        assert_eq!(state.alloc(Some(id2)), Some(SlotId(1)));
        assert_eq!(state.alloc(Some(id3)), Some(SlotId(0)));

        state.free(SlotId(1), 0);
        state.free(SlotId(2), 0);
        state.free(SlotId(0), 0);

        // LRU is 1, so that should be picked.
        assert_eq!(
            state.alloc(Some(MemoryInModule(
                CompiledModuleId::new(),
                DefinedMemoryIndex::new(0)
            ))),
            Some(SlotId(1))
        );

        // Pick another LRU entry, this time 2.
        assert_eq!(
            state.alloc(Some(MemoryInModule(
                CompiledModuleId::new(),
                DefinedMemoryIndex::new(0)
            ))),
            Some(SlotId(2))
        );

        // This should preserve slot `0` and pick up something new.
        assert_eq!(
            state.alloc(Some(MemoryInModule(
                CompiledModuleId::new(),
                DefinedMemoryIndex::new(0)
            ))),
            Some(SlotId(3))
        );

        state.free(SlotId(1), 0);
        state.free(SlotId(2), 0);
        state.free(SlotId(3), 0);

        // For good measure, make sure id3 is still affine.
        assert_eq!(state.alloc(Some(id3)), Some(SlotId(0)));
    }

    #[test]
    fn test_freelist() {
        let allocator = SimpleIndexAllocator::new(10);
        assert_eq!(allocator.testing_freelist(), []);
        let a = allocator.alloc().unwrap();
        assert_eq!(allocator.testing_freelist(), []);
        allocator.free(a, 0);
        assert_eq!(allocator.testing_freelist(), [a]);
        assert_eq!(allocator.alloc(), Some(a));
        assert_eq!(allocator.testing_freelist(), []);
        let b = allocator.alloc().unwrap();
        assert_eq!(allocator.testing_freelist(), []);
        allocator.free(b, 0);
        assert_eq!(allocator.testing_freelist(), [b]);
        allocator.free(a, 0);
        assert_eq!(allocator.testing_freelist(), [b, a]);
    }