wasmtime/runtime/vm/instance/allocator/pooling/index_allocator.rs

//! Index/slot allocator policies for the pooling allocator.

use crate::hash_map::{Entry, HashMap};
use crate::prelude::*;
use crate::runtime::vm::CompiledModuleId;
use std::mem;
use std::sync::Mutex;
use wasmtime_environ::DefinedMemoryIndex;

/// A slot index.
#[derive(Hash, Clone, Copy, Debug, PartialEq, Eq)]
pub struct SlotId(pub u32);

impl SlotId {
    /// The index of this slot.
    pub fn index(self) -> usize {
        self.0 as usize
    }
}

/// A simple index allocator.
///
/// This index allocator doesn't do any module affinity tracking; it is
/// nevertheless built on top of `ModuleAffinityIndexAllocator` to save code
/// (and code size).
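///
/// A minimal usage sketch (hypothetical, marked `ignore` since these types
/// are crate-internal):
///
/// ```ignore
/// let allocator = SimpleIndexAllocator::new(4);
/// let slot = allocator.alloc().unwrap();
/// // ... use the slot ...
/// allocator.free(slot);
/// ```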
#[derive(Debug)]
pub struct SimpleIndexAllocator(ModuleAffinityIndexAllocator);

impl SimpleIndexAllocator {
    pub fn new(capacity: u32) -> Self {
        SimpleIndexAllocator(ModuleAffinityIndexAllocator::new(capacity, 0))
    }

    pub fn is_empty(&self) -> bool {
        self.0.is_empty()
    }

    pub fn alloc(&self) -> Option<SlotId> {
        self.0.alloc(None)
    }

    pub(crate) fn free(&self, index: SlotId) {
        self.0.free(index);
    }

    #[cfg(test)]
    pub(crate) fn testing_freelist(&self) -> Vec<SlotId> {
        self.0.testing_freelist()
    }
}

/// A particular defined memory within a particular module.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct MemoryInModule(pub CompiledModuleId, pub DefinedMemoryIndex);

/// An index allocator that has configurable affinity between slots and modules
/// so that slots are often reused for the same module again.
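///
/// A sketch of the affinity behavior (hypothetical, marked `ignore` since
/// these types are crate-internal; `module_id` stands in for a
/// `CompiledModuleId`):
///
/// ```ignore
/// let allocator = ModuleAffinityIndexAllocator::new(10, 10);
/// let id = MemoryInModule(module_id, DefinedMemoryIndex::new(0));
/// let slot = allocator.alloc(Some(id)).unwrap();
/// allocator.free(slot);
/// // A subsequent request with the same affinity reuses the same slot.
/// assert_eq!(allocator.alloc(Some(id)), Some(slot));
/// ```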
#[derive(Debug)]
pub struct ModuleAffinityIndexAllocator(Mutex<Inner>);

#[derive(Debug)]
struct Inner {
    /// Maximum number of "unused warm slots" which will be allowed during
    /// allocation.
    ///
    /// This is a user-configurable knob which can be used to influence the
    /// maximum number of unused slots at any one point in time. A "warm slot"
    /// is one that is considered to have been previously allocated.
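    ///
    /// For example, with `max_unused_warm_slots = 2`, once two freed slots
    /// are sitting idle, a subsequent non-affine allocation will evict the
    /// least-recently-used of them rather than claim a never-used cold slot.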
    max_unused_warm_slots: u32,

    /// Current count of "warm slots", or those that were previously allocated
    /// and are now no longer in use.
    ///
    /// This is the size of the `warm` list.
    unused_warm_slots: u32,

    /// A linked list (via indices) which enumerates all "warm and unused"
    /// slots, or those which have previously been allocated and then freed.
    warm: List,

    /// Last slot that was allocated for the first time ever.
    ///
    /// This is initially 0 and is incremented during `pick_cold`. Once this
    /// reaches the total number of slots, there are no more cold slots left.
    last_cold: u32,

    /// The state of any given slot.
    ///
    /// For unused warm slots this records the slot's position in the `warm`
    /// list (and, if the slot has affinity, in the corresponding affine list
    /// as well); these indices are kept up-to-date to allow fast removal.
    slot_state: Vec<SlotState>,

    /// Affine slot management which tracks which slots are free and were last
    /// used with the specified `MemoryInModule`.
    ///
    /// The `List` here is appended to during deallocation and removal happens
    /// from the tail during allocation.
    module_affine: HashMap<MemoryInModule, List>,
}

/// A helper "linked list" data structure which is based on indices.
#[derive(Default, Debug)]
struct List {
    head: Option<SlotId>,
    tail: Option<SlotId>,
}

/// A helper data structure for an intrusive linked list, coupled with the
/// `List` type.
#[derive(Default, Debug, Copy, Clone)]
struct Link {
    prev: Option<SlotId>,
    next: Option<SlotId>,
}
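
// For example, a list holding slots 3 then 5 is
// `List { head: Some(SlotId(3)), tail: Some(SlotId(5)) }`: slot 3's link is
// `Link { prev: None, next: Some(SlotId(5)) }` and slot 5's link is
// `Link { prev: Some(SlotId(3)), next: None }`. The `Link`s themselves live
// in each slot's `SlotState::UnusedWarm` payload.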

#[derive(Clone, Debug)]
enum SlotState {
    /// This slot is currently in use and, if the payload is `Some`, is affine
    /// to the specified module's memory.
    Used(Option<MemoryInModule>),

    /// This slot is not currently used, and has never been used.
    UnusedCold,

    /// This slot is not currently used, but was previously allocated.
    ///
    /// The payload here is metadata about the lists that this slot is
    /// contained within.
    UnusedWarm(Unused),
}

impl SlotState {
    fn unwrap_unused(&mut self) -> &mut Unused {
        match self {
            SlotState::UnusedWarm(u) => u,
            _ => unreachable!(),
        }
    }
}

#[derive(Default, Copy, Clone, Debug)]
struct Unused {
    /// Which module this slot was historically affine to, if any.
    affinity: Option<MemoryInModule>,

    /// Metadata about the linked list for all slots affine to `affinity`.
    affine_list_link: Link,

    /// Metadata within the `warm` list of the main allocator.
    unused_list_link: Link,
}

/// How `_alloc` should choose a slot.
enum AllocMode {
    /// Only consider slots already affine to the requested module memory, and
    /// don't re-record affinity for the returned slot.
    ForceAffineAndClear,
    /// Consider any slot, preferring affine slots when available.
    AnySlot,
}

impl ModuleAffinityIndexAllocator {
    /// Create the default state for this strategy.
    pub fn new(capacity: u32, max_unused_warm_slots: u32) -> Self {
        ModuleAffinityIndexAllocator(Mutex::new(Inner {
            last_cold: 0,
            max_unused_warm_slots,
            unused_warm_slots: 0,
            module_affine: HashMap::new(),
            slot_state: (0..capacity).map(|_| SlotState::UnusedCold).collect(),
            warm: List::default(),
        }))
    }

    /// How many slots can this allocator allocate?
    pub fn len(&self) -> usize {
        let inner = self.0.lock().unwrap();
        inner.slot_state.len()
    }

    /// Are zero slots in use right now?
    pub fn is_empty(&self) -> bool {
        let inner = self.0.lock().unwrap();
        !inner
            .slot_state
            .iter()
            .any(|s| matches!(s, SlotState::Used(_)))
    }

    /// Allocate a new index from this allocator, optionally using `for_memory`
    /// as an affinity request if the allocation strategy supports it.
    ///
    /// Returns `None` if no more slots are available.
    pub fn alloc(&self, for_memory: Option<MemoryInModule>) -> Option<SlotId> {
        self._alloc(for_memory, AllocMode::AnySlot)
    }

    /// Attempts to allocate a slot that is guaranteed to be affine to the
    /// specified module's memory.
    ///
    /// Returns `None` if there are no such affine slots. The allocation of
    /// this slot will not re-record the affinity, instead simply listing the
    /// slot as taken. This is intended to be used for clearing out all affine
    /// slots to a module.
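    ///
    /// A hypothetical teardown sketch (`allocator`, `module`, and `memory`
    /// stand in for values owned by the caller):
    ///
    /// ```ignore
    /// while let Some(slot) = allocator.alloc_affine_and_clear_affinity(module, memory) {
    ///     // ... decommit the slot's resources ...
    ///     allocator.free(slot);
    /// }
    /// ```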
    pub fn alloc_affine_and_clear_affinity(
        &self,
        module_id: CompiledModuleId,
        memory_index: DefinedMemoryIndex,
    ) -> Option<SlotId> {
        self._alloc(
            Some(MemoryInModule(module_id, memory_index)),
            AllocMode::ForceAffineAndClear,
        )
    }

    fn _alloc(&self, for_memory: Option<MemoryInModule>, mode: AllocMode) -> Option<SlotId> {
        let mut inner = self.0.lock().unwrap();
        let inner = &mut *inner;

        // As a first pass, always attempt an affine allocation. This will
        // succeed if any slots are considered affine to `for_memory` (if it's
        // specified). Failing that, another slot is chosen below.
        let slot_id = inner.pick_affine(for_memory).or_else(|| {
            match mode {
                // If any slot is requested then this is a normal instantiation
                // looking for an index. Without any affine candidates there are
                // two options here:
                //
                // 1. Pick a slot amongst previously allocated slots
                // 2. Pick a slot that's never been used before
                //
                // The choice here is guided by the initial configuration of
                // `max_unused_warm_slots`. If the number of unused warm slots,
                // which are likely all affine, is below this threshold then the
                // affinity of the warm slots isn't tampered with and a cold
                // slot is chosen first. If the cold slot allocation fails,
                // however, a warm slot is evicted.
                //
                // The opposite happens when we're above our threshold for the
                // maximum number of warm slots, meaning that a warm slot is
                // attempted to be picked from first with a cold slot following
                // that. Note that the warm slot allocation in this case should
                // only fail if `max_unused_warm_slots` is 0, otherwise
                // `pick_warm` will always succeed.
                AllocMode::AnySlot => {
                    if inner.unused_warm_slots < inner.max_unused_warm_slots {
                        inner.pick_cold().or_else(|| inner.pick_warm())
                    } else {
                        inner.pick_warm().or_else(|| {
                            debug_assert!(inner.max_unused_warm_slots == 0);
                            inner.pick_cold()
                        })
                    }
                }

                // In this mode an affinity-based allocation is always performed
                // as the purpose here is to clear out slots relevant to
                // `for_memory` during module teardown. This means that
                // non-affine slots are never consulted in this path.
                AllocMode::ForceAffineAndClear => None,
            }
        })?;

        inner.slot_state[slot_id.index()] = SlotState::Used(match mode {
            AllocMode::ForceAffineAndClear => None,
            AllocMode::AnySlot => for_memory,
        });

        Some(slot_id)
    }

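    /// Frees a previously-allocated slot, marking it "warm" and remembering
    /// its module affinity (if any) for future allocations.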
    pub(crate) fn free(&self, index: SlotId) {
        let mut inner = self.0.lock().unwrap();
        let inner = &mut *inner;
        let module_memory = match inner.slot_state[index.index()] {
            SlotState::Used(module_memory) => module_memory,
            _ => unreachable!(),
        };

        // Bump the number of warm slots since this slot is now considered
        // previously used. Afterwards append it to the linked list of all
        // unused and warm slots.
        inner.unused_warm_slots += 1;
        let unused_list_link = inner
            .warm
            .append(index, &mut inner.slot_state, |s| &mut s.unused_list_link);

        let affine_list_link = match module_memory {
            // If this slot is affine to a particular module then append this
            // index to the linked list for the affine module. Otherwise insert
            // a new one-element linked list.
            Some(module) => match inner.module_affine.entry(module) {
                Entry::Occupied(mut e) => e
                    .get_mut()
                    .append(index, &mut inner.slot_state, |s| &mut s.affine_list_link),
                Entry::Vacant(v) => {
                    v.insert(List::new(index));
                    Link::default()
                }
            },

            // If this slot has no affinity then the affine link is empty.
            None => Link::default(),
        };

        inner.slot_state[index.index()] = SlotState::UnusedWarm(Unused {
            affinity: module_memory,
            affine_list_link,
            unused_list_link,
        });
    }

    /// Return the number of empty slots available in this allocator.
    #[cfg(test)]
    pub fn num_empty_slots(&self) -> usize {
        let inner = self.0.lock().unwrap();
        let total_slots = inner.slot_state.len();
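        // Cold slots (never handed out) are those at `last_cold..`, and each
        // unused warm slot is free for reuse as well.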
        (total_slots - inner.last_cold as usize) + inner.unused_warm_slots as usize
    }

    /// For testing only, we want to be able to assert what is on the single
    /// freelist, for the policies that keep just one.
    #[cfg(test)]
    pub(crate) fn testing_freelist(&self) -> Vec<SlotId> {
        let inner = self.0.lock().unwrap();
        inner
            .warm
            .iter(&inner.slot_state, |s| &s.unused_list_link)
            .collect()
    }

    /// For testing only, get the list of all modules with at least one slot
    /// with affinity for that module.
    #[cfg(test)]
    pub(crate) fn testing_module_affinity_list(&self) -> Vec<MemoryInModule> {
        let inner = self.0.lock().unwrap();
        inner.module_affine.keys().copied().collect()
    }
}

impl Inner {
    /// Attempts to allocate a slot already affine to `for_memory`, returning
    /// `None` if `for_memory` is `None` or if there are no affine slots.
    fn pick_affine(&mut self, for_memory: Option<MemoryInModule>) -> Option<SlotId> {
        // Note that the `tail` of the affine list is chosen here as it's the
        // most recently used, which for affine allocations is what we want:
        // maximizing temporal reuse.
        let ret = self.module_affine.get(&for_memory?)?.tail?;
        self.remove(ret);
        Some(ret)
    }

    fn pick_warm(&mut self) -> Option<SlotId> {
        // Insertions into the `warm` list happen at the `tail`, so the
        // least-recently-used item will be at the head. That's our goal here:
        // pick the least-recently-used slot, since something "warm" is being
        // evicted anyway.
        let head = self.warm.head?;
        self.remove(head);
        Some(head)
    }

    fn remove(&mut self, slot: SlotId) {
        // Decrement the size of the warm list, and additionally remove this
        // slot from the `warm` linked list.
        self.unused_warm_slots -= 1;
        self.warm
            .remove(slot, &mut self.slot_state, |u| &mut u.unused_list_link);

        // If this slot is affine to a module then additionally remove it from
        // that module's affinity linked list. Note that if the module's affine
        // list is empty then the module's entry in the map is completely
        // removed as well.
        let module = self.slot_state[slot.index()].unwrap_unused().affinity;
        if let Some(module) = module {
            let mut list = match self.module_affine.entry(module) {
                Entry::Occupied(e) => e,
                Entry::Vacant(_) => unreachable!(),
            };
            list.get_mut()
                .remove(slot, &mut self.slot_state, |u| &mut u.affine_list_link);

            if list.get_mut().head.is_none() {
                list.remove();
            }
        }
    }

    fn pick_cold(&mut self) -> Option<SlotId> {
        if (self.last_cold as usize) == self.slot_state.len() {
            None
        } else {
            let ret = Some(SlotId(self.last_cold));
            self.last_cold += 1;
            ret
        }
    }
}

impl List {
    /// Creates a new one-element list pointing at `id`.
    fn new(id: SlotId) -> List {
        List {
            head: Some(id),
            tail: Some(id),
        }
    }

    /// Appends the `id` to this list whose links are determined by `link`.
    fn append(
        &mut self,
        id: SlotId,
        states: &mut [SlotState],
        link: fn(&mut Unused) -> &mut Link,
    ) -> Link {
        // This `id` is the new tail...
        let tail = mem::replace(&mut self.tail, Some(id));

        // If a previous tail was present, update its `next` field to point at
        // `id` since it's been appended; otherwise update `head` since the
        // list was previously empty.
        match tail {
            Some(tail) => link(states[tail.index()].unwrap_unused()).next = Some(id),
            None => self.head = Some(id),
        }
        Link {
            prev: tail,
            next: None,
        }
    }

    /// Removes `id` from this list whose links are determined by `link`.
    fn remove(
        &mut self,
        id: SlotId,
        slot_state: &mut [SlotState],
        link: fn(&mut Unused) -> &mut Link,
    ) -> Unused {
        let mut state = *slot_state[id.index()].unwrap_unused();
        let next = link(&mut state).next;
        let prev = link(&mut state).prev;

        // If a `next` node is present for this link, then its `prev` pointer
        // now becomes our own `prev`. Otherwise we were the tail, so the new
        // tail is our `prev`.
        match next {
            Some(next) => link(slot_state[next.index()].unwrap_unused()).prev = prev,
            None => self.tail = prev,
        }

        // Same as the `next` node, except everything is in reverse.
        match prev {
            Some(prev) => link(slot_state[prev.index()].unwrap_unused()).next = next,
            None => self.head = next,
        }
        state
    }

    #[cfg(test)]
    fn iter<'a>(
        &'a self,
        states: &'a [SlotState],
        link: fn(&Unused) -> &Link,
    ) -> impl Iterator<Item = SlotId> + 'a {
        let mut cur = self.head;
        let mut prev = None;
        std::iter::from_fn(move || {
            if cur.is_none() {
                assert_eq!(prev, self.tail);
            }
            let ret = cur?;
            match &states[ret.index()] {
                SlotState::UnusedWarm(u) => {
                    assert_eq!(link(u).prev, prev);
                    prev = Some(ret);
                    cur = link(u).next
                }
                _ => unreachable!(),
            }
            Some(ret)
        })
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use wasmtime_environ::EntityRef;

    #[test]
    fn test_next_available_allocation_strategy() {
        for size in 0..20 {
            let state = ModuleAffinityIndexAllocator::new(size, 0);
            assert_eq!(state.num_empty_slots(), usize::try_from(size).unwrap());
            for i in 0..size {
                assert_eq!(state.num_empty_slots(), usize::try_from(size - i).unwrap());
                assert_eq!(state.alloc(None).unwrap().index(), i as usize);
            }
            assert!(state.alloc(None).is_none());
        }
    }

    #[test]
    fn test_affinity_allocation_strategy() {
        let id1 = MemoryInModule(CompiledModuleId::new(), DefinedMemoryIndex::new(0));
        let id2 = MemoryInModule(CompiledModuleId::new(), DefinedMemoryIndex::new(0));
        let state = ModuleAffinityIndexAllocator::new(100, 100);

        let index1 = state.alloc(Some(id1)).unwrap();
        assert_eq!(index1.index(), 0);
        let index2 = state.alloc(Some(id2)).unwrap();
        assert_eq!(index2.index(), 1);
        assert_ne!(index1, index2);

        state.free(index1);
        assert_eq!(state.num_empty_slots(), 99);

        // Allocate to the same `index1` slot again.
        let index3 = state.alloc(Some(id1)).unwrap();
        assert_eq!(index3, index1);
        state.free(index3);

        state.free(index2);

        // Both id1 and id2 should have some slots with affinity.
        let affinity_modules = state.testing_module_affinity_list();
        assert_eq!(2, affinity_modules.len());
        assert!(affinity_modules.contains(&id1));
        assert!(affinity_modules.contains(&id2));

        // Now there is 1 free slot with affinity for id2, 1 with affinity for
        // id1, and 98 cold slots (100 free in total). Allocate 100 for id2.
        // The first should be equal to the one we know was previously used
        // for id2. The next 99 are arbitrary.
        assert_eq!(state.num_empty_slots(), 100);
        let mut indices = vec![];
        for _ in 0..100 {
            indices.push(state.alloc(Some(id2)).unwrap());
        }
        assert!(state.alloc(None).is_none());
        assert_eq!(indices[0], index2);
        assert_eq!(state.num_empty_slots(), 0);

        for i in indices {
            state.free(i);
        }

        // Now there should be no slots left with affinity for id1.
        let affinity_modules = state.testing_module_affinity_list();
        assert_eq!(1, affinity_modules.len());
        assert!(affinity_modules.contains(&id2));

        // Allocate an index we know previously had an instance but
        // now does not (list ran empty).
        let index = state.alloc(Some(id1)).unwrap();
        state.free(index);
    }

    #[test]
    fn clear_affine() {
        let id = CompiledModuleId::new();
        let memory_index = DefinedMemoryIndex::new(0);

        for max_unused_warm_slots in [0, 1, 2] {
            let state = ModuleAffinityIndexAllocator::new(100, max_unused_warm_slots);

            let index1 = state.alloc(Some(MemoryInModule(id, memory_index))).unwrap();
            let index2 = state.alloc(Some(MemoryInModule(id, memory_index))).unwrap();
            state.free(index2);
            state.free(index1);
            assert!(
                state
                    .alloc_affine_and_clear_affinity(id, memory_index)
                    .is_some()
            );
            assert!(
                state
                    .alloc_affine_and_clear_affinity(id, memory_index)
                    .is_some()
            );
            assert_eq!(
                state.alloc_affine_and_clear_affinity(id, memory_index),
                None
            );
        }
    }

    #[test]
    fn test_affinity_allocation_strategy_random() {
        use rand::Rng;
        let mut rng = rand::rng();

        let ids = std::iter::repeat_with(|| {
            MemoryInModule(CompiledModuleId::new(), DefinedMemoryIndex::new(0))
        })
        .take(10)
        .collect::<Vec<_>>();
        let state = ModuleAffinityIndexAllocator::new(1000, 1000);
        let mut allocated: Vec<SlotId> = vec![];
        let mut last_id = vec![None; 1000];

        let mut hits = 0;
        let amt = if cfg!(miri) { 100 } else { 100_000 };
        for _ in 0..amt {
            loop {
                if !allocated.is_empty() && rng.random_bool(0.5) {
                    let i = rng.random_range(0..allocated.len());
                    let to_free_idx = allocated.swap_remove(i);
                    state.free(to_free_idx);
                } else {
                    let id = ids[rng.random_range(0..ids.len())];
                    let index = match state.alloc(Some(id)) {
                        Some(id) => id,
                        None => continue,
                    };
                    if last_id[index.index()] == Some(id) {
                        hits += 1;
                    }
                    last_id[index.index()] = Some(id);
                    allocated.push(index);
                }
                break;
            }
        }

        // 10% reuse would be random chance (because we have 10 module
        // IDs). Check for at least double that to ensure some sort of
        // affinity is occurring.
        assert!(
            hits > (amt / 5),
            "expected at least {} (20%) ID-reuses but got {hits}",
            amt / 5
        );
    }

    #[test]
    fn test_affinity_threshold() {
        let id1 = MemoryInModule(CompiledModuleId::new(), DefinedMemoryIndex::new(0));
        let id2 = MemoryInModule(CompiledModuleId::new(), DefinedMemoryIndex::new(0));
        let id3 = MemoryInModule(CompiledModuleId::new(), DefinedMemoryIndex::new(0));
        let state = ModuleAffinityIndexAllocator::new(10, 2);

        // Set some slot affinities.
        assert_eq!(state.alloc(Some(id1)), Some(SlotId(0)));
        state.free(SlotId(0));
        assert_eq!(state.alloc(Some(id2)), Some(SlotId(1)));
        state.free(SlotId(1));

        // Only 2 slots are allowed to be unused and warm, so we're at our
        // threshold, meaning one must now be evicted.
        assert_eq!(state.alloc(Some(id3)), Some(SlotId(0)));
        state.free(SlotId(0));

        // Pick up `id2` again; it should be affine.
        assert_eq!(state.alloc(Some(id2)), Some(SlotId(1)));

        // With only one warm slot available, allocation for `id1` should pick
        // a fresh (cold) slot.
        assert_eq!(state.alloc(Some(id1)), Some(SlotId(2)));

        state.free(SlotId(1));
        state.free(SlotId(2));

        // Ensure everything stays affine.
        assert_eq!(state.alloc(Some(id1)), Some(SlotId(2)));
        assert_eq!(state.alloc(Some(id2)), Some(SlotId(1)));
        assert_eq!(state.alloc(Some(id3)), Some(SlotId(0)));

        state.free(SlotId(1));
        state.free(SlotId(2));
        state.free(SlotId(0));

        // The LRU slot is 1, so that should be picked.
        assert_eq!(
            state.alloc(Some(MemoryInModule(
                CompiledModuleId::new(),
                DefinedMemoryIndex::new(0)
            ))),
            Some(SlotId(1))
        );

        // Pick another LRU entry, this time 2.
        assert_eq!(
            state.alloc(Some(MemoryInModule(
                CompiledModuleId::new(),
                DefinedMemoryIndex::new(0)
            ))),
            Some(SlotId(2))
        );

        // This should preserve slot `0` and pick up something new.
        assert_eq!(
            state.alloc(Some(MemoryInModule(
                CompiledModuleId::new(),
                DefinedMemoryIndex::new(0)
            ))),
            Some(SlotId(3))
        );

        state.free(SlotId(1));
        state.free(SlotId(2));
        state.free(SlotId(3));

        // For good measure, make sure id3 is still affine.
        assert_eq!(state.alloc(Some(id3)), Some(SlotId(0)));
    }

    #[test]
    fn test_freelist() {
        let allocator = SimpleIndexAllocator::new(10);
        assert_eq!(allocator.testing_freelist(), []);
        let a = allocator.alloc().unwrap();
        assert_eq!(allocator.testing_freelist(), []);
        allocator.free(a);
        assert_eq!(allocator.testing_freelist(), [a]);
        assert_eq!(allocator.alloc(), Some(a));
        assert_eq!(allocator.testing_freelist(), []);
        let b = allocator.alloc().unwrap();
        assert_eq!(allocator.testing_freelist(), []);
        allocator.free(b);
        assert_eq!(allocator.testing_freelist(), [b]);
        allocator.free(a);
        assert_eq!(allocator.testing_freelist(), [b, a]);
    }
}