wasmtime/runtime/vm/gc/enabled/
free_list.rs

use crate::prelude::*;
use alloc::collections::BTreeMap;
use core::cmp;
use core::{alloc::Layout, num::NonZeroU32, ops::Bound};

/// A very simple first-fit free list for use by our garbage collectors.
pub(crate) struct FreeList {
    /// The total capacity of the contiguous range of memory we are managing.
    capacity: usize,
    /// Our free blocks, as a map from index to length of the free block at that
    /// index.
    free_block_index_to_len: BTreeMap<u32, u32>,
}

/// Our minimum and maximum supported alignment. Every allocation is aligned to
/// this. Additionally, this is the minimum allocation size, and every
/// allocation's size is rounded up to a multiple of this value.
const ALIGN_U32: u32 = 16;
const ALIGN_USIZE: usize = ALIGN_U32 as usize;

impl FreeList {
    /// Create a new `Layout` from the given `size` with an alignment that is
    /// compatible with this free list.
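    ///
    /// For example, `FreeList::layout(24)` is a 24-byte layout with 16-byte
    /// alignment; `alloc` will round the size up to 32 bytes internally.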
    pub fn layout(size: usize) -> Layout {
        Layout::from_size_align(size, ALIGN_USIZE).unwrap()
    }

    /// Create a new `FreeList` for a contiguous region of memory of the given
    /// size.
    pub fn new(capacity: usize) -> Self {
        log::trace!("FreeList::new({capacity})");
        let mut free_list = FreeList {
            capacity,
            free_block_index_to_len: BTreeMap::new(),
        };
        free_list.reset();
        free_list
    }

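    /// The largest allocation size this free list can ever support: our
    /// capacity (clamped to `u32::MAX`), minus the block at index zero that is
    /// reserved as a "null pointer", rounded down to our alignment.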
    fn max_size(&self) -> usize {
        let cap = cmp::min(self.capacity, usize::try_from(u32::MAX).unwrap());
        round_usize_down_to_pow2(cap.saturating_sub(ALIGN_USIZE), ALIGN_USIZE)
    }

    /// Check the given layout for compatibility with this free list and return
    /// the actual block size we will use for this layout.
    fn check_layout(&self, layout: Layout) -> Result<u32> {
        ensure!(
            layout.align() <= ALIGN_USIZE,
            "requested allocation's alignment of {} is greater than max supported alignment of {ALIGN_USIZE}",
            layout.align(),
        );

        ensure!(
            layout.size() <= self.max_size(),
            "requested allocation's size of {} is greater than the max supported size of {}",
            layout.size(),
            self.max_size(),
        );

        let alloc_size = u32::try_from(layout.size())
            .context("requested allocation's size does not fit in a u32")?;
        alloc_size
            .checked_next_multiple_of(ALIGN_U32)
            .ok_or_else(|| {
                anyhow!(
                    "failed to round allocation size of {alloc_size} up to next \
                     multiple of {ALIGN_USIZE}"
                )
            })
    }


    /// Find the first free block that can hold an allocation of the given size
    /// and remove it from the free list.
    fn first_fit(&mut self, alloc_size: u32) -> Option<(u32, u32)> {
        debug_assert_eq!(alloc_size % ALIGN_U32, 0);

        let (&block_index, &block_len) = self
            .free_block_index_to_len
            .iter()
            .find(|(_idx, len)| **len >= alloc_size)?;

        debug_assert_eq!(block_index % ALIGN_U32, 0);
        debug_assert_eq!(block_len % ALIGN_U32, 0);

        let entry = self.free_block_index_to_len.remove(&block_index);
        debug_assert!(entry.is_some());

        Some((block_index, block_len))
    }

    /// If the given allocated block is large enough such that we can split it
    /// and still have enough space left for future allocations, then split it.
    ///
    /// Returns the new length of the allocated block.
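    ///
    /// For example, a 16-byte allocation carved out of a 64-byte free block
    /// splits it into a 16-byte allocated block and a 48-byte block that is
    /// returned to the free list.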
    fn maybe_split(&mut self, alloc_size: u32, block_index: u32, block_len: u32) -> u32 {
        debug_assert_eq!(alloc_size % ALIGN_U32, 0);
        debug_assert_eq!(block_index % ALIGN_U32, 0);
        debug_assert_eq!(block_len % ALIGN_U32, 0);

        if block_len - alloc_size < ALIGN_U32 {
            // The block is not large enough to split.
            return block_len;
        }

        // The block is large enough to split. Split the block at exactly the
        // requested allocation size and put the tail back in the free list.
        let new_block_len = alloc_size;
        let split_start = block_index + alloc_size;
        let split_len = block_len - alloc_size;

        debug_assert_eq!(new_block_len % ALIGN_U32, 0);
        debug_assert_eq!(split_start % ALIGN_U32, 0);
        debug_assert_eq!(split_len % ALIGN_U32, 0);

        self.free_block_index_to_len.insert(split_start, split_len);

        new_block_len
    }

    /// Allocate space for an object of the given layout.
    ///
    /// Returns:
    ///
    /// * `Ok(Some(_))`: Allocation succeeded.
    ///
    /// * `Ok(None)`: Can't currently fulfill the allocation request, but might
    ///   be able to once other blocks are deallocated.
    ///
    /// * `Err(_)`: The given layout can never be fulfilled by this free list,
    ///   for example because its size or alignment exceeds what we support.
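    ///
    /// # Example
    ///
    /// A sketch of typical usage (illustrative only; `FreeList` is
    /// crate-private, so this snippet is not compiled as a doctest):
    ///
    /// ```ignore
    /// let mut free_list = FreeList::new(0x1000);
    /// let layout = FreeList::layout(24);
    /// if let Some(index) = free_list.alloc(layout)? {
    ///     // `index` is a non-zero, 16-byte-aligned offset into the managed
    ///     // region of memory.
    ///     free_list.dealloc(index, layout);
    /// }
    /// ```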
    pub fn alloc(&mut self, layout: Layout) -> Result<Option<NonZeroU32>> {
        log::trace!("FreeList::alloc({layout:?})");
        let alloc_size = self.check_layout(layout)?;
        debug_assert_eq!(alloc_size % ALIGN_U32, 0);

        let (block_index, block_len) = match self.first_fit(alloc_size) {
            None => return Ok(None),
            Some(tup) => tup,
        };
        debug_assert_ne!(block_index, 0);
        debug_assert_eq!(block_index % ALIGN_U32, 0);
        debug_assert!(block_len >= alloc_size);
        debug_assert_eq!(block_len % ALIGN_U32, 0);

        let block_len = self.maybe_split(alloc_size, block_index, block_len);
        debug_assert!(block_len >= alloc_size);
        debug_assert_eq!(block_len % ALIGN_U32, 0);

        // After we've mutated the free list, double check its integrity.
        #[cfg(debug_assertions)]
        self.check_integrity();

        log::trace!("FreeList::alloc({layout:?}) -> {block_index:#x}");
        Ok(Some(unsafe { NonZeroU32::new_unchecked(block_index) }))
    }

    /// Deallocate an object with the given layout.
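    ///
    /// The freed block is merged with its previous and/or next blocks in the
    /// free list whenever they are contiguous, so alloc/dealloc cycles do not
    /// permanently fragment the managed region (see the
    /// `check_no_fragmentation` property test below).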
    pub fn dealloc(&mut self, index: NonZeroU32, layout: Layout) {
        log::trace!("FreeList::dealloc({index:#x}, {layout:?})");

        let index = index.get();
        debug_assert_eq!(index % ALIGN_U32, 0);

        let alloc_size = self.check_layout(layout).unwrap();
        debug_assert_eq!(alloc_size % ALIGN_U32, 0);

        let prev_block = self
            .free_block_index_to_len
            .range((Bound::Unbounded, Bound::Excluded(index)))
            .next_back()
            .map(|(idx, len)| (*idx, *len));

        let next_block = self
            .free_block_index_to_len
            .range((Bound::Excluded(index), Bound::Unbounded))
            .next()
            .map(|(idx, len)| (*idx, *len));

        // Try to merge this block with its previous and next blocks in the
        // free list, if any and if they are contiguous.
        match (prev_block, next_block) {
            // The prev, this, and next blocks are all contiguous: merge this
            // and next into prev.
            (Some((prev_index, prev_len)), Some((next_index, next_len)))
                if blocks_are_contiguous(prev_index, prev_len, index)
                    && blocks_are_contiguous(index, alloc_size, next_index) =>
            {
                log::trace!(
                    "merging blocks {prev_index:#x}..{prev_len:#x}, {index:#x}..{index_end:#x}, {next_index:#x}..{next_end:#x}",
                    prev_len = prev_index + prev_len,
                    index_end = index + u32::try_from(layout.size()).unwrap(),
                    next_end = next_index + next_len,
                );
                self.free_block_index_to_len.remove(&next_index);
                let merged_block_len = next_index + next_len - prev_index;
                debug_assert_eq!(merged_block_len % ALIGN_U32, 0);
                *self.free_block_index_to_len.get_mut(&prev_index).unwrap() = merged_block_len;
            }

            // The prev and this blocks are contiguous: merge this into prev.
            (Some((prev_index, prev_len)), _)
                if blocks_are_contiguous(prev_index, prev_len, index) =>
            {
                log::trace!(
                    "merging blocks {prev_index:#x}..{prev_len:#x}, {index:#x}..{index_end:#x}",
                    prev_len = prev_index + prev_len,
                    index_end = index + u32::try_from(layout.size()).unwrap(),
                );
                let merged_block_len = index + alloc_size - prev_index;
                debug_assert_eq!(merged_block_len % ALIGN_U32, 0);
                *self.free_block_index_to_len.get_mut(&prev_index).unwrap() = merged_block_len;
            }

            // The this and next blocks are contiguous: merge next into this.
            (_, Some((next_index, next_len)))
                if blocks_are_contiguous(index, alloc_size, next_index) =>
            {
                log::trace!(
                    "merging blocks {index:#x}..{index_end:#x}, {next_index:#x}..{next_end:#x}",
                    index_end = index + u32::try_from(layout.size()).unwrap(),
                    next_end = next_index + next_len,
                );
                self.free_block_index_to_len.remove(&next_index);
                let merged_block_len = next_index + next_len - index;
                debug_assert_eq!(merged_block_len % ALIGN_U32, 0);
                self.free_block_index_to_len.insert(index, merged_block_len);
            }

            // None of the blocks are contiguous: insert this block into the
            // free list.
            (_, _) => {
                log::trace!("cannot merge blocks");
                self.free_block_index_to_len.insert(index, alloc_size);
            }
        }

        // After we've added to/mutated the free list, double check its
        // integrity.
        #[cfg(debug_assertions)]
        self.check_integrity();
    }

    /// Assert that the free list is valid:
    ///
    /// 1. All blocks are within `ALIGN..self.capacity`.
    ///
    /// 2. No blocks are overlapping.
    ///
    /// 3. All blocks are aligned to `ALIGN`.
    ///
    /// 4. All block sizes are a multiple of `ALIGN`.
    #[cfg(debug_assertions)]
    fn check_integrity(&self) {
        let mut prev_end = None;
        for (&index, &len) in self.free_block_index_to_len.iter() {
            // (1)
            let end = index + len;
            assert!(usize::try_from(end).unwrap() <= self.capacity);

            // (2)
            if let Some(prev_end) = prev_end {
                // We could assert `prev_end <= index`, and that would be
                // correct, but it would also mean that we missed an opportunity
                // to merge the previous block and this current block
                // together. We don't want to allow that kind of fragmentation,
                // so do the stricter `prev_end < index` assert here.
                assert!(prev_end < index);
            }

            // (3)
            assert_eq!(index % ALIGN_U32, 0);

            // (4)
            assert_eq!(len % ALIGN_U32, 0);

            prev_end = Some(end);
        }
    }

    /// Reset this free list, making the whole range available for allocation.
    pub fn reset(&mut self) {
        let end = u32::try_from(self.capacity).unwrap_or_else(|_| {
            assert!(self.capacity > usize::try_from(u32::MAX).unwrap());
            u32::MAX
        });

        // Don't start at `0`. Reserve that for "null pointers", and this way
        // we can use `NonZeroU32` as our pointer type, giving us some more
        // bitpacking opportunities.
        let start = ALIGN_U32;

        let len = round_u32_down_to_pow2(end.saturating_sub(start), ALIGN_U32);

        let entire_range = if len >= ALIGN_U32 {
            Some((start, len))
        } else {
            None
        };

        self.free_block_index_to_len.clear();
        self.free_block_index_to_len.extend(entire_range);
    }
}

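/// Are these two blocks close enough together that no minimum-sized
/// allocation could ever fit between them, and should therefore be merged
/// into a single free block?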
#[inline]
fn blocks_are_contiguous(prev_index: u32, prev_len: u32, next_index: u32) -> bool {
    // NB: We might have decided *not* to split the prev block if it was larger
    // than the requested allocation size but not large enough such that if we
    // split it, the remainder could fulfill future allocations. In such cases,
    // the size of the `Layout` given to us upon deallocation (aka `prev_len`)
    // is smaller than the actual size of the block we allocated.
    let end_of_prev = prev_index + prev_len;
    debug_assert!(
        next_index >= end_of_prev,
        "overlapping blocks: \n\
         \t prev_index = {prev_index:#x}\n\
         \t   prev_len = {prev_len:#x}\n\
         \tend_of_prev = {end_of_prev:#x}\n\
         \t next_index = {next_index:#x}\n\
         `next_index` should be >= `end_of_prev`"
    );
    let delta_to_next = next_index - end_of_prev;
    delta_to_next < ALIGN_U32
}

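/// Round `value` down to the nearest multiple of `divisor`, which must be a
/// non-zero power of two. For example, `round_u32_down_to_pow2(37, 16) == 32`.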
#[inline]
fn round_u32_down_to_pow2(value: u32, divisor: u32) -> u32 {
    debug_assert!(divisor > 0);
    debug_assert!(divisor.is_power_of_two());
    value & !(divisor - 1)
}

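/// Same as `round_u32_down_to_pow2`, but operating on `usize` values.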
#[inline]
fn round_usize_down_to_pow2(value: usize, divisor: usize) -> usize {
    debug_assert!(divisor > 0);
    debug_assert!(divisor.is_power_of_two());
    value & !(divisor - 1)
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::hash_map::HashMap;
    use proptest::prelude::*;
    use std::num::NonZeroUsize;

    fn free_list_block_len_and_size(free_list: &FreeList) -> (usize, Option<usize>) {
        let len = free_list.free_block_index_to_len.len();
        let size = free_list
            .free_block_index_to_len
            .values()
            .next()
            .map(|s| usize::try_from(*s).unwrap());
        (len, size)
    }

    proptest! {
        /// This property test ensures that `FreeList` doesn't suffer from
        /// permanent fragmentation. That is, it can always merge neighboring
        /// free blocks together into a single, larger free block that can be
        /// used to satisfy larger allocations than either of those smaller
        /// blocks could have. In the limit, once we've freed all blocks, that
        /// means we should end up with a single block that represents the whole
        /// range of memory that the `FreeList` is portioning out (just like
        /// what we started with when we initially created the `FreeList`).
        #[test]
        #[cfg_attr(miri, ignore)]
        fn check_no_fragmentation((capacity, ops) in ops()) {
            // Map from allocation id to ptr.
            let mut live = HashMap::new();

            // Set of deferred deallocations, where the strategy told us to
            // deallocate an id before it was allocated. These simply get
            // deallocated en masse at the end.
            let mut deferred = vec![];

            // The free list we are testing.
            let mut free_list = FreeList::new(capacity.get());

            let (initial_len, initial_size) = free_list_block_len_and_size(&free_list);
            assert!(initial_len == 0 || initial_len == 1);
            assert!(initial_size.unwrap_or(0) <= capacity.get());
            assert_eq!(initial_size.unwrap_or(0), free_list.max_size());

            // Run through the generated ops and perform each operation.
            for (id, op) in ops {
                match op {
                    Op::Alloc(layout) => {
                        if let Ok(Some(ptr)) = free_list.alloc(layout) {
                            live.insert(id, ptr);
                        }
                    }
                    Op::Dealloc(layout) => {
                        if let Some(ptr) = live.remove(&id) {
                            free_list.dealloc(ptr, layout);
                        } else {
                            deferred.push((id, layout));
                        }
                    }
                }
            }

            // Now that we've completed all allocations, perform the deferred
            // deallocations.
            for (id, layout) in deferred {
                // NB: not all IDs necessarily got successful allocations, so
                // there might not be a live pointer for this ID, even after
                // we've already performed all the allocation operations.
                if let Some(ptr) = live.remove(&id) {
                    free_list.dealloc(ptr, layout);
                }
            }

            // Now we can assert various properties that should hold after we
            // have deallocated everything that was allocated.
            //
            // First, assert we did in fact deallocate everything.
            assert!(live.is_empty());

            let (final_len, final_size) = free_list_block_len_and_size(&free_list);

            // The free list should have a single chunk again (or no chunks if
            // the capacity was too small).
            assert_eq!(final_len, initial_len);

            // And the size of that chunk should be the same as the initial size.
            assert_eq!(final_size, initial_size);
        }
    }

    #[derive(Clone, Debug)]
    enum Op {
        Alloc(Layout),
        Dealloc(Layout),
    }

    /// Map an arbitrary `x` to a power of 2 that is less than or equal to
    /// `max`, but with as little bias as possible (e.g. rounding `min(x, max)`
    /// to the nearest power of 2 is unacceptable because it would majorly bias
    /// the distribution towards `max` when `max` is much smaller than
    /// `usize::MAX`).
    fn clamp_to_pow2_in_range(x: usize, max: usize) -> usize {
        let log_max = max.ilog2() as usize;
        if log_max == 0 {
            return 1;
        }
        let divisor = usize::MAX / log_max;
        let y = 1_usize << (x / divisor);
        assert!(y.is_power_of_two(), "{y} is not a power of two");
        assert!(y <= max, "{y} is larger than {max}");
        y
    }

    /// Helper to turn a pair of arbitrary `usize`s into a valid `Layout` of
    /// reasonable size for use in property tests.
    fn arbitrary_layout(max_size: NonZeroUsize, size: usize, align: usize) -> Layout {
        // The maximum size cannot be larger than `isize::MAX` because `Layout`
        // imposes that constraint on its size.
        let max_size = std::cmp::min(max_size.get(), usize::try_from(isize::MAX).unwrap());

        // Ensure that the alignment is a power of 2 that is less than or equal
        // to the maximum alignment that `FreeList` supports.
        let align = clamp_to_pow2_in_range(align, super::ALIGN_USIZE);

        // Ensure that `size` is less than or equal to `max_size`.
        let size = size % (max_size + 1);

        // Ensure that `size` is a multiple of `align`.
        //
        // NB: We round `size` *down* to the previous multiple of `align` to
        // preserve `size <= max_size`.
        let size = round_usize_down_to_pow2(size, align);
        assert!(size <= max_size);

        // Double check that we satisfied all of `Layout::from_size_align`'s
        // success requirements.
        assert_ne!(align, 0);
        assert!(align.is_power_of_two());
        assert_eq!(size % align, 0);
        assert!(size <= usize::try_from(isize::MAX).unwrap());

        Layout::from_size_align(size, align).unwrap()
    }

    /// Proptest strategy to generate a free list capacity and a series of
    /// allocation operations to perform in a free list of that capacity.
    fn ops() -> impl Strategy<Value = (NonZeroUsize, Vec<(usize, Op)>)> {
        any::<usize>().prop_flat_map(|capacity| {
            let capacity =
                NonZeroUsize::new(capacity).unwrap_or_else(|| NonZeroUsize::new(1 << 31).unwrap());

            (
                Just(capacity),
                (any::<usize>(), any::<usize>(), any::<usize>())
                    .prop_flat_map(move |(id, size, align)| {
                        let layout = arbitrary_layout(capacity, size, align);
                        vec![
                            Just((id, Op::Alloc(layout))),
                            Just((id, Op::Dealloc(layout))),
                        ]
                    })
                    .prop_shuffle(),
            )
        })
    }

    #[test]
    fn allocate_no_split() {
        // Create a free list with the capacity to allocate two blocks of size
        // `ALIGN_U32`.
        let mut free_list = FreeList::new(ALIGN_USIZE + usize::try_from(ALIGN_U32).unwrap() * 2);

        assert_eq!(free_list.free_block_index_to_len.len(), 1);
        assert_eq!(
            free_list.max_size(),
            usize::try_from(ALIGN_U32).unwrap() * 2
        );

        // Allocate a block such that the remainder is not worth splitting.
        free_list
            .alloc(
                Layout::from_size_align(
                    usize::try_from(ALIGN_U32).unwrap() + ALIGN_USIZE,
                    ALIGN_USIZE,
                )
                .unwrap(),
            )
            .expect("allocation within 'static' free list limits")
            .expect("have free space available for allocation");

        // Should not have split the block.
        assert_eq!(free_list.free_block_index_to_len.len(), 0);
    }

    #[test]
    fn allocate_and_split() {
        // Create a free list with the capacity to allocate three blocks of size
        // `ALIGN_U32`.
        let mut free_list = FreeList::new(ALIGN_USIZE + usize::try_from(ALIGN_U32).unwrap() * 3);

        assert_eq!(free_list.free_block_index_to_len.len(), 1);
        assert_eq!(
            free_list.max_size(),
            usize::try_from(ALIGN_U32).unwrap() * 3
        );

        // Allocate a block, leaving a remainder that is large enough to be
        // worth splitting off.
        free_list
            .alloc(
                Layout::from_size_align(
                    usize::try_from(ALIGN_U32).unwrap() + ALIGN_USIZE,
                    ALIGN_USIZE,
                )
                .unwrap(),
            )
            .expect("allocation within 'static' free list limits")
            .expect("have free space available for allocation");

        // Should have split the block.
        assert_eq!(free_list.free_block_index_to_len.len(), 1);
    }

    #[test]
    fn dealloc_merge_prev_and_next() {
        let layout =
            Layout::from_size_align(usize::try_from(ALIGN_U32).unwrap(), ALIGN_USIZE).unwrap();

        let mut free_list = FreeList::new(ALIGN_USIZE + usize::try_from(ALIGN_U32).unwrap() * 100);
        assert_eq!(
            free_list.free_block_index_to_len.len(),
            1,
            "initially one big free block"
        );

        let a = free_list
            .alloc(layout)
            .expect("allocation within 'static' free list limits")
            .expect("have free space available for allocation");
        assert_eq!(
            free_list.free_block_index_to_len.len(),
            1,
            "should have split the block to allocate `a`"
        );

        let b = free_list
            .alloc(layout)
            .expect("allocation within 'static' free list limits")
            .expect("have free space available for allocation");
        assert_eq!(
            free_list.free_block_index_to_len.len(),
            1,
            "should have split the block to allocate `b`"
        );

        free_list.dealloc(a, layout);
        assert_eq!(
            free_list.free_block_index_to_len.len(),
            2,
            "should have two non-contiguous free blocks after deallocating `a`"
        );

        free_list.dealloc(b, layout);
        assert_eq!(
            free_list.free_block_index_to_len.len(),
            1,
            "should have merged `a` and `b` blocks with the rest to form a \
             single, contiguous free block after deallocating `b`"
        );
    }

    #[test]
    fn dealloc_merge_with_prev_and_not_next() {
        let layout =
            Layout::from_size_align(usize::try_from(ALIGN_U32).unwrap(), ALIGN_USIZE).unwrap();

        let mut free_list = FreeList::new(ALIGN_USIZE + usize::try_from(ALIGN_U32).unwrap() * 100);
        assert_eq!(
            free_list.free_block_index_to_len.len(),
            1,
            "initially one big free block"
        );

        let a = free_list
            .alloc(layout)
            .expect("allocation within 'static' free list limits")
            .expect("have free space available for allocation");
        let b = free_list
            .alloc(layout)
            .expect("allocation within 'static' free list limits")
            .expect("have free space available for allocation");
        let c = free_list
            .alloc(layout)
            .expect("allocation within 'static' free list limits")
            .expect("have free space available for allocation");
        assert_eq!(
            free_list.free_block_index_to_len.len(),
            1,
            "should have split the block to allocate `a`, `b`, and `c`"
        );

        free_list.dealloc(a, layout);
        assert_eq!(
            free_list.free_block_index_to_len.len(),
            2,
            "should have two non-contiguous free blocks after deallocating `a`"
        );

        free_list.dealloc(b, layout);
        assert_eq!(
            free_list.free_block_index_to_len.len(),
            2,
            "should have merged `a` and `b` blocks, but not merged with the \
             rest of the free space"
        );

        let _ = c;
    }

    #[test]
    fn dealloc_merge_with_next_and_not_prev() {
        let layout =
            Layout::from_size_align(usize::try_from(ALIGN_U32).unwrap(), ALIGN_USIZE).unwrap();

        let mut free_list = FreeList::new(ALIGN_USIZE + usize::try_from(ALIGN_U32).unwrap() * 100);
        assert_eq!(
            free_list.free_block_index_to_len.len(),
            1,
            "initially one big free block"
        );

        let a = free_list
            .alloc(layout)
            .expect("allocation within 'static' free list limits")
            .expect("have free space available for allocation");
        let b = free_list
            .alloc(layout)
            .expect("allocation within 'static' free list limits")
            .expect("have free space available for allocation");
        let c = free_list
            .alloc(layout)
            .expect("allocation within 'static' free list limits")
            .expect("have free space available for allocation");
        assert_eq!(
            free_list.free_block_index_to_len.len(),
            1,
            "should have split the block to allocate `a`, `b`, and `c`"
        );

        free_list.dealloc(a, layout);
        assert_eq!(
            free_list.free_block_index_to_len.len(),
            2,
            "should have two non-contiguous free blocks after deallocating `a`"
        );

        free_list.dealloc(c, layout);
        assert_eq!(
            free_list.free_block_index_to_len.len(),
            2,
            "should have merged `c` block with rest of the free space, but not \
             with `a` block"
        );

        let _ = b;
    }

    #[test]
    fn dealloc_no_merge() {
        let layout =
            Layout::from_size_align(usize::try_from(ALIGN_U32).unwrap(), ALIGN_USIZE).unwrap();

        let mut free_list = FreeList::new(ALIGN_USIZE + usize::try_from(ALIGN_U32).unwrap() * 100);
        assert_eq!(
            free_list.free_block_index_to_len.len(),
            1,
            "initially one big free block"
        );

        let a = free_list
            .alloc(layout)
            .expect("allocation within 'static' free list limits")
            .expect("have free space available for allocation");
        let b = free_list
            .alloc(layout)
            .expect("allocation within 'static' free list limits")
            .expect("have free space available for allocation");
        let c = free_list
            .alloc(layout)
            .expect("allocation within 'static' free list limits")
            .expect("have free space available for allocation");
        let d = free_list
            .alloc(layout)
            .expect("allocation within 'static' free list limits")
            .expect("have free space available for allocation");
        assert_eq!(
            free_list.free_block_index_to_len.len(),
            1,
            "should have split the block to allocate `a`, `b`, `c`, and `d`"
        );

        free_list.dealloc(a, layout);
        assert_eq!(
            free_list.free_block_index_to_len.len(),
            2,
            "should have two non-contiguous free blocks after deallocating `a`"
        );

        free_list.dealloc(c, layout);
        assert_eq!(
            free_list.free_block_index_to_len.len(),
            3,
            "should not have merged the `c` block with the `a` block or with \
             the rest of the free space"
        );

        let _ = (b, d);
    }

    #[test]
    fn alloc_size_too_large() {
        // Free list with room for 10 min-sized blocks.
        let mut free_list = FreeList::new(ALIGN_USIZE + usize::try_from(ALIGN_U32).unwrap() * 10);
        assert_eq!(
            free_list.max_size(),
            usize::try_from(ALIGN_U32).unwrap() * 10
        );

        // Attempt to allocate something that is 20 times the size of our
        // min-sized block.
        assert!(free_list
            .alloc(
                Layout::from_size_align(usize::try_from(ALIGN_U32).unwrap() * 20, ALIGN_USIZE)
                    .unwrap(),
            )
            .is_err());
    }

    #[test]
    fn alloc_align_too_large() {
        // Free list with room for 10 min-sized blocks.
        let mut free_list = FreeList::new(ALIGN_USIZE + usize::try_from(ALIGN_U32).unwrap() * 10);
        assert_eq!(
            free_list.max_size(),
            usize::try_from(ALIGN_U32).unwrap() * 10
        );

        // Attempt to allocate something that requires larger alignment than
        // `FreeList` supports.
        assert!(free_list
            .alloc(
                Layout::from_size_align(usize::try_from(ALIGN_U32).unwrap(), ALIGN_USIZE * 2)
                    .unwrap(),
            )
            .is_err());
    }

    #[test]
    fn all_pairwise_alloc_dealloc_orderings() {
        let tests: &[fn(&mut FreeList, Layout)] = &[
            |f, l| {
                let a = f.alloc(l).unwrap().unwrap();
                let b = f.alloc(l).unwrap().unwrap();
                f.dealloc(a, l);
                f.dealloc(b, l);
            },
            |f, l| {
                let a = f.alloc(l).unwrap().unwrap();
                let b = f.alloc(l).unwrap().unwrap();
                f.dealloc(b, l);
                f.dealloc(a, l);
            },
            |f, l| {
                let a = f.alloc(l).unwrap().unwrap();
                f.dealloc(a, l);
                let b = f.alloc(l).unwrap().unwrap();
                f.dealloc(b, l);
            },
        ];

        let l = Layout::from_size_align(16, 8).unwrap();
        for test in tests {
            let mut f = FreeList::new(0x100);
            test(&mut f, l);
        }
    }
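
    // Two extra sanity checks, added here as illustrative examples of the
    // documented behavior above: every allocation is rounded up to the
    // 16-byte minimum block size, and `reset` makes the whole range
    // available again.

    #[test]
    fn alloc_rounds_size_up_to_min_block() {
        // Capacity for the reserved "null" block plus exactly one
        // minimum-sized block.
        let mut free_list = FreeList::new(ALIGN_USIZE * 2);
        assert_eq!(free_list.max_size(), ALIGN_USIZE);

        // A one-byte request is rounded up to the 16-byte minimum and
        // consumes the whole block.
        let a = free_list
            .alloc(Layout::from_size_align(1, 1).unwrap())
            .expect("allocation within 'static' free list limits")
            .expect("have free space available for allocation");
        assert_eq!(a.get(), ALIGN_U32);

        // Nothing is left, so further allocations report `Ok(None)`.
        assert!(free_list
            .alloc(Layout::from_size_align(1, 1).unwrap())
            .unwrap()
            .is_none());
    }

    #[test]
    fn reset_restores_entire_range() {
        let layout = FreeList::layout(ALIGN_USIZE);
        let mut free_list = FreeList::new(ALIGN_USIZE * 10);
        let initial = free_list_block_len_and_size(&free_list);

        // Carve a couple of blocks out of the initial, single free block.
        let _a = free_list.alloc(layout).unwrap().unwrap();
        let _b = free_list.alloc(layout).unwrap().unwrap();

        // `reset` forgets all live allocations and makes the entire range
        // available again, as if the free list were freshly created.
        free_list.reset();
        assert_eq!(free_list_block_len_and_size(&free_list), initial);
    }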
}