cranelift_fuzzgen/config.rs

use std::collections::HashMap;
use std::ops::RangeInclusive;

/// Holds the range of acceptable values to use during the generation of testcases.
pub struct Config {
    /// Maximum allowed test case inputs.
    /// We build test case inputs from the rest of the bytes that the fuzzer provides us,
    /// so the fuzzer can control this by feeding us more or fewer bytes.
    /// The upper bound here prevents an excessive number of inputs from causing long test times.
    pub max_test_case_inputs: usize,
    /// Number of functions that we generate per testcase.
    pub testcase_funcs: RangeInclusive<usize>,
    pub signature_params: RangeInclusive<usize>,
    pub signature_rets: RangeInclusive<usize>,
    pub instructions_per_block: RangeInclusive<usize>,
    /// Number of variables that we allocate per function.
    /// This value does not include the signature params.
    pub vars_per_function: RangeInclusive<usize>,
    /// Number of blocks that we generate per function.
    /// This value does not include the entry block.
    pub blocks_per_function: RangeInclusive<usize>,
    /// Number of params a block should take.
    /// This value does not apply to block0, which takes the function params
    /// and is thus governed by `signature_params`.
    pub block_signature_params: RangeInclusive<usize>,
    /// Max number of jump table entries to generate.
    pub jump_table_entries: RangeInclusive<usize>,

    /// The Switch API specializes either individual blocks or contiguous ranges.
    /// In `switch_cases` we decide to produce either a single block or a range.
    /// The size of the range is controlled by `switch_max_range_size`.
    pub switch_cases: RangeInclusive<usize>,
    pub switch_max_range_size: RangeInclusive<usize>,
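    // A minimal sketch of the `cranelift_frontend::Switch` usage these two knobs
    // exercise, assuming `Switch` is in scope and that `builder`, `val`, and the
    // named blocks already exist (all illustrative, not part of this crate):
    //
    //     let mut switch = Switch::new();
    //     switch.set_entry(5, block_a);      // an individual case
    //     for v in 10..14 {
    //         switch.set_entry(v, block_b);  // a contiguous range of cases
    //     }
    //     switch.emit(&mut builder, val, fallthrough_block);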

    /// Stack slots.
    /// The combination of the next two fields determines stack usage per function.
    pub static_stack_slots_per_function: RangeInclusive<usize>,
    /// Size in bytes.
    pub static_stack_slot_size: RangeInclusive<usize>,
    /// Stack slot alignment as a power of 2.
    pub stack_slot_alignment_log2: RangeInclusive<usize>,
    /// Allowed stack probe sizes, as a power of 2.
    pub stack_probe_size_log2: RangeInclusive<usize>,

    /// Determines how often we generate a backwards branch.
    /// Backwards branches are prone to infinite loops, and thus cause timeouts.
    pub backwards_branch_ratio: (usize, usize),

    /// How often we should allow integer division by zero traps.
    ///
    /// Some instructions such as Srem and Udiv can cause an `int_divz` trap
    /// under some inputs. We almost always insert a sequence of instructions
    /// that avoids these issues. However, we can allow some `int_divz` traps
    /// by controlling this config.
    pub allowed_int_divz_ratio: (usize, usize),

    /// How often we should allow fcvt-related traps.
    ///
    /// `Fcvt*` instructions fail under some inputs, most commonly NaNs.
    /// We insert a checking sequence to guarantee that those inputs never make
    /// it to the instruction, but sometimes we want to allow them.
    pub allowed_fcvt_traps_ratio: (usize, usize),

    /// Some flags significantly impact compile performance. We still want to test
    /// them, but at a lower rate, so that overall execution time isn't
    /// impacted as much.
    pub compile_flag_ratio: HashMap<&'static str, (usize, usize)>,

    /// Range of values for the padding between basic blocks. Larger values will
    /// generate larger functions.
    pub bb_padding_log2_size: RangeInclusive<usize>,
}
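
// A minimal sketch (not part of the generator itself) of how the range and ratio
// fields above are typically consumed, assuming the `arbitrary` crate's
// `Unstructured` as the source of fuzzer bytes. The function names here are
// illustrative, not APIs of this crate.
#[cfg(test)]
#[allow(dead_code)]
mod usage_sketch {
    use super::Config;
    use arbitrary::{Result, Unstructured};

    /// Sample a block count for one generated function from the configured range.
    fn pick_block_count(u: &mut Unstructured<'_>, config: &Config) -> Result<usize> {
        u.int_in_range(config.blocks_per_function.clone())
    }

    /// Gate a rare event: returns `true` roughly `num` out of `denom` times.
    fn allow_backwards_branch(u: &mut Unstructured<'_>, config: &Config) -> Result<bool> {
        let (num, denom) = config.backwards_branch_ratio;
        u.ratio(num, denom)
    }
}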

impl Default for Config {
    fn default() -> Self {
        Config {
            max_test_case_inputs: 100,
            testcase_funcs: 1..=8,
            signature_params: 0..=16,
            signature_rets: 0..=16,
            instructions_per_block: 0..=64,
            vars_per_function: 0..=16,
            blocks_per_function: 0..=16,
            block_signature_params: 0..=16,
            jump_table_entries: 0..=16,
            switch_cases: 0..=64,
            // Ranges smaller than 2 don't make sense.
            switch_max_range_size: 2..=32,
            static_stack_slots_per_function: 0..=8,
            static_stack_slot_size: 0..=128,
            stack_slot_alignment_log2: 0..=10,
            // We need a mix of sizes that allows us to:
            //  * not generate any stack probes
            //  * generate unrolled stack probes
            //  * generate loop stack probes
            //
            // This depends on the total amount of stack space that we have for this function
            // (controlled by `static_stack_slots_per_function` and `static_stack_slot_size`).
            //
            // 1<<6 = 64 and 1<<14 = 16384
            //
            // This range allows us to generate all 3 cases within the currently allowed
            // stack size range.
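            //
            // As a rough worked example with the defaults above: 8 slots of up to
            // 128 bytes each is on the order of 1 KiB of slot data per function
            // (more once alignment padding is counted), so probe sizes range from
            // well below that (64 bytes) to well above it (16 KiB).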
            stack_probe_size_log2: 6..=14,
            // 0.1% allows us to explore this while not causing enough timeouts to significantly
            // impact execs/s.
            backwards_branch_ratio: (1, 1000),
            allowed_int_divz_ratio: (1, 1_000_000),
            allowed_fcvt_traps_ratio: (1, 1_000_000),
            compile_flag_ratio: [("regalloc_checker", (1usize, 1000))].into_iter().collect(),
            // Generate up to 4KiB of padding between basic blocks. Although we only
            // explicitly generate up to 16 blocks, after SSA construction we can
            // end up with far more blocks than that (seeing 400 blocks is not uncommon).
            // At 4KiB each, that is around 1.5MiB of padding per function, which seems reasonable.
            bb_padding_log2_size: 0..=12,
        }
    }
}
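
// A hedged example of narrowing the defaults, e.g. for a quicker local run. The
// particular values here are arbitrary; struct update syntax keeps every other
// knob at its default.
#[cfg(test)]
mod config_example {
    use super::Config;

    #[test]
    fn build_small_config() {
        let small = Config {
            testcase_funcs: 1..=2,
            blocks_per_function: 0..=4,
            instructions_per_block: 0..=16,
            ..Config::default()
        };
        // Everything not listed above keeps its default value, e.g. the input cap.
        assert_eq!(small.max_test_case_inputs, 100);
        assert_eq!(*small.blocks_per_function.end(), 4);
    }
}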