// wasmtime/config.rs
1use crate::prelude::*;
2use alloc::sync::Arc;
3use bitflags::Flags;
4use core::fmt;
5use core::num::NonZeroUsize;
6use core::str::FromStr;
7#[cfg(any(feature = "cranelift", feature = "winch"))]
8use std::path::Path;
9pub use wasmparser::WasmFeatures;
10use wasmtime_environ::{ConfigTunables, OperatorCost, OperatorCostStrategy, TripleExt, Tunables};
11
12#[cfg(feature = "runtime")]
13use crate::memory::MemoryCreator;
14#[cfg(feature = "runtime")]
15use crate::profiling_agent::{self, ProfilingAgent};
16#[cfg(feature = "runtime")]
17use crate::runtime::vm::{
18    GcRuntime, InstanceAllocator, OnDemandInstanceAllocator, RuntimeMemoryCreator,
19};
20#[cfg(feature = "runtime")]
21use crate::trampoline::MemoryCreatorProxy;
22
23#[cfg(feature = "async")]
24use crate::stack::{StackCreator, StackCreatorProxy};
25#[cfg(feature = "async")]
26use wasmtime_fiber::RuntimeFiberStackCreator;
27
28#[cfg(feature = "runtime")]
29pub use crate::runtime::code_memory::CustomCodeMemory;
30#[cfg(feature = "cache")]
31pub use wasmtime_cache::{Cache, CacheConfig};
32#[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
33pub use wasmtime_environ::CacheStore;
34
/// Default cap on the number of Wasm frames captured in a backtrace; see
/// [`Config::wasm_backtrace_max_frames`].
pub(crate) const DEFAULT_WASM_BACKTRACE_MAX_FRAMES: NonZeroUsize = NonZeroUsize::new(20).unwrap();
36
/// Represents the module instance allocation strategy to use.
#[derive(Clone)]
#[non_exhaustive]
pub enum InstanceAllocationStrategy {
    /// The on-demand instance allocation strategy.
    ///
    /// Resources related to a module instance are allocated at instantiation time and
    /// immediately deallocated when the `Store` referencing the instance is dropped.
    ///
    /// This is the default allocation strategy for Wasmtime.
    OnDemand,
    /// The pooling instance allocation strategy.
    ///
    /// A pool of resources is created in advance and module instantiation reuses resources
    /// from the pool. Resources are returned to the pool when the `Store` referencing the instance
    /// is dropped.
    ///
    /// Only available when the `pooling-allocator` crate feature is enabled.
    #[cfg(feature = "pooling-allocator")]
    Pooling(PoolingAllocationConfig),
}
56
57impl InstanceAllocationStrategy {
58    /// The default pooling instance allocation strategy.
59    #[cfg(feature = "pooling-allocator")]
60    pub fn pooling() -> Self {
61        Self::Pooling(Default::default())
62    }
63}
64
65impl Default for InstanceAllocationStrategy {
66    fn default() -> Self {
67        Self::OnDemand
68    }
69}
70
#[cfg(feature = "pooling-allocator")]
impl From<PoolingAllocationConfig> for InstanceAllocationStrategy {
    /// Wraps a pooling configuration into the corresponding strategy variant.
    fn from(cfg: PoolingAllocationConfig) -> InstanceAllocationStrategy {
        Self::Pooling(cfg)
    }
}
77
/// Configure the strategy used for versioning in serializing and deserializing [`crate::Module`].
#[derive(Clone)]
pub enum ModuleVersionStrategy {
    /// Use the wasmtime crate's Cargo package version.
    WasmtimeVersion,
    /// Use a custom version string. Must be at most 255 bytes.
    Custom(String),
    /// Emit no version string in serialization, and accept all version strings in deserialization.
    None,
}
88
89impl Default for ModuleVersionStrategy {
90    fn default() -> Self {
91        ModuleVersionStrategy::WasmtimeVersion
92    }
93}
94
95impl core::hash::Hash for ModuleVersionStrategy {
96    fn hash<H: core::hash::Hasher>(&self, hasher: &mut H) {
97        match self {
98            Self::WasmtimeVersion => env!("CARGO_PKG_VERSION").hash(hasher),
99            Self::Custom(s) => s.hash(hasher),
100            Self::None => {}
101        };
102    }
103}
104
impl ModuleVersionStrategy {
    /// Get the string-encoding version of the module.
    ///
    /// Returns the empty string for [`ModuleVersionStrategy::None`].
    pub fn as_str(&self) -> &str {
        match &self {
            // NOTE(review): only the *major* package version is returned here,
            // while the `Hash` impl above mixes in the full
            // `CARGO_PKG_VERSION` — confirm this asymmetry is intentional.
            Self::WasmtimeVersion => env!("CARGO_PKG_VERSION_MAJOR"),
            Self::Custom(c) => c,
            Self::None => "",
        }
    }
}
115
/// Configuration for record/replay
#[derive(Clone)]
#[non_exhaustive]
pub enum RRConfig {
    /// Recording on store is enabled
    #[cfg(feature = "rr")]
    Recording,
    /// Replaying on store is enabled
    #[cfg(feature = "rr")]
    Replaying,
    /// No record/replay is enabled
    None,
}
129
/// Global configuration options used to create an [`Engine`](crate::Engine)
/// and customize its behavior.
///
/// This structure exposes a builder-like interface and is primarily consumed by
/// [`Engine::new()`](crate::Engine::new).
///
/// The validation of `Config` is deferred until the engine is being built, thus
/// a problematic config may cause `Engine::new` to fail.
///
/// # Defaults
///
/// The `Default` trait implementation and the return value from
/// [`Config::new()`] are the same and represent the default set of
/// configuration for an engine. The exact set of defaults will differ based on
/// properties such as enabled Cargo features at compile time and the configured
/// target (see [`Config::target`]). Configuration options document their
/// default values and what the conditional value of the default is where
/// applicable.
#[derive(Clone)]
pub struct Config {
    // Compiler configuration; `None` means compilation is dynamically disabled
    // (see `Config::enable_compiler`).
    #[cfg(any(feature = "cranelift", feature = "winch"))]
    compiler_config: Option<CompilerConfig>,
    // Compilation target triple; `None` means "compile for the host".
    target: Option<target_lexicon::Triple>,
    #[cfg(feature = "gc")]
    collector: Collector,
    profiling_strategy: ProfilingStrategy,
    tunables: ConfigTunables,

    #[cfg(feature = "cache")]
    pub(crate) cache: Option<Cache>,
    #[cfg(feature = "runtime")]
    pub(crate) mem_creator: Option<Arc<dyn RuntimeMemoryCreator>>,
    #[cfg(feature = "runtime")]
    pub(crate) custom_code_memory: Option<Arc<dyn CustomCodeMemory>>,
    pub(crate) allocation_strategy: InstanceAllocationStrategy,
    // Approximate maximum stack space, in bytes, that Wasm code may consume
    // (see `Config::max_wasm_stack`).
    pub(crate) max_wasm_stack: usize,
    /// Explicitly enabled features via `Config::wasm_*` methods. This is a
    /// signal that the embedder specifically wants something turned on
    /// regardless of the defaults that Wasmtime might otherwise have enabled.
    ///
    /// Note that this, and `disabled_features` below, start as the empty set of
    /// features to only track explicit user requests.
    pub(crate) enabled_features: WasmFeatures,
    /// Same as `enabled_features`, but for those that are explicitly disabled.
    pub(crate) disabled_features: WasmFeatures,
    pub(crate) wasm_backtrace_details_env_used: bool,
    // `None` disables Wasm backtrace collection entirely.
    pub(crate) wasm_backtrace_max_frames: Option<NonZeroUsize>,
    pub(crate) native_unwind_info: Option<bool>,
    #[cfg(any(feature = "async", feature = "stack-switching"))]
    pub(crate) async_stack_size: usize,
    #[cfg(feature = "async")]
    pub(crate) async_stack_zeroing: bool,
    #[cfg(feature = "async")]
    pub(crate) stack_creator: Option<Arc<dyn RuntimeFiberStackCreator>>,
    pub(crate) module_version: ModuleVersionStrategy,
    pub(crate) parallel_compilation: bool,
    pub(crate) memory_guaranteed_dense_image_size: u64,
    pub(crate) force_memory_init_memfd: bool,
    pub(crate) wmemcheck: bool,
    #[cfg(feature = "coredump")]
    pub(crate) coredump_on_trap: bool,
    pub(crate) macos_use_mach_ports: bool,
    // Host CPU feature probe; `None` when detection is unavailable (no-std builds).
    pub(crate) detect_host_feature: Option<fn(&str) -> Option<bool>>,
    pub(crate) x86_float_abi_ok: Option<bool>,
    pub(crate) shared_memory: bool,
    pub(crate) rr_config: RRConfig,
}
197
/// User-provided configuration for the compiler.
#[cfg(any(feature = "cranelift", feature = "winch"))]
#[derive(Debug, Clone)]
struct CompilerConfig {
    // Selected compiler backend; `None` is produced from `Strategy::Auto`
    // via `not_auto()` in `CompilerConfig::new`.
    strategy: Option<Strategy>,
    // Named compiler settings as key/value pairs.
    settings: crate::hash_map::HashMap<String, String>,
    // Boolean compiler flags, enabled by presence in the set.
    flags: crate::hash_set::HashSet<String>,
    #[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
    cache_store: Option<Arc<dyn CacheStore>>,
    // Presumably the directory where CLIF IR is emitted — TODO confirm.
    clif_dir: Option<std::path::PathBuf>,
    wmemcheck: bool,
}
210
#[cfg(any(feature = "cranelift", feature = "winch"))]
impl CompilerConfig {
    /// Creates a compiler configuration with no strategy forced, empty
    /// settings and flags, and wmemcheck disabled.
    fn new() -> Self {
        CompilerConfig {
            strategy: Strategy::Auto.not_auto(),
            settings: Default::default(),
            flags: Default::default(),
            #[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
            cache_store: None,
            clif_dir: None,
            wmemcheck: false,
        }
    }

    /// Ensures that the key is not set or equals to the given value.
    /// If the key is not set, it will be set to the given value.
    ///
    /// # Returns
    ///
    /// Returns true if successfully set or already had the given setting
    /// value, or false if the setting was explicitly set to something
    /// else previously.
    fn ensure_setting_unset_or_given(&mut self, k: &str, v: &str) -> bool {
        match self.settings.get(k) {
            // Already set: succeed only when the values agree.
            Some(existing) => existing == v,
            None => {
                self.settings.insert(k.to_string(), v.to_string());
                true
            }
        }
    }
}
244
#[cfg(any(feature = "cranelift", feature = "winch"))]
impl Default for CompilerConfig {
    /// Equivalent to [`CompilerConfig::new`].
    fn default() -> Self {
        CompilerConfig::new()
    }
}
251
252impl Config {
    /// Creates a new configuration object with the default configuration
    /// specified.
    pub fn new() -> Self {
        let mut ret = Self {
            tunables: ConfigTunables::default(),
            #[cfg(any(feature = "cranelift", feature = "winch"))]
            compiler_config: Some(CompilerConfig::default()),
            target: None,
            #[cfg(feature = "gc")]
            collector: Collector::default(),
            #[cfg(feature = "cache")]
            cache: None,
            profiling_strategy: ProfilingStrategy::None,
            #[cfg(feature = "runtime")]
            mem_creator: None,
            #[cfg(feature = "runtime")]
            custom_code_memory: None,
            allocation_strategy: InstanceAllocationStrategy::OnDemand,
            // 512k of stack -- note that this is chosen currently to not be too
            // big, not be too small, and be a good default for most platforms.
            // One platform of particular note is Windows where the stack size
            // of the main thread seems to, by default, be smaller than that of
            // Linux and macOS. This 512k value at least lets our current test
            // suite pass on the main thread of Windows (using `--test-threads
            // 1` forces this), or at least it passed when this change was
            // committed.
            max_wasm_stack: 512 * 1024,
            wasm_backtrace_details_env_used: false,
            wasm_backtrace_max_frames: Some(DEFAULT_WASM_BACKTRACE_MAX_FRAMES),
            native_unwind_info: None,
            enabled_features: WasmFeatures::empty(),
            disabled_features: WasmFeatures::empty(),
            // 2 MiB of async stack by default.
            #[cfg(any(feature = "async", feature = "stack-switching"))]
            async_stack_size: 2 << 20,
            #[cfg(feature = "async")]
            async_stack_zeroing: false,
            #[cfg(feature = "async")]
            stack_creator: None,
            module_version: ModuleVersionStrategy::default(),
            // Parallel compilation and mach ports are disabled under Miri.
            parallel_compilation: !cfg!(miri),
            // 16 MiB.
            memory_guaranteed_dense_image_size: 16 << 20,
            force_memory_init_memfd: false,
            wmemcheck: false,
            #[cfg(feature = "coredump")]
            coredump_on_trap: false,
            macos_use_mach_ports: !cfg!(miri),
            // Host feature detection requires `std`.
            #[cfg(feature = "std")]
            detect_host_feature: Some(detect_host_feature),
            #[cfg(not(feature = "std"))]
            detect_host_feature: None,
            x86_float_abi_ok: None,
            shared_memory: false,
            rr_config: RRConfig::None,
        };
        // Honor the `WASMTIME_BACKTRACE_DETAILS` environment variable by
        // default (only consulted on `std` builds).
        ret.wasm_backtrace_details(WasmBacktraceDetails::Environment);
        ret
    }
310
311    #[cfg(any(feature = "cranelift", feature = "winch"))]
312    pub(crate) fn has_compiler(&self) -> bool {
313        self.compiler_config.is_some()
314    }
315
    /// Returns a mutable reference to the compiler configuration.
    ///
    /// # Panics
    ///
    /// Panics (at the caller's location, via `#[track_caller]`) if compilation
    /// was disabled for this `Config`.
    #[track_caller]
    #[cfg(any(feature = "cranelift", feature = "winch"))]
    fn compiler_config_mut(&mut self) -> &mut CompilerConfig {
        self.compiler_config.as_mut().expect(
            "cannot configure compiler settings for `Config`s \
             created by `Config::without_compiler`",
        )
    }
324
325    /// Configure whether Wasm compilation is enabled.
326    ///
327    /// Disabling Wasm compilation will allow you to load and run
328    /// [pre-compiled][crate::Engine::precompile_module] Wasm programs, but not
329    /// to compile and run new Wasm programs that have not already been
330    /// pre-compiled.
331    ///
332    /// Many compilation-related configuration methods will panic if compilation
333    /// has been disabled.
334    ///
335    /// Note that there are two ways to disable Wasm compilation:
336    ///
337    /// 1. Statically, by disabling the `"cranelift"` and `"winch"` cargo
338    ///    features when building Wasmtime. These builds of Wasmtime will have
339    ///    smaller code size, since they do not include any of the code to
340    ///    compile Wasm.
341    ///
342    /// 2. Dynamically, by passing `false` to this method at run-time when
343    ///    configuring Wasmtime. The Wasmtime binary will still include the code
344    ///    for compiling Wasm, it just won't be executed, so code size is larger
345    ///    than with the first approach.
346    ///
347    /// The static approach is better in most cases, however dynamically calling
348    /// `enable_compiler(false)` is useful whenever you create multiple
349    /// `Engine`s in the same process, some of which must be able to compile
350    /// Wasm and some of which should never do so. Tests are a common example of
351    /// such a situation, especially when there are multiple Rust binaries in
352    /// the same cargo workspace, and cargo's feature resolution enables the
353    /// `"cranelift"` or `"winch"` features across the whole workspace.
354    #[cfg(any(feature = "cranelift", feature = "winch"))]
355    pub fn enable_compiler(&mut self, enable: bool) -> &mut Self {
356        match (enable, &self.compiler_config) {
357            (true, Some(_)) | (false, None) => {}
358            (true, None) => {
359                self.compiler_config = Some(CompilerConfig::default());
360            }
361            (false, Some(_)) => {
362                self.compiler_config = None;
363            }
364        }
365        self
366    }
367
368    /// Configures the target platform of this [`Config`].
369    ///
370    /// This method is used to configure the output of compilation in an
371    /// [`Engine`](crate::Engine). This can be used, for example, to
372    /// cross-compile from one platform to another. By default, the host target
373    /// triple is used meaning compiled code is suitable to run on the host.
374    ///
375    /// Note that the [`Module`](crate::Module) type can only be created if the
376    /// target configured here matches the host. Otherwise if a cross-compile is
377    /// being performed where the host doesn't match the target then
378    /// [`Engine::precompile_module`](crate::Engine::precompile_module) must be
379    /// used instead.
380    ///
381    /// Target-specific flags (such as CPU features) will not be inferred by
382    /// default for the target when one is provided here. This means that this
383    /// can also be used, for example, with the host architecture to disable all
384    /// host-inferred feature flags. Configuring target-specific flags can be
385    /// done with [`Config::cranelift_flag_set`] and
386    /// [`Config::cranelift_flag_enable`].
387    ///
388    /// # Errors
389    ///
390    /// This method will error if the given target triple is not supported.
391    pub fn target(&mut self, target: &str) -> Result<&mut Self> {
392        self.target =
393            Some(target_lexicon::Triple::from_str(target).map_err(|e| crate::format_err!(e))?);
394
395        Ok(self)
396    }
397
398    /// Enables the incremental compilation cache in Cranelift, using the provided `CacheStore`
399    /// backend for storage.
400    ///
401    /// # Panics
402    ///
403    /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
404    #[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
405    pub fn enable_incremental_compilation(
406        &mut self,
407        cache_store: Arc<dyn CacheStore>,
408    ) -> Result<&mut Self> {
409        self.compiler_config_mut().cache_store = Some(cache_store);
410        Ok(self)
411    }
412
    // Deliberate no-op kept only for backwards API compatibility; the
    // deprecation note explains it no longer has any effect.
    #[doc(hidden)]
    #[deprecated(note = "no longer has any effect")]
    #[cfg(feature = "async")]
    pub fn async_support(&mut self, _enable: bool) -> &mut Self {
        self
    }
419
420    /// Configures whether DWARF debug information will be emitted
421    /// during compilation for a native debugger on the Wasmtime
422    /// process to consume.
423    ///
424    /// Note that the `debug-builtins` compile-time Cargo feature must also be
425    /// enabled for native debuggers such as GDB or LLDB to be able to debug
426    /// guest WebAssembly programs.
427    ///
428    /// By default this option is `false`.
429    /// **Note** Enabling this option is not compatible with the Winch compiler.
430    pub fn debug_info(&mut self, enable: bool) -> &mut Self {
431        self.tunables.debug_native = Some(enable);
432        self
433    }
434
    /// Configures whether compiled guest code will be instrumented to
    /// provide debugging at the Wasm VM level.
    ///
    /// This is required in order to enable a guest-level debugging
    /// API that can precisely examine Wasm VM state and (eventually,
    /// once it is complete) set breakpoints and watchpoints and step
    /// through code.
    ///
    /// Without this enabled, debugging can only be done via a native
    /// debugger operating on the compiled guest code (see
    /// [`Config::debug_info`]) and is "best-effort": we may be able to
    /// recover some Wasm locals or operand stack values, but it is
    /// not guaranteed, even when optimizations are disabled.
    ///
    /// When this is enabled, additional instrumentation is inserted
    /// that directly tracks the Wasm VM state at every step. This has
    /// some performance impact, but allows perfect debugging
    /// fidelity.
    ///
    /// Breakpoints, watchpoints, and stepping are not yet supported,
    /// but will be added in a future version of Wasmtime.
    ///
    /// This enables use of the [`crate::FrameHandle`] API which is
    /// provided by [`crate::Caller::debug_exit_frames`] or
    /// [`crate::Store::debug_exit_frames`].
    ///
    /// ***Note*** Enabling this option is not compatible with the
    /// Winch compiler.
    #[cfg(feature = "debug")]
    pub fn guest_debug(&mut self, enable: bool) -> &mut Self {
        self.tunables.debug_guest = Some(enable);
        self
    }
468
469    /// Configures whether [`WasmBacktrace`] will be present in the context of
470    /// errors returned from Wasmtime.
471    ///
472    /// This method is deprecated in favor of
473    /// [`Config::wasm_backtrace_max_frames`]. Calling `wasm_backtrace(false)`
474    /// is equivalent to `wasm_backtrace_max_frames(None)`, and
475    /// `wasm_backtrace(true)` will leave `wasm_backtrace_max_frames` unchanged
476    /// if the value is `Some` and will otherwise restore the default `Some`
477    /// value.
478    ///
479    /// [`WasmBacktrace`]: crate::WasmBacktrace
480    #[deprecated = "use `wasm_backtrace_max_frames` instead"]
481    pub fn wasm_backtrace(&mut self, enable: bool) -> &mut Self {
482        match (enable, self.wasm_backtrace_max_frames) {
483            (false, _) => self.wasm_backtrace_max_frames = None,
484            // Wasm backtraces were disabled; enable them with the
485            // default maximum number of frames to capture.
486            (true, None) => {
487                self.wasm_backtrace_max_frames = Some(DEFAULT_WASM_BACKTRACE_MAX_FRAMES)
488            }
489            // Wasm backtraces are already enabled; keep the existing
490            // max-frames configuration.
491            (true, Some(_)) => {}
492        }
493        self
494    }
495
    /// Configures whether backtraces in `Trap` will parse debug info in the wasm file to
    /// have filename/line number information.
    ///
    /// When enabled this will cause modules to retain debugging information
    /// found in wasm binaries. This debug information will be used when a trap
    /// happens to symbolicate each stack frame and attempt to print a
    /// filename/line number for each wasm frame in the stack trace.
    ///
    /// By default this option is `WasmBacktraceDetails::Environment`, meaning
    /// that wasm will read `WASMTIME_BACKTRACE_DETAILS` to indicate whether
    /// details should be parsed. Note that the `std` feature of this crate must
    /// be active to read environment variables, otherwise this is disabled by
    /// default.
    pub fn wasm_backtrace_details(&mut self, enable: WasmBacktraceDetails) -> &mut Self {
        self.wasm_backtrace_details_env_used = false;
        self.tunables.parse_wasm_debuginfo = match enable {
            WasmBacktraceDetails::Enable => Some(true),
            WasmBacktraceDetails::Disable => Some(false),
            WasmBacktraceDetails::Environment => {
                #[cfg(feature = "std")]
                {
                    self.wasm_backtrace_details_env_used = true;
                    // Only the exact string "1" enables details; any other
                    // value, an unset variable, or non-unicode contents
                    // disables them.
                    std::env::var("WASMTIME_BACKTRACE_DETAILS")
                        .map(|s| Some(s == "1"))
                        .unwrap_or(Some(false))
                }
                #[cfg(not(feature = "std"))]
                {
                    Some(false)
                }
            }
        };
        self
    }
530
    /// Configures the maximum number of WebAssembly frames to collect in
    /// backtraces.
    ///
    /// A backtrace may be collected whenever an error is returned from a host
    /// function call through to WebAssembly or when WebAssembly itself hits a
    /// trap condition, such as an out-of-bounds memory access. This flag
    /// indicates, in these conditions, whether the backtrace is collected or
    /// not and how many frames should be collected.
    ///
    /// Currently wasm backtraces are implemented through frame pointer walking.
    /// This means that collecting a backtrace is expected to be a fast and
    /// relatively cheap operation. Additionally backtrace collection is
    /// suitable in concurrent environments since one thread capturing a
    /// backtrace won't block other threads.
    ///
    /// Collected backtraces are attached via
    /// [`Error::context`](crate::Error::context) to errors returned from host
    /// functions. The [`WasmBacktrace`] type can be acquired via
    /// [`Error::downcast_ref`](crate::Error::downcast_ref) to inspect the
    /// backtrace. When this option is set to `None` then this context is never
    /// applied to errors coming out of wasm.
    ///
    /// The default value is 20.
    ///
    /// [`WasmBacktrace`]: crate::WasmBacktrace
    pub fn wasm_backtrace_max_frames(&mut self, limit: Option<NonZeroUsize>) -> &mut Self {
        // `None` disables backtrace collection entirely.
        self.wasm_backtrace_max_frames = limit;
        self
    }
560
561    /// Configures whether to generate native unwind information
562    /// (e.g. `.eh_frame` on Linux).
563    ///
564    /// This configuration option only exists to help third-party stack
565    /// capturing mechanisms, such as the system's unwinder or the `backtrace`
566    /// crate, determine how to unwind through Wasm frames. It does not affect
567    /// whether Wasmtime can capture Wasm backtraces or not. The presence of
568    /// [`WasmBacktrace`] is controlled by the
569    /// [`Config::wasm_backtrace_max_frames`] option.
570    ///
571    /// Native unwind information is included:
572    /// - When targeting Windows, since the Windows ABI requires it.
573    /// - By default.
574    ///
575    /// Note that systems loading many modules may wish to disable this
576    /// configuration option instead of leaving it on-by-default. Some platforms
577    /// exhibit quadratic behavior when registering/unregistering unwinding
578    /// information which can greatly slow down the module loading/unloading
579    /// process.
580    ///
581    /// [`WasmBacktrace`]: crate::WasmBacktrace
582    pub fn native_unwind_info(&mut self, enable: bool) -> &mut Self {
583        self.native_unwind_info = Some(enable);
584        self
585    }
586
587    /// Configures whether execution of WebAssembly will "consume fuel" to
588    /// either halt or yield execution as desired.
589    ///
590    /// This can be used to deterministically prevent infinitely-executing
591    /// WebAssembly code by instrumenting generated code to consume fuel as it
592    /// executes. When fuel runs out a trap is raised, however [`Store`] can be
593    /// configured to yield execution periodically via
594    /// [`crate::Store::fuel_async_yield_interval`].
595    ///
596    /// Note that a [`Store`] starts with no fuel, so if you enable this option
597    /// you'll have to be sure to pour some fuel into [`Store`] before
598    /// executing some code.
599    ///
600    /// By default this option is `false`.
601    ///
602    /// **Note** Enabling this option is not compatible with the Winch compiler.
603    ///
604    /// [`Store`]: crate::Store
605    pub fn consume_fuel(&mut self, enable: bool) -> &mut Self {
606        self.tunables.consume_fuel = Some(enable);
607        self
608    }
609
610    /// Configures the fuel cost of each WebAssembly operator.
611    ///
612    /// This is only relevant when [`Config::consume_fuel`] is enabled.
613    pub fn operator_cost(&mut self, cost: OperatorCost) -> &mut Self {
614        self.tunables.operator_cost = Some(OperatorCostStrategy::table(cost));
615        self
616    }
617
618    /// Enables epoch-based interruption.
619    ///
620    /// When executing code in async mode, we sometimes want to
621    /// implement a form of cooperative timeslicing: long-running Wasm
622    /// guest code should periodically yield to the executor
623    /// loop. This yielding could be implemented by using "fuel" (see
624    /// [`consume_fuel`](Config::consume_fuel)). However, fuel
625    /// instrumentation is somewhat expensive: it modifies the
626    /// compiled form of the Wasm code so that it maintains a precise
627    /// instruction count, frequently checking this count against the
628    /// remaining fuel. If one does not need this precise count or
629    /// deterministic interruptions, and only needs a periodic
630    /// interrupt of some form, then It would be better to have a more
631    /// lightweight mechanism.
632    ///
633    /// Epoch-based interruption is that mechanism. There is a global
634    /// "epoch", which is a counter that divides time into arbitrary
635    /// periods (or epochs). This counter lives on the
636    /// [`Engine`](crate::Engine) and can be incremented by calling
637    /// [`Engine::increment_epoch`](crate::Engine::increment_epoch).
638    /// Epoch-based instrumentation works by setting a "deadline
639    /// epoch". The compiled code knows the deadline, and at certain
640    /// points, checks the current epoch against that deadline. It
641    /// will yield if the deadline has been reached.
642    ///
643    /// The idea is that checking an infrequently-changing counter is
644    /// cheaper than counting and frequently storing a precise metric
645    /// (instructions executed) locally. The interruptions are not
646    /// deterministic, but if the embedder increments the epoch in a
647    /// periodic way (say, every regular timer tick by a thread or
648    /// signal handler), then we can ensure that all async code will
649    /// yield to the executor within a bounded time.
650    ///
651    /// The deadline check cannot be avoided by malicious wasm code. It is safe
652    /// to use epoch deadlines to limit the execution time of untrusted
653    /// code.
654    ///
655    /// The [`Store`](crate::Store) tracks the deadline, and controls
656    /// what happens when the deadline is reached during
657    /// execution. Several behaviors are possible:
658    ///
659    /// - Trap if code is executing when the epoch deadline is
660    ///   met. See
661    ///   [`Store::epoch_deadline_trap`](crate::Store::epoch_deadline_trap).
662    ///
663    /// - Call an arbitrary function. This function may chose to trap or
664    ///   increment the epoch. See
665    ///   [`Store::epoch_deadline_callback`](crate::Store::epoch_deadline_callback).
666    ///
667    /// - Yield to the executor loop, then resume when the future is
668    ///   next polled. See
669    ///   [`Store::epoch_deadline_async_yield_and_update`](crate::Store::epoch_deadline_async_yield_and_update).
670    ///
671    /// Trapping is the default. The yielding behaviour may be used for
672    /// the timeslicing behavior described above.
673    ///
674    /// This feature is available with or without async support.
675    /// However, without async support, the timeslicing behaviour is
676    /// not available. This means epoch-based interruption can only
677    /// serve as a simple external-interruption mechanism.
678    ///
679    /// An initial deadline must be set before executing code by calling
680    /// [`Store::set_epoch_deadline`](crate::Store::set_epoch_deadline). If this
681    /// deadline is not configured then wasm will immediately trap.
682    ///
683    /// ## Interaction with blocking host calls
684    ///
685    /// Epochs (and fuel) do not assist in handling WebAssembly code blocked in
686    /// a call to the host. For example if the WebAssembly function calls
687    /// `wasi:io/poll.poll` to sleep epochs will not assist in waking this up or
688    /// timing it out. Epochs intentionally only affect running WebAssembly code
689    /// itself and it's left to the embedder to determine how best to wake up
690    /// indefinitely blocking code in the host.
691    ///
692    /// The typical solution for this, however, is to use the `async` variant of
693    /// WASI host functions. This models computation as a Rust `Future` which
694    /// means that when blocking happens the future is only suspended and
695    /// control yields back to the main event loop. This gives the embedder the
696    /// opportunity to use `tokio::time::timeout` for example on a wasm
697    /// computation and have the desired effect of cancelling a blocking
698    /// operation when a timeout expires.
699    ///
700    /// ## When to use fuel vs. epochs
701    ///
702    /// In general, epoch-based interruption results in faster
703    /// execution. This difference is sometimes significant: in some
704    /// measurements, up to 2-3x. This is because epoch-based
705    /// interruption does less work: it only watches for a global
706    /// rarely-changing counter to increment, rather than keeping a
707    /// local frequently-changing counter and comparing it to a
708    /// deadline.
709    ///
710    /// Fuel, in contrast, should be used when *deterministic*
711    /// yielding or trapping is needed. For example, if it is required
712    /// that the same function call with the same starting state will
713    /// always either complete or trap with an out-of-fuel error,
714    /// deterministically, then fuel with a fixed bound should be
715    /// used.
716    ///
717    /// **Note** Enabling this option is not compatible with the Winch compiler.
718    ///
719    /// # See Also
720    ///
721    /// - [`Engine::increment_epoch`](crate::Engine::increment_epoch)
722    /// - [`Store::set_epoch_deadline`](crate::Store::set_epoch_deadline)
723    /// - [`Store::epoch_deadline_trap`](crate::Store::epoch_deadline_trap)
724    /// - [`Store::epoch_deadline_callback`](crate::Store::epoch_deadline_callback)
725    /// - [`Store::epoch_deadline_async_yield_and_update`](crate::Store::epoch_deadline_async_yield_and_update)
    pub fn epoch_interruption(&mut self, enable: bool) -> &mut Self {
        // Stored as `Some(...)` rather than a plain bool — presumably so that
        // later config resolution can distinguish an explicit setting from the
        // unset default; confirm against `ConfigTunables`.
        self.tunables.epoch_interruption = Some(enable);
        self
    }
730
    /// Configures the maximum amount of stack space available for
    /// executing WebAssembly code.
    ///
    /// WebAssembly has well-defined semantics on stack overflow. This is
    /// intended to be a knob which can help configure how much stack space
    /// wasm execution is allowed to consume. Note that the number here is not
    /// super-precise, but rather wasm will take at most "pretty close to this
    /// much" stack space.
    ///
    /// If a wasm call (or series of nested wasm calls) takes more stack space
    /// than the `size` specified then a stack overflow trap will be raised.
    ///
    /// Caveat: this knob only limits the stack space consumed by wasm code.
    /// More importantly, it does not ensure that this much stack space is
    /// available on the calling thread stack. Exhausting the thread stack
    /// typically leads to an **abort** of the process.
    ///
    /// Here are some examples of how that could happen:
    ///
    /// - Let's assume this option is set to 2 MiB and then a thread that has
    ///   a stack with 512 KiB left.
    ///
    ///   If wasm code consumes more than 512 KiB then the process will be aborted.
    ///
    /// - Assuming the same conditions, but this time wasm code does not consume
    ///   any stack but calls into a host function. The host function consumes
    ///   more than 512 KiB of stack space. The process will be aborted.
    ///
    /// There's another gotcha related to recursive calling into wasm: the stack
    /// space consumed by a host function is counted towards this limit. The
    /// host functions are not prevented from consuming more than this limit.
    /// However, if a host function uses more than this limit and then calls
    /// back into wasm, the execution will trap immediately because of
    /// stack overflow.
    ///
    /// When the `async` feature is enabled, this value cannot exceed the
    /// `async_stack_size` option. Be careful not to set this value too close
    /// to `async_stack_size` as doing so may limit how much stack space
    /// is available for host functions.
    ///
    /// By default this option is 512 KiB.
    ///
    /// # Errors
    ///
    /// The `Engine::new` method will fail if the `size` specified here is
    /// either 0 or larger than the [`Config::async_stack_size`] configuration.
    pub fn max_wasm_stack(&mut self, size: usize) -> &mut Self {
        // Validated (non-zero, <= async_stack_size) when the engine is built.
        self.max_wasm_stack = size;
        self
    }
781
    /// Configures the size of the stacks used for asynchronous execution.
    ///
    /// This setting configures the size of the stacks that are allocated for
    /// asynchronous execution. The value cannot be less than `max_wasm_stack`.
    ///
    /// The amount of stack space guaranteed for host functions is
    /// `async_stack_size - max_wasm_stack`, so take care not to set these two values
    /// close to one another; doing so may cause host functions to overflow the
    /// stack and abort the process.
    ///
    /// By default this option is 2 MiB.
    ///
    /// # Errors
    ///
    /// The `Engine::new` method will fail if the value for this option is
    /// smaller than the [`Config::max_wasm_stack`] option.
    #[cfg(any(feature = "async", feature = "stack-switching"))]
    pub fn async_stack_size(&mut self, size: usize) -> &mut Self {
        // Validated against `max_wasm_stack` when the engine is built.
        self.async_stack_size = size;
        self
    }
803
    /// Configures whether or not stacks used for async futures are zeroed
    /// before (re)use.
    ///
    /// When the [`call_async`] variant of calling WebAssembly is used
    /// then Wasmtime will create a separate runtime execution stack for each
    /// future produced by [`call_async`]. By default upon allocation, depending
    /// on the platform, these stacks might be filled with uninitialized
    /// memory. This is safe and correct because, modulo bugs in Wasmtime,
    /// compiled Wasm code will never read from a stack slot before it
    /// initializes the stack slot.
    ///
    /// However, as a defense-in-depth mechanism, you may configure Wasmtime to
    /// ensure that these stacks are zeroed before they are used. Notably, if
    /// you are using the pooling allocator, stacks can be pooled and reused
    /// across different Wasm guests; ensuring that stacks are zeroed can
    /// prevent data leakage between Wasm guests even in the face of potential
    /// read-of-stack-slot-before-initialization bugs in Wasmtime's compiler.
    ///
    /// Stack zeroing can be a costly operation in highly concurrent
    /// environments due to modifications of the virtual address space requiring
    /// process-wide synchronization. It can also be costly in `no-std`
    /// environments that must manually zero memory, and cannot rely on an OS
    /// and virtual memory to provide zeroed pages.
    ///
    /// This option defaults to `false`.
    ///
    /// [`call_async`]: crate::TypedFunc::call_async
    #[cfg(feature = "async")]
    pub fn async_stack_zeroing(&mut self, enable: bool) -> &mut Self {
        // Plain flag; presumably consulted wherever fiber stacks are
        // allocated/reused — confirm against the async stack creation path.
        self.async_stack_zeroing = enable;
        self
    }
836
837    /// Explicitly enables (and un-disables) a given set of [`WasmFeatures`].
838    ///
839    /// Note: this is a low-level method that does not necessarily imply that
840    /// wasmtime _supports_ a feature. It should only be used to _disable_
841    /// features that callers want to be rejected by the parser or _enable_
842    /// features callers are certain that the current configuration of wasmtime
843    /// supports.
844    ///
845    /// Feature validation is deferred until an engine is being built, thus by
846    /// enabling features here a caller may cause
847    /// [`Engine::new`](crate::Engine::new) to fail later, if the feature
848    /// configuration isn't supported.
849    pub fn wasm_features(&mut self, flag: WasmFeatures, enable: bool) -> &mut Self {
850        self.enabled_features.set(flag, enable);
851        self.disabled_features.set(flag, !enable);
852        self
853    }
854
855    /// Configures whether the WebAssembly tail calls proposal will be enabled
856    /// for compilation or not.
857    ///
858    /// The [WebAssembly tail calls proposal] introduces the `return_call` and
859    /// `return_call_indirect` instructions. These instructions allow for Wasm
860    /// programs to implement some recursive algorithms with *O(1)* stack space
861    /// usage.
862    ///
863    /// This is `true` by default except when the Winch compiler is enabled.
864    ///
865    /// [WebAssembly tail calls proposal]: https://github.com/WebAssembly/tail-call
866    pub fn wasm_tail_call(&mut self, enable: bool) -> &mut Self {
867        self.wasm_features(WasmFeatures::TAIL_CALL, enable);
868        self
869    }
870
871    /// Configures whether the WebAssembly custom-page-sizes proposal will be
872    /// enabled for compilation or not.
873    ///
874    /// The [WebAssembly custom-page-sizes proposal] allows a memory to
875    /// customize its page sizes. By default, Wasm page sizes are 64KiB
876    /// large. This proposal allows the memory to opt into smaller page sizes
877    /// instead, allowing Wasm to run in environments with less than 64KiB RAM
878    /// available, for example.
879    ///
880    /// Note that the page size is part of the memory's type, and because
881    /// different memories may have different types, they may also have
882    /// different page sizes.
883    ///
884    /// Currently the only valid page sizes are 64KiB (the default) and 1
885    /// byte. Future extensions may relax this constraint and allow all powers
886    /// of two.
887    ///
888    /// Support for this proposal is disabled by default.
889    ///
890    /// [WebAssembly custom-page-sizes proposal]: https://github.com/WebAssembly/custom-page-sizes
891    pub fn wasm_custom_page_sizes(&mut self, enable: bool) -> &mut Self {
892        self.wasm_features(WasmFeatures::CUSTOM_PAGE_SIZES, enable);
893        self
894    }
895
896    /// Configures whether the WebAssembly [threads] proposal will be enabled
897    /// for compilation.
898    ///
899    /// This feature gates items such as shared memories and atomic
900    /// instructions. Note that the threads feature depends on the bulk memory
901    /// feature, which is enabled by default. Additionally note that while the
902    /// wasm feature is called "threads" it does not actually include the
903    /// ability to spawn threads. Spawning threads is part of the [wasi-threads]
904    /// proposal which is a separately gated feature in Wasmtime.
905    ///
906    /// Embeddings of Wasmtime are able to build their own custom threading
907    /// scheme on top of the core wasm threads proposal, however.
908    ///
909    /// The default value for this option is whether the `threads`
910    /// crate feature of Wasmtime is enabled or not. By default this crate
911    /// feature is enabled.
912    ///
913    /// [threads]: https://github.com/webassembly/threads
914    /// [wasi-threads]: https://github.com/webassembly/wasi-threads
915    #[cfg(feature = "threads")]
916    pub fn wasm_threads(&mut self, enable: bool) -> &mut Self {
917        self.wasm_features(WasmFeatures::THREADS, enable);
918        self
919    }
920
921    /// Configures whether the WebAssembly [shared-everything-threads] proposal
922    /// will be enabled for compilation.
923    ///
924    /// This feature gates extended use of the `shared` attribute on items other
925    /// than memories, extra atomic instructions, and new component model
926    /// intrinsics for spawning threads. It depends on the
927    /// [`wasm_threads`][Self::wasm_threads] being enabled.
928    ///
929    /// [shared-everything-threads]:
930    ///     https://github.com/webassembly/shared-everything-threads
931    pub fn wasm_shared_everything_threads(&mut self, enable: bool) -> &mut Self {
932        self.wasm_features(WasmFeatures::SHARED_EVERYTHING_THREADS, enable);
933        self
934    }
935
936    /// Configures whether the [WebAssembly reference types proposal][proposal]
937    /// will be enabled for compilation.
938    ///
939    /// This feature gates items such as the `externref` and `funcref` types as
940    /// well as allowing a module to define multiple tables.
941    ///
942    /// Note that the reference types proposal depends on the bulk memory proposal.
943    ///
944    /// This feature is `true` by default.
945    ///
946    /// # Errors
947    ///
948    /// The validation of this feature are deferred until the engine is being built,
949    /// and thus may cause `Engine::new` fail if the `bulk_memory` feature is disabled.
950    ///
951    /// [proposal]: https://github.com/webassembly/reference-types
952    #[cfg(feature = "gc")]
953    pub fn wasm_reference_types(&mut self, enable: bool) -> &mut Self {
954        self.wasm_features(WasmFeatures::REFERENCE_TYPES, enable);
955        self
956    }
957
958    /// Configures whether the [WebAssembly function references
959    /// proposal][proposal] will be enabled for compilation.
960    ///
961    /// This feature gates non-nullable reference types, function reference
962    /// types, `call_ref`, `ref.func`, and non-nullable reference related
963    /// instructions.
964    ///
965    /// Note that the function references proposal depends on the reference
966    /// types proposal.
967    ///
968    /// This feature is `false` by default.
969    ///
970    /// [proposal]: https://github.com/WebAssembly/function-references
971    #[cfg(feature = "gc")]
972    pub fn wasm_function_references(&mut self, enable: bool) -> &mut Self {
973        self.wasm_features(WasmFeatures::FUNCTION_REFERENCES, enable);
974        self
975    }
976
977    /// Configures whether the [WebAssembly wide-arithmetic][proposal] will be
978    /// enabled for compilation.
979    ///
980    /// This feature is `false` by default.
981    ///
982    /// [proposal]: https://github.com/WebAssembly/wide-arithmetic
983    pub fn wasm_wide_arithmetic(&mut self, enable: bool) -> &mut Self {
984        self.wasm_features(WasmFeatures::WIDE_ARITHMETIC, enable);
985        self
986    }
987
988    /// Configures whether the [WebAssembly Garbage Collection
989    /// proposal][proposal] will be enabled for compilation.
990    ///
991    /// This feature gates `struct` and `array` type definitions and references,
992    /// the `i31ref` type, and all related instructions.
993    ///
994    /// Note that the function references proposal depends on the typed function
995    /// references proposal.
996    ///
997    /// This feature is `false` by default.
998    ///
999    /// **Warning: Wasmtime's implementation of the GC proposal is still in
1000    /// progress and generally not ready for primetime.**
1001    ///
1002    /// [proposal]: https://github.com/WebAssembly/gc
1003    #[cfg(feature = "gc")]
1004    pub fn wasm_gc(&mut self, enable: bool) -> &mut Self {
1005        self.wasm_features(WasmFeatures::GC, enable);
1006        self
1007    }
1008
1009    /// Configures whether the WebAssembly SIMD proposal will be
1010    /// enabled for compilation.
1011    ///
1012    /// The [WebAssembly SIMD proposal][proposal]. This feature gates items such
1013    /// as the `v128` type and all of its operators being in a module. Note that
1014    /// this does not enable the [relaxed simd proposal].
1015    ///
1016    /// **Note**
1017    ///
1018    /// On x86_64 platforms the base CPU feature requirement for SIMD
1019    /// is SSE2 for the Cranelift compiler and AVX for the Winch compiler.
1020    ///
1021    /// This is `true` by default.
1022    ///
1023    /// [proposal]: https://github.com/webassembly/simd
1024    /// [relaxed simd proposal]: https://github.com/WebAssembly/relaxed-simd
1025    pub fn wasm_simd(&mut self, enable: bool) -> &mut Self {
1026        self.wasm_features(WasmFeatures::SIMD, enable);
1027        self
1028    }
1029
1030    /// Configures whether the WebAssembly Relaxed SIMD proposal will be
1031    /// enabled for compilation.
1032    ///
1033    /// The relaxed SIMD proposal adds new instructions to WebAssembly which,
1034    /// for some specific inputs, are allowed to produce different results on
1035    /// different hosts. More-or-less this proposal enables exposing
1036    /// platform-specific semantics of SIMD instructions in a controlled
1037    /// fashion to a WebAssembly program. From an embedder's perspective this
1038    /// means that WebAssembly programs may execute differently depending on
1039    /// whether the host is x86_64 or AArch64, for example.
1040    ///
1041    /// By default Wasmtime lowers relaxed SIMD instructions to the fastest
1042    /// lowering for the platform it's running on. This means that, by default,
1043    /// some relaxed SIMD instructions may have different results for the same
1044    /// inputs across x86_64 and AArch64. This behavior can be disabled through
1045    /// the [`Config::relaxed_simd_deterministic`] option which will force
1046    /// deterministic behavior across all platforms, as classified by the
1047    /// specification, at the cost of performance.
1048    ///
1049    /// This is `true` by default.
1050    ///
1051    /// [proposal]: https://github.com/webassembly/relaxed-simd
1052    pub fn wasm_relaxed_simd(&mut self, enable: bool) -> &mut Self {
1053        self.wasm_features(WasmFeatures::RELAXED_SIMD, enable);
1054        self
1055    }
1056
    /// This option can be used to control the behavior of the [relaxed SIMD
    /// proposal's][proposal] instructions.
    ///
    /// The relaxed SIMD proposal introduces instructions that are allowed to
    /// have different behavior on different architectures, primarily to afford
    /// an efficient implementation on all architectures. This means, however,
    /// that the same module may execute differently on one host than another,
    /// which typically is not otherwise the case. This option is provided to
    /// force Wasmtime to generate deterministic code for all relaxed simd
    /// instructions, at the cost of performance, for all architectures. When
    /// this option is enabled then the deterministic behavior of all
    /// instructions in the relaxed SIMD proposal is selected.
    ///
    /// This is `false` by default.
    ///
    /// [proposal]: https://github.com/webassembly/relaxed-simd
    pub fn relaxed_simd_deterministic(&mut self, enable: bool) -> &mut Self {
        // `Some(...)` marks the option as explicitly configured; the `false`
        // default applies while this tunable remains unset.
        self.tunables.relaxed_simd_deterministic = Some(enable);
        self
    }
1077
1078    /// Configures whether the [WebAssembly bulk memory operations
1079    /// proposal][proposal] will be enabled for compilation.
1080    ///
1081    /// This feature gates items such as the `memory.copy` instruction, passive
1082    /// data/table segments, etc, being in a module.
1083    ///
1084    /// This is `true` by default.
1085    ///
1086    /// Feature `reference_types`, which is also `true` by default, requires
1087    /// this feature to be enabled. Thus disabling this feature must also disable
1088    /// `reference_types` as well using [`wasm_reference_types`](crate::Config::wasm_reference_types).
1089    ///
1090    /// # Errors
1091    ///
1092    /// Disabling this feature without disabling `reference_types` will cause
1093    /// `Engine::new` to fail.
1094    ///
1095    /// [proposal]: https://github.com/webassembly/bulk-memory-operations
1096    pub fn wasm_bulk_memory(&mut self, enable: bool) -> &mut Self {
1097        self.wasm_features(WasmFeatures::BULK_MEMORY, enable);
1098        self
1099    }
1100
1101    /// Configures whether the WebAssembly multi-value [proposal] will
1102    /// be enabled for compilation.
1103    ///
1104    /// This feature gates functions and blocks returning multiple values in a
1105    /// module, for example.
1106    ///
1107    /// This is `true` by default.
1108    ///
1109    /// [proposal]: https://github.com/webassembly/multi-value
1110    pub fn wasm_multi_value(&mut self, enable: bool) -> &mut Self {
1111        self.wasm_features(WasmFeatures::MULTI_VALUE, enable);
1112        self
1113    }
1114
1115    /// Configures whether the WebAssembly multi-memory [proposal] will
1116    /// be enabled for compilation.
1117    ///
1118    /// This feature gates modules having more than one linear memory
1119    /// declaration or import.
1120    ///
1121    /// This is `true` by default.
1122    ///
1123    /// [proposal]: https://github.com/webassembly/multi-memory
1124    pub fn wasm_multi_memory(&mut self, enable: bool) -> &mut Self {
1125        self.wasm_features(WasmFeatures::MULTI_MEMORY, enable);
1126        self
1127    }
1128
1129    /// Configures whether the WebAssembly memory64 [proposal] will
1130    /// be enabled for compilation.
1131    ///
1132    /// Note that this the upstream specification is not finalized and Wasmtime
1133    /// may also have bugs for this feature since it hasn't been exercised
1134    /// much.
1135    ///
1136    /// This is `false` by default.
1137    ///
1138    /// [proposal]: https://github.com/webassembly/memory64
1139    pub fn wasm_memory64(&mut self, enable: bool) -> &mut Self {
1140        self.wasm_features(WasmFeatures::MEMORY64, enable);
1141        self
1142    }
1143
1144    /// Configures whether the WebAssembly extended-const [proposal] will
1145    /// be enabled for compilation.
1146    ///
1147    /// This is `true` by default.
1148    ///
1149    /// [proposal]: https://github.com/webassembly/extended-const
1150    pub fn wasm_extended_const(&mut self, enable: bool) -> &mut Self {
1151        self.wasm_features(WasmFeatures::EXTENDED_CONST, enable);
1152        self
1153    }
1154
1155    /// Configures whether the [WebAssembly stack switching
1156    /// proposal][proposal] will be enabled for compilation.
1157    ///
1158    /// This feature gates the use of control tags.
1159    ///
1160    /// This feature depends on the `function_reference_types` and
1161    /// `exceptions` features.
1162    ///
1163    /// This feature is `false` by default.
1164    ///
1165    /// # Errors
1166    ///
1167    /// [proposal]: https://github.com/webassembly/stack-switching
1168    pub fn wasm_stack_switching(&mut self, enable: bool) -> &mut Self {
1169        self.wasm_features(WasmFeatures::STACK_SWITCHING, enable);
1170        self
1171    }
1172
1173    /// Configures whether the WebAssembly component-model [proposal] will
1174    /// be enabled for compilation.
1175    ///
1176    /// This flag can be used to blanket disable all components within Wasmtime.
1177    /// Otherwise usage of components requires statically using
1178    /// [`Component`](crate::component::Component) instead of
1179    /// [`Module`](crate::Module) for example anyway.
1180    ///
1181    /// The default value for this option is whether the `component-model`
1182    /// crate feature of Wasmtime is enabled or not. By default this crate
1183    /// feature is enabled.
1184    ///
1185    /// [proposal]: https://github.com/webassembly/component-model
1186    #[cfg(feature = "component-model")]
1187    pub fn wasm_component_model(&mut self, enable: bool) -> &mut Self {
1188        self.wasm_features(WasmFeatures::COMPONENT_MODEL, enable);
1189        self
1190    }
1191
1192    /// Configures whether components support the async ABI [proposal] for
1193    /// lifting and lowering functions, as well as `stream`, `future`, and
1194    /// `error-context` types.
1195    ///
1196    /// Please note that Wasmtime's support for this feature is _very_
1197    /// incomplete.
1198    ///
1199    /// [proposal]:
1200    ///     https://github.com/WebAssembly/component-model/blob/main/design/mvp/Concurrency.md
1201    #[cfg(feature = "component-model-async")]
1202    pub fn wasm_component_model_async(&mut self, enable: bool) -> &mut Self {
1203        self.wasm_features(WasmFeatures::CM_ASYNC, enable);
1204        self
1205    }
1206
1207    /// This corresponds to the 🚝 emoji in the component model specification.
1208    ///
1209    /// Please note that Wasmtime's support for this feature is _very_
1210    /// incomplete.
1211    ///
1212    /// [proposal]:
1213    ///     https://github.com/WebAssembly/component-model/blob/main/design/mvp/Concurrency.md
1214    #[cfg(feature = "component-model-async")]
1215    pub fn wasm_component_model_async_builtins(&mut self, enable: bool) -> &mut Self {
1216        self.wasm_features(WasmFeatures::CM_ASYNC_BUILTINS, enable);
1217        self
1218    }
1219
1220    /// This corresponds to the 🚟 emoji in the component model specification.
1221    ///
1222    /// Please note that Wasmtime's support for this feature is _very_
1223    /// incomplete.
1224    ///
1225    /// [proposal]: https://github.com/WebAssembly/component-model/blob/main/design/mvp/Concurrency.md
1226    #[cfg(feature = "component-model-async")]
1227    pub fn wasm_component_model_async_stackful(&mut self, enable: bool) -> &mut Self {
1228        self.wasm_features(WasmFeatures::CM_ASYNC_STACKFUL, enable);
1229        self
1230    }
1231
1232    /// This corresponds to the 🧵 emoji in the component model specification.
1233    ///
1234    /// Please note that Wasmtime's support for this feature is _very_
1235    /// incomplete.
1236    ///
1237    /// [proposal]:
1238    ///     https://github.com/WebAssembly/component-model/pull/557
1239    #[cfg(feature = "component-model-async")]
1240    pub fn wasm_component_model_threading(&mut self, enable: bool) -> &mut Self {
1241        self.wasm_features(WasmFeatures::CM_THREADING, enable);
1242        self
1243    }
1244
1245    /// This corresponds to the 📝 emoji in the component model specification.
1246    ///
1247    /// Please note that Wasmtime's support for this feature is _very_
1248    /// incomplete.
1249    ///
1250    /// [proposal]: https://github.com/WebAssembly/component-model/blob/main/design/mvp/Concurrency.md
1251    #[cfg(feature = "component-model")]
1252    pub fn wasm_component_model_error_context(&mut self, enable: bool) -> &mut Self {
1253        self.wasm_features(WasmFeatures::CM_ERROR_CONTEXT, enable);
1254        self
1255    }
1256
1257    /// Configures whether the [GC extension to the component-model
1258    /// proposal][proposal] is enabled or not.
1259    ///
1260    /// This corresponds to the 🛸 emoji in the component model specification.
1261    ///
1262    /// Please note that Wasmtime's support for this feature is _very_
1263    /// incomplete.
1264    ///
1265    /// [proposal]: https://github.com/WebAssembly/component-model/issues/525
1266    #[cfg(feature = "component-model")]
1267    pub fn wasm_component_model_gc(&mut self, enable: bool) -> &mut Self {
1268        self.wasm_features(WasmFeatures::CM_GC, enable);
1269        self
1270    }
1271
1272    /// Configures whether the component model map type is enabled or not.
1273    ///
1274    /// This is part of the component model specification and enables the
1275    /// `map<k, v>` type in WIT and the component binary format.
1276    #[cfg(feature = "component-model")]
1277    pub fn wasm_component_model_map(&mut self, enable: bool) -> &mut Self {
1278        self.wasm_features(WasmFeatures::CM_MAP, enable);
1279        self
1280    }
1281
1282    /// This corresponds to the 🔧 emoji in the component model specification.
1283    ///
1284    /// Please note that Wasmtime's support for this feature is _very_
1285    /// incomplete.
1286    #[cfg(feature = "component-model")]
1287    pub fn wasm_component_model_fixed_length_lists(&mut self, enable: bool) -> &mut Self {
1288        self.wasm_features(WasmFeatures::CM_FIXED_LENGTH_LISTS, enable);
1289        self
1290    }
1291
1292    /// Configures whether the [Exception-handling proposal][proposal] is enabled or not.
1293    ///
1294    /// [proposal]: https://github.com/WebAssembly/exception-handling
1295    #[cfg(feature = "gc")]
1296    pub fn wasm_exceptions(&mut self, enable: bool) -> &mut Self {
1297        self.wasm_features(WasmFeatures::EXCEPTIONS, enable);
1298        self
1299    }
1300
1301    #[doc(hidden)] // FIXME(#3427) - if/when implemented then un-hide this
1302    #[deprecated = "This configuration option only exists for internal \
1303                    usage with the spec testsuite. It may be removed at \
1304                    any time and without warning. Do not rely on it!"]
1305    pub fn wasm_legacy_exceptions(&mut self, enable: bool) -> &mut Self {
1306        self.wasm_features(WasmFeatures::LEGACY_EXCEPTIONS, enable);
1307        self
1308    }
1309
    /// Configures which compilation strategy will be used for wasm modules.
    ///
    /// This method can be used to configure which compiler is used for wasm
    /// modules, and for more documentation consult the [`Strategy`] enumeration
    /// and its documentation.
    ///
    /// The default value for this is `Strategy::Auto`.
    ///
    /// # Panics
    ///
    /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
    #[cfg(any(feature = "cranelift", feature = "winch"))]
    pub fn strategy(&mut self, strategy: Strategy) -> &mut Self {
        // `not_auto()` presumably maps `Strategy::Auto` to "no explicit
        // choice" so backend selection happens at engine-build time — confirm
        // against `Strategy::not_auto`.
        self.compiler_config_mut().strategy = strategy.not_auto();
        self
    }
1326
    /// Configures which garbage collector will be used for Wasm modules.
    ///
    /// This method can be used to configure which garbage collector
    /// implementation is used for Wasm modules. For more documentation, consult
    /// the [`Collector`] enumeration and its documentation.
    ///
    /// The default value for this is `Collector::Auto`.
    #[cfg(feature = "gc")]
    pub fn collector(&mut self, collector: Collector) -> &mut Self {
        // Stored directly; presumably `Collector::Auto` is resolved to a
        // concrete implementation when the engine is built — TODO confirm.
        self.collector = collector;
        self
    }
1339
    /// Creates a default profiler based on the profiling strategy chosen.
    ///
    /// Profiler creation calls the type's default initializer where the purpose is
    /// really just to put in place the type used for profiling.
    ///
    /// Some [`ProfilingStrategy`] values require specific platforms or
    /// particular features to be enabled, such as `ProfilingStrategy::JitDump`
    /// requiring the `jitdump` feature.
    ///
    /// # Errors
    ///
    /// The validation of this field is deferred until the engine is being built, and thus may
    /// cause `Engine::new` to fail if the required feature is disabled, or the platform is not
    /// supported.
    pub fn profiler(&mut self, profile: ProfilingStrategy) -> &mut Self {
        // Only the strategy is recorded here; the agent itself is created
        // later (validation deferred to engine build, per the docs above).
        self.profiling_strategy = profile;
        self
    }
1358
1359    /// Configures whether the debug verifier of Cranelift is enabled or not.
1360    ///
1361    /// When Cranelift is used as a code generation backend this will configure
1362    /// it to have the `enable_verifier` flag which will enable a number of debug
1363    /// checks inside of Cranelift. This is largely only useful for the
1364    /// developers of wasmtime itself.
1365    ///
1366    /// The default value for this is `false`
1367    ///
1368    /// # Panics
1369    ///
1370    /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
1371    #[cfg(any(feature = "cranelift", feature = "winch"))]
1372    pub fn cranelift_debug_verifier(&mut self, enable: bool) -> &mut Self {
1373        let val = if enable { "true" } else { "false" };
1374        self.compiler_config_mut()
1375            .settings
1376            .insert("enable_verifier".to_string(), val.to_string());
1377        self
1378    }
1379
    /// Configures whether extra debug checks are inserted into
    /// Wasmtime-generated code by Cranelift.
    ///
    /// The default value for this is `false`
    ///
    /// # Panics
    ///
    /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
    #[cfg(any(feature = "cranelift", feature = "winch"))]
    pub fn cranelift_wasmtime_debug_checks(&mut self, enable: bool) -> &mut Self {
        // Delegates to `cranelift_flag_set`, which is an `unsafe` API —
        // presumably because arbitrary flag values can affect codegen
        // correctness; this call only ever passes "true"/"false".
        unsafe { self.cranelift_flag_set("wasmtime_debug_checks", &enable.to_string()) }
    }
1392
1393    /// Configures the Cranelift code generator optimization level.
1394    ///
1395    /// When the Cranelift code generator is used you can configure the
1396    /// optimization level used for generated code in a few various ways. For
1397    /// more information see the documentation of [`OptLevel`].
1398    ///
1399    /// The default value for this is `OptLevel::Speed`.
1400    ///
1401    /// # Panics
1402    ///
1403    /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
1404    #[cfg(any(feature = "cranelift", feature = "winch"))]
1405    pub fn cranelift_opt_level(&mut self, level: OptLevel) -> &mut Self {
1406        let val = match level {
1407            OptLevel::None => "none",
1408            OptLevel::Speed => "speed",
1409            OptLevel::SpeedAndSize => "speed_and_size",
1410        };
1411        self.compiler_config_mut()
1412            .settings
1413            .insert("opt_level".to_string(), val.to_string());
1414        self
1415    }
1416
1417    /// Configures the regalloc algorithm used by the Cranelift code generator.
1418    ///
1419    /// Cranelift can select any of several register allocator algorithms. Each
1420    /// of these algorithms generates correct code, but they represent different
1421    /// tradeoffs between compile speed (how expensive the compilation process
1422    /// is) and run-time speed (how fast the generated code runs).
1423    /// For more information see the documentation of [`RegallocAlgorithm`].
1424    ///
1425    /// The default value for this is `RegallocAlgorithm::Backtracking`.
1426    ///
1427    /// # Panics
1428    ///
1429    /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
1430    #[cfg(any(feature = "cranelift", feature = "winch"))]
1431    pub fn cranelift_regalloc_algorithm(&mut self, algo: RegallocAlgorithm) -> &mut Self {
1432        let val = match algo {
1433            RegallocAlgorithm::Backtracking => "backtracking",
1434            RegallocAlgorithm::SinglePass => "single_pass",
1435        };
1436        self.compiler_config_mut()
1437            .settings
1438            .insert("regalloc_algorithm".to_string(), val.to_string());
1439        self
1440    }
1441
1442    /// Configures whether Cranelift should perform a NaN-canonicalization pass.
1443    ///
1444    /// When Cranelift is used as a code generation backend this will configure
1445    /// it to replace NaNs with a single canonical value. This is useful for
1446    /// users requiring entirely deterministic WebAssembly computation.  This is
1447    /// not required by the WebAssembly spec, so it is not enabled by default.
1448    ///
1449    /// Note that this option affects not only WebAssembly's `f32` and `f64`
1450    /// types but additionally the `v128` type. This option will cause
1451    /// operations using any of these types to have extra checks placed after
1452    /// them to normalize NaN values as needed.
1453    ///
1454    /// The default value for this is `false`
1455    ///
1456    /// # Panics
1457    ///
1458    /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
1459    #[cfg(any(feature = "cranelift", feature = "winch"))]
1460    pub fn cranelift_nan_canonicalization(&mut self, enable: bool) -> &mut Self {
1461        let val = if enable { "true" } else { "false" };
1462        self.compiler_config_mut()
1463            .settings
1464            .insert("enable_nan_canonicalization".to_string(), val.to_string());
1465        self
1466    }
1467
1468    /// Controls whether proof-carrying code (PCC) is used to validate
1469    /// lowering of Wasm sandbox checks.
1470    ///
1471    /// Proof-carrying code carries "facts" about program values from
1472    /// the IR all the way to machine code, and checks those facts
1473    /// against known machine-instruction semantics. This guards
1474    /// against bugs in instruction lowering that might create holes
1475    /// in the Wasm sandbox.
1476    ///
1477    /// PCC is designed to be fast: it does not require complex
1478    /// solvers or logic engines to verify, but only a linear pass
1479    /// over a trail of "breadcrumbs" or facts at each intermediate
1480    /// value. Thus, it is appropriate to enable in production.
1481    ///
1482    /// # Panics
1483    ///
1484    /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
1485    #[cfg(any(feature = "cranelift", feature = "winch"))]
1486    pub fn cranelift_pcc(&mut self, enable: bool) -> &mut Self {
1487        let val = if enable { "true" } else { "false" };
1488        self.compiler_config_mut()
1489            .settings
1490            .insert("enable_pcc".to_string(), val.to_string());
1491        self
1492    }
1493
1494    /// Allows setting a Cranelift boolean flag or preset. This allows
1495    /// fine-tuning of Cranelift settings.
1496    ///
1497    /// Since Cranelift flags may be unstable, this method should not be considered to be stable
1498    /// either; other `Config` functions should be preferred for stability.
1499    ///
1500    /// # Safety
1501    ///
1502    /// This is marked as unsafe, because setting the wrong flag might break invariants,
1503    /// resulting in execution hazards.
1504    ///
1505    /// # Errors
1506    ///
1507    /// The validation of the flags are deferred until the engine is being built, and thus may
1508    /// cause `Engine::new` fail if the flag's name does not exist, or the value is not appropriate
1509    /// for the flag type.
1510    ///
1511    /// # Panics
1512    ///
1513    /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
1514    #[cfg(any(feature = "cranelift", feature = "winch"))]
1515    pub unsafe fn cranelift_flag_enable(&mut self, flag: &str) -> &mut Self {
1516        self.compiler_config_mut().flags.insert(flag.to_string());
1517        self
1518    }
1519
1520    /// Allows settings another Cranelift flag defined by a flag name and value. This allows
1521    /// fine-tuning of Cranelift settings.
1522    ///
1523    /// Since Cranelift flags may be unstable, this method should not be considered to be stable
1524    /// either; other `Config` functions should be preferred for stability.
1525    ///
1526    /// # Safety
1527    ///
1528    /// This is marked as unsafe, because setting the wrong flag might break invariants,
1529    /// resulting in execution hazards.
1530    ///
1531    /// # Errors
1532    ///
1533    /// The validation of the flags are deferred until the engine is being built, and thus may
1534    /// cause `Engine::new` fail if the flag's name does not exist, or incompatible with other
1535    /// settings.
1536    ///
1537    /// For example, feature `wasm_backtrace` will set `unwind_info` to `true`, but if it's
1538    /// manually set to false then it will fail.
1539    ///
1540    /// # Panics
1541    ///
1542    /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
1543    #[cfg(any(feature = "cranelift", feature = "winch"))]
1544    pub unsafe fn cranelift_flag_set(&mut self, name: &str, value: &str) -> &mut Self {
1545        self.compiler_config_mut()
1546            .settings
1547            .insert(name.to_string(), value.to_string());
1548        self
1549    }
1550
    /// Set a custom [`Cache`].
    ///
    /// To load a cache configuration from a file, use [`Cache::from_file`]. Otherwise, you can
    /// create a new cache config using [`CacheConfig::new`] and passing that to [`Cache::new`].
    ///
    /// If you want to disable the cache, you can call this method with `None`.
    ///
    /// By default, new configs do not have caching enabled.
    /// Every call to [`Module::new(my_wasm)`][crate::Module::new] will recompile `my_wasm`,
    /// even when it is unchanged, unless an enabled `CacheConfig` is provided.
    ///
    /// This method is only available when the `cache` feature of this crate is
    /// enabled.
    ///
    /// [docs]: https://bytecodealliance.github.io/wasmtime/cli-cache.html
    #[cfg(feature = "cache")]
    pub fn cache(&mut self, cache: Option<Cache>) -> &mut Self {
        // `None` disables caching entirely.
        self.cache = cache;
        self
    }
1571
1572    /// Sets a custom memory creator.
1573    ///
1574    /// Custom memory creators are used when creating host `Memory` objects or when
1575    /// creating instance linear memories for the on-demand instance allocation strategy.
1576    #[cfg(feature = "runtime")]
1577    pub fn with_host_memory(&mut self, mem_creator: Arc<dyn MemoryCreator>) -> &mut Self {
1578        self.mem_creator = Some(Arc::new(MemoryCreatorProxy(mem_creator)));
1579        self
1580    }
1581
1582    /// Sets a custom stack creator.
1583    ///
1584    /// Custom memory creators are used when creating creating async instance stacks for
1585    /// the on-demand instance allocation strategy.
1586    #[cfg(feature = "async")]
1587    pub fn with_host_stack(&mut self, stack_creator: Arc<dyn StackCreator>) -> &mut Self {
1588        self.stack_creator = Some(Arc::new(StackCreatorProxy(stack_creator)));
1589        self
1590    }
1591
    /// Sets a custom executable-memory publisher.
    ///
    /// Custom executable-memory publishers are hooks that allow
    /// Wasmtime to make certain regions of memory executable when
    /// loading precompiled modules or compiling new modules
    /// in-process. In most modern operating systems, memory allocated
    /// for heap usage is readable and writable by default but not
    /// executable. To jump to machine code stored in that memory, we
    /// need to make it executable. For security reasons, we usually
    /// also make it read-only at the same time, so the executing code
    /// can't be modified later.
    ///
    /// By default, Wasmtime will use the appropriate system calls on
    /// the host platform for this work. However, it also allows
    /// plugging in a custom implementation via this configuration
    /// option. This may be useful on custom or `no_std` platforms,
    /// for example, especially where virtual memory is not otherwise
    /// used by Wasmtime (no `signals-and-traps` feature).
    #[cfg(feature = "runtime")]
    pub fn with_custom_code_memory(
        &mut self,
        custom_code_memory: Option<Arc<dyn CustomCodeMemory>>,
    ) -> &mut Self {
        // `None` restores the default behavior (host system calls).
        self.custom_code_memory = custom_code_memory;
        self
    }
1618
1619    /// Sets the instance allocation strategy to use.
1620    ///
1621    /// This is notably used in conjunction with
1622    /// [`InstanceAllocationStrategy::Pooling`] and [`PoolingAllocationConfig`].
1623    pub fn allocation_strategy(
1624        &mut self,
1625        strategy: impl Into<InstanceAllocationStrategy>,
1626    ) -> &mut Self {
1627        self.allocation_strategy = strategy.into();
1628        self
1629    }
1630
    /// Specifies the capacity of linear memories, in bytes, in their initial
    /// allocation.
    ///
    /// > Note: this value has important performance ramifications, be sure to
    /// > benchmark when setting this to a non-default value and read over this
    /// > documentation.
    ///
    /// This function will change the size of the initial memory allocation made
    /// for linear memories. This setting is only applicable when the initial
    /// size of a linear memory is below this threshold. Linear memories are
    /// allocated in the virtual address space of the host process with OS APIs
    /// such as `mmap` and this setting affects how large the allocation will
    /// be.
    ///
    /// ## Background: WebAssembly Linear Memories
    ///
    /// WebAssembly linear memories always start with a minimum size and can
    /// possibly grow up to a maximum size. The minimum size is always specified
    /// in a WebAssembly module itself and the maximum size can either be
    /// optionally specified in the module or inherently limited by the index
    /// type. For example for this module:
    ///
    /// ```wasm
    /// (module
    ///     (memory $a 4)
    ///     (memory $b 4096 4096 (pagesize 1))
    ///     (memory $c i64 10)
    /// )
    /// ```
    ///
    /// * Memory `$a` initially allocates 4 WebAssembly pages (256KiB) and can
    ///   grow up to 4GiB, the limit of the 32-bit index space.
    /// * Memory `$b` initially allocates 4096 WebAssembly pages, but in this
    ///   case its page size is 1, so it's 4096 bytes. Memory can also grow no
    ///   further meaning that it will always be 4096 bytes.
    /// * Memory `$c` is a 64-bit linear memory which starts with 640KiB of
    ///   memory and can theoretically grow up to 2^64 bytes, although most
    ///   hosts will run out of memory long before that.
    ///
    /// All operations on linear memories done by wasm are required to be
    /// in-bounds. Any access beyond the end of a linear memory is considered a
    /// trap.
    ///
    /// ## What this setting affects: Virtual Memory
    ///
    /// This setting is used to configure the behavior of the size of the linear
    /// memory allocation performed for each of these memories. For example the
    /// initial linear memory allocation looks like this:
    ///
    /// ```text
    ///              memory_reservation
    ///                    |
    ///          ◄─────────┴────────────────►
    /// ┌───────┬─────────┬──────────────────┬───────┐
    /// │ guard │ initial │ ... capacity ... │ guard │
    /// └───────┴─────────┴──────────────────┴───────┘
    ///  ◄──┬──►                              ◄──┬──►
    ///     │                                    │
    ///     │                             memory_guard_size
    ///     │
    ///     │
    ///  memory_guard_size (if guard_before_linear_memory)
    /// ```
    ///
    /// Memory in the `initial` range is accessible to the instance and can be
    /// read/written by wasm code. Memory in the `guard` regions is never
    /// accessible to wasm code and memory in `capacity` is initially
    /// inaccessible but may become accessible through `memory.grow` instructions
    /// for example.
    ///
    /// This means that this setting is the size of the initial chunk of virtual
    /// memory that a linear memory may grow into.
    ///
    /// ## What this setting affects: Runtime Speed
    ///
    /// This is a performance-sensitive setting which is taken into account
    /// during the compilation process of a WebAssembly module. For example if a
    /// 32-bit WebAssembly linear memory has a `memory_reservation` size of 4GiB
    /// then bounds checks can be elided because `capacity` will be guaranteed
    /// to be unmapped for all addressable bytes that wasm can access (modulo a
    /// few details).
    ///
    /// If `memory_reservation` was something smaller like 256KiB then that
    /// would have a much smaller impact on virtual memory but the compiled code
    /// would then need to have explicit bounds checks to ensure that
    /// loads/stores are in-bounds.
    ///
    /// The goal of this setting is to enable skipping bounds checks in most
    /// modules by default. Some situations which require explicit bounds checks
    /// though are:
    ///
    /// * When `memory_reservation` is smaller than the addressable size of the
    ///   linear memory. For example, 64-bit linear memories always need
    ///   bounds checks as they can address the entire virtual address space.
    ///   For 32-bit linear memories a `memory_reservation` minimum size of 4GiB
    ///   is required to elide bounds checks.
    ///
    /// * When linear memories have a page size of 1 then bounds checks are
    ///   required. In this situation virtual memory can't be relied upon
    ///   because that operates at the host page size granularity where wasm
    ///   requires a per-byte level granularity.
    ///
    /// * Configuration settings such as [`Config::signals_based_traps`] can be
    ///   used to disable the use of signal handlers and virtual memory so
    ///   explicit bounds checks are required.
    ///
    /// * When [`Config::memory_guard_size`] is too small a bounds check may be
    ///   required. For 32-bit wasm addresses are actually 33-bit effective
    ///   addresses because loads/stores have a 32-bit static offset to add to
    ///   the dynamic 32-bit address. If the static offset is larger than the
    ///   size of the guard region then an explicit bounds check is required.
    ///
    /// ## What this setting affects: Memory Growth Behavior
    ///
    /// In addition to affecting bounds checks emitted in compiled code this
    /// setting also affects how WebAssembly linear memories are grown. The
    /// `memory.grow` instruction can be used to make a linear memory larger and
    /// this is also affected by APIs such as
    /// [`Memory::grow`](crate::Memory::grow).
    ///
    /// In these situations when the amount being grown is small enough to fit
    /// within the remaining capacity then the linear memory doesn't have to be
    /// moved at runtime. If the capacity runs out though then a new linear
    /// memory allocation must be made and the contents of linear memory is
    /// copied over.
    ///
    /// For example here's a situation where a copy happens:
    ///
    /// * The `memory_reservation` setting is configured to 128KiB.
    /// * A WebAssembly linear memory starts with a single 64KiB page.
    /// * This memory can be grown by one page to contain the full 128KiB of
    ///   memory.
    /// * If grown by one more page, though, then a 192KiB allocation must be
    ///   made and the previous 128KiB of contents are copied into the new
    ///   allocation.
    ///
    /// This growth behavior can have a significant performance impact if lots
    /// of data needs to be copied on growth. Conversely if memory growth never
    /// needs to happen because the capacity will always be large enough then
    /// optimizations can be applied to cache the base pointer of linear memory.
    ///
    /// When memory is grown then the
    /// [`Config::memory_reservation_for_growth`] is used for the new
    /// memory allocation to have memory to grow into.
    ///
    /// When using the pooling allocator via [`PoolingAllocationConfig`] then
    /// memories are never allowed to move so requests for growth are instead
    /// rejected with an error.
    ///
    /// ## When this setting is not used
    ///
    /// This setting is ignored and unused when the initial size of linear
    /// memory is larger than this threshold. For example if this setting is set
    /// to 1MiB but a wasm module requires a 2MiB minimum allocation then this
    /// setting is ignored. In this situation the minimum size of memory will be
    /// allocated along with [`Config::memory_reservation_for_growth`]
    /// after it to grow into.
    ///
    /// That means that this value can be set to zero. That can be useful in
    /// benchmarking to see the overhead of bounds checks for example.
    /// Additionally it can be used to minimize the virtual memory allocated by
    /// Wasmtime.
    ///
    /// ## Default Value
    ///
    /// The default value for this property depends on the host platform. For
    /// 64-bit platforms there's lots of address space available, so the default
    /// configured here is 4GiB. When coupled with the default size of
    /// [`Config::memory_guard_size`] this means that 32-bit WebAssembly linear
    /// memories with 64KiB page sizes will skip almost all bounds checks by
    /// default.
    ///
    /// For 32-bit platforms this value defaults to 10MiB. This means that
    /// bounds checks will be required on 32-bit platforms.
    pub fn memory_reservation(&mut self, bytes: u64) -> &mut Self {
        // `Some` marks this as explicitly configured; when left unset the
        // platform-dependent default described above is used instead.
        self.tunables.memory_reservation = Some(bytes);
        self
    }
1809
    /// Indicates whether linear memories may relocate their base pointer at
    /// runtime.
    ///
    /// WebAssembly linear memories either have a maximum size that's explicitly
    /// listed in the type of a memory or inherently limited by the index type
    /// of the memory (e.g. 4GiB for 32-bit linear memories). Depending on how
    /// the linear memory is allocated (see [`Config::memory_reservation`]) it
    /// may be necessary to move the memory in the host's virtual address space
    /// during growth. This option controls whether this movement is allowed or
    /// not.
    ///
    /// An example of a linear memory needing to move is when
    /// [`Config::memory_reservation`] is 0 then a linear memory will be
    /// allocated as the minimum size of the memory plus
    /// [`Config::memory_reservation_for_growth`]. When memory grows beyond the
    /// reservation for growth then the memory needs to be relocated.
    ///
    /// When this option is set to `false` then it can have a number of impacts
    /// on how memories work at runtime:
    ///
    /// * Modules can be compiled with static knowledge the base pointer of
    ///   linear memory never changes to enable optimizations such as
    ///   loop invariant code motion (hoisting the base pointer out of a loop).
    ///
    /// * Memories cannot grow in excess of their original allocation. This
    ///   means that [`Config::memory_reservation`] and
    ///   [`Config::memory_reservation_for_growth`] may need tuning to ensure
    ///   the memory configuration works at runtime.
    ///
    /// The default value for this option is `true`.
    pub fn memory_may_move(&mut self, enable: bool) -> &mut Self {
        // `Some` marks this as explicitly configured (default `true` otherwise).
        self.tunables.memory_may_move = Some(enable);
        self
    }
1844
    /// Configures the size, in bytes, of the guard region used at the end of a
    /// linear memory's address space reservation.
    ///
    /// > Note: this value has important performance ramifications, be sure to
    /// > understand what this value does before tweaking it and benchmarking.
    ///
    /// This setting controls how many bytes are guaranteed to be unmapped after
    /// the virtual memory allocation of a linear memory. When
    /// combined with sufficiently large values of
    /// [`Config::memory_reservation`] (e.g. 4GiB for 32-bit linear memories)
    /// then a guard region can be used to eliminate bounds checks in generated
    /// code.
    ///
    /// This setting additionally can be used to help deduplicate bounds checks
    /// in code that otherwise requires bounds checks. For example with a 4KiB
    /// guard region then a 64-bit linear memory which accesses addresses `x+8`
    /// and `x+16` only needs to perform a single bounds check on `x`. If that
    /// bounds check passes then the offset is guaranteed to either reside in
    /// linear memory or the guard region, resulting in deterministic behavior
    /// either way.
    ///
    /// ## How big should the guard be?
    ///
    /// In general, like with configuring [`Config::memory_reservation`], you
    /// probably don't want to change this value from the defaults. Removing
    /// bounds checks is dependent on a number of factors where the size of the
    /// guard region is only one piece of the equation. Other factors include:
    ///
    /// * [`Config::memory_reservation`]
    /// * The index type of the linear memory (e.g. 32-bit or 64-bit)
    /// * The page size of the linear memory
    /// * Other settings such as [`Config::signals_based_traps`]
    ///
    /// Embeddings using virtual memory almost always want at least some guard
    /// region, but otherwise changes from the default should be profiled
    /// locally to see the performance impact.
    ///
    /// ## Default
    ///
    /// The default value for this property is 32MiB on 64-bit platforms. This
    /// allows eliminating almost all bounds checks on loads/stores with an
    /// immediate offset of less than 32MiB. On 32-bit platforms this defaults
    /// to 64KiB.
    pub fn memory_guard_size(&mut self, bytes: u64) -> &mut Self {
        // `Some` marks this as explicitly configured; when left unset the
        // platform-dependent default described above is used instead.
        self.tunables.memory_guard_size = Some(bytes);
        self
    }
1892
    /// Configures the size, in bytes, of the extra virtual memory space
    /// reserved after a linear memory is relocated.
    ///
    /// This setting is used in conjunction with [`Config::memory_reservation`]
    /// to configure what happens after a linear memory is relocated in the host
    /// address space. If the initial size of a linear memory exceeds
    /// [`Config::memory_reservation`] or if it grows beyond that size
    /// throughout its lifetime then this setting will be used.
    ///
    /// When a linear memory is relocated it will initially look like this:
    ///
    /// ```text
    ///            memory.size
    ///                 │
    ///          ◄──────┴─────►
    /// ┌───────┬──────────────┬───────┐
    /// │ guard │  accessible  │ guard │
    /// └───────┴──────────────┴───────┘
    ///                         ◄──┬──►
    ///                            │
    ///                     memory_guard_size
    /// ```
    ///
    /// where `accessible` needs to be grown but there's no more memory to grow
    /// into. A new region of the virtual address space will be allocated that
    /// looks like this:
    ///
    /// ```text
    ///                           memory_reservation_for_growth
    ///                                       │
    ///            memory.size                │
    ///                 │                     │
    ///          ◄──────┴─────► ◄─────────────┴───────────►
    /// ┌───────┬──────────────┬───────────────────────────┬───────┐
    /// │ guard │  accessible  │ .. reserved for growth .. │ guard │
    /// └───────┴──────────────┴───────────────────────────┴───────┘
    ///                                                     ◄──┬──►
    ///                                                        │
    ///                                               memory_guard_size
    /// ```
    ///
    /// This means that up to `memory_reservation_for_growth` bytes can be
    /// allocated again before the entire linear memory needs to be moved again
    /// when another `memory_reservation_for_growth` bytes will be appended to
    /// the size of the allocation.
    ///
    /// Note that this is a currently simple heuristic for optimizing the growth
    /// of dynamic memories, primarily implemented for the memory64 proposal
    /// where the maximum size of memory is larger than 4GiB. This setting is
    /// unlikely to be a one-size-fits-all style approach and if you're an
    /// embedder running into issues with growth and are interested in having
    /// other growth strategies available here please feel free to [open an
    /// issue on the Wasmtime repository][issue]!
    ///
    /// [issue]: https://github.com/bytecodealliance/wasmtime/issues/new
    ///
    /// ## Default
    ///
    /// For 64-bit platforms this defaults to 2GiB, and for 32-bit platforms
    /// this defaults to 1MiB.
    pub fn memory_reservation_for_growth(&mut self, bytes: u64) -> &mut Self {
        // `Some` marks this as explicitly configured; when left unset the
        // platform-dependent default described above is used instead.
        self.tunables.memory_reservation_for_growth = Some(bytes);
        self
    }
1957
    /// Indicates whether a guard region is present before allocations of
    /// linear memory.
    ///
    /// Guard regions before linear memories are never used during normal
    /// operation of WebAssembly modules, even if they have out-of-bounds
    /// loads. The only purpose for a preceding guard region in linear memory
    /// is extra protection against possible bugs in code generators like
    /// Cranelift. This setting does not affect performance in any way, but will
    /// result in larger virtual memory reservations for linear memories (it
    /// won't actually ever use more memory, just use more of the address
    /// space).
    ///
    /// The size of the guard region before linear memory is the same as the
    /// guard size that comes after linear memory, which is configured by
    /// [`Config::memory_guard_size`].
    ///
    /// ## Default
    ///
    /// This value defaults to `true`.
    pub fn guard_before_linear_memory(&mut self, enable: bool) -> &mut Self {
        // `Some` marks this as explicitly configured (default `true` otherwise).
        self.tunables.guard_before_linear_memory = Some(enable);
        self
    }
1981
    /// Indicates whether to initialize tables lazily, so that instantiation
    /// is fast but indirect calls are a little slower. If false, tables
    /// are initialized eagerly during instantiation from any active element
    /// segments that apply to them.
    ///
    /// **Note** Disabling this option is not compatible with the Winch compiler.
    ///
    /// ## Default
    ///
    /// This value defaults to `true`.
    pub fn table_lazy_init(&mut self, table_lazy_init: bool) -> &mut Self {
        // `Some` marks this as explicitly configured (default `true` otherwise).
        self.tunables.table_lazy_init = Some(table_lazy_init);
        self
    }
1996
1997    /// Configure the version information used in serialized and deserialized [`crate::Module`]s.
1998    /// This effects the behavior of [`crate::Module::serialize()`], as well as
1999    /// [`crate::Module::deserialize()`] and related functions.
2000    ///
2001    /// The default strategy is to use the wasmtime crate's Cargo package version.
2002    pub fn module_version(&mut self, strategy: ModuleVersionStrategy) -> Result<&mut Self> {
2003        match strategy {
2004            // This case requires special precondition for assertion in SerializedModule::to_bytes
2005            ModuleVersionStrategy::Custom(ref v) => {
2006                if v.as_bytes().len() > 255 {
2007                    bail!("custom module version cannot be more than 255 bytes: {v}");
2008                }
2009            }
2010            _ => {}
2011        }
2012        self.module_version = strategy;
2013        Ok(self)
2014    }
2015
    /// Configure whether wasmtime should compile a module using multiple
    /// threads.
    ///
    /// Disabling this will result in a single thread being used to compile
    /// the wasm bytecode.
    ///
    /// By default parallel compilation is enabled.
    #[cfg(feature = "parallel-compilation")]
    pub fn parallel_compilation(&mut self, parallel: bool) -> &mut Self {
        // Consulted later, at module-compilation time.
        self.parallel_compilation = parallel;
        self
    }
2028
    /// Configures whether compiled artifacts will contain information to map
    /// native program addresses back to the original wasm module.
    ///
    /// This configuration option is `true` by default and, if enabled,
    /// generates the appropriate tables in compiled modules to map from native
    /// address back to wasm source addresses. This is used for displaying wasm
    /// program counters in backtraces as well as generating filenames/line
    /// numbers if so configured as well (and the original wasm module has DWARF
    /// debugging information present).
    pub fn generate_address_map(&mut self, generate: bool) -> &mut Self {
        // `Some` marks this as explicitly configured (default `true` otherwise).
        self.tunables.generate_address_map = Some(generate);
        self
    }
2042
    /// Configures whether copy-on-write memory-mapped data is used to
    /// initialize a linear memory.
    ///
    /// Initializing linear memory via a copy-on-write mapping can drastically
    /// improve instantiation costs of a WebAssembly module because copying
    /// memory is deferred. Additionally if a page of memory is only ever read
    /// from WebAssembly and never written to then the same underlying page of
    /// data will be reused between all instantiations of a module meaning that
    /// if a module is instantiated many times this can lower the overall memory
    /// required to run that module.
    ///
    /// The main disadvantage of copy-on-write initialization, however, is that
    /// it may be possible for highly-parallel scenarios to be less scalable. If
    /// a page is read initially by a WebAssembly module then that page will be
    /// mapped to a read-only copy shared between all WebAssembly instances. If
    /// the same page is then written, however, then a private copy is created
    /// and swapped out from the read-only version. This also requires an [IPI],
    /// however, which can be a significant bottleneck in high-parallelism
    /// situations.
    ///
    /// This feature is only applicable when a WebAssembly module meets specific
    /// criteria to be initialized in this fashion, such as:
    ///
    /// * Only memories defined in the module can be initialized this way.
    /// * Data segments for memory must use statically known offsets.
    /// * Data segments for memory must all be in-bounds.
    ///
    /// Modules which do not meet these criteria will fall back to
    /// initialization of linear memory based on copying memory.
    ///
    /// This feature of Wasmtime is also platform-specific:
    ///
    /// * Linux - this feature is supported for all instances of [`Module`].
    ///   Modules backed by an existing mmap (such as those created by
    ///   [`Module::deserialize_file`]) will reuse that mmap to cow-initialize
    ///   memory. Other instances of [`Module`] may use the `memfd_create`
    ///   syscall to create an initialization image to `mmap`.
    /// * Unix (not Linux) - this feature is only supported when loading modules
    ///   from a precompiled file via [`Module::deserialize_file`] where there
    ///   is a file descriptor to use to map data into the process. Note that
    ///   the module must have been compiled with this setting enabled as well.
    /// * Windows - there is no support for this feature at this time. Memory
    ///   initialization will always copy bytes.
    ///
    /// By default this option is enabled.
    ///
    /// [`Module::deserialize_file`]: crate::Module::deserialize_file
    /// [`Module`]: crate::Module
    /// [IPI]: https://en.wikipedia.org/wiki/Inter-processor_interrupt
    pub fn memory_init_cow(&mut self, enable: bool) -> &mut Self {
        // `Some` records an explicit setting overriding the default in
        // `Config::validate`.
        self.tunables.memory_init_cow = Some(enable);
        self
    }
2096
    /// A configuration option to force the usage of `memfd_create` on Linux to
    /// be used as the backing source for a module's initial memory image.
    ///
    /// When [`Config::memory_init_cow`] is enabled, which is enabled by
    /// default, module memory initialization images are taken from a module's
    /// original mmap if possible. If a precompiled module was loaded from disk
    /// this means that the disk's file is used as an mmap source for the
    /// initial linear memory contents. This option can be used to force, on
    /// Linux, that instead of using the original file on disk a new in-memory
    /// file is created with `memfd_create` to hold the contents of the initial
    /// image.
    ///
    /// This option can be used to avoid possibly loading the contents of memory
    /// from disk through a page fault. Instead with `memfd_create` the contents
    /// of memory are always in RAM, meaning that even page faults which
    /// initially populate a wasm linear memory will only work with RAM instead
    /// of ever hitting the disk that the original precompiled module is stored
    /// on.
    ///
    /// This option is disabled by default.
    pub fn force_memory_init_memfd(&mut self, enable: bool) -> &mut Self {
        // Plain flag storage; per the docs above this only has an effect on
        // Linux when memory images are constructed.
        self.force_memory_init_memfd = enable;
        self
    }
2121
2122    /// Configures whether or not a coredump should be generated and attached to
2123    /// the [`Error`](crate::Error) when a trap is raised.
2124    ///
2125    /// This option is disabled by default.
2126    #[cfg(feature = "coredump")]
2127    pub fn coredump_on_trap(&mut self, enable: bool) -> &mut Self {
2128        self.coredump_on_trap = enable;
2129        self
2130    }
2131
    /// Enables memory error checking for wasm programs.
    ///
    /// This option is disabled by default.
    ///
    /// # Panics
    ///
    /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
    #[cfg(any(feature = "cranelift", feature = "winch"))]
    pub fn wmemcheck(&mut self, enable: bool) -> &mut Self {
        self.wmemcheck = enable;
        // Mirror the flag into the compiler configuration as well; the
        // documented panic above comes from `compiler_config_mut()` when the
        // compiler was disabled.
        self.compiler_config_mut().wmemcheck = enable;
        self
    }
2145
    /// Configures the "guaranteed dense image size" for copy-on-write
    /// initialized memories.
    ///
    /// When using the [`Config::memory_init_cow`] feature to initialize memory
    /// efficiently (which is enabled by default), compiled modules contain an
    /// image of the module's initial heap. If the module has a fairly sparse
    /// initial heap, with just a few data segments at very different offsets,
    /// this could result in a large region of zero bytes in the image. In
    /// other words, it's not very memory-efficient.
    ///
    /// We normally use a heuristic to avoid this: if less than half
    /// of the initialized range (first non-zero to last non-zero
    /// byte) of any memory in the module has pages with nonzero
    /// bytes, then we avoid creating a memory image for the entire module.
    ///
    /// However, if the embedder always needs the instantiation-time efficiency
    /// of copy-on-write initialization, and is otherwise carefully controlling
    /// parameters of the modules (for example, by limiting the maximum heap
    /// size of the modules), then it may be desirable to ensure a memory image
    /// is created even if this could go against the heuristic above. Thus, we
    /// add another condition: there is a size of initialized data region up to
    /// which we *always* allow a memory image. The embedder can set this to a
    /// known maximum heap size if they desire to always get the benefits of
    /// copy-on-write images.
    ///
    /// In the future we may implement a "best of both worlds"
    /// solution where we have a dense image up to some limit, and
    /// then support a sparse list of initializers beyond that; this
    /// would get most of the benefit of copy-on-write and pay the incremental
    /// cost of eager initialization only for those bits of memory
    /// that are out-of-bounds. However, for now, an embedder desiring
    /// fast instantiation should ensure that this setting is as large
    /// as the maximum module initial memory content size.
    ///
    /// By default this value is 16 MiB.
    pub fn memory_guaranteed_dense_image_size(&mut self, size_in_bytes: u64) -> &mut Self {
        // Threshold in bytes below which a memory image is always created,
        // bypassing the density heuristic described above.
        self.memory_guaranteed_dense_image_size = size_in_bytes;
        self
    }
2185
    /// Whether to enable function inlining during compilation or not.
    ///
    /// This may result in faster execution at runtime, but adds additional
    /// compilation time. Inlining may also enlarge the size of compiled
    /// artifacts (for example, the size of the result of
    /// [`Engine::precompile_component`](crate::Engine::precompile_component)).
    ///
    /// Inlining is not supported by all of Wasmtime's compilation strategies;
    /// currently, only Cranelift supports it. This setting will be ignored
    /// when using a compilation strategy that does not support inlining, like
    /// Winch.
    ///
    /// Note that inlining is still somewhat experimental at the moment (as of
    /// Wasmtime version 36).
    pub fn compiler_inlining(&mut self, inlining: bool) -> &mut Self {
        // `Some` records an explicit choice applied over the defaults during
        // `Config::validate`.
        self.tunables.inlining = Some(inlining);
        self
    }
2204
    /// Returns the set of features that the currently selected compiler backend
    /// does not support at all and may panic on.
    ///
    /// Wasmtime strives to reject unknown modules or unsupported modules with
    /// first-class errors instead of panics. Not all compiler backends have the
    /// same level of feature support on all platforms as well. This method
    /// returns a set of features that the currently selected compiler
    /// configuration is known to not support and may panic on. This acts as a
    /// first-level filter on incoming wasm modules/configuration to fail-fast
    /// instead of panicking later on.
    ///
    /// Note that if a feature is not listed here it does not mean that the
    /// backend fully supports the proposal. Instead that means that the backend
    /// doesn't ever panic on the proposal, but errors during compilation may
    /// still be returned. This means that features listed here are definitely
    /// not supported at all, but features not listed here may still be
    /// partially supported. For example at the time of this writing the Winch
    /// backend partially supports simd so it's not listed here. Winch doesn't
    /// fully support simd but unimplemented instructions just return errors.
    fn compiler_panicking_wasm_features(&self) -> WasmFeatures {
        // First we compute the set of features that Wasmtime itself knows;
        // this is a sort of "maximal set" that we invert to create a set
        // of features we _definitely can't support_ because wasmtime
        // has never heard of them.
        let features_known_to_wasmtime = WasmFeatures::empty()
            | WasmFeatures::MUTABLE_GLOBAL
            | WasmFeatures::SATURATING_FLOAT_TO_INT
            | WasmFeatures::SIGN_EXTENSION
            | WasmFeatures::REFERENCE_TYPES
            | WasmFeatures::CALL_INDIRECT_OVERLONG
            | WasmFeatures::MULTI_VALUE
            | WasmFeatures::BULK_MEMORY
            | WasmFeatures::BULK_MEMORY_OPT
            | WasmFeatures::SIMD
            | WasmFeatures::RELAXED_SIMD
            | WasmFeatures::THREADS
            | WasmFeatures::SHARED_EVERYTHING_THREADS
            | WasmFeatures::TAIL_CALL
            | WasmFeatures::FLOATS
            | WasmFeatures::MULTI_MEMORY
            | WasmFeatures::EXCEPTIONS
            | WasmFeatures::MEMORY64
            | WasmFeatures::EXTENDED_CONST
            | WasmFeatures::COMPONENT_MODEL
            | WasmFeatures::FUNCTION_REFERENCES
            | WasmFeatures::GC
            | WasmFeatures::CUSTOM_PAGE_SIZES
            | WasmFeatures::GC_TYPES
            | WasmFeatures::STACK_SWITCHING
            | WasmFeatures::WIDE_ARITHMETIC
            | WasmFeatures::CM_ASYNC
            | WasmFeatures::CM_ASYNC_STACKFUL
            | WasmFeatures::CM_ASYNC_BUILTINS
            | WasmFeatures::CM_THREADING
            | WasmFeatures::CM_ERROR_CONTEXT
            | WasmFeatures::CM_GC
            | WasmFeatures::CM_MAP
            | WasmFeatures::CM_FIXED_LENGTH_LISTS;

        #[allow(unused_mut, reason = "easier to avoid #[cfg]")]
        let mut unsupported = !features_known_to_wasmtime;

        // Next, grow the unsupported set based on the selected backend
        // (`None` means the default, Cranelift) and its target, since
        // backends differ in proposal coverage per architecture/OS.
        #[cfg(any(feature = "cranelift", feature = "winch"))]
        match self.compiler_config.as_ref().and_then(|c| c.strategy) {
            None | Some(Strategy::Cranelift) => {
                // Pulley at this time fundamentally doesn't support the
                // `threads` proposal, notably shared memory, because Rust can't
                // safely implement loads/stores in the face of shared memory.
                // Stack switching is not implemented, either.
                if self.compiler_target().is_pulley() {
                    unsupported |= WasmFeatures::THREADS;
                    unsupported |= WasmFeatures::STACK_SWITCHING;
                }

                use target_lexicon::*;
                match self.compiler_target() {
                    Triple {
                        architecture: Architecture::X86_64 | Architecture::X86_64h,
                        operating_system:
                            OperatingSystem::Linux
                            | OperatingSystem::MacOSX(_)
                            | OperatingSystem::Darwin(_),
                        ..
                    } => {
                        // Stack switching supported on (non-Pulley) Cranelift.
                    }

                    _ => {
                        // On platforms other than x64 Unix-like, we don't
                        // support stack switching.
                        unsupported |= WasmFeatures::STACK_SWITCHING;
                    }
                }
            }
            Some(Strategy::Winch) => {
                unsupported |= WasmFeatures::GC
                    | WasmFeatures::FUNCTION_REFERENCES
                    | WasmFeatures::RELAXED_SIMD
                    | WasmFeatures::TAIL_CALL
                    | WasmFeatures::GC_TYPES
                    | WasmFeatures::EXCEPTIONS
                    | WasmFeatures::LEGACY_EXCEPTIONS
                    | WasmFeatures::STACK_SWITCHING
                    | WasmFeatures::CM_ASYNC;
                match self.compiler_target().architecture {
                    target_lexicon::Architecture::Aarch64(_) => {
                        unsupported |= WasmFeatures::THREADS;
                        unsupported |= WasmFeatures::WIDE_ARITHMETIC;
                    }

                    // Winch doesn't support other non-x64 architectures at this
                    // time either but will return an first-class error for
                    // them.
                    _ => {}
                }
            }
            // `Auto` is resolved to a concrete strategy before this point.
            Some(Strategy::Auto) => unreachable!(),
        }
        unsupported
    }
2325
2326    /// Calculates the set of features that are enabled for this `Config`.
2327    ///
2328    /// This method internally will start with the an empty set of features to
2329    /// avoid being tied to wasmparser's defaults. Next Wasmtime's set of
2330    /// default features are added to this set, some of which are conditional
2331    /// depending on crate features. Finally explicitly requested features via
2332    /// `wasm_*` methods on `Config` are applied. Everything is then validated
2333    /// later in `Config::validate`.
2334    fn features(&self) -> WasmFeatures {
2335        // Wasmtime by default supports all of the wasm 2.0 version of the
2336        // specification.
2337        let mut features = WasmFeatures::WASM2;
2338
2339        // On-by-default features that wasmtime has. Note that these are all
2340        // subject to the criteria at
2341        // https://docs.wasmtime.dev/contributing-implementing-wasm-proposals.html
2342        // and
2343        // https://docs.wasmtime.dev/stability-wasm-proposals.html
2344        features |= WasmFeatures::MULTI_MEMORY;
2345        features |= WasmFeatures::RELAXED_SIMD;
2346        features |= WasmFeatures::TAIL_CALL;
2347        features |= WasmFeatures::EXTENDED_CONST;
2348        features |= WasmFeatures::MEMORY64;
2349        // NB: if you add a feature above this line please double-check
2350        // https://docs.wasmtime.dev/stability-wasm-proposals.html
2351        // to ensure all requirements are met and/or update the documentation
2352        // there too.
2353
2354        // Set some features to their conditionally-enabled defaults depending
2355        // on crate compile-time features.
2356        features.set(WasmFeatures::GC_TYPES, cfg!(feature = "gc"));
2357        features.set(WasmFeatures::THREADS, cfg!(feature = "threads"));
2358        features.set(
2359            WasmFeatures::COMPONENT_MODEL,
2360            cfg!(feature = "component-model"),
2361        );
2362
2363        // From the default set of proposals remove any that the current
2364        // compiler backend may panic on if the module contains them.
2365        features = features & !self.compiler_panicking_wasm_features();
2366
2367        // After wasmtime's defaults are configured then factor in user requests
2368        // and disable/enable features. Note that the enable/disable sets should
2369        // be disjoint.
2370        debug_assert!((self.enabled_features & self.disabled_features).is_empty());
2371        features &= !self.disabled_features;
2372        features |= self.enabled_features;
2373
2374        features
2375    }
2376
2377    /// Returns the configured compiler target for this `Config`.
2378    pub(crate) fn compiler_target(&self) -> target_lexicon::Triple {
2379        // If a target is explicitly configured, always use that.
2380        if let Some(target) = self.target.clone() {
2381            return target;
2382        }
2383
2384        // If the `build.rs` script determined that this platform uses pulley by
2385        // default, then use Pulley.
2386        if cfg!(default_target_pulley) {
2387            return target_lexicon::Triple::pulley_host();
2388        }
2389
2390        // And at this point the target is for sure the host.
2391        target_lexicon::Triple::host()
2392    }
2393
    /// Validates this configuration, producing the final `Tunables` and set
    /// of `WasmFeatures` to use.
    ///
    /// This cross-checks the enabled wasm features against the selected
    /// compiler backend and the crate features compiled into this build, then
    /// derives target-default tunables, layers in host-platform refinements,
    /// and finally applies the embedder's explicit tunable overrides.
    ///
    /// # Errors
    ///
    /// Returns an error for any unsupported combination of features, backend,
    /// and build configuration detected below.
    pub(crate) fn validate(&self) -> Result<(Tunables, WasmFeatures)> {
        let features = self.features();

        // First validate that the selected compiler backend and configuration
        // supports the set of `features` that are enabled. This will help
        // provide more first class errors instead of panics about unsupported
        // features and configurations.
        let unsupported = features & self.compiler_panicking_wasm_features();
        if !unsupported.is_empty() {
            // Name the first offending feature for a readable error message.
            for flag in WasmFeatures::FLAGS.iter() {
                if !unsupported.contains(*flag.value()) {
                    continue;
                }
                bail!(
                    "the wasm_{} feature is not supported on this compiler configuration",
                    flag.name().to_lowercase()
                );
            }

            panic!("should have returned an error by now")
        }

        // Sanity-check stack-size related configuration.
        #[cfg(any(feature = "async", feature = "stack-switching"))]
        if self.max_wasm_stack > self.async_stack_size {
            bail!("max_wasm_stack size cannot exceed the async_stack_size");
        }
        if self.max_wasm_stack == 0 {
            bail!("max_wasm_stack size cannot be zero");
        }
        if !cfg!(feature = "wmemcheck") && self.wmemcheck {
            bail!("wmemcheck (memory checker) was requested but is not enabled in this build");
        }

        if !cfg!(feature = "gc") && features.gc_types() {
            bail!("support for GC was disabled at compile time")
        }

        if !cfg!(feature = "gc") && features.contains(WasmFeatures::EXCEPTIONS) {
            bail!("exceptions support requires garbage collection (GC) to be enabled in the build");
        }

        // Record/replay configurations impose additional determinism
        // requirements that may conflict with other settings.
        match &self.rr_config {
            #[cfg(feature = "rr")]
            RRConfig::Recording | RRConfig::Replaying => {
                self.validate_rr_determinism_conflicts()?;
            }
            RRConfig::None => {}
        };

        let mut tunables = Tunables::default_for_target(&self.compiler_target())?;

        // By default this is enabled with the Cargo feature, and if the feature
        // is missing this is disabled.
        tunables.concurrency_support = cfg!(feature = "component-model-async");

        #[cfg(feature = "rr")]
        {
            tunables.recording = matches!(self.rr_config, RRConfig::Recording);
        }

        // If no target is explicitly specified then further refine `tunables`
        // for the configuration of this host depending on what platform
        // features were found available at compile time. This means that anyone
        // cross-compiling for a customized host will need to further refine
        // compilation options.
        if self.target.is_none() {
            // If this platform doesn't have native signals then change some
            // defaults to account for that. Note that VM guards are turned off
            // here because that's primarily a feature of eliding
            // bounds-checks.
            if !cfg!(has_native_signals) {
                tunables.signals_based_traps = cfg!(has_native_signals);
                tunables.memory_guard_size = 0;
            }

            // When virtual memory is not available use slightly different
            // defaults for tunables to be more amenable to `MallocMemory`.
            // Note that these can still be overridden by config options.
            if !cfg!(has_virtual_memory) {
                tunables.memory_reservation = 0;
                tunables.memory_reservation_for_growth = 1 << 20; // 1MB
                tunables.memory_init_cow = false;
            }
        }

        // If guest-debugging is enabled, we must disable
        // signals-based traps. Do this before we process the user's
        // provided tunables settings so we can detect a conflict with
        // an explicit request to use signals-based traps.
        #[cfg(feature = "debug")]
        if self.tunables.debug_guest == Some(true) {
            tunables.signals_based_traps = false;
        }

        // Apply the embedder's explicitly-requested tunables on top of the
        // defaults computed above.
        self.tunables.configure(&mut tunables);

        // If we're going to compile with winch, we must use the winch calling convention.
        #[cfg(any(feature = "cranelift", feature = "winch"))]
        {
            tunables.winch_callable = self
                .compiler_config
                .as_ref()
                .is_some_and(|c| c.strategy == Some(Strategy::Winch));
        }

        // Resolve the collector choice when GC types are in use;
        // `try_not_auto` turns `Auto` into a concrete collector or errors.
        tunables.collector = if features.gc_types() {
            #[cfg(feature = "gc")]
            {
                use wasmtime_environ::Collector as EnvCollector;
                Some(match self.collector.try_not_auto()? {
                    Collector::DeferredReferenceCounting => EnvCollector::DeferredReferenceCounting,
                    Collector::Null => EnvCollector::Null,
                    Collector::Auto => unreachable!(),
                })
            }
            #[cfg(not(feature = "gc"))]
            bail!("cannot use GC types: the `gc` feature was disabled at compile time")
        } else {
            None
        };

        // Guest debugging requires build support and excludes signals-based
        // traps (an explicit conflicting request is rejected here).
        if tunables.debug_guest {
            ensure!(
                cfg!(feature = "debug"),
                "debug instrumentation support was disabled at compile time"
            );
            ensure!(
                !tunables.signals_based_traps,
                "cannot use signals-based traps with guest debugging enabled"
            );
        }

        // Concurrency support is required for some component model features.
        let requires_concurrency = WasmFeatures::CM_ASYNC
            | WasmFeatures::CM_ASYNC_BUILTINS
            | WasmFeatures::CM_ASYNC_STACKFUL
            | WasmFeatures::CM_THREADING
            | WasmFeatures::CM_ERROR_CONTEXT;
        if tunables.concurrency_support && !cfg!(feature = "component-model-async") {
            bail!(
                "concurrency support was requested but was not \
                 compiled into this build of Wasmtime"
            )
        }
        if !tunables.concurrency_support && features.intersects(requires_concurrency) {
            bail!(
                "concurrency support must be enabled to use the component \
                 model async or threading features"
            )
        }

        Ok((tunables, features))
    }
2547
    /// Builds the instance allocator selected by this configuration.
    ///
    /// `tunables` is consulted when constructing the pooling allocator; it is
    /// unused for the on-demand strategy.
    #[cfg(feature = "runtime")]
    pub(crate) fn build_allocator(
        &self,
        tunables: &Tunables,
    ) -> Result<Box<dyn InstanceAllocator + Send + Sync>> {
        // Stack parameters only apply when async support is compiled in;
        // otherwise fall back to inert values.
        #[cfg(feature = "async")]
        let (stack_size, stack_zeroing) = (self.async_stack_size, self.async_stack_zeroing);

        #[cfg(not(feature = "async"))]
        let (stack_size, stack_zeroing) = (0, false);

        // Silence the unused-variable warning when the pooling allocator (the
        // only consumer of `tunables` below) is compiled out.
        let _ = tunables;

        match &self.allocation_strategy {
            InstanceAllocationStrategy::OnDemand => {
                let mut _allocator = try_new::<Box<_>>(OnDemandInstanceAllocator::new(
                    self.mem_creator.clone(),
                    stack_size,
                    stack_zeroing,
                ))?;
                // A custom stack creator, if configured, is only relevant for
                // async fiber stacks.
                #[cfg(feature = "async")]
                if let Some(stack_creator) = &self.stack_creator {
                    _allocator.set_stack_creator(stack_creator.clone());
                }
                Ok(_allocator as _)
            }
            #[cfg(feature = "pooling-allocator")]
            InstanceAllocationStrategy::Pooling(config) => {
                // Copy the pooling config so the async stack settings above
                // can be layered in without mutating `self`.
                let mut config = config.config;
                config.stack_size = stack_size;
                config.async_stack_zeroing = stack_zeroing;
                let allocator = try_new::<Box<_>>(
                    crate::runtime::vm::PoolingInstanceAllocator::new(&config, tunables)?,
                )?;
                Ok(allocator as _)
            }
        }
    }
2586
    /// Constructs the GC runtime selected by this configuration, if any.
    ///
    /// Returns `Ok(None)` when GC types are not enabled for this config, and
    /// an error when GC types are requested but the `gc` feature was disabled
    /// at compile time.
    #[cfg(feature = "runtime")]
    pub(crate) fn build_gc_runtime(&self) -> Result<Option<Arc<dyn GcRuntime>>> {
        if !self.features().gc_types() {
            return Ok(None);
        }

        #[cfg(not(feature = "gc"))]
        bail!("cannot create a GC runtime: the `gc` feature was disabled at compile time");

        #[cfg(feature = "gc")]
        #[cfg_attr(
            not(any(feature = "gc-null", feature = "gc-drc")),
            expect(unreachable_code, reason = "definitions known to be dummy")
        )]
        {
            // Select the collector implementation; `try_not_auto` resolves
            // `Collector::Auto` into a concrete choice or errors, so the
            // `Auto` arm below is unreachable.
            Ok(Some(match self.collector.try_not_auto()? {
                #[cfg(feature = "gc-drc")]
                Collector::DeferredReferenceCounting => {
                    try_new::<Arc<_>>(crate::runtime::vm::DrcCollector::default())? as _
                }
                #[cfg(not(feature = "gc-drc"))]
                Collector::DeferredReferenceCounting => unreachable!(),

                #[cfg(feature = "gc-null")]
                Collector::Null => {
                    try_new::<Arc<_>>(crate::runtime::vm::NullCollector::default())? as _
                }
                #[cfg(not(feature = "gc-null"))]
                Collector::Null => unreachable!(),

                Collector::Auto => unreachable!(),
            }))
        }
    }
2621
2622    #[cfg(feature = "runtime")]
2623    pub(crate) fn build_profiler(&self) -> Result<Box<dyn ProfilingAgent>> {
2624        Ok(match self.profiling_strategy {
2625            ProfilingStrategy::PerfMap => profiling_agent::new_perfmap()?,
2626            ProfilingStrategy::JitDump => profiling_agent::new_jitdump()?,
2627            ProfilingStrategy::VTune => profiling_agent::new_vtune()?,
2628            ProfilingStrategy::None => profiling_agent::new_null(),
2629            ProfilingStrategy::Pulley => profiling_agent::new_pulley()?,
2630        })
2631    }
2632
2633    #[cfg(any(feature = "cranelift", feature = "winch"))]
2634    pub(crate) fn build_compiler(
2635        mut self,
2636        tunables: &mut Tunables,
2637        features: WasmFeatures,
2638    ) -> Result<(Self, Box<dyn wasmtime_environ::Compiler>)> {
2639        let target = self.compiler_target();
2640
2641        // The target passed to the builders below is an `Option<Triple>` where
2642        // `None` represents the current host with CPU features inferred from
2643        // the host's CPU itself. The `target` above is not an `Option`, so
2644        // switch it to `None` in the case that a target wasn't explicitly
2645        // specified (which indicates no feature inference) and the target
2646        // matches the host.
2647        let target_for_builder =
2648            if self.target.is_none() && target == target_lexicon::Triple::host() {
2649                None
2650            } else {
2651                Some(target.clone())
2652            };
2653
2654        let mut compiler = match self.compiler_config_mut().strategy {
2655            #[cfg(feature = "cranelift")]
2656            Some(Strategy::Cranelift) => wasmtime_cranelift::builder(target_for_builder)?,
2657            #[cfg(not(feature = "cranelift"))]
2658            Some(Strategy::Cranelift) => bail!("cranelift support not compiled in"),
2659            #[cfg(feature = "winch")]
2660            Some(Strategy::Winch) => wasmtime_winch::builder(target_for_builder)?,
2661            #[cfg(not(feature = "winch"))]
2662            Some(Strategy::Winch) => bail!("winch support not compiled in"),
2663
2664            None | Some(Strategy::Auto) => unreachable!(),
2665        };
2666
2667        if let Some(path) = &self.compiler_config_mut().clif_dir {
2668            compiler.clif_dir(path)?;
2669        }
2670
2671        // If probestack is enabled for a target, Wasmtime will always use the
2672        // inline strategy which doesn't require us to define a `__probestack`
2673        // function or similar.
2674        self.compiler_config_mut()
2675            .settings
2676            .insert("probestack_strategy".into(), "inline".into());
2677
2678        // We enable stack probing by default on all targets.
2679        // This is required on Windows because of the way Windows
2680        // commits its stacks, but it's also a good idea on other
2681        // platforms to ensure guard pages are hit for large frame
2682        // sizes.
2683        self.compiler_config_mut()
2684            .flags
2685            .insert("enable_probestack".into());
2686
2687        // The current wasm multivalue implementation depends on this.
2688        // FIXME(#9510) handle this in wasmtime-cranelift instead.
2689        self.compiler_config_mut()
2690            .flags
2691            .insert("enable_multi_ret_implicit_sret".into());
2692
2693        if let Some(unwind_requested) = self.native_unwind_info {
2694            if !self
2695                .compiler_config_mut()
2696                .ensure_setting_unset_or_given("unwind_info", &unwind_requested.to_string())
2697            {
2698                bail!(
2699                    "incompatible settings requested for Cranelift and Wasmtime `unwind-info` settings"
2700                );
2701            }
2702        }
2703
2704        if target.operating_system == target_lexicon::OperatingSystem::Windows {
2705            if !self
2706                .compiler_config_mut()
2707                .ensure_setting_unset_or_given("unwind_info", "true")
2708            {
2709                bail!("`native_unwind_info` cannot be disabled on Windows");
2710            }
2711        }
2712
2713        // We require frame pointers for correct stack walking, which is safety
2714        // critical in the presence of reference types, and otherwise it is just
2715        // really bad developer experience to get wrong.
2716        self.compiler_config_mut()
2717            .settings
2718            .insert("preserve_frame_pointers".into(), "true".into());
2719
2720        if !tunables.signals_based_traps {
2721            let mut ok = self
2722                .compiler_config_mut()
2723                .ensure_setting_unset_or_given("enable_table_access_spectre_mitigation", "false");
2724            ok = ok
2725                && self.compiler_config_mut().ensure_setting_unset_or_given(
2726                    "enable_heap_access_spectre_mitigation",
2727                    "false",
2728                );
2729
2730            // Right now spectre-mitigated bounds checks will load from zero so
2731            // if host-based signal handlers are disabled then that's a mismatch
2732            // and doesn't work right now. Fixing this will require more thought
2733            // of how to implement the bounds check in spectre-only mode.
2734            if !ok {
2735                bail!(
2736                    "when signals-based traps are disabled then spectre \
2737                     mitigations must also be disabled"
2738                );
2739            }
2740        }
2741
2742        if features.contains(WasmFeatures::RELAXED_SIMD) && !features.contains(WasmFeatures::SIMD) {
2743            bail!("cannot disable the simd proposal but enable the relaxed simd proposal");
2744        }
2745
2746        if features.contains(WasmFeatures::STACK_SWITCHING) {
2747            use target_lexicon::OperatingSystem;
2748            let model = match target.operating_system {
2749                OperatingSystem::Windows => "update_windows_tib",
2750                OperatingSystem::Linux
2751                | OperatingSystem::MacOSX(_)
2752                | OperatingSystem::Darwin(_) => "basic",
2753                _ => bail!("stack-switching feature not supported on this platform "),
2754            };
2755
2756            if !self
2757                .compiler_config_mut()
2758                .ensure_setting_unset_or_given("stack_switch_model", model)
2759            {
2760                bail!(
2761                    "compiler option 'stack_switch_model' must be set to '{model}' on this platform"
2762                );
2763            }
2764        }
2765
2766        // Apply compiler settings and flags
2767        compiler.set_tunables(tunables.clone())?;
2768        for (k, v) in self.compiler_config_mut().settings.iter() {
2769            compiler.set(k, v)?;
2770        }
2771        for flag in self.compiler_config_mut().flags.iter() {
2772            compiler.enable(flag)?;
2773        }
2774        *tunables = compiler.tunables().cloned().unwrap();
2775
2776        #[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
2777        if let Some(cache_store) = &self.compiler_config_mut().cache_store {
2778            compiler.enable_incremental_compilation(cache_store.clone())?;
2779        }
2780
2781        compiler.wmemcheck(self.compiler_config_mut().wmemcheck);
2782
2783        Ok((self, compiler.build()?))
2784    }
2785
2786    /// Internal setting for whether adapter modules for components will have
2787    /// extra WebAssembly instructions inserted performing more debug checks
2788    /// then are necessary.
2789    #[cfg(feature = "component-model")]
2790    pub fn debug_adapter_modules(&mut self, debug: bool) -> &mut Self {
2791        self.tunables.debug_adapter_modules = Some(debug);
2792        self
2793    }
2794
2795    /// Enables clif output when compiling a WebAssembly module.
2796    #[cfg(any(feature = "cranelift", feature = "winch"))]
2797    pub fn emit_clif(&mut self, path: &Path) -> &mut Self {
2798        self.compiler_config_mut().clif_dir = Some(path.to_path_buf());
2799        self
2800    }
2801
2802    /// Configures whether, when on macOS, Mach ports are used for exception
2803    /// handling instead of traditional Unix-based signal handling.
2804    ///
2805    /// WebAssembly traps in Wasmtime are implemented with native faults, for
2806    /// example a `SIGSEGV` will occur when a WebAssembly guest accesses
2807    /// out-of-bounds memory. Handling this can be configured to either use Unix
2808    /// signals or Mach ports on macOS. By default Mach ports are used.
2809    ///
2810    /// Mach ports enable Wasmtime to work by default with foreign
2811    /// error-handling systems such as breakpad which also use Mach ports to
2812    /// handle signals. In this situation Wasmtime will continue to handle guest
2813    /// faults gracefully while any non-guest faults will get forwarded to
2814    /// process-level handlers such as breakpad. Some more background on this
2815    /// can be found in #2456.
2816    ///
2817    /// A downside of using mach ports, however, is that they don't interact
2818    /// well with `fork()`. Forking a Wasmtime process on macOS will produce a
2819    /// child process that cannot successfully run WebAssembly. In this
2820    /// situation traditional Unix signal handling should be used as that's
2821    /// inherited and works across forks.
2822    ///
2823    /// If your embedding wants to use a custom error handler which leverages
2824    /// Mach ports and you additionally wish to `fork()` the process and use
2825    /// Wasmtime in the child process that's not currently possible. Please
2826    /// reach out to us if you're in this bucket!
2827    ///
2828    /// This option defaults to `true`, using Mach ports by default.
2829    pub fn macos_use_mach_ports(&mut self, mach_ports: bool) -> &mut Self {
2830        self.macos_use_mach_ports = mach_ports;
2831        self
2832    }
2833
2834    /// Configures an embedder-provided function, `detect`, which is used to
2835    /// determine if an ISA-specific feature is available on the current host.
2836    ///
2837    /// This function is used to verify that any features enabled for a compiler
2838    /// backend, such as AVX support on x86\_64, are also available on the host.
2839    /// It is undefined behavior to execute an AVX instruction on a host that
2840    /// doesn't support AVX instructions, for example.
2841    ///
2842    /// When the `std` feature is active on this crate then this function is
2843    /// configured to a default implementation that uses the standard library's
2844    /// feature detection. When the `std` feature is disabled then there is no
2845    /// default available and this method must be called to configure a feature
2846    /// probing function.
2847    ///
2848    /// The `detect` function provided is given a string name of an ISA feature.
2849    /// The function should then return:
2850    ///
2851    /// * `Some(true)` - indicates that the feature was found on the host and it
2852    ///   is supported.
2853    /// * `Some(false)` - the feature name was recognized but it was not
2854    ///   detected on the host, for example the CPU is too old.
2855    /// * `None` - the feature name was not recognized and it's not known
2856    ///   whether it's on the host or not.
2857    ///
2858    /// Feature names passed to `detect` match the same feature name used in the
2859    /// Rust standard library. For example `"sse4.2"` is used on x86\_64.
2860    ///
2861    /// # Unsafety
2862    ///
2863    /// This function is `unsafe` because it is undefined behavior to execute
2864    /// instructions that a host does not support. This means that the result of
2865    /// `detect` must be correct for memory safe execution at runtime.
2866    pub unsafe fn detect_host_feature(&mut self, detect: fn(&str) -> Option<bool>) -> &mut Self {
2867        self.detect_host_feature = Some(detect);
2868        self
2869    }
2870
2871    /// Configures Wasmtime to not use signals-based trap handlers, for example
2872    /// disables `SIGILL` and `SIGSEGV` handler registration on Unix platforms.
2873    ///
2874    /// > **Note:** this option has important performance ramifications, be sure
2875    /// > to understand the implications. Wasm programs have been measured to
2876    /// > run up to 2x slower when signals-based traps are disabled.
2877    ///
2878    /// Wasmtime will by default leverage signals-based trap handlers (or the
2879    /// platform equivalent, for example "vectored exception handlers" on
2880    /// Windows) to make generated code more efficient. For example, when
2881    /// Wasmtime can use signals-based traps, it can elide explicit bounds
2882    /// checks for Wasm linear memory accesses, instead relying on virtual
2883    /// memory guard pages to raise a `SIGSEGV` (on Unix) for out-of-bounds
2884    /// accesses, which Wasmtime's runtime then catches and handles. Another
2885    /// example is divide-by-zero: with signals-based traps, Wasmtime can let
2886    /// the hardware raise a trap when the divisor is zero. Without
2887    /// signals-based traps, Wasmtime must explicitly emit additional
2888    /// instructions to check for zero and conditionally branch to a trapping
2889    /// code path.
2890    ///
2891    /// Some environments however may not have access to signal handlers. For
2892    /// example embedded scenarios may not support virtual memory. Other
2893    /// environments where Wasmtime is embedded within the surrounding
2894    /// environment may require that new signal handlers aren't registered due
2895    /// to the global nature of signal handlers. This option exists to disable
2896    /// the signal handler registration when required for these scenarios.
2897    ///
2898    /// When signals-based trap handlers are disabled, then Wasmtime and its
2899    /// generated code will *never* rely on segfaults or other
2900    /// signals. Generated code will be slower because bounds must be explicitly
2901    /// checked along with other conditions like division by zero.
2902    ///
2903    /// The following additional factors can also affect Wasmtime's ability to
2904    /// elide explicit bounds checks and leverage signals-based traps:
2905    ///
2906    /// * The [`Config::memory_reservation`] and [`Config::memory_guard_size`]
2907    ///   settings
2908    /// * The index type of the linear memory (e.g. 32-bit or 64-bit)
2909    /// * The page size of the linear memory
2910    ///
2911    /// When this option is disabled, the
2912    /// `enable_heap_access_spectre_mitigation` and
2913    /// `enable_table_access_spectre_mitigation` Cranelift settings must also be
2914    /// disabled. This means that generated code must have spectre mitigations
2915    /// disabled. This is because spectre mitigations rely on faults from
2916    /// loading from the null address to implement bounds checks.
2917    ///
2918    /// This option defaults to `true`: signals-based trap handlers are enabled
2919    /// by default.
2920    ///
2921    /// > **Note:** Disabling this option is not compatible with the Winch
2922    /// > compiler.
2923    pub fn signals_based_traps(&mut self, enable: bool) -> &mut Self {
2924        self.tunables.signals_based_traps = Some(enable);
2925        self
2926    }
2927
2928    /// Enable/disable GC support in Wasmtime entirely.
2929    ///
2930    /// This flag can be used to gate whether GC infrastructure is enabled or
2931    /// initialized in Wasmtime at all. Wasmtime's GC implementation is required
2932    /// for the [`Self::wasm_gc`] proposal, [`Self::wasm_function_references`],
2933    /// and [`Self::wasm_exceptions`] at this time. None of those proposal can
2934    /// be enabled without also having this option enabled.
2935    ///
2936    /// This option defaults to whether the crate `gc` feature is enabled or
2937    /// not.
2938    pub fn gc_support(&mut self, enable: bool) -> &mut Self {
2939        self.wasm_features(WasmFeatures::GC_TYPES, enable)
2940    }
2941
2942    /// Explicitly indicate or not whether the host is using a hardware float
2943    /// ABI on x86 targets.
2944    ///
2945    /// This configuration option is only applicable on the
2946    /// `x86_64-unknown-none` Rust target and has no effect on other host
2947    /// targets. The `x86_64-unknown-none` Rust target does not support hardware
2948    /// floats by default and uses a "soft float" implementation and ABI. This
2949    /// means that `f32`, for example, is passed in a general-purpose register
2950    /// between functions instead of a floating-point register. This does not
2951    /// match Cranelift's ABI for `f32` where it's passed in floating-point
2952    /// registers.  Cranelift does not have support for a "soft float"
2953    /// implementation where all floating-point operations are lowered to
2954    /// libcalls.
2955    ///
2956    /// This means that for the `x86_64-unknown-none` target the ABI between
2957    /// Wasmtime's libcalls and the host is incompatible when floats are used.
2958    /// This further means that, by default, Wasmtime is unable to load native
2959    /// code when compiled to the `x86_64-unknown-none` target. The purpose of
2960    /// this option is to explicitly allow loading code and bypass this check.
2961    ///
2962    /// Setting this configuration option to `true` indicates that either:
2963    /// (a) the Rust target is compiled with the hard-float ABI manually via
2964    /// `-Zbuild-std` and a custom target JSON configuration, or (b) sufficient
2965    /// x86 features have been enabled in the compiler such that float libcalls
2966    /// will not be used in Wasmtime. For (a) there is no way in Rust at this
2967    /// time to detect whether a hard-float or soft-float ABI is in use on
2968    /// stable Rust, so this manual opt-in is required. For (b) the only
2969    /// instance where Wasmtime passes a floating-point value in a register
2970    /// between the host and compiled wasm code is with libcalls.
2971    ///
2972    /// Float-based libcalls are only used when the compilation target for a
2973    /// wasm module has insufficient target features enabled for native
2974    /// support. For example SSE4.1 is required for the `f32.ceil` WebAssembly
2975    /// instruction to be compiled to a native instruction. If SSE4.1 is not
2976    /// enabled then `f32.ceil` is translated to a "libcall" which is
2977    /// implemented on the host. Float-based libcalls can be avoided with
2978    /// sufficient target features enabled, for example:
2979    ///
2980    /// * `self.cranelift_flag_enable("has_sse3")`
2981    /// * `self.cranelift_flag_enable("has_ssse3")`
2982    /// * `self.cranelift_flag_enable("has_sse41")`
2983    /// * `self.cranelift_flag_enable("has_sse42")`
2984    /// * `self.cranelift_flag_enable("has_fma")`
2985    ///
2986    /// Note that when these features are enabled Wasmtime will perform a
2987    /// runtime check to determine that the host actually has the feature
2988    /// present.
2989    ///
2990    /// For some more discussion see [#11506].
2991    ///
2992    /// [#11506]: https://github.com/bytecodealliance/wasmtime/issues/11506
2993    ///
2994    /// # Safety
2995    ///
2996    /// This method is not safe because it cannot be detected in Rust right now
2997    /// whether the host is compiled with a soft or hard float ABI. Additionally
2998    /// if the host is compiled with a soft float ABI disabling this check does
2999    /// not ensure that the wasm module in question has zero usage of floats
3000    /// in the boundary to the host.
3001    ///
3002    /// Safely using this method requires one of:
3003    ///
3004    /// * The host target is compiled to use hardware floats.
3005    /// * Wasm modules loaded are compiled with enough x86 Cranelift features
3006    ///   enabled to avoid float-related hostcalls.
3007    pub unsafe fn x86_float_abi_ok(&mut self, enable: bool) -> &mut Self {
3008        self.x86_float_abi_ok = Some(enable);
3009        self
3010    }
3011
3012    /// Enable or disable the ability to create a
3013    /// [`SharedMemory`](crate::SharedMemory).
3014    ///
3015    /// The WebAssembly threads proposal, configured by [`Config::wasm_threads`]
3016    /// is on-by-default but there are enough deficiencies in Wasmtime's
3017    /// implementation and API integration that creation of a shared memory is
3018    /// disabled by default. This cofiguration knob can be used to enable this.
3019    ///
3020    /// When enabling this method be aware that wasm threads are, at this time,
3021    /// a [tier 2
3022    /// feature](https://docs.wasmtime.dev/stability-tiers.html#tier-2) in
3023    /// Wasmtime meaning that it will not receive security updates or fixes to
3024    /// historical releases. Additionally security CVEs will not be issued for
3025    /// bugs in the implementation.
3026    ///
3027    /// This option is `false` by default.
3028    pub fn shared_memory(&mut self, enable: bool) -> &mut Self {
3029        self.shared_memory = enable;
3030        self
3031    }
3032
3033    /// Specifies whether support for concurrent execution of WebAssembly is
3034    /// supported within this store.
3035    ///
3036    /// This configuration option affects whether runtime data structures are
3037    /// initialized within a `Store` on creation to support concurrent execution
3038    /// of WebAssembly guests. This is primarily applicable to the
3039    /// [`Config::wasm_component_model_async`] configuration which is the first
3040    /// time Wasmtime has supported concurrent execution of guests. This
3041    /// configuration option, for example, enables usage of
3042    /// [`Store::run_concurrent`], [`Func::call_concurrent`], [`StreamReader`],
3043    /// etc.
3044    ///
3045    /// This configuration option can be manually disabled to avoid initializing
3046    /// data structures in the [`Store`] related to concurrent execution. When
3047    /// this option is disabled then APIs related to concurrency will all fail
3048    /// with a panic. For example [`Store::run_concurrent`] will panic, creating
3049    /// a [`StreamReader`] will panic, etc.
3050    ///
3051    /// The value of this option additionally affects whether a [`Config`] is
3052    /// valid and the default set of enabled WebAssembly features. If this
3053    /// option is disabled then component-model features related to concurrency
3054    /// will all be disabled. If this option is enabled, then the options will
3055    /// retain their normal defaults. It is not valid to create a [`Config`]
3056    /// with component-model-async explicitly enabled and this option explicitly
3057    /// disabled, however.
3058    ///
3059    /// This option defaults to `true`.
3060    ///
3061    /// [`Store`]: crate::Store
3062    /// [`Store::run_concurrent`]: crate::Store::run_concurrent
3063    /// [`Func::call_concurrent`]: crate::component::Func::call_concurrent
3064    /// [`StreamReader`]: crate::component::StreamReader
3065    pub fn concurrency_support(&mut self, enable: bool) -> &mut Self {
3066        self.tunables.concurrency_support = Some(enable);
3067        self
3068    }
3069
3070    /// Validate if the current configuration has conflicting overrides that prevent
3071    /// execution determinism. Returns an error if a conflict exists.
3072    ///
3073    /// Note: Keep this in sync with [`Config::enforce_determinism`].
3074    #[inline]
3075    #[cfg(feature = "rr")]
3076    pub(crate) fn validate_rr_determinism_conflicts(&self) -> Result<()> {
3077        if let Some(v) = self.tunables.relaxed_simd_deterministic {
3078            if v == false {
3079                bail!("Relaxed deterministic SIMD cannot be disabled when determinism is enforced");
3080            }
3081        }
3082        #[cfg(any(feature = "cranelift", feature = "winch"))]
3083        if let Some(v) = self
3084            .compiler_config
3085            .as_ref()
3086            .and_then(|c| c.settings.get("enable_nan_canonicalization"))
3087        {
3088            if v != "true" {
3089                bail!("NaN canonicalization cannot be disabled when determinism is enforced");
3090            }
3091        }
3092        Ok(())
3093    }
3094
3095    /// Enable execution trace recording or replaying to the configuration.
3096    ///
3097    /// When either recording/replaying are enabled, validation fails if settings
3098    /// that control determinism are not set appropriately. In particular, RR requires
3099    /// doing the following:
3100    /// * Enabling NaN canonicalization with [`Config::cranelift_nan_canonicalization`].
3101    /// * Enabling deterministic relaxed SIMD with [`Config::relaxed_simd_deterministic`].
3102    #[inline]
3103    pub fn rr(&mut self, cfg: RRConfig) -> &mut Self {
3104        self.rr_config = cfg;
3105        self
3106    }
3107}
3108
3109impl Default for Config {
3110    fn default() -> Config {
3111        Config::new()
3112    }
3113}
3114
3115impl fmt::Debug for Config {
3116    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
3117        let mut f = f.debug_struct("Config");
3118
3119        // Not every flag in WasmFeatures can be enabled as part of creating
3120        // a Config. This impl gives a complete picture of all WasmFeatures
3121        // enabled, and doesn't require maintenance by hand (which has become out
3122        // of date in the past), at the cost of possible confusion for why
3123        // a flag in this set doesn't have a Config setter.
3124        let features = self.features();
3125        for flag in WasmFeatures::FLAGS.iter() {
3126            f.field(
3127                &format!("wasm_{}", flag.name().to_lowercase()),
3128                &features.contains(*flag.value()),
3129            );
3130        }
3131
3132        f.field("parallel_compilation", &self.parallel_compilation);
3133        #[cfg(any(feature = "cranelift", feature = "winch"))]
3134        {
3135            f.field("compiler_config", &self.compiler_config);
3136        }
3137
3138        self.tunables.format(&mut f);
3139        f.finish()
3140    }
3141}
3142
/// Possible compilation strategies for a wasm module.
///
/// This is used as an argument to the [`Config::strategy`] method.
#[non_exhaustive]
#[derive(PartialEq, Eq, Clone, Debug, Copy)]
pub enum Strategy {
    /// An indicator that the compilation strategy should be automatically
    /// selected.
    ///
    /// This is generally what you want for most projects and indicates that the
    /// `wasmtime` crate itself should make the decision about what the best
    /// code generator for a wasm module is.
    ///
    /// Currently this always defaults to Cranelift, but the default value may
    /// change over time.
    Auto,

    /// Currently the default backend, Cranelift aims to be a reasonably fast
    /// code generator which generates high quality machine code.
    Cranelift,

    /// A low-latency baseline compiler for WebAssembly.
    /// For more details regarding ISA support and Wasm proposal support
    /// see <https://docs.wasmtime.dev/stability-tiers.html#current-tier-status>
    Winch,
}
3169
#[cfg(any(feature = "winch", feature = "cranelift"))]
impl Strategy {
    /// Resolves `Auto` to a concrete compilation strategy based on which
    /// backends were compiled in, preferring Cranelift over Winch. Returns
    /// `None` when `Auto` is requested but neither backend is available;
    /// explicit, non-`Auto` choices are passed through unchanged.
    fn not_auto(&self) -> Option<Strategy> {
        if !matches!(self, Strategy::Auto) {
            return Some(*self);
        }
        if cfg!(feature = "cranelift") {
            Some(Strategy::Cranelift)
        } else if cfg!(feature = "winch") {
            Some(Strategy::Winch)
        } else {
            None
        }
    }
}
3187
/// Possible garbage collector implementations for Wasm.
///
/// This is used as an argument to the [`Config::collector`] method.
///
/// The properties of Wasmtime's available collectors are summarized in the
/// following table:
///
/// | Collector                   | Collects Garbage[^1] | Latency[^2] | Throughput[^3] | Allocation Speed[^4] | Heap Utilization[^5] |
/// |-----------------------------|----------------------|-------------|----------------|----------------------|----------------------|
/// | `DeferredReferenceCounting` | Yes, but not cycles  | 🙂         | 🙁             | 😐                   | 😐                  |
/// | `Null`                      | No                   | 🙂         | 🙂             | 🙂                   | 🙂                  |
///
/// [^1]: Whether or not the collector is capable of collecting garbage and cyclic garbage.
///
/// [^2]: How long the Wasm program is paused during garbage
///       collections. Shorter is better. In general, better latency implies
///       worse throughput and vice versa.
///
/// [^3]: How fast the Wasm program runs when using this collector. Roughly
///       equivalent to the number of Wasm instructions executed per
///       second. Faster is better. In general, better throughput implies worse
///       latency and vice versa.
///
/// [^4]: How fast can individual objects be allocated?
///
/// [^5]: How many objects can the collector fit into N bytes of memory? That
///       is, how much space for bookkeeping and metadata does this collector
///       require? Less space taken up by metadata means more space for
///       additional objects. Reference counts are larger than mark bits and
///       free lists are larger than bump pointers, for example.
#[non_exhaustive]
#[derive(PartialEq, Eq, Clone, Debug, Copy)]
pub enum Collector {
    /// An indicator that the garbage collector should be automatically
    /// selected.
    ///
    /// This is generally what you want for most projects and indicates that the
    /// `wasmtime` crate itself should make the decision about what the best
    /// collector for a wasm module is.
    ///
    /// Currently this always defaults to the deferred reference-counting
    /// collector, but the default value may change over time.
    Auto,

    /// The deferred reference-counting collector.
    ///
    /// A reference-counting collector, generally trading improved latency for
    /// worsened throughput. However, to avoid the largest overheads of
    /// reference counting, it avoids manipulating reference counts for Wasm
    /// objects on the stack. Instead, it will hold a reference count for an
    /// over-approximation of all objects that are currently on the stack, trace
    /// the stack during collection to find the precise set of on-stack roots,
    /// and decrement the reference count of any object that was in the
    /// over-approximation but not the precise set. This improves throughput,
    /// compared to "pure" reference counting, by performing many fewer
    /// refcount-increment and -decrement operations. The cost is the increased
    /// latency associated with tracing the stack.
    ///
    /// This collector cannot currently collect cycles; they will leak until the
    /// GC heap's store is dropped.
    DeferredReferenceCounting,

    /// The null collector.
    ///
    /// This collector does not actually collect any garbage. It simply
    /// allocates objects until it runs out of memory, at which point further
    /// object allocation attempts will trap.
    ///
    /// This collector is useful for incredibly short-running Wasm instances
    /// where additionally you would rather halt an over-allocating Wasm program
    /// than spend time collecting its garbage to allow it to keep running. It
    /// is also useful for measuring the overheads associated with other
    /// collectors, as this collector imposes as close to zero throughput and
    /// latency overhead as possible.
    Null,
}
3264
3265impl Default for Collector {
3266    fn default() -> Collector {
3267        Collector::Auto
3268    }
3269}
3270
#[cfg(feature = "gc")]
impl Collector {
    /// Resolves `Auto` to a concrete collector based on which collector
    /// implementations were compiled in, preferring the deferred
    /// reference-counting collector over the null collector.
    ///
    /// Returns `None` only when `Auto` is requested but neither the `gc-drc`
    /// nor the `gc-null` feature is enabled. An explicit, non-`Auto` choice
    /// is passed through unchanged even if its feature is disabled; that
    /// mismatch is diagnosed by `try_not_auto` below.
    fn not_auto(&self) -> Option<Collector> {
        match self {
            Collector::Auto => {
                if cfg!(feature = "gc-drc") {
                    Some(Collector::DeferredReferenceCounting)
                } else if cfg!(feature = "gc-null") {
                    Some(Collector::Null)
                } else {
                    None
                }
            }
            other => Some(*other),
        }
    }

    /// Like `not_auto`, but returns a descriptive error when the resolved
    /// collector was not compiled into this build, or when no collector is
    /// available at all.
    fn try_not_auto(&self) -> Result<Self> {
        match self.not_auto() {
            // The resolved collector's crate feature is enabled: accept it.
            #[cfg(feature = "gc-drc")]
            Some(c @ Collector::DeferredReferenceCounting) => Ok(c),
            // Explicitly requested, but `gc-drc` was disabled at compile time.
            #[cfg(not(feature = "gc-drc"))]
            Some(Collector::DeferredReferenceCounting) => bail!(
                "cannot create an engine using the deferred reference-counting \
                 collector because the `gc-drc` feature was not enabled at \
                 compile time",
            ),

            #[cfg(feature = "gc-null")]
            Some(c @ Collector::Null) => Ok(c),
            // Explicitly requested, but `gc-null` was disabled at compile time.
            #[cfg(not(feature = "gc-null"))]
            Some(Collector::Null) => bail!(
                "cannot create an engine using the null collector because \
                 the `gc-null` feature was not enabled at compile time",
            ),

            // `not_auto` never returns `Auto`.
            Some(Collector::Auto) => unreachable!(),

            // `Auto` was requested but no collector implementation exists in
            // this build.
            None => bail!(
                "cannot create an engine with GC support when none of the \
                 collectors are available; enable one of the following \
                 features: `gc-drc`, `gc-null`",
            ),
        }
    }
}
3317
/// Possible optimization levels for the Cranelift codegen backend.
#[non_exhaustive]
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum OptLevel {
    /// No optimizations performed; minimizes compilation time by disabling
    /// most optimizations.
    None,
    /// Generates the fastest possible code, but compilation may take longer.
    Speed,
    /// Similar to `speed`, but also performs transformations aimed at reducing
    /// code size.
    SpeedAndSize,
}
3331
/// Possible register allocator algorithms for the Cranelift codegen backend.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[non_exhaustive]
pub enum RegallocAlgorithm {
    /// Produces the fastest possible code, but may take longer to compile.
    ///
    /// This allocator "backtracks": it may undo earlier assignments and retry
    /// as it discovers conflicts. That yields better register utilization —
    /// fewer spills and moves — but compile time can grow super-linearly.
    Backtracking,
    /// Produces acceptable code very quickly.
    ///
    /// This allocator makes a single pass over the code and is guaranteed to
    /// run in linear time. (The rest of Cranelift is not necessarily
    /// guaranteed to run in linear time, however.) Because it cannot revisit
    /// earlier decisions or anticipate constraints that occur further ahead
    /// in the code, the output may contain more spills and moves.
    ///
    /// > **Note**: This algorithm is not yet production-ready and has
    /// > historically had known problems. It is not recommended to enable this
    /// > algorithm for security-sensitive applications and the Wasmtime project
    /// > does not consider this configuration option for issuing security
    /// > advisories at this time.
    SinglePass,
}
3360
/// Select which profiling technique to support.
///
/// Values are plain data: the type is `Copy`, comparable with `==`, and
/// usable as a hash-map key.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum ProfilingStrategy {
    /// No profiler support.
    None,

    /// Collect function name information as the "perf map" file format, used with `perf` on Linux.
    PerfMap,

    /// Collect profiling info for "jitdump" file format, used with `perf` on
    /// Linux.
    JitDump,

    /// Collect profiling info using the "ittapi", used with `VTune` on Linux.
    VTune,

    /// Support for profiling Pulley, Wasmtime's interpreter. Note that enabling
    /// this at runtime requires enabling the `profile-pulley` Cargo feature at
    /// compile time.
    Pulley,
}
3382
/// Select how wasm backtrace detailed information is handled.
///
/// Values are plain data: the type is `Copy` and comparable with `==`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum WasmBacktraceDetails {
    /// Support is unconditionally enabled and wasmtime will parse and read
    /// debug information.
    Enable,

    /// Support is disabled, and wasmtime will not parse debug information for
    /// backtrace details.
    Disable,

    /// Support for backtrace details is conditional on the
    /// `WASMTIME_BACKTRACE_DETAILS` environment variable.
    Environment,
}
3398
/// Describe the tri-state configuration of keys such as MPK or PAGEMAP_SCAN.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub enum Enabled {
    /// Turn the feature on when it's detected on the host system; otherwise
    /// leave it off.
    Auto,
    /// Turn the feature on unconditionally, failing configuration when the
    /// feature is not detected on the host system.
    Yes,
    /// Leave the feature off, even on hosts that support it.
    No,
}
3411
3412/// Configuration options used with [`InstanceAllocationStrategy::Pooling`] to
3413/// change the behavior of the pooling instance allocator.
3414///
3415/// This structure has a builder-style API in the same manner as [`Config`] and
3416/// is configured with [`Config::allocation_strategy`].
3417///
3418/// Note that usage of the pooling allocator does not affect compiled
3419/// WebAssembly code. Compiled `*.cwasm` files, for example, are usable both
3420/// with and without the pooling allocator.
3421///
3422/// ## Advantages of Pooled Allocation
3423///
3424/// The main benefit of the pooling allocator is to make WebAssembly
3425/// instantiation both faster and more scalable in terms of parallelism.
3426/// Allocation is faster because virtual memory is already configured and ready
3427/// to go within the pool, there's no need to [`mmap`] (for example on Unix) a
/// new region and configure it with guard pages. Avoiding [`mmap`] also
/// avoids whole-process virtual memory locks, which can improve scalability
/// and performance.
3431///
3432/// Additionally with pooled allocation it's possible to create "affine slots"
3433/// to a particular WebAssembly module or component over time. For example if
/// the same module is instantiated multiple times over time the pooling
/// allocator will, by default, attempt to reuse the same slot. This means
/// that the slot has been pre-configured and can retain virtual memory
/// mappings for a copy-on-write image, for example (see
/// [`Config::memory_init_cow`] for more information).
3438/// This means that in a steady state instance deallocation is a single
3439/// [`madvise`] to reset linear memory to its original contents followed by a
3440/// single (optional) [`mprotect`] during the next instantiation to shrink
3441/// memory back to its original size. Compared to non-pooled allocation this
3442/// avoids the need to [`mmap`] a new region of memory, [`munmap`] it, and
3443/// [`mprotect`] regions too.
3444///
3445/// Another benefit of pooled allocation is that it's possible to configure
3446/// things such that no virtual memory management is required at all in a steady
3447/// state. For example a pooling allocator can be configured with:
3448///
3449/// * [`Config::memory_init_cow`] disabled
3450/// * [`Config::memory_guard_size`] disabled
3451/// * [`Config::memory_reservation`] shrunk to minimal size
3452/// * [`PoolingAllocationConfig::table_keep_resident`] sufficiently large
3453/// * [`PoolingAllocationConfig::linear_memory_keep_resident`] sufficiently large
3454///
3455/// With all these options in place no virtual memory tricks are used at all and
3456/// everything is manually managed by Wasmtime (for example resetting memory is
3457/// a `memset(0)`). This is not as fast in a single-threaded scenario but can
3458/// provide benefits in high-parallelism situations as no virtual memory locks
/// or IPIs need to happen.
3460///
3461/// ## Disadvantages of Pooled Allocation
3462///
3463/// Despite the above advantages to instantiation performance the pooling
3464/// allocator is not enabled by default in Wasmtime. One reason is that the
3465/// performance advantages are not necessarily portable, for example while the
3466/// pooling allocator works on Windows it has not been tuned for performance on
3467/// Windows in the same way it has on Linux.
3468///
3469/// Additionally the main cost of the pooling allocator is that it requires a
3470/// very large reservation of virtual memory (on the order of most of the
3471/// addressable virtual address space). WebAssembly 32-bit linear memories in
3472/// Wasmtime are, by default 4G address space reservations with a small guard
3473/// region both before and after the linear memory. Memories in the pooling
3474/// allocator are contiguous which means that we only need a guard after linear
3475/// memory because the previous linear memory's slot post-guard is our own
3476/// pre-guard. This means that, by default, the pooling allocator uses roughly
3477/// 4G of virtual memory per WebAssembly linear memory slot. 4G of virtual
3478/// memory is 32 bits of a 64-bit address. Many 64-bit systems can only
3479/// actually use 48-bit addresses by default (although this can be extended on
3480/// architectures nowadays too), and of those 48 bits one of them is reserved
3481/// to indicate kernel-vs-userspace. This leaves 47-32=15 bits left,
3482/// meaning you can only have at most 32k slots of linear memories on many
3483/// systems by default. This is a relatively small number and shows how the
3484/// pooling allocator can quickly exhaust all of virtual memory.
3485///
3486/// Another disadvantage of the pooling allocator is that it may keep memory
3487/// alive when nothing is using it. A previously used slot for an instance might
3488/// have paged-in memory that will not get paged out until the
3489/// [`Engine`](crate::Engine) owning the pooling allocator is dropped. While
3490/// suitable for some applications this behavior may not be suitable for all
3491/// applications.
3492///
3493/// Finally the last disadvantage of the pooling allocator is that the
3494/// configuration values for the maximum number of instances, memories, tables,
3495/// etc, must all be fixed up-front. There's not always a clear answer as to
3496/// what these values should be so not all applications may be able to work
3497/// with this constraint.
3498///
3499/// [`madvise`]: https://man7.org/linux/man-pages/man2/madvise.2.html
3500/// [`mprotect`]: https://man7.org/linux/man-pages/man2/mprotect.2.html
3501/// [`mmap`]: https://man7.org/linux/man-pages/man2/mmap.2.html
3502/// [`munmap`]: https://man7.org/linux/man-pages/man2/munmap.2.html
#[cfg(feature = "pooling-allocator")]
#[derive(Debug, Clone, Default)]
pub struct PoolingAllocationConfig {
    // Raw runtime-level settings; the builder-style methods on this type
    // mutate this value in place.
    config: crate::runtime::vm::PoolingInstanceAllocatorConfig,
}
3508
3509#[cfg(feature = "pooling-allocator")]
3510impl PoolingAllocationConfig {
3511    /// Returns a new configuration builder with all default settings
3512    /// configured.
3513    pub fn new() -> PoolingAllocationConfig {
3514        PoolingAllocationConfig::default()
3515    }
3516
    /// Configures the maximum number of "unused warm slots" to retain in the
    /// pooling allocator.
    ///
    /// The pooling allocator operates over slots to allocate from, and each
    /// slot is considered "cold" if it's never been used before or "warm" if
    /// it's been used by some module in the past. Slots in the pooling
    /// allocator additionally track an "affinity" flag to a particular core
    /// wasm module. When a module is instantiated into a slot then the slot is
    /// considered affine to that module, even after the instance has been
    /// deallocated.
    ///
    /// When a new instance is created then a slot must be chosen, and the
    /// current algorithm for selecting a slot is:
    ///
    /// * If there are slots that are affine to the module being instantiated,
    ///   then the most recently used slot is selected to be allocated from.
    ///   This is done to improve reuse of resources such as memory mappings and
    ///   additionally try to benefit from temporal locality for things like
    ///   caches.
    ///
    /// * Otherwise if there are more than N affine slots to other modules, then
    ///   one of those affine slots is chosen to be allocated. The slot chosen
    ///   is picked on a least-recently-used basis.
    ///
    /// * Finally, if there are fewer than N affine slots to other modules, then
    ///   the non-affine slots are allocated from.
    ///
    /// This setting, `max_unused_warm_slots`, is the value for N in the above
    /// algorithm. The purpose of this setting is to have a knob over the RSS
    /// impact of "unused slots" for a long-running wasm server.
    ///
    /// If this setting is set to 0, for example, then affine slots are
    /// aggressively reused on a least-recently-used basis. A "cold" slot is
    /// only used if there are no affine slots available to allocate from. This
    /// means that the set of slots used over the lifetime of a program is the
    /// same as the maximum concurrent number of wasm instances.
    ///
    /// If this setting is set to infinity, however, then cold slots are
    /// prioritized to be allocated from. This means that the set of slots used
    /// over the lifetime of a program will approach
    /// [`PoolingAllocationConfig::total_memories`], or the maximum number of
    /// slots in the pooling allocator.
    ///
    /// Wasmtime does not aggressively decommit all resources associated with a
    /// slot when the slot is not in use. For example the
    /// [`PoolingAllocationConfig::linear_memory_keep_resident`] option can be
    /// used to keep memory associated with a slot, even when it's not in use.
    /// This means that the total set of used slots in the pooling instance
    /// allocator can impact the overall RSS usage of a program.
    ///
    /// The default value for this option is `100`.
    pub fn max_unused_warm_slots(&mut self, max: u32) -> &mut Self {
        self.config.max_unused_warm_slots = max;
        self
    }
3572
3573    /// The target number of decommits to do per batch.
3574    ///
3575    /// This is not precise, as we can queue up decommits at times when we
3576    /// aren't prepared to immediately flush them, and so we may go over this
3577    /// target size occasionally.
3578    ///
3579    /// A batch size of one effectively disables batching.
3580    ///
3581    /// Defaults to `1`.
3582    pub fn decommit_batch_size(&mut self, batch_size: usize) -> &mut Self {
3583        self.config.decommit_batch_size = batch_size;
3584        self
3585    }
3586
3587    /// How much memory, in bytes, to keep resident for async stacks allocated
3588    /// with the pooling allocator.
3589    ///
3590    /// When [`Config::async_stack_zeroing`] is enabled then Wasmtime will reset
3591    /// the contents of async stacks back to zero upon deallocation. This option
3592    /// can be used to perform the zeroing operation with `memset` up to a
3593    /// certain threshold of bytes instead of using system calls to reset the
3594    /// stack to zero.
3595    ///
3596    /// Note that when using this option the memory with async stacks will
3597    /// never be decommitted.
3598    #[cfg(feature = "async")]
3599    pub fn async_stack_keep_resident(&mut self, size: usize) -> &mut Self {
3600        self.config.async_stack_keep_resident = size;
3601        self
3602    }
3603
3604    /// How much memory, in bytes, to keep resident for each linear memory
3605    /// after deallocation.
3606    ///
3607    /// This option is only applicable on Linux and has no effect on other
3608    /// platforms.
3609    ///
3610    /// By default Wasmtime will use `madvise` to reset the entire contents of
3611    /// linear memory back to zero when a linear memory is deallocated. This
3612    /// option can be used to use `memset` instead to set memory back to zero
3613    /// which can, in some configurations, reduce the number of page faults
3614    /// taken when a slot is reused.
3615    pub fn linear_memory_keep_resident(&mut self, size: usize) -> &mut Self {
3616        self.config.linear_memory_keep_resident = size;
3617        self
3618    }
3619
3620    /// How much memory, in bytes, to keep resident for each table after
3621    /// deallocation.
3622    ///
3623    /// This option is only applicable on Linux and has no effect on other
3624    /// platforms.
3625    ///
3626    /// This option is the same as
3627    /// [`PoolingAllocationConfig::linear_memory_keep_resident`] except that it
3628    /// is applicable to tables instead.
3629    pub fn table_keep_resident(&mut self, size: usize) -> &mut Self {
3630        self.config.table_keep_resident = size;
3631        self
3632    }
3633
3634    /// The maximum number of concurrent component instances supported (default
3635    /// is `1000`).
3636    ///
3637    /// This provides an upper-bound on the total size of component
3638    /// metadata-related allocations, along with
3639    /// [`PoolingAllocationConfig::max_component_instance_size`]. The upper bound is
3640    ///
3641    /// ```text
3642    /// total_component_instances * max_component_instance_size
3643    /// ```
3644    ///
3645    /// where `max_component_instance_size` is rounded up to the size and alignment
3646    /// of the internal representation of the metadata.
3647    pub fn total_component_instances(&mut self, count: u32) -> &mut Self {
3648        self.config.limits.total_component_instances = count;
3649        self
3650    }
3651
    /// The maximum size, in bytes, allocated for a component instance's
    /// `VMComponentContext` metadata as well as the aggregate size of this
    /// component's core instances `VMContext` metadata.
    ///
    /// The [`wasmtime::component::Instance`][crate::component::Instance] type
    /// has a static size but its internal `VMComponentContext` is dynamically
    /// sized depending on the component being instantiated. This size limit
    /// loosely correlates to the size of the component, taking into account
    /// factors such as:
    ///
    /// * number of lifted and lowered functions
    /// * number of memories
    /// * number of inner instances
    /// * number of resources
    ///
    /// If the allocated size per instance is too small then instantiation of a
    /// module will fail at runtime with an error indicating how many bytes were
    /// needed.
    ///
    /// In addition to the memory in the runtime for the component itself,
    /// components contain one or more core module instances. Each of these
    /// requires some memory in the runtime as described in
    /// [`PoolingAllocationConfig::max_core_instance_size`]. The limit here
    /// applies against the sum of all of these individual allocations.
    ///
    /// The default value for this is 1 MiB.
    ///
    /// This provides an upper-bound on the total size of all components'
    /// metadata-related allocations (for both the component and its embedded
    /// core module instances), along with
    /// [`PoolingAllocationConfig::total_component_instances`]. The upper bound is
    ///
    /// ```text
    /// total_component_instances * max_component_instance_size
    /// ```
    ///
    /// where `max_component_instance_size` is rounded up to the size and alignment
    /// of the internal representation of the metadata.
    pub fn max_component_instance_size(&mut self, size: usize) -> &mut Self {
        self.config.limits.component_instance_size = size;
        self
    }
3694
3695    /// The maximum number of core instances a single component may contain
3696    /// (default is unlimited).
3697    ///
3698    /// This method (along with
3699    /// [`PoolingAllocationConfig::max_memories_per_component`],
3700    /// [`PoolingAllocationConfig::max_tables_per_component`], and
3701    /// [`PoolingAllocationConfig::max_component_instance_size`]) allows you to cap
3702    /// the amount of resources a single component allocation consumes.
3703    ///
3704    /// If a component will instantiate more core instances than `count`, then
3705    /// the component will fail to instantiate.
3706    pub fn max_core_instances_per_component(&mut self, count: u32) -> &mut Self {
3707        self.config.limits.max_core_instances_per_component = count;
3708        self
3709    }
3710
3711    /// The maximum number of Wasm linear memories that a single component may
3712    /// transitively contain (default is unlimited).
3713    ///
3714    /// This method (along with
3715    /// [`PoolingAllocationConfig::max_core_instances_per_component`],
3716    /// [`PoolingAllocationConfig::max_tables_per_component`], and
3717    /// [`PoolingAllocationConfig::max_component_instance_size`]) allows you to cap
3718    /// the amount of resources a single component allocation consumes.
3719    ///
3720    /// If a component transitively contains more linear memories than `count`,
3721    /// then the component will fail to instantiate.
3722    pub fn max_memories_per_component(&mut self, count: u32) -> &mut Self {
3723        self.config.limits.max_memories_per_component = count;
3724        self
3725    }
3726
3727    /// The maximum number of tables that a single component may transitively
3728    /// contain (default is unlimited).
3729    ///
3730    /// This method (along with
3731    /// [`PoolingAllocationConfig::max_core_instances_per_component`],
3732    /// [`PoolingAllocationConfig::max_memories_per_component`],
3733    /// [`PoolingAllocationConfig::max_component_instance_size`]) allows you to cap
3734    /// the amount of resources a single component allocation consumes.
3735    ///
3736    /// If a component will transitively contains more tables than `count`, then
3737    /// the component will fail to instantiate.
3738    pub fn max_tables_per_component(&mut self, count: u32) -> &mut Self {
3739        self.config.limits.max_tables_per_component = count;
3740        self
3741    }
3742
3743    /// The maximum number of concurrent Wasm linear memories supported (default
3744    /// is `1000`).
3745    ///
3746    /// This value has a direct impact on the amount of memory allocated by the pooling
3747    /// instance allocator.
3748    ///
3749    /// The pooling instance allocator allocates a memory pool, where each entry
3750    /// in the pool contains the reserved address space for each linear memory
3751    /// supported by an instance.
3752    ///
3753    /// The memory pool will reserve a large quantity of host process address
3754    /// space to elide the bounds checks required for correct WebAssembly memory
3755    /// semantics. Even with 64-bit address spaces, the address space is limited
3756    /// when dealing with a large number of linear memories.
3757    ///
3758    /// For example, on Linux x86_64, the userland address space limit is 128
3759    /// TiB. That might seem like a lot, but each linear memory will *reserve* 6
3760    /// GiB of space by default.
3761    pub fn total_memories(&mut self, count: u32) -> &mut Self {
3762        self.config.limits.total_memories = count;
3763        self
3764    }
3765
3766    /// The maximum number of concurrent tables supported (default is `1000`).
3767    ///
3768    /// This value has a direct impact on the amount of memory allocated by the
3769    /// pooling instance allocator.
3770    ///
3771    /// The pooling instance allocator allocates a table pool, where each entry
3772    /// in the pool contains the space needed for each WebAssembly table
3773    /// supported by an instance (see `table_elements` to control the size of
3774    /// each table).
3775    pub fn total_tables(&mut self, count: u32) -> &mut Self {
3776        self.config.limits.total_tables = count;
3777        self
3778    }
3779
3780    /// The maximum number of execution stacks allowed for asynchronous
3781    /// execution, when enabled (default is `1000`).
3782    ///
3783    /// This value has a direct impact on the amount of memory allocated by the
3784    /// pooling instance allocator.
3785    #[cfg(feature = "async")]
3786    pub fn total_stacks(&mut self, count: u32) -> &mut Self {
3787        self.config.limits.total_stacks = count;
3788        self
3789    }
3790
3791    /// The maximum number of concurrent core instances supported (default is
3792    /// `1000`).
3793    ///
3794    /// This provides an upper-bound on the total size of core instance
3795    /// metadata-related allocations, along with
3796    /// [`PoolingAllocationConfig::max_core_instance_size`]. The upper bound is
3797    ///
3798    /// ```text
3799    /// total_core_instances * max_core_instance_size
3800    /// ```
3801    ///
3802    /// where `max_core_instance_size` is rounded up to the size and alignment of
3803    /// the internal representation of the metadata.
3804    pub fn total_core_instances(&mut self, count: u32) -> &mut Self {
3805        self.config.limits.total_core_instances = count;
3806        self
3807    }
3808
    /// The maximum size, in bytes, allocated for a core instance's `VMContext`
    /// metadata.
    ///
    /// The [`Instance`][crate::Instance] type has a static size but its
    /// `VMContext` metadata is dynamically sized depending on the module being
    /// instantiated. This size limit loosely correlates to the size of the Wasm
    /// module, taking into account factors such as:
    ///
    /// * number of functions
    /// * number of globals
    /// * number of memories
    /// * number of tables
    /// * number of function types
    ///
    /// If the allocated size per instance is too small then instantiation of a
    /// module will fail at runtime with an error indicating how many bytes were
    /// needed.
    ///
    /// The default value for this is 1 MiB.
    ///
    /// This provides an upper-bound on the total size of core instance
    /// metadata-related allocations, along with
    /// [`PoolingAllocationConfig::total_core_instances`]. The upper bound is
    ///
    /// ```text
    /// total_core_instances * max_core_instance_size
    /// ```
    ///
    /// where `max_core_instance_size` is rounded up to the size and alignment of
    /// the internal representation of the metadata.
    pub fn max_core_instance_size(&mut self, size: usize) -> &mut Self {
        self.config.limits.core_instance_size = size;
        self
    }
3843
3844    /// The maximum number of defined tables for a core module (default is `1`).
3845    ///
3846    /// This value controls the capacity of the `VMTableDefinition` table in
3847    /// each instance's `VMContext` structure.
3848    ///
3849    /// The allocated size of the table will be `tables *
3850    /// sizeof(VMTableDefinition)` for each instance regardless of how many
3851    /// tables are defined by an instance's module.
3852    pub fn max_tables_per_module(&mut self, tables: u32) -> &mut Self {
3853        self.config.limits.max_tables_per_module = tables;
3854        self
3855    }
3856
3857    /// The maximum table elements for any table defined in a module (default is
3858    /// `20000`).
3859    ///
3860    /// If a table's minimum element limit is greater than this value, the
3861    /// module will fail to instantiate.
3862    ///
3863    /// If a table's maximum element limit is unbounded or greater than this
3864    /// value, the maximum will be `table_elements` for the purpose of any
3865    /// `table.grow` instruction.
3866    ///
3867    /// This value is used to reserve the maximum space for each supported
3868    /// table; table elements are pointer-sized in the Wasmtime runtime.
3869    /// Therefore, the space reserved for each instance is `tables *
3870    /// table_elements * sizeof::<*const ()>`.
3871    pub fn table_elements(&mut self, elements: usize) -> &mut Self {
3872        self.config.limits.table_elements = elements;
3873        self
3874    }
3875
3876    /// The maximum number of defined linear memories for a module (default is
3877    /// `1`).
3878    ///
3879    /// This value controls the capacity of the `VMMemoryDefinition` table in
3880    /// each core instance's `VMContext` structure.
3881    ///
3882    /// The allocated size of the table will be `memories *
3883    /// sizeof(VMMemoryDefinition)` for each core instance regardless of how
3884    /// many memories are defined by the core instance's module.
3885    pub fn max_memories_per_module(&mut self, memories: u32) -> &mut Self {
3886        self.config.limits.max_memories_per_module = memories;
3887        self
3888    }
3889
    /// The maximum byte size that any WebAssembly linear memory may grow to.
    ///
    /// This option defaults to 4 GiB meaning that for 32-bit linear memories
    /// there are no restrictions. 64-bit linear memories will not be allowed to
    /// grow beyond 4 GiB by default.
    ///
    /// If a memory's minimum size is greater than this value, the module will
    /// fail to instantiate.
    ///
    /// If a memory's maximum size is unbounded or greater than this value, the
    /// maximum will be `max_memory_size` for the purpose of any `memory.grow`
    /// instruction.
    ///
    /// This value is used to control the maximum accessible space for each
    /// linear memory of a core instance. This can be thought of as a simple
    /// mechanism like [`Store::limiter`](crate::Store::limiter) to limit memory
    /// at runtime. This value can also affect striping/coloring behavior when
    /// used in conjunction with
    /// [`memory_protection_keys`](PoolingAllocationConfig::memory_protection_keys).
    ///
    /// The virtual memory reservation size of each linear memory is controlled
    /// by the [`Config::memory_reservation`] setting and this method's
    /// configuration cannot exceed [`Config::memory_reservation`].
    pub fn max_memory_size(&mut self, bytes: usize) -> &mut Self {
        self.config.limits.max_memory_size = bytes;
        self
    }
3917
    /// Configures whether memory protection keys (MPK) should be used for more
    /// efficient layout of pool-allocated memories.
    ///
    /// When using the pooling allocator (see [`Config::allocation_strategy`],
    /// [`InstanceAllocationStrategy::Pooling`]), memory protection keys can
    /// reduce the total amount of allocated virtual memory by eliminating guard
    /// regions between WebAssembly memories in the pool. It does so by
    /// "coloring" memory regions with different memory keys and setting which
    /// regions are accessible each time execution switches from host to guest
    /// (or vice versa).
    ///
    /// Leveraging MPK requires configuring a smaller-than-default
    /// [`max_memory_size`](PoolingAllocationConfig::max_memory_size) to enable
    /// this coloring/striping behavior. For example embeddings might want to
    /// reduce the default 4G allowance to 128M.
    ///
    /// MPK is only available on Linux (called `pku` there) and recent x86
    /// systems; we check for MPK support at runtime by examining the `CPUID`
    /// register. This configuration setting can be in three states:
    ///
    /// - `auto`: if MPK support is available the guard regions are removed; if
    ///   not, the guard regions remain
    /// - `yes`: use MPK to eliminate guard regions; fail if MPK is not
    ///   supported
    /// - `no`: never use MPK
    ///
    /// By default this value is `no`, but may become `auto` in future
    /// releases.
    ///
    /// __WARNING__: this configuration option is still experimental--use at
    /// your own risk! MPK uses kernel and CPU features to protect memory
    /// regions; you may observe segmentation faults if anything is
    /// misconfigured.
    #[cfg(feature = "memory-protection-keys")]
    pub fn memory_protection_keys(&mut self, enable: Enabled) -> &mut Self {
        self.config.memory_protection_keys = enable;
        self
    }
3956
3957    /// Sets an upper limit on how many memory protection keys (MPK) Wasmtime
3958    /// will use.
3959    ///
3960    /// This setting is only applicable when
3961    /// [`PoolingAllocationConfig::memory_protection_keys`] is set to `enable`
3962    /// or `auto`. Configuring this above the HW and OS limits (typically 15)
3963    /// has no effect.
3964    ///
3965    /// If multiple Wasmtime engines are used in the same process, note that all
3966    /// engines will share the same set of allocated keys; this setting will
3967    /// limit how many keys are allocated initially and thus available to all
3968    /// other engines.
3969    #[cfg(feature = "memory-protection-keys")]
3970    pub fn max_memory_protection_keys(&mut self, max: usize) -> &mut Self {
3971        self.config.max_memory_protection_keys = max;
3972        self
3973    }
3974
3975    /// Check if memory protection keys (MPK) are available on the current host.
3976    ///
3977    /// This is a convenience method for determining MPK availability using the
3978    /// same method that [`Enabled::Auto`] does. See
3979    /// [`PoolingAllocationConfig::memory_protection_keys`] for more
3980    /// information.
3981    #[cfg(feature = "memory-protection-keys")]
3982    pub fn are_memory_protection_keys_available() -> bool {
3983        crate::runtime::vm::mpk::is_supported()
3984    }
3985
3986    /// The maximum number of concurrent GC heaps supported (default is `1000`).
3987    ///
3988    /// This value has a direct impact on the amount of memory allocated by the
3989    /// pooling instance allocator.
3990    ///
3991    /// The pooling instance allocator allocates a GC heap pool, where each
3992    /// entry in the pool contains the space needed for each GC heap used by a
3993    /// store.
3994    #[cfg(feature = "gc")]
3995    pub fn total_gc_heaps(&mut self, count: u32) -> &mut Self {
3996        self.config.limits.total_gc_heaps = count;
3997        self
3998    }
3999
4000    /// Configures whether the Linux-specific [`PAGEMAP_SCAN` ioctl][ioctl] is
4001    /// used to help reset linear memory.
4002    ///
4003    /// When [`Self::linear_memory_keep_resident`] or
4004    /// [`Self::table_keep_resident`] options are configured to nonzero values
4005    /// the default behavior is to `memset` the lowest addresses of a table or
4006    /// memory back to their original contents. With the `PAGEMAP_SCAN` ioctl on
4007    /// Linux this can be done to more intelligently scan for resident pages in
4008    /// the region and only reset those pages back to their original contents
4009    /// with `memset` rather than assuming the low addresses are all resident.
4010    ///
4011    /// This ioctl has the potential to provide a number of performance benefits
4012    /// in high-reuse and high concurrency scenarios. Notably this enables
4013    /// Wasmtime to scan the entire region of WebAssembly linear memory and
4014    /// manually reset memory back to its original contents, up to
4015    /// [`Self::linear_memory_keep_resident`] bytes, possibly skipping an
4016    /// `madvise` entirely. This can be more efficient by avoiding removing
4017    /// pages from the address space entirely and additionally ensuring that
4018    /// future use of the linear memory doesn't incur page faults as the pages
4019    /// remain resident.
4020    ///
4021    /// At this time this configuration option is still being evaluated as to
4022    /// how appropriate it is for all use cases. It currently defaults to
4023    /// `no` or disabled but may change to `auto`, enable if supported, in the
4024    /// future. This option is only supported on Linux and requires a kernel
4025    /// version of 6.7 or higher.
4026    ///
4027    /// [ioctl]: https://www.man7.org/linux/man-pages/man2/PAGEMAP_SCAN.2const.html
4028    pub fn pagemap_scan(&mut self, enable: Enabled) -> &mut Self {
4029        self.config.pagemap_scan = enable;
4030        self
4031    }
4032
4033    /// Tests whether [`Self::pagemap_scan`] is available or not on the host
4034    /// system.
4035    pub fn is_pagemap_scan_available() -> bool {
4036        crate::runtime::vm::PoolingInstanceAllocatorConfig::is_pagemap_scan_available()
4037    }
4038}
4039
/// Best-effort runtime detection of whether the host CPU supports the named
/// target `feature`.
///
/// Returns `Some(true)`/`Some(false)` when detection for `feature` is
/// implemented on the current target architecture, and `None` when the
/// feature name is not recognized (or this architecture has no detection at
/// all). Note that riscv64 currently reports every feature as present (see
/// below).
#[cfg(feature = "std")]
fn detect_host_feature(feature: &str) -> Option<bool> {
    #[cfg(target_arch = "aarch64")]
    {
        return match feature {
            "lse" => Some(std::arch::is_aarch64_feature_detected!("lse")),
            "paca" => Some(std::arch::is_aarch64_feature_detected!("paca")),
            "fp16" => Some(std::arch::is_aarch64_feature_detected!("fp16")),

            _ => None,
        };
    }

    // `is_s390x_feature_detected` is nightly only for now, so use the
    // STORE FACILITY LIST EXTENDED instruction as a temporary measure.
    #[cfg(target_arch = "s390x")]
    {
        let mut facility_list: [u64; 4] = [0; 4];
        unsafe {
            core::arch::asm!(
                "stfle 0({})",
                in(reg_addr) facility_list.as_mut_ptr(),
                inout("r0") facility_list.len() as u64 - 1 => _,
                options(nostack)
            );
        }
        let get_facility_bit = |n: usize| {
            // NOTE: bits are numbered from the left.
            facility_list[n / 64] & (1 << (63 - (n % 64))) != 0
        };

        return match feature {
            "mie3" => Some(get_facility_bit(61)),
            "mie4" => Some(get_facility_bit(84)),
            "vxrs_ext2" => Some(get_facility_bit(148)),
            "vxrs_ext3" => Some(get_facility_bit(198)),

            _ => None,
        };
    }

    #[cfg(target_arch = "riscv64")]
    {
        // `is_riscv64_feature_detected` is not yet stable, so it cannot be
        // used here. For now lie and claim all features are present to keep
        // tests working.
        let _ = feature;
        return Some(true);
    }

    #[cfg(target_arch = "x86_64")]
    {
        return match feature {
            "cmpxchg16b" => Some(std::is_x86_feature_detected!("cmpxchg16b")),
            "sse3" => Some(std::is_x86_feature_detected!("sse3")),
            "ssse3" => Some(std::is_x86_feature_detected!("ssse3")),
            "sse4.1" => Some(std::is_x86_feature_detected!("sse4.1")),
            "sse4.2" => Some(std::is_x86_feature_detected!("sse4.2")),
            "popcnt" => Some(std::is_x86_feature_detected!("popcnt")),
            "avx" => Some(std::is_x86_feature_detected!("avx")),
            "avx2" => Some(std::is_x86_feature_detected!("avx2")),
            "fma" => Some(std::is_x86_feature_detected!("fma")),
            "bmi1" => Some(std::is_x86_feature_detected!("bmi1")),
            "bmi2" => Some(std::is_x86_feature_detected!("bmi2")),
            "avx512bitalg" => Some(std::is_x86_feature_detected!("avx512bitalg")),
            "avx512dq" => Some(std::is_x86_feature_detected!("avx512dq")),
            "avx512f" => Some(std::is_x86_feature_detected!("avx512f")),
            "avx512vl" => Some(std::is_x86_feature_detected!("avx512vl")),
            "avx512vbmi" => Some(std::is_x86_feature_detected!("avx512vbmi")),
            "lzcnt" => Some(std::is_x86_feature_detected!("lzcnt")),

            _ => None,
        };
    }

    #[allow(
        unreachable_code,
        reason = "reachable or not depending on if a target above matches"
    )]
    {
        // Architectures without detection support report "unknown" for every
        // feature.
        let _ = feature;
        return None;
    }
}