// wasmtime crate — src/config.rs

1use crate::prelude::*;
2use alloc::sync::Arc;
3use bitflags::Flags;
4use core::fmt;
5use core::num::{NonZeroU32, NonZeroUsize};
6use core::str::FromStr;
7#[cfg(any(feature = "cranelift", feature = "winch"))]
8use std::path::Path;
9pub use wasmparser::WasmFeatures;
10use wasmtime_environ::{ConfigTunables, OperatorCost, OperatorCostStrategy, TripleExt, Tunables};
11
12#[cfg(feature = "runtime")]
13use crate::memory::MemoryCreator;
14#[cfg(feature = "runtime")]
15use crate::profiling_agent::{self, ProfilingAgent};
16#[cfg(feature = "runtime")]
17use crate::runtime::vm::{
18    GcRuntime, InstanceAllocator, OnDemandInstanceAllocator, RuntimeMemoryCreator,
19};
20#[cfg(feature = "runtime")]
21use crate::trampoline::MemoryCreatorProxy;
22
23#[cfg(feature = "async")]
24use crate::stack::{StackCreator, StackCreatorProxy};
25#[cfg(feature = "async")]
26use wasmtime_fiber::RuntimeFiberStackCreator;
27
28#[cfg(feature = "runtime")]
29pub use crate::runtime::code_memory::CustomCodeMemory;
30#[cfg(feature = "cache")]
31pub use wasmtime_cache::{Cache, CacheConfig};
32#[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
33pub use wasmtime_environ::CacheStore;
34
/// Default cap on the number of Wasm frames captured in a backtrace;
/// see [`Config::wasm_backtrace_max_frames`].
pub(crate) const DEFAULT_WASM_BACKTRACE_MAX_FRAMES: NonZeroUsize = NonZeroUsize::new(20).unwrap();
36
/// Represents the module instance allocation strategy to use.
///
/// This enum is `#[non_exhaustive]`: more strategies may be added in future
/// versions of Wasmtime.
#[derive(Clone)]
#[non_exhaustive]
pub enum InstanceAllocationStrategy {
    /// The on-demand instance allocation strategy.
    ///
    /// Resources related to a module instance are allocated at instantiation time and
    /// immediately deallocated when the `Store` referencing the instance is dropped.
    ///
    /// This is the default allocation strategy for Wasmtime.
    OnDemand,
    /// The pooling instance allocation strategy.
    ///
    /// A pool of resources is created in advance and module instantiation reuses resources
    /// from the pool. Resources are returned to the pool when the `Store` referencing the instance
    /// is dropped.
    ///
    /// When GC is enabled, the pooling allocator requires that the GC heap
    /// configuration matches the linear memory configuration (i.e.,
    /// `gc_heap_reservation` must equal `memory_reservation`, etc.). By
    /// default, if no `gc_heap_*` tunables are explicitly configured, they
    /// automatically inherit the `memory_*` values.
    #[cfg(feature = "pooling-allocator")]
    Pooling(PoolingAllocationConfig),
}
62
63impl InstanceAllocationStrategy {
64    /// The default pooling instance allocation strategy.
65    #[cfg(feature = "pooling-allocator")]
66    pub fn pooling() -> Self {
67        Self::Pooling(Default::default())
68    }
69}
70
71impl Default for InstanceAllocationStrategy {
72    fn default() -> Self {
73        Self::OnDemand
74    }
75}
76
#[cfg(feature = "pooling-allocator")]
impl From<PoolingAllocationConfig> for InstanceAllocationStrategy {
    /// Wraps the pooling configuration into
    /// [`InstanceAllocationStrategy::Pooling`].
    fn from(cfg: PoolingAllocationConfig) -> InstanceAllocationStrategy {
        Self::Pooling(cfg)
    }
}
83
#[derive(Clone)]
/// Configure the strategy used for versioning in serializing and deserializing [`crate::Module`].
///
/// The default is [`ModuleVersionStrategy::WasmtimeVersion`].
pub enum ModuleVersionStrategy {
    /// Use the wasmtime crate's Cargo package version.
    WasmtimeVersion,
    /// Use a custom version string. Must be at most 255 bytes.
    Custom(String),
    /// Emit no version string in serialization, and accept all version strings in deserialization.
    None,
}
94
95impl Default for ModuleVersionStrategy {
96    fn default() -> Self {
97        ModuleVersionStrategy::WasmtimeVersion
98    }
99}
100
101impl core::hash::Hash for ModuleVersionStrategy {
102    fn hash<H: core::hash::Hasher>(&self, hasher: &mut H) {
103        match self {
104            Self::WasmtimeVersion => env!("CARGO_PKG_VERSION").hash(hasher),
105            Self::Custom(s) => s.hash(hasher),
106            Self::None => {}
107        };
108    }
109}
110
111impl ModuleVersionStrategy {
112    /// Get the string-encoding version of the module.
113    pub fn as_str(&self) -> &str {
114        match &self {
115            Self::WasmtimeVersion => env!("CARGO_PKG_VERSION_MAJOR"),
116            Self::Custom(c) => c,
117            Self::None => "",
118        }
119    }
120}
121
/// Configuration for record/replay
///
/// This enum is `#[non_exhaustive]`; the `Recording` and `Replaying` variants
/// exist only when the `rr` Cargo feature is enabled.
#[derive(Clone)]
#[non_exhaustive]
pub enum RRConfig {
    #[cfg(feature = "rr")]
    /// Recording on store is enabled
    Recording,
    #[cfg(feature = "rr")]
    /// Replaying on store is enabled
    Replaying,
    /// No record/replay is enabled
    None,
}
135
136/// Global configuration options used to create an [`Engine`](crate::Engine)
137/// and customize its behavior.
138///
139/// This structure exposed a builder-like interface and is primarily consumed by
140/// [`Engine::new()`](crate::Engine::new).
141///
142/// The validation of `Config` is deferred until the engine is being built, thus
143/// a problematic config may cause `Engine::new` to fail.
144///
145/// # Defaults
146///
147/// The `Default` trait implementation and the return value from
148/// [`Config::new()`] are the same and represent the default set of
149/// configuration for an engine. The exact set of defaults will differ based on
150/// properties such as enabled Cargo features at compile time and the configured
151/// target (see [`Config::target`]). Configuration options document their
152/// default values and what the conditional value of the default is where
153/// applicable.
#[derive(Clone)]
pub struct Config {
    // `None` once compilation has been disabled via
    // `Config::enable_compiler(false)` (or `Config::without_compiler`).
    #[cfg(any(feature = "cranelift", feature = "winch"))]
    compiler_config: Option<CompilerConfig>,
    // Compilation target override (see `Config::target`); `None` = host.
    target: Option<target_lexicon::Triple>,
    #[cfg(feature = "gc")]
    collector: Collector,
    profiling_strategy: ProfilingStrategy,
    // Raw engine knobs; validated when the engine is built.
    tunables: ConfigTunables,

    #[cfg(feature = "cache")]
    pub(crate) cache: Option<Cache>,
    #[cfg(feature = "runtime")]
    pub(crate) mem_creator: Option<Arc<dyn RuntimeMemoryCreator>>,
    #[cfg(feature = "runtime")]
    pub(crate) custom_code_memory: Option<Arc<dyn CustomCodeMemory>>,
    pub(crate) allocation_strategy: InstanceAllocationStrategy,
    // Defaults to 512 KiB in `Config::new`.
    pub(crate) max_wasm_stack: usize,
    /// Explicitly enabled features via `Config::wasm_*` methods. This is a
    /// signal that the embedder specifically wants something turned on
    /// regardless of the defaults that Wasmtime might otherwise have enabled.
    ///
    /// Note that this, and `disabled_features` below, start as the empty set of
    /// features to only track explicit user requests.
    pub(crate) enabled_features: WasmFeatures,
    /// Same as `enabled_features`, but for those that are explicitly disabled.
    pub(crate) disabled_features: WasmFeatures,
    // Whether `WASMTIME_BACKTRACE_DETAILS` was consulted; see
    // `Config::wasm_backtrace_details`.
    pub(crate) wasm_backtrace_details_env_used: bool,
    // `None` disables wasm backtrace collection entirely.
    pub(crate) wasm_backtrace_max_frames: Option<NonZeroUsize>,
    // `None` = use the default (see `Config::native_unwind_info`).
    pub(crate) native_unwind_info: Option<bool>,
    #[cfg(any(feature = "async", feature = "stack-switching"))]
    pub(crate) async_stack_size: usize,
    #[cfg(feature = "async")]
    pub(crate) async_stack_zeroing: bool,
    #[cfg(feature = "async")]
    pub(crate) stack_creator: Option<Arc<dyn RuntimeFiberStackCreator>>,
    pub(crate) module_version: ModuleVersionStrategy,
    pub(crate) parallel_compilation: bool,
    pub(crate) memory_guaranteed_dense_image_size: u64,
    pub(crate) force_memory_init_memfd: bool,
    pub(crate) wmemcheck: bool,
    #[cfg(feature = "coredump")]
    pub(crate) coredump_on_trap: bool,
    pub(crate) macos_use_mach_ports: bool,
    // Host CPU feature probe; `None` when unavailable (built without `std`).
    pub(crate) detect_host_feature: Option<fn(&str) -> Option<bool>>,
    // NOTE(review): presumably an override of x86 float ABI compatibility
    // checks — confirm against the engine-build validation.
    pub(crate) x86_float_abi_ok: Option<bool>,
    pub(crate) shared_memory: bool,
    // Record/replay mode; see `RRConfig`.
    pub(crate) rr_config: RRConfig,
}
203
/// User-provided configuration for the compiler.
#[cfg(any(feature = "cranelift", feature = "winch"))]
#[derive(Debug, Clone)]
struct CompilerConfig {
    // Which compiler backend to use; `None` means "choose automatically"
    // (see `CompilerConfig::new`, which starts from `Strategy::Auto`).
    strategy: Option<Strategy>,
    // Named key/value compiler settings (e.g. set via the
    // `Config::cranelift_flag_set` family of methods).
    settings: crate::hash_map::HashMap<String, String>,
    // Boolean compiler flags that have been enabled.
    flags: crate::hash_set::HashSet<String>,
    #[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
    // Storage backend for Cranelift's incremental compilation cache; see
    // `Config::enable_incremental_compilation`.
    cache_store: Option<Arc<dyn CacheStore>>,
    // Output directory for compiler IR — presumably CLIF dumps; TODO confirm.
    clif_dir: Option<std::path::PathBuf>,
    wmemcheck: bool,
}
216
#[cfg(any(feature = "cranelift", feature = "winch"))]
impl CompilerConfig {
    fn new() -> Self {
        Self {
            // No explicit strategy chosen yet (`Auto` is mapped to `None`).
            strategy: Strategy::Auto.not_auto(),
            settings: Default::default(),
            flags: Default::default(),
            #[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
            cache_store: None,
            clif_dir: None,
            wmemcheck: false,
        }
    }

    /// Ensures that the key is not set or equals to the given value.
    /// If the key is not set, it will be set to the given value.
    ///
    /// # Returns
    ///
    /// Returns true if successfully set or already had the given setting
    /// value, or false if the setting was explicitly set to something
    /// else previously.
    fn ensure_setting_unset_or_given(&mut self, k: &str, v: &str) -> bool {
        match self.settings.get(k) {
            // Already configured: succeed only when the values agree.
            Some(existing) => existing == v,
            // Not configured yet: record the requested value.
            None => {
                self.settings.insert(k.to_string(), v.to_string());
                true
            }
        }
    }
}
250
#[cfg(any(feature = "cranelift", feature = "winch"))]
impl Default for CompilerConfig {
    /// Equivalent to [`CompilerConfig::new`].
    fn default() -> Self {
        CompilerConfig::new()
    }
}
257
258impl Config {
259    /// Creates a new configuration object with the default configuration
260    /// specified.
    pub fn new() -> Self {
        let mut ret = Self {
            tunables: ConfigTunables::default(),
            #[cfg(any(feature = "cranelift", feature = "winch"))]
            compiler_config: Some(CompilerConfig::default()),
            target: None,
            #[cfg(feature = "gc")]
            collector: Collector::default(),
            #[cfg(feature = "cache")]
            cache: None,
            profiling_strategy: ProfilingStrategy::None,
            #[cfg(feature = "runtime")]
            mem_creator: None,
            #[cfg(feature = "runtime")]
            custom_code_memory: None,
            allocation_strategy: InstanceAllocationStrategy::OnDemand,
            // 512k of stack -- note that this is chosen currently to not be too
            // big, not be too small, and be a good default for most platforms.
            // One platform of particular note is Windows where the stack size
            // of the main thread seems to, by default, be smaller than that of
            // Linux and macOS. This 512k value at least lets our current test
            // suite pass on the main thread of Windows (using `--test-threads
            // 1` forces this), or at least it passed when this change was
            // committed.
            max_wasm_stack: 512 * 1024,
            wasm_backtrace_details_env_used: false,
            wasm_backtrace_max_frames: Some(DEFAULT_WASM_BACKTRACE_MAX_FRAMES),
            native_unwind_info: None,
            // Both feature sets start empty: they only track *explicit*
            // embedder requests (see the field docs on `Config`).
            enabled_features: WasmFeatures::empty(),
            disabled_features: WasmFeatures::empty(),
            // 2 MiB of async stack by default.
            #[cfg(any(feature = "async", feature = "stack-switching"))]
            async_stack_size: 2 << 20,
            #[cfg(feature = "async")]
            async_stack_zeroing: false,
            #[cfg(feature = "async")]
            stack_creator: None,
            module_version: ModuleVersionStrategy::default(),
            // Parallel compilation and mach ports are disabled when running
            // under Miri.
            parallel_compilation: !cfg!(miri),
            // 16 MiB.
            memory_guaranteed_dense_image_size: 16 << 20,
            force_memory_init_memfd: false,
            wmemcheck: false,
            #[cfg(feature = "coredump")]
            coredump_on_trap: false,
            macos_use_mach_ports: !cfg!(miri),
            // Host CPU feature detection requires `std`.
            #[cfg(feature = "std")]
            detect_host_feature: Some(detect_host_feature),
            #[cfg(not(feature = "std"))]
            detect_host_feature: None,
            x86_float_abi_ok: None,
            shared_memory: false,
            rr_config: RRConfig::None,
        };
        // The default backtrace-details behavior consults the
        // `WASMTIME_BACKTRACE_DETAILS` environment variable (with `std`).
        ret.wasm_backtrace_details(WasmBacktraceDetails::Environment);
        ret
    }
316
317    #[cfg(any(feature = "cranelift", feature = "winch"))]
    /// Whether compilation is still enabled for this config, i.e. it was not
    /// created by `Config::without_compiler` nor disabled via
    /// [`Config::enable_compiler`].
    pub(crate) fn has_compiler(&self) -> bool {
        self.compiler_config.is_some()
    }
321
322    #[track_caller]
323    #[cfg(any(feature = "cranelift", feature = "winch"))]
    /// Mutable access to the compiler configuration.
    ///
    /// # Panics
    ///
    /// Panics if compilation was disabled, e.g. via
    /// `Config::enable_compiler(false)` or `Config::without_compiler`.
    fn compiler_config_mut(&mut self) -> &mut CompilerConfig {
        self.compiler_config.as_mut().expect(
            "cannot configure compiler settings for `Config`s \
             created by `Config::without_compiler`",
        )
    }
330
331    /// Configure whether Wasm compilation is enabled.
332    ///
333    /// Disabling Wasm compilation will allow you to load and run
334    /// [pre-compiled][crate::Engine::precompile_module] Wasm programs, but not
335    /// to compile and run new Wasm programs that have not already been
336    /// pre-compiled.
337    ///
338    /// Many compilation-related configuration methods will panic if compilation
339    /// has been disabled.
340    ///
341    /// Note that there are two ways to disable Wasm compilation:
342    ///
343    /// 1. Statically, by disabling the `"cranelift"` and `"winch"` cargo
344    ///    features when building Wasmtime. These builds of Wasmtime will have
345    ///    smaller code size, since they do not include any of the code to
346    ///    compile Wasm.
347    ///
348    /// 2. Dynamically, by passing `false` to this method at run-time when
349    ///    configuring Wasmtime. The Wasmtime binary will still include the code
350    ///    for compiling Wasm, it just won't be executed, so code size is larger
351    ///    than with the first approach.
352    ///
353    /// The static approach is better in most cases, however dynamically calling
354    /// `enable_compiler(false)` is useful whenever you create multiple
355    /// `Engine`s in the same process, some of which must be able to compile
356    /// Wasm and some of which should never do so. Tests are a common example of
357    /// such a situation, especially when there are multiple Rust binaries in
358    /// the same cargo workspace, and cargo's feature resolution enables the
359    /// `"cranelift"` or `"winch"` features across the whole workspace.
360    #[cfg(any(feature = "cranelift", feature = "winch"))]
361    pub fn enable_compiler(&mut self, enable: bool) -> &mut Self {
362        match (enable, &self.compiler_config) {
363            (true, Some(_)) | (false, None) => {}
364            (true, None) => {
365                self.compiler_config = Some(CompilerConfig::default());
366            }
367            (false, Some(_)) => {
368                self.compiler_config = None;
369            }
370        }
371        self
372    }
373
374    /// Configures the target platform of this [`Config`].
375    ///
376    /// This method is used to configure the output of compilation in an
377    /// [`Engine`](crate::Engine). This can be used, for example, to
378    /// cross-compile from one platform to another. By default, the host target
379    /// triple is used meaning compiled code is suitable to run on the host.
380    ///
381    /// Note that the [`Module`](crate::Module) type can only be created if the
382    /// target configured here matches the host. Otherwise if a cross-compile is
383    /// being performed where the host doesn't match the target then
384    /// [`Engine::precompile_module`](crate::Engine::precompile_module) must be
385    /// used instead.
386    ///
387    /// Target-specific flags (such as CPU features) will not be inferred by
388    /// default for the target when one is provided here. This means that this
389    /// can also be used, for example, with the host architecture to disable all
390    /// host-inferred feature flags. Configuring target-specific flags can be
391    /// done with [`Config::cranelift_flag_set`] and
392    /// [`Config::cranelift_flag_enable`].
393    ///
394    /// # Errors
395    ///
396    /// This method will error if the given target triple is not supported.
397    pub fn target(&mut self, target: &str) -> Result<&mut Self> {
398        self.target =
399            Some(target_lexicon::Triple::from_str(target).map_err(|e| crate::format_err!(e))?);
400
401        Ok(self)
402    }
403
404    /// Enables the incremental compilation cache in Cranelift, using the provided `CacheStore`
405    /// backend for storage.
406    ///
407    /// # Panics
408    ///
409    /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
410    #[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
    pub fn enable_incremental_compilation(
        &mut self,
        cache_store: Arc<dyn CacheStore>,
    ) -> Result<&mut Self> {
        // Infallible today; `Result` is kept in the signature for forward
        // compatibility. Panics (via `compiler_config_mut`) if compilation
        // was disabled.
        self.compiler_config_mut().cache_store = Some(cache_store);
        Ok(self)
    }
418
419    #[doc(hidden)]
420    #[deprecated(note = "no longer has any effect")]
421    #[cfg(feature = "async")]
    pub fn async_support(&mut self, _enable: bool) -> &mut Self {
        // Deprecated no-op kept only for API compatibility.
        self
    }
425
426    /// Configures whether DWARF debug information will be emitted
427    /// during compilation for a native debugger on the Wasmtime
428    /// process to consume.
429    ///
430    /// Note that the `debug-builtins` compile-time Cargo feature must also be
431    /// enabled for native debuggers such as GDB or LLDB to be able to debug
432    /// guest WebAssembly programs.
433    ///
434    /// By default this option is `false`.
435    /// **Note** Enabling this option is not compatible with the Winch compiler.
    pub fn debug_info(&mut self, enable: bool) -> &mut Self {
        // Recorded in the tunables; validated when the engine is built.
        self.tunables.debug_native = Some(enable);
        self
    }
440
441    /// Configures whether compiled guest code will be instrumented to
442    /// provide debugging at the Wasm VM level.
443    ///
444    /// This is required in order to enable a guest-level debugging
445    /// API that can precisely examine Wasm VM state and (eventually,
446    /// once it is complete) set breakpoints and watchpoints and step
447    /// through code.
448    ///
449    /// Without this enabled, debugging can only be done via a native
450    /// debugger operating on the compiled guest code (see
451    /// [`Config::debug_info`] and is "best-effort": we may be able to
452    /// recover some Wasm locals or operand stack values, but it is
453    /// not guaranteed, even when optimizations are disabled.
454    ///
455    /// When this is enabled, additional instrumentation is inserted
456    /// that directly tracks the Wasm VM state at every step. This has
457    /// some performance impact, but allows perfect debugging
458    /// fidelity.
459    ///
460    /// Breakpoints, watchpoints, and stepping are not yet supported,
461    /// but will be added in a future version of Wasmtime.
462    ///
463    /// This enables use of the [`crate::FrameHandle`] API which is
464    /// provided by [`crate::Caller::debug_exit_frames`] or
465    /// [`crate::Store::debug_exit_frames`].
466    ///
467    /// ***Note*** Enabling this option is not compatible with the
468    /// Winch compiler.
469    #[cfg(feature = "debug")]
    pub fn guest_debug(&mut self, enable: bool) -> &mut Self {
        // Recorded in the tunables; validated when the engine is built.
        self.tunables.debug_guest = Some(enable);
        self
    }
474
475    /// Configures whether [`WasmBacktrace`] will be present in the context of
476    /// errors returned from Wasmtime.
477    ///
478    /// This method is deprecated in favor of
479    /// [`Config::wasm_backtrace_max_frames`]. Calling `wasm_backtrace(false)`
480    /// is equivalent to `wasm_backtrace_max_frames(None)`, and
481    /// `wasm_backtrace(true)` will leave `wasm_backtrace_max_frames` unchanged
482    /// if the value is `Some` and will otherwise restore the default `Some`
483    /// value.
484    ///
485    /// [`WasmBacktrace`]: crate::WasmBacktrace
486    #[deprecated = "use `wasm_backtrace_max_frames` instead"]
487    pub fn wasm_backtrace(&mut self, enable: bool) -> &mut Self {
488        match (enable, self.wasm_backtrace_max_frames) {
489            (false, _) => self.wasm_backtrace_max_frames = None,
490            // Wasm backtraces were disabled; enable them with the
491            // default maximum number of frames to capture.
492            (true, None) => {
493                self.wasm_backtrace_max_frames = Some(DEFAULT_WASM_BACKTRACE_MAX_FRAMES)
494            }
495            // Wasm backtraces are already enabled; keep the existing
496            // max-frames configuration.
497            (true, Some(_)) => {}
498        }
499        self
500    }
501
502    /// Configures whether backtraces in `Trap` will parse debug info in the wasm file to
503    /// have filename/line number information.
504    ///
505    /// When enabled this will causes modules to retain debugging information
506    /// found in wasm binaries. This debug information will be used when a trap
507    /// happens to symbolicate each stack frame and attempt to print a
508    /// filename/line number for each wasm frame in the stack trace.
509    ///
510    /// By default this option is `WasmBacktraceDetails::Environment`, meaning
511    /// that wasm will read `WASMTIME_BACKTRACE_DETAILS` to indicate whether
512    /// details should be parsed. Note that the `std` feature of this crate must
513    /// be active to read environment variables, otherwise this is disabled by
514    /// default.
515    pub fn wasm_backtrace_details(&mut self, enable: WasmBacktraceDetails) -> &mut Self {
516        self.wasm_backtrace_details_env_used = false;
517        self.tunables.parse_wasm_debuginfo = match enable {
518            WasmBacktraceDetails::Enable => Some(true),
519            WasmBacktraceDetails::Disable => Some(false),
520            WasmBacktraceDetails::Environment => {
521                #[cfg(feature = "std")]
522                {
523                    self.wasm_backtrace_details_env_used = true;
524                    std::env::var("WASMTIME_BACKTRACE_DETAILS")
525                        .map(|s| Some(s == "1"))
526                        .unwrap_or(Some(false))
527                }
528                #[cfg(not(feature = "std"))]
529                {
530                    Some(false)
531                }
532            }
533        };
534        self
535    }
536
537    /// Configures the maximum number of WebAssembly frames to collect in
538    /// backtraces.
539    ///
540    /// A backtrace may be collected whenever an error is returned from a host
541    /// function call through to WebAssembly or when WebAssembly itself hits a
542    /// trap condition, such as an out-of-bounds memory access. This flag
543    /// indicates, in these conditions, whether the backtrace is collected or
544    /// not and how many frames should be collected.
545    ///
546    /// Currently wasm backtraces are implemented through frame pointer walking.
547    /// This means that collecting a backtrace is expected to be a fast and
548    /// relatively cheap operation. Additionally backtrace collection is
549    /// suitable in concurrent environments since one thread capturing a
550    /// backtrace won't block other threads.
551    ///
552    /// Collected backtraces are attached via
553    /// [`Error::context`](crate::Error::context) to errors returned from host
554    /// functions. The [`WasmBacktrace`] type can be acquired via
555    /// [`Error::downcast_ref`](crate::Error::downcast_ref) to inspect the
556    /// backtrace. When this option is set to `None` then this context is never
557    /// applied to errors coming out of wasm.
558    ///
559    /// The default value is 20.
560    ///
561    /// [`WasmBacktrace`]: crate::WasmBacktrace
    pub fn wasm_backtrace_max_frames(&mut self, limit: Option<NonZeroUsize>) -> &mut Self {
        // `None` disables backtrace collection entirely.
        self.wasm_backtrace_max_frames = limit;
        self
    }
566
567    /// Configures whether to generate native unwind information
568    /// (e.g. `.eh_frame` on Linux).
569    ///
570    /// This configuration option only exists to help third-party stack
571    /// capturing mechanisms, such as the system's unwinder or the `backtrace`
572    /// crate, determine how to unwind through Wasm frames. It does not affect
573    /// whether Wasmtime can capture Wasm backtraces or not. The presence of
574    /// [`WasmBacktrace`] is controlled by the
575    /// [`Config::wasm_backtrace_max_frames`] option.
576    ///
577    /// Native unwind information is included:
578    /// - When targeting Windows, since the Windows ABI requires it.
579    /// - By default.
580    ///
581    /// Note that systems loading many modules may wish to disable this
582    /// configuration option instead of leaving it on-by-default. Some platforms
583    /// exhibit quadratic behavior when registering/unregistering unwinding
584    /// information which can greatly slow down the module loading/unloading
585    /// process.
586    ///
587    /// [`WasmBacktrace`]: crate::WasmBacktrace
    pub fn native_unwind_info(&mut self, enable: bool) -> &mut Self {
        // Records the explicit choice; the field's initial `None` means
        // "use the default behavior".
        self.native_unwind_info = Some(enable);
        self
    }
592
593    /// Configures whether execution of WebAssembly will "consume fuel" to
594    /// either halt or yield execution as desired.
595    ///
596    /// This can be used to deterministically prevent infinitely-executing
597    /// WebAssembly code by instrumenting generated code to consume fuel as it
598    /// executes. When fuel runs out a trap is raised, however [`Store`] can be
599    /// configured to yield execution periodically via
600    /// [`crate::Store::fuel_async_yield_interval`].
601    ///
602    /// Note that a [`Store`] starts with no fuel, so if you enable this option
603    /// you'll have to be sure to pour some fuel into [`Store`] before
604    /// executing some code.
605    ///
606    /// By default this option is `false`.
607    ///
608    /// **Note** Enabling this option is not compatible with the Winch compiler.
609    ///
610    /// [`Store`]: crate::Store
    pub fn consume_fuel(&mut self, enable: bool) -> &mut Self {
        // Recorded in the tunables; validated when the engine is built.
        self.tunables.consume_fuel = Some(enable);
        self
    }
615
616    /// Configures the fuel cost of each WebAssembly operator.
617    ///
618    /// This is only relevant when [`Config::consume_fuel`] is enabled.
    pub fn operator_cost(&mut self, cost: OperatorCost) -> &mut Self {
        // The cost table is wrapped into a table-based cost strategy.
        self.tunables.operator_cost = Some(OperatorCostStrategy::table(cost));
        self
    }
623
624    /// Enables epoch-based interruption.
625    ///
626    /// When executing code in async mode, we sometimes want to
627    /// implement a form of cooperative timeslicing: long-running Wasm
628    /// guest code should periodically yield to the executor
629    /// loop. This yielding could be implemented by using "fuel" (see
630    /// [`consume_fuel`](Config::consume_fuel)). However, fuel
631    /// instrumentation is somewhat expensive: it modifies the
632    /// compiled form of the Wasm code so that it maintains a precise
633    /// instruction count, frequently checking this count against the
634    /// remaining fuel. If one does not need this precise count or
635    /// deterministic interruptions, and only needs a periodic
636    /// interrupt of some form, then It would be better to have a more
637    /// lightweight mechanism.
638    ///
639    /// Epoch-based interruption is that mechanism. There is a global
640    /// "epoch", which is a counter that divides time into arbitrary
641    /// periods (or epochs). This counter lives on the
642    /// [`Engine`](crate::Engine) and can be incremented by calling
643    /// [`Engine::increment_epoch`](crate::Engine::increment_epoch).
644    /// Epoch-based instrumentation works by setting a "deadline
645    /// epoch". The compiled code knows the deadline, and at certain
646    /// points, checks the current epoch against that deadline. It
647    /// will yield if the deadline has been reached.
648    ///
649    /// The idea is that checking an infrequently-changing counter is
650    /// cheaper than counting and frequently storing a precise metric
651    /// (instructions executed) locally. The interruptions are not
652    /// deterministic, but if the embedder increments the epoch in a
653    /// periodic way (say, every regular timer tick by a thread or
654    /// signal handler), then we can ensure that all async code will
655    /// yield to the executor within a bounded time.
656    ///
657    /// The deadline check cannot be avoided by malicious wasm code. It is safe
658    /// to use epoch deadlines to limit the execution time of untrusted
659    /// code.
660    ///
661    /// The [`Store`](crate::Store) tracks the deadline, and controls
662    /// what happens when the deadline is reached during
663    /// execution. Several behaviors are possible:
664    ///
665    /// - Trap if code is executing when the epoch deadline is
666    ///   met. See
667    ///   [`Store::epoch_deadline_trap`](crate::Store::epoch_deadline_trap).
668    ///
669    /// - Call an arbitrary function. This function may chose to trap or
670    ///   increment the epoch. See
671    ///   [`Store::epoch_deadline_callback`](crate::Store::epoch_deadline_callback).
672    ///
673    /// - Yield to the executor loop, then resume when the future is
674    ///   next polled. See
675    ///   [`Store::epoch_deadline_async_yield_and_update`](crate::Store::epoch_deadline_async_yield_and_update).
676    ///
677    /// Trapping is the default. The yielding behaviour may be used for
678    /// the timeslicing behavior described above.
679    ///
680    /// This feature is available with or without async support.
681    /// However, without async support, the timeslicing behaviour is
682    /// not available. This means epoch-based interruption can only
683    /// serve as a simple external-interruption mechanism.
684    ///
685    /// An initial deadline must be set before executing code by calling
686    /// [`Store::set_epoch_deadline`](crate::Store::set_epoch_deadline). If this
687    /// deadline is not configured then wasm will immediately trap.
688    ///
689    /// ## Interaction with blocking host calls
690    ///
691    /// Epochs (and fuel) do not assist in handling WebAssembly code blocked in
692    /// a call to the host. For example if the WebAssembly function calls
693    /// `wasi:io/poll.poll` to sleep epochs will not assist in waking this up or
694    /// timing it out. Epochs intentionally only affect running WebAssembly code
695    /// itself and it's left to the embedder to determine how best to wake up
696    /// indefinitely blocking code in the host.
697    ///
698    /// The typical solution for this, however, is to use the `async` variant of
699    /// WASI host functions. This models computation as a Rust `Future` which
700    /// means that when blocking happens the future is only suspended and
701    /// control yields back to the main event loop. This gives the embedder the
702    /// opportunity to use `tokio::time::timeout` for example on a wasm
703    /// computation and have the desired effect of cancelling a blocking
704    /// operation when a timeout expires.
705    ///
706    /// ## When to use fuel vs. epochs
707    ///
708    /// In general, epoch-based interruption results in faster
709    /// execution. This difference is sometimes significant: in some
710    /// measurements, up to 2-3x. This is because epoch-based
711    /// interruption does less work: it only watches for a global
712    /// rarely-changing counter to increment, rather than keeping a
713    /// local frequently-changing counter and comparing it to a
714    /// deadline.
715    ///
716    /// Fuel, in contrast, should be used when *deterministic*
717    /// yielding or trapping is needed. For example, if it is required
718    /// that the same function call with the same starting state will
719    /// always either complete or trap with an out-of-fuel error,
720    /// deterministically, then fuel with a fixed bound should be
721    /// used.
722    ///
723    /// **Note** Enabling this option is not compatible with the Winch compiler.
724    ///
725    /// # See Also
726    ///
727    /// - [`Engine::increment_epoch`](crate::Engine::increment_epoch)
728    /// - [`Store::set_epoch_deadline`](crate::Store::set_epoch_deadline)
729    /// - [`Store::epoch_deadline_trap`](crate::Store::epoch_deadline_trap)
730    /// - [`Store::epoch_deadline_callback`](crate::Store::epoch_deadline_callback)
731    /// - [`Store::epoch_deadline_async_yield_and_update`](crate::Store::epoch_deadline_async_yield_and_update)
732    pub fn epoch_interruption(&mut self, enable: bool) -> &mut Self {
733        self.tunables.epoch_interruption = Some(enable);
734        self
735    }
736
737    /// XXX: For internal fuzzing and debugging use only!
738    #[doc(hidden)]
739    pub fn gc_zeal_alloc_counter(&mut self, counter: Option<NonZeroU32>) -> Result<&mut Self> {
740        #[cfg(not(gc_zeal))]
741        {
742            let _ = counter;
743            bail!(
744                "cannot set `gc_zeal_alloc_counter` because Wasmtime was not built with `cfg(gc_zeal)`"
745            );
746        }
747
748        #[cfg(gc_zeal)]
749        {
750            self.tunables.gc_zeal_alloc_counter = Some(counter);
751            Ok(self)
752        }
753    }
754
755    /// Configures the maximum amount of stack space available for
756    /// executing WebAssembly code.
757    ///
758    /// WebAssembly has well-defined semantics on stack overflow. This is
759    /// intended to be a knob which can help configure how much stack space
760    /// wasm execution is allowed to consume. Note that the number here is not
761    /// super-precise, but rather wasm will take at most "pretty close to this
762    /// much" stack space.
763    ///
764    /// If a wasm call (or series of nested wasm calls) take more stack space
765    /// than the `size` specified then a stack overflow trap will be raised.
766    ///
767    /// Caveat: this knob only limits the stack space consumed by wasm code.
768    /// More importantly, it does not ensure that this much stack space is
769    /// available on the calling thread stack. Exhausting the thread stack
770    /// typically leads to an **abort** of the process.
771    ///
772    /// Here are some examples of how that could happen:
773    ///
774    /// - Let's assume this option is set to 2 MiB and then a thread that has
775    ///   a stack with 512 KiB left.
776    ///
777    ///   If wasm code consumes more than 512 KiB then the process will be aborted.
778    ///
779    /// - Assuming the same conditions, but this time wasm code does not consume
780    ///   any stack but calls into a host function. The host function consumes
781    ///   more than 512 KiB of stack space. The process will be aborted.
782    ///
783    /// There's another gotcha related to recursive calling into wasm: the stack
784    /// space consumed by a host function is counted towards this limit. The
785    /// host functions are not prevented from consuming more than this limit.
786    /// However, if the host function that used more than this limit and called
787    /// back into wasm, then the execution will trap immediately because of
788    /// stack overflow.
789    ///
790    /// When the `async` feature is enabled, this value cannot exceed the
791    /// `async_stack_size` option. Be careful not to set this value too close
792    /// to `async_stack_size` as doing so may limit how much stack space
793    /// is available for host functions.
794    ///
795    /// By default this option is 512 KiB.
796    ///
797    /// # Errors
798    ///
799    /// The `Engine::new` method will fail if the `size` specified here is
800    /// either 0 or larger than the [`Config::async_stack_size`] configuration.
    pub fn max_wasm_stack(&mut self, size: usize) -> &mut Self {
        // Only recorded here; the range checks described above (non-zero and
        // no larger than `async_stack_size`) happen when the `Engine` is built.
        self.max_wasm_stack = size;
        self
    }
805
806    /// Configures the size of the stacks used for asynchronous execution.
807    ///
808    /// This setting configures the size of the stacks that are allocated for
809    /// asynchronous execution. The value cannot be less than `max_wasm_stack`.
810    ///
811    /// The amount of stack space guaranteed for host functions is
812    /// `async_stack_size - max_wasm_stack`, so take care not to set these two values
813    /// close to one another; doing so may cause host functions to overflow the
814    /// stack and abort the process.
815    ///
816    /// By default this option is 2 MiB.
817    ///
818    /// # Errors
819    ///
820    /// The `Engine::new` method will fail if the value for this option is
821    /// smaller than the [`Config::max_wasm_stack`] option.
    #[cfg(any(feature = "async", feature = "stack-switching"))]
    pub fn async_stack_size(&mut self, size: usize) -> &mut Self {
        // Only recorded here; the `size >= max_wasm_stack` requirement
        // documented above is enforced when the engine is built.
        self.async_stack_size = size;
        self
    }
827
828    /// Configures whether or not stacks used for async futures are zeroed
829    /// before (re)use.
830    ///
831    /// When the [`call_async`] variant of calling WebAssembly is used
832    /// then Wasmtime will create a separate runtime execution stack for each
833    /// future produced by [`call_async`]. By default upon allocation, depending
834    /// on the platform, these stacks might be filled with uninitialized
835    /// memory. This is safe and correct because, modulo bugs in Wasmtime,
836    /// compiled Wasm code will never read from a stack slot before it
837    /// initializes the stack slot.
838    ///
839    /// However, as a defense-in-depth mechanism, you may configure Wasmtime to
840    /// ensure that these stacks are zeroed before they are used. Notably, if
841    /// you are using the pooling allocator, stacks can be pooled and reused
842    /// across different Wasm guests; ensuring that stacks are zeroed can
843    /// prevent data leakage between Wasm guests even in the face of potential
844    /// read-of-stack-slot-before-initialization bugs in Wasmtime's compiler.
845    ///
846    /// Stack zeroing can be a costly operation in highly concurrent
847    /// environments due to modifications of the virtual address space requiring
848    /// process-wide synchronization. It can also be costly in `no-std`
849    /// environments that must manually zero memory, and cannot rely on an OS
850    /// and virtual memory to provide zeroed pages.
851    ///
852    /// This option defaults to `false`.
853    ///
854    /// [`call_async`]: crate::TypedFunc::call_async
    #[cfg(feature = "async")]
    pub fn async_stack_zeroing(&mut self, enable: bool) -> &mut Self {
        // Plain boolean flag; defaults to `false`. See the doc comment above
        // for the security/performance tradeoffs of enabling it.
        self.async_stack_zeroing = enable;
        self
    }
860
861    /// Explicitly enables (and un-disables) a given set of [`WasmFeatures`].
862    ///
863    /// Note: this is a low-level method that does not necessarily imply that
864    /// wasmtime _supports_ a feature. It should only be used to _disable_
865    /// features that callers want to be rejected by the parser or _enable_
866    /// features callers are certain that the current configuration of wasmtime
867    /// supports.
868    ///
869    /// Feature validation is deferred until an engine is being built, thus by
870    /// enabling features here a caller may cause
871    /// [`Engine::new`](crate::Engine::new) to fail later, if the feature
872    /// configuration isn't supported.
    pub fn wasm_features(&mut self, flag: WasmFeatures, enable: bool) -> &mut Self {
        // Track the explicit choice in both directions: `enabled_features`
        // holds what was explicitly turned on and `disabled_features` what was
        // explicitly turned off. Keeping the two sets mirrored means a later
        // call with the opposite `enable` value cleanly overrides this one.
        self.enabled_features.set(flag, enable);
        self.disabled_features.set(flag, !enable);
        self
    }
878
879    /// Configures whether the WebAssembly tail calls proposal will be enabled
880    /// for compilation or not.
881    ///
882    /// The [WebAssembly tail calls proposal] introduces the `return_call` and
883    /// `return_call_indirect` instructions. These instructions allow for Wasm
884    /// programs to implement some recursive algorithms with *O(1)* stack space
885    /// usage.
886    ///
887    /// This is `true` by default except when the Winch compiler is enabled.
888    ///
889    /// [WebAssembly tail calls proposal]: https://github.com/WebAssembly/tail-call
890    pub fn wasm_tail_call(&mut self, enable: bool) -> &mut Self {
891        self.wasm_features(WasmFeatures::TAIL_CALL, enable);
892        self
893    }
894
895    /// Configures whether the WebAssembly custom-page-sizes proposal will be
896    /// enabled for compilation or not.
897    ///
898    /// The [WebAssembly custom-page-sizes proposal] allows a memory to
899    /// customize its page sizes. By default, Wasm page sizes are 64KiB
900    /// large. This proposal allows the memory to opt into smaller page sizes
901    /// instead, allowing Wasm to run in environments with less than 64KiB RAM
902    /// available, for example.
903    ///
904    /// Note that the page size is part of the memory's type, and because
905    /// different memories may have different types, they may also have
906    /// different page sizes.
907    ///
908    /// Currently the only valid page sizes are 64KiB (the default) and 1
909    /// byte. Future extensions may relax this constraint and allow all powers
910    /// of two.
911    ///
912    /// Support for this proposal is disabled by default.
913    ///
914    /// [WebAssembly custom-page-sizes proposal]: https://github.com/WebAssembly/custom-page-sizes
915    pub fn wasm_custom_page_sizes(&mut self, enable: bool) -> &mut Self {
916        self.wasm_features(WasmFeatures::CUSTOM_PAGE_SIZES, enable);
917        self
918    }
919
920    /// Configures whether the WebAssembly [threads] proposal will be enabled
921    /// for compilation.
922    ///
923    /// This feature gates items such as shared memories and atomic
924    /// instructions. Note that the threads feature depends on the bulk memory
925    /// feature, which is enabled by default. Additionally note that while the
926    /// wasm feature is called "threads" it does not actually include the
927    /// ability to spawn threads. Spawning threads is part of the [wasi-threads]
928    /// proposal which is a separately gated feature in Wasmtime.
929    ///
930    /// Embeddings of Wasmtime are able to build their own custom threading
931    /// scheme on top of the core wasm threads proposal, however.
932    ///
933    /// The default value for this option is whether the `threads`
934    /// crate feature of Wasmtime is enabled or not. By default this crate
935    /// feature is enabled.
936    ///
937    /// [threads]: https://github.com/webassembly/threads
938    /// [wasi-threads]: https://github.com/webassembly/wasi-threads
939    #[cfg(feature = "threads")]
940    pub fn wasm_threads(&mut self, enable: bool) -> &mut Self {
941        self.wasm_features(WasmFeatures::THREADS, enable);
942        self
943    }
944
945    /// Configures whether the WebAssembly [shared-everything-threads] proposal
946    /// will be enabled for compilation.
947    ///
948    /// This feature gates extended use of the `shared` attribute on items other
949    /// than memories, extra atomic instructions, and new component model
950    /// intrinsics for spawning threads. It depends on the
951    /// [`wasm_threads`][Self::wasm_threads] being enabled.
952    ///
953    /// [shared-everything-threads]:
954    ///     https://github.com/webassembly/shared-everything-threads
955    pub fn wasm_shared_everything_threads(&mut self, enable: bool) -> &mut Self {
956        self.wasm_features(WasmFeatures::SHARED_EVERYTHING_THREADS, enable);
957        self
958    }
959
    /// Configures whether the [WebAssembly reference types proposal][proposal]
    /// will be enabled for compilation.
    ///
    /// This feature gates items such as the `externref` and `funcref` types as
    /// well as allowing a module to define multiple tables.
    ///
    /// Note that the reference types proposal depends on the bulk memory proposal.
    ///
    /// This feature is `true` by default.
    ///
    /// # Errors
    ///
    /// The validation of this feature is deferred until the engine is being built,
    /// and thus may cause `Engine::new` to fail if the `bulk_memory` feature is disabled.
    ///
    /// [proposal]: https://github.com/webassembly/reference-types
    #[cfg(feature = "gc")]
    pub fn wasm_reference_types(&mut self, enable: bool) -> &mut Self {
        self.wasm_features(WasmFeatures::REFERENCE_TYPES, enable);
        self
    }
981
982    /// Configures whether the [WebAssembly function references
983    /// proposal][proposal] will be enabled for compilation.
984    ///
985    /// This feature gates non-nullable reference types, function reference
986    /// types, `call_ref`, `ref.func`, and non-nullable reference related
987    /// instructions.
988    ///
989    /// Note that the function references proposal depends on the reference
990    /// types proposal.
991    ///
992    /// This feature is `false` by default.
993    ///
994    /// [proposal]: https://github.com/WebAssembly/function-references
995    #[cfg(feature = "gc")]
996    pub fn wasm_function_references(&mut self, enable: bool) -> &mut Self {
997        self.wasm_features(WasmFeatures::FUNCTION_REFERENCES, enable);
998        self
999    }
1000
1001    /// Configures whether the [WebAssembly wide-arithmetic][proposal] will be
1002    /// enabled for compilation.
1003    ///
1004    /// This feature is `false` by default.
1005    ///
1006    /// [proposal]: https://github.com/WebAssembly/wide-arithmetic
1007    pub fn wasm_wide_arithmetic(&mut self, enable: bool) -> &mut Self {
1008        self.wasm_features(WasmFeatures::WIDE_ARITHMETIC, enable);
1009        self
1010    }
1011
    /// Configures whether the [WebAssembly Garbage Collection
    /// proposal][proposal] will be enabled for compilation.
    ///
    /// This feature gates `struct` and `array` type definitions and references,
    /// the `i31ref` type, and all related instructions.
    ///
    /// Note that the GC proposal depends on the typed function
    /// references proposal.
    ///
    /// This feature is `false` by default.
    ///
    /// **Warning: Wasmtime's implementation of the GC proposal is still in
    /// progress and generally not ready for primetime.**
    ///
    /// [proposal]: https://github.com/WebAssembly/gc
    #[cfg(feature = "gc")]
    pub fn wasm_gc(&mut self, enable: bool) -> &mut Self {
        self.wasm_features(WasmFeatures::GC, enable);
        self
    }
1032
1033    /// Configures whether the WebAssembly SIMD proposal will be
1034    /// enabled for compilation.
1035    ///
1036    /// The [WebAssembly SIMD proposal][proposal]. This feature gates items such
1037    /// as the `v128` type and all of its operators being in a module. Note that
1038    /// this does not enable the [relaxed simd proposal].
1039    ///
1040    /// **Note**
1041    ///
1042    /// On x86_64 platforms the base CPU feature requirement for SIMD
1043    /// is SSE2 for the Cranelift compiler and AVX for the Winch compiler.
1044    ///
1045    /// This is `true` by default.
1046    ///
1047    /// [proposal]: https://github.com/webassembly/simd
1048    /// [relaxed simd proposal]: https://github.com/WebAssembly/relaxed-simd
1049    pub fn wasm_simd(&mut self, enable: bool) -> &mut Self {
1050        self.wasm_features(WasmFeatures::SIMD, enable);
1051        self
1052    }
1053
1054    /// Configures whether the WebAssembly Relaxed SIMD proposal will be
1055    /// enabled for compilation.
1056    ///
1057    /// The relaxed SIMD proposal adds new instructions to WebAssembly which,
1058    /// for some specific inputs, are allowed to produce different results on
1059    /// different hosts. More-or-less this proposal enables exposing
1060    /// platform-specific semantics of SIMD instructions in a controlled
1061    /// fashion to a WebAssembly program. From an embedder's perspective this
1062    /// means that WebAssembly programs may execute differently depending on
1063    /// whether the host is x86_64 or AArch64, for example.
1064    ///
1065    /// By default Wasmtime lowers relaxed SIMD instructions to the fastest
1066    /// lowering for the platform it's running on. This means that, by default,
1067    /// some relaxed SIMD instructions may have different results for the same
1068    /// inputs across x86_64 and AArch64. This behavior can be disabled through
1069    /// the [`Config::relaxed_simd_deterministic`] option which will force
1070    /// deterministic behavior across all platforms, as classified by the
1071    /// specification, at the cost of performance.
1072    ///
1073    /// This is `true` by default.
1074    ///
1075    /// [proposal]: https://github.com/webassembly/relaxed-simd
1076    pub fn wasm_relaxed_simd(&mut self, enable: bool) -> &mut Self {
1077        self.wasm_features(WasmFeatures::RELAXED_SIMD, enable);
1078        self
1079    }
1080
1081    /// This option can be used to control the behavior of the [relaxed SIMD
1082    /// proposal's][proposal] instructions.
1083    ///
1084    /// The relaxed SIMD proposal introduces instructions that are allowed to
1085    /// have different behavior on different architectures, primarily to afford
1086    /// an efficient implementation on all architectures. This means, however,
1087    /// that the same module may execute differently on one host than another,
1088    /// which typically is not otherwise the case. This option is provided to
1089    /// force Wasmtime to generate deterministic code for all relaxed simd
1090    /// instructions, at the cost of performance, for all architectures. When
1091    /// this option is enabled then the deterministic behavior of all
1092    /// instructions in the relaxed SIMD proposal is selected.
1093    ///
1094    /// This is `false` by default.
1095    ///
1096    /// [proposal]: https://github.com/webassembly/relaxed-simd
1097    pub fn relaxed_simd_deterministic(&mut self, enable: bool) -> &mut Self {
1098        self.tunables.relaxed_simd_deterministic = Some(enable);
1099        self
1100    }
1101
1102    /// Configures whether the [WebAssembly bulk memory operations
1103    /// proposal][proposal] will be enabled for compilation.
1104    ///
1105    /// This feature gates items such as the `memory.copy` instruction, passive
1106    /// data/table segments, etc, being in a module.
1107    ///
1108    /// This is `true` by default.
1109    ///
1110    /// Feature `reference_types`, which is also `true` by default, requires
1111    /// this feature to be enabled. Thus disabling this feature must also disable
1112    /// `reference_types` as well using [`wasm_reference_types`](crate::Config::wasm_reference_types).
1113    ///
1114    /// # Errors
1115    ///
1116    /// Disabling this feature without disabling `reference_types` will cause
1117    /// `Engine::new` to fail.
1118    ///
1119    /// [proposal]: https://github.com/webassembly/bulk-memory-operations
1120    pub fn wasm_bulk_memory(&mut self, enable: bool) -> &mut Self {
1121        self.wasm_features(WasmFeatures::BULK_MEMORY, enable);
1122        self
1123    }
1124
1125    /// Configures whether the WebAssembly multi-value [proposal] will
1126    /// be enabled for compilation.
1127    ///
1128    /// This feature gates functions and blocks returning multiple values in a
1129    /// module, for example.
1130    ///
1131    /// This is `true` by default.
1132    ///
1133    /// [proposal]: https://github.com/webassembly/multi-value
1134    pub fn wasm_multi_value(&mut self, enable: bool) -> &mut Self {
1135        self.wasm_features(WasmFeatures::MULTI_VALUE, enable);
1136        self
1137    }
1138
1139    /// Configures whether the WebAssembly multi-memory [proposal] will
1140    /// be enabled for compilation.
1141    ///
1142    /// This feature gates modules having more than one linear memory
1143    /// declaration or import.
1144    ///
1145    /// This is `true` by default.
1146    ///
1147    /// [proposal]: https://github.com/webassembly/multi-memory
1148    pub fn wasm_multi_memory(&mut self, enable: bool) -> &mut Self {
1149        self.wasm_features(WasmFeatures::MULTI_MEMORY, enable);
1150        self
1151    }
1152
    /// Configures whether the WebAssembly memory64 [proposal] will
    /// be enabled for compilation.
    ///
    /// Note that the upstream specification is not finalized and Wasmtime
    /// may also have bugs for this feature since it hasn't been exercised
    /// much.
    ///
    /// This is `false` by default.
    ///
    /// [proposal]: https://github.com/webassembly/memory64
    pub fn wasm_memory64(&mut self, enable: bool) -> &mut Self {
        self.wasm_features(WasmFeatures::MEMORY64, enable);
        self
    }
1167
1168    /// Configures whether the WebAssembly extended-const [proposal] will
1169    /// be enabled for compilation.
1170    ///
1171    /// This is `true` by default.
1172    ///
1173    /// [proposal]: https://github.com/webassembly/extended-const
1174    pub fn wasm_extended_const(&mut self, enable: bool) -> &mut Self {
1175        self.wasm_features(WasmFeatures::EXTENDED_CONST, enable);
1176        self
1177    }
1178
    /// Configures whether the [WebAssembly stack switching
    /// proposal][proposal] will be enabled for compilation.
    ///
    /// This feature gates the use of control tags.
    ///
    /// This feature depends on the `function_reference_types` and
    /// `exceptions` features.
    ///
    /// This feature is `false` by default.
    ///
    /// # Errors
    ///
    /// Validation of this feature's dependencies is deferred until the engine
    /// is built, so enabling it without the features it depends on may cause
    /// [`Engine::new`](crate::Engine::new) to fail.
    ///
    /// [proposal]: https://github.com/webassembly/stack-switching
    pub fn wasm_stack_switching(&mut self, enable: bool) -> &mut Self {
        self.wasm_features(WasmFeatures::STACK_SWITCHING, enable);
        self
    }
1196
1197    /// Configures whether the WebAssembly component-model [proposal] will
1198    /// be enabled for compilation.
1199    ///
1200    /// This flag can be used to blanket disable all components within Wasmtime.
1201    /// Otherwise usage of components requires statically using
1202    /// [`Component`](crate::component::Component) instead of
1203    /// [`Module`](crate::Module) for example anyway.
1204    ///
1205    /// The default value for this option is whether the `component-model`
1206    /// crate feature of Wasmtime is enabled or not. By default this crate
1207    /// feature is enabled.
1208    ///
1209    /// [proposal]: https://github.com/webassembly/component-model
1210    #[cfg(feature = "component-model")]
1211    pub fn wasm_component_model(&mut self, enable: bool) -> &mut Self {
1212        self.wasm_features(WasmFeatures::COMPONENT_MODEL, enable);
1213        self
1214    }
1215
1216    /// Configures whether components support the async ABI [proposal] for
1217    /// lifting and lowering functions, as well as `stream`, `future`, and
1218    /// `error-context` types.
1219    ///
1220    /// Please note that Wasmtime's support for this feature is _very_
1221    /// incomplete.
1222    ///
1223    /// [proposal]:
1224    ///     https://github.com/WebAssembly/component-model/blob/main/design/mvp/Concurrency.md
1225    #[cfg(feature = "component-model-async")]
1226    pub fn wasm_component_model_async(&mut self, enable: bool) -> &mut Self {
1227        self.wasm_features(WasmFeatures::CM_ASYNC, enable);
1228        self
1229    }
1230
1231    /// This corresponds to the 🚝 emoji in the component model specification.
1232    ///
1233    /// Please note that Wasmtime's support for this feature is _very_
1234    /// incomplete.
1235    ///
1236    /// [proposal]:
1237    ///     https://github.com/WebAssembly/component-model/blob/main/design/mvp/Concurrency.md
1238    #[cfg(feature = "component-model-async")]
1239    pub fn wasm_component_model_async_builtins(&mut self, enable: bool) -> &mut Self {
1240        self.wasm_features(WasmFeatures::CM_ASYNC_BUILTINS, enable);
1241        self
1242    }
1243
1244    /// This corresponds to the 🚟 emoji in the component model specification.
1245    ///
1246    /// Please note that Wasmtime's support for this feature is _very_
1247    /// incomplete.
1248    ///
1249    /// [proposal]: https://github.com/WebAssembly/component-model/blob/main/design/mvp/Concurrency.md
1250    #[cfg(feature = "component-model-async")]
1251    pub fn wasm_component_model_async_stackful(&mut self, enable: bool) -> &mut Self {
1252        self.wasm_features(WasmFeatures::CM_ASYNC_STACKFUL, enable);
1253        self
1254    }
1255
1256    /// This corresponds to the 🧵 emoji in the component model specification.
1257    ///
1258    /// Please note that Wasmtime's support for this feature is _very_
1259    /// incomplete.
1260    ///
1261    /// [proposal]:
1262    ///     https://github.com/WebAssembly/component-model/pull/557
1263    #[cfg(feature = "component-model-async")]
1264    pub fn wasm_component_model_threading(&mut self, enable: bool) -> &mut Self {
1265        self.wasm_features(WasmFeatures::CM_THREADING, enable);
1266        self
1267    }
1268
1269    /// This corresponds to the 📝 emoji in the component model specification.
1270    ///
1271    /// Please note that Wasmtime's support for this feature is _very_
1272    /// incomplete.
1273    ///
1274    /// [proposal]: https://github.com/WebAssembly/component-model/blob/main/design/mvp/Concurrency.md
1275    #[cfg(feature = "component-model")]
1276    pub fn wasm_component_model_error_context(&mut self, enable: bool) -> &mut Self {
1277        self.wasm_features(WasmFeatures::CM_ERROR_CONTEXT, enable);
1278        self
1279    }
1280
1281    /// Configures whether the [GC extension to the component-model
1282    /// proposal][proposal] is enabled or not.
1283    ///
1284    /// This corresponds to the 🛸 emoji in the component model specification.
1285    ///
1286    /// Please note that Wasmtime's support for this feature is _very_
1287    /// incomplete.
1288    ///
1289    /// [proposal]: https://github.com/WebAssembly/component-model/issues/525
1290    #[cfg(feature = "component-model")]
1291    pub fn wasm_component_model_gc(&mut self, enable: bool) -> &mut Self {
1292        self.wasm_features(WasmFeatures::CM_GC, enable);
1293        self
1294    }
1295
1296    /// Configures whether the component model map type is enabled or not.
1297    ///
1298    /// This is part of the component model specification and enables the
1299    /// `map<k, v>` type in WIT and the component binary format.
1300    #[cfg(feature = "component-model")]
1301    pub fn wasm_component_model_map(&mut self, enable: bool) -> &mut Self {
1302        self.wasm_features(WasmFeatures::CM_MAP, enable);
1303        self
1304    }
1305
1306    /// This corresponds to the 🔧 emoji in the component model specification.
1307    ///
1308    /// Please note that Wasmtime's support for this feature is _very_
1309    /// incomplete.
1310    #[cfg(feature = "component-model")]
1311    pub fn wasm_component_model_fixed_length_lists(&mut self, enable: bool) -> &mut Self {
1312        self.wasm_features(WasmFeatures::CM_FIXED_LENGTH_LISTS, enable);
1313        self
1314    }
1315
1316    /// Configures whether the [Exception-handling proposal][proposal] is enabled or not.
1317    ///
1318    /// [proposal]: https://github.com/WebAssembly/exception-handling
1319    #[cfg(feature = "gc")]
1320    pub fn wasm_exceptions(&mut self, enable: bool) -> &mut Self {
1321        self.wasm_features(WasmFeatures::EXCEPTIONS, enable);
1322        self
1323    }
1324
1325    #[doc(hidden)] // FIXME(#3427) - if/when implemented then un-hide this
1326    #[deprecated = "This configuration option only exists for internal \
1327                    usage with the spec testsuite. It may be removed at \
1328                    any time and without warning. Do not rely on it!"]
1329    pub fn wasm_legacy_exceptions(&mut self, enable: bool) -> &mut Self {
1330        self.wasm_features(WasmFeatures::LEGACY_EXCEPTIONS, enable);
1331        self
1332    }
1333
1334    /// Configures which compilation strategy will be used for wasm modules.
1335    ///
1336    /// This method can be used to configure which compiler is used for wasm
1337    /// modules, and for more documentation consult the [`Strategy`] enumeration
1338    /// and its documentation.
1339    ///
1340    /// The default value for this is `Strategy::Auto`.
1341    ///
1342    /// # Panics
1343    ///
1344    /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
1345    #[cfg(any(feature = "cranelift", feature = "winch"))]
1346    pub fn strategy(&mut self, strategy: Strategy) -> &mut Self {
1347        self.compiler_config_mut().strategy = strategy.not_auto();
1348        self
1349    }
1350
1351    /// Configures which garbage collector will be used for Wasm modules.
1352    ///
1353    /// This method can be used to configure which garbage collector
1354    /// implementation is used for Wasm modules. For more documentation, consult
1355    /// the [`Collector`] enumeration and its documentation.
1356    ///
1357    /// The default value for this is `Collector::Auto`.
    #[cfg(feature = "gc")]
    pub fn collector(&mut self, collector: Collector) -> &mut Self {
        // Recorded as-is; no validation is performed here.
        self.collector = collector;
        self
    }
1363
    /// Creates a default profiler based on the profiling strategy chosen.
    ///
    /// Profiler creation calls the type's default initializer where the purpose is
    /// really just to put in place the type used for profiling.
    ///
    /// Some [`ProfilingStrategy`] values require specific platforms or particular
    /// features to be enabled, such as `ProfilingStrategy::JitDump` requiring the
    /// `jitdump` feature.
    ///
    /// # Errors
    ///
    /// The validation of this field is deferred until the engine is being built, and thus may
    /// cause `Engine::new` to fail if the required feature is disabled, or the platform is not
    /// supported.
    pub fn profiler(&mut self, profile: ProfilingStrategy) -> &mut Self {
        self.profiling_strategy = profile;
        self
    }
1382
1383    /// Configures whether the debug verifier of Cranelift is enabled or not.
1384    ///
1385    /// When Cranelift is used as a code generation backend this will configure
1386    /// it to have the `enable_verifier` flag which will enable a number of debug
1387    /// checks inside of Cranelift. This is largely only useful for the
1388    /// developers of wasmtime itself.
1389    ///
1390    /// The default value for this is `false`
1391    ///
1392    /// # Panics
1393    ///
1394    /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
1395    #[cfg(any(feature = "cranelift", feature = "winch"))]
1396    pub fn cranelift_debug_verifier(&mut self, enable: bool) -> &mut Self {
1397        let val = if enable { "true" } else { "false" };
1398        self.compiler_config_mut()
1399            .settings
1400            .insert("enable_verifier".to_string(), val.to_string());
1401        self
1402    }
1403
1404    /// Configures whether extra debug checks are inserted into
1405    /// Wasmtime-generated code by Cranelift.
1406    ///
1407    /// The default value for this is `false`
1408    ///
1409    /// # Panics
1410    ///
1411    /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
1412    #[cfg(any(feature = "cranelift", feature = "winch"))]
1413    pub fn cranelift_wasmtime_debug_checks(&mut self, enable: bool) -> &mut Self {
1414        unsafe { self.cranelift_flag_set("wasmtime_debug_checks", &enable.to_string()) }
1415    }
1416
1417    /// Configures the Cranelift code generator optimization level.
1418    ///
1419    /// When the Cranelift code generator is used you can configure the
1420    /// optimization level used for generated code in a few various ways. For
1421    /// more information see the documentation of [`OptLevel`].
1422    ///
1423    /// The default value for this is `OptLevel::Speed`.
1424    ///
1425    /// # Panics
1426    ///
1427    /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
1428    #[cfg(any(feature = "cranelift", feature = "winch"))]
1429    pub fn cranelift_opt_level(&mut self, level: OptLevel) -> &mut Self {
1430        let val = match level {
1431            OptLevel::None => "none",
1432            OptLevel::Speed => "speed",
1433            OptLevel::SpeedAndSize => "speed_and_size",
1434        };
1435        self.compiler_config_mut()
1436            .settings
1437            .insert("opt_level".to_string(), val.to_string());
1438        self
1439    }
1440
1441    /// Configures the regalloc algorithm used by the Cranelift code generator.
1442    ///
1443    /// Cranelift can select any of several register allocator algorithms. Each
1444    /// of these algorithms generates correct code, but they represent different
1445    /// tradeoffs between compile speed (how expensive the compilation process
1446    /// is) and run-time speed (how fast the generated code runs).
1447    /// For more information see the documentation of [`RegallocAlgorithm`].
1448    ///
1449    /// The default value for this is `RegallocAlgorithm::Backtracking`.
1450    ///
1451    /// # Panics
1452    ///
1453    /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
1454    #[cfg(any(feature = "cranelift", feature = "winch"))]
1455    pub fn cranelift_regalloc_algorithm(&mut self, algo: RegallocAlgorithm) -> &mut Self {
1456        let val = match algo {
1457            RegallocAlgorithm::Backtracking => "backtracking",
1458            RegallocAlgorithm::SinglePass => "single_pass",
1459        };
1460        self.compiler_config_mut()
1461            .settings
1462            .insert("regalloc_algorithm".to_string(), val.to_string());
1463        self
1464    }
1465
1466    /// Configures whether Cranelift should perform a NaN-canonicalization pass.
1467    ///
1468    /// When Cranelift is used as a code generation backend this will configure
1469    /// it to replace NaNs with a single canonical value. This is useful for
1470    /// users requiring entirely deterministic WebAssembly computation.  This is
1471    /// not required by the WebAssembly spec, so it is not enabled by default.
1472    ///
1473    /// Note that this option affects not only WebAssembly's `f32` and `f64`
1474    /// types but additionally the `v128` type. This option will cause
1475    /// operations using any of these types to have extra checks placed after
1476    /// them to normalize NaN values as needed.
1477    ///
1478    /// The default value for this is `false`
1479    ///
1480    /// # Panics
1481    ///
1482    /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
1483    #[cfg(any(feature = "cranelift", feature = "winch"))]
1484    pub fn cranelift_nan_canonicalization(&mut self, enable: bool) -> &mut Self {
1485        let val = if enable { "true" } else { "false" };
1486        self.compiler_config_mut()
1487            .settings
1488            .insert("enable_nan_canonicalization".to_string(), val.to_string());
1489        self
1490    }
1491
1492    /// Allows setting a Cranelift boolean flag or preset. This allows
1493    /// fine-tuning of Cranelift settings.
1494    ///
1495    /// Since Cranelift flags may be unstable, this method should not be considered to be stable
1496    /// either; other `Config` functions should be preferred for stability.
1497    ///
1498    /// # Safety
1499    ///
1500    /// This is marked as unsafe, because setting the wrong flag might break invariants,
1501    /// resulting in execution hazards.
1502    ///
1503    /// # Errors
1504    ///
1505    /// The validation of the flags are deferred until the engine is being built, and thus may
1506    /// cause `Engine::new` fail if the flag's name does not exist, or the value is not appropriate
1507    /// for the flag type.
1508    ///
1509    /// # Panics
1510    ///
1511    /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
1512    #[cfg(any(feature = "cranelift", feature = "winch"))]
1513    pub unsafe fn cranelift_flag_enable(&mut self, flag: &str) -> &mut Self {
1514        self.compiler_config_mut().flags.insert(flag.to_string());
1515        self
1516    }
1517
1518    /// Allows settings another Cranelift flag defined by a flag name and value. This allows
1519    /// fine-tuning of Cranelift settings.
1520    ///
1521    /// Since Cranelift flags may be unstable, this method should not be considered to be stable
1522    /// either; other `Config` functions should be preferred for stability.
1523    ///
1524    /// # Safety
1525    ///
1526    /// This is marked as unsafe, because setting the wrong flag might break invariants,
1527    /// resulting in execution hazards.
1528    ///
1529    /// # Errors
1530    ///
1531    /// The validation of the flags are deferred until the engine is being built, and thus may
1532    /// cause `Engine::new` fail if the flag's name does not exist, or incompatible with other
1533    /// settings.
1534    ///
1535    /// For example, feature `wasm_backtrace` will set `unwind_info` to `true`, but if it's
1536    /// manually set to false then it will fail.
1537    ///
1538    /// # Panics
1539    ///
1540    /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
1541    #[cfg(any(feature = "cranelift", feature = "winch"))]
1542    pub unsafe fn cranelift_flag_set(&mut self, name: &str, value: &str) -> &mut Self {
1543        self.compiler_config_mut()
1544            .settings
1545            .insert(name.to_string(), value.to_string());
1546        self
1547    }
1548
1549    /// Set a custom [`Cache`].
1550    ///
1551    /// To load a cache configuration from a file, use [`Cache::from_file`]. Otherwise, you can
1552    /// create a new cache config using [`CacheConfig::new`] and passing that to [`Cache::new`].
1553    ///
1554    /// If you want to disable the cache, you can call this method with `None`.
1555    ///
1556    /// By default, new configs do not have caching enabled.
1557    /// Every call to [`Module::new(my_wasm)`][crate::Module::new] will recompile `my_wasm`,
1558    /// even when it is unchanged, unless an enabled `CacheConfig` is provided.
1559    ///
1560    /// This method is only available when the `cache` feature of this crate is
1561    /// enabled.
1562    ///
1563    /// [docs]: https://bytecodealliance.github.io/wasmtime/cli-cache.html
1564    #[cfg(feature = "cache")]
1565    pub fn cache(&mut self, cache: Option<Cache>) -> &mut Self {
1566        self.cache = cache;
1567        self
1568    }
1569
1570    /// Sets a custom memory creator.
1571    ///
1572    /// Custom memory creators are used when creating host `Memory` objects or when
1573    /// creating instance linear memories for the on-demand instance allocation strategy.
1574    #[cfg(feature = "runtime")]
1575    pub fn with_host_memory(&mut self, mem_creator: Arc<dyn MemoryCreator>) -> &mut Self {
1576        self.mem_creator = Some(Arc::new(MemoryCreatorProxy(mem_creator)));
1577        self
1578    }
1579
1580    /// Sets a custom stack creator.
1581    ///
1582    /// Custom memory creators are used when creating creating async instance stacks for
1583    /// the on-demand instance allocation strategy.
1584    #[cfg(feature = "async")]
1585    pub fn with_host_stack(&mut self, stack_creator: Arc<dyn StackCreator>) -> &mut Self {
1586        self.stack_creator = Some(Arc::new(StackCreatorProxy(stack_creator)));
1587        self
1588    }
1589
1590    /// Sets a custom executable-memory publisher.
1591    ///
1592    /// Custom executable-memory publishers are hooks that allow
1593    /// Wasmtime to make certain regions of memory executable when
1594    /// loading precompiled modules or compiling new modules
1595    /// in-process. In most modern operating systems, memory allocated
1596    /// for heap usage is readable and writable by default but not
1597    /// executable. To jump to machine code stored in that memory, we
1598    /// need to make it executable. For security reasons, we usually
1599    /// also make it read-only at the same time, so the executing code
1600    /// can't be modified later.
1601    ///
1602    /// By default, Wasmtime will use the appropriate system calls on
1603    /// the host platform for this work. However, it also allows
1604    /// plugging in a custom implementation via this configuration
1605    /// option. This may be useful on custom or `no_std` platforms,
1606    /// for example, especially where virtual memory is not otherwise
1607    /// used by Wasmtime (no `signals-and-traps` feature).
1608    #[cfg(feature = "runtime")]
1609    pub fn with_custom_code_memory(
1610        &mut self,
1611        custom_code_memory: Option<Arc<dyn CustomCodeMemory>>,
1612    ) -> &mut Self {
1613        self.custom_code_memory = custom_code_memory;
1614        self
1615    }
1616
1617    /// Sets the instance allocation strategy to use.
1618    ///
1619    /// This is notably used in conjunction with
1620    /// [`InstanceAllocationStrategy::Pooling`] and [`PoolingAllocationConfig`].
1621    pub fn allocation_strategy(
1622        &mut self,
1623        strategy: impl Into<InstanceAllocationStrategy>,
1624    ) -> &mut Self {
1625        self.allocation_strategy = strategy.into();
1626        self
1627    }
1628
    /// Specifies the capacity of linear memories, in bytes, in their initial
    /// allocation.
    ///
    /// > Note: this value has important performance ramifications, be sure to
    /// > benchmark when setting this to a non-default value and read over this
    /// > documentation.
    ///
    /// This function will change the size of the initial memory allocation made
    /// for linear memories. This setting is only applicable when the initial
    /// size of a linear memory is below this threshold. Linear memories are
    /// allocated in the virtual address space of the host process with OS APIs
    /// such as `mmap` and this setting affects how large the allocation will
    /// be.
    ///
    /// ## Background: WebAssembly Linear Memories
    ///
    /// WebAssembly linear memories always start with a minimum size and can
    /// possibly grow up to a maximum size. The minimum size is always specified
    /// in a WebAssembly module itself and the maximum size can either be
    /// optionally specified in the module or inherently limited by the index
    /// type. For example for this module:
    ///
    /// ```wasm
    /// (module
    ///     (memory $a 4)
    ///     (memory $b 4096 4096 (pagesize 1))
    ///     (memory $c i64 10)
    /// )
    /// ```
    ///
    /// * Memory `$a` initially allocates 4 WebAssembly pages (256KiB) and can
    ///   grow up to 4GiB, the limit of the 32-bit index space.
    /// * Memory `$b` initially allocates 4096 WebAssembly pages, but in this
    ///   case its page size is 1, so it's 4096 bytes. Memory can also grow no
    ///   further meaning that it will always be 4096 bytes.
    /// * Memory `$c` is a 64-bit linear memory which starts with 640KiB of
    ///   memory and can theoretically grow up to 2^64 bytes, although most
    ///   hosts will run out of memory long before that.
    ///
    /// All operations on linear memories done by wasm are required to be
    /// in-bounds. Any access beyond the end of a linear memory is considered a
    /// trap.
    ///
    /// ## What this setting affects: Virtual Memory
    ///
    /// This setting is used to configure the behavior of the size of the linear
    /// memory allocation performed for each of these memories. For example the
    /// initial linear memory allocation looks like this:
    ///
    /// ```text
    ///              memory_reservation
    ///                    |
    ///          ◄─────────┴────────────────►
    /// ┌───────┬─────────┬──────────────────┬───────┐
    /// │ guard │ initial │ ... capacity ... │ guard │
    /// └───────┴─────────┴──────────────────┴───────┘
    ///  ◄──┬──►                              ◄──┬──►
    ///     │                                    │
    ///     │                             memory_guard_size
    ///     │
    ///     │
    ///  memory_guard_size (if guard_before_linear_memory)
    /// ```
    ///
    /// Memory in the `initial` range is accessible to the instance and can be
    /// read/written by wasm code. Memory in the `guard` regions is never
    /// accessible to wasm code and memory in `capacity` is initially
    /// inaccessible but may become accessible through `memory.grow` instructions
    /// for example.
    ///
    /// This means that this setting is the size of the initial chunk of virtual
    /// memory that a linear memory may grow into.
    ///
    /// ## What this setting affects: Runtime Speed
    ///
    /// This is a performance-sensitive setting which is taken into account
    /// during the compilation process of a WebAssembly module. For example if a
    /// 32-bit WebAssembly linear memory has a `memory_reservation` size of 4GiB
    /// then bounds checks can be elided because `capacity` will be guaranteed
    /// to be unmapped for all addressable bytes that wasm can access (modulo a
    /// few details).
    ///
    /// If `memory_reservation` was something smaller like 256KiB then that
    /// would have a much smaller impact on virtual memory but the compiled code
    /// would then need to have explicit bounds checks to ensure that
    /// loads/stores are in-bounds.
    ///
    /// The goal of this setting is to enable skipping bounds checks in most
    /// modules by default. Some situations which require explicit bounds checks
    /// though are:
    ///
    /// * When `memory_reservation` is smaller than the addressable size of the
    ///   linear memory. For example 64-bit linear memories always need
    ///   bounds checks as they can address the entire virtual address space.
    ///   For 32-bit linear memories a `memory_reservation` minimum size of 4GiB
    ///   is required to elide bounds checks.
    ///
    /// * When linear memories have a page size of 1 then bounds checks are
    ///   required. In this situation virtual memory can't be relied upon
    ///   because that operates at the host page size granularity where wasm
    ///   requires a per-byte level granularity.
    ///
    /// * Configuration settings such as [`Config::signals_based_traps`] can be
    ///   used to disable the use of signal handlers and virtual memory so
    ///   explicit bounds checks are required.
    ///
    /// * When [`Config::memory_guard_size`] is too small a bounds check may be
    ///   required. For 32-bit wasm addresses are actually 33-bit effective
    ///   addresses because loads/stores have a 32-bit static offset to add to
    ///   the dynamic 32-bit address. If the static offset is larger than the
    ///   size of the guard region then an explicit bounds check is required.
    ///
    /// ## What this setting affects: Memory Growth Behavior
    ///
    /// In addition to affecting bounds checks emitted in compiled code this
    /// setting also affects how WebAssembly linear memories are grown. The
    /// `memory.grow` instruction can be used to make a linear memory larger and
    /// this is also affected by APIs such as
    /// [`Memory::grow`](crate::Memory::grow).
    ///
    /// In these situations when the amount being grown is small enough to fit
    /// within the remaining capacity then the linear memory doesn't have to be
    /// moved at runtime. If the capacity runs out though then a new linear
    /// memory allocation must be made and the contents of linear memory is
    /// copied over.
    ///
    /// For example here's a situation where a copy happens:
    ///
    /// * The `memory_reservation` setting is configured to 128KiB.
    /// * A WebAssembly linear memory starts with a single 64KiB page.
    /// * This memory can be grown by one page to contain the full 128KiB of
    ///   memory.
    /// * If grown by one more page, though, then a 192KiB allocation must be
    ///   made and the previous 128KiB of contents are copied into the new
    ///   allocation.
    ///
    /// This growth behavior can have a significant performance impact if lots
    /// of data needs to be copied on growth. Conversely if memory growth never
    /// needs to happen because the capacity will always be large enough then
    /// optimizations can be applied to cache the base pointer of linear memory.
    ///
    /// When memory is grown then the
    /// [`Config::memory_reservation_for_growth`] is used for the new
    /// memory allocation to have memory to grow into.
    ///
    /// When using the pooling allocator via [`PoolingAllocationConfig`] then
    /// memories are never allowed to move so requests for growth are instead
    /// rejected with an error.
    ///
    /// ## When this setting is not used
    ///
    /// This setting is ignored and unused when the initial size of linear
    /// memory is larger than this threshold. For example if this setting is set
    /// to 1MiB but a wasm module requires a 2MiB minimum allocation then this
    /// setting is ignored. In this situation the minimum size of memory will be
    /// allocated along with [`Config::memory_reservation_for_growth`]
    /// after it to grow into.
    ///
    /// That means that this value can be set to zero. That can be useful in
    /// benchmarking to see the overhead of bounds checks for example.
    /// Additionally it can be used to minimize the virtual memory allocated by
    /// Wasmtime.
    ///
    /// ## Default Value
    ///
    /// The default value for this property depends on the host platform. For
    /// 64-bit platforms there's lots of address space available, so the default
    /// configured here is 4GiB. When coupled with the default size of
    /// [`Config::memory_guard_size`] this means that 32-bit WebAssembly linear
    /// memories with 64KiB page sizes will skip almost all bounds checks by
    /// default.
    ///
    /// For 32-bit platforms this value defaults to 10MiB. This means that
    /// bounds checks will be required on 32-bit platforms.
    pub fn memory_reservation(&mut self, bytes: u64) -> &mut Self {
        self.tunables.memory_reservation = Some(bytes);
        self
    }
1807
1808    /// Indicates whether linear memories may relocate their base pointer at
1809    /// runtime.
1810    ///
1811    /// WebAssembly linear memories either have a maximum size that's explicitly
1812    /// listed in the type of a memory or inherently limited by the index type
1813    /// of the memory (e.g. 4GiB for 32-bit linear memories). Depending on how
1814    /// the linear memory is allocated (see [`Config::memory_reservation`]) it
1815    /// may be necessary to move the memory in the host's virtual address space
1816    /// during growth. This option controls whether this movement is allowed or
1817    /// not.
1818    ///
1819    /// An example of a linear memory needing to move is when
1820    /// [`Config::memory_reservation`] is 0 then a linear memory will be
1821    /// allocated as the minimum size of the memory plus
1822    /// [`Config::memory_reservation_for_growth`]. When memory grows beyond the
1823    /// reservation for growth then the memory needs to be relocated.
1824    ///
1825    /// When this option is set to `false` then it can have a number of impacts
1826    /// on how memories work at runtime:
1827    ///
1828    /// * Modules can be compiled with static knowledge the base pointer of
1829    ///   linear memory never changes to enable optimizations such as
1830    ///   loop invariant code motion (hoisting the base pointer out of a loop).
1831    ///
1832    /// * Memories cannot grow in excess of their original allocation. This
1833    ///   means that [`Config::memory_reservation`] and
1834    ///   [`Config::memory_reservation_for_growth`] may need tuning to ensure
1835    ///   the memory configuration works at runtime.
1836    ///
1837    /// The default value for this option is `true`.
1838    pub fn memory_may_move(&mut self, enable: bool) -> &mut Self {
1839        self.tunables.memory_may_move = Some(enable);
1840        self
1841    }
1842
    /// Configures the size, in bytes, of the guard region used at the end of a
    /// linear memory's address space reservation.
    ///
    /// > Note: this value has important performance ramifications, be sure to
    /// > understand what this value does before tweaking it and benchmarking.
    ///
    /// This setting controls how many bytes are guaranteed to be unmapped after
    /// the virtual memory allocation of a linear memory. When
    /// combined with sufficiently large values of
    /// [`Config::memory_reservation`] (e.g. 4GiB for 32-bit linear memories)
    /// then a guard region can be used to eliminate bounds checks in generated
    /// code.
    ///
    /// This setting additionally can be used to help deduplicate bounds checks
    /// in code that otherwise requires bounds checks. For example with a 4KiB
    /// guard region then a 64-bit linear memory which accesses addresses `x+8`
    /// and `x+16` only needs to perform a single bounds check on `x`. If that
    /// bounds check passes then the offset is guaranteed to either reside in
    /// linear memory or the guard region, resulting in deterministic behavior
    /// either way.
    ///
    /// Note that when [`Config::guard_before_linear_memory`] is enabled the
    /// same size configured here is also used for the guard region placed
    /// before a linear memory allocation.
    ///
    /// ## How big should the guard be?
    ///
    /// In general, like with configuring [`Config::memory_reservation`], you
    /// probably don't want to change this value from the defaults. Removing
    /// bounds checks is dependent on a number of factors where the size of the
    /// guard region is only one piece of the equation. Other factors include:
    ///
    /// * [`Config::memory_reservation`]
    /// * The index type of the linear memory (e.g. 32-bit or 64-bit)
    /// * The page size of the linear memory
    /// * Other settings such as [`Config::signals_based_traps`]
    ///
    /// Embeddings using virtual memory almost always want at least some guard
    /// region, but otherwise changes from the default should be profiled
    /// locally to see the performance impact.
    ///
    /// ## Default
    ///
    /// The default value for this property is 32MiB on 64-bit platforms. This
    /// allows eliminating almost all bounds checks on loads/stores with an
    /// immediate offset of less than 32MiB. On 32-bit platforms this defaults
    /// to 64KiB.
    pub fn memory_guard_size(&mut self, bytes: u64) -> &mut Self {
        self.tunables.memory_guard_size = Some(bytes);
        self
    }
1890
    /// Configures the size, in bytes, of the extra virtual memory space
    /// reserved after a linear memory is relocated.
    ///
    /// This setting is used in conjunction with [`Config::memory_reservation`]
    /// to configure what happens after a linear memory is relocated in the host
    /// address space. If the initial size of a linear memory exceeds
    /// [`Config::memory_reservation`] or if it grows beyond that size
    /// throughout its lifetime then this setting will be used.
    ///
    /// When a linear memory is relocated it will initially look like this:
    ///
    /// ```text
    ///            memory.size
    ///                 │
    ///          ◄──────┴─────►
    /// ┌───────┬──────────────┬───────┐
    /// │ guard │  accessible  │ guard │
    /// └───────┴──────────────┴───────┘
    ///                         ◄──┬──►
    ///                            │
    ///                     memory_guard_size
    /// ```
    ///
    /// where `accessible` needs to be grown but there's no more memory to grow
    /// into. A new region of the virtual address space will be allocated that
    /// looks like this:
    ///
    /// ```text
    ///                           memory_reservation_for_growth
    ///                                       │
    ///            memory.size                │
    ///                 │                     │
    ///          ◄──────┴─────► ◄─────────────┴───────────►
    /// ┌───────┬──────────────┬───────────────────────────┬───────┐
    /// │ guard │  accessible  │ .. reserved for growth .. │ guard │
    /// └───────┴──────────────┴───────────────────────────┴───────┘
    ///                                                     ◄──┬──►
    ///                                                        │
    ///                                               memory_guard_size
    /// ```
    ///
    /// This means that up to `memory_reservation_for_growth` bytes can be
    /// allocated again before the entire linear memory needs to be moved again
    /// when another `memory_reservation_for_growth` bytes will be appended to
    /// the size of the allocation.
    ///
    /// Note that this is currently a simple heuristic for optimizing the growth
    /// of dynamic memories, primarily implemented for the memory64 proposal
    /// where the maximum size of memory is larger than 4GiB. This setting is
    /// unlikely to be a one-size-fits-all style approach and if you're an
    /// embedder running into issues with growth and are interested in having
    /// other growth strategies available here please feel free to [open an
    /// issue on the Wasmtime repository][issue]!
    ///
    /// [issue]: https://github.com/bytecodealliance/wasmtime/issues/new
    ///
    /// ## Default
    ///
    /// For 64-bit platforms this defaults to 2GiB, and for 32-bit platforms
    /// this defaults to 1MiB.
    pub fn memory_reservation_for_growth(&mut self, bytes: u64) -> &mut Self {
        self.tunables.memory_reservation_for_growth = Some(bytes);
        self
    }
1955
1956    /// Configures the initial size, in bytes, to be allocated for GC heaps.
1957    ///
1958    /// This is similar to [`Config::memory_reservation`] but applies to the GC
1959    /// heap rather than to linear memories. See that method for more details
1960    /// on what "reservation" means and the implications of this setting.
1961    ///
1962    /// ## Default
1963    ///
1964    /// If none of the `gc_heap_*` tunables are explicitly configured, they
1965    /// default to the same values as their `memory_*` counterparts. Otherwise,
1966    /// the default value for this property depends on the host platform: for
1967    /// 64-bit platforms this defaults to 4GiB, and for 32-bit platforms this
1968    /// defaults to 10MiB.
1969    pub fn gc_heap_reservation(&mut self, bytes: u64) -> &mut Self {
1970        self.tunables.gc_heap_reservation = Some(bytes);
1971        self
1972    }
1973
1974    /// Configures the size, in bytes, of the guard page region for GC heaps.
1975    ///
1976    /// This is similar to [`Config::memory_guard_size`] but applies to the GC
1977    /// heap rather than to linear memories. See that method for more details on
1978    /// what guard pages are and the implications of this setting.
1979    ///
1980    /// ## Default
1981    ///
1982    /// If none of the `gc_heap_*` tunables are explicitly configured, they
1983    /// default to the same values as their `memory_*` counterparts. Otherwise,
1984    /// the default value for this property is 32MiB on 64-bit platforms and
1985    /// 64KiB on 32-bit platforms.
1986    pub fn gc_heap_guard_size(&mut self, bytes: u64) -> &mut Self {
1987        self.tunables.gc_heap_guard_size = Some(bytes);
1988        self
1989    }
1990
1991    /// Configures the size, in bytes, of the extra virtual memory space
1992    /// reserved after a GC heap is relocated.
1993    ///
1994    /// This is similar to [`Config::memory_reservation_for_growth`] but applies
1995    /// to the GC heap rather than to linear memories. See that method for more
1996    /// details.
1997    ///
1998    /// ## Default
1999    ///
2000    /// If none of the `gc_heap_*` tunables are explicitly configured, they
2001    /// default to the same values as their `memory_*` counterparts. Otherwise,
2002    /// for 64-bit platforms this defaults to 2GiB, and for 32-bit platforms
2003    /// this defaults to 1MiB.
2004    pub fn gc_heap_reservation_for_growth(&mut self, bytes: u64) -> &mut Self {
2005        self.tunables.gc_heap_reservation_for_growth = Some(bytes);
2006        self
2007    }
2008
2009    /// Indicates whether GC heaps are allowed to be reallocated after initial
2010    /// allocation at runtime.
2011    ///
2012    /// This is similar to [`Config::memory_may_move`] but applies to the GC
2013    /// heap rather than to linear memories. See that method for more details.
2014    ///
2015    /// ## Default
2016    ///
2017    /// If none of the `gc_heap_*` tunables are explicitly configured, they
2018    /// default to the same values as their `memory_*` counterparts. Otherwise,
2019    /// the default value for this option is `true`.
2020    pub fn gc_heap_may_move(&mut self, enable: bool) -> &mut Self {
2021        self.tunables.gc_heap_may_move = Some(enable);
2022        self
2023    }
2024
2025    /// Indicates whether a guard region is present before allocations of
2026    /// linear memory.
2027    ///
2028    /// Guard regions before linear memories are never used during normal
2029    /// operation of WebAssembly modules, even if they have out-of-bounds
2030    /// loads. The only purpose for a preceding guard region in linear memory
2031    /// is extra protection against possible bugs in code generators like
2032    /// Cranelift. This setting does not affect performance in any way, but will
2033    /// result in larger virtual memory reservations for linear memories (it
2034    /// won't actually ever use more memory, just use more of the address
2035    /// space).
2036    ///
2037    /// The size of the guard region before linear memory is the same as the
2038    /// guard size that comes after linear memory, which is configured by
2039    /// [`Config::memory_guard_size`].
2040    ///
2041    /// ## Default
2042    ///
2043    /// This value defaults to `true`.
2044    pub fn guard_before_linear_memory(&mut self, enable: bool) -> &mut Self {
2045        self.tunables.guard_before_linear_memory = Some(enable);
2046        self
2047    }
2048
2049    /// Indicates whether to initialize tables lazily, so that instantiation
2050    /// is fast but indirect calls are a little slower. If false, tables
2051    /// are initialized eagerly during instantiation from any active element
2052    /// segments that apply to them.
2053    ///
2054    /// **Note** Disabling this option is not compatible with the Winch compiler.
2055    ///
2056    /// ## Default
2057    ///
2058    /// This value defaults to `true`.
2059    pub fn table_lazy_init(&mut self, table_lazy_init: bool) -> &mut Self {
2060        self.tunables.table_lazy_init = Some(table_lazy_init);
2061        self
2062    }
2063
2064    /// Configure the version information used in serialized and deserialized [`crate::Module`]s.
2065    /// This effects the behavior of [`crate::Module::serialize()`], as well as
2066    /// [`crate::Module::deserialize()`] and related functions.
2067    ///
2068    /// The default strategy is to use the wasmtime crate's Cargo package version.
2069    pub fn module_version(&mut self, strategy: ModuleVersionStrategy) -> Result<&mut Self> {
2070        match strategy {
2071            // This case requires special precondition for assertion in SerializedModule::to_bytes
2072            ModuleVersionStrategy::Custom(ref v) => {
2073                if v.as_bytes().len() > 255 {
2074                    bail!("custom module version cannot be more than 255 bytes: {v}");
2075                }
2076            }
2077            _ => {}
2078        }
2079        self.module_version = strategy;
2080        Ok(self)
2081    }
2082
    /// Configure whether wasmtime should compile a module using multiple
    /// threads.
    ///
    /// Disabling this will result in a single thread being used to compile
    /// the wasm bytecode.
    ///
    /// By default parallel compilation is enabled.
    ///
    /// This method is only available when the `parallel-compilation` crate
    /// feature is enabled.
    #[cfg(feature = "parallel-compilation")]
    pub fn parallel_compilation(&mut self, parallel: bool) -> &mut Self {
        self.parallel_compilation = parallel;
        self
    }
2095
2096    /// Configures whether compiled artifacts will contain information to map
2097    /// native program addresses back to the original wasm module.
2098    ///
2099    /// This configuration option is `true` by default and, if enabled,
2100    /// generates the appropriate tables in compiled modules to map from native
2101    /// address back to wasm source addresses. This is used for displaying wasm
2102    /// program counters in backtraces as well as generating filenames/line
2103    /// numbers if so configured as well (and the original wasm module has DWARF
2104    /// debugging information present).
2105    pub fn generate_address_map(&mut self, generate: bool) -> &mut Self {
2106        self.tunables.generate_address_map = Some(generate);
2107        self
2108    }
2109
    /// Configures whether copy-on-write memory-mapped data is used to
    /// initialize a linear memory.
    ///
    /// Initializing linear memory via a copy-on-write mapping can drastically
    /// improve instantiation costs of a WebAssembly module because copying
    /// memory is deferred. Additionally if a page of memory is only ever read
    /// from WebAssembly and never written to then the same underlying page of
    /// data will be reused between all instantiations of a module meaning that
    /// if a module is instantiated many times this can lower the overall memory
    /// needed to run that module.
    ///
    /// The main disadvantage of copy-on-write initialization, however, is that
    /// it may be possible for highly-parallel scenarios to be less scalable. If
    /// a page is read initially by a WebAssembly module then that page will be
    /// mapped to a read-only copy shared between all WebAssembly instances. If
    /// the same page is then written, however, then a private copy is created
    /// and swapped out from the read-only version. This also requires an [IPI],
    /// however, which can be a significant bottleneck in high-parallelism
    /// situations.
    ///
    /// This feature is only applicable when a WebAssembly module meets specific
    /// criteria to be initialized in this fashion, such as:
    ///
    /// * Only memories defined in the module can be initialized this way.
    /// * Data segments for memory must use statically known offsets.
    /// * Data segments for memory must all be in-bounds.
    ///
    /// Modules which do not meet these criteria will fall back to
    /// initialization of linear memory based on copying memory.
    ///
    /// This feature of Wasmtime is also platform-specific:
    ///
    /// * Linux - this feature is supported for all instances of [`Module`].
    ///   Modules backed by an existing mmap (such as those created by
    ///   [`Module::deserialize_file`]) will reuse that mmap to cow-initialize
    ///   memory. Other instances of [`Module`] may use the `memfd_create`
    ///   syscall to create an initialization image to `mmap`.
    /// * Unix (not Linux) - this feature is only supported when loading modules
    ///   from a precompiled file via [`Module::deserialize_file`] where there
    ///   is a file descriptor to use to map data into the process. Note that
    ///   the module must have been compiled with this setting enabled as well.
    /// * Windows - there is no support for this feature at this time. Memory
    ///   initialization will always copy bytes.
    ///
    /// By default this option is enabled.
    ///
    /// [`Module::deserialize_file`]: crate::Module::deserialize_file
    /// [`Module`]: crate::Module
    /// [IPI]: https://en.wikipedia.org/wiki/Inter-processor_interrupt
    pub fn memory_init_cow(&mut self, enable: bool) -> &mut Self {
        self.tunables.memory_init_cow = Some(enable);
        self
    }
2163
    /// A configuration option to force the usage of `memfd_create` on Linux to
    /// be used as the backing source for a module's initial memory image.
    ///
    /// When [`Config::memory_init_cow`] is enabled, which is enabled by
    /// default, module memory initialization images are taken from a module's
    /// original mmap if possible. If a precompiled module was loaded from disk
    /// this means that the disk's file is used as an mmap source for the
    /// initial linear memory contents. This option can be used to force, on
    /// Linux, that instead of using the original file on disk a new in-memory
    /// file is created with `memfd_create` to hold the contents of the initial
    /// image.
    ///
    /// This option can be used to avoid possibly loading the contents of memory
    /// from disk through a page fault. Instead with `memfd_create` the contents
    /// of memory are always in RAM, meaning that even page faults which
    /// initially populate a wasm linear memory will only work with RAM instead
    /// of ever hitting the disk that the original precompiled module is stored
    /// on.
    ///
    /// This option only has an effect on Linux; it is ignored elsewhere.
    ///
    /// This option is disabled by default.
    pub fn force_memory_init_memfd(&mut self, enable: bool) -> &mut Self {
        self.force_memory_init_memfd = enable;
        self
    }
2188
    /// Configures whether or not a coredump should be generated and attached to
    /// the [`Error`](crate::Error) when a trap is raised.
    ///
    /// This option is disabled by default.
    ///
    /// This method is only available when the `coredump` crate feature is
    /// enabled.
    #[cfg(feature = "coredump")]
    pub fn coredump_on_trap(&mut self, enable: bool) -> &mut Self {
        self.coredump_on_trap = enable;
        self
    }
2198
    /// Enables memory error checking for wasm programs.
    ///
    /// This option is disabled by default.
    ///
    /// # Panics
    ///
    /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
    #[cfg(any(feature = "cranelift", feature = "winch"))]
    pub fn wmemcheck(&mut self, enable: bool) -> &mut Self {
        // The flag is recorded both on the runtime configuration and on the
        // compiler configuration so the two stay in agreement.
        self.wmemcheck = enable;
        self.compiler_config_mut().wmemcheck = enable;
        self
    }
2212
    /// Configures the "guaranteed dense image size" for copy-on-write
    /// initialized memories.
    ///
    /// The `size_in_bytes` threshold is measured in bytes of initialized data.
    ///
    /// When using the [`Config::memory_init_cow`] feature to initialize memory
    /// efficiently (which is enabled by default), compiled modules contain an
    /// image of the module's initial heap. If the module has a fairly sparse
    /// initial heap, with just a few data segments at very different offsets,
    /// this could result in a large region of zero bytes in the image. In
    /// other words, it's not very memory-efficient.
    ///
    /// We normally use a heuristic to avoid this: if less than half
    /// of the initialized range (first non-zero to last non-zero
    /// byte) of any memory in the module has pages with nonzero
    /// bytes, then we avoid creating a memory image for the entire module.
    ///
    /// However, if the embedder always needs the instantiation-time efficiency
    /// of copy-on-write initialization, and is otherwise carefully controlling
    /// parameters of the modules (for example, by limiting the maximum heap
    /// size of the modules), then it may be desirable to ensure a memory image
    /// is created even if this could go against the heuristic above. Thus, we
    /// add another condition: there is a size of initialized data region up to
    /// which we *always* allow a memory image. The embedder can set this to a
    /// known maximum heap size if they desire to always get the benefits of
    /// copy-on-write images.
    ///
    /// In the future we may implement a "best of both worlds"
    /// solution where we have a dense image up to some limit, and
    /// then support a sparse list of initializers beyond that; this
    /// would get most of the benefit of copy-on-write and pay the incremental
    /// cost of eager initialization only for those bits of memory
    /// that are out-of-bounds. However, for now, an embedder desiring
    /// fast instantiation should ensure that this setting is as large
    /// as the maximum module initial memory content size.
    ///
    /// By default this value is 16 MiB.
    pub fn memory_guaranteed_dense_image_size(&mut self, size_in_bytes: u64) -> &mut Self {
        self.memory_guaranteed_dense_image_size = size_in_bytes;
        self
    }
2252
2253    /// Whether to enable function inlining during compilation or not.
2254    ///
2255    /// This may result in faster execution at runtime, but adds additional
2256    /// compilation time. Inlining may also enlarge the size of compiled
2257    /// artifacts (for example, the size of the result of
2258    /// [`Engine::precompile_component`](crate::Engine::precompile_component)).
2259    ///
2260    /// Inlining is not supported by all of Wasmtime's compilation strategies;
2261    /// currently, it only Cranelift supports it. This setting will be ignored
2262    /// when using a compilation strategy that does not support inlining, like
2263    /// Winch.
2264    ///
2265    /// Note that inlining is still somewhat experimental at the moment (as of
2266    /// the Wasmtime version 36).
2267    pub fn compiler_inlining(&mut self, inlining: bool) -> &mut Self {
2268        self.tunables.inlining = Some(inlining);
2269        self
2270    }
2271
    /// Returns the set of features that the currently selected compiler backend
    /// does not support at all and may panic on.
    ///
    /// Wasmtime strives to reject unknown modules or unsupported modules with
    /// first-class errors instead of panics. Not all compiler backends have the
    /// same level of feature support on all platforms as well. This method
    /// returns a set of features that the currently selected compiler
    /// configuration is known to not support and may panic on. This acts as a
    /// first-level filter on incoming wasm modules/configuration to fail-fast
    /// instead of panicking later on.
    ///
    /// Note that if a feature is not listed here it does not mean that the
    /// backend fully supports the proposal. Instead that means that the backend
    /// doesn't ever panic on the proposal, but errors during compilation may
    /// still be returned. This means that features listed here are definitely
    /// not supported at all, but features not listed here may still be
    /// partially supported. For example at the time of this writing the Winch
    /// backend partially supports simd so it's not listed here. Winch doesn't
    /// fully support simd but unimplemented instructions just return errors.
    fn compiler_panicking_wasm_features(&self) -> WasmFeatures {
        // First we compute the set of features that Wasmtime itself knows;
        // this is a sort of "maximal set" that we invert to create a set
        // of features we _definitely can't support_ because wasmtime
        // has never heard of them.
        let features_known_to_wasmtime = WasmFeatures::empty()
            | WasmFeatures::MUTABLE_GLOBAL
            | WasmFeatures::SATURATING_FLOAT_TO_INT
            | WasmFeatures::SIGN_EXTENSION
            | WasmFeatures::REFERENCE_TYPES
            | WasmFeatures::CALL_INDIRECT_OVERLONG
            | WasmFeatures::MULTI_VALUE
            | WasmFeatures::BULK_MEMORY
            | WasmFeatures::BULK_MEMORY_OPT
            | WasmFeatures::SIMD
            | WasmFeatures::RELAXED_SIMD
            | WasmFeatures::THREADS
            | WasmFeatures::SHARED_EVERYTHING_THREADS
            | WasmFeatures::TAIL_CALL
            | WasmFeatures::FLOATS
            | WasmFeatures::MULTI_MEMORY
            | WasmFeatures::EXCEPTIONS
            | WasmFeatures::MEMORY64
            | WasmFeatures::EXTENDED_CONST
            | WasmFeatures::COMPONENT_MODEL
            | WasmFeatures::FUNCTION_REFERENCES
            | WasmFeatures::GC
            | WasmFeatures::CUSTOM_PAGE_SIZES
            | WasmFeatures::GC_TYPES
            | WasmFeatures::STACK_SWITCHING
            | WasmFeatures::WIDE_ARITHMETIC
            | WasmFeatures::CM_ASYNC
            | WasmFeatures::CM_ASYNC_STACKFUL
            | WasmFeatures::CM_ASYNC_BUILTINS
            | WasmFeatures::CM_THREADING
            | WasmFeatures::CM_ERROR_CONTEXT
            | WasmFeatures::CM_GC
            | WasmFeatures::CM_MAP
            | WasmFeatures::CM_FIXED_LENGTH_LISTS;

        // Everything wasmparser knows about but wasmtime doesn't is
        // unconditionally unsupported.
        #[allow(unused_mut, reason = "easier to avoid #[cfg]")]
        let mut unsupported = !features_known_to_wasmtime;

        // Then layer on backend- and target-specific restrictions.
        #[cfg(any(feature = "cranelift", feature = "winch"))]
        match self.compiler_config.as_ref().and_then(|c| c.strategy) {
            None | Some(Strategy::Cranelift) => {
                // Pulley at this time fundamentally doesn't support the
                // `threads` proposal, notably shared memory, because Rust can't
                // safely implement loads/stores in the face of shared memory.
                // Stack switching is not implemented, either.
                if self.compiler_target().is_pulley() {
                    unsupported |= WasmFeatures::THREADS;
                    unsupported |= WasmFeatures::STACK_SWITCHING;
                }

                use target_lexicon::*;
                match self.compiler_target() {
                    Triple {
                        architecture: Architecture::X86_64 | Architecture::X86_64h,
                        operating_system:
                            OperatingSystem::Linux
                            | OperatingSystem::MacOSX(_)
                            | OperatingSystem::Darwin(_),
                        ..
                    } => {
                        // Stack switching supported on (non-Pulley) Cranelift.
                    }

                    _ => {
                        // On platforms other than x64 Unix-like, we don't
                        // support stack switching.
                        unsupported |= WasmFeatures::STACK_SWITCHING;
                    }
                }
            }
            Some(Strategy::Winch) => {
                unsupported |= WasmFeatures::GC
                    | WasmFeatures::FUNCTION_REFERENCES
                    | WasmFeatures::RELAXED_SIMD
                    | WasmFeatures::TAIL_CALL
                    | WasmFeatures::GC_TYPES
                    | WasmFeatures::EXCEPTIONS
                    | WasmFeatures::LEGACY_EXCEPTIONS
                    | WasmFeatures::STACK_SWITCHING
                    | WasmFeatures::CM_ASYNC;
                match self.compiler_target().architecture {
                    target_lexicon::Architecture::Aarch64(_) => {
                        unsupported |= WasmFeatures::THREADS;
                        unsupported |= WasmFeatures::WIDE_ARITHMETIC;
                    }

                    // Winch doesn't support other non-x64 architectures at this
                    // time either but will return a first-class error for
                    // them.
                    _ => {}
                }
            }
            Some(Strategy::Auto) => unreachable!(),
        }
        unsupported
    }
2392
    /// Calculates the set of features that are enabled for this `Config`.
    ///
    /// This method internally will start with an empty set of features to
    /// avoid being tied to wasmparser's defaults. Next Wasmtime's set of
    /// default features are added to this set, some of which are conditional
    /// depending on crate features. Finally explicitly requested features via
    /// `wasm_*` methods on `Config` are applied. Everything is then validated
    /// later in `Config::validate`.
    fn features(&self) -> WasmFeatures {
        // Wasmtime by default supports all of the wasm 2.0 version of the
        // specification.
        let mut features = WasmFeatures::WASM2;

        // On-by-default features that wasmtime has. Note that these are all
        // subject to the criteria at
        // https://docs.wasmtime.dev/contributing-implementing-wasm-proposals.html
        // and
        // https://docs.wasmtime.dev/stability-wasm-proposals.html
        features |= WasmFeatures::MULTI_MEMORY;
        features |= WasmFeatures::RELAXED_SIMD;
        features |= WasmFeatures::TAIL_CALL;
        features |= WasmFeatures::EXTENDED_CONST;
        features |= WasmFeatures::MEMORY64;
        // NB: if you add a feature above this line please double-check
        // https://docs.wasmtime.dev/stability-wasm-proposals.html
        // to ensure all requirements are met and/or update the documentation
        // there too.

        // Set some features to their conditionally-enabled defaults depending
        // on crate compile-time features.
        features.set(WasmFeatures::GC_TYPES, cfg!(feature = "gc"));
        features.set(WasmFeatures::THREADS, cfg!(feature = "threads"));
        features.set(
            WasmFeatures::COMPONENT_MODEL,
            cfg!(feature = "component-model"),
        );

        // From the default set of proposals remove any that the current
        // compiler backend may panic on if the module contains them.
        features = features & !self.compiler_panicking_wasm_features();

        // After wasmtime's defaults are configured then factor in user requests
        // and disable/enable features. Note that the enable/disable sets should
        // be disjoint.
        debug_assert!((self.enabled_features & self.disabled_features).is_empty());
        features &= !self.disabled_features;
        features |= self.enabled_features;

        features
    }
2443
2444    /// Returns the configured compiler target for this `Config`.
2445    pub(crate) fn compiler_target(&self) -> target_lexicon::Triple {
2446        // If a target is explicitly configured, always use that.
2447        if let Some(target) = self.target.clone() {
2448            return target;
2449        }
2450
2451        // If the `build.rs` script determined that this platform uses pulley by
2452        // default, then use Pulley.
2453        if cfg!(default_target_pulley) {
2454            return target_lexicon::Triple::pulley_host();
2455        }
2456
2457        // And at this point the target is for sure the host.
2458        target_lexicon::Triple::host()
2459    }
2460
2461    /// Returns `true` if any of the `gc_heap_*` tunables have been explicitly
2462    /// configured.
2463    fn any_gc_heap_tunables_configured(&self) -> bool {
2464        self.tunables.gc_heap_reservation.is_some()
2465            || self.tunables.gc_heap_guard_size.is_some()
2466            || self.tunables.gc_heap_reservation_for_growth.is_some()
2467            || self.tunables.gc_heap_may_move.is_some()
2468    }
2469
    /// Validates this `Config`, returning the fully-resolved [`Tunables`] and
    /// the final set of enabled [`WasmFeatures`] on success.
    ///
    /// Cross-checks the requested wasm features against the selected compiler
    /// backend, stack-size settings, crate compile-time features, and the
    /// instance allocation strategy, returning a first-class error for any
    /// unsupported combination.
    pub(crate) fn validate(&self) -> Result<(Tunables, WasmFeatures)> {
        let features = self.features();

        // First validate that the selected compiler backend and configuration
        // supports the set of `features` that are enabled. This will help
        // provide more first class errors instead of panics about unsupported
        // features and configurations.
        let unsupported = features & self.compiler_panicking_wasm_features();
        if !unsupported.is_empty() {
            for flag in WasmFeatures::FLAGS.iter() {
                if !unsupported.contains(*flag.value()) {
                    continue;
                }
                bail!(
                    "the wasm_{} feature is not supported on this compiler configuration",
                    flag.name().to_lowercase()
                );
            }

            // `unsupported` was non-empty, so at least one flag must have
            // matched above.
            panic!("should have returned an error by now")
        }

        // Sanity-check stack size configuration.
        #[cfg(any(feature = "async", feature = "stack-switching"))]
        if self.max_wasm_stack > self.async_stack_size {
            bail!("max_wasm_stack size cannot exceed the async_stack_size");
        }
        if self.max_wasm_stack == 0 {
            bail!("max_wasm_stack size cannot be zero");
        }
        if !cfg!(feature = "wmemcheck") && self.wmemcheck {
            bail!("wmemcheck (memory checker) was requested but is not enabled in this build");
        }

        if !cfg!(feature = "gc") && features.gc_types() {
            bail!("support for GC was disabled at compile time")
        }

        if !cfg!(feature = "gc") && features.contains(WasmFeatures::EXCEPTIONS) {
            bail!("exceptions support requires garbage collection (GC) to be enabled in the build");
        }

        match &self.rr_config {
            #[cfg(feature = "rr")]
            RRConfig::Recording | RRConfig::Replaying => {
                self.validate_rr_determinism_conflicts()?;
            }
            RRConfig::None => {}
        };

        // Start from the target's default tunables, then refine below.
        let mut tunables = Tunables::default_for_target(&self.compiler_target())?;

        // By default this is enabled with the Cargo feature, and if the feature
        // is missing this is disabled.
        tunables.concurrency_support = cfg!(feature = "component-model-async");

        #[cfg(feature = "rr")]
        {
            tunables.recording = matches!(self.rr_config, RRConfig::Recording);
        }

        // If no target is explicitly specified then further refine `tunables`
        // for the configuration of this host depending on what platform
        // features were found available at compile time. This means that anyone
        // cross-compiling for a customized host will need to further refine
        // compilation options.
        if self.target.is_none() {
            // If this platform doesn't have native signals then change some
            // defaults to account for that. Note that VM guards are turned off
            // here because that's primarily a feature of eliding
            // bounds-checks.
            if !cfg!(has_native_signals) {
                tunables.signals_based_traps = cfg!(has_native_signals);
                tunables.memory_guard_size = 0;
                tunables.gc_heap_guard_size = 0;
            }

            // When virtual memory is not available use slightly different
            // defaults for tunables to be more amenable to `MallocMemory`.
            // Note that these can still be overridden by config options.
            if !cfg!(has_virtual_memory) {
                tunables.memory_reservation = 0;
                tunables.memory_reservation_for_growth = 1 << 20; // 1MB
                tunables.memory_init_cow = false;
                tunables.gc_heap_reservation = 0;
                tunables.gc_heap_reservation_for_growth = 1 << 20; // 1MB
            }
        }

        // If guest-debugging is enabled, we must disable
        // signals-based traps. Do this before we process the user's
        // provided tunables settings so we can detect a conflict with
        // an explicit request to use signals-based traps.
        #[cfg(feature = "debug")]
        if self.tunables.debug_guest == Some(true) {
            tunables.signals_based_traps = false;
        }

        // Apply all explicitly-configured (`Some`) tunables on top of the
        // defaults computed above.
        self.tunables.configure(&mut tunables);

        // If no GC heap tunables are explicitly configured, copy the memory
        // tunables' configured values so that GC heaps default to the same
        // configuration as linear memories.
        if !self.any_gc_heap_tunables_configured() {
            tunables.gc_heap_reservation = tunables.memory_reservation;
            tunables.gc_heap_guard_size = tunables.memory_guard_size;
            tunables.gc_heap_reservation_for_growth = tunables.memory_reservation_for_growth;
            tunables.gc_heap_may_move = tunables.memory_may_move;
        }

        // If we're going to compile with winch, we must use the winch calling convention.
        #[cfg(any(feature = "cranelift", feature = "winch"))]
        {
            tunables.winch_callable = self
                .compiler_config
                .as_ref()
                .is_some_and(|c| c.strategy == Some(Strategy::Winch));
        }

        // Resolve the concrete collector when GC types are in use.
        tunables.collector = if features.gc_types() {
            #[cfg(feature = "gc")]
            {
                use wasmtime_environ::Collector as EnvCollector;
                Some(match self.collector.try_not_auto()? {
                    Collector::DeferredReferenceCounting => EnvCollector::DeferredReferenceCounting,
                    Collector::Null => EnvCollector::Null,
                    Collector::Copying => EnvCollector::Copying,
                    Collector::Auto => unreachable!(),
                })
            }
            #[cfg(not(feature = "gc"))]
            bail!("cannot use GC types: the `gc` feature was disabled at compile time")
        } else {
            None
        };

        if tunables.debug_guest {
            ensure!(
                cfg!(feature = "debug"),
                "debug instrumentation support was disabled at compile time"
            );
            ensure!(
                !tunables.signals_based_traps,
                "cannot use signals-based traps with guest debugging enabled"
            );
        }

        // Concurrency support is required for some component model features.
        let requires_concurrency = WasmFeatures::CM_ASYNC
            | WasmFeatures::CM_ASYNC_BUILTINS
            | WasmFeatures::CM_ASYNC_STACKFUL
            | WasmFeatures::CM_THREADING
            | WasmFeatures::CM_ERROR_CONTEXT;
        if tunables.concurrency_support && !cfg!(feature = "component-model-async") {
            bail!(
                "concurrency support was requested but was not \
                 compiled into this build of Wasmtime"
            )
        }
        if !tunables.concurrency_support && features.intersects(requires_concurrency) {
            bail!(
                "concurrency support must be enabled to use the component \
                 model async or threading features"
            )
        }

        // If the pooling allocator is used and GC is enabled, check that
        // memories and the GC heap are configured identically, since the
        // pooling allocator can't support differently-configured heaps.
        #[cfg(feature = "pooling-allocator")]
        if matches!(
            &self.allocation_strategy,
            InstanceAllocationStrategy::Pooling(_)
        ) && tunables.collector.is_some()
        {
            if tunables.memory_reservation != tunables.gc_heap_reservation {
                bail!(
                    "when using the pooling allocator with GC, `memory_reservation` ({}) \
                     and `gc_heap_reservation` ({}) must be the same",
                    tunables.memory_reservation,
                    tunables.gc_heap_reservation,
                );
            }
            if tunables.memory_guard_size != tunables.gc_heap_guard_size {
                bail!(
                    "when using the pooling allocator with GC, `memory_guard_size` ({}) \
                     and `gc_heap_guard_size` ({}) must be the same",
                    tunables.memory_guard_size,
                    tunables.gc_heap_guard_size,
                );
            }
            if tunables.memory_reservation_for_growth != tunables.gc_heap_reservation_for_growth {
                bail!(
                    "when using the pooling allocator with GC, \
                     `memory_reservation_for_growth` ({}) and \
                     `gc_heap_reservation_for_growth` ({}) must be the same",
                    tunables.memory_reservation_for_growth,
                    tunables.gc_heap_reservation_for_growth,
                );
            }
            if tunables.memory_may_move != tunables.gc_heap_may_move {
                bail!(
                    "when using the pooling allocator with GC, `memory_may_move` ({}) \
                     and `gc_heap_may_move` ({}) must be the same",
                    tunables.memory_may_move,
                    tunables.gc_heap_may_move,
                );
            }
        }

        Ok((tunables, features))
    }
2681
    /// Builds the instance allocator described by `self.allocation_strategy`,
    /// either an on-demand allocator or (when compiled in) a pooling one.
    #[cfg(feature = "runtime")]
    pub(crate) fn build_allocator(
        &self,
        tunables: &Tunables,
    ) -> Result<Box<dyn InstanceAllocator + Send + Sync>> {
        // Async stack parameters only exist when the `async` feature is
        // compiled in; otherwise inert placeholder values are used.
        #[cfg(feature = "async")]
        let (stack_size, stack_zeroing) = (self.async_stack_size, self.async_stack_zeroing);

        #[cfg(not(feature = "async"))]
        let (stack_size, stack_zeroing) = (0, false);

        // `tunables` is only consumed by the pooling branch below; this
        // silences an unused-variable warning when the `pooling-allocator`
        // feature is compiled out.
        let _ = tunables;

        match &self.allocation_strategy {
            InstanceAllocationStrategy::OnDemand => {
                // `try_new` makes the allocation of the box itself fallible.
                // The leading underscore on `_allocator` avoids an
                // unused-mut warning when the `async` feature (and thus the
                // mutation below) is compiled out.
                let mut _allocator = try_new::<Box<_>>(OnDemandInstanceAllocator::new(
                    self.mem_creator.clone(),
                    stack_size,
                    stack_zeroing,
                ))?;
                #[cfg(feature = "async")]
                if let Some(stack_creator) = &self.stack_creator {
                    _allocator.set_stack_creator(stack_creator.clone());
                }
                Ok(_allocator as _)
            }
            #[cfg(feature = "pooling-allocator")]
            InstanceAllocationStrategy::Pooling(config) => {
                // Splice the async-stack settings into a copy of the
                // user-provided pooling configuration before construction.
                let mut config = config.config;
                config.stack_size = stack_size;
                config.async_stack_zeroing = stack_zeroing;
                let allocator = try_new::<Box<_>>(
                    crate::runtime::vm::PoolingInstanceAllocator::new(&config, tunables)?,
                )?;
                Ok(allocator as _)
            }
        }
    }
2720
    /// Instantiates the configured garbage collector, or returns `None`
    /// when GC types are not enabled in this configuration.
    #[cfg(feature = "runtime")]
    pub(crate) fn build_gc_runtime(&self) -> Result<Option<Arc<dyn GcRuntime>>> {
        // No GC types in the enabled feature set means no collector is
        // needed at all.
        if !self.features().gc_types() {
            return Ok(None);
        }

        // GC types were requested, but this build carries no GC
        // implementation whatsoever.
        #[cfg(not(feature = "gc"))]
        bail!("cannot create a GC runtime: the `gc` feature was disabled at compile time");

        #[cfg(feature = "gc")]
        // When no concrete collector backend is compiled in, every arm of
        // the match below is an `unreachable!()` dummy, so expect (and
        // thereby silence) the resulting lint in that configuration.
        #[cfg_attr(
            not(any(feature = "gc-null", feature = "gc-drc", feature = "gc-copying")),
            expect(unreachable_code, reason = "definitions known to be dummy")
        )]
        {
            // `try_not_auto()` is expected to resolve `Collector::Auto` to a
            // concrete choice or error out, hence the `unreachable!()` for
            // the `Auto` arm below.
            Ok(Some(match self.collector.try_not_auto()? {
                #[cfg(feature = "gc-drc")]
                Collector::DeferredReferenceCounting => {
                    try_new::<Arc<_>>(crate::runtime::vm::DrcCollector::default())? as _
                }
                #[cfg(not(feature = "gc-drc"))]
                Collector::DeferredReferenceCounting => unreachable!(),

                #[cfg(feature = "gc-null")]
                Collector::Null => {
                    try_new::<Arc<_>>(crate::runtime::vm::NullCollector::default())? as _
                }
                #[cfg(not(feature = "gc-null"))]
                Collector::Null => unreachable!(),

                #[cfg(feature = "gc-copying")]
                Collector::Copying => {
                    try_new::<Arc<_>>(crate::runtime::vm::CopyingCollector::default())? as _
                }
                #[cfg(not(feature = "gc-copying"))]
                Collector::Copying => unreachable!(),

                Collector::Auto => unreachable!(),
            }))
        }
    }
2762
2763    #[cfg(feature = "runtime")]
2764    pub(crate) fn build_profiler(&self) -> Result<Box<dyn ProfilingAgent>> {
2765        Ok(match self.profiling_strategy {
2766            ProfilingStrategy::PerfMap => profiling_agent::new_perfmap()?,
2767            ProfilingStrategy::JitDump => profiling_agent::new_jitdump()?,
2768            ProfilingStrategy::VTune => profiling_agent::new_vtune()?,
2769            ProfilingStrategy::None => profiling_agent::new_null(),
2770            ProfilingStrategy::Pulley => profiling_agent::new_pulley()?,
2771        })
2772    }
2773
2774    #[cfg(any(feature = "cranelift", feature = "winch"))]
2775    pub(crate) fn build_compiler(
2776        mut self,
2777        tunables: &mut Tunables,
2778        features: WasmFeatures,
2779    ) -> Result<(Self, Box<dyn wasmtime_environ::Compiler>)> {
2780        let target = self.compiler_target();
2781
2782        // The target passed to the builders below is an `Option<Triple>` where
2783        // `None` represents the current host with CPU features inferred from
2784        // the host's CPU itself. The `target` above is not an `Option`, so
2785        // switch it to `None` in the case that a target wasn't explicitly
2786        // specified (which indicates no feature inference) and the target
2787        // matches the host.
2788        let target_for_builder =
2789            if self.target.is_none() && target == target_lexicon::Triple::host() {
2790                None
2791            } else {
2792                Some(target.clone())
2793            };
2794
2795        let mut compiler = match self.compiler_config_mut().strategy {
2796            #[cfg(feature = "cranelift")]
2797            Some(Strategy::Cranelift) => wasmtime_cranelift::builder(target_for_builder)?,
2798            #[cfg(not(feature = "cranelift"))]
2799            Some(Strategy::Cranelift) => bail!("cranelift support not compiled in"),
2800            #[cfg(feature = "winch")]
2801            Some(Strategy::Winch) => wasmtime_winch::builder(target_for_builder)?,
2802            #[cfg(not(feature = "winch"))]
2803            Some(Strategy::Winch) => bail!("winch support not compiled in"),
2804
2805            None | Some(Strategy::Auto) => unreachable!(),
2806        };
2807
2808        if let Some(path) = &self.compiler_config_mut().clif_dir {
2809            compiler.clif_dir(path)?;
2810        }
2811
2812        // If probestack is enabled for a target, Wasmtime will always use the
2813        // inline strategy which doesn't require us to define a `__probestack`
2814        // function or similar.
2815        self.compiler_config_mut()
2816            .settings
2817            .insert("probestack_strategy".into(), "inline".into());
2818
2819        // We enable stack probing by default on all targets.
2820        // This is required on Windows because of the way Windows
2821        // commits its stacks, but it's also a good idea on other
2822        // platforms to ensure guard pages are hit for large frame
2823        // sizes.
2824        self.compiler_config_mut()
2825            .flags
2826            .insert("enable_probestack".into());
2827
2828        // The current wasm multivalue implementation depends on this.
2829        // FIXME(#9510) handle this in wasmtime-cranelift instead.
2830        self.compiler_config_mut()
2831            .flags
2832            .insert("enable_multi_ret_implicit_sret".into());
2833
2834        if let Some(unwind_requested) = self.native_unwind_info {
2835            if !self
2836                .compiler_config_mut()
2837                .ensure_setting_unset_or_given("unwind_info", &unwind_requested.to_string())
2838            {
2839                bail!(
2840                    "incompatible settings requested for Cranelift and Wasmtime `unwind-info` settings"
2841                );
2842            }
2843        }
2844
2845        if target.operating_system == target_lexicon::OperatingSystem::Windows {
2846            if !self
2847                .compiler_config_mut()
2848                .ensure_setting_unset_or_given("unwind_info", "true")
2849            {
2850                bail!("`native_unwind_info` cannot be disabled on Windows");
2851            }
2852        }
2853
2854        // We require frame pointers for correct stack walking, which is safety
2855        // critical in the presence of reference types, and otherwise it is just
2856        // really bad developer experience to get wrong.
2857        self.compiler_config_mut()
2858            .settings
2859            .insert("preserve_frame_pointers".into(), "true".into());
2860
2861        if !tunables.signals_based_traps {
2862            let mut ok = self
2863                .compiler_config_mut()
2864                .ensure_setting_unset_or_given("enable_table_access_spectre_mitigation", "false");
2865            ok = ok
2866                && self.compiler_config_mut().ensure_setting_unset_or_given(
2867                    "enable_heap_access_spectre_mitigation",
2868                    "false",
2869                );
2870
2871            // Right now spectre-mitigated bounds checks will load from zero so
2872            // if host-based signal handlers are disabled then that's a mismatch
2873            // and doesn't work right now. Fixing this will require more thought
2874            // of how to implement the bounds check in spectre-only mode.
2875            if !ok {
2876                bail!(
2877                    "when signals-based traps are disabled then spectre \
2878                     mitigations must also be disabled"
2879                );
2880            }
2881        }
2882
2883        if features.contains(WasmFeatures::RELAXED_SIMD) && !features.contains(WasmFeatures::SIMD) {
2884            bail!("cannot disable the simd proposal but enable the relaxed simd proposal");
2885        }
2886
2887        if features.contains(WasmFeatures::STACK_SWITCHING) {
2888            use target_lexicon::OperatingSystem;
2889            let model = match target.operating_system {
2890                OperatingSystem::Windows => "update_windows_tib",
2891                OperatingSystem::Linux
2892                | OperatingSystem::MacOSX(_)
2893                | OperatingSystem::Darwin(_) => "basic",
2894                _ => bail!("stack-switching feature not supported on this platform "),
2895            };
2896
2897            if !self
2898                .compiler_config_mut()
2899                .ensure_setting_unset_or_given("stack_switch_model", model)
2900            {
2901                bail!(
2902                    "compiler option 'stack_switch_model' must be set to '{model}' on this platform"
2903                );
2904            }
2905        }
2906
2907        // Apply compiler settings and flags
2908        compiler.set_tunables(tunables.clone())?;
2909        for (k, v) in self.compiler_config_mut().settings.iter() {
2910            compiler.set(k, v)?;
2911        }
2912        for flag in self.compiler_config_mut().flags.iter() {
2913            compiler.enable(flag)?;
2914        }
2915        *tunables = compiler.tunables().cloned().unwrap();
2916
2917        #[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
2918        if let Some(cache_store) = &self.compiler_config_mut().cache_store {
2919            compiler.enable_incremental_compilation(cache_store.clone())?;
2920        }
2921
2922        compiler.wmemcheck(self.compiler_config_mut().wmemcheck);
2923
2924        Ok((self, compiler.build()?))
2925    }
2926
2927    /// Internal setting for whether adapter modules for components will have
2928    /// extra WebAssembly instructions inserted performing more debug checks
2929    /// then are necessary.
2930    #[cfg(feature = "component-model")]
2931    pub fn debug_adapter_modules(&mut self, debug: bool) -> &mut Self {
2932        self.tunables.debug_adapter_modules = Some(debug);
2933        self
2934    }
2935
2936    /// Enables clif output when compiling a WebAssembly module.
2937    #[cfg(any(feature = "cranelift", feature = "winch"))]
2938    pub fn emit_clif(&mut self, path: &Path) -> &mut Self {
2939        self.compiler_config_mut().clif_dir = Some(path.to_path_buf());
2940        self
2941    }
2942
2943    /// Configures whether, when on macOS, Mach ports are used for exception
2944    /// handling instead of traditional Unix-based signal handling.
2945    ///
2946    /// WebAssembly traps in Wasmtime are implemented with native faults, for
2947    /// example a `SIGSEGV` will occur when a WebAssembly guest accesses
2948    /// out-of-bounds memory. Handling this can be configured to either use Unix
2949    /// signals or Mach ports on macOS. By default Mach ports are used.
2950    ///
2951    /// Mach ports enable Wasmtime to work by default with foreign
2952    /// error-handling systems such as breakpad which also use Mach ports to
2953    /// handle signals. In this situation Wasmtime will continue to handle guest
2954    /// faults gracefully while any non-guest faults will get forwarded to
2955    /// process-level handlers such as breakpad. Some more background on this
2956    /// can be found in #2456.
2957    ///
2958    /// A downside of using mach ports, however, is that they don't interact
2959    /// well with `fork()`. Forking a Wasmtime process on macOS will produce a
2960    /// child process that cannot successfully run WebAssembly. In this
2961    /// situation traditional Unix signal handling should be used as that's
2962    /// inherited and works across forks.
2963    ///
2964    /// If your embedding wants to use a custom error handler which leverages
2965    /// Mach ports and you additionally wish to `fork()` the process and use
2966    /// Wasmtime in the child process that's not currently possible. Please
2967    /// reach out to us if you're in this bucket!
2968    ///
2969    /// This option defaults to `true`, using Mach ports by default.
2970    pub fn macos_use_mach_ports(&mut self, mach_ports: bool) -> &mut Self {
2971        self.macos_use_mach_ports = mach_ports;
2972        self
2973    }
2974
2975    /// Configures an embedder-provided function, `detect`, which is used to
2976    /// determine if an ISA-specific feature is available on the current host.
2977    ///
2978    /// This function is used to verify that any features enabled for a compiler
2979    /// backend, such as AVX support on x86\_64, are also available on the host.
2980    /// It is undefined behavior to execute an AVX instruction on a host that
2981    /// doesn't support AVX instructions, for example.
2982    ///
2983    /// When the `std` feature is active on this crate then this function is
2984    /// configured to a default implementation that uses the standard library's
2985    /// feature detection. When the `std` feature is disabled then there is no
2986    /// default available and this method must be called to configure a feature
2987    /// probing function.
2988    ///
2989    /// The `detect` function provided is given a string name of an ISA feature.
2990    /// The function should then return:
2991    ///
2992    /// * `Some(true)` - indicates that the feature was found on the host and it
2993    ///   is supported.
2994    /// * `Some(false)` - the feature name was recognized but it was not
2995    ///   detected on the host, for example the CPU is too old.
2996    /// * `None` - the feature name was not recognized and it's not known
2997    ///   whether it's on the host or not.
2998    ///
2999    /// Feature names passed to `detect` match the same feature name used in the
3000    /// Rust standard library. For example `"sse4.2"` is used on x86\_64.
3001    ///
3002    /// # Unsafety
3003    ///
3004    /// This function is `unsafe` because it is undefined behavior to execute
3005    /// instructions that a host does not support. This means that the result of
3006    /// `detect` must be correct for memory safe execution at runtime.
3007    pub unsafe fn detect_host_feature(&mut self, detect: fn(&str) -> Option<bool>) -> &mut Self {
3008        self.detect_host_feature = Some(detect);
3009        self
3010    }
3011
3012    /// Configures Wasmtime to not use signals-based trap handlers, for example
3013    /// disables `SIGILL` and `SIGSEGV` handler registration on Unix platforms.
3014    ///
3015    /// > **Note:** this option has important performance ramifications, be sure
3016    /// > to understand the implications. Wasm programs have been measured to
3017    /// > run up to 2x slower when signals-based traps are disabled.
3018    ///
3019    /// Wasmtime will by default leverage signals-based trap handlers (or the
3020    /// platform equivalent, for example "vectored exception handlers" on
3021    /// Windows) to make generated code more efficient. For example, when
3022    /// Wasmtime can use signals-based traps, it can elide explicit bounds
3023    /// checks for Wasm linear memory accesses, instead relying on virtual
3024    /// memory guard pages to raise a `SIGSEGV` (on Unix) for out-of-bounds
3025    /// accesses, which Wasmtime's runtime then catches and handles. Another
3026    /// example is divide-by-zero: with signals-based traps, Wasmtime can let
3027    /// the hardware raise a trap when the divisor is zero. Without
3028    /// signals-based traps, Wasmtime must explicitly emit additional
3029    /// instructions to check for zero and conditionally branch to a trapping
3030    /// code path.
3031    ///
3032    /// Some environments however may not have access to signal handlers. For
3033    /// example embedded scenarios may not support virtual memory. Other
3034    /// environments where Wasmtime is embedded within the surrounding
3035    /// environment may require that new signal handlers aren't registered due
3036    /// to the global nature of signal handlers. This option exists to disable
3037    /// the signal handler registration when required for these scenarios.
3038    ///
3039    /// When signals-based trap handlers are disabled, then Wasmtime and its
3040    /// generated code will *never* rely on segfaults or other
3041    /// signals. Generated code will be slower because bounds must be explicitly
3042    /// checked along with other conditions like division by zero.
3043    ///
3044    /// The following additional factors can also affect Wasmtime's ability to
3045    /// elide explicit bounds checks and leverage signals-based traps:
3046    ///
3047    /// * The [`Config::memory_reservation`] and [`Config::memory_guard_size`]
3048    ///   settings
3049    /// * The index type of the linear memory (e.g. 32-bit or 64-bit)
3050    /// * The page size of the linear memory
3051    ///
3052    /// When this option is disabled, the
3053    /// `enable_heap_access_spectre_mitigation` and
3054    /// `enable_table_access_spectre_mitigation` Cranelift settings must also be
3055    /// disabled. This means that generated code must have spectre mitigations
3056    /// disabled. This is because spectre mitigations rely on faults from
3057    /// loading from the null address to implement bounds checks.
3058    ///
3059    /// This option defaults to `true`: signals-based trap handlers are enabled
3060    /// by default.
3061    ///
3062    /// > **Note:** Disabling this option is not compatible with the Winch
3063    /// > compiler.
3064    pub fn signals_based_traps(&mut self, enable: bool) -> &mut Self {
3065        self.tunables.signals_based_traps = Some(enable);
3066        self
3067    }
3068
3069    /// Enable/disable GC support in Wasmtime entirely.
3070    ///
3071    /// This flag can be used to gate whether GC infrastructure is enabled or
3072    /// initialized in Wasmtime at all. Wasmtime's GC implementation is required
3073    /// for the [`Self::wasm_gc`] proposal, [`Self::wasm_function_references`],
3074    /// and [`Self::wasm_exceptions`] at this time. None of those proposal can
3075    /// be enabled without also having this option enabled.
3076    ///
3077    /// This option defaults to whether the crate `gc` feature is enabled or
3078    /// not.
3079    pub fn gc_support(&mut self, enable: bool) -> &mut Self {
3080        self.wasm_features(WasmFeatures::GC_TYPES, enable)
3081    }
3082
3083    /// Explicitly indicate or not whether the host is using a hardware float
3084    /// ABI on x86 targets.
3085    ///
3086    /// This configuration option is only applicable on the
3087    /// `x86_64-unknown-none` Rust target and has no effect on other host
3088    /// targets. The `x86_64-unknown-none` Rust target does not support hardware
3089    /// floats by default and uses a "soft float" implementation and ABI. This
3090    /// means that `f32`, for example, is passed in a general-purpose register
3091    /// between functions instead of a floating-point register. This does not
3092    /// match Cranelift's ABI for `f32` where it's passed in floating-point
3093    /// registers.  Cranelift does not have support for a "soft float"
3094    /// implementation where all floating-point operations are lowered to
3095    /// libcalls.
3096    ///
3097    /// This means that for the `x86_64-unknown-none` target the ABI between
3098    /// Wasmtime's libcalls and the host is incompatible when floats are used.
3099    /// This further means that, by default, Wasmtime is unable to load native
3100    /// code when compiled to the `x86_64-unknown-none` target. The purpose of
3101    /// this option is to explicitly allow loading code and bypass this check.
3102    ///
3103    /// Setting this configuration option to `true` indicates that either:
3104    /// (a) the Rust target is compiled with the hard-float ABI manually via
3105    /// `-Zbuild-std` and a custom target JSON configuration, or (b) sufficient
3106    /// x86 features have been enabled in the compiler such that float libcalls
3107    /// will not be used in Wasmtime. For (a) there is no way in Rust at this
3108    /// time to detect whether a hard-float or soft-float ABI is in use on
3109    /// stable Rust, so this manual opt-in is required. For (b) the only
3110    /// instance where Wasmtime passes a floating-point value in a register
3111    /// between the host and compiled wasm code is with libcalls.
3112    ///
3113    /// Float-based libcalls are only used when the compilation target for a
3114    /// wasm module has insufficient target features enabled for native
3115    /// support. For example SSE4.1 is required for the `f32.ceil` WebAssembly
3116    /// instruction to be compiled to a native instruction. If SSE4.1 is not
3117    /// enabled then `f32.ceil` is translated to a "libcall" which is
3118    /// implemented on the host. Float-based libcalls can be avoided with
3119    /// sufficient target features enabled, for example:
3120    ///
3121    /// * `self.cranelift_flag_enable("has_sse3")`
3122    /// * `self.cranelift_flag_enable("has_ssse3")`
3123    /// * `self.cranelift_flag_enable("has_sse41")`
3124    /// * `self.cranelift_flag_enable("has_sse42")`
3125    /// * `self.cranelift_flag_enable("has_fma")`
3126    ///
3127    /// Note that when these features are enabled Wasmtime will perform a
3128    /// runtime check to determine that the host actually has the feature
3129    /// present.
3130    ///
3131    /// For some more discussion see [#11506].
3132    ///
3133    /// [#11506]: https://github.com/bytecodealliance/wasmtime/issues/11506
3134    ///
3135    /// # Safety
3136    ///
3137    /// This method is not safe because it cannot be detected in Rust right now
3138    /// whether the host is compiled with a soft or hard float ABI. Additionally
3139    /// if the host is compiled with a soft float ABI disabling this check does
3140    /// not ensure that the wasm module in question has zero usage of floats
3141    /// in the boundary to the host.
3142    ///
3143    /// Safely using this method requires one of:
3144    ///
3145    /// * The host target is compiled to use hardware floats.
3146    /// * Wasm modules loaded are compiled with enough x86 Cranelift features
3147    ///   enabled to avoid float-related hostcalls.
3148    pub unsafe fn x86_float_abi_ok(&mut self, enable: bool) -> &mut Self {
3149        self.x86_float_abi_ok = Some(enable);
3150        self
3151    }
3152
3153    /// Enable or disable the ability to create a
3154    /// [`SharedMemory`](crate::SharedMemory).
3155    ///
3156    /// The WebAssembly threads proposal, configured by [`Config::wasm_threads`]
3157    /// is on-by-default but there are enough deficiencies in Wasmtime's
3158    /// implementation and API integration that creation of a shared memory is
3159    /// disabled by default. This configuration knob can be used to enable this.
3160    ///
3161    /// When enabling this method be aware that wasm threads are, at this time,
3162    /// a [tier 2
3163    /// feature](https://docs.wasmtime.dev/stability-tiers.html#tier-2) in
3164    /// Wasmtime meaning that it will not receive security updates or fixes to
3165    /// historical releases. Additionally security CVEs will not be issued for
3166    /// bugs in the implementation.
3167    ///
3168    /// This option is `false` by default.
3169    pub fn shared_memory(&mut self, enable: bool) -> &mut Self {
3170        self.shared_memory = enable;
3171        self
3172    }
3173
3174    /// Specifies whether support for concurrent execution of WebAssembly is
3175    /// supported within this store.
3176    ///
3177    /// This configuration option affects whether runtime data structures are
3178    /// initialized within a `Store` on creation to support concurrent execution
3179    /// of WebAssembly guests. This is primarily applicable to the
3180    /// [`Config::wasm_component_model_async`] configuration which is the first
3181    /// time Wasmtime has supported concurrent execution of guests. This
3182    /// configuration option, for example, enables usage of
3183    /// [`Store::run_concurrent`], [`Func::call_concurrent`], [`StreamReader`],
3184    /// etc.
3185    ///
3186    /// This configuration option can be manually disabled to avoid initializing
3187    /// data structures in the [`Store`] related to concurrent execution. When
3188    /// this option is disabled then APIs related to concurrency will all fail
3189    /// with a panic. For example [`Store::run_concurrent`] will panic, creating
3190    /// a [`StreamReader`] will panic, etc.
3191    ///
3192    /// The value of this option additionally affects whether a [`Config`] is
3193    /// valid and the default set of enabled WebAssembly features. If this
3194    /// option is disabled then component-model features related to concurrency
3195    /// will all be disabled. If this option is enabled, then the options will
3196    /// retain their normal defaults. It is not valid to create a [`Config`]
3197    /// with component-model-async explicitly enabled and this option explicitly
3198    /// disabled, however.
3199    ///
3200    /// This option defaults to `true`.
3201    ///
3202    /// [`Store`]: crate::Store
3203    /// [`Store::run_concurrent`]: crate::Store::run_concurrent
3204    /// [`Func::call_concurrent`]: crate::component::Func::call_concurrent
3205    /// [`StreamReader`]: crate::component::StreamReader
3206    pub fn concurrency_support(&mut self, enable: bool) -> &mut Self {
3207        self.tunables.concurrency_support = Some(enable);
3208        self
3209    }
3210
3211    /// Validate if the current configuration has conflicting overrides that prevent
3212    /// execution determinism. Returns an error if a conflict exists.
3213    ///
3214    /// Note: Keep this in sync with [`Config::enforce_determinism`].
3215    #[inline]
3216    #[cfg(feature = "rr")]
3217    pub(crate) fn validate_rr_determinism_conflicts(&self) -> Result<()> {
3218        if let Some(v) = self.tunables.relaxed_simd_deterministic {
3219            if v == false {
3220                bail!("Relaxed deterministic SIMD cannot be disabled when determinism is enforced");
3221            }
3222        }
3223        #[cfg(any(feature = "cranelift", feature = "winch"))]
3224        if let Some(v) = self
3225            .compiler_config
3226            .as_ref()
3227            .and_then(|c| c.settings.get("enable_nan_canonicalization"))
3228        {
3229            if v != "true" {
3230                bail!("NaN canonicalization cannot be disabled when determinism is enforced");
3231            }
3232        }
3233        Ok(())
3234    }
3235
3236    /// Enable execution trace recording or replaying to the configuration.
3237    ///
3238    /// When either recording/replaying are enabled, validation fails if settings
3239    /// that control determinism are not set appropriately. In particular, RR requires
3240    /// doing the following:
3241    /// * Enabling NaN canonicalization with [`Config::cranelift_nan_canonicalization`].
3242    /// * Enabling deterministic relaxed SIMD with [`Config::relaxed_simd_deterministic`].
3243    #[inline]
3244    pub fn rr(&mut self, cfg: RRConfig) -> &mut Self {
3245        self.rr_config = cfg;
3246        self
3247    }
3248}
3249
3250impl Default for Config {
3251    fn default() -> Config {
3252        Config::new()
3253    }
3254}
3255
3256impl fmt::Debug for Config {
3257    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
3258        let mut f = f.debug_struct("Config");
3259
3260        // Not every flag in WasmFeatures can be enabled as part of creating
3261        // a Config. This impl gives a complete picture of all WasmFeatures
3262        // enabled, and doesn't require maintenance by hand (which has become out
3263        // of date in the past), at the cost of possible confusion for why
3264        // a flag in this set doesn't have a Config setter.
3265        let features = self.features();
3266        for flag in WasmFeatures::FLAGS.iter() {
3267            f.field(
3268                &format!("wasm_{}", flag.name().to_lowercase()),
3269                &features.contains(*flag.value()),
3270            );
3271        }
3272
3273        f.field("parallel_compilation", &self.parallel_compilation);
3274        #[cfg(any(feature = "cranelift", feature = "winch"))]
3275        {
3276            f.field("compiler_config", &self.compiler_config);
3277        }
3278
3279        self.tunables.format(&mut f);
3280        f.finish()
3281    }
3282}
3283
/// Possible Compilation strategies for a wasm module.
///
/// This is used as an argument to the [`Config::strategy`] method.
///
/// This enum is marked `#[non_exhaustive]`, so additional strategies may
/// be added in future releases.
#[non_exhaustive]
#[derive(PartialEq, Eq, Clone, Debug, Copy)]
pub enum Strategy {
    /// An indicator that the compilation strategy should be automatically
    /// selected.
    ///
    /// This is generally what you want for most projects and indicates that the
    /// `wasmtime` crate itself should make the decision about what the best
    /// code generator for a wasm module is.
    ///
    /// Currently this always defaults to Cranelift, but the default value may
    /// change over time.
    Auto,

    /// Currently the default backend, Cranelift aims to be a reasonably fast
    /// code generator which generates high quality machine code.
    Cranelift,

    /// A low-latency baseline compiler for WebAssembly.
    ///
    /// For more details regarding ISA support and Wasm proposals support
    /// see <https://docs.wasmtime.dev/stability-tiers.html#current-tier-status>
    Winch,
}
3310
#[cfg(any(feature = "winch", feature = "cranelift"))]
impl Strategy {
    /// Resolves `Strategy::Auto` to a concrete compilation strategy.
    ///
    /// Non-`Auto` values are returned unchanged. `Auto` resolves to
    /// Cranelift when that backend is compiled in, otherwise Winch, and
    /// `None` when neither backend is available.
    fn not_auto(&self) -> Option<Strategy> {
        if !matches!(self, Strategy::Auto) {
            return Some(*self);
        }
        if cfg!(feature = "cranelift") {
            Some(Strategy::Cranelift)
        } else if cfg!(feature = "winch") {
            Some(Strategy::Winch)
        } else {
            None
        }
    }
}
3328
/// Possible garbage collector implementations for Wasm.
///
/// This is used as an argument to the [`Config::collector`] method.
///
/// The properties of Wasmtime's available collectors are summarized in the
/// following table:
///
/// | Collector                   | Collects Garbage[^1]  | Latency[^2] | Throughput[^3] | Allocation Speed[^4] | Heap Utilization[^5] |
/// |-----------------------------|-----------------------|-------------|----------------|----------------------|----------------------|
/// | `DeferredReferenceCounting` | Yes, but not cycles   | 🙂         | 🙁             | 😐                   | 😐                  |
/// | `Null`                      | No                    | 🙂         | 🙂             | 🙂                   | 🙂                  |
/// | `Copying`[^copying]         | Yes, including cycles | 🙁         | 🙂             | 🙂                   | 🙁                  |
///
/// [^1]: Whether or not the collector is capable of collecting garbage and cyclic garbage.
///
/// [^2]: How long the Wasm program is paused during garbage
///       collections. Shorter is better. In general, better latency implies
///       worse throughput and vice versa.
///
/// [^3]: How fast the Wasm program runs when using this collector. Roughly
///       equivalent to the number of Wasm instructions executed per
///       second. Faster is better. In general, better throughput implies worse
///       latency and vice versa.
///
/// [^4]: How fast can individual objects be allocated?
///
/// [^5]: How many objects can the collector fit into N bytes of memory? That
///       is, how much space for bookkeeping and metadata does this collector
///       require? Less space taken up by metadata means more space for
///       additional objects. Reference counts are larger than mark bits and
///       free lists are larger than bump pointers, for example.
///
/// [^copying]: The copying collector is still under construction and is not yet
///             functional.
#[non_exhaustive]
#[derive(PartialEq, Eq, Clone, Debug, Copy)]
pub enum Collector {
    /// An indicator that the garbage collector should be automatically
    /// selected.
    ///
    /// This is generally what you want for most projects and indicates that the
    /// `wasmtime` crate itself should make the decision about what the best
    /// collector for a wasm module is.
    ///
    /// Currently this always defaults to the deferred reference-counting
    /// collector, but the default value may change over time.
    Auto,

    /// The deferred reference-counting collector.
    ///
    /// A reference-counting collector, generally trading improved latency for
    /// worsened throughput. However, to avoid the largest overheads of
    /// reference counting, it avoids manipulating reference counts for Wasm
    /// objects on the stack. Instead, it will hold a reference count for an
    /// over-approximation of all objects that are currently on the stack, trace
    /// the stack during collection to find the precise set of on-stack roots,
    /// and decrement the reference count of any object that was in the
    /// over-approximation but not the precise set. This improves throughput,
    /// compared to "pure" reference counting, by performing many fewer
    /// refcount-increment and -decrement operations. The cost is the increased
    /// latency associated with tracing the stack.
    ///
    /// This collector cannot currently collect cycles; they will leak until the
    /// GC heap's store is dropped.
    DeferredReferenceCounting,

    /// The null collector.
    ///
    /// This collector does not actually collect any garbage. It simply
    /// allocates objects until it runs out of memory, at which point further
    /// object allocation attempts will trap.
    ///
    /// This collector is useful for incredibly short-running Wasm instances
    /// where additionally you would rather halt an over-allocating Wasm program
    /// than spend time collecting its garbage to allow it to keep running. It
    /// is also useful for measuring the overheads associated with other
    /// collectors, as this collector imposes as close to zero throughput and
    /// latency overhead as possible.
    Null,

    /// The copying collector.
    ///
    /// A tracing collector that splits the GC heap in half, bump-allocates
    /// objects in one half until it fills up, and then does a GC and copies
    /// live objects into the other half, and repeats the process. It has fast
    /// allocation, collects cyclic garbage, and good collection throughput,
    /// however it suffers from poor latency due to its stop-the-world
    /// collections and poor heap utilization due to only using half the GC
    /// heap's full capacity at any given time.
    ///
    /// Note that this collector is still under construction and is not yet
    /// functional.
    Copying,
}
3423
3424impl Default for Collector {
3425    fn default() -> Collector {
3426        Collector::Auto
3427    }
3428}
3429
#[cfg(feature = "gc")]
impl Collector {
    /// Resolves `Auto` to a concrete collector based on which GC features
    /// were enabled at compile time; any non-`Auto` value is returned
    /// unchanged. Returns `None` when no collector implementation is
    /// compiled in.
    fn not_auto(&self) -> Option<Collector> {
        match self {
            Collector::Auto => {
                // Prefer the deferred reference-counting collector, then fall
                // back to the null collector (mirrors the documented default
                // on `Collector::Auto`).
                if cfg!(feature = "gc-drc") {
                    Some(Collector::DeferredReferenceCounting)
                } else if cfg!(feature = "gc-null") {
                    Some(Collector::Null)
                } else {
                    None
                }
            }
            other => Some(*other),
        }
    }

    /// Like `not_auto` but returns an error instead of `None`, and also
    /// errors when the resolved collector's support was not compiled into
    /// this build.
    fn try_not_auto(&self) -> Result<Self> {
        // Each collector has a feature-gated `Ok` arm paired with a
        // complementary arm (compiled when the feature is absent) that
        // reports which Cargo feature is missing. Exactly one of each pair
        // exists in any given build.
        match self.not_auto() {
            #[cfg(feature = "gc-drc")]
            Some(c @ Collector::DeferredReferenceCounting) => Ok(c),
            #[cfg(not(feature = "gc-drc"))]
            Some(Collector::DeferredReferenceCounting) => bail!(
                "cannot create an engine using the deferred reference-counting \
                 collector because the `gc-drc` feature was not enabled at \
                 compile time",
            ),

            #[cfg(feature = "gc-null")]
            Some(c @ Collector::Null) => Ok(c),
            #[cfg(not(feature = "gc-null"))]
            Some(Collector::Null) => bail!(
                "cannot create an engine using the null collector because \
                 the `gc-null` feature was not enabled at compile time",
            ),

            #[cfg(feature = "gc-copying")]
            Some(c @ Collector::Copying) => Ok(c),
            #[cfg(not(feature = "gc-copying"))]
            Some(Collector::Copying) => bail!(
                "cannot create an engine using the copying collector because \
                 the `gc-copying` feature was not enabled at compile time",
            ),

            // `not_auto` never returns `Auto` itself.
            Some(Collector::Auto) => unreachable!(),

            None => bail!(
                "cannot create an engine with GC support when none of the \
                 collectors are available; enable one of the following \
                 features: `gc-drc`, `gc-null`, `gc-copying`",
            ),
        }
    }
}
3484
/// Possible optimization levels for the Cranelift codegen backend.
#[non_exhaustive]
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum OptLevel {
    /// Minimizes compilation time by disabling most optimizations.
    None,
    /// Generates the fastest possible code, but compilation may take longer.
    Speed,
    /// Like `Speed`, but additionally applies transformations aimed at
    /// reducing code size.
    SpeedAndSize,
}
3498
/// Possible register allocator algorithms for the Cranelift codegen backend.
#[non_exhaustive]
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum RegallocAlgorithm {
    /// Generates the fastest possible code, but may take longer.
    ///
    /// This is a "backtracking" allocator: it may undo earlier decisions and
    /// retry when it discovers conflicts. The payoff is better register
    /// utilization — fewer spills and moves — at the cost of potentially
    /// super-linear compile runtime.
    Backtracking,
    /// Generates acceptable code very quickly.
    ///
    /// This allocator makes a single pass over the code and is guaranteed to
    /// run in linear time (though the rest of Cranelift is not necessarily
    /// guaranteed to do so). Because it can neither revisit earlier decisions
    /// nor foresee constraints further ahead in the code, the generated code
    /// may contain more spills and moves.
    ///
    /// > **Note**: This algorithm is not yet production-ready and has
    /// > historically had known problems. It is not recommended to enable this
    /// > algorithm for security-sensitive applications and the Wasmtime project
    /// > does not consider this configuration option for issuing security
    /// > advisories at this time.
    SinglePass,
}
3527
/// Select which profiling technique to support.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum ProfilingStrategy {
    /// Profiler support is disabled.
    None,

    /// Collect function name information in the "perf map" file format, used
    /// with `perf` on Linux.
    PerfMap,

    /// Collect profiling info in the "jitdump" file format, used with `perf`
    /// on Linux.
    JitDump,

    /// Collect profiling info through "ittapi", used with `VTune` on Linux.
    VTune,

    /// Profile Pulley, Wasmtime's interpreter. Note that enabling this at
    /// runtime requires enabling the `profile-pulley` Cargo feature at
    /// compile time.
    Pulley,
}
3549
/// Select how wasm backtrace detailed information is handled.
#[derive(Debug, Clone, Copy)]
pub enum WasmBacktraceDetails {
    /// Backtrace details are unconditionally enabled; wasmtime parses and
    /// reads debug information.
    Enable,

    /// Backtrace details are disabled; wasmtime does not parse debug
    /// information.
    Disable,

    /// Backtrace details are enabled or disabled according to the
    /// `WASMTIME_BACKTRACE_DETAILS` environment variable.
    Environment,
}
3565
/// Describe the tri-state configuration of keys such as MPK or PAGEMAP_SCAN.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)]
pub enum Enabled {
    /// Turn the feature on when it's detected on the host system; otherwise
    /// leave it off.
    Auto,
    /// Require the feature: configuration fails when the feature is not
    /// detected on the host system.
    Yes,
    /// Leave the feature off, even when the host system supports it.
    No,
}
3578
/// Configuration options used with [`InstanceAllocationStrategy::Pooling`] to
/// change the behavior of the pooling instance allocator.
///
/// This structure has a builder-style API in the same manner as [`Config`] and
/// is configured with [`Config::allocation_strategy`].
///
/// Note that usage of the pooling allocator does not affect compiled
/// WebAssembly code. Compiled `*.cwasm` files, for example, are usable both
/// with and without the pooling allocator.
///
/// ## Advantages of Pooled Allocation
///
/// The main benefit of the pooling allocator is to make WebAssembly
/// instantiation both faster and more scalable in terms of parallelism.
/// Allocation is faster because virtual memory is already configured and ready
/// to go within the pool, there's no need to [`mmap`] (for example on Unix) a
/// new region and configure it with guard pages. By avoiding [`mmap`] this
/// avoids whole-process virtual memory locks which can improve scalability and
/// performance through avoiding this.
///
/// Additionally with pooled allocation it's possible to create "affine slots"
/// to a particular WebAssembly module or component over time. For example if
/// the same module is instantiated multiple times over time the pooling
/// allocator will, by default, attempt to reuse the same slot. This means that
/// the slot has been pre-configured and can retain virtual memory mappings for
/// a copy-on-write image, for example (see [`Config::memory_init_cow`] for
/// more information).
/// This means that in a steady state instance deallocation is a single
/// [`madvise`] to reset linear memory to its original contents followed by a
/// single (optional) [`mprotect`] during the next instantiation to shrink
/// memory back to its original size. Compared to non-pooled allocation this
/// avoids the need to [`mmap`] a new region of memory, [`munmap`] it, and
/// [`mprotect`] regions too.
///
/// Another benefit of pooled allocation is that it's possible to configure
/// things such that no virtual memory management is required at all in a steady
/// state. For example a pooling allocator can be configured with:
///
/// * [`Config::memory_init_cow`] disabled
/// * [`Config::memory_guard_size`] disabled
/// * [`Config::memory_reservation`] shrunk to minimal size
/// * [`PoolingAllocationConfig::table_keep_resident`] sufficiently large
/// * [`PoolingAllocationConfig::linear_memory_keep_resident`] sufficiently large
///
/// With all these options in place no virtual memory tricks are used at all and
/// everything is manually managed by Wasmtime (for example resetting memory is
/// a `memset(0)`). This is not as fast in a single-threaded scenario but can
/// provide benefits in high-parallelism situations as no virtual memory locks
/// or IPIs need to happen.
///
/// ## Disadvantages of Pooled Allocation
///
/// Despite the above advantages to instantiation performance the pooling
/// allocator is not enabled by default in Wasmtime. One reason is that the
/// performance advantages are not necessarily portable, for example while the
/// pooling allocator works on Windows it has not been tuned for performance on
/// Windows in the same way it has on Linux.
///
/// Additionally the main cost of the pooling allocator is that it requires a
/// very large reservation of virtual memory (on the order of most of the
/// addressable virtual address space). WebAssembly 32-bit linear memories in
/// Wasmtime are, by default 4G address space reservations with a small guard
/// region both before and after the linear memory. Memories in the pooling
/// allocator are contiguous which means that we only need a guard after linear
/// memory because the previous linear memory's slot post-guard is our own
/// pre-guard. This means that, by default, the pooling allocator uses roughly
/// 4G of virtual memory per WebAssembly linear memory slot. 4G of virtual
/// memory is 32 bits of a 64-bit address. Many 64-bit systems can only
/// actually use 48-bit addresses by default (although this can be extended on
/// architectures nowadays too), and of those 48 bits one of them is reserved
/// to indicate kernel-vs-userspace. This leaves 47-32=15 bits left,
/// meaning you can only have at most 32k slots of linear memories on many
/// systems by default. This is a relatively small number and shows how the
/// pooling allocator can quickly exhaust all of virtual memory.
///
/// Another disadvantage of the pooling allocator is that it may keep memory
/// alive when nothing is using it. A previously used slot for an instance might
/// have paged-in memory that will not get paged out until the
/// [`Engine`](crate::Engine) owning the pooling allocator is dropped. While
/// suitable for some applications this behavior may not be suitable for all
/// applications.
///
/// Finally the last disadvantage of the pooling allocator is that the
/// configuration values for the maximum number of instances, memories, tables,
/// etc, must all be fixed up-front. There's not always a clear answer as to
/// what these values should be so not all applications may be able to work
/// with this constraint.
///
/// [`madvise`]: https://man7.org/linux/man-pages/man2/madvise.2.html
/// [`mprotect`]: https://man7.org/linux/man-pages/man2/mprotect.2.html
/// [`mmap`]: https://man7.org/linux/man-pages/man2/mmap.2.html
/// [`munmap`]: https://man7.org/linux/man-pages/man2/munmap.2.html
#[cfg(feature = "pooling-allocator")]
#[derive(Debug, Clone, Default)]
pub struct PoolingAllocationConfig {
    config: crate::runtime::vm::PoolingInstanceAllocatorConfig,
}
3675
3676#[cfg(feature = "pooling-allocator")]
3677impl PoolingAllocationConfig {
3678    /// Returns a new configuration builder with all default settings
3679    /// configured.
3680    pub fn new() -> PoolingAllocationConfig {
3681        PoolingAllocationConfig::default()
3682    }
3683
3684    /// Configures the maximum number of "unused warm slots" to retain in the
3685    /// pooling allocator.
3686    ///
3687    /// The pooling allocator operates over slots to allocate from, and each
3688    /// slot is considered "cold" if it's never been used before or "warm" if
3689    /// it's been used by some module in the past. Slots in the pooling
3690    /// allocator additionally track an "affinity" flag to a particular core
3691    /// wasm module. When a module is instantiated into a slot then the slot is
3692    /// considered affine to that module, even after the instance has been
3693    /// deallocated.
3694    ///
3695    /// When a new instance is created then a slot must be chosen, and the
3696    /// current algorithm for selecting a slot is:
3697    ///
3698    /// * If there are slots that are affine to the module being instantiated,
3699    ///   then the most recently used slot is selected to be allocated from.
3700    ///   This is done to improve reuse of resources such as memory mappings and
3701    ///   additionally try to benefit from temporal locality for things like
3702    ///   caches.
3703    ///
3704    /// * Otherwise if there are more than N affine slots to other modules, then
3705    ///   one of those affine slots is chosen to be allocated. The slot chosen
3706    ///   is picked on a least-recently-used basis.
3707    ///
3708    /// * Finally, if there are less than N affine slots to other modules, then
3709    ///   the non-affine slots are allocated from.
3710    ///
3711    /// This setting, `max_unused_warm_slots`, is the value for N in the above
3712    /// algorithm. The purpose of this setting is to have a knob over the RSS
3713    /// impact of "unused slots" for a long-running wasm server.
3714    ///
3715    /// If this setting is set to 0, for example, then affine slots are
3716    /// aggressively reused on a least-recently-used basis. A "cold" slot is
3717    /// only used if there are no affine slots available to allocate from. This
3718    /// means that the set of slots used over the lifetime of a program is the
3719    /// same as the maximum concurrent number of wasm instances.
3720    ///
3721    /// If this setting is set to infinity, however, then cold slots are
3722    /// prioritized to be allocated from. This means that the set of slots used
3723    /// over the lifetime of a program will approach
3724    /// [`PoolingAllocationConfig::total_memories`], or the maximum number of
3725    /// slots in the pooling allocator.
3726    ///
3727    /// Wasmtime does not aggressively decommit all resources associated with a
3728    /// slot when the slot is not in use. For example the
3729    /// [`PoolingAllocationConfig::linear_memory_keep_resident`] option can be
3730    /// used to keep memory associated with a slot, even when it's not in use.
3731    /// This means that the total set of used slots in the pooling instance
3732    /// allocator can impact the overall RSS usage of a program.
3733    ///
3734    /// The default value for this option is `100`.
3735    pub fn max_unused_warm_slots(&mut self, max: u32) -> &mut Self {
3736        self.config.max_unused_warm_slots = max;
3737        self
3738    }
3739
3740    /// The target number of decommits to do per batch.
3741    ///
3742    /// This is not precise, as we can queue up decommits at times when we
3743    /// aren't prepared to immediately flush them, and so we may go over this
3744    /// target size occasionally.
3745    ///
3746    /// A batch size of one effectively disables batching.
3747    ///
3748    /// Defaults to `1`.
3749    pub fn decommit_batch_size(&mut self, batch_size: usize) -> &mut Self {
3750        self.config.decommit_batch_size = batch_size;
3751        self
3752    }
3753
3754    /// How much memory, in bytes, to keep resident for async stacks allocated
3755    /// with the pooling allocator.
3756    ///
3757    /// When [`Config::async_stack_zeroing`] is enabled then Wasmtime will reset
3758    /// the contents of async stacks back to zero upon deallocation. This option
3759    /// can be used to perform the zeroing operation with `memset` up to a
3760    /// certain threshold of bytes instead of using system calls to reset the
3761    /// stack to zero.
3762    ///
3763    /// Note that when using this option the memory with async stacks will
3764    /// never be decommitted.
3765    #[cfg(feature = "async")]
3766    pub fn async_stack_keep_resident(&mut self, size: usize) -> &mut Self {
3767        self.config.async_stack_keep_resident = size;
3768        self
3769    }
3770
3771    /// How much memory, in bytes, to keep resident for each linear memory
3772    /// after deallocation.
3773    ///
3774    /// This option is only applicable on Linux and has no effect on other
3775    /// platforms.
3776    ///
3777    /// By default Wasmtime will use `madvise` to reset the entire contents of
3778    /// linear memory back to zero when a linear memory is deallocated. This
3779    /// option can be used to use `memset` instead to set memory back to zero
3780    /// which can, in some configurations, reduce the number of page faults
3781    /// taken when a slot is reused.
3782    pub fn linear_memory_keep_resident(&mut self, size: usize) -> &mut Self {
3783        self.config.linear_memory_keep_resident = size;
3784        self
3785    }
3786
3787    /// How much memory, in bytes, to keep resident for each table after
3788    /// deallocation.
3789    ///
3790    /// This option is only applicable on Linux and has no effect on other
3791    /// platforms.
3792    ///
3793    /// This option is the same as
3794    /// [`PoolingAllocationConfig::linear_memory_keep_resident`] except that it
3795    /// is applicable to tables instead.
3796    pub fn table_keep_resident(&mut self, size: usize) -> &mut Self {
3797        self.config.table_keep_resident = size;
3798        self
3799    }
3800
3801    /// The maximum number of concurrent component instances supported (default
3802    /// is `1000`).
3803    ///
3804    /// This provides an upper-bound on the total size of component
3805    /// metadata-related allocations, along with
3806    /// [`PoolingAllocationConfig::max_component_instance_size`]. The upper bound is
3807    ///
3808    /// ```text
3809    /// total_component_instances * max_component_instance_size
3810    /// ```
3811    ///
3812    /// where `max_component_instance_size` is rounded up to the size and alignment
3813    /// of the internal representation of the metadata.
3814    pub fn total_component_instances(&mut self, count: u32) -> &mut Self {
3815        self.config.limits.total_component_instances = count;
3816        self
3817    }
3818
3819    /// The maximum size, in bytes, allocated for a component instance's
3820    /// `VMComponentContext` metadata as well as the aggregate size of this
3821    /// component's core instances `VMContext` metadata.
3822    ///
3823    /// The [`wasmtime::component::Instance`][crate::component::Instance] type
3824    /// has a static size but its internal `VMComponentContext` is dynamically
3825    /// sized depending on the component being instantiated. This size limit
3826    /// loosely correlates to the size of the component, taking into account
3827    /// factors such as:
3828    ///
3829    /// * number of lifted and lowered functions,
3830    /// * number of memories
3831    /// * number of inner instances
3832    /// * number of resources
3833    ///
3834    /// If the allocated size per instance is too small then instantiation of a
3835    /// module will fail at runtime with an error indicating how many bytes were
3836    /// needed.
3837    ///
3838    /// In addition to the memory in the runtime for the component itself,
3839    /// components contain one or more core module instances. Each of these
3840    /// require some memory in the runtime as described in
3841    /// [`PoolingAllocationConfig::max_core_instance_size`]. The limit here
3842    /// applies against the sum of all of these individual allocations.
3843    ///
3844    /// The default value for this is 1MiB.
3845    ///
3846    /// This provides an upper-bound on the total size of all component's
3847    /// metadata-related allocations (for both the component and its embedded
3848    /// core module instances), along with
3849    /// [`PoolingAllocationConfig::total_component_instances`]. The upper bound is
3850    ///
3851    /// ```text
3852    /// total_component_instances * max_component_instance_size
3853    /// ```
3854    ///
3855    /// where `max_component_instance_size` is rounded up to the size and alignment
3856    /// of the internal representation of the metadata.
3857    pub fn max_component_instance_size(&mut self, size: usize) -> &mut Self {
3858        self.config.limits.component_instance_size = size;
3859        self
3860    }
3861
3862    /// The maximum number of core instances a single component may contain
3863    /// (default is unlimited).
3864    ///
3865    /// This method (along with
3866    /// [`PoolingAllocationConfig::max_memories_per_component`],
3867    /// [`PoolingAllocationConfig::max_tables_per_component`], and
3868    /// [`PoolingAllocationConfig::max_component_instance_size`]) allows you to cap
3869    /// the amount of resources a single component allocation consumes.
3870    ///
3871    /// If a component will instantiate more core instances than `count`, then
3872    /// the component will fail to instantiate.
3873    pub fn max_core_instances_per_component(&mut self, count: u32) -> &mut Self {
3874        self.config.limits.max_core_instances_per_component = count;
3875        self
3876    }
3877
3878    /// The maximum number of Wasm linear memories that a single component may
3879    /// transitively contain (default is unlimited).
3880    ///
3881    /// This method (along with
3882    /// [`PoolingAllocationConfig::max_core_instances_per_component`],
3883    /// [`PoolingAllocationConfig::max_tables_per_component`], and
3884    /// [`PoolingAllocationConfig::max_component_instance_size`]) allows you to cap
3885    /// the amount of resources a single component allocation consumes.
3886    ///
3887    /// If a component transitively contains more linear memories than `count`,
3888    /// then the component will fail to instantiate.
3889    pub fn max_memories_per_component(&mut self, count: u32) -> &mut Self {
3890        self.config.limits.max_memories_per_component = count;
3891        self
3892    }
3893
    /// The maximum number of tables that a single component may transitively
    /// contain (default is unlimited).
    ///
    /// This method (along with
    /// [`PoolingAllocationConfig::max_core_instances_per_component`],
    /// [`PoolingAllocationConfig::max_memories_per_component`],
    /// [`PoolingAllocationConfig::max_component_instance_size`]) allows you to cap
    /// the amount of resources a single component allocation consumes.
    ///
    /// If a component will transitively contain more tables than `count`, then
    /// the component will fail to instantiate.
    pub fn max_tables_per_component(&mut self, count: u32) -> &mut Self {
        self.config.limits.max_tables_per_component = count;
        self
    }
3909
3910    /// The maximum number of concurrent Wasm linear memories supported (default
3911    /// is `1000`).
3912    ///
3913    /// This value has a direct impact on the amount of memory allocated by the pooling
3914    /// instance allocator.
3915    ///
3916    /// The pooling instance allocator allocates a memory pool, where each entry
3917    /// in the pool contains the reserved address space for each linear memory
3918    /// supported by an instance.
3919    ///
3920    /// The memory pool will reserve a large quantity of host process address
3921    /// space to elide the bounds checks required for correct WebAssembly memory
3922    /// semantics. Even with 64-bit address spaces, the address space is limited
3923    /// when dealing with a large number of linear memories.
3924    ///
3925    /// For example, on Linux x86_64, the userland address space limit is 128
3926    /// TiB. That might seem like a lot, but each linear memory will *reserve* 6
3927    /// GiB of space by default.
3928    pub fn total_memories(&mut self, count: u32) -> &mut Self {
3929        self.config.limits.total_memories = count;
3930        self
3931    }
3932
3933    /// The maximum number of concurrent tables supported (default is `1000`).
3934    ///
3935    /// This value has a direct impact on the amount of memory allocated by the
3936    /// pooling instance allocator.
3937    ///
3938    /// The pooling instance allocator allocates a table pool, where each entry
3939    /// in the pool contains the space needed for each WebAssembly table
3940    /// supported by an instance (see `table_elements` to control the size of
3941    /// each table).
3942    pub fn total_tables(&mut self, count: u32) -> &mut Self {
3943        self.config.limits.total_tables = count;
3944        self
3945    }
3946
3947    /// The maximum number of execution stacks allowed for asynchronous
3948    /// execution, when enabled (default is `1000`).
3949    ///
3950    /// This value has a direct impact on the amount of memory allocated by the
3951    /// pooling instance allocator.
3952    #[cfg(feature = "async")]
3953    pub fn total_stacks(&mut self, count: u32) -> &mut Self {
3954        self.config.limits.total_stacks = count;
3955        self
3956    }
3957
3958    /// The maximum number of concurrent core instances supported (default is
3959    /// `1000`).
3960    ///
3961    /// This provides an upper-bound on the total size of core instance
3962    /// metadata-related allocations, along with
3963    /// [`PoolingAllocationConfig::max_core_instance_size`]. The upper bound is
3964    ///
3965    /// ```text
3966    /// total_core_instances * max_core_instance_size
3967    /// ```
3968    ///
3969    /// where `max_core_instance_size` is rounded up to the size and alignment of
3970    /// the internal representation of the metadata.
3971    pub fn total_core_instances(&mut self, count: u32) -> &mut Self {
3972        self.config.limits.total_core_instances = count;
3973        self
3974    }
3975
3976    /// The maximum size, in bytes, allocated for a core instance's `VMContext`
3977    /// metadata.
3978    ///
3979    /// The [`Instance`][crate::Instance] type has a static size but its
3980    /// `VMContext` metadata is dynamically sized depending on the module being
3981    /// instantiated. This size limit loosely correlates to the size of the Wasm
3982    /// module, taking into account factors such as:
3983    ///
3984    /// * number of functions
3985    /// * number of globals
3986    /// * number of memories
3987    /// * number of tables
3988    /// * number of function types
3989    ///
3990    /// If the allocated size per instance is too small then instantiation of a
3991    /// module will fail at runtime with an error indicating how many bytes were
3992    /// needed.
3993    ///
3994    /// The default value for this is 1MiB.
3995    ///
3996    /// This provides an upper-bound on the total size of core instance
3997    /// metadata-related allocations, along with
3998    /// [`PoolingAllocationConfig::total_core_instances`]. The upper bound is
3999    ///
4000    /// ```text
4001    /// total_core_instances * max_core_instance_size
4002    /// ```
4003    ///
4004    /// where `max_core_instance_size` is rounded up to the size and alignment of
4005    /// the internal representation of the metadata.
4006    pub fn max_core_instance_size(&mut self, size: usize) -> &mut Self {
4007        self.config.limits.core_instance_size = size;
4008        self
4009    }
4010
4011    /// The maximum number of defined tables for a core module (default is `1`).
4012    ///
4013    /// This value controls the capacity of the `VMTableDefinition` table in
4014    /// each instance's `VMContext` structure.
4015    ///
4016    /// The allocated size of the table will be `tables *
4017    /// sizeof(VMTableDefinition)` for each instance regardless of how many
4018    /// tables are defined by an instance's module.
4019    pub fn max_tables_per_module(&mut self, tables: u32) -> &mut Self {
4020        self.config.limits.max_tables_per_module = tables;
4021        self
4022    }
4023
4024    /// The maximum table elements for any table defined in a module (default is
4025    /// `20000`).
4026    ///
4027    /// If a table's minimum element limit is greater than this value, the
4028    /// module will fail to instantiate.
4029    ///
4030    /// If a table's maximum element limit is unbounded or greater than this
4031    /// value, the maximum will be `table_elements` for the purpose of any
4032    /// `table.grow` instruction.
4033    ///
4034    /// This value is used to reserve the maximum space for each supported
4035    /// table; table elements are pointer-sized in the Wasmtime runtime.
4036    /// Therefore, the space reserved for each instance is `tables *
4037    /// table_elements * sizeof::<*const ()>`.
4038    pub fn table_elements(&mut self, elements: usize) -> &mut Self {
4039        self.config.limits.table_elements = elements;
4040        self
4041    }
4042
4043    /// The maximum number of defined linear memories for a module (default is
4044    /// `1`).
4045    ///
4046    /// This value controls the capacity of the `VMMemoryDefinition` table in
4047    /// each core instance's `VMContext` structure.
4048    ///
4049    /// The allocated size of the table will be `memories *
4050    /// sizeof(VMMemoryDefinition)` for each core instance regardless of how
4051    /// many memories are defined by the core instance's module.
4052    pub fn max_memories_per_module(&mut self, memories: u32) -> &mut Self {
4053        self.config.limits.max_memories_per_module = memories;
4054        self
4055    }
4056
4057    /// The maximum byte size that any WebAssembly linear memory may grow to.
4058    ///
4059    /// This option defaults to 4 GiB meaning that for 32-bit linear memories
4060    /// there is no restrictions. 64-bit linear memories will not be allowed to
4061    /// grow beyond 4 GiB by default.
4062    ///
4063    /// If a memory's minimum size is greater than this value, the module will
4064    /// fail to instantiate.
4065    ///
4066    /// If a memory's maximum size is unbounded or greater than this value, the
4067    /// maximum will be `max_memory_size` for the purpose of any `memory.grow`
4068    /// instruction.
4069    ///
4070    /// This value is used to control the maximum accessible space for each
4071    /// linear memory of a core instance. This can be thought of as a simple
4072    /// mechanism like [`Store::limiter`](crate::Store::limiter) to limit memory
4073    /// at runtime. This value can also affect striping/coloring behavior when
4074    /// used in conjunction with
4075    /// [`memory_protection_keys`](PoolingAllocationConfig::memory_protection_keys).
4076    ///
4077    /// The virtual memory reservation size of each linear memory is controlled
4078    /// by the [`Config::memory_reservation`] setting and this method's
4079    /// configuration cannot exceed [`Config::memory_reservation`].
4080    pub fn max_memory_size(&mut self, bytes: usize) -> &mut Self {
4081        self.config.limits.max_memory_size = bytes;
4082        self
4083    }
4084
4085    /// Configures whether memory protection keys (MPK) should be used for more
4086    /// efficient layout of pool-allocated memories.
4087    ///
4088    /// When using the pooling allocator (see [`Config::allocation_strategy`],
4089    /// [`InstanceAllocationStrategy::Pooling`]), memory protection keys can
4090    /// reduce the total amount of allocated virtual memory by eliminating guard
4091    /// regions between WebAssembly memories in the pool. It does so by
4092    /// "coloring" memory regions with different memory keys and setting which
4093    /// regions are accessible each time executions switches from host to guest
4094    /// (or vice versa).
4095    ///
4096    /// Leveraging MPK requires configuring a smaller-than-default
4097    /// [`max_memory_size`](PoolingAllocationConfig::max_memory_size) to enable
4098    /// this coloring/striping behavior. For example embeddings might want to
4099    /// reduce the default 4G allowance to 128M.
4100    ///
4101    /// MPK is only available on Linux (called `pku` there) and recent x86
4102    /// systems; we check for MPK support at runtime by examining the `CPUID`
4103    /// register. This configuration setting can be in three states:
4104    ///
4105    /// - `auto`: if MPK support is available the guard regions are removed; if
4106    ///   not, the guard regions remain
4107    /// - `yes`: use MPK to eliminate guard regions; fail if MPK is not
4108    ///   supported
4109    /// - `no`: never use MPK
4110    ///
4111    /// By default this value is `no`, but may become `auto` in future
4112    /// releases.
4113    ///
4114    /// __WARNING__: this configuration options is still experimental--use at
4115    /// your own risk! MPK uses kernel and CPU features to protect memory
4116    /// regions; you may observe segmentation faults if anything is
4117    /// misconfigured.
4118    #[cfg(feature = "memory-protection-keys")]
4119    pub fn memory_protection_keys(&mut self, enable: Enabled) -> &mut Self {
4120        self.config.memory_protection_keys = enable;
4121        self
4122    }
4123
4124    /// Sets an upper limit on how many memory protection keys (MPK) Wasmtime
4125    /// will use.
4126    ///
4127    /// This setting is only applicable when
4128    /// [`PoolingAllocationConfig::memory_protection_keys`] is set to `enable`
4129    /// or `auto`. Configuring this above the HW and OS limits (typically 15)
4130    /// has no effect.
4131    ///
4132    /// If multiple Wasmtime engines are used in the same process, note that all
4133    /// engines will share the same set of allocated keys; this setting will
4134    /// limit how many keys are allocated initially and thus available to all
4135    /// other engines.
4136    #[cfg(feature = "memory-protection-keys")]
4137    pub fn max_memory_protection_keys(&mut self, max: usize) -> &mut Self {
4138        self.config.max_memory_protection_keys = max;
4139        self
4140    }
4141
    /// Check if memory protection keys (MPK) are available on the current host.
    ///
    /// This is a convenience method for determining MPK availability using the
    /// same method that [`Enabled::Auto`] does. See
    /// [`PoolingAllocationConfig::memory_protection_keys`] for more
    /// information.
    ///
    /// Note that this is a runtime probe of the current host; it does not
    /// depend on any settings configured on this builder.
    #[cfg(feature = "memory-protection-keys")]
    pub fn are_memory_protection_keys_available() -> bool {
        crate::runtime::vm::mpk::is_supported()
    }
4152
4153    /// The maximum number of concurrent GC heaps supported (default is `1000`).
4154    ///
4155    /// This value has a direct impact on the amount of memory allocated by the
4156    /// pooling instance allocator.
4157    ///
4158    /// The pooling instance allocator allocates a GC heap pool, where each
4159    /// entry in the pool contains the space needed for each GC heap used by a
4160    /// store.
4161    #[cfg(feature = "gc")]
4162    pub fn total_gc_heaps(&mut self, count: u32) -> &mut Self {
4163        self.config.limits.total_gc_heaps = count;
4164        self
4165    }
4166
4167    /// Configures whether the Linux-specific [`PAGEMAP_SCAN` ioctl][ioctl] is
4168    /// used to help reset linear memory.
4169    ///
4170    /// When [`Self::linear_memory_keep_resident`] or
4171    /// [`Self::table_keep_resident`] options are configured to nonzero values
4172    /// the default behavior is to `memset` the lowest addresses of a table or
4173    /// memory back to their original contents. With the `PAGEMAP_SCAN` ioctl on
4174    /// Linux this can be done to more intelligently scan for resident pages in
4175    /// the region and only reset those pages back to their original contents
4176    /// with `memset` rather than assuming the low addresses are all resident.
4177    ///
4178    /// This ioctl has the potential to provide a number of performance benefits
4179    /// in high-reuse and high concurrency scenarios. Notably this enables
4180    /// Wasmtime to scan the entire region of WebAssembly linear memory and
4181    /// manually reset memory back to its original contents, up to
4182    /// [`Self::linear_memory_keep_resident`] bytes, possibly skipping an
4183    /// `madvise` entirely. This can be more efficient by avoiding removing
4184    /// pages from the address space entirely and additionally ensuring that
4185    /// future use of the linear memory doesn't incur page faults as the pages
4186    /// remain resident.
4187    ///
4188    /// At this time this configuration option is still being evaluated as to
4189    /// how appropriate it is for all use cases. It currently defaults to
4190    /// `no` or disabled but may change to `auto`, enable if supported, in the
4191    /// future. This option is only supported on Linux and requires a kernel
4192    /// version of 6.7 or higher.
4193    ///
4194    /// [ioctl]: https://www.man7.org/linux/man-pages/man2/PAGEMAP_SCAN.2const.html
4195    pub fn pagemap_scan(&mut self, enable: Enabled) -> &mut Self {
4196        self.config.pagemap_scan = enable;
4197        self
4198    }
4199
    /// Tests whether [`Self::pagemap_scan`] is available or not on the host
    /// system.
    ///
    /// Delegates to the runtime's pooling-allocator support probe; per the
    /// documentation of [`Self::pagemap_scan`] this requires Linux with a
    /// kernel version of 6.7 or higher.
    pub fn is_pagemap_scan_available() -> bool {
        crate::runtime::vm::PoolingInstanceAllocatorConfig::is_pagemap_scan_available()
    }
4205}
4206
/// Best-effort runtime detection of whether the host CPU supports the named
/// target `feature`.
///
/// Returns `Some(true)`/`Some(false)` when the feature's presence could be
/// determined on this architecture, or `None` when the feature name is not
/// recognized here (or no detection is implemented for the host
/// architecture). Only compiled with the `std` feature since detection
/// relies on `std::arch` macros and OS support.
#[cfg(feature = "std")]
fn detect_host_feature(feature: &str) -> Option<bool> {
    #[cfg(target_arch = "aarch64")]
    {
        return match feature {
            "lse" => Some(std::arch::is_aarch64_feature_detected!("lse")),
            "paca" => Some(std::arch::is_aarch64_feature_detected!("paca")),
            "fp16" => Some(std::arch::is_aarch64_feature_detected!("fp16")),

            _ => None,
        };
    }

    // `is_s390x_feature_detected` is nightly only for now, so use the
    // STORE FACILITY LIST EXTENDED instruction as a temporary measure.
    #[cfg(target_arch = "s390x")]
    {
        // STFLE fills `facility_list` with one bit per hardware facility;
        // r0 carries the number of doublewords provided minus one on input.
        let mut facility_list: [u64; 4] = [0; 4];
        unsafe {
            core::arch::asm!(
                "stfle 0({})",
                in(reg_addr) facility_list.as_mut_ptr() ,
                inout("r0") facility_list.len() as u64 - 1 => _,
                options(nostack)
            );
        }
        let get_facility_bit = |n: usize| {
            // NOTE: bits are numbered from the left.
            facility_list[n / 64] & (1 << (63 - (n % 64))) != 0
        };

        return match feature {
            "mie3" => Some(get_facility_bit(61)),
            "mie4" => Some(get_facility_bit(84)),
            "vxrs_ext2" => Some(get_facility_bit(148)),
            "vxrs_ext3" => Some(get_facility_bit(198)),

            _ => None,
        };
    }

    #[cfg(target_arch = "riscv64")]
    {
        return match feature {
            // `is_riscv64_feature_detected` is not yet stable, so it cannot
            // be used here. For now lie and say all features are always
            // found, to keep tests working.
            _ => Some(true),
        };
    }

    #[cfg(target_arch = "x86_64")]
    {
        return match feature {
            "cmpxchg16b" => Some(std::is_x86_feature_detected!("cmpxchg16b")),
            "sse3" => Some(std::is_x86_feature_detected!("sse3")),
            "ssse3" => Some(std::is_x86_feature_detected!("ssse3")),
            "sse4.1" => Some(std::is_x86_feature_detected!("sse4.1")),
            "sse4.2" => Some(std::is_x86_feature_detected!("sse4.2")),
            "popcnt" => Some(std::is_x86_feature_detected!("popcnt")),
            "avx" => Some(std::is_x86_feature_detected!("avx")),
            "avx2" => Some(std::is_x86_feature_detected!("avx2")),
            "fma" => Some(std::is_x86_feature_detected!("fma")),
            "bmi1" => Some(std::is_x86_feature_detected!("bmi1")),
            "bmi2" => Some(std::is_x86_feature_detected!("bmi2")),
            "avx512bitalg" => Some(std::is_x86_feature_detected!("avx512bitalg")),
            "avx512dq" => Some(std::is_x86_feature_detected!("avx512dq")),
            "avx512f" => Some(std::is_x86_feature_detected!("avx512f")),
            "avx512vl" => Some(std::is_x86_feature_detected!("avx512vl")),
            "avx512vbmi" => Some(std::is_x86_feature_detected!("avx512vbmi")),
            "lzcnt" => Some(std::is_x86_feature_detected!("lzcnt")),

            _ => None,
        };
    }

    // Fallback for architectures with no detection implemented above: report
    // that nothing is known about the requested feature.
    #[allow(
        unreachable_code,
        reason = "reachable or not depending on if a target above matches"
    )]
    {
        let _ = feature;
        return None;
    }
}