// wasmtime/config.rs
1use crate::prelude::*;
2use alloc::sync::Arc;
3use bitflags::Flags;
4use core::fmt;
5use core::num::{NonZeroU32, NonZeroUsize};
6use core::str::FromStr;
7#[cfg(any(feature = "cranelift", feature = "winch"))]
8use std::path::Path;
9pub use wasmparser::WasmFeatures;
10use wasmtime_environ::{ConfigTunables, OperatorCost, OperatorCostStrategy, TripleExt, Tunables};
11
12#[cfg(feature = "runtime")]
13use crate::memory::MemoryCreator;
14#[cfg(feature = "runtime")]
15use crate::profiling_agent::{self, ProfilingAgent};
16#[cfg(feature = "runtime")]
17use crate::runtime::vm::{
18    GcRuntime, InstanceAllocator, OnDemandInstanceAllocator, RuntimeMemoryCreator,
19};
20#[cfg(feature = "runtime")]
21use crate::trampoline::MemoryCreatorProxy;
22
23#[cfg(feature = "async")]
24use crate::stack::{StackCreator, StackCreatorProxy};
25#[cfg(feature = "async")]
26use wasmtime_fiber::RuntimeFiberStackCreator;
27
28#[cfg(feature = "runtime")]
29pub use crate::runtime::code_memory::CustomCodeMemory;
30#[cfg(feature = "cache")]
31pub use wasmtime_cache::{Cache, CacheConfig};
32#[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
33pub use wasmtime_environ::CacheStore;
34pub use wasmtime_environ::Inlining;
35
/// Default maximum number of WebAssembly frames collected in backtraces; see
/// [`Config::wasm_backtrace_max_frames`] (whose docs state the default is 20).
pub(crate) const DEFAULT_WASM_BACKTRACE_MAX_FRAMES: NonZeroUsize = NonZeroUsize::new(20).unwrap();
37
/// Represents the module instance allocation strategy to use.
#[derive(Clone)]
#[non_exhaustive]
pub enum InstanceAllocationStrategy {
    /// The on-demand instance allocation strategy.
    ///
    /// Resources related to a module instance are allocated at instantiation time and
    /// immediately deallocated when the `Store` referencing the instance is dropped.
    ///
    /// This is the default allocation strategy for Wasmtime.
    OnDemand,
    /// The pooling instance allocation strategy.
    ///
    /// Only available when the `pooling-allocator` Cargo feature is enabled.
    ///
    /// A pool of resources is created in advance and module instantiation reuses resources
    /// from the pool. Resources are returned to the pool when the `Store` referencing the instance
    /// is dropped.
    ///
    /// When GC is enabled, the pooling allocator requires that the GC heap
    /// configuration matches the linear memory configuration (i.e.,
    /// `gc_heap_reservation` must equal `memory_reservation`, etc.). By
    /// default, if no `gc_heap_*` tunables are explicitly configured, they
    /// automatically inherit the `memory_*` values.
    #[cfg(feature = "pooling-allocator")]
    Pooling(PoolingAllocationConfig),
}
63
64impl InstanceAllocationStrategy {
65    /// The default pooling instance allocation strategy.
66    #[cfg(feature = "pooling-allocator")]
67    pub fn pooling() -> Self {
68        Self::Pooling(Default::default())
69    }
70}
71
72impl Default for InstanceAllocationStrategy {
73    fn default() -> Self {
74        Self::OnDemand
75    }
76}
77
#[cfg(feature = "pooling-allocator")]
impl From<PoolingAllocationConfig> for InstanceAllocationStrategy {
    /// Wraps a pooling configuration in the corresponding strategy variant.
    fn from(config: PoolingAllocationConfig) -> InstanceAllocationStrategy {
        Self::Pooling(config)
    }
}
84
#[derive(Clone)]
/// Configure the strategy used for versioning in serializing and deserializing [`crate::Module`].
///
/// The chosen strategy determines which version string (if any) is embedded in
/// serialized modules and accepted when deserializing them.
pub enum ModuleVersionStrategy {
    /// Use the wasmtime crate's Cargo package version.
    WasmtimeVersion,
    /// Use a custom version string. Must be at most 255 bytes.
    Custom(String),
    /// Emit no version string in serialization, and accept all version strings in deserialization.
    None,
}
95
96impl Default for ModuleVersionStrategy {
97    fn default() -> Self {
98        ModuleVersionStrategy::WasmtimeVersion
99    }
100}
101
102impl core::hash::Hash for ModuleVersionStrategy {
103    fn hash<H: core::hash::Hasher>(&self, hasher: &mut H) {
104        match self {
105            Self::WasmtimeVersion => env!("CARGO_PKG_VERSION").hash(hasher),
106            Self::Custom(s) => s.hash(hasher),
107            Self::None => {}
108        };
109    }
110}
111
112impl ModuleVersionStrategy {
113    /// Get the string-encoding version of the module.
114    pub fn as_str(&self) -> &str {
115        match &self {
116            Self::WasmtimeVersion => env!("CARGO_PKG_VERSION_MAJOR"),
117            Self::Custom(c) => c,
118            Self::None => "",
119        }
120    }
121}
122
/// Configuration for record/replay.
///
/// When the `rr` Cargo feature is not enabled, `None` is the only available
/// variant.
#[derive(Clone)]
#[non_exhaustive]
pub enum RRConfig {
    #[cfg(feature = "rr")]
    /// Recording on store is enabled
    Recording,
    #[cfg(feature = "rr")]
    /// Replaying on store is enabled
    Replaying,
    /// No record/replay is enabled
    None,
}
136
/// Global configuration options used to create an [`Engine`](crate::Engine)
/// and customize its behavior.
///
/// This structure exposes a builder-like interface and is primarily consumed by
/// [`Engine::new()`](crate::Engine::new).
///
/// The validation of `Config` is deferred until the engine is being built, thus
/// a problematic config may cause `Engine::new` to fail.
///
/// # Defaults
///
/// The `Default` trait implementation and the return value from
/// [`Config::new()`] are the same and represent the default set of
/// configuration for an engine. The exact set of defaults will differ based on
/// properties such as enabled Cargo features at compile time and the configured
/// target (see [`Config::target`]). Configuration options document their
/// default values and what the conditional value of the default is where
/// applicable.
#[derive(Clone)]
pub struct Config {
    /// Compiler settings; `None` when compilation has been disabled
    /// dynamically (see [`Config::enable_compiler`]).
    #[cfg(any(feature = "cranelift", feature = "winch"))]
    compiler_config: Option<CompilerConfig>,
    /// Compilation target triple; `None` means the host's triple is used.
    target: Option<target_lexicon::Triple>,
    /// Selected garbage collector implementation.
    #[cfg(feature = "gc")]
    collector: Collector,
    /// Which profiler (if any) the engine integrates with.
    profiling_strategy: ProfilingStrategy,
    /// Assorted compilation/runtime knobs forwarded to `wasmtime_environ`.
    tunables: ConfigTunables,

    /// Compilation cache, if configured.
    #[cfg(feature = "cache")]
    pub(crate) cache: Option<Cache>,
    /// Custom creator for linear memories, if any.
    #[cfg(feature = "runtime")]
    pub(crate) mem_creator: Option<Arc<dyn RuntimeMemoryCreator>>,
    /// Custom allocator for executable code memory, if any.
    #[cfg(feature = "runtime")]
    pub(crate) custom_code_memory: Option<Arc<dyn CustomCodeMemory>>,
    /// How module instances are allocated (on-demand or pooling).
    pub(crate) allocation_strategy: InstanceAllocationStrategy,
    /// Maximum stack space, in bytes, available to Wasm code; `Config::new`
    /// initializes this to 512 KiB.
    pub(crate) max_wasm_stack: usize,
    /// Explicitly enabled features via `Config::wasm_*` methods. This is a
    /// signal that the embedder specifically wants something turned on
    /// regardless of the defaults that Wasmtime might otherwise have enabled.
    ///
    /// Note that this, and `disabled_features` below, start as the empty set of
    /// features to only track explicit user requests.
    pub(crate) enabled_features: WasmFeatures,
    /// Same as `enabled_features`, but for those that are explicitly disabled.
    pub(crate) disabled_features: WasmFeatures,
    /// Whether the `WASMTIME_BACKTRACE_DETAILS` environment variable was
    /// consulted (see [`Config::wasm_backtrace_details`]).
    pub(crate) wasm_backtrace_details_env_used: bool,
    /// Maximum number of frames captured in Wasm backtraces; `None` disables
    /// backtrace capture entirely.
    pub(crate) wasm_backtrace_max_frames: Option<NonZeroUsize>,
    /// Whether native unwind info (e.g. `.eh_frame`) is generated; `None`
    /// means "use the default" (see [`Config::native_unwind_info`]).
    pub(crate) native_unwind_info: Option<bool>,
    /// Size, in bytes, of stacks created for async/stack-switching execution.
    #[cfg(any(feature = "async", feature = "stack-switching"))]
    pub(crate) async_stack_size: usize,
    /// Whether async fiber stacks are zeroed.
    #[cfg(feature = "async")]
    pub(crate) async_stack_zeroing: bool,
    /// Custom creator for fiber stacks, if any.
    #[cfg(feature = "async")]
    pub(crate) stack_creator: Option<Arc<dyn RuntimeFiberStackCreator>>,
    /// Versioning scheme used when serializing/deserializing modules.
    pub(crate) module_version: ModuleVersionStrategy,
    /// Whether compilation may be parallelized.
    pub(crate) parallel_compilation: bool,
    pub(crate) memory_guaranteed_dense_image_size: u64,
    pub(crate) force_memory_init_memfd: bool,
    pub(crate) wmemcheck: bool,
    /// Whether a core dump is attached when a trap occurs.
    #[cfg(feature = "coredump")]
    pub(crate) coredump_on_trap: bool,
    pub(crate) macos_use_mach_ports: bool,
    /// Host CPU feature probe; `None` when detection is unavailable
    /// (`Config::new` only installs one when `std` is enabled).
    pub(crate) detect_host_feature: Option<fn(&str) -> Option<bool>>,
    pub(crate) x86_float_abi_ok: Option<bool>,
    pub(crate) shared_memory: bool,
    /// Record/replay configuration (see [`RRConfig`]).
    pub(crate) rr_config: RRConfig,
}
204
/// User-provided configuration for the compiler.
#[cfg(any(feature = "cranelift", feature = "winch"))]
#[derive(Debug, Clone)]
struct CompilerConfig {
    /// Requested compilation strategy; `CompilerConfig::new` normalizes
    /// `Strategy::Auto` through `not_auto()`, so `None` here presumably means
    /// "choose automatically" — TODO confirm against `Strategy`'s definition.
    strategy: Option<Strategy>,
    /// Named key/value compiler settings (see `ensure_setting_unset_or_given`).
    settings: crate::hash_map::HashMap<String, String>,
    /// Boolean compiler flags that have been enabled.
    flags: crate::hash_set::HashSet<String>,
    /// Storage backend for Cranelift's incremental compilation cache.
    #[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
    cache_store: Option<Arc<dyn CacheStore>>,
    /// NOTE(review): presumably a directory for emitting CLIF output; the
    /// consumer is not in view — confirm before relying on this description.
    clif_dir: Option<std::path::PathBuf>,
    /// Whether wmemcheck instrumentation is enabled in the compiler.
    wmemcheck: bool,
}
217
#[cfg(any(feature = "cranelift", feature = "winch"))]
impl CompilerConfig {
    /// Creates the default compiler configuration: no explicit strategy,
    /// empty settings/flags, and no incremental cache.
    fn new() -> Self {
        Self {
            strategy: Strategy::Auto.not_auto(),
            settings: Default::default(),
            flags: Default::default(),
            #[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
            cache_store: None,
            clif_dir: None,
            wmemcheck: false,
        }
    }

    /// Ensures that the setting `k` is either unset or already equal to `v`;
    /// when unset it is inserted with the value `v`.
    ///
    /// # Returns
    ///
    /// `true` if the setting was inserted or already had the value `v`;
    /// `false` if it was previously set to something else.
    fn ensure_setting_unset_or_given(&mut self, k: &str, v: &str) -> bool {
        match self.settings.get(k) {
            Some(existing) => existing == v,
            None => {
                self.settings.insert(k.to_string(), v.to_string());
                true
            }
        }
    }
}
251
#[cfg(any(feature = "cranelift", feature = "winch"))]
impl Default for CompilerConfig {
    /// Equivalent to [`CompilerConfig::new`].
    fn default() -> Self {
        CompilerConfig::new()
    }
}
258
259impl Config {
    /// Creates a new configuration object with the default configuration
    /// specified.
    pub fn new() -> Self {
        let mut ret = Self {
            tunables: ConfigTunables::default(),
            #[cfg(any(feature = "cranelift", feature = "winch"))]
            compiler_config: Some(CompilerConfig::default()),
            target: None,
            #[cfg(feature = "gc")]
            collector: Collector::default(),
            #[cfg(feature = "cache")]
            cache: None,
            profiling_strategy: ProfilingStrategy::None,
            #[cfg(feature = "runtime")]
            mem_creator: None,
            #[cfg(feature = "runtime")]
            custom_code_memory: None,
            allocation_strategy: InstanceAllocationStrategy::OnDemand,
            // 512k of stack -- note that this is chosen currently to not be too
            // big, not be too small, and be a good default for most platforms.
            // One platform of particular note is Windows where the stack size
            // of the main thread seems to, by default, be smaller than that of
            // Linux and macOS. This 512k value at least lets our current test
            // suite pass on the main thread of Windows (using `--test-threads
            // 1` forces this), or at least it passed when this change was
            // committed.
            max_wasm_stack: 512 * 1024,
            wasm_backtrace_details_env_used: false,
            wasm_backtrace_max_frames: Some(DEFAULT_WASM_BACKTRACE_MAX_FRAMES),
            native_unwind_info: None,
            // Both feature sets start empty: they record explicit embedder
            // requests only, not Wasmtime's own defaults.
            enabled_features: WasmFeatures::empty(),
            disabled_features: WasmFeatures::empty(),
            #[cfg(any(feature = "async", feature = "stack-switching"))]
            async_stack_size: 2 << 20, // 2 MiB
            #[cfg(feature = "async")]
            async_stack_zeroing: false,
            #[cfg(feature = "async")]
            stack_creator: None,
            module_version: ModuleVersionStrategy::default(),
            parallel_compilation: !cfg!(miri), // serial when running under Miri
            memory_guaranteed_dense_image_size: 16 << 20, // 16 MiB
            force_memory_init_memfd: false,
            wmemcheck: false,
            #[cfg(feature = "coredump")]
            coredump_on_trap: false,
            macos_use_mach_ports: !cfg!(miri), // disabled under Miri
            // Host CPU feature detection is only available with `std`.
            #[cfg(feature = "std")]
            detect_host_feature: Some(detect_host_feature),
            #[cfg(not(feature = "std"))]
            detect_host_feature: None,
            x86_float_abi_ok: None,
            shared_memory: false,
            rr_config: RRConfig::None,
        };
        // The default backtrace-details behavior is read from the environment;
        // see `Config::wasm_backtrace_details`.
        ret.wasm_backtrace_details(WasmBacktraceDetails::Environment);
        ret
    }
317
    /// Returns whether this configuration still has a compiler, i.e. whether
    /// compilation has not been disabled (see `Config::enable_compiler` and
    /// `Config::without_compiler`).
    #[cfg(any(feature = "cranelift", feature = "winch"))]
    pub(crate) fn has_compiler(&self) -> bool {
        self.compiler_config.is_some()
    }
322
    /// Returns a mutable reference to the compiler configuration.
    ///
    /// # Panics
    ///
    /// Panics if compilation was disabled for this `Config` (e.g. the config
    /// was created by `Config::without_compiler`), since there is then no
    /// compiler to configure.
    #[track_caller]
    #[cfg(any(feature = "cranelift", feature = "winch"))]
    fn compiler_config_mut(&mut self) -> &mut CompilerConfig {
        self.compiler_config.as_mut().expect(
            "cannot configure compiler settings for `Config`s \
             created by `Config::without_compiler`",
        )
    }
331
332    /// Configure whether Wasm compilation is enabled.
333    ///
334    /// Disabling Wasm compilation will allow you to load and run
335    /// [pre-compiled][crate::Engine::precompile_module] Wasm programs, but not
336    /// to compile and run new Wasm programs that have not already been
337    /// pre-compiled.
338    ///
339    /// Many compilation-related configuration methods will panic if compilation
340    /// has been disabled.
341    ///
342    /// Note that there are two ways to disable Wasm compilation:
343    ///
344    /// 1. Statically, by disabling the `"cranelift"` and `"winch"` cargo
345    ///    features when building Wasmtime. These builds of Wasmtime will have
346    ///    smaller code size, since they do not include any of the code to
347    ///    compile Wasm.
348    ///
349    /// 2. Dynamically, by passing `false` to this method at run-time when
350    ///    configuring Wasmtime. The Wasmtime binary will still include the code
351    ///    for compiling Wasm, it just won't be executed, so code size is larger
352    ///    than with the first approach.
353    ///
354    /// The static approach is better in most cases, however dynamically calling
355    /// `enable_compiler(false)` is useful whenever you create multiple
356    /// `Engine`s in the same process, some of which must be able to compile
357    /// Wasm and some of which should never do so. Tests are a common example of
358    /// such a situation, especially when there are multiple Rust binaries in
359    /// the same cargo workspace, and cargo's feature resolution enables the
360    /// `"cranelift"` or `"winch"` features across the whole workspace.
361    #[cfg(any(feature = "cranelift", feature = "winch"))]
362    pub fn enable_compiler(&mut self, enable: bool) -> &mut Self {
363        match (enable, &self.compiler_config) {
364            (true, Some(_)) | (false, None) => {}
365            (true, None) => {
366                self.compiler_config = Some(CompilerConfig::default());
367            }
368            (false, Some(_)) => {
369                self.compiler_config = None;
370            }
371        }
372        self
373    }
374
375    /// Configures the target platform of this [`Config`].
376    ///
377    /// This method is used to configure the output of compilation in an
378    /// [`Engine`](crate::Engine). This can be used, for example, to
379    /// cross-compile from one platform to another. By default, the host target
380    /// triple is used meaning compiled code is suitable to run on the host.
381    ///
382    /// Note that the [`Module`](crate::Module) type can only be created if the
383    /// target configured here matches the host. Otherwise if a cross-compile is
384    /// being performed where the host doesn't match the target then
385    /// [`Engine::precompile_module`](crate::Engine::precompile_module) must be
386    /// used instead.
387    ///
388    /// Target-specific flags (such as CPU features) will not be inferred by
389    /// default for the target when one is provided here. This means that this
390    /// can also be used, for example, with the host architecture to disable all
391    /// host-inferred feature flags. Configuring target-specific flags can be
392    /// done with [`Config::cranelift_flag_set`] and
393    /// [`Config::cranelift_flag_enable`].
394    ///
395    /// # Errors
396    ///
397    /// This method will error if the given target triple is not supported.
398    pub fn target(&mut self, target: &str) -> Result<&mut Self> {
399        self.target =
400            Some(target_lexicon::Triple::from_str(target).map_err(|e| crate::format_err!(e))?);
401
402        Ok(self)
403    }
404
405    /// Enables the incremental compilation cache in Cranelift, using the provided `CacheStore`
406    /// backend for storage.
407    ///
408    /// # Panics
409    ///
410    /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
411    #[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
412    pub fn enable_incremental_compilation(
413        &mut self,
414        cache_store: Arc<dyn CacheStore>,
415    ) -> Result<&mut Self> {
416        self.compiler_config_mut().cache_store = Some(cache_store);
417        Ok(self)
418    }
419
    // Deprecated no-op retained for API compatibility; per the deprecation
    // note this method no longer has any effect.
    #[doc(hidden)]
    #[deprecated(note = "no longer has any effect")]
    #[cfg(feature = "async")]
    pub fn async_support(&mut self, _enable: bool) -> &mut Self {
        self
    }
426
    /// Configures whether DWARF debug information will be emitted
    /// during compilation for a native debugger on the Wasmtime
    /// process to consume.
    ///
    /// Note that the `debug-builtins` compile-time Cargo feature must also be
    /// enabled for native debuggers such as GDB or LLDB to be able to debug
    /// guest WebAssembly programs.
    ///
    /// By default this option is `false`.
    ///
    /// **Note** Enabling this option is not compatible with the Winch compiler.
    pub fn debug_info(&mut self, enable: bool) -> &mut Self {
        // Stored in the tunables and validated/applied at engine-build time.
        self.tunables.debug_native = Some(enable);
        self
    }
441
    /// Configures whether compiled guest code will be instrumented to
    /// provide debugging at the Wasm VM level.
    ///
    /// This is required in order to enable a guest-level debugging
    /// API that can precisely examine Wasm VM state and (eventually,
    /// once it is complete) set breakpoints and watchpoints and step
    /// through code.
    ///
    /// Without this enabled, debugging can only be done via a native
    /// debugger operating on the compiled guest code (see
    /// [`Config::debug_info`]) and is "best-effort": we may be able to
    /// recover some Wasm locals or operand stack values, but it is
    /// not guaranteed, even when optimizations are disabled.
    ///
    /// When this is enabled, additional instrumentation is inserted
    /// that directly tracks the Wasm VM state at every step. This has
    /// some performance impact, but allows perfect debugging
    /// fidelity.
    ///
    /// Breakpoints, watchpoints, and stepping are not yet supported,
    /// but will be added in a future version of Wasmtime.
    ///
    /// This enables use of the [`crate::FrameHandle`] API which is
    /// provided by [`crate::Caller::debug_exit_frames`] or
    /// [`crate::Store::debug_exit_frames`].
    ///
    /// ***Note*** Enabling this option is not compatible with the
    /// Winch compiler.
    #[cfg(feature = "debug")]
    pub fn guest_debug(&mut self, enable: bool) -> &mut Self {
        // Stored in the tunables and validated/applied at engine-build time.
        self.tunables.debug_guest = Some(enable);
        self
    }
475
476    /// Configures whether [`WasmBacktrace`] will be present in the context of
477    /// errors returned from Wasmtime.
478    ///
479    /// This method is deprecated in favor of
480    /// [`Config::wasm_backtrace_max_frames`]. Calling `wasm_backtrace(false)`
481    /// is equivalent to `wasm_backtrace_max_frames(None)`, and
482    /// `wasm_backtrace(true)` will leave `wasm_backtrace_max_frames` unchanged
483    /// if the value is `Some` and will otherwise restore the default `Some`
484    /// value.
485    ///
486    /// [`WasmBacktrace`]: crate::WasmBacktrace
487    #[deprecated = "use `wasm_backtrace_max_frames` instead"]
488    pub fn wasm_backtrace(&mut self, enable: bool) -> &mut Self {
489        match (enable, self.wasm_backtrace_max_frames) {
490            (false, _) => self.wasm_backtrace_max_frames = None,
491            // Wasm backtraces were disabled; enable them with the
492            // default maximum number of frames to capture.
493            (true, None) => {
494                self.wasm_backtrace_max_frames = Some(DEFAULT_WASM_BACKTRACE_MAX_FRAMES)
495            }
496            // Wasm backtraces are already enabled; keep the existing
497            // max-frames configuration.
498            (true, Some(_)) => {}
499        }
500        self
501    }
502
503    /// Configures whether backtraces in `Trap` will parse debug info in the wasm file to
504    /// have filename/line number information.
505    ///
506    /// When enabled this will causes modules to retain debugging information
507    /// found in wasm binaries. This debug information will be used when a trap
508    /// happens to symbolicate each stack frame and attempt to print a
509    /// filename/line number for each wasm frame in the stack trace.
510    ///
511    /// By default this option is `WasmBacktraceDetails::Environment`, meaning
512    /// that wasm will read `WASMTIME_BACKTRACE_DETAILS` to indicate whether
513    /// details should be parsed. Note that the `std` feature of this crate must
514    /// be active to read environment variables, otherwise this is disabled by
515    /// default.
516    pub fn wasm_backtrace_details(&mut self, enable: WasmBacktraceDetails) -> &mut Self {
517        self.wasm_backtrace_details_env_used = false;
518        self.tunables.parse_wasm_debuginfo = match enable {
519            WasmBacktraceDetails::Enable => Some(true),
520            WasmBacktraceDetails::Disable => Some(false),
521            WasmBacktraceDetails::Environment => {
522                #[cfg(feature = "std")]
523                {
524                    self.wasm_backtrace_details_env_used = true;
525                    std::env::var("WASMTIME_BACKTRACE_DETAILS")
526                        .map(|s| Some(s == "1"))
527                        .unwrap_or(Some(false))
528                }
529                #[cfg(not(feature = "std"))]
530                {
531                    Some(false)
532                }
533            }
534        };
535        self
536    }
537
    /// Configures the maximum number of WebAssembly frames to collect in
    /// backtraces.
    ///
    /// A backtrace may be collected whenever an error is returned from a host
    /// function call through to WebAssembly or when WebAssembly itself hits a
    /// trap condition, such as an out-of-bounds memory access. This flag
    /// indicates, in these conditions, whether the backtrace is collected or
    /// not and how many frames should be collected.
    ///
    /// Currently wasm backtraces are implemented through frame pointer walking.
    /// This means that collecting a backtrace is expected to be a fast and
    /// relatively cheap operation. Additionally backtrace collection is
    /// suitable in concurrent environments since one thread capturing a
    /// backtrace won't block other threads.
    ///
    /// Collected backtraces are attached via
    /// [`Error::context`](crate::Error::context) to errors returned from host
    /// functions. The [`WasmBacktrace`] type can be acquired via
    /// [`Error::downcast_ref`](crate::Error::downcast_ref) to inspect the
    /// backtrace. When this option is set to `None` then this context is never
    /// applied to errors coming out of wasm.
    ///
    /// The default value is 20.
    ///
    /// [`WasmBacktrace`]: crate::WasmBacktrace
    pub fn wasm_backtrace_max_frames(&mut self, limit: Option<NonZeroUsize>) -> &mut Self {
        // `None` disables backtrace capture entirely.
        self.wasm_backtrace_max_frames = limit;
        self
    }
567
    /// Configures whether to generate native unwind information
    /// (e.g. `.eh_frame` on Linux).
    ///
    /// This configuration option only exists to help third-party stack
    /// capturing mechanisms, such as the system's unwinder or the `backtrace`
    /// crate, determine how to unwind through Wasm frames. It does not affect
    /// whether Wasmtime can capture Wasm backtraces or not. The presence of
    /// [`WasmBacktrace`] is controlled by the
    /// [`Config::wasm_backtrace_max_frames`] option.
    ///
    /// Native unwind information is included:
    /// - When targeting Windows, since the Windows ABI requires it.
    /// - By default.
    ///
    /// Note that systems loading many modules may wish to disable this
    /// configuration option instead of leaving it on-by-default. Some platforms
    /// exhibit quadratic behavior when registering/unregistering unwinding
    /// information which can greatly slow down the module loading/unloading
    /// process.
    ///
    /// [`WasmBacktrace`]: crate::WasmBacktrace
    pub fn native_unwind_info(&mut self, enable: bool) -> &mut Self {
        // `Some(..)` records an explicit choice; `None` (the initial value)
        // means "use the default" described above.
        self.native_unwind_info = Some(enable);
        self
    }
593
    /// Configures whether execution of WebAssembly will "consume fuel" to
    /// either halt or yield execution as desired.
    ///
    /// This can be used to deterministically prevent infinitely-executing
    /// WebAssembly code by instrumenting generated code to consume fuel as it
    /// executes. When fuel runs out a trap is raised, however [`Store`] can be
    /// configured to yield execution periodically via
    /// [`crate::Store::fuel_async_yield_interval`].
    ///
    /// Note that a [`Store`] starts with no fuel, so if you enable this option
    /// you'll have to be sure to pour some fuel into [`Store`] before
    /// executing some code.
    ///
    /// By default this option is `false`.
    ///
    /// **Note** Enabling this option is not compatible with the Winch compiler.
    ///
    /// [`Store`]: crate::Store
    pub fn consume_fuel(&mut self, enable: bool) -> &mut Self {
        // Stored in the tunables and validated/applied at engine-build time.
        self.tunables.consume_fuel = Some(enable);
        self
    }
616
    /// Configures the fuel cost of each WebAssembly operator.
    ///
    /// Installs a table-based cost strategy built from `cost`.
    ///
    /// This is only relevant when [`Config::consume_fuel`] is enabled.
    pub fn operator_cost(&mut self, cost: OperatorCost) -> &mut Self {
        self.tunables.operator_cost = Some(OperatorCostStrategy::table(cost));
        self
    }
624
    /// Enables epoch-based interruption.
    ///
    /// When executing code in async mode, we sometimes want to
    /// implement a form of cooperative timeslicing: long-running Wasm
    /// guest code should periodically yield to the executor
    /// loop. This yielding could be implemented by using "fuel" (see
    /// [`consume_fuel`](Config::consume_fuel)). However, fuel
    /// instrumentation is somewhat expensive: it modifies the
    /// compiled form of the Wasm code so that it maintains a precise
    /// instruction count, frequently checking this count against the
    /// remaining fuel. If one does not need this precise count or
    /// deterministic interruptions, and only needs a periodic
    /// interrupt of some form, then it would be better to have a more
    /// lightweight mechanism.
    ///
    /// Epoch-based interruption is that mechanism. There is a global
    /// "epoch", which is a counter that divides time into arbitrary
    /// periods (or epochs). This counter lives on the
    /// [`Engine`](crate::Engine) and can be incremented by calling
    /// [`Engine::increment_epoch`](crate::Engine::increment_epoch).
    /// Epoch-based instrumentation works by setting a "deadline
    /// epoch". The compiled code knows the deadline, and at certain
    /// points, checks the current epoch against that deadline. It
    /// will yield if the deadline has been reached.
    ///
    /// The idea is that checking an infrequently-changing counter is
    /// cheaper than counting and frequently storing a precise metric
    /// (instructions executed) locally. The interruptions are not
    /// deterministic, but if the embedder increments the epoch in a
    /// periodic way (say, every regular timer tick by a thread or
    /// signal handler), then we can ensure that all async code will
    /// yield to the executor within a bounded time.
    ///
    /// The deadline check cannot be avoided by malicious wasm code. It is safe
    /// to use epoch deadlines to limit the execution time of untrusted
    /// code.
    ///
    /// The [`Store`](crate::Store) tracks the deadline, and controls
    /// what happens when the deadline is reached during
    /// execution. Several behaviors are possible:
    ///
    /// - Trap if code is executing when the epoch deadline is
    ///   met. See
    ///   [`Store::epoch_deadline_trap`](crate::Store::epoch_deadline_trap).
    ///
    /// - Call an arbitrary function. This function may choose to trap or
    ///   increment the epoch. See
    ///   [`Store::epoch_deadline_callback`](crate::Store::epoch_deadline_callback).
    ///
    /// - Yield to the executor loop, then resume when the future is
    ///   next polled. See
    ///   [`Store::epoch_deadline_async_yield_and_update`](crate::Store::epoch_deadline_async_yield_and_update).
    ///
    /// Trapping is the default. The yielding behavior may be used for
    /// the timeslicing behavior described above.
    ///
    /// This feature is available with or without async support.
    /// However, without async support, the timeslicing behavior is
    /// not available. This means epoch-based interruption can only
    /// serve as a simple external-interruption mechanism.
    ///
    /// An initial deadline must be set before executing code by calling
    /// [`Store::set_epoch_deadline`](crate::Store::set_epoch_deadline). If this
    /// deadline is not configured then wasm will immediately trap.
    ///
    /// ## Interaction with blocking host calls
    ///
    /// Epochs (and fuel) do not assist in handling WebAssembly code blocked in
    /// a call to the host. For example if the WebAssembly function calls
    /// `wasi:io/poll.poll` to sleep epochs will not assist in waking this up or
    /// timing it out. Epochs intentionally only affect running WebAssembly code
    /// itself and it's left to the embedder to determine how best to wake up
    /// indefinitely blocking code in the host.
    ///
    /// The typical solution for this, however, is to use the `async` variant of
    /// WASI host functions. This models computation as a Rust `Future` which
    /// means that when blocking happens the future is only suspended and
    /// control yields back to the main event loop. This gives the embedder the
    /// opportunity to use `tokio::time::timeout` for example on a wasm
    /// computation and have the desired effect of cancelling a blocking
    /// operation when a timeout expires.
    ///
    /// ## When to use fuel vs. epochs
    ///
    /// In general, epoch-based interruption results in faster
    /// execution. This difference is sometimes significant: in some
    /// measurements, up to 2-3x. This is because epoch-based
    /// interruption does less work: it only watches for a global
    /// rarely-changing counter to increment, rather than keeping a
    /// local frequently-changing counter and comparing it to a
    /// deadline.
    ///
    /// Fuel, in contrast, should be used when *deterministic*
    /// yielding or trapping is needed. For example, if it is required
    /// that the same function call with the same starting state will
    /// always either complete or trap with an out-of-fuel error,
    /// deterministically, then fuel with a fixed bound should be
    /// used.
    ///
    /// **Note** Enabling this option is not compatible with the Winch compiler.
    ///
    /// # See Also
    ///
    /// - [`Engine::increment_epoch`](crate::Engine::increment_epoch)
    /// - [`Store::set_epoch_deadline`](crate::Store::set_epoch_deadline)
    /// - [`Store::epoch_deadline_trap`](crate::Store::epoch_deadline_trap)
    /// - [`Store::epoch_deadline_callback`](crate::Store::epoch_deadline_callback)
    /// - [`Store::epoch_deadline_async_yield_and_update`](crate::Store::epoch_deadline_async_yield_and_update)
    pub fn epoch_interruption(&mut self, enable: bool) -> &mut Self {
        // Stored in the tunables and validated/applied at engine-build time.
        self.tunables.epoch_interruption = Some(enable);
        self
    }
737
    /// XXX: For internal fuzzing and debugging use only!
    #[doc(hidden)]
    pub fn gc_zeal_alloc_counter(&mut self, counter: Option<NonZeroU32>) -> Result<&mut Self> {
        // Without `cfg(gc_zeal)` there is no backing tunable for this knob,
        // so reject the call loudly rather than silently ignoring it.
        #[cfg(not(gc_zeal))]
        {
            let _ = counter;
            bail!(
                "cannot set `gc_zeal_alloc_counter` because Wasmtime was not built with `cfg(gc_zeal)`"
            );
        }

        // With `cfg(gc_zeal)` the value (including an explicit `None`) is
        // stored in the tunables.
        #[cfg(gc_zeal)]
        {
            self.tunables.gc_zeal_alloc_counter = Some(counter);
            Ok(self)
        }
    }
755
    /// Configures the maximum amount of stack space available for
    /// executing WebAssembly code.
    ///
    /// WebAssembly has well-defined semantics on stack overflow. This is
    /// intended to be a knob which can help configure how much stack space
    /// wasm execution is allowed to consume. Note that the number here is not
    /// super-precise, but rather wasm will take at most "pretty close to this
    /// much" stack space.
    ///
    /// If a wasm call (or series of nested wasm calls) take more stack space
    /// than the `size` specified then a stack overflow trap will be raised.
    ///
    /// Caveat: this knob only limits the stack space consumed by wasm code.
    /// More importantly, it does not ensure that this much stack space is
    /// available on the calling thread stack. Exhausting the thread stack
    /// typically leads to an **abort** of the process.
    ///
    /// Here are some examples of how that could happen:
    ///
    /// - Let's assume this option is set to 2 MiB and then a thread that has
    ///   a stack with 512 KiB left.
    ///
    ///   If wasm code consumes more than 512 KiB then the process will be aborted.
    ///
    /// - Assuming the same conditions, but this time wasm code does not consume
    ///   any stack but calls into a host function. The host function consumes
    ///   more than 512 KiB of stack space. The process will be aborted.
    ///
    /// There's another gotcha related to recursive calling into wasm: the stack
    /// space consumed by a host function is counted towards this limit. The
    /// host functions are not prevented from consuming more than this limit.
    /// However, if the host function that used more than this limit and called
    /// back into wasm, then the execution will trap immediately because of
    /// stack overflow.
    ///
    /// When the `async` feature is enabled, this value cannot exceed the
    /// `async_stack_size` option. Be careful not to set this value too close
    /// to `async_stack_size` as doing so may limit how much stack space
    /// is available for host functions.
    ///
    /// By default this option is 512 KiB.
    ///
    /// # Errors
    ///
    /// The `Engine::new` method will fail if the `size` specified here is
    /// either 0 or larger than the [`Config::async_stack_size`] configuration.
    pub fn max_wasm_stack(&mut self, size: usize) -> &mut Self {
        // Stored as-is; validation (non-zero, not above `async_stack_size`)
        // is deferred to `Engine::new` per "# Errors" above.
        self.max_wasm_stack = size;
        self
    }
806
    /// Configures the size of the stacks used for asynchronous execution.
    ///
    /// This setting configures the size of the stacks that are allocated for
    /// asynchronous execution. The value cannot be less than `max_wasm_stack`.
    ///
    /// The amount of stack space guaranteed for host functions is
    /// `async_stack_size - max_wasm_stack`, so take care not to set these two values
    /// close to one another; doing so may cause host functions to overflow the
    /// stack and abort the process.
    ///
    /// By default this option is 2 MiB.
    ///
    /// # Errors
    ///
    /// The `Engine::new` method will fail if the value for this option is
    /// smaller than the [`Config::max_wasm_stack`] option.
    #[cfg(any(feature = "async", feature = "stack-switching"))]
    pub fn async_stack_size(&mut self, size: usize) -> &mut Self {
        // Stored as-is; the check against `max_wasm_stack` is deferred to
        // `Engine::new` per "# Errors" above.
        self.async_stack_size = size;
        self
    }
828
    /// Configures whether or not stacks used for async futures are zeroed
    /// before (re)use.
    ///
    /// When the [`call_async`] variant of calling WebAssembly is used
    /// then Wasmtime will create a separate runtime execution stack for each
    /// future produced by [`call_async`]. By default upon allocation, depending
    /// on the platform, these stacks might be filled with uninitialized
    /// memory. This is safe and correct because, modulo bugs in Wasmtime,
    /// compiled Wasm code will never read from a stack slot before it
    /// initializes the stack slot.
    ///
    /// However, as a defense-in-depth mechanism, you may configure Wasmtime to
    /// ensure that these stacks are zeroed before they are used. Notably, if
    /// you are using the pooling allocator, stacks can be pooled and reused
    /// across different Wasm guests; ensuring that stacks are zeroed can
    /// prevent data leakage between Wasm guests even in the face of potential
    /// read-of-stack-slot-before-initialization bugs in Wasmtime's compiler.
    ///
    /// Stack zeroing can be a costly operation in highly concurrent
    /// environments due to modifications of the virtual address space requiring
    /// process-wide synchronization. It can also be costly in `no-std`
    /// environments that must manually zero memory, and cannot rely on an OS
    /// and virtual memory to provide zeroed pages.
    ///
    /// This option defaults to `false`.
    ///
    /// [`call_async`]: crate::TypedFunc::call_async
    #[cfg(feature = "async")]
    pub fn async_stack_zeroing(&mut self, enable: bool) -> &mut Self {
        // Record the flag; consumers of this configuration live elsewhere.
        self.async_stack_zeroing = enable;
        self
    }
861
862    /// Explicitly enables (and un-disables) a given set of [`WasmFeatures`].
863    ///
864    /// Note: this is a low-level method that does not necessarily imply that
865    /// wasmtime _supports_ a feature. It should only be used to _disable_
866    /// features that callers want to be rejected by the parser or _enable_
867    /// features callers are certain that the current configuration of wasmtime
868    /// supports.
869    ///
870    /// Feature validation is deferred until an engine is being built, thus by
871    /// enabling features here a caller may cause
872    /// [`Engine::new`](crate::Engine::new) to fail later, if the feature
873    /// configuration isn't supported.
874    pub fn wasm_features(&mut self, flag: WasmFeatures, enable: bool) -> &mut Self {
875        self.enabled_features.set(flag, enable);
876        self.disabled_features.set(flag, !enable);
877        self
878    }
879
880    /// Configures whether the WebAssembly tail calls proposal will be enabled
881    /// for compilation or not.
882    ///
883    /// The [WebAssembly tail calls proposal] introduces the `return_call` and
884    /// `return_call_indirect` instructions. These instructions allow for Wasm
885    /// programs to implement some recursive algorithms with *O(1)* stack space
886    /// usage.
887    ///
888    /// This is `true` by default except when the Winch compiler is enabled.
889    ///
890    /// [WebAssembly tail calls proposal]: https://github.com/WebAssembly/tail-call
891    pub fn wasm_tail_call(&mut self, enable: bool) -> &mut Self {
892        self.wasm_features(WasmFeatures::TAIL_CALL, enable);
893        self
894    }
895
896    /// Configures whether the WebAssembly custom-page-sizes proposal will be
897    /// enabled for compilation or not.
898    ///
899    /// The [WebAssembly custom-page-sizes proposal] allows a memory to
900    /// customize its page sizes. By default, Wasm page sizes are 64KiB
901    /// large. This proposal allows the memory to opt into smaller page sizes
902    /// instead, allowing Wasm to run in environments with less than 64KiB RAM
903    /// available, for example.
904    ///
905    /// Note that the page size is part of the memory's type, and because
906    /// different memories may have different types, they may also have
907    /// different page sizes.
908    ///
909    /// Currently the only valid page sizes are 64KiB (the default) and 1
910    /// byte. Future extensions may relax this constraint and allow all powers
911    /// of two.
912    ///
913    /// Support for this proposal is disabled by default.
914    ///
915    /// [WebAssembly custom-page-sizes proposal]: https://github.com/WebAssembly/custom-page-sizes
916    pub fn wasm_custom_page_sizes(&mut self, enable: bool) -> &mut Self {
917        self.wasm_features(WasmFeatures::CUSTOM_PAGE_SIZES, enable);
918        self
919    }
920
921    /// Configures whether the WebAssembly [threads] proposal will be enabled
922    /// for compilation.
923    ///
924    /// This feature gates items such as shared memories and atomic
925    /// instructions. Note that the threads feature depends on the bulk memory
926    /// feature, which is enabled by default. Additionally note that while the
927    /// wasm feature is called "threads" it does not actually include the
928    /// ability to spawn threads. Spawning threads is part of the [wasi-threads]
929    /// proposal which is a separately gated feature in Wasmtime.
930    ///
931    /// Embeddings of Wasmtime are able to build their own custom threading
932    /// scheme on top of the core wasm threads proposal, however.
933    ///
934    /// The default value for this option is whether the `threads`
935    /// crate feature of Wasmtime is enabled or not. By default this crate
936    /// feature is enabled.
937    ///
938    /// [threads]: https://github.com/webassembly/threads
939    /// [wasi-threads]: https://github.com/webassembly/wasi-threads
940    #[cfg(feature = "threads")]
941    pub fn wasm_threads(&mut self, enable: bool) -> &mut Self {
942        self.wasm_features(WasmFeatures::THREADS, enable);
943        self
944    }
945
946    /// Configures whether the WebAssembly [shared-everything-threads] proposal
947    /// will be enabled for compilation.
948    ///
949    /// This feature gates extended use of the `shared` attribute on items other
950    /// than memories, extra atomic instructions, and new component model
951    /// intrinsics for spawning threads. It depends on the
952    /// [`wasm_threads`][Self::wasm_threads] being enabled.
953    ///
954    /// [shared-everything-threads]:
955    ///     https://github.com/webassembly/shared-everything-threads
956    pub fn wasm_shared_everything_threads(&mut self, enable: bool) -> &mut Self {
957        self.wasm_features(WasmFeatures::SHARED_EVERYTHING_THREADS, enable);
958        self
959    }
960
961    /// Configures whether the [WebAssembly reference types proposal][proposal]
962    /// will be enabled for compilation.
963    ///
964    /// This feature gates items such as the `externref` and `funcref` types as
965    /// well as allowing a module to define multiple tables.
966    ///
967    /// Note that the reference types proposal depends on the bulk memory proposal.
968    ///
969    /// This feature is `true` by default.
970    ///
971    /// # Errors
972    ///
973    /// The validation of this feature are deferred until the engine is being built,
974    /// and thus may cause `Engine::new` fail if the `bulk_memory` feature is disabled.
975    ///
976    /// [proposal]: https://github.com/webassembly/reference-types
977    #[cfg(feature = "gc")]
978    pub fn wasm_reference_types(&mut self, enable: bool) -> &mut Self {
979        self.wasm_features(WasmFeatures::REFERENCE_TYPES, enable);
980        self
981    }
982
983    /// Configures whether the [WebAssembly function references
984    /// proposal][proposal] will be enabled for compilation.
985    ///
986    /// This feature gates non-nullable reference types, function reference
987    /// types, `call_ref`, `ref.func`, and non-nullable reference related
988    /// instructions.
989    ///
990    /// Note that the function references proposal depends on the reference
991    /// types proposal.
992    ///
993    /// This feature is `false` by default.
994    ///
995    /// [proposal]: https://github.com/WebAssembly/function-references
996    #[cfg(feature = "gc")]
997    pub fn wasm_function_references(&mut self, enable: bool) -> &mut Self {
998        self.wasm_features(WasmFeatures::FUNCTION_REFERENCES, enable);
999        self
1000    }
1001
1002    /// Configures whether the [WebAssembly wide-arithmetic][proposal] will be
1003    /// enabled for compilation.
1004    ///
1005    /// This feature is `false` by default.
1006    ///
1007    /// [proposal]: https://github.com/WebAssembly/wide-arithmetic
1008    pub fn wasm_wide_arithmetic(&mut self, enable: bool) -> &mut Self {
1009        self.wasm_features(WasmFeatures::WIDE_ARITHMETIC, enable);
1010        self
1011    }
1012
1013    /// Configures whether the [WebAssembly Garbage Collection
1014    /// proposal][proposal] will be enabled for compilation.
1015    ///
1016    /// This feature gates `struct` and `array` type definitions and references,
1017    /// the `i31ref` type, and all related instructions.
1018    ///
1019    /// Note that the function references proposal depends on the typed function
1020    /// references proposal.
1021    ///
1022    /// This feature is `false` by default.
1023    ///
1024    /// **Warning: Wasmtime's implementation of the GC proposal is still in
1025    /// progress and generally not ready for primetime.**
1026    ///
1027    /// [proposal]: https://github.com/WebAssembly/gc
1028    #[cfg(feature = "gc")]
1029    pub fn wasm_gc(&mut self, enable: bool) -> &mut Self {
1030        self.wasm_features(WasmFeatures::GC, enable);
1031        self
1032    }
1033
1034    /// Configures whether the WebAssembly SIMD proposal will be
1035    /// enabled for compilation.
1036    ///
1037    /// The [WebAssembly SIMD proposal][proposal]. This feature gates items such
1038    /// as the `v128` type and all of its operators being in a module. Note that
1039    /// this does not enable the [relaxed simd proposal].
1040    ///
1041    /// **Note**
1042    ///
1043    /// On x86_64 platforms the base CPU feature requirement for SIMD
1044    /// is SSE2 for the Cranelift compiler and AVX for the Winch compiler.
1045    ///
1046    /// This is `true` by default.
1047    ///
1048    /// [proposal]: https://github.com/webassembly/simd
1049    /// [relaxed simd proposal]: https://github.com/WebAssembly/relaxed-simd
1050    pub fn wasm_simd(&mut self, enable: bool) -> &mut Self {
1051        self.wasm_features(WasmFeatures::SIMD, enable);
1052        self
1053    }
1054
1055    /// Configures whether the WebAssembly Relaxed SIMD proposal will be
1056    /// enabled for compilation.
1057    ///
1058    /// The relaxed SIMD proposal adds new instructions to WebAssembly which,
1059    /// for some specific inputs, are allowed to produce different results on
1060    /// different hosts. More-or-less this proposal enables exposing
1061    /// platform-specific semantics of SIMD instructions in a controlled
1062    /// fashion to a WebAssembly program. From an embedder's perspective this
1063    /// means that WebAssembly programs may execute differently depending on
1064    /// whether the host is x86_64 or AArch64, for example.
1065    ///
1066    /// By default Wasmtime lowers relaxed SIMD instructions to the fastest
1067    /// lowering for the platform it's running on. This means that, by default,
1068    /// some relaxed SIMD instructions may have different results for the same
1069    /// inputs across x86_64 and AArch64. This behavior can be disabled through
1070    /// the [`Config::relaxed_simd_deterministic`] option which will force
1071    /// deterministic behavior across all platforms, as classified by the
1072    /// specification, at the cost of performance.
1073    ///
1074    /// This is `true` by default.
1075    ///
1076    /// [proposal]: https://github.com/webassembly/relaxed-simd
1077    pub fn wasm_relaxed_simd(&mut self, enable: bool) -> &mut Self {
1078        self.wasm_features(WasmFeatures::RELAXED_SIMD, enable);
1079        self
1080    }
1081
1082    /// This option can be used to control the behavior of the [relaxed SIMD
1083    /// proposal's][proposal] instructions.
1084    ///
1085    /// The relaxed SIMD proposal introduces instructions that are allowed to
1086    /// have different behavior on different architectures, primarily to afford
1087    /// an efficient implementation on all architectures. This means, however,
1088    /// that the same module may execute differently on one host than another,
1089    /// which typically is not otherwise the case. This option is provided to
1090    /// force Wasmtime to generate deterministic code for all relaxed simd
1091    /// instructions, at the cost of performance, for all architectures. When
1092    /// this option is enabled then the deterministic behavior of all
1093    /// instructions in the relaxed SIMD proposal is selected.
1094    ///
1095    /// This is `false` by default.
1096    ///
1097    /// [proposal]: https://github.com/webassembly/relaxed-simd
1098    pub fn relaxed_simd_deterministic(&mut self, enable: bool) -> &mut Self {
1099        self.tunables.relaxed_simd_deterministic = Some(enable);
1100        self
1101    }
1102
1103    /// Configures whether the [WebAssembly bulk memory operations
1104    /// proposal][proposal] will be enabled for compilation.
1105    ///
1106    /// This feature gates items such as the `memory.copy` instruction, passive
1107    /// data/table segments, etc, being in a module.
1108    ///
1109    /// This is `true` by default.
1110    ///
1111    /// Feature `reference_types`, which is also `true` by default, requires
1112    /// this feature to be enabled. Thus disabling this feature must also disable
1113    /// `reference_types` as well using [`wasm_reference_types`](crate::Config::wasm_reference_types).
1114    ///
1115    /// # Errors
1116    ///
1117    /// Disabling this feature without disabling `reference_types` will cause
1118    /// `Engine::new` to fail.
1119    ///
1120    /// [proposal]: https://github.com/webassembly/bulk-memory-operations
1121    pub fn wasm_bulk_memory(&mut self, enable: bool) -> &mut Self {
1122        self.wasm_features(WasmFeatures::BULK_MEMORY, enable);
1123        self
1124    }
1125
1126    /// Configures whether the WebAssembly multi-value [proposal] will
1127    /// be enabled for compilation.
1128    ///
1129    /// This feature gates functions and blocks returning multiple values in a
1130    /// module, for example.
1131    ///
1132    /// This is `true` by default.
1133    ///
1134    /// [proposal]: https://github.com/webassembly/multi-value
1135    pub fn wasm_multi_value(&mut self, enable: bool) -> &mut Self {
1136        self.wasm_features(WasmFeatures::MULTI_VALUE, enable);
1137        self
1138    }
1139
1140    /// Configures whether the WebAssembly multi-memory [proposal] will
1141    /// be enabled for compilation.
1142    ///
1143    /// This feature gates modules having more than one linear memory
1144    /// declaration or import.
1145    ///
1146    /// This is `true` by default.
1147    ///
1148    /// [proposal]: https://github.com/webassembly/multi-memory
1149    pub fn wasm_multi_memory(&mut self, enable: bool) -> &mut Self {
1150        self.wasm_features(WasmFeatures::MULTI_MEMORY, enable);
1151        self
1152    }
1153
1154    /// Configures whether the WebAssembly memory64 [proposal] will
1155    /// be enabled for compilation.
1156    ///
1157    /// Note that this the upstream specification is not finalized and Wasmtime
1158    /// may also have bugs for this feature since it hasn't been exercised
1159    /// much.
1160    ///
1161    /// This is `false` by default.
1162    ///
1163    /// [proposal]: https://github.com/webassembly/memory64
1164    pub fn wasm_memory64(&mut self, enable: bool) -> &mut Self {
1165        self.wasm_features(WasmFeatures::MEMORY64, enable);
1166        self
1167    }
1168
1169    /// Configures whether the WebAssembly extended-const [proposal] will
1170    /// be enabled for compilation.
1171    ///
1172    /// This is `true` by default.
1173    ///
1174    /// [proposal]: https://github.com/webassembly/extended-const
1175    pub fn wasm_extended_const(&mut self, enable: bool) -> &mut Self {
1176        self.wasm_features(WasmFeatures::EXTENDED_CONST, enable);
1177        self
1178    }
1179
1180    /// Configures whether the [WebAssembly stack switching
1181    /// proposal][proposal] will be enabled for compilation.
1182    ///
1183    /// This feature gates the use of control tags.
1184    ///
1185    /// This feature depends on the `function_reference_types` and
1186    /// `exceptions` features.
1187    ///
1188    /// This feature is `false` by default.
1189    ///
1190    /// # Errors
1191    ///
1192    /// [proposal]: https://github.com/webassembly/stack-switching
1193    pub fn wasm_stack_switching(&mut self, enable: bool) -> &mut Self {
1194        self.wasm_features(WasmFeatures::STACK_SWITCHING, enable);
1195        self
1196    }
1197
1198    /// Configures whether the WebAssembly component-model [proposal] will
1199    /// be enabled for compilation.
1200    ///
1201    /// This flag can be used to blanket disable all components within Wasmtime.
1202    /// Otherwise usage of components requires statically using
1203    /// [`Component`](crate::component::Component) instead of
1204    /// [`Module`](crate::Module) for example anyway.
1205    ///
1206    /// The default value for this option is whether the `component-model`
1207    /// crate feature of Wasmtime is enabled or not. By default this crate
1208    /// feature is enabled.
1209    ///
1210    /// [proposal]: https://github.com/webassembly/component-model
1211    #[cfg(feature = "component-model")]
1212    pub fn wasm_component_model(&mut self, enable: bool) -> &mut Self {
1213        self.wasm_features(WasmFeatures::COMPONENT_MODEL, enable);
1214        self
1215    }
1216
1217    /// Configures whether components support the async ABI [proposal] for
1218    /// lifting and lowering functions, as well as `stream`, `future`, and
1219    /// `error-context` types.
1220    ///
1221    /// Please note that Wasmtime's support for this feature is _very_
1222    /// incomplete.
1223    ///
1224    /// [proposal]:
1225    ///     https://github.com/WebAssembly/component-model/blob/main/design/mvp/Concurrency.md
1226    #[cfg(feature = "component-model-async")]
1227    pub fn wasm_component_model_async(&mut self, enable: bool) -> &mut Self {
1228        self.wasm_features(WasmFeatures::CM_ASYNC, enable);
1229        self
1230    }
1231
1232    /// This corresponds to the 🚝 emoji in the component model specification.
1233    ///
1234    /// Please note that Wasmtime's support for this feature is _very_
1235    /// incomplete.
1236    ///
1237    /// [proposal]:
1238    ///     https://github.com/WebAssembly/component-model/blob/main/design/mvp/Concurrency.md
1239    #[cfg(feature = "component-model-async")]
1240    pub fn wasm_component_model_more_async_builtins(&mut self, enable: bool) -> &mut Self {
1241        self.wasm_features(WasmFeatures::CM_MORE_ASYNC_BUILTINS, enable);
1242        self
1243    }
1244
1245    /// This corresponds to the 🚟 emoji in the component model specification.
1246    ///
1247    /// Please note that Wasmtime's support for this feature is _very_
1248    /// incomplete.
1249    ///
1250    /// [proposal]: https://github.com/WebAssembly/component-model/blob/main/design/mvp/Concurrency.md
1251    #[cfg(feature = "component-model-async")]
1252    pub fn wasm_component_model_async_stackful(&mut self, enable: bool) -> &mut Self {
1253        self.wasm_features(WasmFeatures::CM_ASYNC_STACKFUL, enable);
1254        self
1255    }
1256
1257    /// This corresponds to the 🧵 emoji in the component model specification.
1258    ///
1259    /// Please note that Wasmtime's support for this feature is _very_
1260    /// incomplete.
1261    ///
1262    /// [proposal]:
1263    ///     https://github.com/WebAssembly/component-model/pull/557
1264    #[cfg(feature = "component-model-async")]
1265    pub fn wasm_component_model_threading(&mut self, enable: bool) -> &mut Self {
1266        self.wasm_features(WasmFeatures::CM_THREADING, enable);
1267        self
1268    }
1269
1270    /// This corresponds to the 📝 emoji in the component model specification.
1271    ///
1272    /// Please note that Wasmtime's support for this feature is _very_
1273    /// incomplete.
1274    ///
1275    /// [proposal]: https://github.com/WebAssembly/component-model/blob/main/design/mvp/Concurrency.md
1276    #[cfg(feature = "component-model")]
1277    pub fn wasm_component_model_error_context(&mut self, enable: bool) -> &mut Self {
1278        self.wasm_features(WasmFeatures::CM_ERROR_CONTEXT, enable);
1279        self
1280    }
1281
1282    /// Configures whether the [GC extension to the component-model
1283    /// proposal][proposal] is enabled or not.
1284    ///
1285    /// This corresponds to the 🛸 emoji in the component model specification.
1286    ///
1287    /// Please note that Wasmtime's support for this feature is _very_
1288    /// incomplete.
1289    ///
1290    /// [proposal]: https://github.com/WebAssembly/component-model/issues/525
1291    #[cfg(feature = "component-model")]
1292    pub fn wasm_component_model_gc(&mut self, enable: bool) -> &mut Self {
1293        self.wasm_features(WasmFeatures::CM_GC, enable);
1294        self
1295    }
1296
1297    /// Configures whether the component model map type is enabled or not.
1298    ///
1299    /// This is part of the component model specification and enables the
1300    /// `map<k, v>` type in WIT and the component binary format.
1301    #[cfg(feature = "component-model")]
1302    pub fn wasm_component_model_map(&mut self, enable: bool) -> &mut Self {
1303        self.wasm_features(WasmFeatures::CM_MAP, enable);
1304        self
1305    }
1306
1307    /// This corresponds to the 🔧 emoji in the component model specification.
1308    ///
1309    /// Please note that Wasmtime's support for this feature is _very_
1310    /// incomplete.
1311    #[cfg(feature = "component-model")]
1312    pub fn wasm_component_model_fixed_length_lists(&mut self, enable: bool) -> &mut Self {
1313        self.wasm_features(WasmFeatures::CM_FIXED_LENGTH_LISTS, enable);
1314        self
1315    }
1316
1317    /// Configures whether the [Exception-handling proposal][proposal] is enabled or not.
1318    ///
1319    /// [proposal]: https://github.com/WebAssembly/exception-handling
1320    #[cfg(feature = "gc")]
1321    pub fn wasm_exceptions(&mut self, enable: bool) -> &mut Self {
1322        self.wasm_features(WasmFeatures::EXCEPTIONS, enable);
1323        self
1324    }
1325
1326    #[doc(hidden)] // FIXME(#3427) - if/when implemented then un-hide this
1327    #[deprecated = "This configuration option only exists for internal \
1328                    usage with the spec testsuite. It may be removed at \
1329                    any time and without warning. Do not rely on it!"]
1330    pub fn wasm_legacy_exceptions(&mut self, enable: bool) -> &mut Self {
1331        self.wasm_features(WasmFeatures::LEGACY_EXCEPTIONS, enable);
1332        self
1333    }
1334
1335    /// Configures which compilation strategy will be used for wasm modules.
1336    ///
1337    /// This method can be used to configure which compiler is used for wasm
1338    /// modules, and for more documentation consult the [`Strategy`] enumeration
1339    /// and its documentation.
1340    ///
1341    /// The default value for this is `Strategy::Auto`.
1342    ///
1343    /// # Panics
1344    ///
1345    /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
1346    #[cfg(any(feature = "cranelift", feature = "winch"))]
1347    pub fn strategy(&mut self, strategy: Strategy) -> &mut Self {
1348        self.compiler_config_mut().strategy = strategy.not_auto();
1349        self
1350    }
1351
    /// Configures which garbage collector will be used for Wasm modules.
    ///
    /// This method can be used to configure which garbage collector
    /// implementation is used for Wasm modules. For more documentation, consult
    /// the [`Collector`] enumeration and its documentation.
    ///
    /// The default value for this is `Collector::Auto`.
    #[cfg(feature = "gc")]
    pub fn collector(&mut self, collector: Collector) -> &mut Self {
        // Record the caller's collector choice.
        self.collector = collector;
        self
    }
1364
    /// Creates a default profiler based on the profiling strategy chosen.
    ///
    /// Profiler creation calls the type's default initializer where the purpose is
    /// really just to put in place the type used for profiling.
    ///
    /// Some [`ProfilingStrategy`] require specific platforms or particular feature
    /// to be enabled, such as `ProfilingStrategy::JitDump` requires the `jitdump`
    /// feature.
    ///
    /// # Errors
    ///
    /// The validation of this field is deferred until the engine is being built, and thus may
    /// cause `Engine::new` to fail if the required feature is disabled, or the platform is not
    /// supported.
    pub fn profiler(&mut self, profile: ProfilingStrategy) -> &mut Self {
        // Only the strategy is recorded here; validation is deferred to
        // engine construction per "# Errors" above.
        self.profiling_strategy = profile;
        self
    }
1383
1384    /// Configures whether the debug verifier of Cranelift is enabled or not.
1385    ///
1386    /// When Cranelift is used as a code generation backend this will configure
1387    /// it to have the `enable_verifier` flag which will enable a number of debug
1388    /// checks inside of Cranelift. This is largely only useful for the
1389    /// developers of wasmtime itself.
1390    ///
1391    /// The default value for this is `false`
1392    ///
1393    /// # Panics
1394    ///
1395    /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
1396    #[cfg(any(feature = "cranelift", feature = "winch"))]
1397    pub fn cranelift_debug_verifier(&mut self, enable: bool) -> &mut Self {
1398        let val = if enable { "true" } else { "false" };
1399        self.compiler_config_mut()
1400            .settings
1401            .insert("enable_verifier".to_string(), val.to_string());
1402        self
1403    }
1404
1405    /// Configures whether extra debug checks are inserted into
1406    /// Wasmtime-generated code by Cranelift.
1407    ///
1408    /// The default value for this is `false`
1409    ///
1410    /// # Panics
1411    ///
1412    /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
1413    #[cfg(any(feature = "cranelift", feature = "winch"))]
1414    pub fn cranelift_wasmtime_debug_checks(&mut self, enable: bool) -> &mut Self {
1415        unsafe { self.cranelift_flag_set("wasmtime_debug_checks", &enable.to_string()) }
1416    }
1417
1418    /// Configures the Cranelift code generator optimization level.
1419    ///
1420    /// When the Cranelift code generator is used you can configure the
1421    /// optimization level used for generated code in a few various ways. For
1422    /// more information see the documentation of [`OptLevel`].
1423    ///
1424    /// The default value for this is `OptLevel::Speed`.
1425    ///
1426    /// # Panics
1427    ///
1428    /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
1429    #[cfg(any(feature = "cranelift", feature = "winch"))]
1430    pub fn cranelift_opt_level(&mut self, level: OptLevel) -> &mut Self {
1431        let val = match level {
1432            OptLevel::None => "none",
1433            OptLevel::Speed => "speed",
1434            OptLevel::SpeedAndSize => "speed_and_size",
1435        };
1436        self.compiler_config_mut()
1437            .settings
1438            .insert("opt_level".to_string(), val.to_string());
1439        self
1440    }
1441
1442    /// Configures the regalloc algorithm used by the Cranelift code generator.
1443    ///
1444    /// Cranelift can select any of several register allocator algorithms. Each
1445    /// of these algorithms generates correct code, but they represent different
1446    /// tradeoffs between compile speed (how expensive the compilation process
1447    /// is) and run-time speed (how fast the generated code runs).
1448    /// For more information see the documentation of [`RegallocAlgorithm`].
1449    ///
1450    /// The default value for this is `RegallocAlgorithm::Backtracking`.
1451    ///
1452    /// # Panics
1453    ///
1454    /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
1455    #[cfg(any(feature = "cranelift", feature = "winch"))]
1456    pub fn cranelift_regalloc_algorithm(&mut self, algo: RegallocAlgorithm) -> &mut Self {
1457        let val = match algo {
1458            RegallocAlgorithm::Backtracking => "backtracking",
1459            RegallocAlgorithm::SinglePass => "single_pass",
1460        };
1461        self.compiler_config_mut()
1462            .settings
1463            .insert("regalloc_algorithm".to_string(), val.to_string());
1464        self
1465    }
1466
1467    /// Configures whether Cranelift should perform a NaN-canonicalization pass.
1468    ///
1469    /// When Cranelift is used as a code generation backend this will configure
1470    /// it to replace NaNs with a single canonical value. This is useful for
1471    /// users requiring entirely deterministic WebAssembly computation.  This is
1472    /// not required by the WebAssembly spec, so it is not enabled by default.
1473    ///
1474    /// Note that this option affects not only WebAssembly's `f32` and `f64`
1475    /// types but additionally the `v128` type. This option will cause
1476    /// operations using any of these types to have extra checks placed after
1477    /// them to normalize NaN values as needed.
1478    ///
1479    /// The default value for this is `false`
1480    ///
1481    /// # Panics
1482    ///
1483    /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
1484    #[cfg(any(feature = "cranelift", feature = "winch"))]
1485    pub fn cranelift_nan_canonicalization(&mut self, enable: bool) -> &mut Self {
1486        let val = if enable { "true" } else { "false" };
1487        self.compiler_config_mut()
1488            .settings
1489            .insert("enable_nan_canonicalization".to_string(), val.to_string());
1490        self
1491    }
1492
1493    /// Allows setting a Cranelift boolean flag or preset. This allows
1494    /// fine-tuning of Cranelift settings.
1495    ///
1496    /// Since Cranelift flags may be unstable, this method should not be considered to be stable
1497    /// either; other `Config` functions should be preferred for stability.
1498    ///
1499    /// # Safety
1500    ///
1501    /// This is marked as unsafe, because setting the wrong flag might break invariants,
1502    /// resulting in execution hazards.
1503    ///
1504    /// # Errors
1505    ///
1506    /// The validation of the flags are deferred until the engine is being built, and thus may
1507    /// cause `Engine::new` fail if the flag's name does not exist, or the value is not appropriate
1508    /// for the flag type.
1509    ///
1510    /// # Panics
1511    ///
1512    /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
1513    #[cfg(any(feature = "cranelift", feature = "winch"))]
1514    pub unsafe fn cranelift_flag_enable(&mut self, flag: &str) -> &mut Self {
1515        self.compiler_config_mut().flags.insert(flag.to_string());
1516        self
1517    }
1518
    /// Allows setting another Cranelift flag defined by a flag name and value. This allows
    /// fine-tuning of Cranelift settings.
    ///
    /// Since Cranelift flags may be unstable, this method should not be considered to be stable
    /// either; other `Config` functions should be preferred for stability.
    ///
    /// # Safety
    ///
    /// This is marked as unsafe, because setting the wrong flag might break invariants,
    /// resulting in execution hazards.
    ///
    /// # Errors
    ///
    /// The validation of the flags is deferred until the engine is being built, and thus may
    /// cause `Engine::new` to fail if the flag's name does not exist, or is incompatible with
    /// other settings.
    ///
    /// For example, the feature `wasm_backtrace` will set `unwind_info` to `true`, but if it's
    /// manually set to false then it will fail.
    ///
    /// # Panics
    ///
    /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
    #[cfg(any(feature = "cranelift", feature = "winch"))]
    pub unsafe fn cranelift_flag_set(&mut self, name: &str, value: &str) -> &mut Self {
        self.compiler_config_mut()
            .settings
            .insert(name.to_string(), value.to_string());
        self
    }
1549
1550    /// Set a custom [`Cache`].
1551    ///
1552    /// To load a cache configuration from a file, use [`Cache::from_file`]. Otherwise, you can
1553    /// create a new cache config using [`CacheConfig::new`] and passing that to [`Cache::new`].
1554    ///
1555    /// If you want to disable the cache, you can call this method with `None`.
1556    ///
1557    /// By default, new configs do not have caching enabled.
1558    /// Every call to [`Module::new(my_wasm)`][crate::Module::new] will recompile `my_wasm`,
1559    /// even when it is unchanged, unless an enabled `CacheConfig` is provided.
1560    ///
1561    /// This method is only available when the `cache` feature of this crate is
1562    /// enabled.
1563    ///
1564    /// [docs]: https://bytecodealliance.github.io/wasmtime/cli-cache.html
1565    #[cfg(feature = "cache")]
1566    pub fn cache(&mut self, cache: Option<Cache>) -> &mut Self {
1567        self.cache = cache;
1568        self
1569    }
1570
1571    /// Sets a custom memory creator.
1572    ///
1573    /// Custom memory creators are used when creating host `Memory` objects or when
1574    /// creating instance linear memories for the on-demand instance allocation strategy.
1575    #[cfg(feature = "runtime")]
1576    pub fn with_host_memory(&mut self, mem_creator: Arc<dyn MemoryCreator>) -> &mut Self {
1577        self.mem_creator = Some(Arc::new(MemoryCreatorProxy(mem_creator)));
1578        self
1579    }
1580
    /// Sets a custom stack creator.
    ///
    /// Custom stack creators are used when creating async instance stacks for
    /// the on-demand instance allocation strategy.
    #[cfg(feature = "async")]
    pub fn with_host_stack(&mut self, stack_creator: Arc<dyn StackCreator>) -> &mut Self {
        self.stack_creator = Some(Arc::new(StackCreatorProxy(stack_creator)));
        self
    }
1590
1591    /// Sets a custom executable-memory publisher.
1592    ///
1593    /// Custom executable-memory publishers are hooks that allow
1594    /// Wasmtime to make certain regions of memory executable when
1595    /// loading precompiled modules or compiling new modules
1596    /// in-process. In most modern operating systems, memory allocated
1597    /// for heap usage is readable and writable by default but not
1598    /// executable. To jump to machine code stored in that memory, we
1599    /// need to make it executable. For security reasons, we usually
1600    /// also make it read-only at the same time, so the executing code
1601    /// can't be modified later.
1602    ///
1603    /// By default, Wasmtime will use the appropriate system calls on
1604    /// the host platform for this work. However, it also allows
1605    /// plugging in a custom implementation via this configuration
1606    /// option. This may be useful on custom or `no_std` platforms,
1607    /// for example, especially where virtual memory is not otherwise
1608    /// used by Wasmtime (no `signals-and-traps` feature).
1609    #[cfg(feature = "runtime")]
1610    pub fn with_custom_code_memory(
1611        &mut self,
1612        custom_code_memory: Option<Arc<dyn CustomCodeMemory>>,
1613    ) -> &mut Self {
1614        self.custom_code_memory = custom_code_memory;
1615        self
1616    }
1617
1618    /// Sets the instance allocation strategy to use.
1619    ///
1620    /// This is notably used in conjunction with
1621    /// [`InstanceAllocationStrategy::Pooling`] and [`PoolingAllocationConfig`].
1622    pub fn allocation_strategy(
1623        &mut self,
1624        strategy: impl Into<InstanceAllocationStrategy>,
1625    ) -> &mut Self {
1626        self.allocation_strategy = strategy.into();
1627        self
1628    }
1629
    /// Specifies the capacity of linear memories, in bytes, in their initial
    /// allocation.
    ///
    /// > Note: this value has important performance ramifications, be sure to
    /// > benchmark when setting this to a non-default value and read over this
    /// > documentation.
    ///
    /// This function will change the size of the initial memory allocation made
    /// for linear memories. This setting is only applicable when the initial
    /// size of a linear memory is below this threshold. Linear memories are
    /// allocated in the virtual address space of the host process with OS APIs
    /// such as `mmap` and this setting affects how large the allocation will
    /// be.
    ///
    /// ## Background: WebAssembly Linear Memories
    ///
    /// WebAssembly linear memories always start with a minimum size and can
    /// possibly grow up to a maximum size. The minimum size is always specified
    /// in a WebAssembly module itself and the maximum size can either be
    /// optionally specified in the module or inherently limited by the index
    /// type. For example for this module:
    ///
    /// ```wasm
    /// (module
    ///     (memory $a 4)
    ///     (memory $b 4096 4096 (pagesize 1))
    ///     (memory $c i64 10)
    /// )
    /// ```
    ///
    /// * Memory `$a` initially allocates 4 WebAssembly pages (256KiB) and can
    ///   grow up to 4GiB, the limit of the 32-bit index space.
    /// * Memory `$b` initially allocates 4096 WebAssembly pages, but in this
    ///   case its page size is 1, so it's 4096 bytes. Memory can also grow no
    ///   further meaning that it will always be 4096 bytes.
    /// * Memory `$c` is a 64-bit linear memory which starts with 640KiB of
    ///   memory and can theoretically grow up to 2^64 bytes, although most
    ///   hosts will run out of memory long before that.
    ///
    /// All operations on linear memories done by wasm are required to be
    /// in-bounds. Any access beyond the end of a linear memory is considered a
    /// trap.
    ///
    /// ## What this setting affects: Virtual Memory
    ///
    /// This setting is used to configure the behavior of the size of the linear
    /// memory allocation performed for each of these memories. For example the
    /// initial linear memory allocation looks like this:
    ///
    /// ```text
    ///              memory_reservation
    ///                    |
    ///          ◄─────────┴────────────────►
    /// ┌───────┬─────────┬──────────────────┬───────┐
    /// │ guard │ initial │ ... capacity ... │ guard │
    /// └───────┴─────────┴──────────────────┴───────┘
    ///  ◄──┬──►                              ◄──┬──►
    ///     │                                    │
    ///     │                             memory_guard_size
    ///     │
    ///     │
    ///  memory_guard_size (if guard_before_linear_memory)
    /// ```
    ///
    /// Memory in the `initial` range is accessible to the instance and can be
    /// read/written by wasm code. Memory in the `guard` regions is never
    /// accessible to wasm code and memory in `capacity` is initially
    /// inaccessible but may become accessible through `memory.grow` instructions
    /// for example.
    ///
    /// This means that this setting is the size of the initial chunk of virtual
    /// memory that a linear memory may grow into.
    ///
    /// ## What this setting affects: Runtime Speed
    ///
    /// This is a performance-sensitive setting which is taken into account
    /// during the compilation process of a WebAssembly module. For example if a
    /// 32-bit WebAssembly linear memory has a `memory_reservation` size of 4GiB
    /// then bounds checks can be elided because `capacity` will be guaranteed
    /// to be unmapped for all addressable bytes that wasm can access (modulo a
    /// few details).
    ///
    /// If `memory_reservation` was something smaller like 256KiB then that
    /// would have a much smaller impact on virtual memory but the compiled code
    /// would then need to have explicit bounds checks to ensure that
    /// loads/stores are in-bounds.
    ///
    /// The goal of this setting is to enable skipping bounds checks in most
    /// modules by default. Some situations which require explicit bounds checks
    /// though are:
    ///
    /// * When `memory_reservation` is smaller than the addressable size of the
    ///   linear memory. For example 64-bit linear memories always need
    ///   bounds checks as they can address the entire virtual address space.
    ///   For 32-bit linear memories a `memory_reservation` minimum size of 4GiB
    ///   is required to elide bounds checks.
    ///
    /// * When linear memories have a page size of 1 then bounds checks are
    ///   required. In this situation virtual memory can't be relied upon
    ///   because that operates at the host page size granularity where wasm
    ///   requires a per-byte level granularity.
    ///
    /// * Configuration settings such as [`Config::signals_based_traps`] can be
    ///   used to disable the use of signal handlers and virtual memory so
    ///   explicit bounds checks are required.
    ///
    /// * When [`Config::memory_guard_size`] is too small a bounds check may be
    ///   required. For 32-bit wasm addresses are actually 33-bit effective
    ///   addresses because loads/stores have a 32-bit static offset to add to
    ///   the dynamic 32-bit address. If the static offset is larger than the
    ///   size of the guard region then an explicit bounds check is required.
    ///
    /// ## What this setting affects: Memory Growth Behavior
    ///
    /// In addition to affecting bounds checks emitted in compiled code this
    /// setting also affects how WebAssembly linear memories are grown. The
    /// `memory.grow` instruction can be used to make a linear memory larger and
    /// this is also affected by APIs such as
    /// [`Memory::grow`](crate::Memory::grow).
    ///
    /// In these situations when the amount being grown is small enough to fit
    /// within the remaining capacity then the linear memory doesn't have to be
    /// moved at runtime. If the capacity runs out though then a new linear
    /// memory allocation must be made and the contents of linear memory is
    /// copied over.
    ///
    /// For example here's a situation where a copy happens:
    ///
    /// * The `memory_reservation` setting is configured to 128KiB.
    /// * A WebAssembly linear memory starts with a single 64KiB page.
    /// * This memory can be grown by one page to contain the full 128KiB of
    ///   memory.
    /// * If grown by one more page, though, then a 192KiB allocation must be
    ///   made and the previous 128KiB of contents are copied into the new
    ///   allocation.
    ///
    /// This growth behavior can have a significant performance impact if lots
    /// of data needs to be copied on growth. Conversely if memory growth never
    /// needs to happen because the capacity will always be large enough then
    /// optimizations can be applied to cache the base pointer of linear memory.
    ///
    /// When memory is grown then the
    /// [`Config::memory_reservation_for_growth`] is used for the new
    /// memory allocation to have memory to grow into.
    ///
    /// When using the pooling allocator via [`PoolingAllocationConfig`] then
    /// memories are never allowed to move so requests for growth are instead
    /// rejected with an error.
    ///
    /// ## When this setting is not used
    ///
    /// This setting is ignored and unused when the initial size of linear
    /// memory is larger than this threshold. For example if this setting is set
    /// to 1MiB but a wasm module requires a 2MiB minimum allocation then this
    /// setting is ignored. In this situation the minimum size of memory will be
    /// allocated along with [`Config::memory_reservation_for_growth`]
    /// after it to grow into.
    ///
    /// That means that this value can be set to zero. That can be useful in
    /// benchmarking to see the overhead of bounds checks for example.
    /// Additionally it can be used to minimize the virtual memory allocated by
    /// Wasmtime.
    ///
    /// ## Default Value
    ///
    /// The default value for this property depends on the host platform. For
    /// 64-bit platforms there's lots of address space available, so the default
    /// configured here is 4GiB. When coupled with the default size of
    /// [`Config::memory_guard_size`] this means that 32-bit WebAssembly linear
    /// memories with 64KiB page sizes will skip almost all bounds checks by
    /// default.
    ///
    /// For 32-bit platforms this value defaults to 10MiB. This means that
    /// bounds checks will be required on 32-bit platforms.
    pub fn memory_reservation(&mut self, bytes: u64) -> &mut Self {
        self.tunables.memory_reservation = Some(bytes);
        self
    }
1808
    /// Indicates whether linear memories may relocate their base pointer at
    /// runtime.
    ///
    /// WebAssembly linear memories either have a maximum size that's explicitly
    /// listed in the type of a memory or inherently limited by the index type
    /// of the memory (e.g. 4GiB for 32-bit linear memories). Depending on how
    /// the linear memory is allocated (see [`Config::memory_reservation`]) it
    /// may be necessary to move the memory in the host's virtual address space
    /// during growth. This option controls whether this movement is allowed or
    /// not.
    ///
    /// An example of a linear memory needing to move is when
    /// [`Config::memory_reservation`] is 0, in which case a linear memory will
    /// be allocated as the minimum size of the memory plus
    /// [`Config::memory_reservation_for_growth`]. When memory grows beyond the
    /// reservation for growth then the memory needs to be relocated.
    ///
    /// When this option is set to `false` then it can have a number of impacts
    /// on how memories work at runtime:
    ///
    /// * Modules can be compiled with static knowledge that the base pointer of
    ///   linear memory never changes to enable optimizations such as
    ///   loop invariant code motion (hoisting the base pointer out of a loop).
    ///
    /// * Memories cannot grow in excess of their original allocation. This
    ///   means that [`Config::memory_reservation`] and
    ///   [`Config::memory_reservation_for_growth`] may need tuning to ensure
    ///   the memory configuration works at runtime.
    ///
    /// The default value for this option is `true`.
    pub fn memory_may_move(&mut self, enable: bool) -> &mut Self {
        self.tunables.memory_may_move = Some(enable);
        self
    }
1843
    /// Configures the size, in bytes, of the guard region used at the end of a
    /// linear memory's address space reservation.
    ///
    /// > Note: this value has important performance ramifications, be sure to
    /// > understand what this value does before tweaking it and benchmarking.
    ///
    /// This setting controls how many bytes are guaranteed to be unmapped after
    /// the virtual memory allocation of a linear memory. When
    /// combined with sufficiently large values of
    /// [`Config::memory_reservation`] (e.g. 4GiB for 32-bit linear memories)
    /// then a guard region can be used to eliminate bounds checks in generated
    /// code.
    ///
    /// This setting additionally can be used to help deduplicate bounds checks
    /// in code that otherwise requires bounds checks. For example with a 4KiB
    /// guard region a 64-bit linear memory which accesses addresses `x+8`
    /// and `x+16` only needs to perform a single bounds check on `x`. If that
    /// bounds check passes then the offset is guaranteed to either reside in
    /// linear memory or the guard region, resulting in deterministic behavior
    /// either way.
    ///
    /// ## How big should the guard be?
    ///
    /// In general, like with configuring [`Config::memory_reservation`], you
    /// probably don't want to change this value from the defaults. Removing
    /// bounds checks is dependent on a number of factors where the size of the
    /// guard region is only one piece of the equation. Other factors include:
    ///
    /// * [`Config::memory_reservation`]
    /// * The index type of the linear memory (e.g. 32-bit or 64-bit)
    /// * The page size of the linear memory
    /// * Other settings such as [`Config::signals_based_traps`]
    ///
    /// Embeddings using virtual memory almost always want at least some guard
    /// region, but otherwise changes from the default should be profiled
    /// locally to see the performance impact.
    ///
    /// ## Default
    ///
    /// The default value for this property is 32MiB on 64-bit platforms. This
    /// allows eliminating almost all bounds checks on loads/stores with an
    /// immediate offset of less than 32MiB. On 32-bit platforms this defaults
    /// to 64KiB.
    pub fn memory_guard_size(&mut self, bytes: u64) -> &mut Self {
        self.tunables.memory_guard_size = Some(bytes);
        self
    }
1891
    /// Configures the size, in bytes, of the extra virtual memory space
    /// reserved after a linear memory is relocated.
    ///
    /// This setting is used in conjunction with [`Config::memory_reservation`]
    /// to configure what happens after a linear memory is relocated in the host
    /// address space. If the initial size of a linear memory exceeds
    /// [`Config::memory_reservation`] or if it grows beyond that size
    /// throughout its lifetime then this setting will be used.
    ///
    /// When a linear memory is relocated it will initially look like this:
    ///
    /// ```text
    ///            memory.size
    ///                 │
    ///          ◄──────┴─────►
    /// ┌───────┬──────────────┬───────┐
    /// │ guard │  accessible  │ guard │
    /// └───────┴──────────────┴───────┘
    ///                         ◄──┬──►
    ///                            │
    ///                     memory_guard_size
    /// ```
    ///
    /// where `accessible` needs to be grown but there's no more memory to grow
    /// into. A new region of the virtual address space will be allocated that
    /// looks like this:
    ///
    /// ```text
    ///                           memory_reservation_for_growth
    ///                                       │
    ///            memory.size                │
    ///                 │                     │
    ///          ◄──────┴─────► ◄─────────────┴───────────►
    /// ┌───────┬──────────────┬───────────────────────────┬───────┐
    /// │ guard │  accessible  │ .. reserved for growth .. │ guard │
    /// └───────┴──────────────┴───────────────────────────┴───────┘
    ///                                                     ◄──┬──►
    ///                                                        │
    ///                                               memory_guard_size
    /// ```
    ///
    /// This means that the memory can grow by up to
    /// `memory_reservation_for_growth` bytes before the entire linear memory
    /// needs to be moved again, at which point another
    /// `memory_reservation_for_growth` bytes will be appended to the size of
    /// the allocation.
    ///
    /// Note that this is currently a simple heuristic for optimizing the growth
    /// of dynamic memories, primarily implemented for the memory64 proposal
    /// where the maximum size of memory is larger than 4GiB. This setting is
    /// unlikely to be a one-size-fits-all style approach and if you're an
    /// embedder running into issues with growth and are interested in having
    /// other growth strategies available here please feel free to [open an
    /// issue on the Wasmtime repository][issue]!
    ///
    /// [issue]: https://github.com/bytecodealliance/wasmtime/issues/new
    ///
    /// ## Default
    ///
    /// For 64-bit platforms this defaults to 2GiB, and for 32-bit platforms
    /// this defaults to 1MiB.
    pub fn memory_reservation_for_growth(&mut self, bytes: u64) -> &mut Self {
        self.tunables.memory_reservation_for_growth = Some(bytes);
        self
    }
1956
1957    /// Configures the initial size, in bytes, to be allocated for GC heaps.
1958    ///
1959    /// This is similar to [`Config::memory_reservation`] but applies to the GC
1960    /// heap rather than to linear memories. See that method for more details
1961    /// on what "reservation" means and the implications of this setting.
1962    ///
1963    /// ## Default
1964    ///
1965    /// If none of the `gc_heap_*` tunables are explicitly configured, they
1966    /// default to the same values as their `memory_*` counterparts. Otherwise,
1967    /// the default value for this property depends on the host platform: for
1968    /// 64-bit platforms this defaults to 4GiB, and for 32-bit platforms this
1969    /// defaults to 10MiB.
1970    pub fn gc_heap_reservation(&mut self, bytes: u64) -> &mut Self {
1971        self.tunables.gc_heap_reservation = Some(bytes);
1972        self
1973    }
1974
1975    /// Configures the size, in bytes, of the guard page region for GC heaps.
1976    ///
1977    /// This is similar to [`Config::memory_guard_size`] but applies to the GC
1978    /// heap rather than to linear memories. See that method for more details on
1979    /// what guard pages are and the implications of this setting.
1980    ///
1981    /// ## Default
1982    ///
1983    /// If none of the `gc_heap_*` tunables are explicitly configured, they
1984    /// default to the same values as their `memory_*` counterparts. Otherwise,
1985    /// the default value for this property is 32MiB on 64-bit platforms and
1986    /// 64KiB on 32-bit platforms.
1987    pub fn gc_heap_guard_size(&mut self, bytes: u64) -> &mut Self {
1988        self.tunables.gc_heap_guard_size = Some(bytes);
1989        self
1990    }
1991
1992    /// Configures the size, in bytes, of the extra virtual memory space
1993    /// reserved after a GC heap is relocated.
1994    ///
1995    /// This is similar to [`Config::memory_reservation_for_growth`] but applies
1996    /// to the GC heap rather than to linear memories. See that method for more
1997    /// details.
1998    ///
1999    /// ## Default
2000    ///
2001    /// If none of the `gc_heap_*` tunables are explicitly configured, they
2002    /// default to the same values as their `memory_*` counterparts. Otherwise,
2003    /// for 64-bit platforms this defaults to 2GiB, and for 32-bit platforms
2004    /// this defaults to 1MiB.
2005    pub fn gc_heap_reservation_for_growth(&mut self, bytes: u64) -> &mut Self {
2006        self.tunables.gc_heap_reservation_for_growth = Some(bytes);
2007        self
2008    }
2009
2010    /// Indicates whether GC heaps are allowed to be reallocated after initial
2011    /// allocation at runtime.
2012    ///
2013    /// This is similar to [`Config::memory_may_move`] but applies to the GC
2014    /// heap rather than to linear memories. See that method for more details.
2015    ///
2016    /// ## Default
2017    ///
2018    /// If none of the `gc_heap_*` tunables are explicitly configured, they
2019    /// default to the same values as their `memory_*` counterparts. Otherwise,
2020    /// the default value for this option is `true`.
2021    pub fn gc_heap_may_move(&mut self, enable: bool) -> &mut Self {
2022        self.tunables.gc_heap_may_move = Some(enable);
2023        self
2024    }
2025
2026    /// Indicates whether a guard region is present before allocations of
2027    /// linear memory.
2028    ///
2029    /// Guard regions before linear memories are never used during normal
2030    /// operation of WebAssembly modules, even if they have out-of-bounds
2031    /// loads. The only purpose for a preceding guard region in linear memory
2032    /// is extra protection against possible bugs in code generators like
2033    /// Cranelift. This setting does not affect performance in any way, but will
2034    /// result in larger virtual memory reservations for linear memories (it
2035    /// won't actually ever use more memory, just use more of the address
2036    /// space).
2037    ///
2038    /// The size of the guard region before linear memory is the same as the
2039    /// guard size that comes after linear memory, which is configured by
2040    /// [`Config::memory_guard_size`].
2041    ///
2042    /// ## Default
2043    ///
2044    /// This value defaults to `true`.
2045    pub fn guard_before_linear_memory(&mut self, enable: bool) -> &mut Self {
2046        self.tunables.guard_before_linear_memory = Some(enable);
2047        self
2048    }
2049
2050    /// Indicates whether to initialize tables lazily, so that instantiation
2051    /// is fast but indirect calls are a little slower. If false, tables
2052    /// are initialized eagerly during instantiation from any active element
2053    /// segments that apply to them.
2054    ///
2055    /// **Note** Disabling this option is not compatible with the Winch compiler.
2056    ///
2057    /// ## Default
2058    ///
2059    /// This value defaults to `true`.
2060    pub fn table_lazy_init(&mut self, table_lazy_init: bool) -> &mut Self {
2061        self.tunables.table_lazy_init = Some(table_lazy_init);
2062        self
2063    }
2064
2065    /// Configure the version information used in serialized and deserialized [`crate::Module`]s.
2066    /// This effects the behavior of [`crate::Module::serialize()`], as well as
2067    /// [`crate::Module::deserialize()`] and related functions.
2068    ///
2069    /// The default strategy is to use the wasmtime crate's Cargo package version.
2070    pub fn module_version(&mut self, strategy: ModuleVersionStrategy) -> Result<&mut Self> {
2071        match strategy {
2072            // This case requires special precondition for assertion in SerializedModule::to_bytes
2073            ModuleVersionStrategy::Custom(ref v) => {
2074                if v.as_bytes().len() > 255 {
2075                    bail!("custom module version cannot be more than 255 bytes: {v}");
2076                }
2077            }
2078            _ => {}
2079        }
2080        self.module_version = strategy;
2081        Ok(self)
2082    }
2083
2084    /// Configure whether wasmtime should compile a module using multiple
2085    /// threads.
2086    ///
2087    /// Disabling this will result in a single thread being used to compile
2088    /// the wasm bytecode.
2089    ///
2090    /// By default parallel compilation is enabled.
2091    #[cfg(feature = "parallel-compilation")]
2092    pub fn parallel_compilation(&mut self, parallel: bool) -> &mut Self {
2093        self.parallel_compilation = parallel;
2094        self
2095    }
2096
2097    /// Configures whether compiled artifacts will contain information to map
2098    /// native program addresses back to the original wasm module.
2099    ///
2100    /// This configuration option is `true` by default and, if enabled,
2101    /// generates the appropriate tables in compiled modules to map from native
2102    /// address back to wasm source addresses. This is used for displaying wasm
2103    /// program counters in backtraces as well as generating filenames/line
2104    /// numbers if so configured as well (and the original wasm module has DWARF
2105    /// debugging information present).
2106    pub fn generate_address_map(&mut self, generate: bool) -> &mut Self {
2107        self.tunables.generate_address_map = Some(generate);
2108        self
2109    }
2110
2111    /// Configures whether copy-on-write memory-mapped data is used to
2112    /// initialize a linear memory.
2113    ///
2114    /// Initializing linear memory via a copy-on-write mapping can drastically
2115    /// improve instantiation costs of a WebAssembly module because copying
2116    /// memory is deferred. Additionally if a page of memory is only ever read
2117    /// from WebAssembly and never written too then the same underlying page of
2118    /// data will be reused between all instantiations of a module meaning that
2119    /// if a module is instantiated many times this can lower the overall memory
2120    /// required needed to run that module.
2121    ///
2122    /// The main disadvantage of copy-on-write initialization, however, is that
2123    /// it may be possible for highly-parallel scenarios to be less scalable. If
2124    /// a page is read initially by a WebAssembly module then that page will be
2125    /// mapped to a read-only copy shared between all WebAssembly instances. If
2126    /// the same page is then written, however, then a private copy is created
2127    /// and swapped out from the read-only version. This also requires an [IPI],
2128    /// however, which can be a significant bottleneck in high-parallelism
2129    /// situations.
2130    ///
2131    /// This feature is only applicable when a WebAssembly module meets specific
2132    /// criteria to be initialized in this fashion, such as:
2133    ///
2134    /// * Only memories defined in the module can be initialized this way.
2135    /// * Data segments for memory must use statically known offsets.
2136    /// * Data segments for memory must all be in-bounds.
2137    ///
2138    /// Modules which do not meet these criteria will fall back to
2139    /// initialization of linear memory based on copying memory.
2140    ///
2141    /// This feature of Wasmtime is also platform-specific:
2142    ///
2143    /// * Linux - this feature is supported for all instances of [`Module`].
2144    ///   Modules backed by an existing mmap (such as those created by
2145    ///   [`Module::deserialize_file`]) will reuse that mmap to cow-initialize
2146    ///   memory. Other instance of [`Module`] may use the `memfd_create`
2147    ///   syscall to create an initialization image to `mmap`.
2148    /// * Unix (not Linux) - this feature is only supported when loading modules
2149    ///   from a precompiled file via [`Module::deserialize_file`] where there
2150    ///   is a file descriptor to use to map data into the process. Note that
2151    ///   the module must have been compiled with this setting enabled as well.
2152    /// * Windows - there is no support for this feature at this time. Memory
2153    ///   initialization will always copy bytes.
2154    ///
2155    /// By default this option is enabled.
2156    ///
2157    /// [`Module::deserialize_file`]: crate::Module::deserialize_file
2158    /// [`Module`]: crate::Module
2159    /// [IPI]: https://en.wikipedia.org/wiki/Inter-processor_interrupt
2160    pub fn memory_init_cow(&mut self, enable: bool) -> &mut Self {
2161        self.tunables.memory_init_cow = Some(enable);
2162        self
2163    }
2164
2165    /// A configuration option to force the usage of `memfd_create` on Linux to
2166    /// be used as the backing source for a module's initial memory image.
2167    ///
2168    /// When [`Config::memory_init_cow`] is enabled, which is enabled by
2169    /// default, module memory initialization images are taken from a module's
2170    /// original mmap if possible. If a precompiled module was loaded from disk
2171    /// this means that the disk's file is used as an mmap source for the
2172    /// initial linear memory contents. This option can be used to force, on
2173    /// Linux, that instead of using the original file on disk a new in-memory
2174    /// file is created with `memfd_create` to hold the contents of the initial
2175    /// image.
2176    ///
2177    /// This option can be used to avoid possibly loading the contents of memory
2178    /// from disk through a page fault. Instead with `memfd_create` the contents
2179    /// of memory are always in RAM, meaning that even page faults which
2180    /// initially populate a wasm linear memory will only work with RAM instead
2181    /// of ever hitting the disk that the original precompiled module is stored
2182    /// on.
2183    ///
2184    /// This option is disabled by default.
2185    pub fn force_memory_init_memfd(&mut self, enable: bool) -> &mut Self {
2186        self.force_memory_init_memfd = enable;
2187        self
2188    }
2189
2190    /// Configures whether or not a coredump should be generated and attached to
2191    /// the [`Error`](crate::Error) when a trap is raised.
2192    ///
2193    /// This option is disabled by default.
2194    #[cfg(feature = "coredump")]
2195    pub fn coredump_on_trap(&mut self, enable: bool) -> &mut Self {
2196        self.coredump_on_trap = enable;
2197        self
2198    }
2199
2200    /// Enables memory error checking for wasm programs.
2201    ///
2202    /// This option is disabled by default.
2203    ///
2204    /// # Panics
2205    ///
2206    /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
2207    #[cfg(any(feature = "cranelift", feature = "winch"))]
2208    pub fn wmemcheck(&mut self, enable: bool) -> &mut Self {
2209        self.wmemcheck = enable;
2210        self.compiler_config_mut().wmemcheck = enable;
2211        self
2212    }
2213
2214    /// Configures the "guaranteed dense image size" for copy-on-write
2215    /// initialized memories.
2216    ///
2217    /// When using the [`Config::memory_init_cow`] feature to initialize memory
2218    /// efficiently (which is enabled by default), compiled modules contain an
2219    /// image of the module's initial heap. If the module has a fairly sparse
2220    /// initial heap, with just a few data segments at very different offsets,
2221    /// this could result in a large region of zero bytes in the image. In
2222    /// other words, it's not very memory-efficient.
2223    ///
2224    /// We normally use a heuristic to avoid this: if less than half
2225    /// of the initialized range (first non-zero to last non-zero
2226    /// byte) of any memory in the module has pages with nonzero
2227    /// bytes, then we avoid creating a memory image for the entire module.
2228    ///
2229    /// However, if the embedder always needs the instantiation-time efficiency
2230    /// of copy-on-write initialization, and is otherwise carefully controlling
2231    /// parameters of the modules (for example, by limiting the maximum heap
2232    /// size of the modules), then it may be desirable to ensure a memory image
2233    /// is created even if this could go against the heuristic above. Thus, we
2234    /// add another condition: there is a size of initialized data region up to
2235    /// which we *always* allow a memory image. The embedder can set this to a
2236    /// known maximum heap size if they desire to always get the benefits of
2237    /// copy-on-write images.
2238    ///
2239    /// In the future we may implement a "best of both worlds"
2240    /// solution where we have a dense image up to some limit, and
2241    /// then support a sparse list of initializers beyond that; this
2242    /// would get most of the benefit of copy-on-write and pay the incremental
2243    /// cost of eager initialization only for those bits of memory
2244    /// that are out-of-bounds. However, for now, an embedder desiring
2245    /// fast instantiation should ensure that this setting is as large
2246    /// as the maximum module initial memory content size.
2247    ///
2248    /// By default this value is 16 MiB.
2249    pub fn memory_guaranteed_dense_image_size(&mut self, size_in_bytes: u64) -> &mut Self {
2250        self.memory_guaranteed_dense_image_size = size_in_bytes;
2251        self
2252    }
2253
2254    /// Whether to enable function inlining during compilation or not.
2255    ///
2256    /// This may result in faster execution at runtime, but adds additional
2257    /// compilation time. Inlining may also enlarge the size of compiled
2258    /// artifacts (for example, the size of the result of
2259    /// [`Engine::precompile_component`](crate::Engine::precompile_component)).
2260    ///
2261    /// Inlining is not supported by all of Wasmtime's compilation strategies;
2262    /// currently, it only Cranelift supports it. This setting will be ignored
2263    /// when using a compilation strategy that does not support inlining, like
2264    /// Winch.
2265    ///
2266    /// The default value for this is `Inlining::No`.
2267    pub fn compiler_inlining(&mut self, inlining: Inlining) -> &mut Self {
2268        self.tunables.inlining = Some(inlining);
2269        self
2270    }
2271
    /// Returns the set of features that the currently selected compiler backend
    /// does not support at all and may panic on.
    ///
    /// Wasmtime strives to reject unknown modules or unsupported modules with
    /// first-class errors instead of panics. Not all compiler backends have the
    /// same level of feature support on all platforms as well. This method
    /// returns a set of features that the currently selected compiler
    /// configuration is known to not support and may panic on. This acts as a
    /// first-level filter on incoming wasm modules/configuration to fail-fast
    /// instead of panicking later on.
    ///
    /// Note that if a feature is not listed here it does not mean that the
    /// backend fully supports the proposal. Instead that means that the backend
    /// doesn't ever panic on the proposal, but errors during compilation may
    /// still be returned. This means that features listed here are definitely
    /// not supported at all, but features not listed here may still be
    /// partially supported. For example at the time of this writing the Winch
    /// backend partially supports simd so it's not listed here. Winch doesn't
    /// fully support simd but unimplemented instructions just return errors.
    fn compiler_panicking_wasm_features(&self) -> WasmFeatures {
        // First we compute the set of features that Wasmtime itself knows;
        // this is a sort of "maximal set" that we invert to create a set
        // of features we _definitely can't support_ because wasmtime
        // has never heard of them.
        let features_known_to_wasmtime = WasmFeatures::empty()
            | WasmFeatures::MUTABLE_GLOBAL
            | WasmFeatures::SATURATING_FLOAT_TO_INT
            | WasmFeatures::SIGN_EXTENSION
            | WasmFeatures::REFERENCE_TYPES
            | WasmFeatures::CALL_INDIRECT_OVERLONG
            | WasmFeatures::MULTI_VALUE
            | WasmFeatures::BULK_MEMORY
            | WasmFeatures::BULK_MEMORY_OPT
            | WasmFeatures::SIMD
            | WasmFeatures::RELAXED_SIMD
            | WasmFeatures::THREADS
            | WasmFeatures::SHARED_EVERYTHING_THREADS
            | WasmFeatures::TAIL_CALL
            | WasmFeatures::FLOATS
            | WasmFeatures::MULTI_MEMORY
            | WasmFeatures::EXCEPTIONS
            | WasmFeatures::MEMORY64
            | WasmFeatures::EXTENDED_CONST
            | WasmFeatures::COMPONENT_MODEL
            | WasmFeatures::FUNCTION_REFERENCES
            | WasmFeatures::GC
            | WasmFeatures::CUSTOM_PAGE_SIZES
            | WasmFeatures::GC_TYPES
            | WasmFeatures::STACK_SWITCHING
            | WasmFeatures::WIDE_ARITHMETIC
            | WasmFeatures::CM_ASYNC
            | WasmFeatures::CM_ASYNC_STACKFUL
            | WasmFeatures::CM_MORE_ASYNC_BUILTINS
            | WasmFeatures::CM_THREADING
            | WasmFeatures::CM_ERROR_CONTEXT
            | WasmFeatures::CM_GC
            | WasmFeatures::CM_MAP
            | WasmFeatures::CM_FIXED_LENGTH_LISTS;

        #[allow(unused_mut, reason = "easier to avoid #[cfg]")]
        let mut unsupported = !features_known_to_wasmtime;

        // Next, widen the unsupported set based on which backend strategy is
        // selected and which target it's compiling for.
        #[cfg(any(feature = "cranelift", feature = "winch"))]
        match self.compiler_config.as_ref().and_then(|c| c.strategy) {
            None | Some(Strategy::Cranelift) => {
                // Pulley at this time fundamentally doesn't support the
                // `threads` proposal, notably shared memory, because Rust can't
                // safely implement loads/stores in the face of shared memory.
                // Stack switching is not implemented, either.
                if self.compiler_target().is_pulley() {
                    unsupported |= WasmFeatures::THREADS;
                    unsupported |= WasmFeatures::STACK_SWITCHING;
                }

                use target_lexicon::*;
                match self.compiler_target() {
                    Triple {
                        architecture: Architecture::X86_64 | Architecture::X86_64h,
                        operating_system:
                            OperatingSystem::Linux
                            | OperatingSystem::MacOSX(_)
                            | OperatingSystem::Darwin(_),
                        ..
                    } => {
                        // Stack switching supported on (non-Pulley) Cranelift.
                    }

                    _ => {
                        // On platforms other than x64 Unix-like, we don't
                        // support stack switching.
                        unsupported |= WasmFeatures::STACK_SWITCHING;
                    }
                }
            }
            Some(Strategy::Winch) => {
                // Winch has no support at all for these proposals; modules
                // using them must be rejected before compilation begins.
                unsupported |= WasmFeatures::GC
                    | WasmFeatures::FUNCTION_REFERENCES
                    | WasmFeatures::RELAXED_SIMD
                    | WasmFeatures::TAIL_CALL
                    | WasmFeatures::GC_TYPES
                    | WasmFeatures::EXCEPTIONS
                    | WasmFeatures::LEGACY_EXCEPTIONS
                    | WasmFeatures::STACK_SWITCHING
                    | WasmFeatures::CM_ASYNC;
                match self.compiler_target().architecture {
                    target_lexicon::Architecture::Aarch64(_) => {
                        unsupported |= WasmFeatures::THREADS;
                    }

                    // Winch doesn't support other non-x64 architectures at this
                    // time either but will return an first-class error for
                    // them.
                    _ => {}
                }
            }
            // NOTE(review): `Auto` is presumably resolved to a concrete
            // strategy before this point — confirm where `compiler_config` is
            // populated.
            Some(Strategy::Auto) => unreachable!(),
        }
        unsupported
    }
2391
2392    /// Calculates the set of features that are enabled for this `Config`.
2393    ///
2394    /// This method internally will start with the an empty set of features to
2395    /// avoid being tied to wasmparser's defaults. Next Wasmtime's set of
2396    /// default features are added to this set, some of which are conditional
2397    /// depending on crate features. Finally explicitly requested features via
2398    /// `wasm_*` methods on `Config` are applied. Everything is then validated
2399    /// later in `Config::validate`.
2400    fn features(&self) -> WasmFeatures {
2401        // Wasmtime by default supports all of the wasm 2.0 version of the
2402        // specification.
2403        let mut features = WasmFeatures::WASM2;
2404
2405        // On-by-default features that wasmtime has. Note that these are all
2406        // subject to the criteria at
2407        // https://docs.wasmtime.dev/contributing-implementing-wasm-proposals.html
2408        // and
2409        // https://docs.wasmtime.dev/stability-wasm-proposals.html
2410        features |= WasmFeatures::MULTI_MEMORY;
2411        features |= WasmFeatures::RELAXED_SIMD;
2412        features |= WasmFeatures::TAIL_CALL;
2413        features |= WasmFeatures::EXTENDED_CONST;
2414        features |= WasmFeatures::MEMORY64;
2415        // NB: if you add a feature above this line please double-check
2416        // https://docs.wasmtime.dev/stability-wasm-proposals.html
2417        // to ensure all requirements are met and/or update the documentation
2418        // there too.
2419
2420        // Set some features to their conditionally-enabled defaults depending
2421        // on crate compile-time features.
2422        features.set(WasmFeatures::GC_TYPES, cfg!(feature = "gc"));
2423        features.set(WasmFeatures::THREADS, cfg!(feature = "threads"));
2424        features.set(
2425            WasmFeatures::COMPONENT_MODEL,
2426            cfg!(feature = "component-model"),
2427        );
2428
2429        // From the default set of proposals remove any that the current
2430        // compiler backend may panic on if the module contains them.
2431        features = features & !self.compiler_panicking_wasm_features();
2432
2433        // After wasmtime's defaults are configured then factor in user requests
2434        // and disable/enable features. Note that the enable/disable sets should
2435        // be disjoint.
2436        debug_assert!((self.enabled_features & self.disabled_features).is_empty());
2437        features &= !self.disabled_features;
2438        features |= self.enabled_features;
2439
2440        features
2441    }
2442
2443    /// Returns the configured compiler target for this `Config`.
2444    pub(crate) fn compiler_target(&self) -> target_lexicon::Triple {
2445        // If a target is explicitly configured, always use that.
2446        if let Some(target) = self.target.clone() {
2447            return target;
2448        }
2449
2450        // If the `build.rs` script determined that this platform uses pulley by
2451        // default, then use Pulley.
2452        if cfg!(default_target_pulley) {
2453            return target_lexicon::Triple::pulley_host();
2454        }
2455
2456        // And at this point the target is for sure the host.
2457        target_lexicon::Triple::host()
2458    }
2459
2460    /// Returns `true` if any of the `gc_heap_*` tunables have been explicitly
2461    /// configured.
2462    fn any_gc_heap_tunables_configured(&self) -> bool {
2463        self.tunables.gc_heap_reservation.is_some()
2464            || self.tunables.gc_heap_guard_size.is_some()
2465            || self.tunables.gc_heap_reservation_for_growth.is_some()
2466            || self.tunables.gc_heap_may_move.is_some()
2467    }
2468
    /// Validates this configuration, returning the finalized `Tunables` and
    /// the resolved set of enabled wasm features on success.
    ///
    /// This checks that the enabled features are supported by the selected
    /// compiler backend, that stack-size and crate-feature combinations are
    /// coherent, and fills in platform-, debug-, and GC-related tunable
    /// defaults before applying the user's explicit tunable settings.
    pub(crate) fn validate(&self) -> Result<(Tunables, WasmFeatures)> {
        let features = self.features();

        // First validate that the selected compiler backend and configuration
        // supports the set of `features` that are enabled. This will help
        // provide more first class errors instead of panics about unsupported
        // features and configurations.
        let unsupported = features & self.compiler_panicking_wasm_features();
        if !unsupported.is_empty() {
            // Report the first offending feature by name; the loop always
            // bails because `unsupported` is non-empty.
            for flag in WasmFeatures::FLAGS.iter() {
                if !unsupported.contains(*flag.value()) {
                    continue;
                }
                bail!(
                    "the wasm_{} feature is not supported on this compiler configuration",
                    flag.name().to_lowercase()
                );
            }

            panic!("should have returned an error by now")
        }

        // Stack-size and crate-feature sanity checks.
        #[cfg(any(feature = "async", feature = "stack-switching"))]
        if self.max_wasm_stack > self.async_stack_size {
            bail!("max_wasm_stack size cannot exceed the async_stack_size");
        }
        if self.max_wasm_stack == 0 {
            bail!("max_wasm_stack size cannot be zero");
        }
        if !cfg!(feature = "wmemcheck") && self.wmemcheck {
            bail!("wmemcheck (memory checker) was requested but is not enabled in this build");
        }

        if !cfg!(feature = "gc") && features.gc_types() {
            bail!("support for GC was disabled at compile time")
        }

        if !cfg!(feature = "gc") && features.contains(WasmFeatures::EXCEPTIONS) {
            bail!("exceptions support requires garbage collection (GC) to be enabled in the build");
        }

        // Record-and-replay modes perform an extra determinism-conflict check.
        match &self.rr_config {
            #[cfg(feature = "rr")]
            RRConfig::Recording | RRConfig::Replaying => {
                self.validate_rr_determinism_conflicts()?;
            }
            RRConfig::None => {}
        };

        let mut tunables = Tunables::default_for_target(&self.compiler_target())?;

        // By default this is enabled with the Cargo feature, and if the feature
        // is missing this is disabled.
        tunables.concurrency_support = cfg!(feature = "component-model-async");

        #[cfg(feature = "rr")]
        {
            tunables.recording = matches!(self.rr_config, RRConfig::Recording);
        }

        // If no target is explicitly specified then further refine `tunables`
        // for the configuration of this host depending on what platform
        // features were found available at compile time. This means that anyone
        // cross-compiling for a customized host will need to further refine
        // compilation options.
        if self.target.is_none() {
            // If this platform doesn't have native signals then change some
            // defaults to account for that. Note that VM guards are turned off
            // here because that's primarily a feature of eliding
            // bounds-checks.
            if !cfg!(has_native_signals) {
                tunables.signals_based_traps = cfg!(has_native_signals);
                tunables.memory_guard_size = 0;
                tunables.gc_heap_guard_size = 0;
            }

            // When virtual memory is not available use slightly different
            // defaults for tunables to be more amenable to `MallocMemory`.
            // Note that these can still be overridden by config options.
            if !cfg!(has_virtual_memory) {
                tunables.memory_reservation = 0;
                tunables.memory_reservation_for_growth = 1 << 20; // 1MB
                tunables.memory_init_cow = false;
                tunables.gc_heap_reservation = 0;
                tunables.gc_heap_reservation_for_growth = 1 << 20; // 1MB
            }
        }

        // If guest-debugging is enabled, we must disable
        // signals-based traps. Do this before we process the user's
        // provided tunables settings so we can detect a conflict with
        // an explicit request to use signals-based traps.
        #[cfg(feature = "debug")]
        if self.tunables.debug_guest == Some(true) {
            tunables.signals_based_traps = false;
        }

        // Inlining currently falls over with the `stack_switch` instruction.
        #[cfg(any(feature = "cranelift", feature = "winch"))]
        if features.contains(WasmFeatures::STACK_SWITCHING) {
            if let Some(inlining) = self.tunables.inlining
                && inlining != Inlining::No
            {
                bail!("cannot enable compiler inlining when stack switching is enabled");
            }
            tunables.inlining = Inlining::No;
        }

        // Layer the user's explicitly-configured settings over the defaults
        // computed above.
        self.tunables.configure(&mut tunables);

        // If no GC heap tunables are explicitly configured, copy the memory
        // tunables' configured values so that GC heaps default to the same
        // configuration as linear memories.
        if !self.any_gc_heap_tunables_configured() {
            tunables.gc_heap_reservation = tunables.memory_reservation;
            tunables.gc_heap_guard_size = tunables.memory_guard_size;
            tunables.gc_heap_reservation_for_growth = tunables.memory_reservation_for_growth;
            tunables.gc_heap_may_move = tunables.memory_may_move;
        }

        // If we're going to compile with winch, we must use the winch calling convention.
        #[cfg(any(feature = "cranelift", feature = "winch"))]
        {
            tunables.winch_callable = self
                .compiler_config
                .as_ref()
                .is_some_and(|c| c.strategy == Some(Strategy::Winch));
        }

        // Resolve the collector to use when GC types are enabled; `Auto` is
        // resolved to a concrete collector via `try_not_auto` first.
        tunables.collector = if features.gc_types() {
            #[cfg(feature = "gc")]
            {
                use wasmtime_environ::Collector as EnvCollector;
                Some(match self.collector.try_not_auto()? {
                    Collector::DeferredReferenceCounting => EnvCollector::DeferredReferenceCounting,
                    Collector::Null => EnvCollector::Null,
                    Collector::Copying => EnvCollector::Copying,
                    Collector::Auto => unreachable!(),
                })
            }
            #[cfg(not(feature = "gc"))]
            bail!("cannot use GC types: the `gc` feature was disabled at compile time")
        } else {
            None
        };

        if tunables.debug_guest {
            ensure!(
                cfg!(feature = "debug"),
                "debug instrumentation support was disabled at compile time"
            );
            ensure!(
                !tunables.signals_based_traps,
                "cannot use signals-based traps with guest debugging enabled"
            );
        }

        // Concurrency support is required for some component model features.
        let requires_concurrency = WasmFeatures::CM_ASYNC
            | WasmFeatures::CM_MORE_ASYNC_BUILTINS
            | WasmFeatures::CM_ASYNC_STACKFUL
            | WasmFeatures::CM_THREADING
            | WasmFeatures::CM_ERROR_CONTEXT;
        if tunables.concurrency_support && !cfg!(feature = "component-model-async") {
            bail!(
                "concurrency support was requested but was not \
                 compiled into this build of Wasmtime"
            )
        }
        if !tunables.concurrency_support && features.intersects(requires_concurrency) {
            bail!(
                "concurrency support must be enabled to use the component \
                 model async or threading features"
            )
        }

        // If the pooling allocator is used and GC is enabled, check that
        // memories and the GC heap are configured identically, since the
        // pooling allocator can't support differently-configured heaps.
        #[cfg(feature = "pooling-allocator")]
        if matches!(
            &self.allocation_strategy,
            InstanceAllocationStrategy::Pooling(_)
        ) && tunables.collector.is_some()
        {
            if tunables.memory_reservation != tunables.gc_heap_reservation {
                bail!(
                    "when using the pooling allocator with GC, `memory_reservation` ({}) \
                     and `gc_heap_reservation` ({}) must be the same",
                    tunables.memory_reservation,
                    tunables.gc_heap_reservation,
                );
            }
            if tunables.memory_guard_size != tunables.gc_heap_guard_size {
                bail!(
                    "when using the pooling allocator with GC, `memory_guard_size` ({}) \
                     and `gc_heap_guard_size` ({}) must be the same",
                    tunables.memory_guard_size,
                    tunables.gc_heap_guard_size,
                );
            }
            if tunables.memory_reservation_for_growth != tunables.gc_heap_reservation_for_growth {
                bail!(
                    "when using the pooling allocator with GC, \
                     `memory_reservation_for_growth` ({}) and \
                     `gc_heap_reservation_for_growth` ({}) must be the same",
                    tunables.memory_reservation_for_growth,
                    tunables.gc_heap_reservation_for_growth,
                );
            }
            if tunables.memory_may_move != tunables.gc_heap_may_move {
                bail!(
                    "when using the pooling allocator with GC, `memory_may_move` ({}) \
                     and `gc_heap_may_move` ({}) must be the same",
                    tunables.memory_may_move,
                    tunables.gc_heap_may_move,
                );
            }
        }

        Ok((tunables, features))
    }
2691
    /// Constructs the instance allocator described by
    /// `self.allocation_strategy`: either on-demand or pooling.
    #[cfg(feature = "runtime")]
    pub(crate) fn build_allocator(
        &self,
        tunables: &Tunables,
    ) -> Result<Box<dyn InstanceAllocator + Send + Sync>> {
        // Async stack parameters only exist when the `async` feature is
        // enabled; otherwise fall back to harmless defaults.
        #[cfg(feature = "async")]
        let (stack_size, stack_zeroing) = (self.async_stack_size, self.async_stack_zeroing);

        #[cfg(not(feature = "async"))]
        let (stack_size, stack_zeroing) = (0, false);

        // `tunables` is only consumed by the pooling arm below; silence the
        // unused-variable warning when `pooling-allocator` is compiled out.
        let _ = tunables;

        match &self.allocation_strategy {
            InstanceAllocationStrategy::OnDemand => {
                // Leading underscore avoids an unused-variable warning when
                // the `async` feature (and thus the `set_stack_creator` call
                // below) is compiled out.
                let mut _allocator = try_new::<Box<_>>(OnDemandInstanceAllocator::new(
                    self.mem_creator.clone(),
                    stack_size,
                    stack_zeroing,
                ))?;
                #[cfg(feature = "async")]
                if let Some(stack_creator) = &self.stack_creator {
                    _allocator.set_stack_creator(stack_creator.clone());
                }
                Ok(_allocator as _)
            }
            #[cfg(feature = "pooling-allocator")]
            InstanceAllocationStrategy::Pooling(config) => {
                // Propagate the async-stack configuration into the pool's own
                // copy of the config before constructing the allocator.
                let mut config = config.config;
                config.stack_size = stack_size;
                config.async_stack_zeroing = stack_zeroing;
                let allocator = try_new::<Box<_>>(
                    crate::runtime::vm::PoolingInstanceAllocator::new(&config, tunables)?,
                )?;
                Ok(allocator as _)
            }
        }
    }
2730
    /// Constructs the garbage-collector runtime selected by `self.collector`.
    ///
    /// Returns `Ok(None)` when the configured Wasm features require no GC
    /// types at all; otherwise returns the collector implementation, or an
    /// error when GC support was compiled out of this build.
    #[cfg(feature = "runtime")]
    pub(crate) fn build_gc_runtime(&self) -> Result<Option<Arc<dyn GcRuntime>>> {
        if !self.features().gc_types() {
            return Ok(None);
        }

        #[cfg(not(feature = "gc"))]
        bail!("cannot create a GC runtime: the `gc` feature was disabled at compile time");

        #[cfg(feature = "gc")]
        #[cfg_attr(
            not(any(feature = "gc-null", feature = "gc-drc", feature = "gc-copying")),
            expect(unreachable_code, reason = "definitions known to be dummy")
        )]
        {
            // `try_not_auto` resolves `Collector::Auto` to a concrete choice,
            // so the `Auto` arm at the bottom can never be reached. For each
            // collector there is a feature-gated "real" arm and an
            // `unreachable!` arm for when that collector's cargo feature is
            // disabled — presumably `try_not_auto` refuses to select a
            // compiled-out collector (TODO confirm against its definition).
            Ok(Some(match self.collector.try_not_auto()? {
                #[cfg(feature = "gc-drc")]
                Collector::DeferredReferenceCounting => {
                    try_new::<Arc<_>>(crate::runtime::vm::DrcCollector::default())? as _
                }
                #[cfg(not(feature = "gc-drc"))]
                Collector::DeferredReferenceCounting => unreachable!(),

                #[cfg(feature = "gc-null")]
                Collector::Null => {
                    try_new::<Arc<_>>(crate::runtime::vm::NullCollector::default())? as _
                }
                #[cfg(not(feature = "gc-null"))]
                Collector::Null => unreachable!(),

                #[cfg(feature = "gc-copying")]
                Collector::Copying => {
                    try_new::<Arc<_>>(crate::runtime::vm::CopyingCollector::default())? as _
                }
                #[cfg(not(feature = "gc-copying"))]
                Collector::Copying => unreachable!(),

                Collector::Auto => unreachable!(),
            }))
        }
    }
2772
2773    #[cfg(feature = "runtime")]
2774    pub(crate) fn build_profiler(&self) -> Result<Box<dyn ProfilingAgent>> {
2775        Ok(match self.profiling_strategy {
2776            ProfilingStrategy::PerfMap => profiling_agent::new_perfmap()?,
2777            ProfilingStrategy::JitDump => profiling_agent::new_jitdump()?,
2778            ProfilingStrategy::VTune => profiling_agent::new_vtune()?,
2779            ProfilingStrategy::None => profiling_agent::new_null(),
2780            ProfilingStrategy::Pulley => profiling_agent::new_pulley()?,
2781        })
2782    }
2783
    /// Finishes compiler-related configuration and constructs the code
    /// generator (Cranelift or Winch) described by this config.
    ///
    /// Takes `self` by value and returns it because compiler settings are
    /// finalized here; `tunables` is updated in-place with the tunables the
    /// compiler ultimately settled on.
    #[cfg(any(feature = "cranelift", feature = "winch"))]
    pub(crate) fn build_compiler(
        mut self,
        tunables: &mut Tunables,
        features: WasmFeatures,
    ) -> Result<(Self, Box<dyn wasmtime_environ::Compiler>)> {
        let target = self.compiler_target();

        // The target passed to the builders below is an `Option<Triple>` where
        // `None` represents the current host with CPU features inferred from
        // the host's CPU itself. The `target` above is not an `Option`, so
        // switch it to `None` in the case that a target wasn't explicitly
        // specified (which indicates no feature inference) and the target
        // matches the host.
        let target_for_builder =
            if self.target.is_none() && target == target_lexicon::Triple::host() {
                None
            } else {
                Some(target.clone())
            };

        // `Strategy::Auto` is expected to have been resolved to a concrete
        // backend before this point (see `Strategy::not_auto`), hence the
        // `unreachable!` arm below.
        let mut compiler = match self.compiler_config_mut().strategy {
            #[cfg(feature = "cranelift")]
            Some(Strategy::Cranelift) => wasmtime_cranelift::builder(target_for_builder)?,
            #[cfg(not(feature = "cranelift"))]
            Some(Strategy::Cranelift) => bail!("cranelift support not compiled in"),
            #[cfg(feature = "winch")]
            Some(Strategy::Winch) => wasmtime_winch::builder(target_for_builder)?,
            #[cfg(not(feature = "winch"))]
            Some(Strategy::Winch) => bail!("winch support not compiled in"),

            None | Some(Strategy::Auto) => unreachable!(),
        };

        if let Some(path) = &self.compiler_config_mut().clif_dir {
            compiler.clif_dir(path)?;
        }

        // If probestack is enabled for a target, Wasmtime will always use the
        // inline strategy which doesn't require us to define a `__probestack`
        // function or similar.
        self.compiler_config_mut()
            .settings
            .insert("probestack_strategy".into(), "inline".into());

        // We enable stack probing by default on all targets.
        // This is required on Windows because of the way Windows
        // commits its stacks, but it's also a good idea on other
        // platforms to ensure guard pages are hit for large frame
        // sizes.
        self.compiler_config_mut()
            .flags
            .insert("enable_probestack".into());

        // The current wasm multivalue implementation depends on this.
        // FIXME(#9510) handle this in wasmtime-cranelift instead.
        self.compiler_config_mut()
            .flags
            .insert("enable_multi_ret_implicit_sret".into());

        // An explicit `native_unwind_info` request must not contradict a
        // Cranelift `unwind_info` setting configured separately.
        if let Some(unwind_requested) = self.native_unwind_info {
            if !self
                .compiler_config_mut()
                .ensure_setting_unset_or_given("unwind_info", &unwind_requested.to_string())
            {
                bail!(
                    "incompatible settings requested for Cranelift and Wasmtime `unwind-info` settings"
                );
            }
        }

        // Unwind information is mandatory when targeting Windows.
        if target.operating_system == target_lexicon::OperatingSystem::Windows {
            if !self
                .compiler_config_mut()
                .ensure_setting_unset_or_given("unwind_info", "true")
            {
                bail!("`native_unwind_info` cannot be disabled on Windows");
            }
        }

        // We require frame pointers for correct stack walking, which is safety
        // critical in the presence of reference types, and otherwise it is just
        // really bad developer experience to get wrong.
        self.compiler_config_mut()
            .settings
            .insert("preserve_frame_pointers".into(), "true".into());

        if !tunables.signals_based_traps {
            let mut ok = self
                .compiler_config_mut()
                .ensure_setting_unset_or_given("enable_table_access_spectre_mitigation", "false");
            ok = ok
                && self.compiler_config_mut().ensure_setting_unset_or_given(
                    "enable_heap_access_spectre_mitigation",
                    "false",
                );

            // Right now spectre-mitigated bounds checks will load from zero so
            // if host-based signal handlers are disabled then that's a mismatch
            // and doesn't work right now. Fixing this will require more thought
            // of how to implement the bounds check in spectre-only mode.
            if !ok {
                bail!(
                    "when signals-based traps are disabled then spectre \
                     mitigations must also be disabled"
                );
            }
        }

        // Relaxed SIMD builds on top of the plain SIMD proposal.
        if features.contains(WasmFeatures::RELAXED_SIMD) && !features.contains(WasmFeatures::SIMD) {
            bail!("cannot disable the simd proposal but enable the relaxed simd proposal");
        }

        // Stack switching needs a platform-appropriate `stack_switch_model`
        // Cranelift setting; bail on platforms with no support at all.
        if features.contains(WasmFeatures::STACK_SWITCHING) {
            use target_lexicon::OperatingSystem;
            let model = match target.operating_system {
                OperatingSystem::Windows => "update_windows_tib",
                OperatingSystem::Linux
                | OperatingSystem::MacOSX(_)
                | OperatingSystem::Darwin(_) => "basic",
                // NOTE(review): this message has a trailing space before the
                // closing quote; left as-is to preserve behavior.
                _ => bail!("stack-switching feature not supported on this platform "),
            };

            if !self
                .compiler_config_mut()
                .ensure_setting_unset_or_given("stack_switch_model", model)
            {
                bail!(
                    "compiler option 'stack_switch_model' must be set to '{model}' on this platform"
                );
            }
        }

        // Apply compiler settings and flags
        compiler.set_tunables(tunables.clone())?;
        for (k, v) in self.compiler_config_mut().settings.iter() {
            compiler.set(k, v)?;
        }
        for flag in self.compiler_config_mut().flags.iter() {
            compiler.enable(flag)?;
        }
        // Read back the tunables the compiler settled on; they were set just
        // above so the `unwrap` cannot fail here.
        *tunables = compiler.tunables().cloned().unwrap();

        #[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
        if let Some(cache_store) = &self.compiler_config_mut().cache_store {
            compiler.enable_incremental_compilation(cache_store.clone())?;
        }

        compiler.wmemcheck(self.compiler_config_mut().wmemcheck);

        Ok((self, compiler.build()?))
    }
2936
2937    /// Internal setting for whether adapter modules for components will have
2938    /// extra WebAssembly instructions inserted performing more debug checks
2939    /// then are necessary.
2940    #[cfg(feature = "component-model")]
2941    pub fn debug_adapter_modules(&mut self, debug: bool) -> &mut Self {
2942        self.tunables.debug_adapter_modules = Some(debug);
2943        self
2944    }
2945
2946    /// Enables clif output when compiling a WebAssembly module.
2947    #[cfg(any(feature = "cranelift", feature = "winch"))]
2948    pub fn emit_clif(&mut self, path: &Path) -> &mut Self {
2949        self.compiler_config_mut().clif_dir = Some(path.to_path_buf());
2950        self
2951    }
2952
2953    /// Configures whether, when on macOS, Mach ports are used for exception
2954    /// handling instead of traditional Unix-based signal handling.
2955    ///
2956    /// WebAssembly traps in Wasmtime are implemented with native faults, for
2957    /// example a `SIGSEGV` will occur when a WebAssembly guest accesses
2958    /// out-of-bounds memory. Handling this can be configured to either use Unix
2959    /// signals or Mach ports on macOS. By default Mach ports are used.
2960    ///
2961    /// Mach ports enable Wasmtime to work by default with foreign
2962    /// error-handling systems such as breakpad which also use Mach ports to
2963    /// handle signals. In this situation Wasmtime will continue to handle guest
2964    /// faults gracefully while any non-guest faults will get forwarded to
2965    /// process-level handlers such as breakpad. Some more background on this
2966    /// can be found in #2456.
2967    ///
2968    /// A downside of using mach ports, however, is that they don't interact
2969    /// well with `fork()`. Forking a Wasmtime process on macOS will produce a
2970    /// child process that cannot successfully run WebAssembly. In this
2971    /// situation traditional Unix signal handling should be used as that's
2972    /// inherited and works across forks.
2973    ///
2974    /// If your embedding wants to use a custom error handler which leverages
2975    /// Mach ports and you additionally wish to `fork()` the process and use
2976    /// Wasmtime in the child process that's not currently possible. Please
2977    /// reach out to us if you're in this bucket!
2978    ///
2979    /// This option defaults to `true`, using Mach ports by default.
2980    pub fn macos_use_mach_ports(&mut self, mach_ports: bool) -> &mut Self {
2981        self.macos_use_mach_ports = mach_ports;
2982        self
2983    }
2984
2985    /// Configures an embedder-provided function, `detect`, which is used to
2986    /// determine if an ISA-specific feature is available on the current host.
2987    ///
2988    /// This function is used to verify that any features enabled for a compiler
2989    /// backend, such as AVX support on x86\_64, are also available on the host.
2990    /// It is undefined behavior to execute an AVX instruction on a host that
2991    /// doesn't support AVX instructions, for example.
2992    ///
2993    /// When the `std` feature is active on this crate then this function is
2994    /// configured to a default implementation that uses the standard library's
2995    /// feature detection. When the `std` feature is disabled then there is no
2996    /// default available and this method must be called to configure a feature
2997    /// probing function.
2998    ///
2999    /// The `detect` function provided is given a string name of an ISA feature.
3000    /// The function should then return:
3001    ///
3002    /// * `Some(true)` - indicates that the feature was found on the host and it
3003    ///   is supported.
3004    /// * `Some(false)` - the feature name was recognized but it was not
3005    ///   detected on the host, for example the CPU is too old.
3006    /// * `None` - the feature name was not recognized and it's not known
3007    ///   whether it's on the host or not.
3008    ///
3009    /// Feature names passed to `detect` match the same feature name used in the
3010    /// Rust standard library. For example `"sse4.2"` is used on x86\_64.
3011    ///
3012    /// # Unsafety
3013    ///
3014    /// This function is `unsafe` because it is undefined behavior to execute
3015    /// instructions that a host does not support. This means that the result of
3016    /// `detect` must be correct for memory safe execution at runtime.
3017    pub unsafe fn detect_host_feature(&mut self, detect: fn(&str) -> Option<bool>) -> &mut Self {
3018        self.detect_host_feature = Some(detect);
3019        self
3020    }
3021
3022    /// Configures Wasmtime to not use signals-based trap handlers, for example
3023    /// disables `SIGILL` and `SIGSEGV` handler registration on Unix platforms.
3024    ///
3025    /// > **Note:** this option has important performance ramifications, be sure
3026    /// > to understand the implications. Wasm programs have been measured to
3027    /// > run up to 2x slower when signals-based traps are disabled.
3028    ///
3029    /// Wasmtime will by default leverage signals-based trap handlers (or the
3030    /// platform equivalent, for example "vectored exception handlers" on
3031    /// Windows) to make generated code more efficient. For example, when
3032    /// Wasmtime can use signals-based traps, it can elide explicit bounds
3033    /// checks for Wasm linear memory accesses, instead relying on virtual
3034    /// memory guard pages to raise a `SIGSEGV` (on Unix) for out-of-bounds
3035    /// accesses, which Wasmtime's runtime then catches and handles. Another
3036    /// example is divide-by-zero: with signals-based traps, Wasmtime can let
3037    /// the hardware raise a trap when the divisor is zero. Without
3038    /// signals-based traps, Wasmtime must explicitly emit additional
3039    /// instructions to check for zero and conditionally branch to a trapping
3040    /// code path.
3041    ///
3042    /// Some environments however may not have access to signal handlers. For
3043    /// example embedded scenarios may not support virtual memory. Other
3044    /// environments where Wasmtime is embedded within the surrounding
3045    /// environment may require that new signal handlers aren't registered due
3046    /// to the global nature of signal handlers. This option exists to disable
3047    /// the signal handler registration when required for these scenarios.
3048    ///
3049    /// When signals-based trap handlers are disabled, then Wasmtime and its
3050    /// generated code will *never* rely on segfaults or other
3051    /// signals. Generated code will be slower because bounds must be explicitly
3052    /// checked along with other conditions like division by zero.
3053    ///
3054    /// The following additional factors can also affect Wasmtime's ability to
3055    /// elide explicit bounds checks and leverage signals-based traps:
3056    ///
3057    /// * The [`Config::memory_reservation`] and [`Config::memory_guard_size`]
3058    ///   settings
3059    /// * The index type of the linear memory (e.g. 32-bit or 64-bit)
3060    /// * The page size of the linear memory
3061    ///
3062    /// When this option is disabled, the
3063    /// `enable_heap_access_spectre_mitigation` and
3064    /// `enable_table_access_spectre_mitigation` Cranelift settings must also be
3065    /// disabled. This means that generated code must have spectre mitigations
3066    /// disabled. This is because spectre mitigations rely on faults from
3067    /// loading from the null address to implement bounds checks.
3068    ///
3069    /// This option defaults to `true`: signals-based trap handlers are enabled
3070    /// by default.
3071    ///
3072    /// > **Note:** Disabling this option is not compatible with the Winch
3073    /// > compiler.
3074    pub fn signals_based_traps(&mut self, enable: bool) -> &mut Self {
3075        self.tunables.signals_based_traps = Some(enable);
3076        self
3077    }
3078
3079    /// Enable/disable GC support in Wasmtime entirely.
3080    ///
3081    /// This flag can be used to gate whether GC infrastructure is enabled or
3082    /// initialized in Wasmtime at all. Wasmtime's GC implementation is required
3083    /// for the [`Self::wasm_gc`] proposal, [`Self::wasm_function_references`],
3084    /// and [`Self::wasm_exceptions`] at this time. None of those proposal can
3085    /// be enabled without also having this option enabled.
3086    ///
3087    /// This option defaults to whether the crate `gc` feature is enabled or
3088    /// not.
3089    pub fn gc_support(&mut self, enable: bool) -> &mut Self {
3090        self.wasm_features(WasmFeatures::GC_TYPES, enable)
3091    }
3092
3093    /// Explicitly indicate or not whether the host is using a hardware float
3094    /// ABI on x86 targets.
3095    ///
3096    /// This configuration option is only applicable on the
3097    /// `x86_64-unknown-none` Rust target and has no effect on other host
3098    /// targets. The `x86_64-unknown-none` Rust target does not support hardware
3099    /// floats by default and uses a "soft float" implementation and ABI. This
3100    /// means that `f32`, for example, is passed in a general-purpose register
3101    /// between functions instead of a floating-point register. This does not
3102    /// match Cranelift's ABI for `f32` where it's passed in floating-point
3103    /// registers.  Cranelift does not have support for a "soft float"
3104    /// implementation where all floating-point operations are lowered to
3105    /// libcalls.
3106    ///
3107    /// This means that for the `x86_64-unknown-none` target the ABI between
3108    /// Wasmtime's libcalls and the host is incompatible when floats are used.
3109    /// This further means that, by default, Wasmtime is unable to load native
3110    /// code when compiled to the `x86_64-unknown-none` target. The purpose of
3111    /// this option is to explicitly allow loading code and bypass this check.
3112    ///
3113    /// Setting this configuration option to `true` indicates that either:
3114    /// (a) the Rust target is compiled with the hard-float ABI manually via
3115    /// `-Zbuild-std` and a custom target JSON configuration, or (b) sufficient
3116    /// x86 features have been enabled in the compiler such that float libcalls
3117    /// will not be used in Wasmtime. For (a) there is no way in Rust at this
3118    /// time to detect whether a hard-float or soft-float ABI is in use on
3119    /// stable Rust, so this manual opt-in is required. For (b) the only
3120    /// instance where Wasmtime passes a floating-point value in a register
3121    /// between the host and compiled wasm code is with libcalls.
3122    ///
3123    /// Float-based libcalls are only used when the compilation target for a
3124    /// wasm module has insufficient target features enabled for native
3125    /// support. For example SSE4.1 is required for the `f32.ceil` WebAssembly
3126    /// instruction to be compiled to a native instruction. If SSE4.1 is not
3127    /// enabled then `f32.ceil` is translated to a "libcall" which is
3128    /// implemented on the host. Float-based libcalls can be avoided with
3129    /// sufficient target features enabled, for example:
3130    ///
3131    /// * `self.cranelift_flag_enable("has_sse3")`
3132    /// * `self.cranelift_flag_enable("has_ssse3")`
3133    /// * `self.cranelift_flag_enable("has_sse41")`
3134    /// * `self.cranelift_flag_enable("has_sse42")`
3135    /// * `self.cranelift_flag_enable("has_fma")`
3136    ///
3137    /// Note that when these features are enabled Wasmtime will perform a
3138    /// runtime check to determine that the host actually has the feature
3139    /// present.
3140    ///
3141    /// For some more discussion see [#11506].
3142    ///
3143    /// [#11506]: https://github.com/bytecodealliance/wasmtime/issues/11506
3144    ///
3145    /// # Safety
3146    ///
3147    /// This method is not safe because it cannot be detected in Rust right now
3148    /// whether the host is compiled with a soft or hard float ABI. Additionally
3149    /// if the host is compiled with a soft float ABI disabling this check does
3150    /// not ensure that the wasm module in question has zero usage of floats
3151    /// in the boundary to the host.
3152    ///
3153    /// Safely using this method requires one of:
3154    ///
3155    /// * The host target is compiled to use hardware floats.
3156    /// * Wasm modules loaded are compiled with enough x86 Cranelift features
3157    ///   enabled to avoid float-related hostcalls.
3158    pub unsafe fn x86_float_abi_ok(&mut self, enable: bool) -> &mut Self {
3159        self.x86_float_abi_ok = Some(enable);
3160        self
3161    }
3162
3163    /// Enable or disable the ability to create a
3164    /// [`SharedMemory`](crate::SharedMemory).
3165    ///
3166    /// The WebAssembly threads proposal, configured by [`Config::wasm_threads`]
3167    /// is on-by-default but there are enough deficiencies in Wasmtime's
3168    /// implementation and API integration that creation of a shared memory is
3169    /// disabled by default. This configuration knob can be used to enable this.
3170    ///
3171    /// When enabling this method be aware that wasm threads are, at this time,
3172    /// a [tier 2
3173    /// feature](https://docs.wasmtime.dev/stability-tiers.html#tier-2) in
3174    /// Wasmtime meaning that it will not receive security updates or fixes to
3175    /// historical releases. Additionally security CVEs will not be issued for
3176    /// bugs in the implementation.
3177    ///
3178    /// This option is `false` by default.
3179    pub fn shared_memory(&mut self, enable: bool) -> &mut Self {
3180        self.shared_memory = enable;
3181        self
3182    }
3183
3184    /// Specifies whether support for concurrent execution of WebAssembly is
3185    /// supported within this store.
3186    ///
3187    /// This configuration option affects whether runtime data structures are
3188    /// initialized within a `Store` on creation to support concurrent execution
3189    /// of WebAssembly guests. This is primarily applicable to the
3190    /// [`Config::wasm_component_model_async`] configuration which is the first
3191    /// time Wasmtime has supported concurrent execution of guests. This
3192    /// configuration option, for example, enables usage of
3193    /// [`Store::run_concurrent`], [`Func::call_concurrent`], [`StreamReader`],
3194    /// etc.
3195    ///
3196    /// This configuration option can be manually disabled to avoid initializing
3197    /// data structures in the [`Store`] related to concurrent execution. When
3198    /// this option is disabled then APIs related to concurrency will all fail
3199    /// with a panic. For example [`Store::run_concurrent`] will panic, creating
3200    /// a [`StreamReader`] will panic, etc.
3201    ///
3202    /// The value of this option additionally affects whether a [`Config`] is
3203    /// valid and the default set of enabled WebAssembly features. If this
3204    /// option is disabled then component-model features related to concurrency
3205    /// will all be disabled. If this option is enabled, then the options will
3206    /// retain their normal defaults. It is not valid to create a [`Config`]
3207    /// with component-model-async explicitly enabled and this option explicitly
3208    /// disabled, however.
3209    ///
3210    /// This option defaults to `true`.
3211    ///
3212    /// [`Store`]: crate::Store
3213    /// [`Store::run_concurrent`]: crate::Store::run_concurrent
3214    /// [`Func::call_concurrent`]: crate::component::Func::call_concurrent
3215    /// [`StreamReader`]: crate::component::StreamReader
3216    pub fn concurrency_support(&mut self, enable: bool) -> &mut Self {
3217        self.tunables.concurrency_support = Some(enable);
3218        self
3219    }
3220
3221    /// Validate if the current configuration has conflicting overrides that prevent
3222    /// execution determinism. Returns an error if a conflict exists.
3223    ///
3224    /// Note: Keep this in sync with [`Config::enforce_determinism`].
3225    #[inline]
3226    #[cfg(feature = "rr")]
3227    pub(crate) fn validate_rr_determinism_conflicts(&self) -> Result<()> {
3228        if let Some(v) = self.tunables.relaxed_simd_deterministic {
3229            if v == false {
3230                bail!("Relaxed deterministic SIMD cannot be disabled when determinism is enforced");
3231            }
3232        }
3233        #[cfg(any(feature = "cranelift", feature = "winch"))]
3234        if let Some(v) = self
3235            .compiler_config
3236            .as_ref()
3237            .and_then(|c| c.settings.get("enable_nan_canonicalization"))
3238        {
3239            if v != "true" {
3240                bail!("NaN canonicalization cannot be disabled when determinism is enforced");
3241            }
3242        }
3243        Ok(())
3244    }
3245
3246    /// Enable execution trace recording or replaying to the configuration.
3247    ///
3248    /// When either recording/replaying are enabled, validation fails if settings
3249    /// that control determinism are not set appropriately. In particular, RR requires
3250    /// doing the following:
3251    /// * Enabling NaN canonicalization with [`Config::cranelift_nan_canonicalization`].
3252    /// * Enabling deterministic relaxed SIMD with [`Config::relaxed_simd_deterministic`].
3253    #[inline]
3254    pub fn rr(&mut self, cfg: RRConfig) -> &mut Self {
3255        self.rr_config = cfg;
3256        self
3257    }
3258}
3259
3260impl Default for Config {
3261    fn default() -> Config {
3262        Config::new()
3263    }
3264}
3265
3266impl fmt::Debug for Config {
3267    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
3268        let mut f = f.debug_struct("Config");
3269
3270        // Not every flag in WasmFeatures can be enabled as part of creating
3271        // a Config. This impl gives a complete picture of all WasmFeatures
3272        // enabled, and doesn't require maintenance by hand (which has become out
3273        // of date in the past), at the cost of possible confusion for why
3274        // a flag in this set doesn't have a Config setter.
3275        let features = self.features();
3276        for flag in WasmFeatures::FLAGS.iter() {
3277            f.field(
3278                &format!("wasm_{}", flag.name().to_lowercase()),
3279                &features.contains(*flag.value()),
3280            );
3281        }
3282
3283        f.field("parallel_compilation", &self.parallel_compilation);
3284        #[cfg(any(feature = "cranelift", feature = "winch"))]
3285        {
3286            f.field("compiler_config", &self.compiler_config);
3287        }
3288
3289        self.tunables.format(&mut f);
3290        f.finish()
3291    }
3292}
3293
/// Possible compilation strategies for a wasm module.
///
/// This is used as an argument to the [`Config::strategy`] method.
#[non_exhaustive]
#[derive(PartialEq, Eq, Clone, Debug, Copy)]
pub enum Strategy {
    /// An indicator that the compilation strategy should be automatically
    /// selected.
    ///
    /// This is generally what you want for most projects and indicates that
    /// the `wasmtime` crate itself should make the decision about the best
    /// code generator for a wasm module.
    ///
    /// Currently this always defaults to Cranelift, but the default value may
    /// change over time.
    Auto,

    /// Currently the default backend, Cranelift aims to be a reasonably fast
    /// code generator which generates high quality machine code.
    Cranelift,

    /// A low-latency baseline compiler for WebAssembly.
    ///
    /// For details regarding ISA support and Wasm proposal support see
    /// <https://docs.wasmtime.dev/stability-tiers.html#current-tier-status>.
    Winch,
}
3320
#[cfg(any(feature = "winch", feature = "cranelift"))]
impl Strategy {
    /// Resolves [`Strategy::Auto`] to a concrete backend based on the compiler
    /// features enabled at build time, preferring Cranelift over Winch.
    ///
    /// Returns `None` only when `Auto` was requested but neither backend was
    /// compiled in; any explicit, non-`Auto` strategy is returned as-is.
    fn not_auto(&self) -> Option<Strategy> {
        if let Strategy::Auto = self {
            if cfg!(feature = "cranelift") {
                Some(Strategy::Cranelift)
            } else if cfg!(feature = "winch") {
                Some(Strategy::Winch)
            } else {
                None
            }
        } else {
            Some(*self)
        }
    }
}
3338
/// Possible garbage collector implementations for Wasm.
///
/// This is used as an argument to the [`Config::collector`] method.
///
/// The properties of Wasmtime's available collectors are summarized in the
/// following table:
///
/// | Collector                   | Collects Garbage[^1]  | Latency[^2] | Throughput[^3] | Allocation Speed[^4] | Heap Utilization[^5] |
/// |-----------------------------|-----------------------|-------------|----------------|----------------------|----------------------|
/// | `DeferredReferenceCounting` | Yes, but not cycles   | 🙂         | 🙁             | 😐                   | 😐                  |
/// | `Null`                      | No                    | 🙂         | 🙂             | 🙂                   | 🙂                  |
/// | `Copying`[^copying]         | Yes, including cycles | 🙁         | 🙂             | 🙂                   | 🙁                  |
///
/// [^1]: Whether or not the collector is capable of collecting garbage and cyclic garbage.
///
/// [^2]: How long the Wasm program is paused during garbage
///       collections. Shorter is better. In general, better latency implies
///       worse throughput and vice versa.
///
/// [^3]: How fast the Wasm program runs when using this collector. Roughly
///       equivalent to the number of Wasm instructions executed per
///       second. Faster is better. In general, better throughput implies worse
///       latency and vice versa.
///
/// [^4]: How fast can individual objects be allocated?
///
/// [^5]: How many objects can the collector fit into N bytes of memory? That
///       is, how much space for bookkeeping and metadata does this collector
///       require? Less space taken up by metadata means more space for
///       additional objects. Reference counts are larger than mark bits and
///       free lists are larger than bump pointers, for example.
///
/// [^copying]: The copying collector is still under construction and is not yet
///             functional.
#[non_exhaustive]
#[derive(PartialEq, Eq, Clone, Debug, Copy)]
pub enum Collector {
    /// An indicator that the garbage collector should be automatically
    /// selected.
    ///
    /// This is generally what you want for most projects and indicates that the
    /// `wasmtime` crate itself should make the decision about what the best
    /// collector for a wasm module is.
    ///
    /// Currently this always defaults to the deferred reference-counting
    /// collector, but the default value may change over time.
    Auto,

    /// The deferred reference-counting collector.
    ///
    /// A reference-counting collector, generally trading improved latency for
    /// worsened throughput. However, to avoid the largest overheads of
    /// reference counting, it avoids manipulating reference counts for Wasm
    /// objects on the stack. Instead, it will hold a reference count for an
    /// over-approximation of all objects that are currently on the stack, trace
    /// the stack during collection to find the precise set of on-stack roots,
    /// and decrement the reference count of any object that was in the
    /// over-approximation but not the precise set. This improves throughput,
    /// compared to "pure" reference counting, by performing many fewer
    /// refcount-increment and -decrement operations. The cost is the increased
    /// latency associated with tracing the stack.
    ///
    /// This collector cannot currently collect cycles; they will leak until the
    /// GC heap's store is dropped.
    DeferredReferenceCounting,

    /// The null collector.
    ///
    /// This collector does not actually collect any garbage. It simply
    /// allocates objects until it runs out of memory, at which point further
    /// object allocation attempts will trap.
    ///
    /// This collector is useful for incredibly short-running Wasm instances
    /// where additionally you would rather halt an over-allocating Wasm program
    /// than spend time collecting its garbage to allow it to keep running. It
    /// is also useful for measuring the overheads associated with other
    /// collectors, as this collector imposes as close to zero throughput and
    /// latency overhead as possible.
    Null,

    /// The copying collector.
    ///
    /// A tracing collector that splits the GC heap in half, bump-allocates
    /// objects in one half until it fills up, and then does a GC and copies
    /// live objects into the other half, and repeats the process. It has fast
    /// allocation, collects cyclic garbage, and good collection throughput,
    /// however it suffers from poor latency due to its stop-the-world
    /// collections and poor heap utilization due to only using half the GC
    /// heap's full capacity at any given time.
    ///
    /// Note that this collector is still under construction and is not yet
    /// functional.
    Copying,
}
3433
3434impl Default for Collector {
3435    fn default() -> Collector {
3436        Collector::Auto
3437    }
3438}
3439
#[cfg(feature = "gc")]
impl Collector {
    /// Resolves `Collector::Auto` into a concrete collector based on which GC
    /// features were enabled at compile time, preferring the deferred
    /// reference-counting collector over the null collector. Returns `None`
    /// when no collector was compiled in; non-`Auto` values are returned
    /// unchanged. Note that `Auto` never resolves to `Copying`; that collector
    /// must be selected explicitly.
    fn not_auto(&self) -> Option<Collector> {
        match self {
            Collector::Auto => {
                if cfg!(feature = "gc-drc") {
                    Some(Collector::DeferredReferenceCounting)
                } else if cfg!(feature = "gc-null") {
                    Some(Collector::Null)
                } else {
                    None
                }
            }
            other => Some(*other),
        }
    }

    /// Like `not_auto`, but additionally validates that the resolved
    /// collector's corresponding Cargo feature (`gc-drc`, `gc-null`, or
    /// `gc-copying`) was enabled at compile time, returning a descriptive
    /// error otherwise.
    fn try_not_auto(&self) -> Result<Self> {
        match self.not_auto() {
            // Each collector has a pair of arms: when its feature is enabled
            // the resolved collector is usable as-is, otherwise report which
            // feature was missing at compile time.
            #[cfg(feature = "gc-drc")]
            Some(c @ Collector::DeferredReferenceCounting) => Ok(c),
            #[cfg(not(feature = "gc-drc"))]
            Some(Collector::DeferredReferenceCounting) => bail!(
                "cannot create an engine using the deferred reference-counting \
                 collector because the `gc-drc` feature was not enabled at \
                 compile time",
            ),

            #[cfg(feature = "gc-null")]
            Some(c @ Collector::Null) => Ok(c),
            #[cfg(not(feature = "gc-null"))]
            Some(Collector::Null) => bail!(
                "cannot create an engine using the null collector because \
                 the `gc-null` feature was not enabled at compile time",
            ),

            #[cfg(feature = "gc-copying")]
            Some(c @ Collector::Copying) => Ok(c),
            #[cfg(not(feature = "gc-copying"))]
            Some(Collector::Copying) => bail!(
                "cannot create an engine using the copying collector because \
                 the `gc-copying` feature was not enabled at compile time",
            ),

            // `not_auto` never returns `Auto`.
            Some(Collector::Auto) => unreachable!(),

            None => bail!(
                "cannot create an engine with GC support when none of the \
                 collectors are available; enable one of the following \
                 features: `gc-drc`, `gc-null`, `gc-copying`",
            ),
        }
    }
}
3494
/// Possible optimization levels for the Cranelift codegen backend.
#[non_exhaustive]
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum OptLevel {
    /// No optimizations performed, minimizes compilation time by disabling most
    /// optimizations.
    None,
    /// Generates the fastest possible code, but may take longer to compile.
    Speed,
    /// Similar to [`OptLevel::Speed`], but also performs transformations aimed
    /// at reducing code size.
    SpeedAndSize,
}
3508
/// Possible register allocator algorithms for the Cranelift codegen backend.
#[non_exhaustive]
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum RegallocAlgorithm {
    /// Generates the fastest possible code, but may take longer.
    ///
    /// This algorithm performs "backtracking", which means that it may
    /// undo its earlier work and retry as it discovers conflicts. This
    /// results in better register utilization, producing fewer spills
    /// and moves, but can cause super-linear compile runtime.
    Backtracking,
    /// Generates acceptable code very quickly.
    ///
    /// This algorithm performs a single pass through the code,
    /// guaranteed to work in linear time. (Note that the rest of
    /// Cranelift is not necessarily guaranteed to run in linear time,
    /// however.) It cannot undo earlier decisions, and it cannot
    /// foresee constraints or issues that may occur further ahead in
    /// the code, so the code may have more spills and moves as a
    /// result.
    ///
    /// > **Note**: This algorithm is not yet production-ready and has
    /// > historically had known problems. It is not recommended to enable this
    /// > algorithm for security-sensitive applications and the Wasmtime project
    /// > does not consider this configuration option for issuing security
    /// > advisories at this time.
    SinglePass,
}
3537
/// Select which profiling technique to support.
// `Eq` and `Hash` are derived for consistency with the other configuration
// enums in this module (e.g. `Collector`, `OptLevel`, `Enabled`), allowing
// this type to be used in hash-based collections; both are additive and
// backward-compatible.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum ProfilingStrategy {
    /// No profiler support.
    None,

    /// Collect function name information as the "perf map" file format, used with `perf` on Linux.
    PerfMap,

    /// Collect profiling info for "jitdump" file format, used with `perf` on
    /// Linux.
    JitDump,

    /// Collect profiling info using the "ittapi", used with `VTune` on Linux.
    VTune,

    /// Support for profiling Pulley, Wasmtime's interpreter. Note that enabling
    /// this at runtime requires enabling the `profile-pulley` Cargo feature at
    /// compile time.
    Pulley,
}
3559
/// Select how wasm backtrace detailed information is handled.
// `PartialEq`/`Eq` are derived for consistency with the other configuration
// enums in this module (e.g. `Collector`, `OptLevel`, `Enabled`); the addition
// is purely additive and backward-compatible.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum WasmBacktraceDetails {
    /// Support is unconditionally enabled and wasmtime will parse and read
    /// debug information.
    Enable,

    /// Support is disabled, and wasmtime will not parse debug information for
    /// backtrace details.
    Disable,

    /// Support for backtrace details is conditional on the
    /// `WASMTIME_BACKTRACE_DETAILS` environment variable.
    Environment,
}
3575
/// Describes the tri-state configuration of features such as MPK or
/// `PAGEMAP_SCAN`.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)]
pub enum Enabled {
    /// Enable this feature if it's detected on the host system, otherwise leave
    /// it disabled.
    Auto,
    /// Enable this feature and fail configuration if the feature is not
    /// detected on the host system.
    Yes,
    /// Do not enable this feature, even if the host system supports it.
    No,
}
3588
/// Configuration options used with [`InstanceAllocationStrategy::Pooling`] to
/// change the behavior of the pooling instance allocator.
///
/// This structure has a builder-style API in the same manner as [`Config`] and
/// is configured with [`Config::allocation_strategy`].
///
/// Note that usage of the pooling allocator does not affect compiled
/// WebAssembly code. Compiled `*.cwasm` files, for example, are usable both
/// with and without the pooling allocator.
///
/// ## Advantages of Pooled Allocation
///
/// The main benefit of the pooling allocator is to make WebAssembly
/// instantiation both faster and more scalable in terms of parallelism.
/// Allocation is faster because virtual memory is already configured and ready
/// to go within the pool, there's no need to [`mmap`] (for example on Unix) a
/// new region and configure it with guard pages. By avoiding [`mmap`] this
/// avoids whole-process virtual memory locks which can improve scalability and
/// performance through avoiding this.
///
/// Additionally with pooled allocation it's possible to create "affine slots"
/// to a particular WebAssembly module or component over time. For example if
/// the same module is instantiated multiple times over time the pooling
/// allocator will, by default, attempt to reuse the same slot. This means that
/// the slot has been pre-configured and can retain virtual memory mappings for
/// a copy-on-write image, for example (see [`Config::memory_init_cow`] for
/// more information).
/// This means that in a steady state instance deallocation is a single
/// [`madvise`] to reset linear memory to its original contents followed by a
/// single (optional) [`mprotect`] during the next instantiation to shrink
/// memory back to its original size. Compared to non-pooled allocation this
/// avoids the need to [`mmap`] a new region of memory, [`munmap`] it, and
/// [`mprotect`] regions too.
///
/// Another benefit of pooled allocation is that it's possible to configure
/// things such that no virtual memory management is required at all in a steady
/// state. For example a pooling allocator can be configured with:
///
/// * [`Config::memory_init_cow`] disabled
/// * [`Config::memory_guard_size`] disabled
/// * [`Config::memory_reservation`] shrunk to minimal size
/// * [`PoolingAllocationConfig::table_keep_resident`] sufficiently large
/// * [`PoolingAllocationConfig::linear_memory_keep_resident`] sufficiently large
///
/// With all these options in place no virtual memory tricks are used at all and
/// everything is manually managed by Wasmtime (for example resetting memory is
/// a `memset(0)`). This is not as fast in a single-threaded scenario but can
/// provide benefits in high-parallelism situations as no virtual memory locks
/// or IPIs need to happen.
///
/// ## Disadvantages of Pooled Allocation
///
/// Despite the above advantages to instantiation performance the pooling
/// allocator is not enabled by default in Wasmtime. One reason is that the
/// performance advantages are not necessarily portable, for example while the
/// pooling allocator works on Windows it has not been tuned for performance on
/// Windows in the same way it has on Linux.
///
/// Additionally the main cost of the pooling allocator is that it requires a
/// very large reservation of virtual memory (on the order of most of the
/// addressable virtual address space). WebAssembly 32-bit linear memories in
/// Wasmtime are, by default 4G address space reservations with a small guard
/// region both before and after the linear memory. Memories in the pooling
/// allocator are contiguous which means that we only need a guard after linear
/// memory because the previous linear memory's slot post-guard is our own
/// pre-guard. This means that, by default, the pooling allocator uses roughly
/// 4G of virtual memory per WebAssembly linear memory slot. 4G of virtual
/// memory is 32 bits of a 64-bit address. Many 64-bit systems can only
/// actually use 48-bit addresses by default (although this can be extended on
/// architectures nowadays too), and of those 48 bits one of them is reserved
/// to indicate kernel-vs-userspace. This leaves 47-32=15 bits left,
/// meaning you can only have at most 32k slots of linear memories on many
/// systems by default. This is a relatively small number and shows how the
/// pooling allocator can quickly exhaust all of virtual memory.
///
/// Another disadvantage of the pooling allocator is that it may keep memory
/// alive when nothing is using it. A previously used slot for an instance might
/// have paged-in memory that will not get paged out until the
/// [`Engine`](crate::Engine) owning the pooling allocator is dropped. While
/// suitable for some applications this behavior may not be suitable for all
/// applications.
///
/// Finally the last disadvantage of the pooling allocator is that the
/// configuration values for the maximum number of instances, memories, tables,
/// etc, must all be fixed up-front. There's not always a clear answer as to
/// what these values should be so not all applications may be able to work
/// with this constraint.
///
/// [`madvise`]: https://man7.org/linux/man-pages/man2/madvise.2.html
/// [`mprotect`]: https://man7.org/linux/man-pages/man2/mprotect.2.html
/// [`mmap`]: https://man7.org/linux/man-pages/man2/mmap.2.html
/// [`munmap`]: https://man7.org/linux/man-pages/man2/munmap.2.html
#[cfg(feature = "pooling-allocator")]
#[derive(Debug, Clone, Default)]
pub struct PoolingAllocationConfig {
    config: crate::runtime::vm::PoolingInstanceAllocatorConfig,
}
3685
3686#[cfg(feature = "pooling-allocator")]
3687impl PoolingAllocationConfig {
3688    /// Returns a new configuration builder with all default settings
3689    /// configured.
3690    pub fn new() -> PoolingAllocationConfig {
3691        PoolingAllocationConfig::default()
3692    }
3693
    /// Configures the maximum number of "unused warm slots" to retain in the
    /// pooling allocator.
    ///
    /// The pooling allocator operates over slots to allocate from, and each
    /// slot is considered "cold" if it's never been used before or "warm" if
    /// it's been used by some module in the past. Slots in the pooling
    /// allocator additionally track an "affinity" flag to a particular core
    /// wasm module. When a module is instantiated into a slot then the slot is
    /// considered affine to that module, even after the instance has been
    /// deallocated.
    ///
    /// When a new instance is created then a slot must be chosen, and the
    /// current algorithm for selecting a slot is:
    ///
    /// * If there are slots that are affine to the module being instantiated,
    ///   then the most recently used slot is selected to be allocated from.
    ///   This is done to improve reuse of resources such as memory mappings and
    ///   additionally try to benefit from temporal locality for things like
    ///   caches.
    ///
    /// * Otherwise if there are more than N affine slots to other modules, then
    ///   one of those affine slots is chosen to be allocated. The slot chosen
    ///   is picked on a least-recently-used basis.
    ///
    /// * Finally, if there are fewer than N affine slots to other modules, then
    ///   the non-affine slots are allocated from.
    ///
    /// This setting, `max_unused_warm_slots`, is the value for N in the above
    /// algorithm. The purpose of this setting is to have a knob over the RSS
    /// impact of "unused slots" for a long-running wasm server.
    ///
    /// If this setting is set to 0, for example, then affine slots are
    /// aggressively reused on a least-recently-used basis. A "cold" slot is
    /// only used if there are no affine slots available to allocate from. This
    /// means that the set of slots used over the lifetime of a program is the
    /// same as the maximum concurrent number of wasm instances.
    ///
    /// If this setting is set to infinity, however, then cold slots are
    /// prioritized to be allocated from. This means that the set of slots used
    /// over the lifetime of a program will approach
    /// [`PoolingAllocationConfig::total_memories`], or the maximum number of
    /// slots in the pooling allocator.
    ///
    /// Wasmtime does not aggressively decommit all resources associated with a
    /// slot when the slot is not in use. For example the
    /// [`PoolingAllocationConfig::linear_memory_keep_resident`] option can be
    /// used to keep memory associated with a slot, even when it's not in use.
    /// This means that the total set of used slots in the pooling instance
    /// allocator can impact the overall RSS usage of a program.
    ///
    /// The default value for this option is `100`.
    pub fn max_unused_warm_slots(&mut self, max: u32) -> &mut Self {
        self.config.max_unused_warm_slots = max;
        self
    }
3749
    /// The target number of decommits to perform per batch.
    ///
    /// This is not precise, as we can queue up decommits at times when we
    /// aren't prepared to immediately flush them, and so we may go over this
    /// target size occasionally.
    ///
    /// A batch size of one effectively disables batching.
    ///
    /// Defaults to `1`.
    pub fn decommit_batch_size(&mut self, batch_size: usize) -> &mut Self {
        self.config.decommit_batch_size = batch_size;
        self
    }
3763
    /// How much memory, in bytes, to keep resident for async stacks allocated
    /// with the pooling allocator.
    ///
    /// When [`Config::async_stack_zeroing`] is enabled then Wasmtime will reset
    /// the contents of async stacks back to zero upon deallocation. This option
    /// can be used to perform the zeroing operation with `memset`, up to the
    /// threshold of bytes configured here, instead of using system calls to
    /// reset the stack to zero.
    ///
    /// Note that when using this option the memory with async stacks will
    /// never be decommitted.
    #[cfg(feature = "async")]
    pub fn async_stack_keep_resident(&mut self, size: usize) -> &mut Self {
        self.config.async_stack_keep_resident = size;
        self
    }
3780
    /// How much memory, in bytes, to keep resident for each linear memory
    /// after deallocation.
    ///
    /// This option is only applicable on Linux and has no effect on other
    /// platforms.
    ///
    /// By default Wasmtime will use `madvise` to reset the entire contents of
    /// linear memory back to zero when a linear memory is deallocated. This
    /// option can be used to instead use `memset` to set memory back to zero
    /// which can, in some configurations, reduce the number of page faults
    /// taken when a slot is reused.
    pub fn linear_memory_keep_resident(&mut self, size: usize) -> &mut Self {
        self.config.linear_memory_keep_resident = size;
        self
    }
3796
    /// How much memory, in bytes, to keep resident for each table after
    /// deallocation.
    ///
    /// This option is only applicable on Linux and has no effect on other
    /// platforms.
    ///
    /// This option is the same as
    /// [`PoolingAllocationConfig::linear_memory_keep_resident`] except that it
    /// applies to tables instead of linear memories.
    pub fn table_keep_resident(&mut self, size: usize) -> &mut Self {
        self.config.table_keep_resident = size;
        self
    }
3810
    /// The maximum number of concurrent component instances supported (default
    /// is `1000`).
    ///
    /// This provides an upper bound on the total size of component
    /// metadata-related allocations, along with
    /// [`PoolingAllocationConfig::max_component_instance_size`]. The upper bound is
    ///
    /// ```text
    /// total_component_instances * max_component_instance_size
    /// ```
    ///
    /// where `max_component_instance_size` is rounded up to the size and alignment
    /// of the internal representation of the metadata.
    pub fn total_component_instances(&mut self, count: u32) -> &mut Self {
        self.config.limits.total_component_instances = count;
        self
    }
3828
    /// The maximum size, in bytes, allocated for a component instance's
    /// `VMComponentContext` metadata as well as the aggregate size of this
    /// component's core instances' `VMContext` metadata.
    ///
    /// The [`wasmtime::component::Instance`][crate::component::Instance] type
    /// has a static size but its internal `VMComponentContext` is dynamically
    /// sized depending on the component being instantiated. This size limit
    /// loosely correlates to the size of the component, taking into account
    /// factors such as:
    ///
    /// * number of lifted and lowered functions,
    /// * number of memories
    /// * number of inner instances
    /// * number of resources
    ///
    /// If the allocated size per instance is too small then instantiation of a
    /// module will fail at runtime with an error indicating how many bytes were
    /// needed.
    ///
    /// In addition to the memory in the runtime for the component itself,
    /// components contain one or more core module instances. Each of these
    /// require some memory in the runtime as described in
    /// [`PoolingAllocationConfig::max_core_instance_size`]. The limit here
    /// applies against the sum of all of these individual allocations.
    ///
    /// The default value for this is 1 MiB.
    ///
    /// This provides an upper bound on the total size of all components'
    /// metadata-related allocations (for both the component and its embedded
    /// core module instances), along with
    /// [`PoolingAllocationConfig::total_component_instances`]. The upper bound is
    ///
    /// ```text
    /// total_component_instances * max_component_instance_size
    /// ```
    ///
    /// where `max_component_instance_size` is rounded up to the size and alignment
    /// of the internal representation of the metadata.
    pub fn max_component_instance_size(&mut self, size: usize) -> &mut Self {
        self.config.limits.component_instance_size = size;
        self
    }
3871
    /// The maximum number of core instances a single component may contain
    /// (default is unlimited).
    ///
    /// This method (along with
    /// [`PoolingAllocationConfig::max_memories_per_component`],
    /// [`PoolingAllocationConfig::max_tables_per_component`], and
    /// [`PoolingAllocationConfig::max_component_instance_size`]) allows you to cap
    /// the amount of resources a single component allocation consumes.
    ///
    /// If a component would instantiate more core instances than `count`, then
    /// the component will fail to instantiate.
    ///
    /// See also [`PoolingAllocationConfig::total_core_instances`] for the
    /// engine-wide limit on concurrent core instances.
    pub fn max_core_instances_per_component(&mut self, count: u32) -> &mut Self {
        self.config.limits.max_core_instances_per_component = count;
        self
    }
3887
    /// The maximum number of Wasm linear memories that a single component may
    /// transitively contain (default is unlimited).
    ///
    /// This method (along with
    /// [`PoolingAllocationConfig::max_core_instances_per_component`],
    /// [`PoolingAllocationConfig::max_tables_per_component`], and
    /// [`PoolingAllocationConfig::max_component_instance_size`]) allows you to cap
    /// the amount of resources a single component allocation consumes.
    ///
    /// If a component transitively contains more linear memories than `count`,
    /// then the component will fail to instantiate.
    ///
    /// See also [`PoolingAllocationConfig::total_memories`] for the engine-wide
    /// limit on concurrent linear memories.
    pub fn max_memories_per_component(&mut self, count: u32) -> &mut Self {
        self.config.limits.max_memories_per_component = count;
        self
    }
3903
    /// The maximum number of tables that a single component may transitively
    /// contain (default is unlimited).
    ///
    /// This method (along with
    /// [`PoolingAllocationConfig::max_core_instances_per_component`],
    /// [`PoolingAllocationConfig::max_memories_per_component`], and
    /// [`PoolingAllocationConfig::max_component_instance_size`]) allows you to cap
    /// the amount of resources a single component allocation consumes.
    ///
    /// If a component transitively contains more tables than `count`, then the
    /// component will fail to instantiate.
    ///
    /// See also [`PoolingAllocationConfig::total_tables`] for the engine-wide
    /// limit on concurrent tables.
    pub fn max_tables_per_component(&mut self, count: u32) -> &mut Self {
        self.config.limits.max_tables_per_component = count;
        self
    }
3919
    /// The maximum number of concurrent Wasm linear memories supported (default
    /// is `1000`).
    ///
    /// This value has a direct impact on the amount of memory allocated by the pooling
    /// instance allocator.
    ///
    /// The pooling instance allocator allocates a memory pool, where each entry
    /// in the pool contains the reserved address space for each linear memory
    /// supported by an instance.
    ///
    /// The memory pool will reserve a large quantity of host process address
    /// space to elide the bounds checks required for correct WebAssembly memory
    /// semantics. Even with 64-bit address spaces, the address space is limited
    /// when dealing with a large number of linear memories.
    ///
    /// For example, on Linux x86_64, the userland address space limit is 128
    /// TiB. That might seem like a lot, but each linear memory will *reserve* 6
    /// GiB of space by default.
    ///
    /// See also [`PoolingAllocationConfig::max_memories_per_component`] for the
    /// per-component limit on linear memories.
    pub fn total_memories(&mut self, count: u32) -> &mut Self {
        self.config.limits.total_memories = count;
        self
    }
3942
    /// The maximum number of concurrent tables supported (default is `1000`).
    ///
    /// This value has a direct impact on the amount of memory allocated by the
    /// pooling instance allocator.
    ///
    /// The pooling instance allocator allocates a table pool, where each entry
    /// in the pool contains the space needed for each WebAssembly table
    /// supported by an instance (see `table_elements` to control the size of
    /// each table).
    ///
    /// See also [`PoolingAllocationConfig::max_tables_per_component`] for the
    /// per-component limit on tables.
    pub fn total_tables(&mut self, count: u32) -> &mut Self {
        self.config.limits.total_tables = count;
        self
    }
3956
    /// The maximum number of execution stacks allowed for asynchronous
    /// execution, when enabled (default is `1000`).
    ///
    /// This value has a direct impact on the amount of memory allocated by the
    /// pooling instance allocator.
    ///
    /// This option is only available when the `async` Cargo feature is enabled
    /// at compile time.
    #[cfg(feature = "async")]
    pub fn total_stacks(&mut self, count: u32) -> &mut Self {
        self.config.limits.total_stacks = count;
        self
    }
3967
3968    /// The maximum number of concurrent core instances supported (default is
3969    /// `1000`).
3970    ///
3971    /// This provides an upper-bound on the total size of core instance
3972    /// metadata-related allocations, along with
3973    /// [`PoolingAllocationConfig::max_core_instance_size`]. The upper bound is
3974    ///
3975    /// ```text
3976    /// total_core_instances * max_core_instance_size
3977    /// ```
3978    ///
3979    /// where `max_core_instance_size` is rounded up to the size and alignment of
3980    /// the internal representation of the metadata.
3981    pub fn total_core_instances(&mut self, count: u32) -> &mut Self {
3982        self.config.limits.total_core_instances = count;
3983        self
3984    }
3985
3986    /// The maximum size, in bytes, allocated for a core instance's `VMContext`
3987    /// metadata.
3988    ///
3989    /// The [`Instance`][crate::Instance] type has a static size but its
3990    /// `VMContext` metadata is dynamically sized depending on the module being
3991    /// instantiated. This size limit loosely correlates to the size of the Wasm
3992    /// module, taking into account factors such as:
3993    ///
3994    /// * number of functions
3995    /// * number of globals
3996    /// * number of memories
3997    /// * number of tables
3998    /// * number of function types
3999    ///
4000    /// If the allocated size per instance is too small then instantiation of a
4001    /// module will fail at runtime with an error indicating how many bytes were
4002    /// needed.
4003    ///
4004    /// The default value for this is 1MiB.
4005    ///
4006    /// This provides an upper-bound on the total size of core instance
4007    /// metadata-related allocations, along with
4008    /// [`PoolingAllocationConfig::total_core_instances`]. The upper bound is
4009    ///
4010    /// ```text
4011    /// total_core_instances * max_core_instance_size
4012    /// ```
4013    ///
4014    /// where `max_core_instance_size` is rounded up to the size and alignment of
4015    /// the internal representation of the metadata.
4016    pub fn max_core_instance_size(&mut self, size: usize) -> &mut Self {
4017        self.config.limits.core_instance_size = size;
4018        self
4019    }
4020
4021    /// The maximum number of defined tables for a core module (default is `1`).
4022    ///
4023    /// This value controls the capacity of the `VMTableDefinition` table in
4024    /// each instance's `VMContext` structure.
4025    ///
4026    /// The allocated size of the table will be `tables *
4027    /// sizeof(VMTableDefinition)` for each instance regardless of how many
4028    /// tables are defined by an instance's module.
4029    pub fn max_tables_per_module(&mut self, tables: u32) -> &mut Self {
4030        self.config.limits.max_tables_per_module = tables;
4031        self
4032    }
4033
4034    /// The maximum table elements for any table defined in a module (default is
4035    /// `20000`).
4036    ///
4037    /// If a table's minimum element limit is greater than this value, the
4038    /// module will fail to instantiate.
4039    ///
4040    /// If a table's maximum element limit is unbounded or greater than this
4041    /// value, the maximum will be `table_elements` for the purpose of any
4042    /// `table.grow` instruction.
4043    ///
4044    /// This value is used to reserve the maximum space for each supported
4045    /// table; table elements are pointer-sized in the Wasmtime runtime.
4046    /// Therefore, the space reserved for each instance is `tables *
4047    /// table_elements * sizeof::<*const ()>`.
4048    pub fn table_elements(&mut self, elements: usize) -> &mut Self {
4049        self.config.limits.table_elements = elements;
4050        self
4051    }
4052
4053    /// The maximum number of defined linear memories for a module (default is
4054    /// `1`).
4055    ///
4056    /// This value controls the capacity of the `VMMemoryDefinition` table in
4057    /// each core instance's `VMContext` structure.
4058    ///
4059    /// The allocated size of the table will be `memories *
4060    /// sizeof(VMMemoryDefinition)` for each core instance regardless of how
4061    /// many memories are defined by the core instance's module.
4062    pub fn max_memories_per_module(&mut self, memories: u32) -> &mut Self {
4063        self.config.limits.max_memories_per_module = memories;
4064        self
4065    }
4066
4067    /// The maximum byte size that any WebAssembly linear memory may grow to.
4068    ///
4069    /// This option defaults to 4 GiB meaning that for 32-bit linear memories
4070    /// there is no restrictions. 64-bit linear memories will not be allowed to
4071    /// grow beyond 4 GiB by default.
4072    ///
4073    /// If a memory's minimum size is greater than this value, the module will
4074    /// fail to instantiate.
4075    ///
4076    /// If a memory's maximum size is unbounded or greater than this value, the
4077    /// maximum will be `max_memory_size` for the purpose of any `memory.grow`
4078    /// instruction.
4079    ///
4080    /// This value is used to control the maximum accessible space for each
4081    /// linear memory of a core instance. This can be thought of as a simple
4082    /// mechanism like [`Store::limiter`](crate::Store::limiter) to limit memory
4083    /// at runtime. This value can also affect striping/coloring behavior when
4084    /// used in conjunction with
4085    /// [`memory_protection_keys`](PoolingAllocationConfig::memory_protection_keys).
4086    ///
4087    /// The virtual memory reservation size of each linear memory is controlled
4088    /// by the [`Config::memory_reservation`] setting and this method's
4089    /// configuration cannot exceed [`Config::memory_reservation`].
4090    pub fn max_memory_size(&mut self, bytes: usize) -> &mut Self {
4091        self.config.limits.max_memory_size = bytes;
4092        self
4093    }
4094
4095    /// Configures whether memory protection keys (MPK) should be used for more
4096    /// efficient layout of pool-allocated memories.
4097    ///
4098    /// When using the pooling allocator (see [`Config::allocation_strategy`],
4099    /// [`InstanceAllocationStrategy::Pooling`]), memory protection keys can
4100    /// reduce the total amount of allocated virtual memory by eliminating guard
4101    /// regions between WebAssembly memories in the pool. It does so by
4102    /// "coloring" memory regions with different memory keys and setting which
4103    /// regions are accessible each time executions switches from host to guest
4104    /// (or vice versa).
4105    ///
4106    /// Leveraging MPK requires configuring a smaller-than-default
4107    /// [`max_memory_size`](PoolingAllocationConfig::max_memory_size) to enable
4108    /// this coloring/striping behavior. For example embeddings might want to
4109    /// reduce the default 4G allowance to 128M.
4110    ///
4111    /// MPK is only available on Linux (called `pku` there) and recent x86
4112    /// systems; we check for MPK support at runtime by examining the `CPUID`
4113    /// register. This configuration setting can be in three states:
4114    ///
4115    /// - `auto`: if MPK support is available the guard regions are removed; if
4116    ///   not, the guard regions remain
4117    /// - `yes`: use MPK to eliminate guard regions; fail if MPK is not
4118    ///   supported
4119    /// - `no`: never use MPK
4120    ///
4121    /// By default this value is `no`, but may become `auto` in future
4122    /// releases.
4123    ///
4124    /// __WARNING__: this configuration options is still experimental--use at
4125    /// your own risk! MPK uses kernel and CPU features to protect memory
4126    /// regions; you may observe segmentation faults if anything is
4127    /// misconfigured.
4128    #[cfg(feature = "memory-protection-keys")]
4129    pub fn memory_protection_keys(&mut self, enable: Enabled) -> &mut Self {
4130        self.config.memory_protection_keys = enable;
4131        self
4132    }
4133
4134    /// Sets an upper limit on how many memory protection keys (MPK) Wasmtime
4135    /// will use.
4136    ///
4137    /// This setting is only applicable when
4138    /// [`PoolingAllocationConfig::memory_protection_keys`] is set to `enable`
4139    /// or `auto`. Configuring this above the HW and OS limits (typically 15)
4140    /// has no effect.
4141    ///
4142    /// If multiple Wasmtime engines are used in the same process, note that all
4143    /// engines will share the same set of allocated keys; this setting will
4144    /// limit how many keys are allocated initially and thus available to all
4145    /// other engines.
4146    #[cfg(feature = "memory-protection-keys")]
4147    pub fn max_memory_protection_keys(&mut self, max: usize) -> &mut Self {
4148        self.config.max_memory_protection_keys = max;
4149        self
4150    }
4151
    /// Check if memory protection keys (MPK) are available on the current host.
    ///
    /// This is a convenience method for determining MPK availability using the
    /// same method that [`Enabled::Auto`] does. See
    /// [`PoolingAllocationConfig::memory_protection_keys`] for more
    /// information.
    ///
    /// Note that this is an associated function: the answer is a host-wide
    /// runtime probe and does not depend on any configuration state.
    #[cfg(feature = "memory-protection-keys")]
    pub fn are_memory_protection_keys_available() -> bool {
        crate::runtime::vm::mpk::is_supported()
    }
4162
4163    /// The maximum number of concurrent GC heaps supported (default is `1000`).
4164    ///
4165    /// This value has a direct impact on the amount of memory allocated by the
4166    /// pooling instance allocator.
4167    ///
4168    /// The pooling instance allocator allocates a GC heap pool, where each
4169    /// entry in the pool contains the space needed for each GC heap used by a
4170    /// store.
4171    #[cfg(feature = "gc")]
4172    pub fn total_gc_heaps(&mut self, count: u32) -> &mut Self {
4173        self.config.limits.total_gc_heaps = count;
4174        self
4175    }
4176
4177    /// Configures whether the Linux-specific [`PAGEMAP_SCAN` ioctl][ioctl] is
4178    /// used to help reset linear memory.
4179    ///
4180    /// When [`Self::linear_memory_keep_resident`] or
4181    /// [`Self::table_keep_resident`] options are configured to nonzero values
4182    /// the default behavior is to `memset` the lowest addresses of a table or
4183    /// memory back to their original contents. With the `PAGEMAP_SCAN` ioctl on
4184    /// Linux this can be done to more intelligently scan for resident pages in
4185    /// the region and only reset those pages back to their original contents
4186    /// with `memset` rather than assuming the low addresses are all resident.
4187    ///
4188    /// This ioctl has the potential to provide a number of performance benefits
4189    /// in high-reuse and high concurrency scenarios. Notably this enables
4190    /// Wasmtime to scan the entire region of WebAssembly linear memory and
4191    /// manually reset memory back to its original contents, up to
4192    /// [`Self::linear_memory_keep_resident`] bytes, possibly skipping an
4193    /// `madvise` entirely. This can be more efficient by avoiding removing
4194    /// pages from the address space entirely and additionally ensuring that
4195    /// future use of the linear memory doesn't incur page faults as the pages
4196    /// remain resident.
4197    ///
4198    /// At this time this configuration option is still being evaluated as to
4199    /// how appropriate it is for all use cases. It currently defaults to
4200    /// `no` or disabled but may change to `auto`, enable if supported, in the
4201    /// future. This option is only supported on Linux and requires a kernel
4202    /// version of 6.7 or higher.
4203    ///
4204    /// [ioctl]: https://www.man7.org/linux/man-pages/man2/PAGEMAP_SCAN.2const.html
4205    pub fn pagemap_scan(&mut self, enable: Enabled) -> &mut Self {
4206        self.config.pagemap_scan = enable;
4207        self
4208    }
4209
    /// Tests whether [`Self::pagemap_scan`] is available or not on the host
    /// system.
    ///
    /// This is an associated function: it delegates to a host-wide runtime
    /// probe in the pooling instance allocator and does not depend on any
    /// configuration state.
    pub fn is_pagemap_scan_available() -> bool {
        crate::runtime::vm::PoolingInstanceAllocatorConfig::is_pagemap_scan_available()
    }
4215}
4216
/// Best-effort detection of whether the host CPU supports the named target
/// `feature` (e.g. `"avx2"`, `"lse"`).
///
/// Returns `Some(true)`/`Some(false)` when the feature's presence could be
/// determined on this architecture, or `None` when the feature name is not
/// recognized or no detection is implemented for the host architecture.
#[cfg(feature = "std")]
fn detect_host_feature(feature: &str) -> Option<bool> {
    #[cfg(target_arch = "aarch64")]
    {
        return match feature {
            "lse" => Some(std::arch::is_aarch64_feature_detected!("lse")),
            "paca" => Some(std::arch::is_aarch64_feature_detected!("paca")),
            "fp16" => Some(std::arch::is_aarch64_feature_detected!("fp16")),

            _ => None,
        };
    }

    // `is_s390x_feature_detected` is nightly only for now, so use the
    // STORE FACILITY LIST EXTENDED (STFLE) instruction as a temporary
    // measure.
    #[cfg(target_arch = "s390x")]
    {
        // STFLE fills in up to `r0 + 1` doublewords of facility bits at the
        // given address; 4 doublewords (256 bits) covers every facility bit
        // queried below (highest is 198).
        let mut facility_list: [u64; 4] = [0; 4];
        unsafe {
            core::arch::asm!(
                "stfle 0({})",
                in(reg_addr) facility_list.as_mut_ptr() ,
                inout("r0") facility_list.len() as u64 - 1 => _,
                options(nostack)
            );
        }
        let get_facility_bit = |n: usize| {
            // NOTE: bits are numbered from the left, i.e. bit 0 is the most
            // significant bit of the first doubleword.
            facility_list[n / 64] & (1 << (63 - (n % 64))) != 0
        };

        return match feature {
            "mie3" => Some(get_facility_bit(61)),
            "mie4" => Some(get_facility_bit(84)),
            "vxrs_ext2" => Some(get_facility_bit(148)),
            "vxrs_ext3" => Some(get_facility_bit(198)),

            _ => None,
        };
    }

    #[cfg(target_arch = "riscv64")]
    {
        return match feature {
            // `is_riscv64_feature_detected` is not yet stable, so we cannot
            // use it. For now lie and say all features are always found to
            // keep tests working.
            _ => Some(true),
        };
    }

    #[cfg(target_arch = "x86_64")]
    {
        return match feature {
            "cmpxchg16b" => Some(std::is_x86_feature_detected!("cmpxchg16b")),
            "sse3" => Some(std::is_x86_feature_detected!("sse3")),
            "ssse3" => Some(std::is_x86_feature_detected!("ssse3")),
            "sse4.1" => Some(std::is_x86_feature_detected!("sse4.1")),
            "sse4.2" => Some(std::is_x86_feature_detected!("sse4.2")),
            "popcnt" => Some(std::is_x86_feature_detected!("popcnt")),
            "avx" => Some(std::is_x86_feature_detected!("avx")),
            "avx2" => Some(std::is_x86_feature_detected!("avx2")),
            "fma" => Some(std::is_x86_feature_detected!("fma")),
            "bmi1" => Some(std::is_x86_feature_detected!("bmi1")),
            "bmi2" => Some(std::is_x86_feature_detected!("bmi2")),
            "avx512bitalg" => Some(std::is_x86_feature_detected!("avx512bitalg")),
            "avx512dq" => Some(std::is_x86_feature_detected!("avx512dq")),
            "avx512f" => Some(std::is_x86_feature_detected!("avx512f")),
            "avx512vl" => Some(std::is_x86_feature_detected!("avx512vl")),
            "avx512vbmi" => Some(std::is_x86_feature_detected!("avx512vbmi")),
            "lzcnt" => Some(std::is_x86_feature_detected!("lzcnt")),

            _ => None,
        };
    }

    // Fallback for architectures with no detection implementation above.
    #[allow(
        unreachable_code,
        reason = "reachable or not depending on if a target above matches"
    )]
    {
        let _ = feature;
        return None;
    }
}