wasmtime/
config.rs

1use crate::prelude::*;
2use alloc::sync::Arc;
3use bitflags::Flags;
4use core::fmt;
5use core::str::FromStr;
6#[cfg(any(feature = "cache", feature = "cranelift", feature = "winch"))]
7use std::path::Path;
8pub use wasmparser::WasmFeatures;
9use wasmtime_environ::{ConfigTunables, TripleExt, Tunables};
10
11#[cfg(feature = "runtime")]
12use crate::memory::MemoryCreator;
13#[cfg(feature = "runtime")]
14use crate::profiling_agent::{self, ProfilingAgent};
15#[cfg(feature = "runtime")]
16use crate::runtime::vm::{
17    GcRuntime, InstanceAllocator, OnDemandInstanceAllocator, RuntimeMemoryCreator,
18};
19#[cfg(feature = "runtime")]
20use crate::trampoline::MemoryCreatorProxy;
21
22#[cfg(feature = "async")]
23use crate::stack::{StackCreator, StackCreatorProxy};
24#[cfg(feature = "async")]
25use wasmtime_fiber::RuntimeFiberStackCreator;
26
27#[cfg(feature = "runtime")]
28pub use crate::runtime::code_memory::CustomCodeMemory;
29#[cfg(feature = "cache")]
30pub use wasmtime_cache::{Cache, CacheConfig};
31#[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
32pub use wasmtime_environ::CacheStore;
33
/// Represents the module instance allocation strategy to use.
///
/// This enum is `#[non_exhaustive]`: additional strategies may be added in
/// future versions of Wasmtime.
#[derive(Clone)]
#[non_exhaustive]
pub enum InstanceAllocationStrategy {
    /// The on-demand instance allocation strategy.
    ///
    /// Resources related to a module instance are allocated at instantiation time and
    /// immediately deallocated when the `Store` referencing the instance is dropped.
    ///
    /// This is the default allocation strategy for Wasmtime.
    OnDemand,
    /// The pooling instance allocation strategy.
    ///
    /// A pool of resources is created in advance and module instantiation reuses resources
    /// from the pool. Resources are returned to the pool when the `Store` referencing the instance
    /// is dropped.
    ///
    /// This variant is only available when the `pooling-allocator` crate
    /// feature is enabled.
    #[cfg(feature = "pooling-allocator")]
    Pooling(PoolingAllocationConfig),
}
53
54impl InstanceAllocationStrategy {
55    /// The default pooling instance allocation strategy.
56    #[cfg(feature = "pooling-allocator")]
57    pub fn pooling() -> Self {
58        Self::Pooling(Default::default())
59    }
60}
61
impl Default for InstanceAllocationStrategy {
    /// Returns the on-demand strategy, Wasmtime's default.
    fn default() -> Self {
        Self::OnDemand
    }
}
67
#[cfg(feature = "pooling-allocator")]
impl From<PoolingAllocationConfig> for InstanceAllocationStrategy {
    /// Wraps a pooling configuration in the corresponding strategy variant.
    fn from(cfg: PoolingAllocationConfig) -> InstanceAllocationStrategy {
        Self::Pooling(cfg)
    }
}
74
/// Configure the strategy used for versioning in serializing and deserializing [`crate::Module`].
///
/// The chosen strategy determines what version information, if any, is emitted
/// during serialization and checked during deserialization.
#[derive(Clone)]
pub enum ModuleVersionStrategy {
    /// Use the wasmtime crate's Cargo package version.
    WasmtimeVersion,
    /// Use a custom version string. Must be at most 255 bytes.
    Custom(String),
    /// Emit no version string in serialization, and accept all version strings in deserialization.
    None,
}
85
impl Default for ModuleVersionStrategy {
    /// The default strategy versions by the Wasmtime crate's own Cargo
    /// package version.
    fn default() -> Self {
        ModuleVersionStrategy::WasmtimeVersion
    }
}
91
92impl core::hash::Hash for ModuleVersionStrategy {
93    fn hash<H: core::hash::Hasher>(&self, hasher: &mut H) {
94        match self {
95            Self::WasmtimeVersion => env!("CARGO_PKG_VERSION").hash(hasher),
96            Self::Custom(s) => s.hash(hasher),
97            Self::None => {}
98        };
99    }
100}
101
/// Global configuration options used to create an [`Engine`](crate::Engine)
/// and customize its behavior.
///
/// This structure exposes a builder-like interface and is primarily consumed by
/// [`Engine::new()`](crate::Engine::new).
///
/// The validation of `Config` is deferred until the engine is being built, thus
/// a problematic config may cause `Engine::new` to fail.
///
/// # Defaults
///
/// The `Default` trait implementation and the return value from
/// [`Config::new()`] are the same and represent the default set of
/// configuration for an engine. The exact set of defaults will differ based on
/// properties such as enabled Cargo features at compile time and the configured
/// target (see [`Config::target`]). Configuration options document their
/// default values and what the conditional value of the default is where
/// applicable.
#[derive(Clone)]
pub struct Config {
    /// Compiler configuration, or `None` when compilation has been disabled
    /// via [`Config::enable_compiler`].
    #[cfg(any(feature = "cranelift", feature = "winch"))]
    compiler_config: Option<CompilerConfig>,
    /// Compilation target triple; `None` means "target the host".
    target: Option<target_lexicon::Triple>,
    #[cfg(feature = "gc")]
    collector: Collector,
    profiling_strategy: ProfilingStrategy,
    tunables: ConfigTunables,

    #[cfg(feature = "cache")]
    pub(crate) cache: Option<Cache>,
    /// Embedder-registered custom memory creator, if any.
    #[cfg(feature = "runtime")]
    pub(crate) mem_creator: Option<Arc<dyn RuntimeMemoryCreator>>,
    #[cfg(feature = "runtime")]
    pub(crate) custom_code_memory: Option<Arc<dyn CustomCodeMemory>>,
    pub(crate) allocation_strategy: InstanceAllocationStrategy,
    /// Maximum amount of native stack, in bytes, that wasm may consume.
    pub(crate) max_wasm_stack: usize,
    /// Explicitly enabled features via `Config::wasm_*` methods. This is a
    /// signal that the embedder specifically wants something turned on
    /// regardless of the defaults that Wasmtime might otherwise have enabled.
    ///
    /// Note that this, and `disabled_features` below, start as the empty set of
    /// features to only track explicit user requests.
    pub(crate) enabled_features: WasmFeatures,
    /// Same as `enabled_features`, but for those that are explicitly disabled.
    pub(crate) disabled_features: WasmFeatures,
    pub(crate) wasm_backtrace: bool,
    /// Whether `Config::wasm_backtrace_details` consulted the
    /// `WASMTIME_BACKTRACE_DETAILS` environment variable for its value.
    pub(crate) wasm_backtrace_details_env_used: bool,
    pub(crate) native_unwind_info: Option<bool>,
    #[cfg(any(feature = "async", feature = "stack-switching"))]
    pub(crate) async_stack_size: usize,
    #[cfg(feature = "async")]
    pub(crate) async_stack_zeroing: bool,
    #[cfg(feature = "async")]
    pub(crate) stack_creator: Option<Arc<dyn RuntimeFiberStackCreator>>,
    pub(crate) async_support: bool,
    pub(crate) module_version: ModuleVersionStrategy,
    pub(crate) parallel_compilation: bool,
    pub(crate) memory_guaranteed_dense_image_size: u64,
    pub(crate) force_memory_init_memfd: bool,
    pub(crate) wmemcheck: bool,
    #[cfg(feature = "coredump")]
    pub(crate) coredump_on_trap: bool,
    pub(crate) macos_use_mach_ports: bool,
    /// Host CPU feature probe; only available when `std` is enabled.
    pub(crate) detect_host_feature: Option<fn(&str) -> Option<bool>>,
    pub(crate) x86_float_abi_ok: Option<bool>,
    pub(crate) shared_memory: bool,
}
169
/// User-provided configuration for the compiler.
#[cfg(any(feature = "cranelift", feature = "winch"))]
#[derive(Debug, Clone)]
struct CompilerConfig {
    /// Explicitly-selected compiler backend, if any (`None` = auto).
    strategy: Option<Strategy>,
    /// Key/value compiler settings to apply.
    settings: crate::hash_map::HashMap<String, String>,
    /// Boolean compiler flags that have been enabled.
    flags: crate::hash_set::HashSet<String>,
    /// Storage backend for the incremental-compilation cache, when enabled
    /// via `Config::enable_incremental_compilation`.
    #[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
    cache_store: Option<Arc<dyn CacheStore>>,
    // Presumably a directory where CLIF (Cranelift IR) output is written —
    // TODO(review): confirm against the setter for this field.
    clif_dir: Option<std::path::PathBuf>,
    /// Whether wmemcheck instrumentation was requested.
    wmemcheck: bool,
}
182
#[cfg(any(feature = "cranelift", feature = "winch"))]
impl CompilerConfig {
    /// Creates a compiler configuration with no explicit strategy, settings,
    /// or flags selected.
    fn new() -> Self {
        Self {
            strategy: Strategy::Auto.not_auto(),
            settings: Default::default(),
            flags: Default::default(),
            #[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
            cache_store: None,
            clif_dir: None,
            wmemcheck: false,
        }
    }

    /// Ensures that the setting `k` is either unset or already equal to `v`,
    /// inserting `v` when it is unset.
    ///
    /// # Returns
    ///
    /// `true` when the setting was absent (and has now been set to `v`) or
    /// was already `v`; `false` when it was previously set to a different
    /// value.
    fn ensure_setting_unset_or_given(&mut self, k: &str, v: &str) -> bool {
        match self.settings.get(k) {
            Some(existing) => existing.as_str() == v,
            None => {
                self.settings.insert(k.to_string(), v.to_string());
                true
            }
        }
    }
}
216
#[cfg(any(feature = "cranelift", feature = "winch"))]
impl Default for CompilerConfig {
    /// Equivalent to `CompilerConfig::new`.
    fn default() -> Self {
        Self::new()
    }
}
223
224impl Config {
    /// Creates a new configuration object with the default configuration
    /// specified.
    pub fn new() -> Self {
        let mut ret = Self {
            tunables: ConfigTunables::default(),
            #[cfg(any(feature = "cranelift", feature = "winch"))]
            compiler_config: Some(CompilerConfig::default()),
            target: None,
            #[cfg(feature = "gc")]
            collector: Collector::default(),
            #[cfg(feature = "cache")]
            cache: None,
            profiling_strategy: ProfilingStrategy::None,
            #[cfg(feature = "runtime")]
            mem_creator: None,
            #[cfg(feature = "runtime")]
            custom_code_memory: None,
            allocation_strategy: InstanceAllocationStrategy::OnDemand,
            // 512k of stack -- note that this is chosen currently to not be too
            // big, not be too small, and be a good default for most platforms.
            // One platform of particular note is Windows where the stack size
            // of the main thread seems to, by default, be smaller than that of
            // Linux and macOS. This 512k value at least lets our current test
            // suite pass on the main thread of Windows (using `--test-threads
            // 1` forces this), or at least it passed when this change was
            // committed.
            max_wasm_stack: 512 * 1024,
            wasm_backtrace: true,
            wasm_backtrace_details_env_used: false,
            native_unwind_info: None,
            // Feature sets start empty: they only record explicit requests
            // made via the `wasm_*` methods.
            enabled_features: WasmFeatures::empty(),
            disabled_features: WasmFeatures::empty(),
            // 2 MiB stack for futures executing WebAssembly.
            #[cfg(any(feature = "async", feature = "stack-switching"))]
            async_stack_size: 2 << 20,
            #[cfg(feature = "async")]
            async_stack_zeroing: false,
            #[cfg(feature = "async")]
            stack_creator: None,
            async_support: false,
            module_version: ModuleVersionStrategy::default(),
            // Parallel compilation is turned off when running under Miri.
            parallel_compilation: !cfg!(miri),
            // 16 MiB.
            memory_guaranteed_dense_image_size: 16 << 20,
            force_memory_init_memfd: false,
            wmemcheck: false,
            #[cfg(feature = "coredump")]
            coredump_on_trap: false,
            // Mach ports on macOS are likewise disabled under Miri.
            macos_use_mach_ports: !cfg!(miri),
            // Probing host CPU features requires `std`.
            #[cfg(feature = "std")]
            detect_host_feature: Some(detect_host_feature),
            #[cfg(not(feature = "std"))]
            detect_host_feature: None,
            x86_float_abi_ok: None,
            shared_memory: false,
        };
        // Seed `parse_wasm_debuginfo` (and `wasm_backtrace_details_env_used`)
        // with the environment-driven default.
        ret.wasm_backtrace_details(WasmBacktraceDetails::Environment);
        ret
    }
282
    /// Returns whether this config still carries compiler configuration,
    /// i.e. compilation has not been dynamically disabled
    /// (see [`Config::enable_compiler`]).
    #[cfg(any(feature = "cranelift", feature = "winch"))]
    pub(crate) fn has_compiler(&self) -> bool {
        self.compiler_config.is_some()
    }
287
    /// Returns a mutable reference to the compiler configuration.
    ///
    /// # Panics
    ///
    /// Panics if compilation was disabled for this `Config`. The
    /// `#[track_caller]` attribute (together with `expect`) attributes the
    /// panic to the misconfiguring caller rather than this helper.
    #[track_caller]
    #[cfg(any(feature = "cranelift", feature = "winch"))]
    fn compiler_config_mut(&mut self) -> &mut CompilerConfig {
        self.compiler_config.as_mut().expect(
            "cannot configure compiler settings for `Config`s \
             created by `Config::without_compiler`",
        )
    }
296
297    /// Configure whether Wasm compilation is enabled.
298    ///
299    /// Disabling Wasm compilation will allow you to load and run
300    /// [pre-compiled][crate::Engine::precompile_module] Wasm programs, but not
301    /// to compile and run new Wasm programs that have not already been
302    /// pre-compiled.
303    ///
304    /// Many compilation-related configuration methods will panic if compilation
305    /// has been disabled.
306    ///
307    /// Note that there are two ways to disable Wasm compilation:
308    ///
309    /// 1. Statically, by disabling the `"cranelift"` and `"winch"` cargo
310    ///    features when building Wasmtime. These builds of Wasmtime will have
311    ///    smaller code size, since they do not include any of the code to
312    ///    compile Wasm.
313    ///
314    /// 2. Dynamically, by passing `false` to this method at run-time when
315    ///    configuring Wasmtime. The Wasmtime binary will still include the code
316    ///    for compiling Wasm, it just won't be executed, so code size is larger
317    ///    than with the first approach.
318    ///
319    /// The static approach is better in most cases, however dynamically calling
320    /// `enable_compiler(false)` is useful whenever you create multiple
321    /// `Engine`s in the same process, some of which must be able to compile
322    /// Wasm and some of which should never do so. Tests are a common example of
323    /// such a situation, especially when there are multiple Rust binaries in
324    /// the same cargo workspace, and cargo's feature resolution enables the
325    /// `"cranelift"` or `"winch"` features across the whole workspace.
326    #[cfg(any(feature = "cranelift", feature = "winch"))]
327    pub fn enable_compiler(&mut self, enable: bool) -> &mut Self {
328        match (enable, &self.compiler_config) {
329            (true, Some(_)) | (false, None) => {}
330            (true, None) => {
331                self.compiler_config = Some(CompilerConfig::default());
332            }
333            (false, Some(_)) => {
334                self.compiler_config = None;
335            }
336        }
337        self
338    }
339
340    /// Configures the target platform of this [`Config`].
341    ///
342    /// This method is used to configure the output of compilation in an
343    /// [`Engine`](crate::Engine). This can be used, for example, to
344    /// cross-compile from one platform to another. By default, the host target
345    /// triple is used meaning compiled code is suitable to run on the host.
346    ///
347    /// Note that the [`Module`](crate::Module) type can only be created if the
348    /// target configured here matches the host. Otherwise if a cross-compile is
349    /// being performed where the host doesn't match the target then
350    /// [`Engine::precompile_module`](crate::Engine::precompile_module) must be
351    /// used instead.
352    ///
353    /// Target-specific flags (such as CPU features) will not be inferred by
354    /// default for the target when one is provided here. This means that this
355    /// can also be used, for example, with the host architecture to disable all
356    /// host-inferred feature flags. Configuring target-specific flags can be
357    /// done with [`Config::cranelift_flag_set`] and
358    /// [`Config::cranelift_flag_enable`].
359    ///
360    /// # Errors
361    ///
362    /// This method will error if the given target triple is not supported.
363    pub fn target(&mut self, target: &str) -> Result<&mut Self> {
364        self.target =
365            Some(target_lexicon::Triple::from_str(target).map_err(|e| anyhow::anyhow!(e))?);
366
367        Ok(self)
368    }
369
370    /// Enables the incremental compilation cache in Cranelift, using the provided `CacheStore`
371    /// backend for storage.
372    ///
373    /// # Panics
374    ///
375    /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
376    #[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
377    pub fn enable_incremental_compilation(
378        &mut self,
379        cache_store: Arc<dyn CacheStore>,
380    ) -> Result<&mut Self> {
381        self.compiler_config_mut().cache_store = Some(cache_store);
382        Ok(self)
383    }
384
    /// Whether or not to enable support for asynchronous functions in Wasmtime.
    ///
    /// When enabled, the config can optionally define host functions with `async`.
    /// Instances created and functions called with this `Config` *must* be called
    /// through their asynchronous APIs, however. For example using
    /// [`Func::call`](crate::Func::call) will panic when used with this config.
    ///
    /// # Asynchronous Wasm
    ///
    /// WebAssembly does not currently have a way to specify at the bytecode
    /// level what is and isn't async. Host-defined functions, however, may be
    /// defined as `async`. WebAssembly imports always appear synchronous, which
    /// gives rise to a bit of an impedance mismatch here. To solve this
    /// Wasmtime supports "asynchronous configs" which enables calling these
    /// asynchronous functions in a way that looks synchronous to the executing
    /// WebAssembly code.
    ///
    /// An asynchronous config must always invoke wasm code asynchronously,
    /// meaning we'll always represent its computation as a
    /// [`Future`](std::future::Future). The `poll` method of the futures
    /// returned by Wasmtime will perform the actual work of calling the
    /// WebAssembly. Wasmtime won't manage its own thread pools or similar,
    /// that's left up to the embedder.
    ///
    /// To implement futures in a way that WebAssembly sees asynchronous host
    /// functions as synchronous, all async Wasmtime futures will execute on a
    /// separately allocated native stack from the thread otherwise executing
    /// Wasmtime. This separate native stack can then be switched to and from.
    /// Using this whenever an `async` host function returns a future that
    /// resolves to `Pending` we switch away from the temporary stack back to
    /// the main stack and propagate the `Pending` status.
    ///
    /// In general it's encouraged that the integration with `async` and
    /// wasmtime is designed early on in your embedding of Wasmtime to ensure
    /// that it's planned that WebAssembly executes in the right context of your
    /// application.
    ///
    /// # Execution in `poll`
    ///
    /// The [`Future::poll`](std::future::Future::poll) method is the main
    /// driving force behind Rust's futures. That method's own documentation
    /// states "an implementation of `poll` should strive to return quickly, and
    /// should not block". This, however, can be at odds with executing
    /// WebAssembly code as part of the `poll` method itself. If your
    /// WebAssembly is untrusted then this could allow the `poll` method to take
    /// arbitrarily long in the worst case, likely blocking all other
    /// asynchronous tasks.
    ///
    /// To remedy this situation you have a few possible ways to solve this:
    ///
    /// * The most efficient solution is to enable
    ///   [`Config::epoch_interruption`] in conjunction with
    ///   [`crate::Store::epoch_deadline_async_yield_and_update`]. Coupled with
    ///   periodic calls to [`crate::Engine::increment_epoch`] this will cause
    ///   executing WebAssembly to periodically yield back according to the
    ///   epoch configuration settings. This enables `Future::poll` to take at
    ///   most a certain amount of time according to epoch configuration
    ///   settings and when increments happen. The benefit of this approach is
    ///   that the instrumentation in compiled code is quite lightweight, but a
    ///   downside can be that the scheduling is somewhat nondeterministic since
    ///   increments are usually timer-based which are not always deterministic.
    ///
    ///   Note that to prevent infinite execution of wasm it's recommended to
    ///   place a timeout on the entire future representing executing wasm code
    ///   and the periodic yields with epochs should ensure that when the
    ///   timeout is reached it's appropriately recognized.
    ///
    /// * Alternatively you can enable the
    ///   [`Config::consume_fuel`](crate::Config::consume_fuel) method as well
    ///   as [`crate::Store::fuel_async_yield_interval`]. When doing so this will
    ///   configure Wasmtime futures to yield periodically while they're
    ///   executing WebAssembly code. After consuming the specified amount of
    ///   fuel wasm futures will return `Poll::Pending` from their `poll`
    ///   method, and will get automatically re-polled later. This enables the
    ///   `Future::poll` method to take roughly a fixed amount of time since
    ///   fuel is guaranteed to get consumed while wasm is executing. Unlike
    ///   epoch-based preemption this is deterministic since wasm always
    ///   consumes a fixed amount of fuel per-operation. The downside of this
    ///   approach, however, is that the compiled code instrumentation is
    ///   significantly more expensive than epoch checks.
    ///
    ///   Note that to prevent infinite execution of wasm it's recommended to
    ///   place a timeout on the entire future representing executing wasm code
    ///   and the periodic yields with fuel should ensure that when the
    ///   timeout is reached it's appropriately recognized.
    ///
    /// In all cases special care needs to be taken when integrating
    /// asynchronous wasm into your application. You should carefully plan where
    /// WebAssembly will execute and what compute resources will be allotted to
    /// it. If Wasmtime doesn't support exactly what you'd like just yet, please
    /// feel free to open an issue!
    #[cfg(feature = "async")]
    pub fn async_support(&mut self, enable: bool) -> &mut Self {
        self.async_support = enable;
        self
    }
481
    /// Configures whether DWARF debug information will be emitted
    /// during compilation for a native debugger on the Wasmtime
    /// process to consume.
    ///
    /// Note that the `debug-builtins` compile-time Cargo feature must also be
    /// enabled for native debuggers such as GDB or LLDB to be able to debug
    /// guest WebAssembly programs.
    ///
    /// By default this option is `false`.
    /// **Note** Enabling this option is not compatible with the Winch compiler.
    pub fn debug_info(&mut self, enable: bool) -> &mut Self {
        // `Some` records an explicit choice in the tunables; an unset option
        // is left to its default.
        self.tunables.debug_native = Some(enable);
        self
    }
496
    /// Configures whether compiled guest code will be instrumented to
    /// provide debugging at the Wasm VM level.
    ///
    /// This is required in order to enable a guest-level debugging
    /// API that can precisely examine Wasm VM state and (eventually,
    /// once it is complete) set breakpoints and watchpoints and step
    /// through code.
    ///
    /// Without this enabled, debugging can only be done via a native
    /// debugger operating on the compiled guest code (see
    /// [`Config::debug_info`]) and is "best-effort": we may be able to
    /// recover some Wasm locals or operand stack values, but it is
    /// not guaranteed, even when optimizations are disabled.
    ///
    /// When this is enabled, additional instrumentation is inserted
    /// that directly tracks the Wasm VM state at every step. This has
    /// some performance impact, but allows perfect debugging
    /// fidelity.
    ///
    /// Breakpoints, watchpoints, and stepping are not yet supported,
    /// but will be added in a future version of Wasmtime.
    ///
    /// This enables use of the [`crate::DebugFrameCursor`] API which is
    /// provided by [`crate::Caller::debug_frames`] from within a
    /// hostcall context.
    ///
    /// ***Note*** Enabling this option is not compatible with the
    /// Winch compiler.
    #[cfg(feature = "debug")]
    pub fn guest_debug(&mut self, enable: bool) -> &mut Self {
        self.tunables.debug_guest = Some(enable);
        self
    }
530
    /// Configures whether [`WasmBacktrace`] will be present in the context of
    /// errors returned from Wasmtime.
    ///
    /// A backtrace may be collected whenever an error is returned from a host
    /// function call through to WebAssembly or when WebAssembly itself hits a
    /// trap condition, such as an out-of-bounds memory access. This flag
    /// indicates, in these conditions, whether the backtrace is collected or
    /// not.
    ///
    /// Currently wasm backtraces are implemented through frame pointer walking.
    /// This means that collecting a backtrace is expected to be a fast and
    /// relatively cheap operation. Additionally backtrace collection is
    /// suitable in concurrent environments since one thread capturing a
    /// backtrace won't block other threads.
    ///
    /// Collected backtraces are attached via [`anyhow::Error::context`] to
    /// errors returned from host functions. The [`WasmBacktrace`] type can be
    /// acquired via [`anyhow::Error::downcast_ref`] to inspect the backtrace.
    /// When this option is disabled then this context is never applied to
    /// errors coming out of wasm.
    ///
    /// See [`Config::wasm_backtrace_details`] to additionally control whether
    /// frames are symbolicated with filename/line number information.
    ///
    /// This option is `true` by default.
    ///
    /// [`WasmBacktrace`]: crate::WasmBacktrace
    pub fn wasm_backtrace(&mut self, enable: bool) -> &mut Self {
        self.wasm_backtrace = enable;
        self
    }
559
560    /// Configures whether backtraces in `Trap` will parse debug info in the wasm file to
561    /// have filename/line number information.
562    ///
563    /// When enabled this will causes modules to retain debugging information
564    /// found in wasm binaries. This debug information will be used when a trap
565    /// happens to symbolicate each stack frame and attempt to print a
566    /// filename/line number for each wasm frame in the stack trace.
567    ///
568    /// By default this option is `WasmBacktraceDetails::Environment`, meaning
569    /// that wasm will read `WASMTIME_BACKTRACE_DETAILS` to indicate whether
570    /// details should be parsed. Note that the `std` feature of this crate must
571    /// be active to read environment variables, otherwise this is disabled by
572    /// default.
573    pub fn wasm_backtrace_details(&mut self, enable: WasmBacktraceDetails) -> &mut Self {
574        self.wasm_backtrace_details_env_used = false;
575        self.tunables.parse_wasm_debuginfo = match enable {
576            WasmBacktraceDetails::Enable => Some(true),
577            WasmBacktraceDetails::Disable => Some(false),
578            WasmBacktraceDetails::Environment => {
579                #[cfg(feature = "std")]
580                {
581                    self.wasm_backtrace_details_env_used = true;
582                    std::env::var("WASMTIME_BACKTRACE_DETAILS")
583                        .map(|s| Some(s == "1"))
584                        .unwrap_or(Some(false))
585                }
586                #[cfg(not(feature = "std"))]
587                {
588                    Some(false)
589                }
590            }
591        };
592        self
593    }
594
    /// Configures whether to generate native unwind information
    /// (e.g. `.eh_frame` on Linux).
    ///
    /// This configuration option only exists to help third-party stack
    /// capturing mechanisms, such as the system's unwinder or the `backtrace`
    /// crate, determine how to unwind through Wasm frames. It does not affect
    /// whether Wasmtime can capture Wasm backtraces or not. The presence of
    /// [`WasmBacktrace`] is controlled by the [`Config::wasm_backtrace`]
    /// option.
    ///
    /// Native unwind information is included:
    ///
    /// - Unconditionally when targeting Windows, since the Windows ABI
    ///   requires it.
    /// - By default for all other targets.
    ///
    /// Note that systems loading many modules may wish to disable this
    /// configuration option instead of leaving it on-by-default. Some platforms
    /// exhibit quadratic behavior when registering/unregistering unwinding
    /// information which can greatly slow down the module loading/unloading
    /// process.
    ///
    /// [`WasmBacktrace`]: crate::WasmBacktrace
    pub fn native_unwind_info(&mut self, enable: bool) -> &mut Self {
        self.native_unwind_info = Some(enable);
        self
    }
620
    /// Configures whether execution of WebAssembly will "consume fuel" to
    /// either halt or yield execution as desired.
    ///
    /// This can be used to deterministically prevent infinitely-executing
    /// WebAssembly code by instrumenting generated code to consume fuel as it
    /// executes. When fuel runs out a trap is raised, however [`Store`] can be
    /// configured to yield execution periodically via
    /// [`crate::Store::fuel_async_yield_interval`].
    ///
    /// Note that a [`Store`] starts with no fuel, so if you enable this option
    /// you'll have to be sure to pour some fuel into [`Store`] before
    /// executing some code.
    ///
    /// See [`Config::epoch_interruption`] for a lighter-weight (though
    /// non-deterministic) interruption mechanism.
    ///
    /// By default this option is `false`.
    ///
    /// **Note** Enabling this option is not compatible with the Winch compiler.
    ///
    /// [`Store`]: crate::Store
    pub fn consume_fuel(&mut self, enable: bool) -> &mut Self {
        self.tunables.consume_fuel = Some(enable);
        self
    }
643
644    /// Enables epoch-based interruption.
645    ///
646    /// When executing code in async mode, we sometimes want to
647    /// implement a form of cooperative timeslicing: long-running Wasm
648    /// guest code should periodically yield to the executor
649    /// loop. This yielding could be implemented by using "fuel" (see
650    /// [`consume_fuel`](Config::consume_fuel)). However, fuel
651    /// instrumentation is somewhat expensive: it modifies the
652    /// compiled form of the Wasm code so that it maintains a precise
653    /// instruction count, frequently checking this count against the
654    /// remaining fuel. If one does not need this precise count or
655    /// deterministic interruptions, and only needs a periodic
656    /// interrupt of some form, then It would be better to have a more
657    /// lightweight mechanism.
658    ///
659    /// Epoch-based interruption is that mechanism. There is a global
660    /// "epoch", which is a counter that divides time into arbitrary
661    /// periods (or epochs). This counter lives on the
662    /// [`Engine`](crate::Engine) and can be incremented by calling
663    /// [`Engine::increment_epoch`](crate::Engine::increment_epoch).
664    /// Epoch-based instrumentation works by setting a "deadline
665    /// epoch". The compiled code knows the deadline, and at certain
666    /// points, checks the current epoch against that deadline. It
667    /// will yield if the deadline has been reached.
668    ///
669    /// The idea is that checking an infrequently-changing counter is
670    /// cheaper than counting and frequently storing a precise metric
671    /// (instructions executed) locally. The interruptions are not
672    /// deterministic, but if the embedder increments the epoch in a
673    /// periodic way (say, every regular timer tick by a thread or
674    /// signal handler), then we can ensure that all async code will
675    /// yield to the executor within a bounded time.
676    ///
677    /// The deadline check cannot be avoided by malicious wasm code. It is safe
678    /// to use epoch deadlines to limit the execution time of untrusted
679    /// code.
680    ///
681    /// The [`Store`](crate::Store) tracks the deadline, and controls
682    /// what happens when the deadline is reached during
683    /// execution. Several behaviors are possible:
684    ///
685    /// - Trap if code is executing when the epoch deadline is
686    ///   met. See
687    ///   [`Store::epoch_deadline_trap`](crate::Store::epoch_deadline_trap).
688    ///
    /// - Call an arbitrary function. This function may choose to trap or
690    ///   increment the epoch. See
691    ///   [`Store::epoch_deadline_callback`](crate::Store::epoch_deadline_callback).
692    ///
693    /// - Yield to the executor loop, then resume when the future is
694    ///   next polled. See
695    ///   [`Store::epoch_deadline_async_yield_and_update`](crate::Store::epoch_deadline_async_yield_and_update).
696    ///
    /// Trapping is the default. The yielding behavior may be used for
698    /// the timeslicing behavior described above.
699    ///
700    /// This feature is available with or without async support.
    /// However, without async support, the timeslicing behavior is
702    /// not available. This means epoch-based interruption can only
703    /// serve as a simple external-interruption mechanism.
704    ///
705    /// An initial deadline must be set before executing code by calling
706    /// [`Store::set_epoch_deadline`](crate::Store::set_epoch_deadline). If this
707    /// deadline is not configured then wasm will immediately trap.
708    ///
709    /// ## Interaction with blocking host calls
710    ///
711    /// Epochs (and fuel) do not assist in handling WebAssembly code blocked in
712    /// a call to the host. For example if the WebAssembly function calls
713    /// `wasi:io/poll.poll` to sleep epochs will not assist in waking this up or
714    /// timing it out. Epochs intentionally only affect running WebAssembly code
715    /// itself and it's left to the embedder to determine how best to wake up
716    /// indefinitely blocking code in the host.
717    ///
718    /// The typical solution for this, however, is to use
719    /// [`Config::async_support(true)`](Config::async_support) and the `async`
720    /// variant of WASI host functions. This models computation as a Rust
721    /// `Future` which means that when blocking happens the future is only
722    /// suspended and control yields back to the main event loop. This gives the
723    /// embedder the opportunity to use `tokio::time::timeout` for example on a
724    /// wasm computation and have the desired effect of cancelling a blocking
725    /// operation when a timeout expires.
726    ///
727    /// ## When to use fuel vs. epochs
728    ///
729    /// In general, epoch-based interruption results in faster
730    /// execution. This difference is sometimes significant: in some
731    /// measurements, up to 2-3x. This is because epoch-based
732    /// interruption does less work: it only watches for a global
733    /// rarely-changing counter to increment, rather than keeping a
734    /// local frequently-changing counter and comparing it to a
735    /// deadline.
736    ///
737    /// Fuel, in contrast, should be used when *deterministic*
738    /// yielding or trapping is needed. For example, if it is required
739    /// that the same function call with the same starting state will
740    /// always either complete or trap with an out-of-fuel error,
741    /// deterministically, then fuel with a fixed bound should be
742    /// used.
743    ///
744    /// **Note** Enabling this option is not compatible with the Winch compiler.
745    ///
746    /// # See Also
747    ///
748    /// - [`Engine::increment_epoch`](crate::Engine::increment_epoch)
749    /// - [`Store::set_epoch_deadline`](crate::Store::set_epoch_deadline)
750    /// - [`Store::epoch_deadline_trap`](crate::Store::epoch_deadline_trap)
751    /// - [`Store::epoch_deadline_callback`](crate::Store::epoch_deadline_callback)
752    /// - [`Store::epoch_deadline_async_yield_and_update`](crate::Store::epoch_deadline_async_yield_and_update)
753    pub fn epoch_interruption(&mut self, enable: bool) -> &mut Self {
754        self.tunables.epoch_interruption = Some(enable);
755        self
756    }
757
758    /// Configures the maximum amount of stack space available for
759    /// executing WebAssembly code.
760    ///
761    /// WebAssembly has well-defined semantics on stack overflow. This is
762    /// intended to be a knob which can help configure how much stack space
763    /// wasm execution is allowed to consume. Note that the number here is not
764    /// super-precise, but rather wasm will take at most "pretty close to this
765    /// much" stack space.
766    ///
767    /// If a wasm call (or series of nested wasm calls) take more stack space
768    /// than the `size` specified then a stack overflow trap will be raised.
769    ///
770    /// Caveat: this knob only limits the stack space consumed by wasm code.
771    /// More importantly, it does not ensure that this much stack space is
772    /// available on the calling thread stack. Exhausting the thread stack
773    /// typically leads to an **abort** of the process.
774    ///
775    /// Here are some examples of how that could happen:
776    ///
777    /// - Let's assume this option is set to 2 MiB and then a thread that has
778    ///   a stack with 512 KiB left.
779    ///
780    ///   If wasm code consumes more than 512 KiB then the process will be aborted.
781    ///
782    /// - Assuming the same conditions, but this time wasm code does not consume
783    ///   any stack but calls into a host function. The host function consumes
784    ///   more than 512 KiB of stack space. The process will be aborted.
785    ///
786    /// There's another gotcha related to recursive calling into wasm: the stack
787    /// space consumed by a host function is counted towards this limit. The
788    /// host functions are not prevented from consuming more than this limit.
789    /// However, if the host function that used more than this limit and called
790    /// back into wasm, then the execution will trap immediately because of
791    /// stack overflow.
792    ///
793    /// When the `async` feature is enabled, this value cannot exceed the
794    /// `async_stack_size` option. Be careful not to set this value too close
795    /// to `async_stack_size` as doing so may limit how much stack space
796    /// is available for host functions.
797    ///
798    /// By default this option is 512 KiB.
799    ///
800    /// # Errors
801    ///
802    /// The `Engine::new` method will fail if the `size` specified here is
803    /// either 0 or larger than the [`Config::async_stack_size`] configuration.
804    pub fn max_wasm_stack(&mut self, size: usize) -> &mut Self {
805        self.max_wasm_stack = size;
806        self
807    }
808
809    /// Configures the size of the stacks used for asynchronous execution.
810    ///
811    /// This setting configures the size of the stacks that are allocated for
812    /// asynchronous execution. The value cannot be less than `max_wasm_stack`.
813    ///
814    /// The amount of stack space guaranteed for host functions is
815    /// `async_stack_size - max_wasm_stack`, so take care not to set these two values
816    /// close to one another; doing so may cause host functions to overflow the
817    /// stack and abort the process.
818    ///
819    /// By default this option is 2 MiB.
820    ///
821    /// # Errors
822    ///
823    /// The `Engine::new` method will fail if the value for this option is
824    /// smaller than the [`Config::max_wasm_stack`] option.
825    #[cfg(any(feature = "async", feature = "stack-switching"))]
826    pub fn async_stack_size(&mut self, size: usize) -> &mut Self {
827        self.async_stack_size = size;
828        self
829    }
830
831    /// Configures whether or not stacks used for async futures are zeroed
832    /// before (re)use.
833    ///
834    /// When the [`async_support`](Config::async_support) method is enabled for
835    /// Wasmtime and the [`call_async`] variant of calling WebAssembly is used
836    /// then Wasmtime will create a separate runtime execution stack for each
837    /// future produced by [`call_async`]. By default upon allocation, depending
838    /// on the platform, these stacks might be filled with uninitialized
839    /// memory. This is safe and correct because, modulo bugs in Wasmtime,
840    /// compiled Wasm code will never read from a stack slot before it
841    /// initializes the stack slot.
842    ///
843    /// However, as a defense-in-depth mechanism, you may configure Wasmtime to
844    /// ensure that these stacks are zeroed before they are used. Notably, if
845    /// you are using the pooling allocator, stacks can be pooled and reused
846    /// across different Wasm guests; ensuring that stacks are zeroed can
847    /// prevent data leakage between Wasm guests even in the face of potential
848    /// read-of-stack-slot-before-initialization bugs in Wasmtime's compiler.
849    ///
850    /// Stack zeroing can be a costly operation in highly concurrent
851    /// environments due to modifications of the virtual address space requiring
852    /// process-wide synchronization. It can also be costly in `no-std`
853    /// environments that must manually zero memory, and cannot rely on an OS
854    /// and virtual memory to provide zeroed pages.
855    ///
856    /// This option defaults to `false`.
857    ///
858    /// [`call_async`]: crate::TypedFunc::call_async
859    #[cfg(feature = "async")]
860    pub fn async_stack_zeroing(&mut self, enable: bool) -> &mut Self {
861        self.async_stack_zeroing = enable;
862        self
863    }
864
865    /// Explicitly enables (and un-disables) a given set of [`WasmFeatures`].
866    ///
867    /// Note: this is a low-level method that does not necessarily imply that
868    /// wasmtime _supports_ a feature. It should only be used to _disable_
869    /// features that callers want to be rejected by the parser or _enable_
870    /// features callers are certain that the current configuration of wasmtime
871    /// supports.
872    ///
873    /// Feature validation is deferred until an engine is being built, thus by
874    /// enabling features here a caller may cause [`Engine::new`] to fail later,
875    /// if the feature configuration isn't supported.
876    pub fn wasm_features(&mut self, flag: WasmFeatures, enable: bool) -> &mut Self {
877        self.enabled_features.set(flag, enable);
878        self.disabled_features.set(flag, !enable);
879        self
880    }
881
882    /// Configures whether the WebAssembly tail calls proposal will be enabled
883    /// for compilation or not.
884    ///
885    /// The [WebAssembly tail calls proposal] introduces the `return_call` and
886    /// `return_call_indirect` instructions. These instructions allow for Wasm
887    /// programs to implement some recursive algorithms with *O(1)* stack space
888    /// usage.
889    ///
890    /// This is `true` by default except when the Winch compiler is enabled.
891    ///
892    /// [WebAssembly tail calls proposal]: https://github.com/WebAssembly/tail-call
893    pub fn wasm_tail_call(&mut self, enable: bool) -> &mut Self {
894        self.wasm_features(WasmFeatures::TAIL_CALL, enable);
895        self
896    }
897
898    /// Configures whether the WebAssembly custom-page-sizes proposal will be
899    /// enabled for compilation or not.
900    ///
901    /// The [WebAssembly custom-page-sizes proposal] allows a memory to
902    /// customize its page sizes. By default, Wasm page sizes are 64KiB
903    /// large. This proposal allows the memory to opt into smaller page sizes
904    /// instead, allowing Wasm to run in environments with less than 64KiB RAM
905    /// available, for example.
906    ///
907    /// Note that the page size is part of the memory's type, and because
908    /// different memories may have different types, they may also have
909    /// different page sizes.
910    ///
911    /// Currently the only valid page sizes are 64KiB (the default) and 1
912    /// byte. Future extensions may relax this constraint and allow all powers
913    /// of two.
914    ///
915    /// Support for this proposal is disabled by default.
916    ///
917    /// [WebAssembly custom-page-sizes proposal]: https://github.com/WebAssembly/custom-page-sizes
918    pub fn wasm_custom_page_sizes(&mut self, enable: bool) -> &mut Self {
919        self.wasm_features(WasmFeatures::CUSTOM_PAGE_SIZES, enable);
920        self
921    }
922
923    /// Configures whether the WebAssembly [threads] proposal will be enabled
924    /// for compilation.
925    ///
926    /// This feature gates items such as shared memories and atomic
927    /// instructions. Note that the threads feature depends on the bulk memory
928    /// feature, which is enabled by default. Additionally note that while the
929    /// wasm feature is called "threads" it does not actually include the
930    /// ability to spawn threads. Spawning threads is part of the [wasi-threads]
931    /// proposal which is a separately gated feature in Wasmtime.
932    ///
933    /// Embeddings of Wasmtime are able to build their own custom threading
934    /// scheme on top of the core wasm threads proposal, however.
935    ///
936    /// The default value for this option is whether the `threads`
937    /// crate feature of Wasmtime is enabled or not. By default this crate
938    /// feature is enabled.
939    ///
940    /// [threads]: https://github.com/webassembly/threads
941    /// [wasi-threads]: https://github.com/webassembly/wasi-threads
942    #[cfg(feature = "threads")]
943    pub fn wasm_threads(&mut self, enable: bool) -> &mut Self {
944        self.wasm_features(WasmFeatures::THREADS, enable);
945        self
946    }
947
948    /// Configures whether the WebAssembly [shared-everything-threads] proposal
949    /// will be enabled for compilation.
950    ///
951    /// This feature gates extended use of the `shared` attribute on items other
952    /// than memories, extra atomic instructions, and new component model
953    /// intrinsics for spawning threads. It depends on the
954    /// [`wasm_threads`][Self::wasm_threads] being enabled.
955    ///
956    /// [shared-everything-threads]:
957    ///     https://github.com/webassembly/shared-everything-threads
958    pub fn wasm_shared_everything_threads(&mut self, enable: bool) -> &mut Self {
959        self.wasm_features(WasmFeatures::SHARED_EVERYTHING_THREADS, enable);
960        self
961    }
962
963    /// Configures whether the [WebAssembly reference types proposal][proposal]
964    /// will be enabled for compilation.
965    ///
966    /// This feature gates items such as the `externref` and `funcref` types as
967    /// well as allowing a module to define multiple tables.
968    ///
969    /// Note that the reference types proposal depends on the bulk memory proposal.
970    ///
971    /// This feature is `true` by default.
972    ///
973    /// # Errors
974    ///
975    /// The validation of this feature are deferred until the engine is being built,
976    /// and thus may cause `Engine::new` fail if the `bulk_memory` feature is disabled.
977    ///
978    /// [proposal]: https://github.com/webassembly/reference-types
979    #[cfg(feature = "gc")]
980    pub fn wasm_reference_types(&mut self, enable: bool) -> &mut Self {
981        self.wasm_features(WasmFeatures::REFERENCE_TYPES, enable);
982        self
983    }
984
985    /// Configures whether the [WebAssembly function references
986    /// proposal][proposal] will be enabled for compilation.
987    ///
988    /// This feature gates non-nullable reference types, function reference
989    /// types, `call_ref`, `ref.func`, and non-nullable reference related
990    /// instructions.
991    ///
992    /// Note that the function references proposal depends on the reference
993    /// types proposal.
994    ///
995    /// This feature is `false` by default.
996    ///
997    /// [proposal]: https://github.com/WebAssembly/function-references
998    #[cfg(feature = "gc")]
999    pub fn wasm_function_references(&mut self, enable: bool) -> &mut Self {
1000        self.wasm_features(WasmFeatures::FUNCTION_REFERENCES, enable);
1001        self
1002    }
1003
1004    /// Configures whether the [WebAssembly wide-arithmetic][proposal] will be
1005    /// enabled for compilation.
1006    ///
1007    /// This feature is `false` by default.
1008    ///
1009    /// [proposal]: https://github.com/WebAssembly/wide-arithmetic
1010    pub fn wasm_wide_arithmetic(&mut self, enable: bool) -> &mut Self {
1011        self.wasm_features(WasmFeatures::WIDE_ARITHMETIC, enable);
1012        self
1013    }
1014
1015    /// Configures whether the [WebAssembly Garbage Collection
1016    /// proposal][proposal] will be enabled for compilation.
1017    ///
1018    /// This feature gates `struct` and `array` type definitions and references,
1019    /// the `i31ref` type, and all related instructions.
1020    ///
    /// Note that the GC proposal depends on the typed function references
    /// proposal.
1023    ///
1024    /// This feature is `false` by default.
1025    ///
1026    /// **Warning: Wasmtime's implementation of the GC proposal is still in
1027    /// progress and generally not ready for primetime.**
1028    ///
1029    /// [proposal]: https://github.com/WebAssembly/gc
1030    #[cfg(feature = "gc")]
1031    pub fn wasm_gc(&mut self, enable: bool) -> &mut Self {
1032        self.wasm_features(WasmFeatures::GC, enable);
1033        self
1034    }
1035
1036    /// Configures whether the WebAssembly SIMD proposal will be
1037    /// enabled for compilation.
1038    ///
1039    /// The [WebAssembly SIMD proposal][proposal]. This feature gates items such
1040    /// as the `v128` type and all of its operators being in a module. Note that
1041    /// this does not enable the [relaxed simd proposal].
1042    ///
1043    /// **Note**
1044    ///
1045    /// On x86_64 platforms the base CPU feature requirement for SIMD
1046    /// is SSE2 for the Cranelift compiler and AVX for the Winch compiler.
1047    ///
1048    /// This is `true` by default.
1049    ///
1050    /// [proposal]: https://github.com/webassembly/simd
1051    /// [relaxed simd proposal]: https://github.com/WebAssembly/relaxed-simd
1052    pub fn wasm_simd(&mut self, enable: bool) -> &mut Self {
1053        self.wasm_features(WasmFeatures::SIMD, enable);
1054        self
1055    }
1056
1057    /// Configures whether the WebAssembly Relaxed SIMD proposal will be
1058    /// enabled for compilation.
1059    ///
1060    /// The relaxed SIMD proposal adds new instructions to WebAssembly which,
1061    /// for some specific inputs, are allowed to produce different results on
1062    /// different hosts. More-or-less this proposal enables exposing
1063    /// platform-specific semantics of SIMD instructions in a controlled
1064    /// fashion to a WebAssembly program. From an embedder's perspective this
1065    /// means that WebAssembly programs may execute differently depending on
1066    /// whether the host is x86_64 or AArch64, for example.
1067    ///
1068    /// By default Wasmtime lowers relaxed SIMD instructions to the fastest
1069    /// lowering for the platform it's running on. This means that, by default,
1070    /// some relaxed SIMD instructions may have different results for the same
1071    /// inputs across x86_64 and AArch64. This behavior can be disabled through
1072    /// the [`Config::relaxed_simd_deterministic`] option which will force
1073    /// deterministic behavior across all platforms, as classified by the
1074    /// specification, at the cost of performance.
1075    ///
1076    /// This is `true` by default.
1077    ///
1078    /// [proposal]: https://github.com/webassembly/relaxed-simd
1079    pub fn wasm_relaxed_simd(&mut self, enable: bool) -> &mut Self {
1080        self.wasm_features(WasmFeatures::RELAXED_SIMD, enable);
1081        self
1082    }
1083
1084    /// This option can be used to control the behavior of the [relaxed SIMD
1085    /// proposal's][proposal] instructions.
1086    ///
1087    /// The relaxed SIMD proposal introduces instructions that are allowed to
1088    /// have different behavior on different architectures, primarily to afford
1089    /// an efficient implementation on all architectures. This means, however,
1090    /// that the same module may execute differently on one host than another,
1091    /// which typically is not otherwise the case. This option is provided to
1092    /// force Wasmtime to generate deterministic code for all relaxed simd
1093    /// instructions, at the cost of performance, for all architectures. When
1094    /// this option is enabled then the deterministic behavior of all
1095    /// instructions in the relaxed SIMD proposal is selected.
1096    ///
1097    /// This is `false` by default.
1098    ///
1099    /// [proposal]: https://github.com/webassembly/relaxed-simd
1100    pub fn relaxed_simd_deterministic(&mut self, enable: bool) -> &mut Self {
1101        self.tunables.relaxed_simd_deterministic = Some(enable);
1102        self
1103    }
1104
1105    /// Configures whether the [WebAssembly bulk memory operations
1106    /// proposal][proposal] will be enabled for compilation.
1107    ///
1108    /// This feature gates items such as the `memory.copy` instruction, passive
1109    /// data/table segments, etc, being in a module.
1110    ///
1111    /// This is `true` by default.
1112    ///
1113    /// Feature `reference_types`, which is also `true` by default, requires
1114    /// this feature to be enabled. Thus disabling this feature must also disable
1115    /// `reference_types` as well using [`wasm_reference_types`](crate::Config::wasm_reference_types).
1116    ///
1117    /// # Errors
1118    ///
1119    /// Disabling this feature without disabling `reference_types` will cause
1120    /// `Engine::new` to fail.
1121    ///
1122    /// [proposal]: https://github.com/webassembly/bulk-memory-operations
1123    pub fn wasm_bulk_memory(&mut self, enable: bool) -> &mut Self {
1124        self.wasm_features(WasmFeatures::BULK_MEMORY, enable);
1125        self
1126    }
1127
1128    /// Configures whether the WebAssembly multi-value [proposal] will
1129    /// be enabled for compilation.
1130    ///
1131    /// This feature gates functions and blocks returning multiple values in a
1132    /// module, for example.
1133    ///
1134    /// This is `true` by default.
1135    ///
1136    /// [proposal]: https://github.com/webassembly/multi-value
1137    pub fn wasm_multi_value(&mut self, enable: bool) -> &mut Self {
1138        self.wasm_features(WasmFeatures::MULTI_VALUE, enable);
1139        self
1140    }
1141
1142    /// Configures whether the WebAssembly multi-memory [proposal] will
1143    /// be enabled for compilation.
1144    ///
1145    /// This feature gates modules having more than one linear memory
1146    /// declaration or import.
1147    ///
1148    /// This is `true` by default.
1149    ///
1150    /// [proposal]: https://github.com/webassembly/multi-memory
1151    pub fn wasm_multi_memory(&mut self, enable: bool) -> &mut Self {
1152        self.wasm_features(WasmFeatures::MULTI_MEMORY, enable);
1153        self
1154    }
1155
1156    /// Configures whether the WebAssembly memory64 [proposal] will
1157    /// be enabled for compilation.
1158    ///
    /// Note that the upstream specification is not finalized and Wasmtime
1160    /// may also have bugs for this feature since it hasn't been exercised
1161    /// much.
1162    ///
1163    /// This is `false` by default.
1164    ///
1165    /// [proposal]: https://github.com/webassembly/memory64
1166    pub fn wasm_memory64(&mut self, enable: bool) -> &mut Self {
1167        self.wasm_features(WasmFeatures::MEMORY64, enable);
1168        self
1169    }
1170
1171    /// Configures whether the WebAssembly extended-const [proposal] will
1172    /// be enabled for compilation.
1173    ///
1174    /// This is `true` by default.
1175    ///
1176    /// [proposal]: https://github.com/webassembly/extended-const
1177    pub fn wasm_extended_const(&mut self, enable: bool) -> &mut Self {
1178        self.wasm_features(WasmFeatures::EXTENDED_CONST, enable);
1179        self
1180    }
1181
1182    /// Configures whether the [WebAssembly stack switching
1183    /// proposal][proposal] will be enabled for compilation.
1184    ///
1185    /// This feature gates the use of control tags.
1186    ///
1187    /// This feature depends on the `function_reference_types` and
1188    /// `exceptions` features.
1189    ///
1190    /// This feature is `false` by default.
1191    ///
    /// # Errors
    ///
    /// The validation of this feature is deferred until the engine is being
    /// built, and thus may cause `Engine::new` to fail if the features this
    /// proposal depends on are not enabled.
    ///
    /// [proposal]: https://github.com/webassembly/stack-switching
1195    pub fn wasm_stack_switching(&mut self, enable: bool) -> &mut Self {
1196        self.wasm_features(WasmFeatures::STACK_SWITCHING, enable);
1197        self
1198    }
1199
1200    /// Configures whether the WebAssembly component-model [proposal] will
1201    /// be enabled for compilation.
1202    ///
1203    /// This flag can be used to blanket disable all components within Wasmtime.
1204    /// Otherwise usage of components requires statically using
1205    /// [`Component`](crate::component::Component) instead of
1206    /// [`Module`](crate::Module) for example anyway.
1207    ///
1208    /// The default value for this option is whether the `component-model`
1209    /// crate feature of Wasmtime is enabled or not. By default this crate
1210    /// feature is enabled.
1211    ///
1212    /// [proposal]: https://github.com/webassembly/component-model
1213    #[cfg(feature = "component-model")]
1214    pub fn wasm_component_model(&mut self, enable: bool) -> &mut Self {
1215        self.wasm_features(WasmFeatures::COMPONENT_MODEL, enable);
1216        self
1217    }
1218
1219    /// Configures whether components support the async ABI [proposal] for
1220    /// lifting and lowering functions, as well as `stream`, `future`, and
1221    /// `error-context` types.
1222    ///
1223    /// Please note that Wasmtime's support for this feature is _very_
1224    /// incomplete.
1225    ///
1226    /// [proposal]:
1227    ///     https://github.com/WebAssembly/component-model/blob/main/design/mvp/Concurrency.md
1228    #[cfg(feature = "component-model-async")]
1229    pub fn wasm_component_model_async(&mut self, enable: bool) -> &mut Self {
1230        self.wasm_features(WasmFeatures::CM_ASYNC, enable);
1231        self
1232    }
1233
1234    /// This corresponds to the 🚝 emoji in the component model specification.
1235    ///
1236    /// Please note that Wasmtime's support for this feature is _very_
1237    /// incomplete.
1238    ///
1239    /// [proposal]:
1240    ///     https://github.com/WebAssembly/component-model/blob/main/design/mvp/Concurrency.md
1241    #[cfg(feature = "component-model-async")]
1242    pub fn wasm_component_model_async_builtins(&mut self, enable: bool) -> &mut Self {
1243        self.wasm_features(WasmFeatures::CM_ASYNC_BUILTINS, enable);
1244        self
1245    }
1246
1247    /// This corresponds to the 🚟 emoji in the component model specification.
1248    ///
1249    /// Please note that Wasmtime's support for this feature is _very_
1250    /// incomplete.
1251    ///
1252    /// [proposal]: https://github.com/WebAssembly/component-model/blob/main/design/mvp/Concurrency.md
1253    #[cfg(feature = "component-model-async")]
1254    pub fn wasm_component_model_async_stackful(&mut self, enable: bool) -> &mut Self {
1255        self.wasm_features(WasmFeatures::CM_ASYNC_STACKFUL, enable);
1256        self
1257    }
1258
1259    /// This corresponds to the 🧵 emoji in the component model specification.
1260    ///
1261    /// Please note that Wasmtime's support for this feature is _very_
1262    /// incomplete.
1263    ///
1264    /// [proposal]:
1265    ///     https://github.com/WebAssembly/component-model/pull/557
1266    #[cfg(feature = "component-model-async")]
1267    pub fn wasm_component_model_threading(&mut self, enable: bool) -> &mut Self {
1268        self.wasm_features(WasmFeatures::CM_THREADING, enable);
1269        self
1270    }
1271
1272    /// This corresponds to the 📝 emoji in the component model specification.
1273    ///
1274    /// Please note that Wasmtime's support for this feature is _very_
1275    /// incomplete.
1276    ///
1277    /// [proposal]: https://github.com/WebAssembly/component-model/blob/main/design/mvp/Concurrency.md
1278    #[cfg(feature = "component-model")]
1279    pub fn wasm_component_model_error_context(&mut self, enable: bool) -> &mut Self {
1280        self.wasm_features(WasmFeatures::CM_ERROR_CONTEXT, enable);
1281        self
1282    }
1283
1284    /// Configures whether the [GC extension to the component-model
1285    /// proposal][proposal] is enabled or not.
1286    ///
1287    /// This corresponds to the 🛸 emoji in the component model specification.
1288    ///
1289    /// Please note that Wasmtime's support for this feature is _very_
1290    /// incomplete.
1291    ///
1292    /// [proposal]: https://github.com/WebAssembly/component-model/issues/525
1293    #[cfg(feature = "component-model")]
1294    pub fn wasm_component_model_gc(&mut self, enable: bool) -> &mut Self {
1295        self.wasm_features(WasmFeatures::CM_GC, enable);
1296        self
1297    }
1298
1299    /// Configures whether the [Exception-handling proposal][proposal] is enabled or not.
1300    ///
1301    /// [proposal]: https://github.com/WebAssembly/exception-handling
1302    #[cfg(feature = "gc")]
1303    pub fn wasm_exceptions(&mut self, enable: bool) -> &mut Self {
1304        self.wasm_features(WasmFeatures::EXCEPTIONS, enable);
1305        self
1306    }
1307
1308    #[doc(hidden)] // FIXME(#3427) - if/when implemented then un-hide this
1309    #[deprecated = "This configuration option only exists for internal \
1310                    usage with the spec testsuite. It may be removed at \
1311                    any time and without warning. Do not rely on it!"]
1312    pub fn wasm_legacy_exceptions(&mut self, enable: bool) -> &mut Self {
1313        self.wasm_features(WasmFeatures::LEGACY_EXCEPTIONS, enable);
1314        self
1315    }
1316
1317    /// Configures which compilation strategy will be used for wasm modules.
1318    ///
1319    /// This method can be used to configure which compiler is used for wasm
1320    /// modules, and for more documentation consult the [`Strategy`] enumeration
1321    /// and its documentation.
1322    ///
1323    /// The default value for this is `Strategy::Auto`.
1324    ///
1325    /// # Panics
1326    ///
1327    /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
1328    #[cfg(any(feature = "cranelift", feature = "winch"))]
1329    pub fn strategy(&mut self, strategy: Strategy) -> &mut Self {
1330        self.compiler_config_mut().strategy = strategy.not_auto();
1331        self
1332    }
1333
1334    /// Configures which garbage collector will be used for Wasm modules.
1335    ///
1336    /// This method can be used to configure which garbage collector
1337    /// implementation is used for Wasm modules. For more documentation, consult
1338    /// the [`Collector`] enumeration and its documentation.
1339    ///
1340    /// The default value for this is `Collector::Auto`.
1341    #[cfg(feature = "gc")]
1342    pub fn collector(&mut self, collector: Collector) -> &mut Self {
1343        self.collector = collector;
1344        self
1345    }
1346
    /// Creates a default profiler based on the profiling strategy chosen.
    ///
    /// Profiler creation calls the type's default initializer where the purpose is
    /// really just to put in place the type used for profiling.
    ///
    /// Some [`ProfilingStrategy`] options require specific platforms or particular
    /// features to be enabled, such as `ProfilingStrategy::JitDump`, which requires
    /// the `jitdump` feature.
    ///
    /// # Errors
    ///
    /// The validation of this field is deferred until the engine is being built, and thus may
    /// cause `Engine::new` to fail if the required feature is disabled, or the platform is not
    /// supported.
    pub fn profiler(&mut self, profile: ProfilingStrategy) -> &mut Self {
        self.profiling_strategy = profile;
        self
    }
1365
1366    /// Configures whether the debug verifier of Cranelift is enabled or not.
1367    ///
1368    /// When Cranelift is used as a code generation backend this will configure
1369    /// it to have the `enable_verifier` flag which will enable a number of debug
1370    /// checks inside of Cranelift. This is largely only useful for the
1371    /// developers of wasmtime itself.
1372    ///
1373    /// The default value for this is `false`
1374    ///
1375    /// # Panics
1376    ///
1377    /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
1378    #[cfg(any(feature = "cranelift", feature = "winch"))]
1379    pub fn cranelift_debug_verifier(&mut self, enable: bool) -> &mut Self {
1380        let val = if enable { "true" } else { "false" };
1381        self.compiler_config_mut()
1382            .settings
1383            .insert("enable_verifier".to_string(), val.to_string());
1384        self
1385    }
1386
1387    /// Configures whether extra debug checks are inserted into
1388    /// Wasmtime-generated code by Cranelift.
1389    ///
1390    /// The default value for this is `false`
1391    ///
1392    /// # Panics
1393    ///
1394    /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
1395    #[cfg(any(feature = "cranelift", feature = "winch"))]
1396    pub fn cranelift_wasmtime_debug_checks(&mut self, enable: bool) -> &mut Self {
1397        unsafe { self.cranelift_flag_set("wasmtime_debug_checks", &enable.to_string()) }
1398    }
1399
1400    /// Configures the Cranelift code generator optimization level.
1401    ///
1402    /// When the Cranelift code generator is used you can configure the
1403    /// optimization level used for generated code in a few various ways. For
1404    /// more information see the documentation of [`OptLevel`].
1405    ///
1406    /// The default value for this is `OptLevel::Speed`.
1407    ///
1408    /// # Panics
1409    ///
1410    /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
1411    #[cfg(any(feature = "cranelift", feature = "winch"))]
1412    pub fn cranelift_opt_level(&mut self, level: OptLevel) -> &mut Self {
1413        let val = match level {
1414            OptLevel::None => "none",
1415            OptLevel::Speed => "speed",
1416            OptLevel::SpeedAndSize => "speed_and_size",
1417        };
1418        self.compiler_config_mut()
1419            .settings
1420            .insert("opt_level".to_string(), val.to_string());
1421        self
1422    }
1423
1424    /// Configures the regalloc algorithm used by the Cranelift code generator.
1425    ///
1426    /// Cranelift can select any of several register allocator algorithms. Each
1427    /// of these algorithms generates correct code, but they represent different
1428    /// tradeoffs between compile speed (how expensive the compilation process
1429    /// is) and run-time speed (how fast the generated code runs).
1430    /// For more information see the documentation of [`RegallocAlgorithm`].
1431    ///
1432    /// The default value for this is `RegallocAlgorithm::Backtracking`.
1433    ///
1434    /// # Panics
1435    ///
1436    /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
1437    #[cfg(any(feature = "cranelift", feature = "winch"))]
1438    pub fn cranelift_regalloc_algorithm(&mut self, algo: RegallocAlgorithm) -> &mut Self {
1439        let val = match algo {
1440            RegallocAlgorithm::Backtracking => "backtracking",
1441            RegallocAlgorithm::SinglePass => "single_pass",
1442        };
1443        self.compiler_config_mut()
1444            .settings
1445            .insert("regalloc_algorithm".to_string(), val.to_string());
1446        self
1447    }
1448
1449    /// Configures whether Cranelift should perform a NaN-canonicalization pass.
1450    ///
1451    /// When Cranelift is used as a code generation backend this will configure
1452    /// it to replace NaNs with a single canonical value. This is useful for
1453    /// users requiring entirely deterministic WebAssembly computation.  This is
1454    /// not required by the WebAssembly spec, so it is not enabled by default.
1455    ///
1456    /// Note that this option affects not only WebAssembly's `f32` and `f64`
1457    /// types but additionally the `v128` type. This option will cause
1458    /// operations using any of these types to have extra checks placed after
1459    /// them to normalize NaN values as needed.
1460    ///
1461    /// The default value for this is `false`
1462    ///
1463    /// # Panics
1464    ///
1465    /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
1466    #[cfg(any(feature = "cranelift", feature = "winch"))]
1467    pub fn cranelift_nan_canonicalization(&mut self, enable: bool) -> &mut Self {
1468        let val = if enable { "true" } else { "false" };
1469        self.compiler_config_mut()
1470            .settings
1471            .insert("enable_nan_canonicalization".to_string(), val.to_string());
1472        self
1473    }
1474
1475    /// Controls whether proof-carrying code (PCC) is used to validate
1476    /// lowering of Wasm sandbox checks.
1477    ///
1478    /// Proof-carrying code carries "facts" about program values from
1479    /// the IR all the way to machine code, and checks those facts
1480    /// against known machine-instruction semantics. This guards
1481    /// against bugs in instruction lowering that might create holes
1482    /// in the Wasm sandbox.
1483    ///
1484    /// PCC is designed to be fast: it does not require complex
1485    /// solvers or logic engines to verify, but only a linear pass
1486    /// over a trail of "breadcrumbs" or facts at each intermediate
1487    /// value. Thus, it is appropriate to enable in production.
1488    ///
1489    /// # Panics
1490    ///
1491    /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
1492    #[cfg(any(feature = "cranelift", feature = "winch"))]
1493    pub fn cranelift_pcc(&mut self, enable: bool) -> &mut Self {
1494        let val = if enable { "true" } else { "false" };
1495        self.compiler_config_mut()
1496            .settings
1497            .insert("enable_pcc".to_string(), val.to_string());
1498        self
1499    }
1500
1501    /// Allows setting a Cranelift boolean flag or preset. This allows
1502    /// fine-tuning of Cranelift settings.
1503    ///
1504    /// Since Cranelift flags may be unstable, this method should not be considered to be stable
1505    /// either; other `Config` functions should be preferred for stability.
1506    ///
1507    /// # Safety
1508    ///
1509    /// This is marked as unsafe, because setting the wrong flag might break invariants,
1510    /// resulting in execution hazards.
1511    ///
1512    /// # Errors
1513    ///
1514    /// The validation of the flags are deferred until the engine is being built, and thus may
1515    /// cause `Engine::new` fail if the flag's name does not exist, or the value is not appropriate
1516    /// for the flag type.
1517    ///
1518    /// # Panics
1519    ///
1520    /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
1521    #[cfg(any(feature = "cranelift", feature = "winch"))]
1522    pub unsafe fn cranelift_flag_enable(&mut self, flag: &str) -> &mut Self {
1523        self.compiler_config_mut().flags.insert(flag.to_string());
1524        self
1525    }
1526
    /// Allows setting another Cranelift flag defined by a flag name and value. This allows
    /// fine-tuning of Cranelift settings.
    ///
    /// Since Cranelift flags may be unstable, this method should not be considered to be stable
    /// either; other `Config` functions should be preferred for stability.
    ///
    /// # Safety
    ///
    /// This is marked as unsafe, because setting the wrong flag might break invariants,
    /// resulting in execution hazards.
    ///
    /// # Errors
    ///
    /// The validation of the flags is deferred until the engine is being built, and thus may
    /// cause `Engine::new` to fail if the flag's name does not exist, or is incompatible with
    /// other settings.
    ///
    /// For example, feature `wasm_backtrace` will set `unwind_info` to `true`, but if it's
    /// manually set to false then it will fail.
    ///
    /// # Panics
    ///
    /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
    #[cfg(any(feature = "cranelift", feature = "winch"))]
    pub unsafe fn cranelift_flag_set(&mut self, name: &str, value: &str) -> &mut Self {
        self.compiler_config_mut()
            .settings
            .insert(name.to_string(), value.to_string());
        self
    }
1557
1558    /// Set a custom [`Cache`].
1559    ///
1560    /// To load a cache configuration from a file, use [`Cache::from_file`]. Otherwise, you can
1561    /// create a new cache config using [`CacheConfig::new`] and passing that to [`Cache::new`].
1562    ///
1563    /// If you want to disable the cache, you can call this method with `None`.
1564    ///
1565    /// By default, new configs do not have caching enabled.
1566    /// Every call to [`Module::new(my_wasm)`][crate::Module::new] will recompile `my_wasm`,
1567    /// even when it is unchanged, unless an enabled `CacheConfig` is provided.
1568    ///
1569    /// This method is only available when the `cache` feature of this crate is
1570    /// enabled.
1571    ///
1572    /// [docs]: https://bytecodealliance.github.io/wasmtime/cli-cache.html
1573    #[cfg(feature = "cache")]
1574    pub fn cache(&mut self, cache: Option<Cache>) -> &mut Self {
1575        self.cache = cache;
1576        self
1577    }
1578
1579    /// Sets a custom memory creator.
1580    ///
1581    /// Custom memory creators are used when creating host `Memory` objects or when
1582    /// creating instance linear memories for the on-demand instance allocation strategy.
1583    #[cfg(feature = "runtime")]
1584    pub fn with_host_memory(&mut self, mem_creator: Arc<dyn MemoryCreator>) -> &mut Self {
1585        self.mem_creator = Some(Arc::new(MemoryCreatorProxy(mem_creator)));
1586        self
1587    }
1588
    /// Sets a custom stack creator.
    ///
    /// Custom stack creators are used when creating async instance stacks for
    /// the on-demand instance allocation strategy.
    #[cfg(feature = "async")]
    pub fn with_host_stack(&mut self, stack_creator: Arc<dyn StackCreator>) -> &mut Self {
        self.stack_creator = Some(Arc::new(StackCreatorProxy(stack_creator)));
        self
    }
1598
1599    /// Sets a custom executable-memory publisher.
1600    ///
1601    /// Custom executable-memory publishers are hooks that allow
1602    /// Wasmtime to make certain regions of memory executable when
1603    /// loading precompiled modules or compiling new modules
1604    /// in-process. In most modern operating systems, memory allocated
1605    /// for heap usage is readable and writable by default but not
1606    /// executable. To jump to machine code stored in that memory, we
1607    /// need to make it executable. For security reasons, we usually
1608    /// also make it read-only at the same time, so the executing code
1609    /// can't be modified later.
1610    ///
1611    /// By default, Wasmtime will use the appropriate system calls on
1612    /// the host platform for this work. However, it also allows
1613    /// plugging in a custom implementation via this configuration
1614    /// option. This may be useful on custom or `no_std` platforms,
1615    /// for example, especially where virtual memory is not otherwise
1616    /// used by Wasmtime (no `signals-and-traps` feature).
1617    #[cfg(feature = "runtime")]
1618    pub fn with_custom_code_memory(
1619        &mut self,
1620        custom_code_memory: Option<Arc<dyn CustomCodeMemory>>,
1621    ) -> &mut Self {
1622        self.custom_code_memory = custom_code_memory;
1623        self
1624    }
1625
1626    /// Sets the instance allocation strategy to use.
1627    ///
1628    /// This is notably used in conjunction with
1629    /// [`InstanceAllocationStrategy::Pooling`] and [`PoolingAllocationConfig`].
1630    pub fn allocation_strategy(
1631        &mut self,
1632        strategy: impl Into<InstanceAllocationStrategy>,
1633    ) -> &mut Self {
1634        self.allocation_strategy = strategy.into();
1635        self
1636    }
1637
    /// Specifies the capacity of linear memories, in bytes, in their initial
    /// allocation.
    ///
    /// > Note: this value has important performance ramifications, be sure to
    /// > benchmark when setting this to a non-default value and read over this
    /// > documentation.
    ///
    /// This function will change the size of the initial memory allocation made
    /// for linear memories. This setting is only applicable when the initial
    /// size of a linear memory is below this threshold. Linear memories are
    /// allocated in the virtual address space of the host process with OS APIs
    /// such as `mmap` and this setting affects how large the allocation will
    /// be.
    ///
    /// ## Background: WebAssembly Linear Memories
    ///
    /// WebAssembly linear memories always start with a minimum size and can
    /// possibly grow up to a maximum size. The minimum size is always specified
    /// in a WebAssembly module itself and the maximum size can either be
    /// optionally specified in the module or inherently limited by the index
    /// type. For example for this module:
    ///
    /// ```wasm
    /// (module
    ///     (memory $a 4)
    ///     (memory $b 4096 4096 (pagesize 1))
    ///     (memory $c i64 10)
    /// )
    /// ```
    ///
    /// * Memory `$a` initially allocates 4 WebAssembly pages (256KiB) and can
    ///   grow up to 4GiB, the limit of the 32-bit index space.
    /// * Memory `$b` initially allocates 4096 WebAssembly pages, but in this
    ///   case its page size is 1, so it's 4096 bytes. Memory can also grow no
    ///   further meaning that it will always be 4096 bytes.
    /// * Memory `$c` is a 64-bit linear memory which starts with 640KiB of
    ///   memory and can theoretically grow up to 2^64 bytes, although most
    ///   hosts will run out of memory long before that.
    ///
    /// All operations on linear memories done by wasm are required to be
    /// in-bounds. Any access beyond the end of a linear memory is considered a
    /// trap.
    ///
    /// ## What this setting affects: Virtual Memory
    ///
    /// This setting is used to configure the behavior of the size of the linear
    /// memory allocation performed for each of these memories. For example the
    /// initial linear memory allocation looks like this:
    ///
    /// ```text
    ///              memory_reservation
    ///                    |
    ///          ◄─────────┴────────────────►
    /// ┌───────┬─────────┬──────────────────┬───────┐
    /// │ guard │ initial │ ... capacity ... │ guard │
    /// └───────┴─────────┴──────────────────┴───────┘
    ///  ◄──┬──►                              ◄──┬──►
    ///     │                                    │
    ///     │                             memory_guard_size
    ///     │
    ///     │
    ///  memory_guard_size (if guard_before_linear_memory)
    /// ```
    ///
    /// Memory in the `initial` range is accessible to the instance and can be
    /// read/written by wasm code. Memory in the `guard` regions is never
    /// accessible to wasm code and memory in `capacity` is initially
    /// inaccessible but may become accessible through `memory.grow` instructions
    /// for example.
    ///
    /// This means that this setting is the size of the initial chunk of virtual
    /// memory that a linear memory may grow into.
    ///
    /// ## What this setting affects: Runtime Speed
    ///
    /// This is a performance-sensitive setting which is taken into account
    /// during the compilation process of a WebAssembly module. For example if a
    /// 32-bit WebAssembly linear memory has a `memory_reservation` size of 4GiB
    /// then bounds checks can be elided because `capacity` will be guaranteed
    /// to be unmapped for all addressable bytes that wasm can access (modulo a
    /// few details).
    ///
    /// If `memory_reservation` was something smaller like 256KiB then that
    /// would have a much smaller impact on virtual memory but the compiled code
    /// would then need to have explicit bounds checks to ensure that
    /// loads/stores are in-bounds.
    ///
    /// The goal of this setting is to enable skipping bounds checks in most
    /// modules by default. Some situations which require explicit bounds checks
    /// though are:
    ///
    /// * When `memory_reservation` is smaller than the addressable size of the
    ///   linear memory. For example 64-bit linear memories always need
    ///   bounds checks as they can address the entire virtual address space.
    ///   For 32-bit linear memories a `memory_reservation` minimum size of 4GiB
    ///   is required to elide bounds checks.
    ///
    /// * When linear memories have a page size of 1 then bounds checks are
    ///   required. In this situation virtual memory can't be relied upon
    ///   because that operates at the host page size granularity where wasm
    ///   requires a per-byte level granularity.
    ///
    /// * Configuration settings such as [`Config::signals_based_traps`] can be
    ///   used to disable the use of signal handlers and virtual memory so
    ///   explicit bounds checks are required.
    ///
    /// * When [`Config::memory_guard_size`] is too small a bounds check may be
    ///   required. For 32-bit wasm addresses are actually 33-bit effective
    ///   addresses because loads/stores have a 32-bit static offset to add to
    ///   the dynamic 32-bit address. If the static offset is larger than the
    ///   size of the guard region then an explicit bounds check is required.
    ///
    /// ## What this setting affects: Memory Growth Behavior
    ///
    /// In addition to affecting bounds checks emitted in compiled code this
    /// setting also affects how WebAssembly linear memories are grown. The
    /// `memory.grow` instruction can be used to make a linear memory larger and
    /// this is also affected by APIs such as
    /// [`Memory::grow`](crate::Memory::grow).
    ///
    /// In these situations when the amount being grown is small enough to fit
    /// within the remaining capacity then the linear memory doesn't have to be
    /// moved at runtime. If the capacity runs out though then a new linear
    /// memory allocation must be made and the contents of linear memory is
    /// copied over.
    ///
    /// For example here's a situation where a copy happens:
    ///
    /// * The `memory_reservation` setting is configured to 128KiB.
    /// * A WebAssembly linear memory starts with a single 64KiB page.
    /// * This memory can be grown by one page to contain the full 128KiB of
    ///   memory.
    /// * If grown by one more page, though, then a 192KiB allocation must be
    ///   made and the previous 128KiB of contents are copied into the new
    ///   allocation.
    ///
    /// This growth behavior can have a significant performance impact if lots
    /// of data needs to be copied on growth. Conversely if memory growth never
    /// needs to happen because the capacity will always be large enough then
    /// optimizations can be applied to cache the base pointer of linear memory.
    ///
    /// When memory is grown then the
    /// [`Config::memory_reservation_for_growth`] is used for the new
    /// memory allocation to have memory to grow into.
    ///
    /// When using the pooling allocator via [`PoolingAllocationConfig`] then
    /// memories are never allowed to move so requests for growth are instead
    /// rejected with an error.
    ///
    /// ## When this setting is not used
    ///
    /// This setting is ignored and unused when the initial size of linear
    /// memory is larger than this threshold. For example if this setting is set
    /// to 1MiB but a wasm module requires a 2MiB minimum allocation then this
    /// setting is ignored. In this situation the minimum size of memory will be
    /// allocated along with [`Config::memory_reservation_for_growth`]
    /// after it to grow into.
    ///
    /// That means that this value can be set to zero. That can be useful in
    /// benchmarking to see the overhead of bounds checks for example.
    /// Additionally it can be used to minimize the virtual memory allocated by
    /// Wasmtime.
    ///
    /// ## Default Value
    ///
    /// The default value for this property depends on the host platform. For
    /// 64-bit platforms there's lots of address space available, so the default
    /// configured here is 4GiB. When coupled with the default size of
    /// [`Config::memory_guard_size`] this means that 32-bit WebAssembly linear
    /// memories with 64KiB page sizes will skip almost all bounds checks by
    /// default.
    ///
    /// For 32-bit platforms this value defaults to 10MiB. This means that
    /// bounds checks will be required on 32-bit platforms.
    pub fn memory_reservation(&mut self, bytes: u64) -> &mut Self {
        self.tunables.memory_reservation = Some(bytes);
        self
    }
1816
    /// Indicates whether linear memories may relocate their base pointer at
    /// runtime.
    ///
    /// WebAssembly linear memories either have a maximum size that's explicitly
    /// listed in the type of a memory or inherently limited by the index type
    /// of the memory (e.g. 4GiB for 32-bit linear memories). Depending on how
    /// the linear memory is allocated (see [`Config::memory_reservation`]) it
    /// may be necessary to move the memory in the host's virtual address space
    /// during growth. This option controls whether this movement is allowed or
    /// not.
    ///
    /// An example of a linear memory needing to move is when
    /// [`Config::memory_reservation`] is 0 then a linear memory will be
    /// allocated as the minimum size of the memory plus
    /// [`Config::memory_reservation_for_growth`]. When memory grows beyond the
    /// reservation for growth then the memory needs to be relocated.
    ///
    /// When this option is set to `false` then it can have a number of impacts
    /// on how memories work at runtime:
    ///
    /// * Modules can be compiled with static knowledge that the base pointer of
    ///   linear memory never changes to enable optimizations such as
    ///   loop invariant code motion (hoisting the base pointer out of a loop).
    ///
    /// * Memories cannot grow in excess of their original allocation. This
    ///   means that [`Config::memory_reservation`] and
    ///   [`Config::memory_reservation_for_growth`] may need tuning to ensure
    ///   the memory configuration works at runtime.
    ///
    /// The default value for this option is `true`.
    pub fn memory_may_move(&mut self, enable: bool) -> &mut Self {
        self.tunables.memory_may_move = Some(enable);
        self
    }
1851
    /// Configures the size, in bytes, of the guard region used at the end of a
    /// linear memory's address space reservation.
    ///
    /// > Note: this value has important performance ramifications, be sure to
    /// > understand what this value does before tweaking it and benchmarking.
    ///
    /// This setting controls how many bytes are guaranteed to be unmapped after
    /// the virtual memory allocation of a linear memory. When
    /// combined with sufficiently large values of
    /// [`Config::memory_reservation`] (e.g. 4GiB for 32-bit linear memories)
    /// then a guard region can be used to eliminate bounds checks in generated
    /// code.
    ///
    /// This setting additionally can be used to help deduplicate bounds checks
    /// in code that otherwise requires bounds checks. For example with a 4KiB
    /// guard region, a 64-bit linear memory which accesses addresses `x+8`
    /// and `x+16` only needs to perform a single bounds check on `x`. If that
    /// bounds check passes then the offset is guaranteed to either reside in
    /// linear memory or the guard region, resulting in deterministic behavior
    /// either way.
    ///
    /// ## How big should the guard be?
    ///
    /// In general, like with configuring [`Config::memory_reservation`], you
    /// probably don't want to change this value from the defaults. Removing
    /// bounds checks is dependent on a number of factors where the size of the
    /// guard region is only one piece of the equation. Other factors include:
    ///
    /// * [`Config::memory_reservation`]
    /// * The index type of the linear memory (e.g. 32-bit or 64-bit)
    /// * The page size of the linear memory
    /// * Other settings such as [`Config::signals_based_traps`]
    ///
    /// Embeddings using virtual memory almost always want at least some guard
    /// region, but otherwise changes from the default should be profiled
    /// locally to see the performance impact.
    ///
    /// ## Default
    ///
    /// The default value for this property is 32MiB on 64-bit platforms. This
    /// allows eliminating almost all bounds checks on loads/stores with an
    /// immediate offset of less than 32MiB. On 32-bit platforms this defaults
    /// to 64KiB.
    pub fn memory_guard_size(&mut self, bytes: u64) -> &mut Self {
        self.tunables.memory_guard_size = Some(bytes);
        self
    }
1899
    /// Configures the size, in bytes, of the extra virtual memory space
    /// reserved after a linear memory is relocated.
    ///
    /// This setting is used in conjunction with [`Config::memory_reservation`]
    /// to configure what happens after a linear memory is relocated in the host
    /// address space. If the initial size of a linear memory exceeds
    /// [`Config::memory_reservation`] or if it grows beyond that size
    /// throughout its lifetime then this setting will be used.
    ///
    /// When a linear memory is relocated it will initially look like this:
    ///
    /// ```text
    ///            memory.size
    ///                 │
    ///          ◄──────┴─────►
    /// ┌───────┬──────────────┬───────┐
    /// │ guard │  accessible  │ guard │
    /// └───────┴──────────────┴───────┘
    ///                         ◄──┬──►
    ///                            │
    ///                     memory_guard_size
    /// ```
    ///
    /// where `accessible` needs to be grown but there's no more memory to grow
    /// into. A new region of the virtual address space will be allocated that
    /// looks like this:
    ///
    /// ```text
    ///                           memory_reservation_for_growth
    ///                                       │
    ///            memory.size                │
    ///                 │                     │
    ///          ◄──────┴─────► ◄─────────────┴───────────►
    /// ┌───────┬──────────────┬───────────────────────────┬───────┐
    /// │ guard │  accessible  │ .. reserved for growth .. │ guard │
    /// └───────┴──────────────┴───────────────────────────┴───────┘
    ///                                                     ◄──┬──►
    ///                                                        │
    ///                                               memory_guard_size
    /// ```
    ///
    /// This means that up to `memory_reservation_for_growth` bytes can be
    /// allocated again before the entire linear memory needs to be moved again,
    /// at which point another `memory_reservation_for_growth` bytes will be
    /// appended to the size of the allocation.
    ///
    /// Note that this is currently a simple heuristic for optimizing the growth
    /// of dynamic memories, primarily implemented for the memory64 proposal
    /// where the maximum size of memory is larger than 4GiB. This setting is
    /// unlikely to be a one-size-fits-all style approach and if you're an
    /// embedder running into issues with growth and are interested in having
    /// other growth strategies available here please feel free to [open an
    /// issue on the Wasmtime repository][issue]!
    ///
    /// [issue]: https://github.com/bytecodealliance/wasmtime/issues/new
    ///
    /// ## Default
    ///
    /// For 64-bit platforms this defaults to 2GiB, and for 32-bit platforms
    /// this defaults to 1MiB.
    pub fn memory_reservation_for_growth(&mut self, bytes: u64) -> &mut Self {
        self.tunables.memory_reservation_for_growth = Some(bytes);
        self
    }
1964
1965    /// Indicates whether a guard region is present before allocations of
1966    /// linear memory.
1967    ///
1968    /// Guard regions before linear memories are never used during normal
1969    /// operation of WebAssembly modules, even if they have out-of-bounds
1970    /// loads. The only purpose for a preceding guard region in linear memory
1971    /// is extra protection against possible bugs in code generators like
1972    /// Cranelift. This setting does not affect performance in any way, but will
1973    /// result in larger virtual memory reservations for linear memories (it
1974    /// won't actually ever use more memory, just use more of the address
1975    /// space).
1976    ///
1977    /// The size of the guard region before linear memory is the same as the
1978    /// guard size that comes after linear memory, which is configured by
1979    /// [`Config::memory_guard_size`].
1980    ///
1981    /// ## Default
1982    ///
1983    /// This value defaults to `true`.
1984    pub fn guard_before_linear_memory(&mut self, enable: bool) -> &mut Self {
1985        self.tunables.guard_before_linear_memory = Some(enable);
1986        self
1987    }
1988
1989    /// Indicates whether to initialize tables lazily, so that instantiation
1990    /// is fast but indirect calls are a little slower. If false, tables
1991    /// are initialized eagerly during instantiation from any active element
1992    /// segments that apply to them.
1993    ///
1994    /// **Note** Disabling this option is not compatible with the Winch compiler.
1995    ///
1996    /// ## Default
1997    ///
1998    /// This value defaults to `true`.
1999    pub fn table_lazy_init(&mut self, table_lazy_init: bool) -> &mut Self {
2000        self.tunables.table_lazy_init = Some(table_lazy_init);
2001        self
2002    }
2003
2004    /// Configure the version information used in serialized and deserialized [`crate::Module`]s.
2005    /// This effects the behavior of [`crate::Module::serialize()`], as well as
2006    /// [`crate::Module::deserialize()`] and related functions.
2007    ///
2008    /// The default strategy is to use the wasmtime crate's Cargo package version.
2009    pub fn module_version(&mut self, strategy: ModuleVersionStrategy) -> Result<&mut Self> {
2010        match strategy {
2011            // This case requires special precondition for assertion in SerializedModule::to_bytes
2012            ModuleVersionStrategy::Custom(ref v) => {
2013                if v.as_bytes().len() > 255 {
2014                    bail!("custom module version cannot be more than 255 bytes: {v}");
2015                }
2016            }
2017            _ => {}
2018        }
2019        self.module_version = strategy;
2020        Ok(self)
2021    }
2022
2023    /// Configure whether wasmtime should compile a module using multiple
2024    /// threads.
2025    ///
2026    /// Disabling this will result in a single thread being used to compile
2027    /// the wasm bytecode.
2028    ///
2029    /// By default parallel compilation is enabled.
2030    #[cfg(feature = "parallel-compilation")]
2031    pub fn parallel_compilation(&mut self, parallel: bool) -> &mut Self {
2032        self.parallel_compilation = parallel;
2033        self
2034    }
2035
2036    /// Configures whether compiled artifacts will contain information to map
2037    /// native program addresses back to the original wasm module.
2038    ///
2039    /// This configuration option is `true` by default and, if enabled,
2040    /// generates the appropriate tables in compiled modules to map from native
2041    /// address back to wasm source addresses. This is used for displaying wasm
2042    /// program counters in backtraces as well as generating filenames/line
2043    /// numbers if so configured as well (and the original wasm module has DWARF
2044    /// debugging information present).
2045    pub fn generate_address_map(&mut self, generate: bool) -> &mut Self {
2046        self.tunables.generate_address_map = Some(generate);
2047        self
2048    }
2049
    /// Configures whether copy-on-write memory-mapped data is used to
    /// initialize a linear memory.
    ///
    /// Initializing linear memory via a copy-on-write mapping can drastically
    /// improve instantiation costs of a WebAssembly module because copying
    /// memory is deferred. Additionally if a page of memory is only ever read
    /// from WebAssembly and never written to then the same underlying page of
    /// data will be reused between all instantiations of a module meaning that
    /// if a module is instantiated many times this can lower the overall memory
    /// needed to run that module.
    ///
    /// The main disadvantage of copy-on-write initialization, however, is that
    /// it may be possible for highly-parallel scenarios to be less scalable. If
    /// a page is read initially by a WebAssembly module then that page will be
    /// mapped to a read-only copy shared between all WebAssembly instances. If
    /// the same page is then written, however, then a private copy is created
    /// and swapped out from the read-only version. This also requires an [IPI],
    /// however, which can be a significant bottleneck in high-parallelism
    /// situations.
    ///
    /// This feature is only applicable when a WebAssembly module meets specific
    /// criteria to be initialized in this fashion, such as:
    ///
    /// * Only memories defined in the module can be initialized this way.
    /// * Data segments for memory must use statically known offsets.
    /// * Data segments for memory must all be in-bounds.
    ///
    /// Modules which do not meet these criteria will fall back to
    /// initialization of linear memory based on copying memory.
    ///
    /// This feature of Wasmtime is also platform-specific:
    ///
    /// * Linux - this feature is supported for all instances of [`Module`].
    ///   Modules backed by an existing mmap (such as those created by
    ///   [`Module::deserialize_file`]) will reuse that mmap to cow-initialize
    ///   memory. Other instances of [`Module`] may use the `memfd_create`
    ///   syscall to create an initialization image to `mmap`.
    /// * Unix (not Linux) - this feature is only supported when loading modules
    ///   from a precompiled file via [`Module::deserialize_file`] where there
    ///   is a file descriptor to use to map data into the process. Note that
    ///   the module must have been compiled with this setting enabled as well.
    /// * Windows - there is no support for this feature at this time. Memory
    ///   initialization will always copy bytes.
    ///
    /// By default this option is enabled.
    ///
    /// [`Module::deserialize_file`]: crate::Module::deserialize_file
    /// [`Module`]: crate::Module
    /// [IPI]: https://en.wikipedia.org/wiki/Inter-processor_interrupt
    pub fn memory_init_cow(&mut self, enable: bool) -> &mut Self {
        self.tunables.memory_init_cow = Some(enable);
        self
    }
2103
2104    /// A configuration option to force the usage of `memfd_create` on Linux to
2105    /// be used as the backing source for a module's initial memory image.
2106    ///
2107    /// When [`Config::memory_init_cow`] is enabled, which is enabled by
2108    /// default, module memory initialization images are taken from a module's
2109    /// original mmap if possible. If a precompiled module was loaded from disk
2110    /// this means that the disk's file is used as an mmap source for the
2111    /// initial linear memory contents. This option can be used to force, on
2112    /// Linux, that instead of using the original file on disk a new in-memory
2113    /// file is created with `memfd_create` to hold the contents of the initial
2114    /// image.
2115    ///
2116    /// This option can be used to avoid possibly loading the contents of memory
2117    /// from disk through a page fault. Instead with `memfd_create` the contents
2118    /// of memory are always in RAM, meaning that even page faults which
2119    /// initially populate a wasm linear memory will only work with RAM instead
2120    /// of ever hitting the disk that the original precompiled module is stored
2121    /// on.
2122    ///
2123    /// This option is disabled by default.
2124    pub fn force_memory_init_memfd(&mut self, enable: bool) -> &mut Self {
2125        self.force_memory_init_memfd = enable;
2126        self
2127    }
2128
2129    /// Configures whether or not a coredump should be generated and attached to
2130    /// the anyhow::Error when a trap is raised.
2131    ///
2132    /// This option is disabled by default.
2133    #[cfg(feature = "coredump")]
2134    pub fn coredump_on_trap(&mut self, enable: bool) -> &mut Self {
2135        self.coredump_on_trap = enable;
2136        self
2137    }
2138
    /// Enables memory error checking for wasm programs.
    ///
    /// This option is disabled by default.
    ///
    /// # Panics
    ///
    /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
    #[cfg(any(feature = "cranelift", feature = "winch"))]
    pub fn wmemcheck(&mut self, enable: bool) -> &mut Self {
        self.wmemcheck = enable;
        // Keep the compiler configuration in sync with the engine-level flag
        // (`compiler_config_mut` is presumably where the documented panic for
        // a disabled compiler originates — confirm against its definition).
        self.compiler_config_mut().wmemcheck = enable;
        self
    }
2152
2153    /// Configures the "guaranteed dense image size" for copy-on-write
2154    /// initialized memories.
2155    ///
2156    /// When using the [`Config::memory_init_cow`] feature to initialize memory
2157    /// efficiently (which is enabled by default), compiled modules contain an
2158    /// image of the module's initial heap. If the module has a fairly sparse
2159    /// initial heap, with just a few data segments at very different offsets,
2160    /// this could result in a large region of zero bytes in the image. In
2161    /// other words, it's not very memory-efficient.
2162    ///
2163    /// We normally use a heuristic to avoid this: if less than half
2164    /// of the initialized range (first non-zero to last non-zero
2165    /// byte) of any memory in the module has pages with nonzero
2166    /// bytes, then we avoid creating a memory image for the entire module.
2167    ///
2168    /// However, if the embedder always needs the instantiation-time efficiency
2169    /// of copy-on-write initialization, and is otherwise carefully controlling
2170    /// parameters of the modules (for example, by limiting the maximum heap
2171    /// size of the modules), then it may be desirable to ensure a memory image
2172    /// is created even if this could go against the heuristic above. Thus, we
2173    /// add another condition: there is a size of initialized data region up to
2174    /// which we *always* allow a memory image. The embedder can set this to a
2175    /// known maximum heap size if they desire to always get the benefits of
2176    /// copy-on-write images.
2177    ///
2178    /// In the future we may implement a "best of both worlds"
2179    /// solution where we have a dense image up to some limit, and
2180    /// then support a sparse list of initializers beyond that; this
2181    /// would get most of the benefit of copy-on-write and pay the incremental
2182    /// cost of eager initialization only for those bits of memory
2183    /// that are out-of-bounds. However, for now, an embedder desiring
2184    /// fast instantiation should ensure that this setting is as large
2185    /// as the maximum module initial memory content size.
2186    ///
2187    /// By default this value is 16 MiB.
2188    pub fn memory_guaranteed_dense_image_size(&mut self, size_in_bytes: u64) -> &mut Self {
2189        self.memory_guaranteed_dense_image_size = size_in_bytes;
2190        self
2191    }
2192
2193    /// Whether to enable function inlining during compilation or not.
2194    ///
2195    /// This may result in faster execution at runtime, but adds additional
2196    /// compilation time. Inlining may also enlarge the size of compiled
2197    /// artifacts (for example, the size of the result of
2198    /// [`Engine::precompile_component`](crate::Engine::precompile_component)).
2199    ///
2200    /// Inlining is not supported by all of Wasmtime's compilation strategies;
2201    /// currently, it only Cranelift supports it. This setting will be ignored
2202    /// when using a compilation strategy that does not support inlining, like
2203    /// Winch.
2204    ///
2205    /// Note that inlining is still somewhat experimental at the moment (as of
2206    /// the Wasmtime version 36).
2207    pub fn compiler_inlining(&mut self, inlining: bool) -> &mut Self {
2208        self.tunables.inlining = Some(inlining);
2209        self
2210    }
2211
    /// Returns the set of features that the currently selected compiler backend
    /// does not support at all and may panic on.
    ///
    /// Wasmtime strives to reject unknown modules or unsupported modules with
    /// first-class errors instead of panics. Not all compiler backends have the
    /// same level of feature support on all platforms as well. This method
    /// returns a set of features that the currently selected compiler
    /// configuration is known to not support and may panic on. This acts as a
    /// first-level filter on incoming wasm modules/configuration to fail-fast
    /// instead of panicking later on.
    ///
    /// Note that if a feature is not listed here it does not mean that the
    /// backend fully supports the proposal. Instead that means that the backend
    /// doesn't ever panic on the proposal, but errors during compilation may
    /// still be returned. This means that features listed here are definitely
    /// not supported at all, but features not listed here may still be
    /// partially supported. For example at the time of this writing the Winch
    /// backend partially supports simd so it's not listed here. Winch doesn't
    /// fully support simd but unimplemented instructions just return errors.
    fn compiler_panicking_wasm_features(&self) -> WasmFeatures {
        // First we compute the set of features that Wasmtime itself knows;
        // this is a sort of "maximal set" that we invert to create a set
        // of features we _definitely can't support_ because wasmtime
        // has never heard of them.
        let features_known_to_wasmtime = WasmFeatures::empty()
            | WasmFeatures::MUTABLE_GLOBAL
            | WasmFeatures::SATURATING_FLOAT_TO_INT
            | WasmFeatures::SIGN_EXTENSION
            | WasmFeatures::REFERENCE_TYPES
            | WasmFeatures::CALL_INDIRECT_OVERLONG
            | WasmFeatures::MULTI_VALUE
            | WasmFeatures::BULK_MEMORY
            | WasmFeatures::BULK_MEMORY_OPT
            | WasmFeatures::SIMD
            | WasmFeatures::RELAXED_SIMD
            | WasmFeatures::THREADS
            | WasmFeatures::SHARED_EVERYTHING_THREADS
            | WasmFeatures::TAIL_CALL
            | WasmFeatures::FLOATS
            | WasmFeatures::MULTI_MEMORY
            | WasmFeatures::EXCEPTIONS
            | WasmFeatures::MEMORY64
            | WasmFeatures::EXTENDED_CONST
            | WasmFeatures::COMPONENT_MODEL
            | WasmFeatures::FUNCTION_REFERENCES
            | WasmFeatures::GC
            | WasmFeatures::CUSTOM_PAGE_SIZES
            | WasmFeatures::GC_TYPES
            | WasmFeatures::STACK_SWITCHING
            | WasmFeatures::WIDE_ARITHMETIC
            | WasmFeatures::CM_ASYNC
            | WasmFeatures::CM_ASYNC_STACKFUL
            | WasmFeatures::CM_ASYNC_BUILTINS
            | WasmFeatures::CM_THREADING
            | WasmFeatures::CM_ERROR_CONTEXT
            | WasmFeatures::CM_GC;

        #[allow(unused_mut, reason = "easier to avoid #[cfg]")]
        let mut unsupported = !features_known_to_wasmtime;

        // Next, refine the unsupported set based on which compiler backend is
        // selected (when one is compiled in) and which target it compiles for.
        #[cfg(any(feature = "cranelift", feature = "winch"))]
        match self.compiler_config.as_ref().and_then(|c| c.strategy) {
            None | Some(Strategy::Cranelift) => {
                // Pulley at this time fundamentally doesn't support the
                // `threads` proposal, notably shared memory, because Rust can't
                // safely implement loads/stores in the face of shared memory.
                // Stack switching is not implemented, either.
                if self.compiler_target().is_pulley() {
                    unsupported |= WasmFeatures::THREADS;
                    unsupported |= WasmFeatures::STACK_SWITCHING;
                }

                use target_lexicon::*;
                match self.compiler_target() {
                    Triple {
                        architecture: Architecture::X86_64 | Architecture::X86_64h,
                        operating_system:
                            OperatingSystem::Linux
                            | OperatingSystem::MacOSX(_)
                            | OperatingSystem::Darwin(_),
                        ..
                    } => {
                        // Stack switching supported on (non-Pulley) Cranelift.
                    }

                    _ => {
                        // On platforms other than x64 Unix-like, we don't
                        // support stack switching.
                        unsupported |= WasmFeatures::STACK_SWITCHING;
                    }
                }
            }
            Some(Strategy::Winch) => {
                // Proposals Winch doesn't implement at all, on any target.
                unsupported |= WasmFeatures::GC
                    | WasmFeatures::FUNCTION_REFERENCES
                    | WasmFeatures::RELAXED_SIMD
                    | WasmFeatures::TAIL_CALL
                    | WasmFeatures::GC_TYPES
                    | WasmFeatures::EXCEPTIONS
                    | WasmFeatures::LEGACY_EXCEPTIONS
                    | WasmFeatures::STACK_SWITCHING
                    | WasmFeatures::CM_ASYNC;
                match self.compiler_target().architecture {
                    target_lexicon::Architecture::Aarch64(_) => {
                        unsupported |= WasmFeatures::THREADS;
                        unsupported |= WasmFeatures::WIDE_ARITHMETIC;
                    }

                    // Winch doesn't support other non-x64 architectures at this
                    // time either but will return an first-class error for
                    // them.
                    _ => {}
                }
            }
            // `Auto` is expected to have been resolved to a concrete strategy
            // before this method is consulted.
            Some(Strategy::Auto) => unreachable!(),
        }
        unsupported
    }
2330
2331    /// Calculates the set of features that are enabled for this `Config`.
2332    ///
2333    /// This method internally will start with the an empty set of features to
2334    /// avoid being tied to wasmparser's defaults. Next Wasmtime's set of
2335    /// default features are added to this set, some of which are conditional
2336    /// depending on crate features. Finally explicitly requested features via
2337    /// `wasm_*` methods on `Config` are applied. Everything is then validated
2338    /// later in `Config::validate`.
2339    fn features(&self) -> WasmFeatures {
2340        // Wasmtime by default supports all of the wasm 2.0 version of the
2341        // specification.
2342        let mut features = WasmFeatures::WASM2;
2343
2344        // On-by-default features that wasmtime has. Note that these are all
2345        // subject to the criteria at
2346        // https://docs.wasmtime.dev/contributing-implementing-wasm-proposals.html
2347        // and
2348        // https://docs.wasmtime.dev/stability-wasm-proposals.html
2349        features |= WasmFeatures::MULTI_MEMORY;
2350        features |= WasmFeatures::RELAXED_SIMD;
2351        features |= WasmFeatures::TAIL_CALL;
2352        features |= WasmFeatures::EXTENDED_CONST;
2353        features |= WasmFeatures::MEMORY64;
2354        // NB: if you add a feature above this line please double-check
2355        // https://docs.wasmtime.dev/stability-wasm-proposals.html
2356        // to ensure all requirements are met and/or update the documentation
2357        // there too.
2358
2359        // Set some features to their conditionally-enabled defaults depending
2360        // on crate compile-time features.
2361        features.set(WasmFeatures::GC_TYPES, cfg!(feature = "gc"));
2362        features.set(WasmFeatures::THREADS, cfg!(feature = "threads"));
2363        features.set(
2364            WasmFeatures::COMPONENT_MODEL,
2365            cfg!(feature = "component-model"),
2366        );
2367
2368        // From the default set of proposals remove any that the current
2369        // compiler backend may panic on if the module contains them.
2370        features = features & !self.compiler_panicking_wasm_features();
2371
2372        // After wasmtime's defaults are configured then factor in user requests
2373        // and disable/enable features. Note that the enable/disable sets should
2374        // be disjoint.
2375        debug_assert!((self.enabled_features & self.disabled_features).is_empty());
2376        features &= !self.disabled_features;
2377        features |= self.enabled_features;
2378
2379        features
2380    }
2381
2382    /// Returns the configured compiler target for this `Config`.
2383    pub(crate) fn compiler_target(&self) -> target_lexicon::Triple {
2384        // If a target is explicitly configured, always use that.
2385        if let Some(target) = self.target.clone() {
2386            return target;
2387        }
2388
2389        // If the `build.rs` script determined that this platform uses pulley by
2390        // default, then use Pulley.
2391        if cfg!(default_target_pulley) {
2392            return target_lexicon::Triple::pulley_host();
2393        }
2394
2395        // And at this point the target is for sure the host.
2396        target_lexicon::Triple::host()
2397    }
2398
2399    pub(crate) fn validate(&self) -> Result<(Tunables, WasmFeatures)> {
2400        let features = self.features();
2401
2402        // First validate that the selected compiler backend and configuration
2403        // supports the set of `features` that are enabled. This will help
2404        // provide more first class errors instead of panics about unsupported
2405        // features and configurations.
2406        let unsupported = features & self.compiler_panicking_wasm_features();
2407        if !unsupported.is_empty() {
2408            for flag in WasmFeatures::FLAGS.iter() {
2409                if !unsupported.contains(*flag.value()) {
2410                    continue;
2411                }
2412                bail!(
2413                    "the wasm_{} feature is not supported on this compiler configuration",
2414                    flag.name().to_lowercase()
2415                );
2416            }
2417
2418            panic!("should have returned an error by now")
2419        }
2420
2421        #[cfg(any(feature = "async", feature = "stack-switching"))]
2422        if self.async_support && self.max_wasm_stack > self.async_stack_size {
2423            bail!("max_wasm_stack size cannot exceed the async_stack_size");
2424        }
2425        if self.max_wasm_stack == 0 {
2426            bail!("max_wasm_stack size cannot be zero");
2427        }
2428        if !cfg!(feature = "wmemcheck") && self.wmemcheck {
2429            bail!("wmemcheck (memory checker) was requested but is not enabled in this build");
2430        }
2431
2432        if !cfg!(feature = "gc") && features.gc_types() {
2433            bail!("support for GC was disabled at compile time")
2434        }
2435
2436        if !cfg!(feature = "gc") && features.contains(WasmFeatures::EXCEPTIONS) {
2437            bail!("exceptions support requires garbage collection (GC) to be enabled in the build");
2438        }
2439
2440        let mut tunables = Tunables::default_for_target(&self.compiler_target())?;
2441
2442        // If no target is explicitly specified then further refine `tunables`
2443        // for the configuration of this host depending on what platform
2444        // features were found available at compile time. This means that anyone
2445        // cross-compiling for a customized host will need to further refine
2446        // compilation options.
2447        if self.target.is_none() {
2448            // If this platform doesn't have native signals then change some
2449            // defaults to account for that. Note that VM guards are turned off
2450            // here because that's primarily a feature of eliding
2451            // bounds-checks.
2452            if !cfg!(has_native_signals) {
2453                tunables.signals_based_traps = cfg!(has_native_signals);
2454                tunables.memory_guard_size = 0;
2455            }
2456
2457            // When virtual memory is not available use slightly different
2458            // defaults for tunables to be more amenable to `MallocMemory`.
2459            // Note that these can still be overridden by config options.
2460            if !cfg!(has_virtual_memory) {
2461                tunables.memory_reservation = 0;
2462                tunables.memory_reservation_for_growth = 1 << 20; // 1MB
2463                tunables.memory_init_cow = false;
2464            }
2465        }
2466
2467        // If guest-debugging is enabled, we must disable
2468        // signals-based traps. Do this before we process the user's
2469        // provided tunables settings so we can detect a conflict with
2470        // an explicit request to use signals-based traps.
2471        #[cfg(feature = "debug")]
2472        if self.tunables.debug_guest == Some(true) {
2473            tunables.signals_based_traps = false;
2474        }
2475
2476        self.tunables.configure(&mut tunables);
2477
2478        // If we're going to compile with winch, we must use the winch calling convention.
2479        #[cfg(any(feature = "cranelift", feature = "winch"))]
2480        {
2481            tunables.winch_callable = self
2482                .compiler_config
2483                .as_ref()
2484                .is_some_and(|c| c.strategy == Some(Strategy::Winch));
2485        }
2486
2487        tunables.collector = if features.gc_types() {
2488            #[cfg(feature = "gc")]
2489            {
2490                use wasmtime_environ::Collector as EnvCollector;
2491                Some(match self.collector.try_not_auto()? {
2492                    Collector::DeferredReferenceCounting => EnvCollector::DeferredReferenceCounting,
2493                    Collector::Null => EnvCollector::Null,
2494                    Collector::Auto => unreachable!(),
2495                })
2496            }
2497            #[cfg(not(feature = "gc"))]
2498            bail!("cannot use GC types: the `gc` feature was disabled at compile time")
2499        } else {
2500            None
2501        };
2502
2503        if tunables.debug_guest {
2504            ensure!(
2505                cfg!(feature = "debug"),
2506                "debug instrumentation support was disabled at compile time"
2507            );
2508            ensure!(
2509                !tunables.signals_based_traps,
2510                "cannot use signals-based traps with guest debugging enabled"
2511            );
2512        }
2513
2514        Ok((tunables, features))
2515    }
2516
    /// Builds the instance allocator (on-demand or pooling) selected by
    /// `self.allocation_strategy`.
    #[cfg(feature = "runtime")]
    pub(crate) fn build_allocator(
        &self,
        tunables: &Tunables,
    ) -> Result<Box<dyn InstanceAllocator + Send + Sync>> {
        // Async stack parameters only exist when the `async` feature is
        // enabled; otherwise substitute inert values.
        #[cfg(feature = "async")]
        let (stack_size, stack_zeroing) = (self.async_stack_size, self.async_stack_zeroing);

        #[cfg(not(feature = "async"))]
        let (stack_size, stack_zeroing) = (0, false);

        // `tunables` is only consumed by the pooling allocator arm below;
        // this silences an unused-variable warning when that feature is off.
        let _ = tunables;

        match &self.allocation_strategy {
            InstanceAllocationStrategy::OnDemand => {
                let mut _allocator = Box::new(OnDemandInstanceAllocator::new(
                    self.mem_creator.clone(),
                    stack_size,
                    stack_zeroing,
                ));
                // A custom stack creator is only configurable (and only
                // exists) with the `async` feature enabled.
                #[cfg(feature = "async")]
                if let Some(stack_creator) = &self.stack_creator {
                    _allocator.set_stack_creator(stack_creator.clone());
                }
                Ok(_allocator)
            }
            #[cfg(feature = "pooling-allocator")]
            InstanceAllocationStrategy::Pooling(config) => {
                // Propagate the async stack settings into the pooling
                // configuration before constructing the allocator.
                let mut config = config.config;
                config.stack_size = stack_size;
                config.async_stack_zeroing = stack_zeroing;
                Ok(Box::new(crate::runtime::vm::PoolingInstanceAllocator::new(
                    &config, tunables,
                )?))
            }
        }
    }
2554
    /// Constructs the GC runtime for this config's selected collector, or
    /// `None` when GC types are not enabled.
    #[cfg(feature = "runtime")]
    pub(crate) fn build_gc_runtime(&self) -> Result<Option<Arc<dyn GcRuntime>>> {
        // Without GC types enabled no GC runtime is needed at all.
        if !self.features().gc_types() {
            return Ok(None);
        }

        #[cfg(not(feature = "gc"))]
        bail!("cannot create a GC runtime: the `gc` feature was disabled at compile time");

        #[cfg(feature = "gc")]
        #[cfg_attr(
            not(any(feature = "gc-null", feature = "gc-drc")),
            expect(unreachable_code, reason = "definitions known to be dummy")
        )]
        {
            // `try_not_auto` yields a concrete collector choice (or errors),
            // hence the `Auto` arm below is unreachable. Arms for collectors
            // whose crate feature is compiled out are likewise unreachable
            // because `features().gc_types()` couldn't have selected them.
            Ok(Some(match self.collector.try_not_auto()? {
                #[cfg(feature = "gc-drc")]
                Collector::DeferredReferenceCounting => {
                    Arc::new(crate::runtime::vm::DrcCollector::default()) as Arc<dyn GcRuntime>
                }
                #[cfg(not(feature = "gc-drc"))]
                Collector::DeferredReferenceCounting => unreachable!(),

                #[cfg(feature = "gc-null")]
                Collector::Null => {
                    Arc::new(crate::runtime::vm::NullCollector::default()) as Arc<dyn GcRuntime>
                }
                #[cfg(not(feature = "gc-null"))]
                Collector::Null => unreachable!(),

                Collector::Auto => unreachable!(),
            }))
        }
    }
2589
2590    #[cfg(feature = "runtime")]
2591    pub(crate) fn build_profiler(&self) -> Result<Box<dyn ProfilingAgent>> {
2592        Ok(match self.profiling_strategy {
2593            ProfilingStrategy::PerfMap => profiling_agent::new_perfmap()?,
2594            ProfilingStrategy::JitDump => profiling_agent::new_jitdump()?,
2595            ProfilingStrategy::VTune => profiling_agent::new_vtune()?,
2596            ProfilingStrategy::None => profiling_agent::new_null(),
2597            ProfilingStrategy::Pulley => profiling_agent::new_pulley()?,
2598        })
2599    }
2600
2601    #[cfg(any(feature = "cranelift", feature = "winch"))]
2602    pub(crate) fn build_compiler(
2603        mut self,
2604        tunables: &mut Tunables,
2605        features: WasmFeatures,
2606    ) -> Result<(Self, Box<dyn wasmtime_environ::Compiler>)> {
2607        let target = self.compiler_target();
2608
2609        // The target passed to the builders below is an `Option<Triple>` where
2610        // `None` represents the current host with CPU features inferred from
2611        // the host's CPU itself. The `target` above is not an `Option`, so
2612        // switch it to `None` in the case that a target wasn't explicitly
2613        // specified (which indicates no feature inference) and the target
2614        // matches the host.
2615        let target_for_builder =
2616            if self.target.is_none() && target == target_lexicon::Triple::host() {
2617                None
2618            } else {
2619                Some(target.clone())
2620            };
2621
2622        let mut compiler = match self.compiler_config_mut().strategy {
2623            #[cfg(feature = "cranelift")]
2624            Some(Strategy::Cranelift) => wasmtime_cranelift::builder(target_for_builder)?,
2625            #[cfg(not(feature = "cranelift"))]
2626            Some(Strategy::Cranelift) => bail!("cranelift support not compiled in"),
2627            #[cfg(feature = "winch")]
2628            Some(Strategy::Winch) => wasmtime_winch::builder(target_for_builder)?,
2629            #[cfg(not(feature = "winch"))]
2630            Some(Strategy::Winch) => bail!("winch support not compiled in"),
2631
2632            None | Some(Strategy::Auto) => unreachable!(),
2633        };
2634
2635        if let Some(path) = &self.compiler_config_mut().clif_dir {
2636            compiler.clif_dir(path)?;
2637        }
2638
2639        // If probestack is enabled for a target, Wasmtime will always use the
2640        // inline strategy which doesn't require us to define a `__probestack`
2641        // function or similar.
2642        self.compiler_config_mut()
2643            .settings
2644            .insert("probestack_strategy".into(), "inline".into());
2645
2646        // We enable stack probing by default on all targets.
2647        // This is required on Windows because of the way Windows
2648        // commits its stacks, but it's also a good idea on other
2649        // platforms to ensure guard pages are hit for large frame
2650        // sizes.
2651        self.compiler_config_mut()
2652            .flags
2653            .insert("enable_probestack".into());
2654
2655        // The current wasm multivalue implementation depends on this.
2656        // FIXME(#9510) handle this in wasmtime-cranelift instead.
2657        self.compiler_config_mut()
2658            .flags
2659            .insert("enable_multi_ret_implicit_sret".into());
2660
2661        if let Some(unwind_requested) = self.native_unwind_info {
2662            if !self
2663                .compiler_config_mut()
2664                .ensure_setting_unset_or_given("unwind_info", &unwind_requested.to_string())
2665            {
2666                bail!(
2667                    "incompatible settings requested for Cranelift and Wasmtime `unwind-info` settings"
2668                );
2669            }
2670        }
2671
2672        if target.operating_system == target_lexicon::OperatingSystem::Windows {
2673            if !self
2674                .compiler_config_mut()
2675                .ensure_setting_unset_or_given("unwind_info", "true")
2676            {
2677                bail!("`native_unwind_info` cannot be disabled on Windows");
2678            }
2679        }
2680
2681        // We require frame pointers for correct stack walking, which is safety
2682        // critical in the presence of reference types, and otherwise it is just
2683        // really bad developer experience to get wrong.
2684        self.compiler_config_mut()
2685            .settings
2686            .insert("preserve_frame_pointers".into(), "true".into());
2687
2688        if !tunables.signals_based_traps {
2689            let mut ok = self
2690                .compiler_config_mut()
2691                .ensure_setting_unset_or_given("enable_table_access_spectre_mitigation", "false");
2692            ok = ok
2693                && self.compiler_config_mut().ensure_setting_unset_or_given(
2694                    "enable_heap_access_spectre_mitigation",
2695                    "false",
2696                );
2697
2698            // Right now spectre-mitigated bounds checks will load from zero so
2699            // if host-based signal handlers are disabled then that's a mismatch
2700            // and doesn't work right now. Fixing this will require more thought
2701            // of how to implement the bounds check in spectre-only mode.
2702            if !ok {
2703                bail!(
2704                    "when signals-based traps are disabled then spectre \
2705                     mitigations must also be disabled"
2706                );
2707            }
2708        }
2709
2710        if features.contains(WasmFeatures::RELAXED_SIMD) && !features.contains(WasmFeatures::SIMD) {
2711            bail!("cannot disable the simd proposal but enable the relaxed simd proposal");
2712        }
2713
2714        if features.contains(WasmFeatures::STACK_SWITCHING) {
2715            use target_lexicon::OperatingSystem;
2716            let model = match target.operating_system {
2717                OperatingSystem::Windows => "update_windows_tib",
2718                OperatingSystem::Linux
2719                | OperatingSystem::MacOSX(_)
2720                | OperatingSystem::Darwin(_) => "basic",
2721                _ => bail!("stack-switching feature not supported on this platform "),
2722            };
2723
2724            if !self
2725                .compiler_config_mut()
2726                .ensure_setting_unset_or_given("stack_switch_model", model)
2727            {
2728                bail!(
2729                    "compiler option 'stack_switch_model' must be set to '{model}' on this platform"
2730                );
2731            }
2732        }
2733
2734        // Apply compiler settings and flags
2735        compiler.set_tunables(tunables.clone())?;
2736        for (k, v) in self.compiler_config_mut().settings.iter() {
2737            compiler.set(k, v)?;
2738        }
2739        for flag in self.compiler_config_mut().flags.iter() {
2740            compiler.enable(flag)?;
2741        }
2742        *tunables = compiler.tunables().cloned().unwrap();
2743
2744        #[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
2745        if let Some(cache_store) = &self.compiler_config_mut().cache_store {
2746            compiler.enable_incremental_compilation(cache_store.clone())?;
2747        }
2748
2749        compiler.wmemcheck(self.compiler_config_mut().wmemcheck);
2750
2751        Ok((self, compiler.build()?))
2752    }
2753
2754    /// Internal setting for whether adapter modules for components will have
2755    /// extra WebAssembly instructions inserted performing more debug checks
2756    /// then are necessary.
2757    #[cfg(feature = "component-model")]
2758    pub fn debug_adapter_modules(&mut self, debug: bool) -> &mut Self {
2759        self.tunables.debug_adapter_modules = Some(debug);
2760        self
2761    }
2762
2763    /// Enables clif output when compiling a WebAssembly module.
2764    #[cfg(any(feature = "cranelift", feature = "winch"))]
2765    pub fn emit_clif(&mut self, path: &Path) -> &mut Self {
2766        self.compiler_config_mut().clif_dir = Some(path.to_path_buf());
2767        self
2768    }
2769
2770    /// Configures whether, when on macOS, Mach ports are used for exception
2771    /// handling instead of traditional Unix-based signal handling.
2772    ///
2773    /// WebAssembly traps in Wasmtime are implemented with native faults, for
2774    /// example a `SIGSEGV` will occur when a WebAssembly guest accesses
2775    /// out-of-bounds memory. Handling this can be configured to either use Unix
2776    /// signals or Mach ports on macOS. By default Mach ports are used.
2777    ///
2778    /// Mach ports enable Wasmtime to work by default with foreign
2779    /// error-handling systems such as breakpad which also use Mach ports to
2780    /// handle signals. In this situation Wasmtime will continue to handle guest
2781    /// faults gracefully while any non-guest faults will get forwarded to
2782    /// process-level handlers such as breakpad. Some more background on this
2783    /// can be found in #2456.
2784    ///
2785    /// A downside of using mach ports, however, is that they don't interact
2786    /// well with `fork()`. Forking a Wasmtime process on macOS will produce a
2787    /// child process that cannot successfully run WebAssembly. In this
2788    /// situation traditional Unix signal handling should be used as that's
2789    /// inherited and works across forks.
2790    ///
2791    /// If your embedding wants to use a custom error handler which leverages
2792    /// Mach ports and you additionally wish to `fork()` the process and use
2793    /// Wasmtime in the child process that's not currently possible. Please
2794    /// reach out to us if you're in this bucket!
2795    ///
2796    /// This option defaults to `true`, using Mach ports by default.
2797    pub fn macos_use_mach_ports(&mut self, mach_ports: bool) -> &mut Self {
2798        self.macos_use_mach_ports = mach_ports;
2799        self
2800    }
2801
2802    /// Configures an embedder-provided function, `detect`, which is used to
2803    /// determine if an ISA-specific feature is available on the current host.
2804    ///
2805    /// This function is used to verify that any features enabled for a compiler
2806    /// backend, such as AVX support on x86\_64, are also available on the host.
2807    /// It is undefined behavior to execute an AVX instruction on a host that
2808    /// doesn't support AVX instructions, for example.
2809    ///
2810    /// When the `std` feature is active on this crate then this function is
2811    /// configured to a default implementation that uses the standard library's
2812    /// feature detection. When the `std` feature is disabled then there is no
2813    /// default available and this method must be called to configure a feature
2814    /// probing function.
2815    ///
2816    /// The `detect` function provided is given a string name of an ISA feature.
2817    /// The function should then return:
2818    ///
2819    /// * `Some(true)` - indicates that the feature was found on the host and it
2820    ///   is supported.
2821    /// * `Some(false)` - the feature name was recognized but it was not
2822    ///   detected on the host, for example the CPU is too old.
2823    /// * `None` - the feature name was not recognized and it's not known
2824    ///   whether it's on the host or not.
2825    ///
2826    /// Feature names passed to `detect` match the same feature name used in the
2827    /// Rust standard library. For example `"sse4.2"` is used on x86\_64.
2828    ///
2829    /// # Unsafety
2830    ///
2831    /// This function is `unsafe` because it is undefined behavior to execute
2832    /// instructions that a host does not support. This means that the result of
2833    /// `detect` must be correct for memory safe execution at runtime.
2834    pub unsafe fn detect_host_feature(&mut self, detect: fn(&str) -> Option<bool>) -> &mut Self {
2835        self.detect_host_feature = Some(detect);
2836        self
2837    }
2838
2839    /// Configures Wasmtime to not use signals-based trap handlers, for example
2840    /// disables `SIGILL` and `SIGSEGV` handler registration on Unix platforms.
2841    ///
2842    /// > **Note:** this option has important performance ramifications, be sure
2843    /// > to understand the implications. Wasm programs have been measured to
2844    /// > run up to 2x slower when signals-based traps are disabled.
2845    ///
2846    /// Wasmtime will by default leverage signals-based trap handlers (or the
2847    /// platform equivalent, for example "vectored exception handlers" on
2848    /// Windows) to make generated code more efficient. For example, when
2849    /// Wasmtime can use signals-based traps, it can elide explicit bounds
2850    /// checks for Wasm linear memory accesses, instead relying on virtual
2851    /// memory guard pages to raise a `SIGSEGV` (on Unix) for out-of-bounds
2852    /// accesses, which Wasmtime's runtime then catches and handles. Another
2853    /// example is divide-by-zero: with signals-based traps, Wasmtime can let
2854    /// the hardware raise a trap when the divisor is zero. Without
2855    /// signals-based traps, Wasmtime must explicitly emit additional
2856    /// instructions to check for zero and conditionally branch to a trapping
2857    /// code path.
2858    ///
2859    /// Some environments however may not have access to signal handlers. For
2860    /// example embedded scenarios may not support virtual memory. Other
2861    /// environments where Wasmtime is embedded within the surrounding
2862    /// environment may require that new signal handlers aren't registered due
2863    /// to the global nature of signal handlers. This option exists to disable
2864    /// the signal handler registration when required for these scenarios.
2865    ///
2866    /// When signals-based trap handlers are disabled, then Wasmtime and its
2867    /// generated code will *never* rely on segfaults or other
2868    /// signals. Generated code will be slower because bounds must be explicitly
2869    /// checked along with other conditions like division by zero.
2870    ///
2871    /// The following additional factors can also affect Wasmtime's ability to
2872    /// elide explicit bounds checks and leverage signals-based traps:
2873    ///
2874    /// * The [`Config::memory_reservation`] and [`Config::memory_guard_size`]
2875    ///   settings
2876    /// * The index type of the linear memory (e.g. 32-bit or 64-bit)
2877    /// * The page size of the linear memory
2878    ///
2879    /// When this option is disabled, the
2880    /// `enable_heap_access_spectre_mitigation` and
2881    /// `enable_table_access_spectre_mitigation` Cranelift settings must also be
2882    /// disabled. This means that generated code must have spectre mitigations
2883    /// disabled. This is because spectre mitigations rely on faults from
2884    /// loading from the null address to implement bounds checks.
2885    ///
2886    /// This option defaults to `true`: signals-based trap handlers are enabled
2887    /// by default.
2888    ///
2889    /// > **Note:** Disabling this option is not compatible with the Winch
2890    /// > compiler.
2891    pub fn signals_based_traps(&mut self, enable: bool) -> &mut Self {
2892        self.tunables.signals_based_traps = Some(enable);
2893        self
2894    }
2895
2896    /// Enable/disable GC support in Wasmtime entirely.
2897    ///
2898    /// This flag can be used to gate whether GC infrastructure is enabled or
2899    /// initialized in Wasmtime at all. Wasmtime's GC implementation is required
2900    /// for the [`Self::wasm_gc`] proposal, [`Self::wasm_function_references`],
2901    /// and [`Self::wasm_exceptions`] at this time. None of those proposal can
2902    /// be enabled without also having this option enabled.
2903    ///
2904    /// This option defaults to whether the crate `gc` feature is enabled or
2905    /// not.
2906    pub fn gc_support(&mut self, enable: bool) -> &mut Self {
2907        self.wasm_features(WasmFeatures::GC_TYPES, enable)
2908    }
2909
2910    /// Explicitly indicate or not whether the host is using a hardware float
2911    /// ABI on x86 targets.
2912    ///
2913    /// This configuration option is only applicable on the
2914    /// `x86_64-unknown-none` Rust target and has no effect on other host
2915    /// targets. The `x86_64-unknown-none` Rust target does not support hardware
2916    /// floats by default and uses a "soft float" implementation and ABI. This
2917    /// means that `f32`, for example, is passed in a general-purpose register
2918    /// between functions instead of a floating-point register. This does not
2919    /// match Cranelift's ABI for `f32` where it's passed in floating-point
2920    /// registers.  Cranelift does not have support for a "soft float"
2921    /// implementation where all floating-point operations are lowered to
2922    /// libcalls.
2923    ///
2924    /// This means that for the `x86_64-unknown-none` target the ABI between
2925    /// Wasmtime's libcalls and the host is incompatible when floats are used.
2926    /// This further means that, by default, Wasmtime is unable to load native
2927    /// code when compiled to the `x86_64-unknown-none` target. The purpose of
2928    /// this option is to explicitly allow loading code and bypass this check.
2929    ///
2930    /// Setting this configuration option to `true` indicates that either:
2931    /// (a) the Rust target is compiled with the hard-float ABI manually via
2932    /// `-Zbuild-std` and a custom target JSON configuration, or (b) sufficient
2933    /// x86 features have been enabled in the compiler such that float libcalls
2934    /// will not be used in Wasmtime. For (a) there is no way in Rust at this
2935    /// time to detect whether a hard-float or soft-float ABI is in use on
2936    /// stable Rust, so this manual opt-in is required. For (b) the only
2937    /// instance where Wasmtime passes a floating-point value in a register
2938    /// between the host and compiled wasm code is with libcalls.
2939    ///
2940    /// Float-based libcalls are only used when the compilation target for a
2941    /// wasm module has insufficient target features enabled for native
2942    /// support. For example SSE4.1 is required for the `f32.ceil` WebAssembly
2943    /// instruction to be compiled to a native instruction. If SSE4.1 is not
2944    /// enabled then `f32.ceil` is translated to a "libcall" which is
2945    /// implemented on the host. Float-based libcalls can be avoided with
2946    /// sufficient target features enabled, for example:
2947    ///
2948    /// * `self.cranelift_flag_enable("has_sse3")`
2949    /// * `self.cranelift_flag_enable("has_ssse3")`
2950    /// * `self.cranelift_flag_enable("has_sse41")`
2951    /// * `self.cranelift_flag_enable("has_sse42")`
2952    /// * `self.cranelift_flag_enable("has_fma")`
2953    ///
2954    /// Note that when these features are enabled Wasmtime will perform a
2955    /// runtime check to determine that the host actually has the feature
2956    /// present.
2957    ///
2958    /// For some more discussion see [#11506].
2959    ///
2960    /// [#11506]: https://github.com/bytecodealliance/wasmtime/issues/11506
2961    ///
2962    /// # Safety
2963    ///
2964    /// This method is not safe because it cannot be detected in Rust right now
2965    /// whether the host is compiled with a soft or hard float ABI. Additionally
2966    /// if the host is compiled with a soft float ABI disabling this check does
2967    /// not ensure that the wasm module in question has zero usage of floats
2968    /// in the boundary to the host.
2969    ///
2970    /// Safely using this method requires one of:
2971    ///
2972    /// * The host target is compiled to use hardware floats.
2973    /// * Wasm modules loaded are compiled with enough x86 Cranelift features
2974    ///   enabled to avoid float-related hostcalls.
2975    pub unsafe fn x86_float_abi_ok(&mut self, enable: bool) -> &mut Self {
2976        self.x86_float_abi_ok = Some(enable);
2977        self
2978    }
2979
2980    /// Enable or disable the ability to create a
2981    /// [`SharedMemory`](crate::SharedMemory).
2982    ///
2983    /// The WebAssembly threads proposal, configured by [`Config::wasm_threads`]
2984    /// is on-by-default but there are enough deficiencies in Wasmtime's
2985    /// implementation and API integration that creation of a shared memory is
2986    /// disabled by default. This cofiguration knob can be used to enable this.
2987    ///
2988    /// When enabling this method be aware that wasm threads are, at this time,
2989    /// a [tier 2
2990    /// feature](https://docs.wasmtime.dev/stability-tiers.html#tier-2) in
2991    /// Wasmtime meaning that it will not receive security updates or fixes to
2992    /// historical releases. Additionally security CVEs will not be issued for
2993    /// bugs in the implementation.
2994    ///
2995    /// This option is `false` by default.
2996    pub fn shared_memory(&mut self, enable: bool) -> &mut Self {
2997        self.shared_memory = enable;
2998        self
2999    }
3000}
3001
3002impl Default for Config {
3003    fn default() -> Config {
3004        Config::new()
3005    }
3006}
3007
3008impl fmt::Debug for Config {
3009    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
3010        let mut f = f.debug_struct("Config");
3011
3012        // Not every flag in WasmFeatures can be enabled as part of creating
3013        // a Config. This impl gives a complete picture of all WasmFeatures
3014        // enabled, and doesn't require maintenance by hand (which has become out
3015        // of date in the past), at the cost of possible confusion for why
3016        // a flag in this set doesn't have a Config setter.
3017        let features = self.features();
3018        for flag in WasmFeatures::FLAGS.iter() {
3019            f.field(
3020                &format!("wasm_{}", flag.name().to_lowercase()),
3021                &features.contains(*flag.value()),
3022            );
3023        }
3024
3025        f.field("parallel_compilation", &self.parallel_compilation);
3026        #[cfg(any(feature = "cranelift", feature = "winch"))]
3027        {
3028            f.field("compiler_config", &self.compiler_config);
3029        }
3030
3031        self.tunables.format(&mut f);
3032        f.finish()
3033    }
3034}
3035
/// Possible Compilation strategies for a wasm module.
///
/// This is used as an argument to the [`Config::strategy`] method.
#[non_exhaustive]
#[derive(PartialEq, Eq, Clone, Debug, Copy)]
pub enum Strategy {
    /// An indicator that the compilation strategy should be automatically
    /// selected.
    ///
    /// This is generally what you want for most projects and indicates that the
    /// `wasmtime` crate itself should make the decision about what the best
    /// code generator for a wasm module is.
    ///
    /// Currently this always defaults to Cranelift, but the default value may
    /// change over time.
    Auto,

    /// Currently the default backend, Cranelift aims to be a reasonably fast
    /// code generator which generates high quality machine code.
    Cranelift,

    /// A low-latency baseline compiler for WebAssembly.
    /// For more details regarding ISA support and Wasm proposals support
    /// see <https://docs.wasmtime.dev/stability-tiers.html#current-tier-status>.
    Winch,
}
3062
#[cfg(any(feature = "winch", feature = "cranelift"))]
impl Strategy {
    /// Resolves `Auto` to a concrete, compiled-in strategy.
    ///
    /// For `Auto`, Cranelift is preferred over Winch when both are compiled
    /// in; `None` is returned only when neither backend feature is enabled.
    /// Any non-`Auto` strategy is returned unchanged.
    fn not_auto(&self) -> Option<Strategy> {
        if let Strategy::Auto = self {
            if cfg!(feature = "cranelift") {
                Some(Strategy::Cranelift)
            } else if cfg!(feature = "winch") {
                Some(Strategy::Winch)
            } else {
                None
            }
        } else {
            Some(*self)
        }
    }
}
3080
/// Possible garbage collector implementations for Wasm.
///
/// This is used as an argument to the [`Config::collector`] method.
///
/// The properties of Wasmtime's available collectors are summarized in the
/// following table:
///
/// | Collector                   | Collects Garbage[^1] | Latency[^2] | Throughput[^3] | Allocation Speed[^4] | Heap Utilization[^5] |
/// |-----------------------------|----------------------|-------------|----------------|----------------------|----------------------|
/// | `DeferredReferenceCounting` | Yes, but not cycles  | 🙂         | 🙁             | 😐                   | 😐                  |
/// | `Null`                      | No                   | 🙂         | 🙂             | 🙂                   | 🙂                  |
///
/// [^1]: Whether or not the collector is capable of collecting garbage and cyclic garbage.
///
/// [^2]: How long the Wasm program is paused during garbage
///       collections. Shorter is better. In general, better latency implies
///       worse throughput and vice versa.
///
/// [^3]: How fast the Wasm program runs when using this collector. Roughly
///       equivalent to the number of Wasm instructions executed per
///       second. Faster is better. In general, better throughput implies worse
///       latency and vice versa.
///
/// [^4]: How fast can individual objects be allocated?
///
/// [^5]: How many objects can the collector fit into N bytes of memory? That
///       is, how much space for bookkeeping and metadata does this collector
///       require? Less space taken up by metadata means more space for
///       additional objects. Reference counts are larger than mark bits and
///       free lists are larger than bump pointers, for example.
#[non_exhaustive]
#[derive(PartialEq, Eq, Clone, Debug, Copy)]
pub enum Collector {
    /// An indicator that the garbage collector should be automatically
    /// selected.
    ///
    /// This is generally what you want for most projects and indicates that the
    /// `wasmtime` crate itself should make the decision about what the best
    /// collector for a wasm module is.
    ///
    /// Currently this always defaults to the deferred reference-counting
    /// collector, but the default value may change over time.
    Auto,

    /// The deferred reference-counting collector.
    ///
    /// A reference-counting collector, generally trading improved latency for
    /// worsened throughput. However, to avoid the largest overheads of
    /// reference counting, it avoids manipulating reference counts for Wasm
    /// objects on the stack. Instead, it will hold a reference count for an
    /// over-approximation of all objects that are currently on the stack, trace
    /// the stack during collection to find the precise set of on-stack roots,
    /// and decrement the reference count of any object that was in the
    /// over-approximation but not the precise set. This improves throughput,
    /// compared to "pure" reference counting, by performing many fewer
    /// refcount-increment and -decrement operations. The cost is the increased
    /// latency associated with tracing the stack.
    ///
    /// This collector cannot currently collect cycles; they will leak until the
    /// GC heap's store is dropped.
    DeferredReferenceCounting,

    /// The null collector.
    ///
    /// This collector does not actually collect any garbage. It simply
    /// allocates objects until it runs out of memory, at which point further
    /// object allocation attempts will trap.
    ///
    /// This collector is useful for incredibly short-running Wasm instances
    /// where additionally you would rather halt an over-allocating Wasm program
    /// than spend time collecting its garbage to allow it to keep running. It
    /// is also useful for measuring the overheads associated with other
    /// collectors, as this collector imposes as close to zero throughput and
    /// latency overhead as possible.
    Null,
}
3157
3158impl Default for Collector {
3159    fn default() -> Collector {
3160        Collector::Auto
3161    }
3162}
3163
#[cfg(feature = "gc")]
impl Collector {
    /// Resolves `Auto` to a concrete collector, preferring the deferred
    /// reference-counting collector when the `gc-drc` crate feature is
    /// enabled and falling back to the null collector with `gc-null`.
    /// Returns `None` only when `self` is `Auto` and neither collector
    /// feature was compiled in; non-`Auto` values are returned unchanged.
    fn not_auto(&self) -> Option<Collector> {
        match self {
            Collector::Auto => {
                if cfg!(feature = "gc-drc") {
                    Some(Collector::DeferredReferenceCounting)
                } else if cfg!(feature = "gc-null") {
                    Some(Collector::Null)
                } else {
                    None
                }
            }
            other => Some(*other),
        }
    }

    /// Like [`Collector::not_auto`] but returns an error instead of `None`,
    /// and additionally errors when the resolved collector's backing crate
    /// feature (`gc-drc` or `gc-null`) was not enabled at compile time.
    fn try_not_auto(&self) -> Result<Self> {
        match self.not_auto() {
            #[cfg(feature = "gc-drc")]
            Some(c @ Collector::DeferredReferenceCounting) => Ok(c),
            #[cfg(not(feature = "gc-drc"))]
            Some(Collector::DeferredReferenceCounting) => bail!(
                "cannot create an engine using the deferred reference-counting \
                 collector because the `gc-drc` feature was not enabled at \
                 compile time",
            ),

            #[cfg(feature = "gc-null")]
            Some(c @ Collector::Null) => Ok(c),
            #[cfg(not(feature = "gc-null"))]
            Some(Collector::Null) => bail!(
                "cannot create an engine using the null collector because \
                 the `gc-null` feature was not enabled at compile time",
            ),

            // `not_auto` never yields `Auto` itself.
            Some(Collector::Auto) => unreachable!(),

            None => bail!(
                "cannot create an engine with GC support when none of the \
                 collectors are available; enable one of the following \
                 features: `gc-drc`, `gc-null`",
            ),
        }
    }
}
3210
/// Possible optimization levels for the Cranelift codegen backend.
#[non_exhaustive]
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum OptLevel {
    /// No optimizations performed, minimizes compilation time by disabling most
    /// optimizations.
    None,
    /// Generates the fastest possible code, but may take longer to compile.
    Speed,
    /// Similar to `speed`, but also performs transformations aimed at reducing
    /// code size.
    SpeedAndSize,
}
3224
/// Possible register allocator algorithms for the Cranelift codegen backend.
#[non_exhaustive]
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum RegallocAlgorithm {
    /// Generates the fastest possible code, but may take longer to compile.
    ///
    /// This algorithm performs "backtracking", which means that it may
    /// undo its earlier work and retry as it discovers conflicts. This
    /// results in better register utilization, producing fewer spills
    /// and moves, but can cause super-linear compile runtime.
    Backtracking,
    /// Generates acceptable code very quickly.
    ///
    /// This algorithm performs a single pass through the code,
    /// guaranteed to work in linear time.  (Note that the rest of
    /// Cranelift is not necessarily guaranteed to run in linear time,
    /// however.) It cannot undo earlier decisions, however, and it
    /// cannot foresee constraints or issues that may occur further
    /// ahead in the code, so the code may have more spills and moves as
    /// a result.
    ///
    /// > **Note**: This algorithm is not yet production-ready and has
    /// > historically had known problems. It is not recommended to enable this
    /// > algorithm for security-sensitive applications and the Wasmtime project
    /// > does not consider this configuration option for issuing security
    /// > advisories at this time.
    SinglePass,
}
3253
/// Select which profiling technique to support.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum ProfilingStrategy {
    /// No profiler support.
    None,

    /// Collect function name information as the "perf map" file format, used with `perf` on Linux.
    PerfMap,

    /// Collect profiling info for the "jitdump" file format, used with `perf` on
    /// Linux.
    JitDump,

    /// Collect profiling info using the "ittapi" interface, used with `VTune`
    /// on Linux.
    VTune,

    /// Support for profiling Pulley, Wasmtime's interpreter. Note that enabling
    /// this at runtime requires enabling the `profile-pulley` Cargo feature at
    /// compile time.
    Pulley,
}
3275
/// Select how wasm backtrace detailed information is handled.
#[derive(Debug, Clone, Copy)]
pub enum WasmBacktraceDetails {
    /// Support is unconditionally enabled and Wasmtime will parse and read
    /// debug information.
    Enable,

    /// Support is disabled, and Wasmtime will not parse debug information for
    /// backtrace details.
    Disable,

    /// Support for backtrace details is conditional on the
    /// `WASMTIME_BACKTRACE_DETAILS` environment variable.
    Environment,
}
3291
/// Describes the tri-state configuration of optional features such as MPK or
/// `PAGEMAP_SCAN`.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)]
pub enum Enabled {
    /// Enable this feature if it's detected on the host system, otherwise leave
    /// it disabled.
    Auto,
    /// Enable this feature and fail configuration if the feature is not
    /// detected on the host system.
    Yes,
    /// Do not enable this feature, even if the host system supports it.
    No,
}
3304
/// Configuration options used with [`InstanceAllocationStrategy::Pooling`] to
/// change the behavior of the pooling instance allocator.
///
/// This structure has a builder-style API in the same manner as [`Config`] and
/// is configured with [`Config::allocation_strategy`].
///
/// Note that usage of the pooling allocator does not affect compiled
/// WebAssembly code. Compiled `*.cwasm` files, for example, are usable both
/// with and without the pooling allocator.
///
/// ## Advantages of Pooled Allocation
///
/// The main benefit of the pooling allocator is to make WebAssembly
/// instantiation both faster and more scalable in terms of parallelism.
/// Allocation is faster because virtual memory is already configured and ready
/// to go within the pool, there's no need to [`mmap`] (for example on Unix) a
/// new region and configure it with guard pages. By avoiding [`mmap`] this
/// avoids whole-process virtual memory locks which can improve scalability and
/// performance.
///
/// Additionally with pooled allocation it's possible to create "affine slots"
/// to a particular WebAssembly module or component over time. For example if
/// the same module is instantiated multiple times over time the pooling
/// allocator will, by default, attempt to reuse the same slot. This means that
/// the slot has been pre-configured and can retain virtual memory mappings for
/// a copy-on-write image, for example (see [`Config::memory_init_cow`] for
/// more information).
/// This means that in a steady state instance deallocation is a single
/// [`madvise`] to reset linear memory to its original contents followed by a
/// single (optional) [`mprotect`] during the next instantiation to shrink
/// memory back to its original size. Compared to non-pooled allocation this
/// avoids the need to [`mmap`] a new region of memory, [`munmap`] it, and
/// [`mprotect`] regions too.
///
/// Another benefit of pooled allocation is that it's possible to configure
/// things such that no virtual memory management is required at all in a steady
/// state. For example a pooling allocator can be configured with:
///
/// * [`Config::memory_init_cow`] disabled
/// * [`Config::memory_guard_size`] disabled
/// * [`Config::memory_reservation`] shrunk to minimal size
/// * [`PoolingAllocationConfig::table_keep_resident`] sufficiently large
/// * [`PoolingAllocationConfig::linear_memory_keep_resident`] sufficiently large
///
/// With all these options in place no virtual memory tricks are used at all and
/// everything is manually managed by Wasmtime (for example resetting memory is
/// a `memset(0)`). This is not as fast in a single-threaded scenario but can
/// provide benefits in high-parallelism situations as no virtual memory locks
/// or IPIs need happen.
///
/// ## Disadvantages of Pooled Allocation
///
/// Despite the above advantages to instantiation performance the pooling
/// allocator is not enabled by default in Wasmtime. One reason is that the
/// performance advantages are not necessarily portable, for example while the
/// pooling allocator works on Windows it has not been tuned for performance on
/// Windows in the same way it has on Linux.
///
/// Additionally the main cost of the pooling allocator is that it requires a
/// very large reservation of virtual memory (on the order of most of the
/// addressable virtual address space). WebAssembly 32-bit linear memories in
/// Wasmtime are, by default 4G address space reservations with a small guard
/// region both before and after the linear memory. Memories in the pooling
/// allocator are contiguous which means that we only need a guard after linear
/// memory because the previous linear memory's slot post-guard is our own
/// pre-guard. This means that, by default, the pooling allocator uses roughly
/// 4G of virtual memory per WebAssembly linear memory slot. 4G of virtual
/// memory is 32 bits of a 64-bit address. Many 64-bit systems can only
/// actually use 48-bit addresses by default (although this can be extended on
/// architectures nowadays too), and of those 48 bits one of them is reserved
/// to indicate kernel-vs-userspace. This leaves 47-32=15 bits,
/// meaning you can only have at most 32k slots of linear memories on many
/// systems by default. This is a relatively small number and shows how the
/// pooling allocator can quickly exhaust all of virtual memory.
///
/// Another disadvantage of the pooling allocator is that it may keep memory
/// alive when nothing is using it. A previously used slot for an instance might
/// have paged-in memory that will not get paged out until the
/// [`Engine`](crate::Engine) owning the pooling allocator is dropped. While
/// suitable for some applications this behavior may not be suitable for all
/// applications.
///
/// Finally the last disadvantage of the pooling allocator is that the
/// configuration values for the maximum number of instances, memories, tables,
/// etc, must all be fixed up-front. There's not always a clear answer as to
/// what these values should be so not all applications may be able to work
/// with this constraint.
///
/// [`madvise`]: https://man7.org/linux/man-pages/man2/madvise.2.html
/// [`mprotect`]: https://man7.org/linux/man-pages/man2/mprotect.2.html
/// [`mmap`]: https://man7.org/linux/man-pages/man2/mmap.2.html
/// [`munmap`]: https://man7.org/linux/man-pages/man2/munmap.2.html
#[cfg(feature = "pooling-allocator")]
#[derive(Debug, Clone, Default)]
pub struct PoolingAllocationConfig {
    // Internal runtime representation of all pooling-allocator settings; the
    // builder methods on this type are thin setters over this value.
    config: crate::runtime::vm::PoolingInstanceAllocatorConfig,
}
3401
#[cfg(feature = "pooling-allocator")]
impl PoolingAllocationConfig {
    /// Returns a new configuration builder with all default settings
    /// configured.
    pub fn new() -> PoolingAllocationConfig {
        PoolingAllocationConfig::default()
    }

    /// Configures the maximum number of "unused warm slots" to retain in the
    /// pooling allocator.
    ///
    /// The pooling allocator operates over slots to allocate from, and each
    /// slot is considered "cold" if it's never been used before or "warm" if
    /// it's been used by some module in the past. Slots in the pooling
    /// allocator additionally track an "affinity" flag to a particular core
    /// wasm module. When a module is instantiated into a slot then the slot is
    /// considered affine to that module, even after the instance has been
    /// deallocated.
    ///
    /// When a new instance is created then a slot must be chosen, and the
    /// current algorithm for selecting a slot is:
    ///
    /// * If there are slots that are affine to the module being instantiated,
    ///   then the most recently used slot is selected to be allocated from.
    ///   This is done to improve reuse of resources such as memory mappings and
    ///   additionally try to benefit from temporal locality for things like
    ///   caches.
    ///
    /// * Otherwise if there are more than N affine slots to other modules, then
    ///   one of those affine slots is chosen to be allocated. The slot chosen
    ///   is picked on a least-recently-used basis.
    ///
    /// * Finally, if there are fewer than N affine slots to other modules, then
    ///   the non-affine slots are allocated from.
    ///
    /// This setting, `max_unused_warm_slots`, is the value for N in the above
    /// algorithm. The purpose of this setting is to have a knob over the RSS
    /// impact of "unused slots" for a long-running wasm server.
    ///
    /// If this setting is set to 0, for example, then affine slots are
    /// aggressively reused on a least-recently-used basis. A "cold" slot is
    /// only used if there are no affine slots available to allocate from. This
    /// means that the set of slots used over the lifetime of a program is the
    /// same as the maximum concurrent number of wasm instances.
    ///
    /// If this setting is set to infinity, however, then cold slots are
    /// prioritized to be allocated from. This means that the set of slots used
    /// over the lifetime of a program will approach
    /// [`PoolingAllocationConfig::total_memories`], or the maximum number of
    /// slots in the pooling allocator.
    ///
    /// Wasmtime does not aggressively decommit all resources associated with a
    /// slot when the slot is not in use. For example the
    /// [`PoolingAllocationConfig::linear_memory_keep_resident`] option can be
    /// used to keep memory associated with a slot, even when it's not in use.
    /// This means that the total set of used slots in the pooling instance
    /// allocator can impact the overall RSS usage of a program.
    ///
    /// The default value for this option is `100`.
    pub fn max_unused_warm_slots(&mut self, max: u32) -> &mut Self {
        self.config.max_unused_warm_slots = max;
        self
    }

    /// The target number of decommits to do per batch.
    ///
    /// This is not precise, as we can queue up decommits at times when we
    /// aren't prepared to immediately flush them, and so we may go over this
    /// target size occasionally.
    ///
    /// A batch size of one effectively disables batching.
    ///
    /// Defaults to `1`.
    pub fn decommit_batch_size(&mut self, batch_size: usize) -> &mut Self {
        self.config.decommit_batch_size = batch_size;
        self
    }

    /// How much memory, in bytes, to keep resident for async stacks allocated
    /// with the pooling allocator.
    ///
    /// When [`Config::async_stack_zeroing`] is enabled then Wasmtime will reset
    /// the contents of async stacks back to zero upon deallocation. This option
    /// can be used to perform the zeroing operation with `memset` up to a
    /// certain threshold of bytes instead of using system calls to reset the
    /// stack to zero.
    ///
    /// Note that when using this option the memory with async stacks will
    /// never be decommitted.
    #[cfg(feature = "async")]
    pub fn async_stack_keep_resident(&mut self, size: usize) -> &mut Self {
        self.config.async_stack_keep_resident = size;
        self
    }

    /// How much memory, in bytes, to keep resident for each linear memory
    /// after deallocation.
    ///
    /// This option is only applicable on Linux and has no effect on other
    /// platforms.
    ///
    /// By default Wasmtime will use `madvise` to reset the entire contents of
    /// linear memory back to zero when a linear memory is deallocated. This
    /// option can be used to use `memset` instead to set memory back to zero
    /// which can, in some configurations, reduce the number of page faults
    /// taken when a slot is reused.
    pub fn linear_memory_keep_resident(&mut self, size: usize) -> &mut Self {
        self.config.linear_memory_keep_resident = size;
        self
    }

    /// How much memory, in bytes, to keep resident for each table after
    /// deallocation.
    ///
    /// This option is only applicable on Linux and has no effect on other
    /// platforms.
    ///
    /// This option is the same as
    /// [`PoolingAllocationConfig::linear_memory_keep_resident`] except that it
    /// is applicable to tables instead.
    pub fn table_keep_resident(&mut self, size: usize) -> &mut Self {
        self.config.table_keep_resident = size;
        self
    }

    /// The maximum number of concurrent component instances supported (default
    /// is `1000`).
    ///
    /// This provides an upper-bound on the total size of component
    /// metadata-related allocations, along with
    /// [`PoolingAllocationConfig::max_component_instance_size`]. The upper bound is
    ///
    /// ```text
    /// total_component_instances * max_component_instance_size
    /// ```
    ///
    /// where `max_component_instance_size` is rounded up to the size and alignment
    /// of the internal representation of the metadata.
    pub fn total_component_instances(&mut self, count: u32) -> &mut Self {
        self.config.limits.total_component_instances = count;
        self
    }

    /// The maximum size, in bytes, allocated for a component instance's
    /// `VMComponentContext` metadata.
    ///
    /// The [`wasmtime::component::Instance`][crate::component::Instance] type
    /// has a static size but its internal `VMComponentContext` is dynamically
    /// sized depending on the component being instantiated. This size limit
    /// loosely correlates to the size of the component, taking into account
    /// factors such as:
    ///
    /// * number of lifted and lowered functions,
    /// * number of memories
    /// * number of inner instances
    /// * number of resources
    ///
    /// If the allocated size per instance is too small then instantiation of a
    /// module will fail at runtime with an error indicating how many bytes were
    /// needed.
    ///
    /// The default value for this is 1MiB.
    ///
    /// This provides an upper-bound on the total size of component
    /// metadata-related allocations, along with
    /// [`PoolingAllocationConfig::total_component_instances`]. The upper bound is
    ///
    /// ```text
    /// total_component_instances * max_component_instance_size
    /// ```
    ///
    /// where `max_component_instance_size` is rounded up to the size and alignment
    /// of the internal representation of the metadata.
    pub fn max_component_instance_size(&mut self, size: usize) -> &mut Self {
        self.config.limits.component_instance_size = size;
        self
    }

    /// The maximum number of core instances a single component may contain
    /// (default is unlimited).
    ///
    /// This method (along with
    /// [`PoolingAllocationConfig::max_memories_per_component`],
    /// [`PoolingAllocationConfig::max_tables_per_component`], and
    /// [`PoolingAllocationConfig::max_component_instance_size`]) allows you to cap
    /// the amount of resources a single component allocation consumes.
    ///
    /// If a component will instantiate more core instances than `count`, then
    /// the component will fail to instantiate.
    pub fn max_core_instances_per_component(&mut self, count: u32) -> &mut Self {
        self.config.limits.max_core_instances_per_component = count;
        self
    }

    /// The maximum number of Wasm linear memories that a single component may
    /// transitively contain (default is unlimited).
    ///
    /// This method (along with
    /// [`PoolingAllocationConfig::max_core_instances_per_component`],
    /// [`PoolingAllocationConfig::max_tables_per_component`], and
    /// [`PoolingAllocationConfig::max_component_instance_size`]) allows you to cap
    /// the amount of resources a single component allocation consumes.
    ///
    /// If a component transitively contains more linear memories than `count`,
    /// then the component will fail to instantiate.
    pub fn max_memories_per_component(&mut self, count: u32) -> &mut Self {
        self.config.limits.max_memories_per_component = count;
        self
    }

    /// The maximum number of tables that a single component may transitively
    /// contain (default is unlimited).
    ///
    /// This method (along with
    /// [`PoolingAllocationConfig::max_core_instances_per_component`],
    /// [`PoolingAllocationConfig::max_memories_per_component`], and
    /// [`PoolingAllocationConfig::max_component_instance_size`]) allows you to cap
    /// the amount of resources a single component allocation consumes.
    ///
    /// If a component transitively contains more tables than `count`, then
    /// the component will fail to instantiate.
    pub fn max_tables_per_component(&mut self, count: u32) -> &mut Self {
        self.config.limits.max_tables_per_component = count;
        self
    }

    /// The maximum number of concurrent Wasm linear memories supported (default
    /// is `1000`).
    ///
    /// This value has a direct impact on the amount of memory allocated by the pooling
    /// instance allocator.
    ///
    /// The pooling instance allocator allocates a memory pool, where each entry
    /// in the pool contains the reserved address space for each linear memory
    /// supported by an instance.
    ///
    /// The memory pool will reserve a large quantity of host process address
    /// space to elide the bounds checks required for correct WebAssembly memory
    /// semantics. Even with 64-bit address spaces, the address space is limited
    /// when dealing with a large number of linear memories.
    ///
    /// For example, on Linux x86_64, the userland address space limit is 128
    /// TiB. That might seem like a lot, but each linear memory will *reserve* 6
    /// GiB of space by default.
    pub fn total_memories(&mut self, count: u32) -> &mut Self {
        self.config.limits.total_memories = count;
        self
    }

    /// The maximum number of concurrent tables supported (default is `1000`).
    ///
    /// This value has a direct impact on the amount of memory allocated by the
    /// pooling instance allocator.
    ///
    /// The pooling instance allocator allocates a table pool, where each entry
    /// in the pool contains the space needed for each WebAssembly table
    /// supported by an instance (see `table_elements` to control the size of
    /// each table).
    pub fn total_tables(&mut self, count: u32) -> &mut Self {
        self.config.limits.total_tables = count;
        self
    }

    /// The maximum number of execution stacks allowed for asynchronous
    /// execution, when enabled (default is `1000`).
    ///
    /// This value has a direct impact on the amount of memory allocated by the
    /// pooling instance allocator.
    #[cfg(feature = "async")]
    pub fn total_stacks(&mut self, count: u32) -> &mut Self {
        self.config.limits.total_stacks = count;
        self
    }

    /// The maximum number of concurrent core instances supported (default is
    /// `1000`).
    ///
    /// This provides an upper-bound on the total size of core instance
    /// metadata-related allocations, along with
    /// [`PoolingAllocationConfig::max_core_instance_size`]. The upper bound is
    ///
    /// ```text
    /// total_core_instances * max_core_instance_size
    /// ```
    ///
    /// where `max_core_instance_size` is rounded up to the size and alignment of
    /// the internal representation of the metadata.
    pub fn total_core_instances(&mut self, count: u32) -> &mut Self {
        self.config.limits.total_core_instances = count;
        self
    }

    /// The maximum size, in bytes, allocated for a core instance's `VMContext`
    /// metadata.
    ///
    /// The [`Instance`][crate::Instance] type has a static size but its
    /// `VMContext` metadata is dynamically sized depending on the module being
    /// instantiated. This size limit loosely correlates to the size of the Wasm
    /// module, taking into account factors such as:
    ///
    /// * number of functions
    /// * number of globals
    /// * number of memories
    /// * number of tables
    /// * number of function types
    ///
    /// If the allocated size per instance is too small then instantiation of a
    /// module will fail at runtime with an error indicating how many bytes were
    /// needed.
    ///
    /// The default value for this is 1MiB.
    ///
    /// This provides an upper-bound on the total size of core instance
    /// metadata-related allocations, along with
    /// [`PoolingAllocationConfig::total_core_instances`]. The upper bound is
    ///
    /// ```text
    /// total_core_instances * max_core_instance_size
    /// ```
    ///
    /// where `max_core_instance_size` is rounded up to the size and alignment of
    /// the internal representation of the metadata.
    pub fn max_core_instance_size(&mut self, size: usize) -> &mut Self {
        self.config.limits.core_instance_size = size;
        self
    }

    /// The maximum number of defined tables for a core module (default is `1`).
    ///
    /// This value controls the capacity of the `VMTableDefinition` table in
    /// each instance's `VMContext` structure.
    ///
    /// The allocated size of the table will be `tables *
    /// sizeof(VMTableDefinition)` for each instance regardless of how many
    /// tables are defined by an instance's module.
    pub fn max_tables_per_module(&mut self, tables: u32) -> &mut Self {
        self.config.limits.max_tables_per_module = tables;
        self
    }

    /// The maximum table elements for any table defined in a module (default is
    /// `20000`).
    ///
    /// If a table's minimum element limit is greater than this value, the
    /// module will fail to instantiate.
    ///
    /// If a table's maximum element limit is unbounded or greater than this
    /// value, the maximum will be `table_elements` for the purpose of any
    /// `table.grow` instruction.
    ///
    /// This value is used to reserve the maximum space for each supported
    /// table; table elements are pointer-sized in the Wasmtime runtime.
    /// Therefore, the space reserved for each instance is `tables *
    /// table_elements * sizeof::<*const ()>`.
    pub fn table_elements(&mut self, elements: usize) -> &mut Self {
        self.config.limits.table_elements = elements;
        self
    }

    /// The maximum number of defined linear memories for a module (default is
    /// `1`).
    ///
    /// This value controls the capacity of the `VMMemoryDefinition` table in
    /// each core instance's `VMContext` structure.
    ///
    /// The allocated size of the table will be `memories *
    /// sizeof(VMMemoryDefinition)` for each core instance regardless of how
    /// many memories are defined by the core instance's module.
    pub fn max_memories_per_module(&mut self, memories: u32) -> &mut Self {
        self.config.limits.max_memories_per_module = memories;
        self
    }

    /// The maximum byte size that any WebAssembly linear memory may grow to.
    ///
    /// This option defaults to 4 GiB meaning that for 32-bit linear memories
    /// there is no restrictions. 64-bit linear memories will not be allowed to
    /// grow beyond 4 GiB by default.
    ///
    /// If a memory's minimum size is greater than this value, the module will
    /// fail to instantiate.
    ///
    /// If a memory's maximum size is unbounded or greater than this value, the
    /// maximum will be `max_memory_size` for the purpose of any `memory.grow`
    /// instruction.
    ///
    /// This value is used to control the maximum accessible space for each
    /// linear memory of a core instance. This can be thought of as a simple
    /// mechanism like [`Store::limiter`](crate::Store::limiter) to limit memory
    /// at runtime. This value can also affect striping/coloring behavior when
    /// used in conjunction with
    /// [`memory_protection_keys`](PoolingAllocationConfig::memory_protection_keys).
    ///
    /// The virtual memory reservation size of each linear memory is controlled
    /// by the [`Config::memory_reservation`] setting and this method's
    /// configuration cannot exceed [`Config::memory_reservation`].
    pub fn max_memory_size(&mut self, bytes: usize) -> &mut Self {
        self.config.limits.max_memory_size = bytes;
        self
    }

    /// Configures whether memory protection keys (MPK) should be used for more
    /// efficient layout of pool-allocated memories.
    ///
    /// When using the pooling allocator (see [`Config::allocation_strategy`],
    /// [`InstanceAllocationStrategy::Pooling`]), memory protection keys can
    /// reduce the total amount of allocated virtual memory by eliminating guard
    /// regions between WebAssembly memories in the pool. It does so by
    /// "coloring" memory regions with different memory keys and setting which
    /// regions are accessible each time executions switches from host to guest
    /// (or vice versa).
    ///
    /// Leveraging MPK requires configuring a smaller-than-default
    /// [`max_memory_size`](PoolingAllocationConfig::max_memory_size) to enable
    /// this coloring/striping behavior. For example embeddings might want to
    /// reduce the default 4G allowance to 128M.
    ///
    /// MPK is only available on Linux (called `pku` there) and recent x86
    /// systems; we check for MPK support at runtime by examining the `CPUID`
    /// register. This configuration setting can be in three states:
    ///
    /// - `auto`: if MPK support is available the guard regions are removed; if
    ///   not, the guard regions remain
    /// - `yes`: use MPK to eliminate guard regions; fail if MPK is not
    ///   supported
    /// - `no`: never use MPK
    ///
    /// By default this value is `no`, but may become `auto` in future
    /// releases.
    ///
    /// __WARNING__: this configuration option is still experimental--use at
    /// your own risk! MPK uses kernel and CPU features to protect memory
    /// regions; you may observe segmentation faults if anything is
    /// misconfigured.
    #[cfg(feature = "memory-protection-keys")]
    pub fn memory_protection_keys(&mut self, enable: Enabled) -> &mut Self {
        self.config.memory_protection_keys = enable;
        self
    }

    /// Sets an upper limit on how many memory protection keys (MPK) Wasmtime
    /// will use.
    ///
    /// This setting is only applicable when
    /// [`PoolingAllocationConfig::memory_protection_keys`] is set to `yes`
    /// or `auto`. Configuring this above the HW and OS limits (typically 15)
    /// has no effect.
    ///
    /// If multiple Wasmtime engines are used in the same process, note that all
    /// engines will share the same set of allocated keys; this setting will
    /// limit how many keys are allocated initially and thus available to all
    /// other engines.
    #[cfg(feature = "memory-protection-keys")]
    pub fn max_memory_protection_keys(&mut self, max: usize) -> &mut Self {
        self.config.max_memory_protection_keys = max;
        self
    }

    /// Check if memory protection keys (MPK) are available on the current host.
    ///
    /// This is a convenience method for determining MPK availability using the
    /// same method that [`Enabled::Auto`] does. See
    /// [`PoolingAllocationConfig::memory_protection_keys`] for more
    /// information.
    #[cfg(feature = "memory-protection-keys")]
    pub fn are_memory_protection_keys_available() -> bool {
        crate::runtime::vm::mpk::is_supported()
    }

    /// The maximum number of concurrent GC heaps supported (default is `1000`).
    ///
    /// This value has a direct impact on the amount of memory allocated by the
    /// pooling instance allocator.
    ///
    /// The pooling instance allocator allocates a GC heap pool, where each
    /// entry in the pool contains the space needed for each GC heap used by a
    /// store.
    #[cfg(feature = "gc")]
    pub fn total_gc_heaps(&mut self, count: u32) -> &mut Self {
        self.config.limits.total_gc_heaps = count;
        self
    }

    /// Configures whether the Linux-specific [`PAGEMAP_SCAN` ioctl][ioctl] is
    /// used to help reset linear memory.
    ///
    /// When [`Self::linear_memory_keep_resident`] or
    /// [`Self::table_keep_resident`] options are configured to nonzero values
    /// the default behavior is to `memset` the lowest addresses of a table or
    /// memory back to their original contents. With the `PAGEMAP_SCAN` ioctl on
    /// Linux this can be done to more intelligently scan for resident pages in
    /// the region and only reset those pages back to their original contents
    /// with `memset` rather than assuming the low addresses are all resident.
    ///
    /// This ioctl has the potential to provide a number of performance benefits
    /// in high-reuse and high concurrency scenarios. Notably this enables
    /// Wasmtime to scan the entire region of WebAssembly linear memory and
    /// manually reset memory back to its original contents, up to
    /// [`Self::linear_memory_keep_resident`] bytes, possibly skipping an
    /// `madvise` entirely. This can be more efficient by avoiding removing
    /// pages from the address space entirely and additionally ensuring that
    /// future use of the linear memory doesn't incur page faults as the pages
    /// remain resident.
    ///
    /// At this time this configuration option is still being evaluated as to
    /// how appropriate it is for all use cases. It currently defaults to
    /// `no` (disabled) but may change to `auto` (enable if supported) in the
    /// future. This option is only supported on Linux and requires a kernel
    /// version of 6.7 or higher.
    ///
    /// [ioctl]: https://www.man7.org/linux/man-pages/man2/PAGEMAP_SCAN.2const.html
    pub fn pagemap_scan(&mut self, enable: Enabled) -> &mut Self {
        self.config.pagemap_scan = enable;
        self
    }

    /// Tests whether [`Self::pagemap_scan`] is available or not on the host
    /// system.
    pub fn is_pagemap_scan_available() -> bool {
        crate::runtime::vm::PoolingInstanceAllocatorConfig::is_pagemap_scan_available()
    }
}
3924
/// Maps a target-feature name onto runtime detection of that feature on the
/// host CPU.
///
/// Returns:
///
/// * `Some(true)` — the name is recognized for this architecture and the
///   feature was detected on the host.
/// * `Some(false)` — the name is recognized but the feature is absent.
/// * `None` — the name isn't recognized for this architecture, or no
///   detection is implemented for the host architecture at all.
#[cfg(feature = "std")]
fn detect_host_feature(feature: &str) -> Option<bool> {
    #[cfg(target_arch = "aarch64")]
    {
        return match feature {
            "lse" => Some(std::arch::is_aarch64_feature_detected!("lse")),
            "paca" => Some(std::arch::is_aarch64_feature_detected!("paca")),
            "fp16" => Some(std::arch::is_aarch64_feature_detected!("fp16")),

            _ => None,
        };
    }

    // `is_s390x_feature_detected` is nightly only for now, so use the
    // STORE FACILITY LIST EXTENDED instruction as a temporary measure.
    #[cfg(target_arch = "s390x")]
    {
        let mut facility_list: [u64; 4] = [0; 4];
        // SAFETY: STFLE stores at most `r0 + 1` doublewords at the address in
        // the operand register. The pointer is to a writable local buffer of
        // exactly `facility_list.len()` doublewords and `r0` is initialized
        // to `len - 1`, so all stores stay within the buffer.
        unsafe {
            core::arch::asm!(
                "stfle 0({})",
                in(reg_addr) facility_list.as_mut_ptr() ,
                inout("r0") facility_list.len() as u64 - 1 => _,
                options(nostack)
            );
        }
        let get_facility_bit = |n: usize| {
            // NOTE: bits are numbered from the left.
            facility_list[n / 64] & (1 << (63 - (n % 64))) != 0
        };

        return match feature {
            "mie3" => Some(get_facility_bit(61)),
            "mie4" => Some(get_facility_bit(84)),
            "vxrs_ext2" => Some(get_facility_bit(148)),
            "vxrs_ext3" => Some(get_facility_bit(198)),

            _ => None,
        };
    }

    #[cfg(target_arch = "riscv64")]
    {
        // `is_riscv64_feature_detected` is not yet stable, so it cannot be
        // used here. For now lie and say all features are always found to
        // keep tests working.
        return Some(true);
    }

    #[cfg(target_arch = "x86_64")]
    {
        return match feature {
            "cmpxchg16b" => Some(std::is_x86_feature_detected!("cmpxchg16b")),
            "sse3" => Some(std::is_x86_feature_detected!("sse3")),
            "ssse3" => Some(std::is_x86_feature_detected!("ssse3")),
            "sse4.1" => Some(std::is_x86_feature_detected!("sse4.1")),
            "sse4.2" => Some(std::is_x86_feature_detected!("sse4.2")),
            "popcnt" => Some(std::is_x86_feature_detected!("popcnt")),
            "avx" => Some(std::is_x86_feature_detected!("avx")),
            "avx2" => Some(std::is_x86_feature_detected!("avx2")),
            "fma" => Some(std::is_x86_feature_detected!("fma")),
            "bmi1" => Some(std::is_x86_feature_detected!("bmi1")),
            "bmi2" => Some(std::is_x86_feature_detected!("bmi2")),
            "avx512bitalg" => Some(std::is_x86_feature_detected!("avx512bitalg")),
            "avx512dq" => Some(std::is_x86_feature_detected!("avx512dq")),
            "avx512f" => Some(std::is_x86_feature_detected!("avx512f")),
            "avx512vl" => Some(std::is_x86_feature_detected!("avx512vl")),
            "avx512vbmi" => Some(std::is_x86_feature_detected!("avx512vbmi")),
            "lzcnt" => Some(std::is_x86_feature_detected!("lzcnt")),

            _ => None,
        };
    }

    // Fallback for architectures with no detection support above.
    #[allow(
        unreachable_code,
        reason = "reachable or not depending on if a target above matches"
    )]
    {
        let _ = feature;
        return None;
    }
}