wasmtime/
config.rs

1use crate::prelude::*;
2use alloc::sync::Arc;
3use bitflags::Flags;
4use core::fmt;
5use core::str::FromStr;
6#[cfg(any(feature = "cache", feature = "cranelift", feature = "winch"))]
7use std::path::Path;
8use wasmparser::WasmFeatures;
9use wasmtime_environ::{ConfigTunables, TripleExt, Tunables};
10
11#[cfg(feature = "runtime")]
12use crate::memory::MemoryCreator;
13#[cfg(feature = "runtime")]
14use crate::profiling_agent::{self, ProfilingAgent};
15#[cfg(feature = "runtime")]
16use crate::runtime::vm::{
17    GcRuntime, InstanceAllocator, OnDemandInstanceAllocator, RuntimeMemoryCreator,
18};
19#[cfg(feature = "runtime")]
20use crate::trampoline::MemoryCreatorProxy;
21
22#[cfg(feature = "async")]
23use crate::stack::{StackCreator, StackCreatorProxy};
24#[cfg(feature = "async")]
25use wasmtime_fiber::RuntimeFiberStackCreator;
26
27#[cfg(feature = "runtime")]
28pub use crate::runtime::code_memory::CustomCodeMemory;
29#[cfg(feature = "cache")]
30pub use wasmtime_cache::{Cache, CacheConfig};
31#[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
32pub use wasmtime_environ::CacheStore;
33
/// Strategy used to allocate resources for WebAssembly module instances.
#[derive(Clone)]
#[non_exhaustive]
pub enum InstanceAllocationStrategy {
    /// Allocate instance resources on demand.
    ///
    /// Resources for a module instance are created when the module is
    /// instantiated and freed as soon as the `Store` referencing the instance
    /// is dropped.
    ///
    /// Wasmtime uses this strategy unless configured otherwise.
    OnDemand,
    /// Allocate instance resources from a pre-created pool.
    ///
    /// A pool of resources is built ahead of time and instantiation reuses
    /// resources from that pool. Resources return to the pool when the `Store`
    /// referencing the instance is dropped.
    #[cfg(feature = "pooling-allocator")]
    Pooling(PoolingAllocationConfig),
}
53
54impl InstanceAllocationStrategy {
55    /// The default pooling instance allocation strategy.
56    #[cfg(feature = "pooling-allocator")]
57    pub fn pooling() -> Self {
58        Self::Pooling(Default::default())
59    }
60}
61
62impl Default for InstanceAllocationStrategy {
63    fn default() -> Self {
64        Self::OnDemand
65    }
66}
67
#[cfg(feature = "pooling-allocator")]
impl From<PoolingAllocationConfig> for InstanceAllocationStrategy {
    /// Wraps a pooling configuration into the corresponding strategy.
    fn from(config: PoolingAllocationConfig) -> InstanceAllocationStrategy {
        Self::Pooling(config)
    }
}
74
/// Versioning strategy applied when serializing and deserializing a
/// [`crate::Module`].
#[derive(Clone)]
pub enum ModuleVersionStrategy {
    /// Stamp artifacts with the wasmtime crate's Cargo package version.
    WasmtimeVersion,
    /// Stamp artifacts with a caller-supplied string of at most 255 bytes.
    Custom(String),
    /// Record no version string when serializing, and accept any version
    /// string when deserializing.
    None,
}
85
86impl Default for ModuleVersionStrategy {
87    fn default() -> Self {
88        ModuleVersionStrategy::WasmtimeVersion
89    }
90}
91
92impl core::hash::Hash for ModuleVersionStrategy {
93    fn hash<H: core::hash::Hasher>(&self, hasher: &mut H) {
94        match self {
95            Self::WasmtimeVersion => env!("CARGO_PKG_VERSION").hash(hasher),
96            Self::Custom(s) => s.hash(hasher),
97            Self::None => {}
98        };
99    }
100}
101
/// Global configuration options used to create an [`Engine`](crate::Engine)
/// and customize its behavior.
///
/// This structure exposes a builder-like interface and is primarily consumed by
/// [`Engine::new()`](crate::Engine::new).
///
/// The validation of `Config` is deferred until the engine is being built, thus
/// a problematic config may cause `Engine::new` to fail.
///
/// # Defaults
///
/// The `Default` trait implementation and the return value from
/// [`Config::new()`] are the same and represent the default set of
/// configuration for an engine. The exact set of defaults will differ based on
/// properties such as enabled Cargo features at compile time and the configured
/// target (see [`Config::target`]). Configuration options document their
/// default values and what the conditional value of the default is where
/// applicable.
#[derive(Clone)]
pub struct Config {
    // Compiler-backend configuration; present only when a compiler backend
    // (Cranelift or Winch) is built in.
    #[cfg(any(feature = "cranelift", feature = "winch"))]
    compiler_config: CompilerConfig,
    // Target triple compiled for; `None` means the host triple is used (see
    // `Config::target`).
    target: Option<target_lexicon::Triple>,
    // Selected garbage collector (`gc` feature only).
    #[cfg(feature = "gc")]
    collector: Collector,
    // Profiling backend; `ProfilingStrategy::None` by default (see
    // `Config::new`).
    profiling_strategy: ProfilingStrategy,
    // Knobs forwarded to `wasmtime_environ` when the engine is built.
    tunables: ConfigTunables,

    // Compilation cache, if one has been configured.
    #[cfg(feature = "cache")]
    pub(crate) cache: Option<Cache>,
    // Embedder-provided linear-memory creator, if any.
    #[cfg(feature = "runtime")]
    pub(crate) mem_creator: Option<Arc<dyn RuntimeMemoryCreator>>,
    // Embedder-provided code-memory implementation, if any.
    #[cfg(feature = "runtime")]
    pub(crate) custom_code_memory: Option<Arc<dyn CustomCodeMemory>>,
    // How per-instance resources are allocated; on-demand by default.
    pub(crate) allocation_strategy: InstanceAllocationStrategy,
    // Maximum stack space available to executing wasm; 512 KiB by default
    // (see `Config::new`).
    pub(crate) max_wasm_stack: usize,
    /// Explicitly enabled features via `Config::wasm_*` methods. This is a
    /// signal that the embedder specifically wants something turned on
    /// regardless of the defaults that Wasmtime might otherwise have enabled.
    ///
    /// Note that this, and `disabled_features` below, start as the empty set of
    /// features to only track explicit user requests.
    pub(crate) enabled_features: WasmFeatures,
    /// Same as `enabled_features`, but for those that are explicitly disabled.
    pub(crate) disabled_features: WasmFeatures,
    // Whether backtraces are captured for wasm traps/errors; `true` by
    // default (see `Config::wasm_backtrace`).
    pub(crate) wasm_backtrace: bool,
    // Set when the `WASMTIME_BACKTRACE_DETAILS` environment variable was
    // consulted to decide debug-info parsing (see
    // `Config::wasm_backtrace_details`).
    pub(crate) wasm_backtrace_details_env_used: bool,
    // Whether native unwind info (e.g. `.eh_frame`) is generated; `None`
    // until explicitly configured via `Config::native_unwind_info`.
    pub(crate) native_unwind_info: Option<bool>,
    // Size of stacks allocated for async/stack-switching execution; 2 MiB by
    // default (see `Config::new`).
    #[cfg(any(feature = "async", feature = "stack-switching"))]
    pub(crate) async_stack_size: usize,
    // Whether async stacks are zeroed; `false` by default.
    #[cfg(feature = "async")]
    pub(crate) async_stack_zeroing: bool,
    // Embedder-provided fiber-stack creator, if any.
    #[cfg(feature = "async")]
    pub(crate) stack_creator: Option<Arc<dyn RuntimeFiberStackCreator>>,
    // Whether the async APIs are enabled; `false` by default (see
    // `Config::async_support`).
    pub(crate) async_support: bool,
    // Versioning strategy for module serialization.
    pub(crate) module_version: ModuleVersionStrategy,
    // Whether compilation may run in parallel; disabled under Miri (see
    // `Config::new`).
    pub(crate) parallel_compilation: bool,
    // Size threshold related to dense memory-initialization images; 16 MiB by
    // default. Precise semantics live where this is consumed -- not visible
    // in this file chunk.
    pub(crate) memory_guaranteed_dense_image_size: u64,
    // Forces memfd-based memory initialization when `true`; `false` by
    // default. Consumed elsewhere.
    pub(crate) force_memory_init_memfd: bool,
    // Whether wmemcheck support is enabled; `false` by default (see also
    // `CompilerConfig::wmemcheck`).
    pub(crate) wmemcheck: bool,
    // When `true`, presumably a coredump is attached when a trap occurs --
    // the consumer is not visible here. `false` by default.
    #[cfg(feature = "coredump")]
    pub(crate) coredump_on_trap: bool,
    // macOS-specific flag for Mach-port usage; disabled under Miri (see
    // `Config::new`).
    pub(crate) macos_use_mach_ports: bool,
    // Callback that probes a host CPU feature by name; `Some` only when the
    // `std` feature is available (see `Config::new`).
    pub(crate) detect_host_feature: Option<fn(&str) -> Option<bool>>,
    // x86-specific float-ABI override; `None` until explicitly configured.
    // Consumed elsewhere.
    pub(crate) x86_float_abi_ok: Option<bool>,
}
168
/// User-provided configuration for the compiler.
#[cfg(any(feature = "cranelift", feature = "winch"))]
#[derive(Debug, Clone)]
struct CompilerConfig {
    // Which compiler backend to use; initialized from
    // `Strategy::Auto.not_auto()` in `CompilerConfig::new` -- presumably
    // `None` until a strategy is explicitly chosen; confirm `not_auto`.
    strategy: Option<Strategy>,
    // Named compiler settings as key/value pairs.
    settings: crate::hash_map::HashMap<String, String>,
    // Boolean compiler flags enabled by name.
    flags: crate::hash_set::HashSet<String>,
    // Storage backend for Cranelift's incremental-compilation cache, if
    // enabled via `Config::enable_incremental_compilation`.
    #[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
    cache_store: Option<Arc<dyn CacheStore>>,
    // Optional directory presumably used to emit CLIF output -- confirm at
    // the use site (not visible in this chunk).
    clif_dir: Option<std::path::PathBuf>,
    // Whether wmemcheck is enabled for compilation.
    wmemcheck: bool,
}
181
#[cfg(any(feature = "cranelift", feature = "winch"))]
impl CompilerConfig {
    /// Creates a compiler configuration with no strategy chosen, no settings,
    /// and no flags.
    fn new() -> Self {
        CompilerConfig {
            strategy: Strategy::Auto.not_auto(),
            settings: Default::default(),
            flags: Default::default(),
            #[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
            cache_store: None,
            clif_dir: None,
            wmemcheck: false,
        }
    }

    /// Ensures that the setting `k` is either unset or already equal to `v`,
    /// recording `v` when it was unset.
    ///
    /// # Returns
    ///
    /// `true` if the value was recorded or already matched; `false` if the
    /// setting was previously set to a different value.
    fn ensure_setting_unset_or_given(&mut self, k: &str, v: &str) -> bool {
        match self.settings.get(k) {
            // Already configured: succeed only when it matches the request.
            Some(existing) => existing == v,
            // Not configured yet: record the requested value.
            None => {
                self.settings.insert(k.to_string(), v.to_string());
                true
            }
        }
    }
}
215
#[cfg(any(feature = "cranelift", feature = "winch"))]
impl Default for CompilerConfig {
    /// Equivalent to [`CompilerConfig::new`].
    fn default() -> Self {
        CompilerConfig::new()
    }
}
222
223impl Config {
    /// Creates a new configuration object with the default configuration
    /// specified.
    pub fn new() -> Self {
        let mut ret = Self {
            tunables: ConfigTunables::default(),
            #[cfg(any(feature = "cranelift", feature = "winch"))]
            compiler_config: CompilerConfig::default(),
            target: None,
            #[cfg(feature = "gc")]
            collector: Collector::default(),
            #[cfg(feature = "cache")]
            cache: None,
            profiling_strategy: ProfilingStrategy::None,
            #[cfg(feature = "runtime")]
            mem_creator: None,
            #[cfg(feature = "runtime")]
            custom_code_memory: None,
            allocation_strategy: InstanceAllocationStrategy::OnDemand,
            // 512k of stack -- note that this is chosen currently to not be too
            // big, not be too small, and be a good default for most platforms.
            // One platform of particular note is Windows where the stack size
            // of the main thread seems to, by default, be smaller than that of
            // Linux and macOS. This 512k value at least lets our current test
            // suite pass on the main thread of Windows (using `--test-threads
            // 1` forces this), or at least it passed when this change was
            // committed.
            max_wasm_stack: 512 * 1024,
            wasm_backtrace: true,
            wasm_backtrace_details_env_used: false,
            native_unwind_info: None,
            // Both feature sets start empty: they only record explicit
            // embedder requests made through the `wasm_*` methods.
            enabled_features: WasmFeatures::empty(),
            disabled_features: WasmFeatures::empty(),
            // 2 MiB of stack for async execution.
            #[cfg(any(feature = "async", feature = "stack-switching"))]
            async_stack_size: 2 << 20,
            #[cfg(feature = "async")]
            async_stack_zeroing: false,
            #[cfg(feature = "async")]
            stack_creator: None,
            async_support: false,
            module_version: ModuleVersionStrategy::default(),
            // Parallel compilation is turned off when running under Miri.
            parallel_compilation: !cfg!(miri),
            // 16 MiB.
            memory_guaranteed_dense_image_size: 16 << 20,
            force_memory_init_memfd: false,
            wmemcheck: false,
            #[cfg(feature = "coredump")]
            coredump_on_trap: false,
            macos_use_mach_ports: !cfg!(miri),
            // Host CPU feature detection requires `std`.
            #[cfg(feature = "std")]
            detect_host_feature: Some(detect_host_feature),
            #[cfg(not(feature = "std"))]
            detect_host_feature: None,
            x86_float_abi_ok: None,
        };
        #[cfg(any(feature = "cranelift", feature = "winch"))]
        {
            ret.cranelift_debug_verifier(false);
            ret.cranelift_opt_level(OptLevel::Speed);

            // When running under MIRI try to optimize for compile time of wasm
            // code itself as much as possible. Disable optimizations by
            // default and use the fastest regalloc available to us.
            if cfg!(miri) {
                ret.cranelift_opt_level(OptLevel::None);
                ret.cranelift_regalloc_algorithm(RegallocAlgorithm::SinglePass);
            }
        }

        // Default to consulting the `WASMTIME_BACKTRACE_DETAILS` environment
        // variable (see `Config::wasm_backtrace_details`).
        ret.wasm_backtrace_details(WasmBacktraceDetails::Environment);

        ret
    }
295
296    /// Configures the target platform of this [`Config`].
297    ///
298    /// This method is used to configure the output of compilation in an
299    /// [`Engine`](crate::Engine). This can be used, for example, to
300    /// cross-compile from one platform to another. By default, the host target
301    /// triple is used meaning compiled code is suitable to run on the host.
302    ///
303    /// Note that the [`Module`](crate::Module) type can only be created if the
304    /// target configured here matches the host. Otherwise if a cross-compile is
305    /// being performed where the host doesn't match the target then
306    /// [`Engine::precompile_module`](crate::Engine::precompile_module) must be
307    /// used instead.
308    ///
309    /// Target-specific flags (such as CPU features) will not be inferred by
310    /// default for the target when one is provided here. This means that this
311    /// can also be used, for example, with the host architecture to disable all
312    /// host-inferred feature flags. Configuring target-specific flags can be
313    /// done with [`Config::cranelift_flag_set`] and
314    /// [`Config::cranelift_flag_enable`].
315    ///
316    /// # Errors
317    ///
318    /// This method will error if the given target triple is not supported.
319    pub fn target(&mut self, target: &str) -> Result<&mut Self> {
320        self.target =
321            Some(target_lexicon::Triple::from_str(target).map_err(|e| anyhow::anyhow!(e))?);
322
323        Ok(self)
324    }
325
    /// Enables the incremental compilation cache in Cranelift, using the provided `CacheStore`
    /// backend for storage.
    ///
    /// # Errors
    ///
    /// This method currently always returns `Ok`; the `Result` return type is
    /// reserved for future validation.
    #[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
    pub fn enable_incremental_compilation(
        &mut self,
        cache_store: Arc<dyn CacheStore>,
    ) -> Result<&mut Self> {
        self.compiler_config.cache_store = Some(cache_store);
        Ok(self)
    }
336
    /// Whether or not to enable support for asynchronous functions in Wasmtime.
    ///
    /// When enabled, the config can optionally define host functions with `async`.
    /// Instances created and functions called with this `Config` *must* be called
    /// through their asynchronous APIs, however. For example using
    /// [`Func::call`](crate::Func::call) will panic when used with this config.
    ///
    /// By default this option is `false`.
    ///
    /// # Asynchronous Wasm
    ///
    /// WebAssembly does not currently have a way to specify at the bytecode
    /// level what is and isn't async. Host-defined functions, however, may be
    /// defined as `async`. WebAssembly imports always appear synchronous, which
    /// gives rise to a bit of an impedance mismatch here. To solve this
    /// Wasmtime supports "asynchronous configs" which enables calling these
    /// asynchronous functions in a way that looks synchronous to the executing
    /// WebAssembly code.
    ///
    /// An asynchronous config must always invoke wasm code asynchronously,
    /// meaning we'll always represent its computation as a
    /// [`Future`](std::future::Future). The `poll` method of the futures
    /// returned by Wasmtime will perform the actual work of calling the
    /// WebAssembly. Wasmtime won't manage its own thread pools or similar,
    /// that's left up to the embedder.
    ///
    /// To implement futures in a way that WebAssembly sees asynchronous host
    /// functions as synchronous, all async Wasmtime futures will execute on a
    /// separately allocated native stack from the thread otherwise executing
    /// Wasmtime. This separate native stack can then be switched to and from.
    /// With this approach, whenever an `async` host function returns a future
    /// that resolves to `Pending`, we switch away from the temporary stack
    /// back to the main stack and propagate the `Pending` status.
    ///
    /// In general it's encouraged that the integration with `async` and
    /// wasmtime is designed early on in your embedding of Wasmtime to ensure
    /// that it's planned that WebAssembly executes in the right context of your
    /// application.
    ///
    /// # Execution in `poll`
    ///
    /// The [`Future::poll`](std::future::Future::poll) method is the main
    /// driving force behind Rust's futures. That method's own documentation
    /// states "an implementation of `poll` should strive to return quickly, and
    /// should not block". This, however, can be at odds with executing
    /// WebAssembly code as part of the `poll` method itself. If your
    /// WebAssembly is untrusted then this could allow the `poll` method to take
    /// arbitrarily long in the worst case, likely blocking all other
    /// asynchronous tasks.
    ///
    /// To remedy this situation you have a few possible ways to solve this:
    ///
    /// * The most efficient solution is to enable
    ///   [`Config::epoch_interruption`] in conjunction with
    ///   [`crate::Store::epoch_deadline_async_yield_and_update`]. Coupled with
    ///   periodic calls to [`crate::Engine::increment_epoch`] this will cause
    ///   executing WebAssembly to periodically yield back according to the
    ///   epoch configuration settings. This enables `Future::poll` to take at
    ///   most a certain amount of time according to epoch configuration
    ///   settings and when increments happen. The benefit of this approach is
    ///   that the instrumentation in compiled code is quite lightweight, but a
    ///   downside can be that the scheduling is somewhat nondeterministic since
    ///   increments are usually timer-based which are not always deterministic.
    ///
    ///   Note that to prevent infinite execution of wasm it's recommended to
    ///   place a timeout on the entire future representing executing wasm code
    ///   and the periodic yields with epochs should ensure that when the
    ///   timeout is reached it's appropriately recognized.
    ///
    /// * Alternatively you can enable the
    ///   [`Config::consume_fuel`](crate::Config::consume_fuel) method as well
    ///   as [`crate::Store::fuel_async_yield_interval`]. When doing so this will
    ///   configure Wasmtime futures to yield periodically while they're
    ///   executing WebAssembly code. After consuming the specified amount of
    ///   fuel wasm futures will return `Poll::Pending` from their `poll`
    ///   method, and will get automatically re-polled later. This enables the
    ///   `Future::poll` method to take roughly a fixed amount of time since
    ///   fuel is guaranteed to get consumed while wasm is executing. Unlike
    ///   epoch-based preemption this is deterministic since wasm always
    ///   consumes a fixed amount of fuel per-operation. The downside of this
    ///   approach, however, is that the compiled code instrumentation is
    ///   significantly more expensive than epoch checks.
    ///
    ///   Note that to prevent infinite execution of wasm it's recommended to
    ///   place a timeout on the entire future representing executing wasm code
    ///   and the periodic yields with fuel should ensure that when the
    ///   timeout is reached it's appropriately recognized.
    ///
    /// In all cases special care needs to be taken when integrating
    /// asynchronous wasm into your application. You should carefully plan where
    /// WebAssembly will execute and what compute resources will be allotted to
    /// it. If Wasmtime doesn't support exactly what you'd like just yet, please
    /// feel free to open an issue!
    #[cfg(feature = "async")]
    pub fn async_support(&mut self, enable: bool) -> &mut Self {
        self.async_support = enable;
        self
    }
433
    /// Configures whether DWARF debug information will be emitted during
    /// compilation.
    ///
    /// Note that the `debug-builtins` compile-time Cargo feature must also be
    /// enabled for native debuggers such as GDB or LLDB to be able to debug
    /// guest WebAssembly programs.
    ///
    /// By default this option is `false`.
    ///
    /// **Note** Enabling this option is not compatible with the Winch compiler.
    pub fn debug_info(&mut self, enable: bool) -> &mut Self {
        // Recorded in the tunables; a tunable left unset presumably falls back
        // to an engine-chosen default when the engine is built -- confirm in
        // `ConfigTunables`.
        self.tunables.generate_native_debuginfo = Some(enable);
        self
    }
447
    /// Configures whether [`WasmBacktrace`] will be present in the context of
    /// errors returned from Wasmtime.
    ///
    /// A backtrace may be collected whenever an error is returned from a host
    /// function call through to WebAssembly or when WebAssembly itself hits a
    /// trap condition, such as an out-of-bounds memory access. This flag
    /// indicates, in these conditions, whether the backtrace is collected or
    /// not.
    ///
    /// Currently wasm backtraces are implemented through frame pointer walking.
    /// This means that collecting a backtrace is expected to be a fast and
    /// relatively cheap operation. Additionally backtrace collection is
    /// suitable in concurrent environments since one thread capturing a
    /// backtrace won't block other threads.
    ///
    /// Collected backtraces are attached via [`anyhow::Error::context`] to
    /// errors returned from host functions. The [`WasmBacktrace`] type can be
    /// acquired via [`anyhow::Error::downcast_ref`] to inspect the backtrace.
    /// When this option is disabled then this context is never applied to
    /// errors coming out of wasm.
    ///
    /// This option is `true` by default.
    ///
    /// See also [`Config::native_unwind_info`], which separately controls
    /// generation of native unwind information and does not affect wasm
    /// backtrace capture.
    ///
    /// [`WasmBacktrace`]: crate::WasmBacktrace
    pub fn wasm_backtrace(&mut self, enable: bool) -> &mut Self {
        self.wasm_backtrace = enable;
        self
    }
476
477    /// Configures whether backtraces in `Trap` will parse debug info in the wasm file to
478    /// have filename/line number information.
479    ///
480    /// When enabled this will causes modules to retain debugging information
481    /// found in wasm binaries. This debug information will be used when a trap
482    /// happens to symbolicate each stack frame and attempt to print a
483    /// filename/line number for each wasm frame in the stack trace.
484    ///
485    /// By default this option is `WasmBacktraceDetails::Environment`, meaning
486    /// that wasm will read `WASMTIME_BACKTRACE_DETAILS` to indicate whether
487    /// details should be parsed. Note that the `std` feature of this crate must
488    /// be active to read environment variables, otherwise this is disabled by
489    /// default.
490    pub fn wasm_backtrace_details(&mut self, enable: WasmBacktraceDetails) -> &mut Self {
491        self.wasm_backtrace_details_env_used = false;
492        self.tunables.parse_wasm_debuginfo = match enable {
493            WasmBacktraceDetails::Enable => Some(true),
494            WasmBacktraceDetails::Disable => Some(false),
495            WasmBacktraceDetails::Environment => {
496                #[cfg(feature = "std")]
497                {
498                    self.wasm_backtrace_details_env_used = true;
499                    std::env::var("WASMTIME_BACKTRACE_DETAILS")
500                        .map(|s| Some(s == "1"))
501                        .unwrap_or(Some(false))
502                }
503                #[cfg(not(feature = "std"))]
504                {
505                    Some(false)
506                }
507            }
508        };
509        self
510    }
511
    /// Configures whether to generate native unwind information
    /// (e.g. `.eh_frame` on Linux).
    ///
    /// This configuration option only exists to help third-party stack
    /// capturing mechanisms, such as the system's unwinder or the `backtrace`
    /// crate, determine how to unwind through Wasm frames. It does not affect
    /// whether Wasmtime can capture Wasm backtraces or not. The presence of
    /// [`WasmBacktrace`] is controlled by the [`Config::wasm_backtrace`]
    /// option.
    ///
    /// Native unwind information is included:
    /// - When targeting Windows, since the Windows ABI requires it.
    /// - By default.
    ///
    /// Note that systems loading many modules may wish to disable this
    /// configuration option instead of leaving it on-by-default. Some platforms
    /// exhibit quadratic behavior when registering/unregistering unwinding
    /// information which can greatly slow down the module loading/unloading
    /// process.
    ///
    /// [`WasmBacktrace`]: crate::WasmBacktrace
    pub fn native_unwind_info(&mut self, enable: bool) -> &mut Self {
        // `Some(..)` records an explicit embedder choice, overriding the
        // default behavior described above.
        self.native_unwind_info = Some(enable);
        self
    }
537
    /// Configures whether execution of WebAssembly will "consume fuel" to
    /// either halt or yield execution as desired.
    ///
    /// This can be used to deterministically prevent infinitely-executing
    /// WebAssembly code by instrumenting generated code to consume fuel as it
    /// executes. When fuel runs out a trap is raised, however [`Store`] can be
    /// configured to yield execution periodically via
    /// [`crate::Store::fuel_async_yield_interval`].
    ///
    /// Note that a [`Store`] starts with no fuel, so if you enable this option
    /// you'll have to be sure to pour some fuel into [`Store`] before
    /// executing some code.
    ///
    /// By default this option is `false`.
    ///
    /// See also [`Config::epoch_interruption`], a cheaper but
    /// non-deterministic alternative for periodically interrupting wasm.
    ///
    /// **Note** Enabling this option is not compatible with the Winch compiler.
    ///
    /// [`Store`]: crate::Store
    pub fn consume_fuel(&mut self, enable: bool) -> &mut Self {
        self.tunables.consume_fuel = Some(enable);
        self
    }
560
561    /// Enables epoch-based interruption.
562    ///
563    /// When executing code in async mode, we sometimes want to
564    /// implement a form of cooperative timeslicing: long-running Wasm
565    /// guest code should periodically yield to the executor
566    /// loop. This yielding could be implemented by using "fuel" (see
567    /// [`consume_fuel`](Config::consume_fuel)). However, fuel
568    /// instrumentation is somewhat expensive: it modifies the
569    /// compiled form of the Wasm code so that it maintains a precise
570    /// instruction count, frequently checking this count against the
571    /// remaining fuel. If one does not need this precise count or
572    /// deterministic interruptions, and only needs a periodic
573    /// interrupt of some form, then It would be better to have a more
574    /// lightweight mechanism.
575    ///
576    /// Epoch-based interruption is that mechanism. There is a global
577    /// "epoch", which is a counter that divides time into arbitrary
578    /// periods (or epochs). This counter lives on the
579    /// [`Engine`](crate::Engine) and can be incremented by calling
580    /// [`Engine::increment_epoch`](crate::Engine::increment_epoch).
581    /// Epoch-based instrumentation works by setting a "deadline
582    /// epoch". The compiled code knows the deadline, and at certain
583    /// points, checks the current epoch against that deadline. It
584    /// will yield if the deadline has been reached.
585    ///
586    /// The idea is that checking an infrequently-changing counter is
587    /// cheaper than counting and frequently storing a precise metric
588    /// (instructions executed) locally. The interruptions are not
589    /// deterministic, but if the embedder increments the epoch in a
590    /// periodic way (say, every regular timer tick by a thread or
591    /// signal handler), then we can ensure that all async code will
592    /// yield to the executor within a bounded time.
593    ///
594    /// The deadline check cannot be avoided by malicious wasm code. It is safe
595    /// to use epoch deadlines to limit the execution time of untrusted
596    /// code.
597    ///
598    /// The [`Store`](crate::Store) tracks the deadline, and controls
599    /// what happens when the deadline is reached during
600    /// execution. Several behaviors are possible:
601    ///
602    /// - Trap if code is executing when the epoch deadline is
603    ///   met. See
604    ///   [`Store::epoch_deadline_trap`](crate::Store::epoch_deadline_trap).
605    ///
606    /// - Call an arbitrary function. This function may chose to trap or
607    ///   increment the epoch. See
608    ///   [`Store::epoch_deadline_callback`](crate::Store::epoch_deadline_callback).
609    ///
610    /// - Yield to the executor loop, then resume when the future is
611    ///   next polled. See
612    ///   [`Store::epoch_deadline_async_yield_and_update`](crate::Store::epoch_deadline_async_yield_and_update).
613    ///
614    /// Trapping is the default. The yielding behaviour may be used for
615    /// the timeslicing behavior described above.
616    ///
617    /// This feature is available with or without async support.
618    /// However, without async support, the timeslicing behaviour is
619    /// not available. This means epoch-based interruption can only
620    /// serve as a simple external-interruption mechanism.
621    ///
622    /// An initial deadline must be set before executing code by calling
623    /// [`Store::set_epoch_deadline`](crate::Store::set_epoch_deadline). If this
624    /// deadline is not configured then wasm will immediately trap.
625    ///
626    /// ## Interaction with blocking host calls
627    ///
628    /// Epochs (and fuel) do not assist in handling WebAssembly code blocked in
629    /// a call to the host. For example if the WebAssembly function calls
630    /// `wasi:io/poll/poll` to sleep epochs will not assist in waking this up or
631    /// timing it out. Epochs intentionally only affect running WebAssembly code
632    /// itself and it's left to the embedder to determine how best to wake up
633    /// indefinitely blocking code in the host.
634    ///
635    /// The typical solution for this, however, is to use
636    /// [`Config::async_support(true)`](Config::async_support) and the `async`
637    /// variant of WASI host functions. This models computation as a Rust
638    /// `Future` which means that when blocking happens the future is only
639    /// suspended and control yields back to the main event loop. This gives the
640    /// embedder the opportunity to use `tokio::time::timeout` for example on a
641    /// wasm computation and have the desired effect of cancelling a blocking
642    /// operation when a timeout expires.
643    ///
644    /// ## When to use fuel vs. epochs
645    ///
646    /// In general, epoch-based interruption results in faster
647    /// execution. This difference is sometimes significant: in some
648    /// measurements, up to 2-3x. This is because epoch-based
649    /// interruption does less work: it only watches for a global
650    /// rarely-changing counter to increment, rather than keeping a
651    /// local frequently-changing counter and comparing it to a
652    /// deadline.
653    ///
654    /// Fuel, in contrast, should be used when *deterministic*
655    /// yielding or trapping is needed. For example, if it is required
656    /// that the same function call with the same starting state will
657    /// always either complete or trap with an out-of-fuel error,
658    /// deterministically, then fuel with a fixed bound should be
659    /// used.
660    ///
661    /// **Note** Enabling this option is not compatible with the Winch compiler.
662    ///
663    /// # See Also
664    ///
665    /// - [`Engine::increment_epoch`](crate::Engine::increment_epoch)
666    /// - [`Store::set_epoch_deadline`](crate::Store::set_epoch_deadline)
667    /// - [`Store::epoch_deadline_trap`](crate::Store::epoch_deadline_trap)
668    /// - [`Store::epoch_deadline_callback`](crate::Store::epoch_deadline_callback)
669    /// - [`Store::epoch_deadline_async_yield_and_update`](crate::Store::epoch_deadline_async_yield_and_update)
    pub fn epoch_interruption(&mut self, enable: bool) -> &mut Self {
        // Stored as `Some(enable)` in the engine tunables; `Some` records an
        // explicit user choice as opposed to the unset default.
        self.tunables.epoch_interruption = Some(enable);
        self
    }
674
675    /// Configures the maximum amount of stack space available for
676    /// executing WebAssembly code.
677    ///
678    /// WebAssembly has well-defined semantics on stack overflow. This is
679    /// intended to be a knob which can help configure how much stack space
680    /// wasm execution is allowed to consume. Note that the number here is not
681    /// super-precise, but rather wasm will take at most "pretty close to this
682    /// much" stack space.
683    ///
    /// If a wasm call (or series of nested wasm calls) takes more stack space
    /// than the `size` specified then a stack overflow trap will be raised.
686    ///
687    /// Caveat: this knob only limits the stack space consumed by wasm code.
688    /// More importantly, it does not ensure that this much stack space is
689    /// available on the calling thread stack. Exhausting the thread stack
690    /// typically leads to an **abort** of the process.
691    ///
692    /// Here are some examples of how that could happen:
693    ///
694    /// - Let's assume this option is set to 2 MiB and then a thread that has
695    ///   a stack with 512 KiB left.
696    ///
697    ///   If wasm code consumes more than 512 KiB then the process will be aborted.
698    ///
699    /// - Assuming the same conditions, but this time wasm code does not consume
700    ///   any stack but calls into a host function. The host function consumes
701    ///   more than 512 KiB of stack space. The process will be aborted.
702    ///
    /// There's another gotcha related to recursive calling into wasm: the stack
    /// space consumed by a host function is counted towards this limit. The
    /// host functions are not prevented from consuming more than this limit.
    /// However, if a host function uses more than this limit and then calls
    /// back into wasm, that execution will trap immediately because of stack
    /// overflow.
709    ///
710    /// When the `async` feature is enabled, this value cannot exceed the
711    /// `async_stack_size` option. Be careful not to set this value too close
712    /// to `async_stack_size` as doing so may limit how much stack space
713    /// is available for host functions.
714    ///
715    /// By default this option is 512 KiB.
716    ///
717    /// # Errors
718    ///
719    /// The `Engine::new` method will fail if the `size` specified here is
720    /// either 0 or larger than the [`Config::async_stack_size`] configuration.
    pub fn max_wasm_stack(&mut self, size: usize) -> &mut Self {
        // Plain field assignment; the documented validation (nonzero, and not
        // larger than `async_stack_size`) happens when the engine is built.
        self.max_wasm_stack = size;
        self
    }
725
726    /// Configures the size of the stacks used for asynchronous execution.
727    ///
728    /// This setting configures the size of the stacks that are allocated for
729    /// asynchronous execution. The value cannot be less than `max_wasm_stack`.
730    ///
731    /// The amount of stack space guaranteed for host functions is
732    /// `async_stack_size - max_wasm_stack`, so take care not to set these two values
733    /// close to one another; doing so may cause host functions to overflow the
734    /// stack and abort the process.
735    ///
736    /// By default this option is 2 MiB.
737    ///
738    /// # Errors
739    ///
740    /// The `Engine::new` method will fail if the value for this option is
741    /// smaller than the [`Config::max_wasm_stack`] option.
    #[cfg(any(feature = "async", feature = "stack-switching"))]
    pub fn async_stack_size(&mut self, size: usize) -> &mut Self {
        // Plain field assignment; the documented validation against
        // `max_wasm_stack` happens when the engine is built.
        self.async_stack_size = size;
        self
    }
747
748    /// Configures whether or not stacks used for async futures are zeroed
749    /// before (re)use.
750    ///
751    /// When the [`async_support`](Config::async_support) method is enabled for
752    /// Wasmtime and the [`call_async`] variant of calling WebAssembly is used
753    /// then Wasmtime will create a separate runtime execution stack for each
754    /// future produced by [`call_async`]. By default upon allocation, depending
755    /// on the platform, these stacks might be filled with uninitialized
756    /// memory. This is safe and correct because, modulo bugs in Wasmtime,
757    /// compiled Wasm code will never read from a stack slot before it
758    /// initializes the stack slot.
759    ///
760    /// However, as a defense-in-depth mechanism, you may configure Wasmtime to
761    /// ensure that these stacks are zeroed before they are used. Notably, if
762    /// you are using the pooling allocator, stacks can be pooled and reused
763    /// across different Wasm guests; ensuring that stacks are zeroed can
764    /// prevent data leakage between Wasm guests even in the face of potential
765    /// read-of-stack-slot-before-initialization bugs in Wasmtime's compiler.
766    ///
767    /// Stack zeroing can be a costly operation in highly concurrent
768    /// environments due to modifications of the virtual address space requiring
769    /// process-wide synchronization. It can also be costly in `no-std`
770    /// environments that must manually zero memory, and cannot rely on an OS
771    /// and virtual memory to provide zeroed pages.
772    ///
773    /// This option defaults to `false`.
774    ///
775    /// [`call_async`]: crate::TypedFunc::call_async
    #[cfg(feature = "async")]
    pub fn async_stack_zeroing(&mut self, enable: bool) -> &mut Self {
        // Plain field assignment; consumed wherever async stacks are created.
        self.async_stack_zeroing = enable;
        self
    }
781
    /// Internal helper: records `flag` as explicitly enabled or disabled.
    ///
    /// Both bitsets are updated together so that, for this flag,
    /// `enabled_features` and `disabled_features` always hold complementary
    /// values.
    fn wasm_feature(&mut self, flag: WasmFeatures, enable: bool) -> &mut Self {
        self.enabled_features.set(flag, enable);
        self.disabled_features.set(flag, !enable);
        self
    }
787
788    /// Configures whether the WebAssembly tail calls proposal will be enabled
789    /// for compilation or not.
790    ///
791    /// The [WebAssembly tail calls proposal] introduces the `return_call` and
792    /// `return_call_indirect` instructions. These instructions allow for Wasm
793    /// programs to implement some recursive algorithms with *O(1)* stack space
794    /// usage.
795    ///
796    /// This is `true` by default except when the Winch compiler is enabled.
797    ///
798    /// [WebAssembly tail calls proposal]: https://github.com/WebAssembly/tail-call
799    pub fn wasm_tail_call(&mut self, enable: bool) -> &mut Self {
800        self.wasm_feature(WasmFeatures::TAIL_CALL, enable);
801        self
802    }
803
804    /// Configures whether the WebAssembly custom-page-sizes proposal will be
805    /// enabled for compilation or not.
806    ///
807    /// The [WebAssembly custom-page-sizes proposal] allows a memory to
808    /// customize its page sizes. By default, Wasm page sizes are 64KiB
809    /// large. This proposal allows the memory to opt into smaller page sizes
810    /// instead, allowing Wasm to run in environments with less than 64KiB RAM
811    /// available, for example.
812    ///
813    /// Note that the page size is part of the memory's type, and because
814    /// different memories may have different types, they may also have
815    /// different page sizes.
816    ///
817    /// Currently the only valid page sizes are 64KiB (the default) and 1
818    /// byte. Future extensions may relax this constraint and allow all powers
819    /// of two.
820    ///
821    /// Support for this proposal is disabled by default.
822    ///
823    /// [WebAssembly custom-page-sizes proposal]: https://github.com/WebAssembly/custom-page-sizes
824    pub fn wasm_custom_page_sizes(&mut self, enable: bool) -> &mut Self {
825        self.wasm_feature(WasmFeatures::CUSTOM_PAGE_SIZES, enable);
826        self
827    }
828
829    /// Configures whether the WebAssembly [threads] proposal will be enabled
830    /// for compilation.
831    ///
832    /// This feature gates items such as shared memories and atomic
833    /// instructions. Note that the threads feature depends on the bulk memory
834    /// feature, which is enabled by default. Additionally note that while the
835    /// wasm feature is called "threads" it does not actually include the
836    /// ability to spawn threads. Spawning threads is part of the [wasi-threads]
837    /// proposal which is a separately gated feature in Wasmtime.
838    ///
839    /// Embeddings of Wasmtime are able to build their own custom threading
840    /// scheme on top of the core wasm threads proposal, however.
841    ///
842    /// The default value for this option is whether the `threads`
843    /// crate feature of Wasmtime is enabled or not. By default this crate
844    /// feature is enabled.
845    ///
846    /// [threads]: https://github.com/webassembly/threads
847    /// [wasi-threads]: https://github.com/webassembly/wasi-threads
848    #[cfg(feature = "threads")]
849    pub fn wasm_threads(&mut self, enable: bool) -> &mut Self {
850        self.wasm_feature(WasmFeatures::THREADS, enable);
851        self
852    }
853
854    /// Configures whether the WebAssembly [shared-everything-threads] proposal
855    /// will be enabled for compilation.
856    ///
857    /// This feature gates extended use of the `shared` attribute on items other
858    /// than memories, extra atomic instructions, and new component model
859    /// intrinsics for spawning threads. It depends on the
860    /// [`wasm_threads`][Self::wasm_threads] being enabled.
861    ///
862    /// [shared-everything-threads]:
863    ///     https://github.com/webassembly/shared-everything-threads
864    pub fn wasm_shared_everything_threads(&mut self, enable: bool) -> &mut Self {
865        self.wasm_feature(WasmFeatures::SHARED_EVERYTHING_THREADS, enable);
866        self
867    }
868
869    /// Configures whether the [WebAssembly reference types proposal][proposal]
870    /// will be enabled for compilation.
871    ///
872    /// This feature gates items such as the `externref` and `funcref` types as
873    /// well as allowing a module to define multiple tables.
874    ///
875    /// Note that the reference types proposal depends on the bulk memory proposal.
876    ///
877    /// This feature is `true` by default.
878    ///
879    /// # Errors
880    ///
    /// The validation of this feature is deferred until the engine is built,
    /// and thus may cause `Engine::new` to fail if the `bulk_memory` feature
    /// is disabled.
883    ///
884    /// [proposal]: https://github.com/webassembly/reference-types
885    #[cfg(feature = "gc")]
886    pub fn wasm_reference_types(&mut self, enable: bool) -> &mut Self {
887        self.wasm_feature(WasmFeatures::REFERENCE_TYPES, enable);
888        self
889    }
890
891    /// Configures whether the [WebAssembly function references
892    /// proposal][proposal] will be enabled for compilation.
893    ///
894    /// This feature gates non-nullable reference types, function reference
895    /// types, `call_ref`, `ref.func`, and non-nullable reference related
896    /// instructions.
897    ///
898    /// Note that the function references proposal depends on the reference
899    /// types proposal.
900    ///
901    /// This feature is `false` by default.
902    ///
903    /// [proposal]: https://github.com/WebAssembly/function-references
904    #[cfg(feature = "gc")]
905    pub fn wasm_function_references(&mut self, enable: bool) -> &mut Self {
906        self.wasm_feature(WasmFeatures::FUNCTION_REFERENCES, enable);
907        self
908    }
909
910    /// Configures whether the [WebAssembly wide-arithmetic][proposal] will be
911    /// enabled for compilation.
912    ///
913    /// This feature is `false` by default.
914    ///
915    /// [proposal]: https://github.com/WebAssembly/wide-arithmetic
916    pub fn wasm_wide_arithmetic(&mut self, enable: bool) -> &mut Self {
917        self.wasm_feature(WasmFeatures::WIDE_ARITHMETIC, enable);
918        self
919    }
920
921    /// Configures whether the [WebAssembly Garbage Collection
922    /// proposal][proposal] will be enabled for compilation.
923    ///
924    /// This feature gates `struct` and `array` type definitions and references,
925    /// the `i31ref` type, and all related instructions.
926    ///
    /// Note that the GC proposal depends on the typed function references
    /// proposal.
929    ///
930    /// This feature is `false` by default.
931    ///
932    /// **Warning: Wasmtime's implementation of the GC proposal is still in
933    /// progress and generally not ready for primetime.**
934    ///
935    /// [proposal]: https://github.com/WebAssembly/gc
936    #[cfg(feature = "gc")]
937    pub fn wasm_gc(&mut self, enable: bool) -> &mut Self {
938        self.wasm_feature(WasmFeatures::GC, enable);
939        self
940    }
941
942    /// Configures whether the WebAssembly SIMD proposal will be
943    /// enabled for compilation.
944    ///
945    /// The [WebAssembly SIMD proposal][proposal]. This feature gates items such
946    /// as the `v128` type and all of its operators being in a module. Note that
947    /// this does not enable the [relaxed simd proposal].
948    ///
949    /// **Note**
950    ///
951    /// On x86_64 platforms the base CPU feature requirement for SIMD
952    /// is SSE2 for the Cranelift compiler and AVX for the Winch compiler.
953    ///
954    /// This is `true` by default.
955    ///
956    /// [proposal]: https://github.com/webassembly/simd
957    /// [relaxed simd proposal]: https://github.com/WebAssembly/relaxed-simd
958    pub fn wasm_simd(&mut self, enable: bool) -> &mut Self {
959        self.wasm_feature(WasmFeatures::SIMD, enable);
960        self
961    }
962
963    /// Configures whether the WebAssembly Relaxed SIMD proposal will be
964    /// enabled for compilation.
965    ///
966    /// The relaxed SIMD proposal adds new instructions to WebAssembly which,
967    /// for some specific inputs, are allowed to produce different results on
968    /// different hosts. More-or-less this proposal enables exposing
969    /// platform-specific semantics of SIMD instructions in a controlled
970    /// fashion to a WebAssembly program. From an embedder's perspective this
971    /// means that WebAssembly programs may execute differently depending on
972    /// whether the host is x86_64 or AArch64, for example.
973    ///
974    /// By default Wasmtime lowers relaxed SIMD instructions to the fastest
975    /// lowering for the platform it's running on. This means that, by default,
976    /// some relaxed SIMD instructions may have different results for the same
977    /// inputs across x86_64 and AArch64. This behavior can be disabled through
978    /// the [`Config::relaxed_simd_deterministic`] option which will force
979    /// deterministic behavior across all platforms, as classified by the
980    /// specification, at the cost of performance.
981    ///
982    /// This is `true` by default.
983    ///
984    /// [proposal]: https://github.com/webassembly/relaxed-simd
985    pub fn wasm_relaxed_simd(&mut self, enable: bool) -> &mut Self {
986        self.wasm_feature(WasmFeatures::RELAXED_SIMD, enable);
987        self
988    }
989
990    /// This option can be used to control the behavior of the [relaxed SIMD
991    /// proposal's][proposal] instructions.
992    ///
993    /// The relaxed SIMD proposal introduces instructions that are allowed to
994    /// have different behavior on different architectures, primarily to afford
995    /// an efficient implementation on all architectures. This means, however,
996    /// that the same module may execute differently on one host than another,
997    /// which typically is not otherwise the case. This option is provided to
998    /// force Wasmtime to generate deterministic code for all relaxed simd
999    /// instructions, at the cost of performance, for all architectures. When
1000    /// this option is enabled then the deterministic behavior of all
1001    /// instructions in the relaxed SIMD proposal is selected.
1002    ///
1003    /// This is `false` by default.
1004    ///
1005    /// [proposal]: https://github.com/webassembly/relaxed-simd
    pub fn relaxed_simd_deterministic(&mut self, enable: bool) -> &mut Self {
        // Stored as `Some(enable)` in the engine tunables; `Some` records an
        // explicit user choice as opposed to the unset default.
        self.tunables.relaxed_simd_deterministic = Some(enable);
        self
    }
1010
1011    /// Configures whether the [WebAssembly bulk memory operations
1012    /// proposal][proposal] will be enabled for compilation.
1013    ///
1014    /// This feature gates items such as the `memory.copy` instruction, passive
1015    /// data/table segments, etc, being in a module.
1016    ///
1017    /// This is `true` by default.
1018    ///
1019    /// Feature `reference_types`, which is also `true` by default, requires
1020    /// this feature to be enabled. Thus disabling this feature must also disable
1021    /// `reference_types` as well using [`wasm_reference_types`](crate::Config::wasm_reference_types).
1022    ///
1023    /// # Errors
1024    ///
1025    /// Disabling this feature without disabling `reference_types` will cause
1026    /// `Engine::new` to fail.
1027    ///
1028    /// [proposal]: https://github.com/webassembly/bulk-memory-operations
1029    pub fn wasm_bulk_memory(&mut self, enable: bool) -> &mut Self {
1030        self.wasm_feature(WasmFeatures::BULK_MEMORY, enable);
1031        self
1032    }
1033
1034    /// Configures whether the WebAssembly multi-value [proposal] will
1035    /// be enabled for compilation.
1036    ///
1037    /// This feature gates functions and blocks returning multiple values in a
1038    /// module, for example.
1039    ///
1040    /// This is `true` by default.
1041    ///
1042    /// [proposal]: https://github.com/webassembly/multi-value
1043    pub fn wasm_multi_value(&mut self, enable: bool) -> &mut Self {
1044        self.wasm_feature(WasmFeatures::MULTI_VALUE, enable);
1045        self
1046    }
1047
1048    /// Configures whether the WebAssembly multi-memory [proposal] will
1049    /// be enabled for compilation.
1050    ///
1051    /// This feature gates modules having more than one linear memory
1052    /// declaration or import.
1053    ///
1054    /// This is `true` by default.
1055    ///
1056    /// [proposal]: https://github.com/webassembly/multi-memory
1057    pub fn wasm_multi_memory(&mut self, enable: bool) -> &mut Self {
1058        self.wasm_feature(WasmFeatures::MULTI_MEMORY, enable);
1059        self
1060    }
1061
1062    /// Configures whether the WebAssembly memory64 [proposal] will
1063    /// be enabled for compilation.
1064    ///
    /// Note that the upstream specification is not finalized and Wasmtime may
    /// also have bugs for this feature since it hasn't been exercised much.
1068    ///
1069    /// This is `false` by default.
1070    ///
1071    /// [proposal]: https://github.com/webassembly/memory64
1072    pub fn wasm_memory64(&mut self, enable: bool) -> &mut Self {
1073        self.wasm_feature(WasmFeatures::MEMORY64, enable);
1074        self
1075    }
1076
1077    /// Configures whether the WebAssembly extended-const [proposal] will
1078    /// be enabled for compilation.
1079    ///
1080    /// This is `true` by default.
1081    ///
1082    /// [proposal]: https://github.com/webassembly/extended-const
1083    pub fn wasm_extended_const(&mut self, enable: bool) -> &mut Self {
1084        self.wasm_feature(WasmFeatures::EXTENDED_CONST, enable);
1085        self
1086    }
1087
1088    /// Configures whether the [WebAssembly stack switching
1089    /// proposal][proposal] will be enabled for compilation.
1090    ///
1091    /// This feature gates the use of control tags.
1092    ///
1093    /// This feature depends on the `function_reference_types` and
1094    /// `exceptions` features.
1095    ///
1096    /// This feature is `false` by default.
1097    ///
    /// [proposal]: https://github.com/webassembly/stack-switching
1101    pub fn wasm_stack_switching(&mut self, enable: bool) -> &mut Self {
1102        self.wasm_feature(WasmFeatures::STACK_SWITCHING, enable);
1103        self
1104    }
1105
1106    /// Configures whether the WebAssembly component-model [proposal] will
1107    /// be enabled for compilation.
1108    ///
1109    /// This flag can be used to blanket disable all components within Wasmtime.
1110    /// Otherwise usage of components requires statically using
1111    /// [`Component`](crate::component::Component) instead of
1112    /// [`Module`](crate::Module) for example anyway.
1113    ///
1114    /// The default value for this option is whether the `component-model`
1115    /// crate feature of Wasmtime is enabled or not. By default this crate
1116    /// feature is enabled.
1117    ///
1118    /// [proposal]: https://github.com/webassembly/component-model
1119    #[cfg(feature = "component-model")]
1120    pub fn wasm_component_model(&mut self, enable: bool) -> &mut Self {
1121        self.wasm_feature(WasmFeatures::COMPONENT_MODEL, enable);
1122        self
1123    }
1124
1125    /// Configures whether components support the async ABI [proposal] for
1126    /// lifting and lowering functions, as well as `stream`, `future`, and
1127    /// `error-context` types.
1128    ///
1129    /// Please note that Wasmtime's support for this feature is _very_
1130    /// incomplete.
1131    ///
1132    /// [proposal]:
1133    ///     https://github.com/WebAssembly/component-model/blob/main/design/mvp/Async.md
1134    #[cfg(feature = "component-model-async")]
1135    pub fn wasm_component_model_async(&mut self, enable: bool) -> &mut Self {
1136        self.wasm_feature(WasmFeatures::CM_ASYNC, enable);
1137        self
1138    }
1139
1140    /// This corresponds to the 🚝 emoji in the component model specification.
1141    ///
1142    /// Please note that Wasmtime's support for this feature is _very_
1143    /// incomplete.
1144    ///
1145    /// [proposal]:
1146    ///     https://github.com/WebAssembly/component-model/blob/main/design/mvp/Async.md
1147    #[cfg(feature = "component-model-async")]
1148    pub fn wasm_component_model_async_builtins(&mut self, enable: bool) -> &mut Self {
1149        self.wasm_feature(WasmFeatures::CM_ASYNC_BUILTINS, enable);
1150        self
1151    }
1152
1153    /// This corresponds to the 🚟 emoji in the component model specification.
1154    ///
1155    /// Please note that Wasmtime's support for this feature is _very_
1156    /// incomplete.
1157    ///
1158    /// [proposal]: https://github.com/WebAssembly/component-model/blob/main/design/mvp/Async.md
1159    #[cfg(feature = "component-model-async")]
1160    pub fn wasm_component_model_async_stackful(&mut self, enable: bool) -> &mut Self {
1161        self.wasm_feature(WasmFeatures::CM_ASYNC_STACKFUL, enable);
1162        self
1163    }
1164
1165    /// This corresponds to the 📝 emoji in the component model specification.
1166    ///
1167    /// Please note that Wasmtime's support for this feature is _very_
1168    /// incomplete.
1169    ///
1170    /// [proposal]: https://github.com/WebAssembly/component-model/blob/main/design/mvp/Async.md
1171    #[cfg(feature = "component-model")]
1172    pub fn wasm_component_model_error_context(&mut self, enable: bool) -> &mut Self {
1173        self.wasm_feature(WasmFeatures::CM_ERROR_CONTEXT, enable);
1174        self
1175    }
1176
1177    /// Configures whether the [GC extension to the component-model
1178    /// proposal][proposal] is enabled or not.
1179    ///
1180    /// This corresponds to the 🛸 emoji in the component model specification.
1181    ///
1182    /// Please note that Wasmtime's support for this feature is _very_
1183    /// incomplete.
1184    ///
1185    /// [proposal]: https://github.com/WebAssembly/component-model/issues/525
1186    #[cfg(feature = "component-model")]
1187    pub fn wasm_component_model_gc(&mut self, enable: bool) -> &mut Self {
1188        self.wasm_feature(WasmFeatures::CM_GC, enable);
1189        self
1190    }
1191
1192    /// Configures whether the [Exception-handling proposal][proposal] is enabled or not.
1193    ///
1194    /// [proposal]: https://github.com/WebAssembly/exception-handling
1195    #[cfg(feature = "gc")]
1196    pub fn wasm_exceptions(&mut self, enable: bool) -> &mut Self {
1197        self.wasm_feature(WasmFeatures::EXCEPTIONS, enable);
1198        self
1199    }
1200
1201    #[doc(hidden)] // FIXME(#3427) - if/when implemented then un-hide this
1202    #[deprecated = "This configuration option only exists for internal \
1203                    usage with the spec testsuite. It may be removed at \
1204                    any time and without warning. Do not rely on it!"]
1205    pub fn wasm_legacy_exceptions(&mut self, enable: bool) -> &mut Self {
1206        self.wasm_feature(WasmFeatures::LEGACY_EXCEPTIONS, enable);
1207        self
1208    }
1209
1210    /// Configures which compilation strategy will be used for wasm modules.
1211    ///
1212    /// This method can be used to configure which compiler is used for wasm
1213    /// modules, and for more documentation consult the [`Strategy`] enumeration
1214    /// and its documentation.
1215    ///
1216    /// The default value for this is `Strategy::Auto`.
    #[cfg(any(feature = "cranelift", feature = "winch"))]
    pub fn strategy(&mut self, strategy: Strategy) -> &mut Self {
        // NOTE(review): `not_auto()` appears to normalize `Strategy::Auto`
        // into "no explicit choice" — confirm against `Strategy`'s impl.
        self.compiler_config.strategy = strategy.not_auto();
        self
    }
1222
1223    /// Configures which garbage collector will be used for Wasm modules.
1224    ///
1225    /// This method can be used to configure which garbage collector
1226    /// implementation is used for Wasm modules. For more documentation, consult
1227    /// the [`Collector`] enumeration and its documentation.
1228    ///
1229    /// The default value for this is `Collector::Auto`.
    #[cfg(feature = "gc")]
    pub fn collector(&mut self, collector: Collector) -> &mut Self {
        // Plain field assignment; no validation is performed here.
        self.collector = collector;
        self
    }
1235
1236    /// Creates a default profiler based on the profiling strategy chosen.
1237    ///
1238    /// Profiler creation calls the type's default initializer where the purpose is
1239    /// really just to put in place the type used for profiling.
1240    ///
1241    /// Some [`ProfilingStrategy`] require specific platforms or particular feature
1242    /// to be enabled, such as `ProfilingStrategy::JitDump` requires the `jitdump`
1243    /// feature.
1244    ///
1245    /// # Errors
1246    ///
    /// The validation of this field is deferred until the engine is being
    /// built, and thus may cause `Engine::new` to fail if the required feature
    /// is disabled, or the platform is not supported.
    pub fn profiler(&mut self, profile: ProfilingStrategy) -> &mut Self {
        // Only records the strategy; per the `# Errors` docs above, feature
        // and platform support is checked when the engine is built.
        self.profiling_strategy = profile;
        self
    }
1254
1255    /// Configures whether the debug verifier of Cranelift is enabled or not.
1256    ///
1257    /// When Cranelift is used as a code generation backend this will configure
1258    /// it to have the `enable_verifier` flag which will enable a number of debug
1259    /// checks inside of Cranelift. This is largely only useful for the
1260    /// developers of wasmtime itself.
1261    ///
1262    /// The default value for this is `false`
1263    #[cfg(any(feature = "cranelift", feature = "winch"))]
1264    pub fn cranelift_debug_verifier(&mut self, enable: bool) -> &mut Self {
1265        let val = if enable { "true" } else { "false" };
1266        self.compiler_config
1267            .settings
1268            .insert("enable_verifier".to_string(), val.to_string());
1269        self
1270    }
1271
1272    /// Configures whether extra debug checks are inserted into
1273    /// Wasmtime-generated code by Cranelift.
1274    ///
1275    /// The default value for this is `false`
    #[cfg(any(feature = "cranelift", feature = "winch"))]
    pub fn cranelift_wasmtime_debug_checks(&mut self, enable: bool) -> &mut Self {
        // Forwards to the `unsafe` generic flag setter with a fixed flag name;
        // flag validation is deferred to engine construction.
        unsafe { self.cranelift_flag_set("wasmtime_debug_checks", &enable.to_string()) }
    }
1280
1281    /// Configures the Cranelift code generator optimization level.
1282    ///
1283    /// When the Cranelift code generator is used you can configure the
1284    /// optimization level used for generated code in a few various ways. For
1285    /// more information see the documentation of [`OptLevel`].
1286    ///
1287    /// The default value for this is `OptLevel::Speed`.
1288    #[cfg(any(feature = "cranelift", feature = "winch"))]
1289    pub fn cranelift_opt_level(&mut self, level: OptLevel) -> &mut Self {
1290        let val = match level {
1291            OptLevel::None => "none",
1292            OptLevel::Speed => "speed",
1293            OptLevel::SpeedAndSize => "speed_and_size",
1294        };
1295        self.compiler_config
1296            .settings
1297            .insert("opt_level".to_string(), val.to_string());
1298        self
1299    }
1300
1301    /// Configures the regalloc algorithm used by the Cranelift code generator.
1302    ///
1303    /// Cranelift can select any of several register allocator algorithms. Each
1304    /// of these algorithms generates correct code, but they represent different
1305    /// tradeoffs between compile speed (how expensive the compilation process
1306    /// is) and run-time speed (how fast the generated code runs).
1307    /// For more information see the documentation of [`RegallocAlgorithm`].
1308    ///
1309    /// The default value for this is `RegallocAlgorithm::Backtracking`.
1310    #[cfg(any(feature = "cranelift", feature = "winch"))]
1311    pub fn cranelift_regalloc_algorithm(&mut self, algo: RegallocAlgorithm) -> &mut Self {
1312        let val = match algo {
1313            RegallocAlgorithm::Backtracking => "backtracking",
1314            RegallocAlgorithm::SinglePass => "single_pass",
1315        };
1316        self.compiler_config
1317            .settings
1318            .insert("regalloc_algorithm".to_string(), val.to_string());
1319        self
1320    }
1321
1322    /// Configures whether Cranelift should perform a NaN-canonicalization pass.
1323    ///
1324    /// When Cranelift is used as a code generation backend this will configure
1325    /// it to replace NaNs with a single canonical value. This is useful for
1326    /// users requiring entirely deterministic WebAssembly computation.  This is
1327    /// not required by the WebAssembly spec, so it is not enabled by default.
1328    ///
1329    /// Note that this option affects not only WebAssembly's `f32` and `f64`
1330    /// types but additionally the `v128` type. This option will cause
1331    /// operations using any of these types to have extra checks placed after
1332    /// them to normalize NaN values as needed.
1333    ///
1334    /// The default value for this is `false`
1335    #[cfg(any(feature = "cranelift", feature = "winch"))]
1336    pub fn cranelift_nan_canonicalization(&mut self, enable: bool) -> &mut Self {
1337        let val = if enable { "true" } else { "false" };
1338        self.compiler_config
1339            .settings
1340            .insert("enable_nan_canonicalization".to_string(), val.to_string());
1341        self
1342    }
1343
1344    /// Controls whether proof-carrying code (PCC) is used to validate
1345    /// lowering of Wasm sandbox checks.
1346    ///
1347    /// Proof-carrying code carries "facts" about program values from
1348    /// the IR all the way to machine code, and checks those facts
1349    /// against known machine-instruction semantics. This guards
1350    /// against bugs in instruction lowering that might create holes
1351    /// in the Wasm sandbox.
1352    ///
1353    /// PCC is designed to be fast: it does not require complex
1354    /// solvers or logic engines to verify, but only a linear pass
1355    /// over a trail of "breadcrumbs" or facts at each intermediate
1356    /// value. Thus, it is appropriate to enable in production.
1357    #[cfg(any(feature = "cranelift", feature = "winch"))]
1358    pub fn cranelift_pcc(&mut self, enable: bool) -> &mut Self {
1359        let val = if enable { "true" } else { "false" };
1360        self.compiler_config
1361            .settings
1362            .insert("enable_pcc".to_string(), val.to_string());
1363        self
1364    }
1365
1366    /// Allows setting a Cranelift boolean flag or preset. This allows
1367    /// fine-tuning of Cranelift settings.
1368    ///
1369    /// Since Cranelift flags may be unstable, this method should not be considered to be stable
1370    /// either; other `Config` functions should be preferred for stability.
1371    ///
1372    /// # Safety
1373    ///
1374    /// This is marked as unsafe, because setting the wrong flag might break invariants,
1375    /// resulting in execution hazards.
1376    ///
1377    /// # Errors
1378    ///
    /// The validation of the flags is deferred until the engine is being built, and thus may
    /// cause `Engine::new` to fail if the flag's name does not exist, or the value is not
    /// appropriate for the flag type.
1382    #[cfg(any(feature = "cranelift", feature = "winch"))]
1383    pub unsafe fn cranelift_flag_enable(&mut self, flag: &str) -> &mut Self {
1384        self.compiler_config.flags.insert(flag.to_string());
1385        self
1386    }
1387
    /// Allows setting another Cranelift flag defined by a flag name and value. This allows
1389    /// fine-tuning of Cranelift settings.
1390    ///
1391    /// Since Cranelift flags may be unstable, this method should not be considered to be stable
1392    /// either; other `Config` functions should be preferred for stability.
1393    ///
1394    /// # Safety
1395    ///
1396    /// This is marked as unsafe, because setting the wrong flag might break invariants,
1397    /// resulting in execution hazards.
1398    ///
1399    /// # Errors
1400    ///
    /// The validation of the flags is deferred until the engine is being built, and thus may
    /// cause `Engine::new` to fail if the flag's name does not exist, or is incompatible with
    /// other settings.
1404    ///
1405    /// For example, feature `wasm_backtrace` will set `unwind_info` to `true`, but if it's
1406    /// manually set to false then it will fail.
1407    #[cfg(any(feature = "cranelift", feature = "winch"))]
1408    pub unsafe fn cranelift_flag_set(&mut self, name: &str, value: &str) -> &mut Self {
1409        self.compiler_config
1410            .settings
1411            .insert(name.to_string(), value.to_string());
1412        self
1413    }
1414
1415    /// Set a custom [`Cache`].
1416    ///
1417    /// To load a cache configuration from a file, use [`Cache::from_file`]. Otherwise, you can
1418    /// create a new cache config using [`CacheConfig::new`] and passing that to [`Cache::new`].
1419    ///
1420    /// If you want to disable the cache, you can call this method with `None`.
1421    ///
1422    /// By default, new configs do not have caching enabled.
1423    /// Every call to [`Module::new(my_wasm)`][crate::Module::new] will recompile `my_wasm`,
1424    /// even when it is unchanged, unless an enabled `CacheConfig` is provided.
1425    ///
1426    /// This method is only available when the `cache` feature of this crate is
1427    /// enabled.
1428    ///
1429    /// [docs]: https://bytecodealliance.github.io/wasmtime/cli-cache.html
    #[cfg(feature = "cache")]
    pub fn cache(&mut self, cache: Option<Cache>) -> &mut Self {
        // Store the caller-provided cache; `None` disables caching entirely.
        self.cache = cache;
        self
    }
1435
1436    /// Sets a custom memory creator.
1437    ///
1438    /// Custom memory creators are used when creating host `Memory` objects or when
1439    /// creating instance linear memories for the on-demand instance allocation strategy.
1440    #[cfg(feature = "runtime")]
1441    pub fn with_host_memory(&mut self, mem_creator: Arc<dyn MemoryCreator>) -> &mut Self {
1442        self.mem_creator = Some(Arc::new(MemoryCreatorProxy(mem_creator)));
1443        self
1444    }
1445
1446    /// Sets a custom stack creator.
1447    ///
    /// Custom stack creators are used when creating async instance stacks for
1449    /// the on-demand instance allocation strategy.
1450    #[cfg(feature = "async")]
1451    pub fn with_host_stack(&mut self, stack_creator: Arc<dyn StackCreator>) -> &mut Self {
1452        self.stack_creator = Some(Arc::new(StackCreatorProxy(stack_creator)));
1453        self
1454    }
1455
1456    /// Sets a custom executable-memory publisher.
1457    ///
1458    /// Custom executable-memory publishers are hooks that allow
1459    /// Wasmtime to make certain regions of memory executable when
1460    /// loading precompiled modules or compiling new modules
1461    /// in-process. In most modern operating systems, memory allocated
1462    /// for heap usage is readable and writable by default but not
1463    /// executable. To jump to machine code stored in that memory, we
1464    /// need to make it executable. For security reasons, we usually
1465    /// also make it read-only at the same time, so the executing code
1466    /// can't be modified later.
1467    ///
1468    /// By default, Wasmtime will use the appropriate system calls on
1469    /// the host platform for this work. However, it also allows
1470    /// plugging in a custom implementation via this configuration
1471    /// option. This may be useful on custom or `no_std` platforms,
1472    /// for example, especially where virtual memory is not otherwise
1473    /// used by Wasmtime (no `signals-and-traps` feature).
    #[cfg(feature = "runtime")]
    pub fn with_custom_code_memory(
        &mut self,
        custom_code_memory: Option<Arc<dyn CustomCodeMemory>>,
    ) -> &mut Self {
        // `None` clears any previously installed publisher, restoring the
        // default behavior described in the doc comment above.
        self.custom_code_memory = custom_code_memory;
        self
    }
1482
1483    /// Sets the instance allocation strategy to use.
1484    ///
1485    /// This is notably used in conjunction with
1486    /// [`InstanceAllocationStrategy::Pooling`] and [`PoolingAllocationConfig`].
1487    pub fn allocation_strategy(
1488        &mut self,
1489        strategy: impl Into<InstanceAllocationStrategy>,
1490    ) -> &mut Self {
1491        self.allocation_strategy = strategy.into();
1492        self
1493    }
1494
1495    /// Specifies the capacity of linear memories, in bytes, in their initial
1496    /// allocation.
1497    ///
1498    /// > Note: this value has important performance ramifications, be sure to
1499    /// > benchmark when setting this to a non-default value and read over this
1500    /// > documentation.
1501    ///
1502    /// This function will change the size of the initial memory allocation made
1503    /// for linear memories. This setting is only applicable when the initial
1504    /// size of a linear memory is below this threshold. Linear memories are
1505    /// allocated in the virtual address space of the host process with OS APIs
1506    /// such as `mmap` and this setting affects how large the allocation will
1507    /// be.
1508    ///
1509    /// ## Background: WebAssembly Linear Memories
1510    ///
1511    /// WebAssembly linear memories always start with a minimum size and can
1512    /// possibly grow up to a maximum size. The minimum size is always specified
1513    /// in a WebAssembly module itself and the maximum size can either be
1514    /// optionally specified in the module or inherently limited by the index
1515    /// type. For example for this module:
1516    ///
1517    /// ```wasm
1518    /// (module
1519    ///     (memory $a 4)
1520    ///     (memory $b 4096 4096 (pagesize 1))
1521    ///     (memory $c i64 10)
1522    /// )
1523    /// ```
1524    ///
1525    /// * Memory `$a` initially allocates 4 WebAssembly pages (256KiB) and can
1526    ///   grow up to 4GiB, the limit of the 32-bit index space.
1527    /// * Memory `$b` initially allocates 4096 WebAssembly pages, but in this
1528    ///   case its page size is 1, so it's 4096 bytes. Memory can also grow no
1529    ///   further meaning that it will always be 4096 bytes.
1530    /// * Memory `$c` is a 64-bit linear memory which starts with 640KiB of
1531    ///   memory and can theoretically grow up to 2^64 bytes, although most
1532    ///   hosts will run out of memory long before that.
1533    ///
1534    /// All operations on linear memories done by wasm are required to be
1535    /// in-bounds. Any access beyond the end of a linear memory is considered a
1536    /// trap.
1537    ///
1538    /// ## What this setting affects: Virtual Memory
1539    ///
1540    /// This setting is used to configure the behavior of the size of the linear
1541    /// memory allocation performed for each of these memories. For example the
1542    /// initial linear memory allocation looks like this:
1543    ///
1544    /// ```text
1545    ///              memory_reservation
1546    ///                    |
1547    ///          ◄─────────┴────────────────►
1548    /// ┌───────┬─────────┬──────────────────┬───────┐
1549    /// │ guard │ initial │ ... capacity ... │ guard │
1550    /// └───────┴─────────┴──────────────────┴───────┘
1551    ///  ◄──┬──►                              ◄──┬──►
1552    ///     │                                    │
1553    ///     │                             memory_guard_size
1554    ///     │
1555    ///     │
1556    ///  memory_guard_size (if guard_before_linear_memory)
1557    /// ```
1558    ///
1559    /// Memory in the `initial` range is accessible to the instance and can be
1560    /// read/written by wasm code. Memory in the `guard` regions is never
1561    /// accessible to wasm code and memory in `capacity` is initially
1562    /// inaccessible but may become accessible through `memory.grow` instructions
1563    /// for example.
1564    ///
1565    /// This means that this setting is the size of the initial chunk of virtual
1566    /// memory that a linear memory may grow into.
1567    ///
1568    /// ## What this setting affects: Runtime Speed
1569    ///
1570    /// This is a performance-sensitive setting which is taken into account
1571    /// during the compilation process of a WebAssembly module. For example if a
1572    /// 32-bit WebAssembly linear memory has a `memory_reservation` size of 4GiB
1573    /// then bounds checks can be elided because `capacity` will be guaranteed
1574    /// to be unmapped for all addressable bytes that wasm can access (modulo a
1575    /// few details).
1576    ///
1577    /// If `memory_reservation` was something smaller like 256KiB then that
    /// would have a much smaller impact on virtual memory but the compiled code
1579    /// would then need to have explicit bounds checks to ensure that
1580    /// loads/stores are in-bounds.
1581    ///
1582    /// The goal of this setting is to enable skipping bounds checks in most
1583    /// modules by default. Some situations which require explicit bounds checks
1584    /// though are:
1585    ///
1586    /// * When `memory_reservation` is smaller than the addressable size of the
    ///   linear memory. For example 64-bit linear memories always need
    ///   bounds checks as they can address the entire virtual address space.
1589    ///   For 32-bit linear memories a `memory_reservation` minimum size of 4GiB
1590    ///   is required to elide bounds checks.
1591    ///
1592    /// * When linear memories have a page size of 1 then bounds checks are
1593    ///   required. In this situation virtual memory can't be relied upon
1594    ///   because that operates at the host page size granularity where wasm
1595    ///   requires a per-byte level granularity.
1596    ///
1597    /// * Configuration settings such as [`Config::signals_based_traps`] can be
1598    ///   used to disable the use of signal handlers and virtual memory so
1599    ///   explicit bounds checks are required.
1600    ///
1601    /// * When [`Config::memory_guard_size`] is too small a bounds check may be
1602    ///   required. For 32-bit wasm addresses are actually 33-bit effective
1603    ///   addresses because loads/stores have a 32-bit static offset to add to
1604    ///   the dynamic 32-bit address. If the static offset is larger than the
1605    ///   size of the guard region then an explicit bounds check is required.
1606    ///
1607    /// ## What this setting affects: Memory Growth Behavior
1608    ///
1609    /// In addition to affecting bounds checks emitted in compiled code this
1610    /// setting also affects how WebAssembly linear memories are grown. The
1611    /// `memory.grow` instruction can be used to make a linear memory larger and
1612    /// this is also affected by APIs such as
1613    /// [`Memory::grow`](crate::Memory::grow).
1614    ///
1615    /// In these situations when the amount being grown is small enough to fit
1616    /// within the remaining capacity then the linear memory doesn't have to be
1617    /// moved at runtime. If the capacity runs out though then a new linear
1618    /// memory allocation must be made and the contents of linear memory is
1619    /// copied over.
1620    ///
1621    /// For example here's a situation where a copy happens:
1622    ///
1623    /// * The `memory_reservation` setting is configured to 128KiB.
1624    /// * A WebAssembly linear memory starts with a single 64KiB page.
1625    /// * This memory can be grown by one page to contain the full 128KiB of
1626    ///   memory.
1627    /// * If grown by one more page, though, then a 192KiB allocation must be
1628    ///   made and the previous 128KiB of contents are copied into the new
1629    ///   allocation.
1630    ///
1631    /// This growth behavior can have a significant performance impact if lots
1632    /// of data needs to be copied on growth. Conversely if memory growth never
1633    /// needs to happen because the capacity will always be large enough then
1634    /// optimizations can be applied to cache the base pointer of linear memory.
1635    ///
1636    /// When memory is grown then the
1637    /// [`Config::memory_reservation_for_growth`] is used for the new
1638    /// memory allocation to have memory to grow into.
1639    ///
1640    /// When using the pooling allocator via [`PoolingAllocationConfig`] then
1641    /// memories are never allowed to move so requests for growth are instead
1642    /// rejected with an error.
1643    ///
1644    /// ## When this setting is not used
1645    ///
1646    /// This setting is ignored and unused when the initial size of linear
1647    /// memory is larger than this threshold. For example if this setting is set
1648    /// to 1MiB but a wasm module requires a 2MiB minimum allocation then this
1649    /// setting is ignored. In this situation the minimum size of memory will be
1650    /// allocated along with [`Config::memory_reservation_for_growth`]
1651    /// after it to grow into.
1652    ///
1653    /// That means that this value can be set to zero. That can be useful in
1654    /// benchmarking to see the overhead of bounds checks for example.
1655    /// Additionally it can be used to minimize the virtual memory allocated by
1656    /// Wasmtime.
1657    ///
1658    /// ## Default Value
1659    ///
1660    /// The default value for this property depends on the host platform. For
1661    /// 64-bit platforms there's lots of address space available, so the default
1662    /// configured here is 4GiB. When coupled with the default size of
1663    /// [`Config::memory_guard_size`] this means that 32-bit WebAssembly linear
1664    /// memories with 64KiB page sizes will skip almost all bounds checks by
1665    /// default.
1666    ///
1667    /// For 32-bit platforms this value defaults to 10MiB. This means that
1668    /// bounds checks will be required on 32-bit platforms.
1669    pub fn memory_reservation(&mut self, bytes: u64) -> &mut Self {
1670        self.tunables.memory_reservation = Some(bytes);
1671        self
1672    }
1673
1674    /// Indicates whether linear memories may relocate their base pointer at
1675    /// runtime.
1676    ///
1677    /// WebAssembly linear memories either have a maximum size that's explicitly
1678    /// listed in the type of a memory or inherently limited by the index type
1679    /// of the memory (e.g. 4GiB for 32-bit linear memories). Depending on how
1680    /// the linear memory is allocated (see [`Config::memory_reservation`]) it
1681    /// may be necessary to move the memory in the host's virtual address space
1682    /// during growth. This option controls whether this movement is allowed or
1683    /// not.
1684    ///
1685    /// An example of a linear memory needing to move is when
1686    /// [`Config::memory_reservation`] is 0 then a linear memory will be
1687    /// allocated as the minimum size of the memory plus
1688    /// [`Config::memory_reservation_for_growth`]. When memory grows beyond the
1689    /// reservation for growth then the memory needs to be relocated.
1690    ///
1691    /// When this option is set to `false` then it can have a number of impacts
1692    /// on how memories work at runtime:
1693    ///
1694    /// * Modules can be compiled with static knowledge the base pointer of
1695    ///   linear memory never changes to enable optimizations such as
1696    ///   loop invariant code motion (hoisting the base pointer out of a loop).
1697    ///
1698    /// * Memories cannot grow in excess of their original allocation. This
1699    ///   means that [`Config::memory_reservation`] and
1700    ///   [`Config::memory_reservation_for_growth`] may need tuning to ensure
1701    ///   the memory configuration works at runtime.
1702    ///
1703    /// The default value for this option is `true`.
1704    pub fn memory_may_move(&mut self, enable: bool) -> &mut Self {
1705        self.tunables.memory_may_move = Some(enable);
1706        self
1707    }
1708
1709    /// Configures the size, in bytes, of the guard region used at the end of a
1710    /// linear memory's address space reservation.
1711    ///
1712    /// > Note: this value has important performance ramifications, be sure to
1713    /// > understand what this value does before tweaking it and benchmarking.
1714    ///
1715    /// This setting controls how many bytes are guaranteed to be unmapped after
1716    /// the virtual memory allocation of a linear memory. When
1717    /// combined with sufficiently large values of
1718    /// [`Config::memory_reservation`] (e.g. 4GiB for 32-bit linear memories)
1719    /// then a guard region can be used to eliminate bounds checks in generated
1720    /// code.
1721    ///
1722    /// This setting additionally can be used to help deduplicate bounds checks
1723    /// in code that otherwise requires bounds checks. For example with a 4KiB
1724    /// guard region then a 64-bit linear memory which accesses addresses `x+8`
1725    /// and `x+16` only needs to perform a single bounds check on `x`. If that
1726    /// bounds check passes then the offset is guaranteed to either reside in
1727    /// linear memory or the guard region, resulting in deterministic behavior
1728    /// either way.
1729    ///
1730    /// ## How big should the guard be?
1731    ///
1732    /// In general, like with configuring [`Config::memory_reservation`], you
1733    /// probably don't want to change this value from the defaults. Removing
1734    /// bounds checks is dependent on a number of factors where the size of the
1735    /// guard region is only one piece of the equation. Other factors include:
1736    ///
1737    /// * [`Config::memory_reservation`]
1738    /// * The index type of the linear memory (e.g. 32-bit or 64-bit)
1739    /// * The page size of the linear memory
1740    /// * Other settings such as [`Config::signals_based_traps`]
1741    ///
1742    /// Embeddings using virtual memory almost always want at least some guard
1743    /// region, but otherwise changes from the default should be profiled
1744    /// locally to see the performance impact.
1745    ///
1746    /// ## Default
1747    ///
1748    /// The default value for this property is 32MiB on 64-bit platforms. This
1749    /// allows eliminating almost all bounds checks on loads/stores with an
1750    /// immediate offset of less than 32MiB. On 32-bit platforms this defaults
1751    /// to 64KiB.
1752    pub fn memory_guard_size(&mut self, bytes: u64) -> &mut Self {
1753        self.tunables.memory_guard_size = Some(bytes);
1754        self
1755    }
1756
1757    /// Configures the size, in bytes, of the extra virtual memory space
1758    /// reserved after a linear memory is relocated.
1759    ///
1760    /// This setting is used in conjunction with [`Config::memory_reservation`]
1761    /// to configure what happens after a linear memory is relocated in the host
1762    /// address space. If the initial size of a linear memory exceeds
1763    /// [`Config::memory_reservation`] or if it grows beyond that size
1764    /// throughout its lifetime then this setting will be used.
1765    ///
1766    /// When a linear memory is relocated it will initially look like this:
1767    ///
1768    /// ```text
1769    ///            memory.size
1770    ///                 │
1771    ///          ◄──────┴─────►
1772    /// ┌───────┬──────────────┬───────┐
1773    /// │ guard │  accessible  │ guard │
1774    /// └───────┴──────────────┴───────┘
1775    ///                         ◄──┬──►
1776    ///                            │
1777    ///                     memory_guard_size
1778    /// ```
1779    ///
1780    /// where `accessible` needs to be grown but there's no more memory to grow
1781    /// into. A new region of the virtual address space will be allocated that
1782    /// looks like this:
1783    ///
1784    /// ```text
1785    ///                           memory_reservation_for_growth
1786    ///                                       │
1787    ///            memory.size                │
1788    ///                 │                     │
1789    ///          ◄──────┴─────► ◄─────────────┴───────────►
1790    /// ┌───────┬──────────────┬───────────────────────────┬───────┐
1791    /// │ guard │  accessible  │ .. reserved for growth .. │ guard │
1792    /// └───────┴──────────────┴───────────────────────────┴───────┘
1793    ///                                                     ◄──┬──►
1794    ///                                                        │
1795    ///                                               memory_guard_size
1796    /// ```
1797    ///
1798    /// This means that up to `memory_reservation_for_growth` bytes can be
1799    /// allocated again before the entire linear memory needs to be moved again
1800    /// when another `memory_reservation_for_growth` bytes will be appended to
1801    /// the size of the allocation.
1802    ///
1803    /// Note that this is a currently simple heuristic for optimizing the growth
1804    /// of dynamic memories, primarily implemented for the memory64 proposal
1805    /// where the maximum size of memory is larger than 4GiB. This setting is
1806    /// unlikely to be a one-size-fits-all style approach and if you're an
1807    /// embedder running into issues with growth and are interested in having
1808    /// other growth strategies available here please feel free to [open an
1809    /// issue on the Wasmtime repository][issue]!
1810    ///
1811    /// [issue]: https://github.com/bytecodealliance/wasmtime/issues/new
1812    ///
1813    /// ## Default
1814    ///
1815    /// For 64-bit platforms this defaults to 2GiB, and for 32-bit platforms
1816    /// this defaults to 1MiB.
1817    pub fn memory_reservation_for_growth(&mut self, bytes: u64) -> &mut Self {
1818        self.tunables.memory_reservation_for_growth = Some(bytes);
1819        self
1820    }
1821
1822    /// Indicates whether a guard region is present before allocations of
1823    /// linear memory.
1824    ///
1825    /// Guard regions before linear memories are never used during normal
1826    /// operation of WebAssembly modules, even if they have out-of-bounds
1827    /// loads. The only purpose for a preceding guard region in linear memory
1828    /// is extra protection against possible bugs in code generators like
1829    /// Cranelift. This setting does not affect performance in any way, but will
1830    /// result in larger virtual memory reservations for linear memories (it
1831    /// won't actually ever use more memory, just use more of the address
1832    /// space).
1833    ///
1834    /// The size of the guard region before linear memory is the same as the
1835    /// guard size that comes after linear memory, which is configured by
1836    /// [`Config::memory_guard_size`].
1837    ///
1838    /// ## Default
1839    ///
1840    /// This value defaults to `true`.
1841    pub fn guard_before_linear_memory(&mut self, enable: bool) -> &mut Self {
1842        self.tunables.guard_before_linear_memory = Some(enable);
1843        self
1844    }
1845
1846    /// Indicates whether to initialize tables lazily, so that instantiation
1847    /// is fast but indirect calls are a little slower. If false, tables
1848    /// are initialized eagerly during instantiation from any active element
1849    /// segments that apply to them.
1850    ///
1851    /// **Note** Disabling this option is not compatible with the Winch compiler.
1852    ///
1853    /// ## Default
1854    ///
1855    /// This value defaults to `true`.
1856    pub fn table_lazy_init(&mut self, table_lazy_init: bool) -> &mut Self {
1857        self.tunables.table_lazy_init = Some(table_lazy_init);
1858        self
1859    }
1860
1861    /// Configure the version information used in serialized and deserialized [`crate::Module`]s.
    /// This affects the behavior of [`crate::Module::serialize()`], as well as
1863    /// [`crate::Module::deserialize()`] and related functions.
1864    ///
1865    /// The default strategy is to use the wasmtime crate's Cargo package version.
1866    pub fn module_version(&mut self, strategy: ModuleVersionStrategy) -> Result<&mut Self> {
1867        match strategy {
1868            // This case requires special precondition for assertion in SerializedModule::to_bytes
1869            ModuleVersionStrategy::Custom(ref v) => {
1870                if v.as_bytes().len() > 255 {
1871                    bail!("custom module version cannot be more than 255 bytes: {}", v);
1872                }
1873            }
1874            _ => {}
1875        }
1876        self.module_version = strategy;
1877        Ok(self)
1878    }
1879
1880    /// Configure whether wasmtime should compile a module using multiple
1881    /// threads.
1882    ///
1883    /// Disabling this will result in a single thread being used to compile
1884    /// the wasm bytecode.
1885    ///
1886    /// By default parallel compilation is enabled.
    #[cfg(feature = "parallel-compilation")]
    pub fn parallel_compilation(&mut self, parallel: bool) -> &mut Self {
        // Record whether compilation may fan out across multiple threads.
        self.parallel_compilation = parallel;
        self
    }
1892
1893    /// Configures whether compiled artifacts will contain information to map
1894    /// native program addresses back to the original wasm module.
1895    ///
1896    /// This configuration option is `true` by default and, if enabled,
1897    /// generates the appropriate tables in compiled modules to map from native
1898    /// address back to wasm source addresses. This is used for displaying wasm
1899    /// program counters in backtraces as well as generating filenames/line
1900    /// numbers if so configured as well (and the original wasm module has DWARF
1901    /// debugging information present).
1902    pub fn generate_address_map(&mut self, generate: bool) -> &mut Self {
1903        self.tunables.generate_address_map = Some(generate);
1904        self
1905    }
1906
1907    /// Configures whether copy-on-write memory-mapped data is used to
1908    /// initialize a linear memory.
1909    ///
1910    /// Initializing linear memory via a copy-on-write mapping can drastically
1911    /// improve instantiation costs of a WebAssembly module because copying
1912    /// memory is deferred. Additionally if a page of memory is only ever read
    /// from WebAssembly and never written to then the same underlying page of
1914    /// data will be reused between all instantiations of a module meaning that
1915    /// if a module is instantiated many times this can lower the overall memory
    /// needed to run that module.
1917    ///
1918    /// The main disadvantage of copy-on-write initialization, however, is that
1919    /// it may be possible for highly-parallel scenarios to be less scalable. If
1920    /// a page is read initially by a WebAssembly module then that page will be
1921    /// mapped to a read-only copy shared between all WebAssembly instances. If
1922    /// the same page is then written, however, then a private copy is created
1923    /// and swapped out from the read-only version. This also requires an [IPI],
1924    /// however, which can be a significant bottleneck in high-parallelism
1925    /// situations.
1926    ///
1927    /// This feature is only applicable when a WebAssembly module meets specific
1928    /// criteria to be initialized in this fashion, such as:
1929    ///
1930    /// * Only memories defined in the module can be initialized this way.
1931    /// * Data segments for memory must use statically known offsets.
1932    /// * Data segments for memory must all be in-bounds.
1933    ///
1934    /// Modules which do not meet these criteria will fall back to
1935    /// initialization of linear memory based on copying memory.
1936    ///
1937    /// This feature of Wasmtime is also platform-specific:
1938    ///
1939    /// * Linux - this feature is supported for all instances of [`Module`].
1940    ///   Modules backed by an existing mmap (such as those created by
1941    ///   [`Module::deserialize_file`]) will reuse that mmap to cow-initialize
    ///   memory. Other instances of [`Module`] may use the `memfd_create`
1943    ///   syscall to create an initialization image to `mmap`.
1944    /// * Unix (not Linux) - this feature is only supported when loading modules
1945    ///   from a precompiled file via [`Module::deserialize_file`] where there
1946    ///   is a file descriptor to use to map data into the process. Note that
1947    ///   the module must have been compiled with this setting enabled as well.
1948    /// * Windows - there is no support for this feature at this time. Memory
1949    ///   initialization will always copy bytes.
1950    ///
1951    /// By default this option is enabled.
1952    ///
1953    /// [`Module::deserialize_file`]: crate::Module::deserialize_file
1954    /// [`Module`]: crate::Module
1955    /// [IPI]: https://en.wikipedia.org/wiki/Inter-processor_interrupt
1956    pub fn memory_init_cow(&mut self, enable: bool) -> &mut Self {
1957        self.tunables.memory_init_cow = Some(enable);
1958        self
1959    }
1960
1961    /// A configuration option to force the usage of `memfd_create` on Linux to
1962    /// be used as the backing source for a module's initial memory image.
1963    ///
1964    /// When [`Config::memory_init_cow`] is enabled, which is enabled by
1965    /// default, module memory initialization images are taken from a module's
1966    /// original mmap if possible. If a precompiled module was loaded from disk
1967    /// this means that the disk's file is used as an mmap source for the
1968    /// initial linear memory contents. This option can be used to force, on
1969    /// Linux, that instead of using the original file on disk a new in-memory
1970    /// file is created with `memfd_create` to hold the contents of the initial
1971    /// image.
1972    ///
1973    /// This option can be used to avoid possibly loading the contents of memory
1974    /// from disk through a page fault. Instead with `memfd_create` the contents
1975    /// of memory are always in RAM, meaning that even page faults which
1976    /// initially populate a wasm linear memory will only work with RAM instead
1977    /// of ever hitting the disk that the original precompiled module is stored
1978    /// on.
1979    ///
1980    /// This option is disabled by default.
1981    pub fn force_memory_init_memfd(&mut self, enable: bool) -> &mut Self {
1982        self.force_memory_init_memfd = enable;
1983        self
1984    }
1985
1986    /// Configures whether or not a coredump should be generated and attached to
1987    /// the anyhow::Error when a trap is raised.
1988    ///
1989    /// This option is disabled by default.
1990    #[cfg(feature = "coredump")]
1991    pub fn coredump_on_trap(&mut self, enable: bool) -> &mut Self {
1992        self.coredump_on_trap = enable;
1993        self
1994    }
1995
1996    /// Enables memory error checking for wasm programs.
1997    ///
1998    /// This option is disabled by default.
1999    #[cfg(any(feature = "cranelift", feature = "winch"))]
2000    pub fn wmemcheck(&mut self, enable: bool) -> &mut Self {
2001        self.wmemcheck = enable;
2002        self.compiler_config.wmemcheck = enable;
2003        self
2004    }
2005
2006    /// Configures the "guaranteed dense image size" for copy-on-write
2007    /// initialized memories.
2008    ///
2009    /// When using the [`Config::memory_init_cow`] feature to initialize memory
2010    /// efficiently (which is enabled by default), compiled modules contain an
2011    /// image of the module's initial heap. If the module has a fairly sparse
2012    /// initial heap, with just a few data segments at very different offsets,
2013    /// this could result in a large region of zero bytes in the image. In
2014    /// other words, it's not very memory-efficient.
2015    ///
2016    /// We normally use a heuristic to avoid this: if less than half
2017    /// of the initialized range (first non-zero to last non-zero
2018    /// byte) of any memory in the module has pages with nonzero
2019    /// bytes, then we avoid creating a memory image for the entire module.
2020    ///
2021    /// However, if the embedder always needs the instantiation-time efficiency
2022    /// of copy-on-write initialization, and is otherwise carefully controlling
2023    /// parameters of the modules (for example, by limiting the maximum heap
2024    /// size of the modules), then it may be desirable to ensure a memory image
2025    /// is created even if this could go against the heuristic above. Thus, we
2026    /// add another condition: there is a size of initialized data region up to
2027    /// which we *always* allow a memory image. The embedder can set this to a
2028    /// known maximum heap size if they desire to always get the benefits of
2029    /// copy-on-write images.
2030    ///
2031    /// In the future we may implement a "best of both worlds"
2032    /// solution where we have a dense image up to some limit, and
2033    /// then support a sparse list of initializers beyond that; this
2034    /// would get most of the benefit of copy-on-write and pay the incremental
2035    /// cost of eager initialization only for those bits of memory
2036    /// that are out-of-bounds. However, for now, an embedder desiring
2037    /// fast instantiation should ensure that this setting is as large
2038    /// as the maximum module initial memory content size.
2039    ///
2040    /// By default this value is 16 MiB.
2041    pub fn memory_guaranteed_dense_image_size(&mut self, size_in_bytes: u64) -> &mut Self {
2042        self.memory_guaranteed_dense_image_size = size_in_bytes;
2043        self
2044    }
2045
2046    /// Whether to enable function inlining during compilation or not.
2047    ///
2048    /// This may result in faster execution at runtime, but adds additional
2049    /// compilation time. Inlining may also enlarge the size of compiled
2050    /// artifacts (for example, the size of the result of
2051    /// [`Engine::precompile_component`](crate::Engine::precompile_component)).
2052    ///
2053    /// Inlining is not supported by all of Wasmtime's compilation strategies;
2054    /// currently, it only Cranelift supports it. This setting will be ignored
2055    /// when using a compilation strategy that does not support inlining, like
2056    /// Winch.
2057    ///
2058    /// Note that inlining is still somewhat experimental at the moment (as of
2059    /// the Wasmtime version 36).
2060    pub fn compiler_inlining(&mut self, inlining: bool) -> &mut Self {
2061        self.tunables.inlining = Some(inlining);
2062        self
2063    }
2064
2065    /// Returns the set of features that the currently selected compiler backend
2066    /// does not support at all and may panic on.
2067    ///
2068    /// Wasmtime strives to reject unknown modules or unsupported modules with
2069    /// first-class errors instead of panics. Not all compiler backends have the
2070    /// same level of feature support on all platforms as well. This method
2071    /// returns a set of features that the currently selected compiler
2072    /// configuration is known to not support and may panic on. This acts as a
2073    /// first-level filter on incoming wasm modules/configuration to fail-fast
2074    /// instead of panicking later on.
2075    ///
2076    /// Note that if a feature is not listed here it does not mean that the
2077    /// backend fully supports the proposal. Instead that means that the backend
2078    /// doesn't ever panic on the proposal, but errors during compilation may
2079    /// still be returned. This means that features listed here are definitely
2080    /// not supported at all, but features not listed here may still be
2081    /// partially supported. For example at the time of this writing the Winch
2082    /// backend partially supports simd so it's not listed here. Winch doesn't
2083    /// fully support simd but unimplemented instructions just return errors.
2084    fn compiler_panicking_wasm_features(&self) -> WasmFeatures {
2085        #[cfg(any(feature = "cranelift", feature = "winch"))]
2086        match self.compiler_config.strategy {
2087            None | Some(Strategy::Cranelift) => {
2088                let mut unsupported = WasmFeatures::empty();
2089
2090                // Pulley at this time fundamentally doesn't support the
2091                // `threads` proposal, notably shared memory, because Rust can't
2092                // safely implement loads/stores in the face of shared memory.
2093                // Stack switching is not implemented, either.
2094                if self.compiler_target().is_pulley() {
2095                    unsupported |= WasmFeatures::THREADS;
2096                    unsupported |= WasmFeatures::STACK_SWITCHING;
2097                }
2098
2099                use target_lexicon::*;
2100                match self.compiler_target() {
2101                    Triple {
2102                        architecture: Architecture::X86_64 | Architecture::X86_64h,
2103                        operating_system:
2104                            OperatingSystem::Linux
2105                            | OperatingSystem::MacOSX(_)
2106                            | OperatingSystem::Darwin(_),
2107                        ..
2108                    } => {
2109                        // Stack switching supported on (non-Pulley) Cranelift.
2110                    }
2111
2112                    _ => {
2113                        // On platforms other than x64 Unix-like, we don't
2114                        // support stack switching.
2115                        unsupported |= WasmFeatures::STACK_SWITCHING;
2116                    }
2117                }
2118                unsupported
2119            }
2120            Some(Strategy::Winch) => {
2121                let mut unsupported = WasmFeatures::GC
2122                    | WasmFeatures::FUNCTION_REFERENCES
2123                    | WasmFeatures::RELAXED_SIMD
2124                    | WasmFeatures::TAIL_CALL
2125                    | WasmFeatures::GC_TYPES
2126                    | WasmFeatures::EXCEPTIONS
2127                    | WasmFeatures::LEGACY_EXCEPTIONS
2128                    | WasmFeatures::STACK_SWITCHING
2129                    | WasmFeatures::CM_ASYNC;
2130                match self.compiler_target().architecture {
2131                    target_lexicon::Architecture::Aarch64(_) => {
2132                        unsupported |= WasmFeatures::THREADS;
2133                        unsupported |= WasmFeatures::WIDE_ARITHMETIC;
2134                    }
2135
2136                    // Winch doesn't support other non-x64 architectures at this
2137                    // time either but will return an first-class error for
2138                    // them.
2139                    _ => {}
2140                }
2141                unsupported
2142            }
2143            Some(Strategy::Auto) => unreachable!(),
2144        }
2145        #[cfg(not(any(feature = "cranelift", feature = "winch")))]
2146        return WasmFeatures::empty();
2147    }
2148
2149    /// Calculates the set of features that are enabled for this `Config`.
2150    ///
2151    /// This method internally will start with the an empty set of features to
2152    /// avoid being tied to wasmparser's defaults. Next Wasmtime's set of
2153    /// default features are added to this set, some of which are conditional
2154    /// depending on crate features. Finally explicitly requested features via
2155    /// `wasm_*` methods on `Config` are applied. Everything is then validated
2156    /// later in `Config::validate`.
2157    fn features(&self) -> WasmFeatures {
2158        // Wasmtime by default supports all of the wasm 2.0 version of the
2159        // specification.
2160        let mut features = WasmFeatures::WASM2;
2161
2162        // On-by-default features that wasmtime has. Note that these are all
2163        // subject to the criteria at
2164        // https://docs.wasmtime.dev/contributing-implementing-wasm-proposals.html
2165        // and
2166        // https://docs.wasmtime.dev/stability-wasm-proposals.html
2167        features |= WasmFeatures::MULTI_MEMORY;
2168        features |= WasmFeatures::RELAXED_SIMD;
2169        features |= WasmFeatures::TAIL_CALL;
2170        features |= WasmFeatures::EXTENDED_CONST;
2171        features |= WasmFeatures::MEMORY64;
2172        // NB: if you add a feature above this line please double-check
2173        // https://docs.wasmtime.dev/stability-wasm-proposals.html
2174        // to ensure all requirements are met and/or update the documentation
2175        // there too.
2176
2177        // Set some features to their conditionally-enabled defaults depending
2178        // on crate compile-time features.
2179        features.set(WasmFeatures::GC_TYPES, cfg!(feature = "gc"));
2180        features.set(WasmFeatures::THREADS, cfg!(feature = "threads"));
2181        features.set(
2182            WasmFeatures::COMPONENT_MODEL,
2183            cfg!(feature = "component-model"),
2184        );
2185
2186        // From the default set of proposals remove any that the current
2187        // compiler backend may panic on if the module contains them.
2188        features = features & !self.compiler_panicking_wasm_features();
2189
2190        // After wasmtime's defaults are configured then factor in user requests
2191        // and disable/enable features. Note that the enable/disable sets should
2192        // be disjoint.
2193        debug_assert!((self.enabled_features & self.disabled_features).is_empty());
2194        features &= !self.disabled_features;
2195        features |= self.enabled_features;
2196
2197        features
2198    }
2199
2200    /// Returns the configured compiler target for this `Config`.
2201    pub(crate) fn compiler_target(&self) -> target_lexicon::Triple {
2202        // If a target is explicitly configured, always use that.
2203        if let Some(target) = self.target.clone() {
2204            return target;
2205        }
2206
2207        // If the `build.rs` script determined that this platform uses pulley by
2208        // default, then use Pulley.
2209        if cfg!(default_target_pulley) {
2210            return target_lexicon::Triple::pulley_host();
2211        }
2212
2213        // And at this point the target is for sure the host.
2214        target_lexicon::Triple::host()
2215    }
2216
    /// Validates this configuration, returning the final `Tunables` and
    /// `WasmFeatures` that compilation and the runtime will use.
    ///
    /// Checks that the enabled wasm features are supported by the selected
    /// compiler backend and that stack-size/GC-related settings are
    /// internally consistent, then derives target-specific tunables.
    pub(crate) fn validate(&self) -> Result<(Tunables, WasmFeatures)> {
        let features = self.features();

        // First validate that the selected compiler backend and configuration
        // supports the set of `features` that are enabled. This will help
        // provide more first class errors instead of panics about unsupported
        // features and configurations.
        let unsupported = features & self.compiler_panicking_wasm_features();
        if !unsupported.is_empty() {
            // Surface the first offending feature by name in the error.
            for flag in WasmFeatures::FLAGS.iter() {
                if !unsupported.contains(*flag.value()) {
                    continue;
                }
                bail!(
                    "the wasm_{} feature is not supported on this compiler configuration",
                    flag.name().to_lowercase()
                );
            }

            // `unsupported` was non-empty so the loop above must have bailed.
            panic!("should have returned an error by now")
        }

        // Wasm stacks must fit within the async (fiber) stack when async
        // support is in use, and must be non-empty in general.
        #[cfg(any(feature = "async", feature = "stack-switching"))]
        if self.async_support && self.max_wasm_stack > self.async_stack_size {
            bail!("max_wasm_stack size cannot exceed the async_stack_size");
        }
        if self.max_wasm_stack == 0 {
            bail!("max_wasm_stack size cannot be zero");
        }
        if !cfg!(feature = "wmemcheck") && self.wmemcheck {
            bail!("wmemcheck (memory checker) was requested but is not enabled in this build");
        }

        if !cfg!(feature = "gc") && features.gc_types() {
            bail!("support for GC was disabled at compile time")
        }

        if !cfg!(feature = "gc") && features.contains(WasmFeatures::EXCEPTIONS) {
            bail!("exceptions support requires garbage collection (GC) to be enabled in the build");
        }

        let mut tunables = Tunables::default_for_target(&self.compiler_target())?;

        // If no target is explicitly specified then further refine `tunables`
        // for the configuration of this host depending on what platform
        // features were found available at compile time. This means that anyone
        // cross-compiling for a customized host will need to further refine
        // compilation options.
        if self.target.is_none() {
            // If this platform doesn't have native signals then change some
            // defaults to account for that. Note that VM guards are turned off
            // here because that's primarily a feature of eliding
            // bounds-checks.
            if !cfg!(has_native_signals) {
                tunables.signals_based_traps = cfg!(has_native_signals);
                tunables.memory_guard_size = 0;
            }

            // When virtual memory is not available use slightly different
            // defaults for tunables to be more amenable to `MallocMemory`.
            // Note that these can still be overridden by config options.
            if !cfg!(has_virtual_memory) {
                tunables.memory_reservation = 0;
                tunables.memory_reservation_for_growth = 1 << 20; // 1MB
                tunables.memory_init_cow = false;
            }
        }

        // Layer the explicitly-configured tunables over the derived defaults.
        self.tunables.configure(&mut tunables);

        // If we're going to compile with winch, we must use the winch calling convention.
        #[cfg(any(feature = "cranelift", feature = "winch"))]
        {
            tunables.winch_callable = self.compiler_config.strategy == Some(Strategy::Winch);
        }

        // Translate the public collector selection into the environment's
        // representation, but only when GC types are actually enabled.
        tunables.collector = if features.gc_types() {
            #[cfg(feature = "gc")]
            {
                use wasmtime_environ::Collector as EnvCollector;
                Some(match self.collector.try_not_auto()? {
                    Collector::DeferredReferenceCounting => EnvCollector::DeferredReferenceCounting,
                    Collector::Null => EnvCollector::Null,
                    Collector::Auto => unreachable!(),
                })
            }
            #[cfg(not(feature = "gc"))]
            bail!("cannot use GC types: the `gc` feature was disabled at compile time")
        } else {
            None
        };

        Ok((tunables, features))
    }
2311
    /// Builds the instance allocator selected by `self.allocation_strategy`,
    /// either on-demand or (when compiled in) pooling.
    #[cfg(feature = "runtime")]
    pub(crate) fn build_allocator(
        &self,
        tunables: &Tunables,
    ) -> Result<Box<dyn InstanceAllocator + Send + Sync>> {
        // Fiber stack parameters only exist with the `async` feature; fall
        // back to a zero-sized stack with zeroing disabled otherwise.
        #[cfg(feature = "async")]
        let (stack_size, stack_zeroing) = (self.async_stack_size, self.async_stack_zeroing);

        #[cfg(not(feature = "async"))]
        let (stack_size, stack_zeroing) = (0, false);

        // `tunables` is only read by the pooling branch below; this silences
        // the unused-variable warning when `pooling-allocator` is disabled.
        let _ = tunables;

        match &self.allocation_strategy {
            InstanceAllocationStrategy::OnDemand => {
                let mut _allocator = Box::new(OnDemandInstanceAllocator::new(
                    self.mem_creator.clone(),
                    stack_size,
                    stack_zeroing,
                ));
                // A custom stack creator, if configured, is forwarded to the
                // allocator (only configurable with `async` enabled).
                #[cfg(feature = "async")]
                if let Some(stack_creator) = &self.stack_creator {
                    _allocator.set_stack_creator(stack_creator.clone());
                }
                Ok(_allocator)
            }
            #[cfg(feature = "pooling-allocator")]
            InstanceAllocationStrategy::Pooling(config) => {
                // Copy the user's pooling config and splice in the stack
                // parameters computed above.
                let mut config = config.config;
                config.stack_size = stack_size;
                config.async_stack_zeroing = stack_zeroing;
                Ok(Box::new(crate::runtime::vm::PoolingInstanceAllocator::new(
                    &config, tunables,
                )?))
            }
        }
    }
2349
    /// Builds the GC runtime matching the configured collector, or returns
    /// `Ok(None)` when GC types are not enabled for this config.
    #[cfg(feature = "runtime")]
    pub(crate) fn build_gc_runtime(&self) -> Result<Option<Arc<dyn GcRuntime>>> {
        if !self.features().gc_types() {
            return Ok(None);
        }

        #[cfg(not(feature = "gc"))]
        bail!("cannot create a GC runtime: the `gc` feature was disabled at compile time");

        #[cfg(feature = "gc")]
        #[cfg_attr(
            not(any(feature = "gc-null", feature = "gc-drc")),
            expect(unreachable_code, reason = "definitions known to be dummy")
        )]
        {
            // Each collector variant is only constructible when its crate
            // feature is compiled in; otherwise its match arm is unreachable.
            Ok(Some(match self.collector.try_not_auto()? {
                #[cfg(feature = "gc-drc")]
                Collector::DeferredReferenceCounting => {
                    Arc::new(crate::runtime::vm::DrcCollector::default()) as Arc<dyn GcRuntime>
                }
                #[cfg(not(feature = "gc-drc"))]
                Collector::DeferredReferenceCounting => unreachable!(),

                #[cfg(feature = "gc-null")]
                Collector::Null => {
                    Arc::new(crate::runtime::vm::NullCollector::default()) as Arc<dyn GcRuntime>
                }
                #[cfg(not(feature = "gc-null"))]
                Collector::Null => unreachable!(),

                Collector::Auto => unreachable!(),
            }))
        }
    }
2384
2385    #[cfg(feature = "runtime")]
2386    pub(crate) fn build_profiler(&self) -> Result<Box<dyn ProfilingAgent>> {
2387        Ok(match self.profiling_strategy {
2388            ProfilingStrategy::PerfMap => profiling_agent::new_perfmap()?,
2389            ProfilingStrategy::JitDump => profiling_agent::new_jitdump()?,
2390            ProfilingStrategy::VTune => profiling_agent::new_vtune()?,
2391            ProfilingStrategy::None => profiling_agent::new_null(),
2392            ProfilingStrategy::Pulley => profiling_agent::new_pulley()?,
2393        })
2394    }
2395
    /// Consumes this config to construct the compiler backend described by
    /// `self.compiler_config`, applying all settings, flags, and
    /// target-specific requirements before building.
    ///
    /// Returns the (possibly adjusted) config back alongside the built
    /// compiler.
    #[cfg(any(feature = "cranelift", feature = "winch"))]
    pub(crate) fn build_compiler(
        mut self,
        tunables: &Tunables,
        features: WasmFeatures,
    ) -> Result<(Self, Box<dyn wasmtime_environ::Compiler>)> {
        let target = self.compiler_target();

        // The target passed to the builders below is an `Option<Triple>` where
        // `None` represents the current host with CPU features inferred from
        // the host's CPU itself. The `target` above is not an `Option`, so
        // switch it to `None` in the case that a target wasn't explicitly
        // specified (which indicates no feature inference) and the target
        // matches the host.
        let target_for_builder =
            if self.target.is_none() && target == target_lexicon::Triple::host() {
                None
            } else {
                Some(target.clone())
            };

        let mut compiler = match self.compiler_config.strategy {
            #[cfg(feature = "cranelift")]
            Some(Strategy::Cranelift) => wasmtime_cranelift::builder(target_for_builder)?,
            #[cfg(not(feature = "cranelift"))]
            Some(Strategy::Cranelift) => bail!("cranelift support not compiled in"),
            #[cfg(feature = "winch")]
            Some(Strategy::Winch) => wasmtime_winch::builder(target_for_builder)?,
            #[cfg(not(feature = "winch"))]
            Some(Strategy::Winch) => bail!("winch support not compiled in"),

            // NOTE(review): `Auto` is presumably resolved to a concrete
            // strategy before this point -- confirm against callers.
            None | Some(Strategy::Auto) => unreachable!(),
        };

        if let Some(path) = &self.compiler_config.clif_dir {
            compiler.clif_dir(path)?;
        }

        // If probestack is enabled for a target, Wasmtime will always use the
        // inline strategy which doesn't require us to define a `__probestack`
        // function or similar.
        self.compiler_config
            .settings
            .insert("probestack_strategy".into(), "inline".into());

        // We enable stack probing by default on all targets.
        // This is required on Windows because of the way Windows
        // commits its stacks, but it's also a good idea on other
        // platforms to ensure guard pages are hit for large frame
        // sizes.
        self.compiler_config
            .flags
            .insert("enable_probestack".into());

        // The current wasm multivalue implementation depends on this.
        // FIXME(#9510) handle this in wasmtime-cranelift instead.
        self.compiler_config
            .flags
            .insert("enable_multi_ret_implicit_sret".into());

        // An explicit `native_unwind_info` request must agree with any
        // Cranelift-level `unwind_info` setting already present.
        if let Some(unwind_requested) = self.native_unwind_info {
            if !self
                .compiler_config
                .ensure_setting_unset_or_given("unwind_info", &unwind_requested.to_string())
            {
                bail!(
                    "incompatible settings requested for Cranelift and Wasmtime `unwind-info` settings"
                );
            }
        }

        // On Windows unwind info is mandatory and cannot be disabled.
        if target.operating_system == target_lexicon::OperatingSystem::Windows {
            if !self
                .compiler_config
                .ensure_setting_unset_or_given("unwind_info", "true")
            {
                bail!("`native_unwind_info` cannot be disabled on Windows");
            }
        }

        // We require frame pointers for correct stack walking, which is safety
        // critical in the presence of reference types, and otherwise it is just
        // really bad developer experience to get wrong.
        self.compiler_config
            .settings
            .insert("preserve_frame_pointers".into(), "true".into());

        if !tunables.signals_based_traps {
            let mut ok = self
                .compiler_config
                .ensure_setting_unset_or_given("enable_table_access_spectre_mitigation", "false");
            ok = ok
                && self.compiler_config.ensure_setting_unset_or_given(
                    "enable_heap_access_spectre_mitigation",
                    "false",
                );

            // Right now spectre-mitigated bounds checks will load from zero so
            // if host-based signal handlers are disabled then that's a mismatch
            // and doesn't work right now. Fixing this will require more thought
            // of how to implement the bounds check in spectre-only mode.
            if !ok {
                bail!(
                    "when signals-based traps are disabled then spectre \
                     mitigations must also be disabled"
                );
            }
        }

        // check for incompatible compiler options and set required values
        if features.contains(WasmFeatures::REFERENCE_TYPES) {
            if !self
                .compiler_config
                .ensure_setting_unset_or_given("enable_safepoints", "true")
            {
                bail!(
                    "compiler option 'enable_safepoints' must be enabled when 'reference types' is enabled"
                );
            }
        }

        if features.contains(WasmFeatures::RELAXED_SIMD) && !features.contains(WasmFeatures::SIMD) {
            bail!("cannot disable the simd proposal but enable the relaxed simd proposal");
        }

        // Stack switching requires a platform-appropriate switch model;
        // platforms with no supported model are rejected outright.
        if features.contains(WasmFeatures::STACK_SWITCHING) {
            use target_lexicon::OperatingSystem;
            let model = match target.operating_system {
                OperatingSystem::Windows => "update_windows_tib",
                OperatingSystem::Linux
                | OperatingSystem::MacOSX(_)
                | OperatingSystem::Darwin(_) => "basic",
                _ => bail!("stack-switching feature not supported on this platform "),
            };

            if !self
                .compiler_config
                .ensure_setting_unset_or_given("stack_switch_model", model)
            {
                bail!(
                    "compiler option 'stack_switch_model' must be set to '{}' on this platform",
                    model
                );
            }
        }

        // Apply compiler settings and flags
        compiler.set_tunables(tunables.clone())?;
        for (k, v) in self.compiler_config.settings.iter() {
            compiler.set(k, v)?;
        }
        for flag in self.compiler_config.flags.iter() {
            compiler.enable(flag)?;
        }

        #[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
        if let Some(cache_store) = &self.compiler_config.cache_store {
            compiler.enable_incremental_compilation(cache_store.clone())?;
        }

        compiler.wmemcheck(self.compiler_config.wmemcheck);

        Ok((self, compiler.build()?))
    }
2560
2561    /// Internal setting for whether adapter modules for components will have
2562    /// extra WebAssembly instructions inserted performing more debug checks
2563    /// then are necessary.
2564    #[cfg(feature = "component-model")]
2565    pub fn debug_adapter_modules(&mut self, debug: bool) -> &mut Self {
2566        self.tunables.debug_adapter_modules = Some(debug);
2567        self
2568    }
2569
2570    /// Enables clif output when compiling a WebAssembly module.
2571    #[cfg(any(feature = "cranelift", feature = "winch"))]
2572    pub fn emit_clif(&mut self, path: &Path) -> &mut Self {
2573        self.compiler_config.clif_dir = Some(path.to_path_buf());
2574        self
2575    }
2576
2577    /// Configures whether, when on macOS, Mach ports are used for exception
2578    /// handling instead of traditional Unix-based signal handling.
2579    ///
2580    /// WebAssembly traps in Wasmtime are implemented with native faults, for
2581    /// example a `SIGSEGV` will occur when a WebAssembly guest accesses
2582    /// out-of-bounds memory. Handling this can be configured to either use Unix
2583    /// signals or Mach ports on macOS. By default Mach ports are used.
2584    ///
2585    /// Mach ports enable Wasmtime to work by default with foreign
2586    /// error-handling systems such as breakpad which also use Mach ports to
2587    /// handle signals. In this situation Wasmtime will continue to handle guest
2588    /// faults gracefully while any non-guest faults will get forwarded to
2589    /// process-level handlers such as breakpad. Some more background on this
2590    /// can be found in #2456.
2591    ///
2592    /// A downside of using mach ports, however, is that they don't interact
2593    /// well with `fork()`. Forking a Wasmtime process on macOS will produce a
2594    /// child process that cannot successfully run WebAssembly. In this
2595    /// situation traditional Unix signal handling should be used as that's
2596    /// inherited and works across forks.
2597    ///
2598    /// If your embedding wants to use a custom error handler which leverages
2599    /// Mach ports and you additionally wish to `fork()` the process and use
2600    /// Wasmtime in the child process that's not currently possible. Please
2601    /// reach out to us if you're in this bucket!
2602    ///
2603    /// This option defaults to `true`, using Mach ports by default.
2604    pub fn macos_use_mach_ports(&mut self, mach_ports: bool) -> &mut Self {
2605        self.macos_use_mach_ports = mach_ports;
2606        self
2607    }
2608
2609    /// Configures an embedder-provided function, `detect`, which is used to
2610    /// determine if an ISA-specific feature is available on the current host.
2611    ///
2612    /// This function is used to verify that any features enabled for a compiler
2613    /// backend, such as AVX support on x86\_64, are also available on the host.
2614    /// It is undefined behavior to execute an AVX instruction on a host that
2615    /// doesn't support AVX instructions, for example.
2616    ///
2617    /// When the `std` feature is active on this crate then this function is
2618    /// configured to a default implementation that uses the standard library's
2619    /// feature detection. When the `std` feature is disabled then there is no
2620    /// default available and this method must be called to configure a feature
2621    /// probing function.
2622    ///
2623    /// The `detect` function provided is given a string name of an ISA feature.
2624    /// The function should then return:
2625    ///
2626    /// * `Some(true)` - indicates that the feature was found on the host and it
2627    ///   is supported.
2628    /// * `Some(false)` - the feature name was recognized but it was not
2629    ///   detected on the host, for example the CPU is too old.
2630    /// * `None` - the feature name was not recognized and it's not known
2631    ///   whether it's on the host or not.
2632    ///
2633    /// Feature names passed to `detect` match the same feature name used in the
2634    /// Rust standard library. For example `"sse4.2"` is used on x86\_64.
2635    ///
2636    /// # Unsafety
2637    ///
2638    /// This function is `unsafe` because it is undefined behavior to execute
2639    /// instructions that a host does not support. This means that the result of
2640    /// `detect` must be correct for memory safe execution at runtime.
    pub unsafe fn detect_host_feature(&mut self, detect: fn(&str) -> Option<bool>) -> &mut Self {
        // The callback is stored as-is; the accuracy of its answers is the
        // caller's responsibility (see "Unsafety" above).
        self.detect_host_feature = Some(detect);
        self
    }
2645
2646    /// Configures Wasmtime to not use signals-based trap handlers, for example
2647    /// disables `SIGILL` and `SIGSEGV` handler registration on Unix platforms.
2648    ///
2649    /// > **Note:** this option has important performance ramifications, be sure
2650    /// > to understand the implications. Wasm programs have been measured to
2651    /// > run up to 2x slower when signals-based traps are disabled.
2652    ///
2653    /// Wasmtime will by default leverage signals-based trap handlers (or the
2654    /// platform equivalent, for example "vectored exception handlers" on
2655    /// Windows) to make generated code more efficient. For example, when
2656    /// Wasmtime can use signals-based traps, it can elide explicit bounds
2657    /// checks for Wasm linear memory accesses, instead relying on virtual
2658    /// memory guard pages to raise a `SIGSEGV` (on Unix) for out-of-bounds
2659    /// accesses, which Wasmtime's runtime then catches and handles. Another
2660    /// example is divide-by-zero: with signals-based traps, Wasmtime can let
2661    /// the hardware raise a trap when the divisor is zero. Without
2662    /// signals-based traps, Wasmtime must explicitly emit additional
2663    /// instructions to check for zero and conditionally branch to a trapping
2664    /// code path.
2665    ///
2666    /// Some environments however may not have access to signal handlers. For
2667    /// example embedded scenarios may not support virtual memory. Other
2668    /// environments where Wasmtime is embedded within the surrounding
2669    /// environment may require that new signal handlers aren't registered due
2670    /// to the global nature of signal handlers. This option exists to disable
2671    /// the signal handler registration when required for these scenarios.
2672    ///
2673    /// When signals-based trap handlers are disabled, then Wasmtime and its
2674    /// generated code will *never* rely on segfaults or other
2675    /// signals. Generated code will be slower because bounds must be explicitly
2676    /// checked along with other conditions like division by zero.
2677    ///
2678    /// The following additional factors can also affect Wasmtime's ability to
2679    /// elide explicit bounds checks and leverage signals-based traps:
2680    ///
2681    /// * The [`Config::memory_reservation`] and [`Config::memory_guard_size`]
2682    ///   settings
2683    /// * The index type of the linear memory (e.g. 32-bit or 64-bit)
2684    /// * The page size of the linear memory
2685    ///
2686    /// When this option is disabled, the
2687    /// `enable_heap_access_spectre_mitigation` and
2688    /// `enable_table_access_spectre_mitigation` Cranelift settings must also be
2689    /// disabled. This means that generated code must have spectre mitigations
2690    /// disabled. This is because spectre mitigations rely on faults from
2691    /// loading from the null address to implement bounds checks.
2692    ///
2693    /// This option defaults to `true`: signals-based trap handlers are enabled
2694    /// by default.
2695    ///
2696    /// > **Note:** Disabling this option is not compatible with the Winch
2697    /// > compiler.
    pub fn signals_based_traps(&mut self, enable: bool) -> &mut Self {
        // Stored as `Some(..)` so this explicit choice is distinguishable from
        // the unset state (the documented default is `true`).
        self.tunables.signals_based_traps = Some(enable);
        self
    }
2702
2703    /// Enable/disable GC support in Wasmtime entirely.
2704    ///
2705    /// This flag can be used to gate whether GC infrastructure is enabled or
2706    /// initialized in Wasmtime at all. Wasmtime's GC implementation is required
2707    /// for the [`Self::wasm_gc`] proposal, [`Self::wasm_function_references`],
    /// and [`Self::wasm_exceptions`] at this time. None of those proposals can
2709    /// be enabled without also having this option enabled.
2710    ///
2711    /// This option defaults to whether the crate `gc` feature is enabled or
2712    /// not.
    pub fn gc_support(&mut self, enable: bool) -> &mut Self {
        // GC support is modeled as the `GC_TYPES` wasm feature flag rather
        // than as a separate configuration field.
        self.wasm_feature(WasmFeatures::GC_TYPES, enable)
    }
2716
2717    /// Explicitly indicate or not whether the host is using a hardware float
2718    /// ABI on x86 targets.
2719    ///
2720    /// This configuration option is only applicable on the
2721    /// `x86_64-unknown-none` Rust target and has no effect on other host
2722    /// targets. The `x86_64-unknown-none` Rust target does not support hardware
2723    /// floats by default and uses a "soft float" implementation and ABI. This
2724    /// means that `f32`, for example, is passed in a general-purpose register
2725    /// between functions instead of a floating-point register. This does not
2726    /// match Cranelift's ABI for `f32` where it's passed in floating-point
2727    /// registers.  Cranelift does not have support for a "soft float"
2728    /// implementation where all floating-point operations are lowered to
2729    /// libcalls.
2730    ///
2731    /// This means that for the `x86_64-unknown-none` target the ABI between
2732    /// Wasmtime's libcalls and the host is incompatible when floats are used.
2733    /// This further means that, by default, Wasmtime is unable to load native
2734    /// code when compiled to the `x86_64-unknown-none` target. The purpose of
2735    /// this option is to explicitly allow loading code and bypass this check.
2736    ///
2737    /// Setting this configuration option to `true` indicates that either:
2738    /// (a) the Rust target is compiled with the hard-float ABI manually via
2739    /// `-Zbuild-std` and a custom target JSON configuration, or (b) sufficient
2740    /// x86 features have been enabled in the compiler such that float libcalls
2741    /// will not be used in Wasmtime. For (a) there is no way in Rust at this
2742    /// time to detect whether a hard-float or soft-float ABI is in use on
2743    /// stable Rust, so this manual opt-in is required. For (b) the only
2744    /// instance where Wasmtime passes a floating-point value in a register
2745    /// between the host and compiled wasm code is with libcalls.
2746    ///
2747    /// Float-based libcalls are only used when the compilation target for a
2748    /// wasm module has insufficient target features enabled for native
2749    /// support. For example SSE4.1 is required for the `f32.ceil` WebAssembly
2750    /// instruction to be compiled to a native instruction. If SSE4.1 is not
2751    /// enabled then `f32.ceil` is translated to a "libcall" which is
2752    /// implemented on the host. Float-based libcalls can be avoided with
2753    /// sufficient target features enabled, for example:
2754    ///
2755    /// * `self.cranelift_flag_enable("has_sse3")`
2756    /// * `self.cranelift_flag_enable("has_ssse3")`
2757    /// * `self.cranelift_flag_enable("has_sse41")`
2758    /// * `self.cranelift_flag_enable("has_sse42")`
2759    /// * `self.cranelift_flag_enable("has_fma")`
2760    ///
2761    /// Note that when these features are enabled Wasmtime will perform a
2762    /// runtime check to determine that the host actually has the feature
2763    /// present.
2764    ///
2765    /// For some more discussion see [#11506].
2766    ///
2767    /// [#11506]: https://github.com/bytecodealliance/wasmtime/issues/11506
2768    ///
2769    /// # Safety
2770    ///
2771    /// This method is not safe because it cannot be detected in Rust right now
2772    /// whether the host is compiled with a soft or hard float ABI. Additionally
2773    /// if the host is compiled with a soft float ABI disabling this check does
2774    /// not ensure that the wasm module in question has zero usage of floats
2775    /// in the boundary to the host.
2776    ///
2777    /// Safely using this method requires one of:
2778    ///
2779    /// * The host target is compiled to use hardware floats.
2780    /// * Wasm modules loaded are compiled with enough x86 Cranelift features
2781    ///   enabled to avoid float-related hostcalls.
    pub unsafe fn x86_float_abi_ok(&mut self, enable: bool) -> &mut Self {
        // Stored as `Some(..)` to record an explicit opt-in/opt-out from the
        // embedder, as opposed to the unset default behavior.
        self.x86_float_abi_ok = Some(enable);
        self
    }
2786}
2787
2788impl Default for Config {
2789    fn default() -> Config {
2790        Config::new()
2791    }
2792}
2793
2794impl fmt::Debug for Config {
2795    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2796        let mut f = f.debug_struct("Config");
2797
2798        // Not every flag in WasmFeatures can be enabled as part of creating
2799        // a Config. This impl gives a complete picture of all WasmFeatures
2800        // enabled, and doesn't require maintenance by hand (which has become out
2801        // of date in the past), at the cost of possible confusion for why
2802        // a flag in this set doesn't have a Config setter.
2803        let features = self.features();
2804        for flag in WasmFeatures::FLAGS.iter() {
2805            f.field(
2806                &format!("wasm_{}", flag.name().to_lowercase()),
2807                &features.contains(*flag.value()),
2808            );
2809        }
2810
2811        f.field("parallel_compilation", &self.parallel_compilation);
2812        #[cfg(any(feature = "cranelift", feature = "winch"))]
2813        {
2814            f.field("compiler_config", &self.compiler_config);
2815        }
2816
2817        self.tunables.format(&mut f);
2818        f.finish()
2819    }
2820}
2821
/// Possible Compilation strategies for a wasm module.
///
/// This is used as an argument to the [`Config::strategy`] method.
#[non_exhaustive]
#[derive(PartialEq, Eq, Clone, Debug, Copy)]
pub enum Strategy {
    /// An indicator that the compilation strategy should be automatically
    /// selected.
    ///
    /// This is generally what you want for most projects and indicates that the
    /// `wasmtime` crate itself should make the decision about what the best
    /// code generator for a wasm module is.
    ///
    /// Currently this always defaults to Cranelift, but the default value may
    /// change over time.
    Auto,

    /// Currently the default backend, Cranelift aims to be a reasonably fast
    /// code generator which generates high quality machine code.
    Cranelift,

    /// A baseline compiler for WebAssembly, currently under active development
    /// and not ready for production applications.
    Winch,
}
2847
#[cfg(any(feature = "winch", feature = "cranelift"))]
impl Strategy {
    /// Resolves `Auto` to a concrete strategy based on which compiler backends
    /// were enabled at compile time (preferring Cranelift), or returns the
    /// strategy unchanged if it's already concrete. Returns `None` when `Auto`
    /// is requested but no backend feature is available.
    fn not_auto(&self) -> Option<Strategy> {
        match self {
            Strategy::Auto if cfg!(feature = "cranelift") => Some(Strategy::Cranelift),
            Strategy::Auto if cfg!(feature = "winch") => Some(Strategy::Winch),
            Strategy::Auto => None,
            other => Some(*other),
        }
    }
}
2865
2866/// Possible garbage collector implementations for Wasm.
2867///
2868/// This is used as an argument to the [`Config::collector`] method.
2869///
2870/// The properties of Wasmtime's available collectors are summarized in the
2871/// following table:
2872///
2873/// | Collector                   | Collects Garbage[^1] | Latency[^2] | Throughput[^3] | Allocation Speed[^4] | Heap Utilization[^5] |
2874/// |-----------------------------|----------------------|-------------|----------------|----------------------|----------------------|
2875/// | `DeferredReferenceCounting` | Yes, but not cycles  | 🙂         | 🙁             | 😐                   | 😐                  |
2876/// | `Null`                      | No                   | 🙂         | 🙂             | 🙂                   | 🙂                  |
2877///
2878/// [^1]: Whether or not the collector is capable of collecting garbage and cyclic garbage.
2879///
2880/// [^2]: How long the Wasm program is paused during garbage
2881///       collections. Shorter is better. In general, better latency implies
2882///       worse throughput and vice versa.
2883///
2884/// [^3]: How fast the Wasm program runs when using this collector. Roughly
2885///       equivalent to the number of Wasm instructions executed per
2886///       second. Faster is better. In general, better throughput implies worse
2887///       latency and vice versa.
2888///
2889/// [^4]: How fast can individual objects be allocated?
2890///
2891/// [^5]: How many objects can the collector fit into N bytes of memory? That
2892///       is, how much space for bookkeeping and metadata does this collector
2893///       require? Less space taken up by metadata means more space for
2894///       additional objects. Reference counts are larger than mark bits and
2895///       free lists are larger than bump pointers, for example.
#[non_exhaustive]
#[derive(PartialEq, Eq, Clone, Debug, Copy)]
pub enum Collector {
    /// An indicator that the garbage collector should be automatically
    /// selected.
    ///
    /// This is generally what you want for most projects and indicates that the
    /// `wasmtime` crate itself should make the decision about what the best
    /// collector for a wasm module is.
    ///
    /// Currently this always defaults to the deferred reference-counting
    /// collector, but the default value may change over time.
    Auto,

    /// The deferred reference-counting collector.
    ///
    /// A reference-counting collector, generally trading improved latency for
    /// worsened throughput. However, to avoid the largest overheads of
    /// reference counting, it avoids manipulating reference counts for Wasm
    /// objects on the stack. Instead, it will hold a reference count for an
    /// over-approximation of all objects that are currently on the stack, trace
    /// the stack during collection to find the precise set of on-stack roots,
    /// and decrement the reference count of any object that was in the
    /// over-approximation but not the precise set. This improves throughput,
    /// compared to "pure" reference counting, by performing many fewer
    /// refcount-increment and -decrement operations. The cost is the increased
    /// latency associated with tracing the stack.
    ///
    /// This collector cannot currently collect cycles; they will leak until the
    /// GC heap's store is dropped.
    DeferredReferenceCounting,

    /// The null collector.
    ///
    /// This collector does not actually collect any garbage. It simply
    /// allocates objects until it runs out of memory, at which point further
    /// object allocation attempts will trap.
    ///
    /// This collector is useful for incredibly short-running Wasm instances
    /// where additionally you would rather halt an over-allocating Wasm program
    /// than spend time collecting its garbage to allow it to keep running. It
    /// is also useful for measuring the overheads associated with other
    /// collectors, as this collector imposes as close to zero throughput and
    /// latency overhead as possible.
    Null,
}
2942
2943impl Default for Collector {
2944    fn default() -> Collector {
2945        Collector::Auto
2946    }
2947}
2948
#[cfg(feature = "gc")]
impl Collector {
    /// Resolves `Auto` to a concrete collector based on which collector
    /// features were enabled at compile time (preferring the deferred
    /// reference-counting collector), or returns the collector unchanged if
    /// it's already concrete. Returns `None` when `Auto` is requested but no
    /// collector feature is available.
    fn not_auto(&self) -> Option<Collector> {
        match self {
            Collector::Auto => {
                if cfg!(feature = "gc-drc") {
                    Some(Collector::DeferredReferenceCounting)
                } else if cfg!(feature = "gc-null") {
                    Some(Collector::Null)
                } else {
                    None
                }
            }
            other => Some(*other),
        }
    }

    /// Like `not_auto`, but produces a descriptive error naming the missing
    /// Cargo feature when the requested (or auto-selected) collector was not
    /// compiled into this build.
    fn try_not_auto(&self) -> Result<Self> {
        match self.not_auto() {
            // The paired `cfg`/`cfg(not)` arms below ensure a resolved
            // collector is only `Ok` when its crate feature was compiled in.
            #[cfg(feature = "gc-drc")]
            Some(c @ Collector::DeferredReferenceCounting) => Ok(c),
            #[cfg(not(feature = "gc-drc"))]
            Some(Collector::DeferredReferenceCounting) => bail!(
                "cannot create an engine using the deferred reference-counting \
                 collector because the `gc-drc` feature was not enabled at \
                 compile time",
            ),

            #[cfg(feature = "gc-null")]
            Some(c @ Collector::Null) => Ok(c),
            #[cfg(not(feature = "gc-null"))]
            Some(Collector::Null) => bail!(
                "cannot create an engine using the null collector because \
                 the `gc-null` feature was not enabled at compile time",
            ),

            // `not_auto` never returns `Auto` itself.
            Some(Collector::Auto) => unreachable!(),

            None => bail!(
                "cannot create an engine with GC support when none of the \
                 collectors are available; enable one of the following \
                 features: `gc-drc`, `gc-null`",
            ),
        }
    }
}
2995
/// Possible optimization levels for the Cranelift codegen backend.
#[non_exhaustive]
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum OptLevel {
    /// No optimizations performed, minimizes compilation time by disabling most
    /// optimizations.
    None,
    /// Generates the fastest possible code, but may take longer to compile.
    Speed,
    /// Similar to `speed`, but also performs transformations aimed at reducing
    /// code size.
    SpeedAndSize,
}
3009
/// Possible register allocator algorithms for the Cranelift codegen backend.
#[non_exhaustive]
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum RegallocAlgorithm {
    /// Generates the fastest possible code, but may take longer to compile.
    ///
    /// This algorithm performs "backtracking", which means that it may
    /// undo its earlier work and retry as it discovers conflicts. This
    /// results in better register utilization, producing fewer spills
    /// and moves, but can cause super-linear compile times.
    Backtracking,
    /// Generates acceptable code very quickly.
    ///
    /// This algorithm performs a single pass through the code,
    /// guaranteed to work in linear time.  (Note that the rest of
    /// Cranelift is not necessarily guaranteed to run in linear time,
    /// however.) It cannot undo earlier decisions, and it cannot
    /// foresee constraints or issues that may occur further ahead in
    /// the code, so the code may have more spills and moves as a
    /// result.
    SinglePass,
}
3032
/// Select which profiling technique to support.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum ProfilingStrategy {
    /// No profiler support.
    None,

    /// Collect function name information as the "perf map" file format, used with `perf` on Linux.
    PerfMap,

    /// Collect profiling info for "jitdump" file format, used with `perf` on
    /// Linux.
    JitDump,

    /// Collect profiling info using the "ittapi" library, used with `VTune` on
    /// Linux.
    VTune,

    /// Support for profiling Pulley, Wasmtime's interpreter. Note that enabling
    /// this at runtime requires enabling the `profile-pulley` Cargo feature at
    /// compile time.
    Pulley,
}
3054
/// Select how wasm backtrace detailed information is handled.
#[derive(Debug, Clone, Copy)]
pub enum WasmBacktraceDetails {
    /// Support is unconditionally enabled and Wasmtime will parse and read
    /// debug information.
    Enable,

    /// Support is disabled, and Wasmtime will not parse debug information for
    /// backtrace details.
    Disable,

    /// Support for backtrace details is conditional on the
    /// `WASMTIME_BACKTRACE_DETAILS` environment variable.
    Environment,
}
3070
/// Describes the tri-state configuration of features such as MPK or
/// PAGEMAP_SCAN.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)]
pub enum Enabled {
    /// Enable this feature if it's detected on the host system, otherwise leave
    /// it disabled.
    Auto,
    /// Enable this feature and fail configuration if the feature is not
    /// detected on the host system.
    Yes,
    /// Do not enable this feature, even if the host system supports it.
    No,
}
3083
3084/// Configuration options used with [`InstanceAllocationStrategy::Pooling`] to
3085/// change the behavior of the pooling instance allocator.
3086///
3087/// This structure has a builder-style API in the same manner as [`Config`] and
3088/// is configured with [`Config::allocation_strategy`].
3089///
3090/// Note that usage of the pooling allocator does not affect compiled
3091/// WebAssembly code. Compiled `*.cwasm` files, for example, are usable both
3092/// with and without the pooling allocator.
3093///
3094/// ## Advantages of Pooled Allocation
3095///
3096/// The main benefit of the pooling allocator is to make WebAssembly
3097/// instantiation both faster and more scalable in terms of parallelism.
3098/// Allocation is faster because virtual memory is already configured and ready
3099/// to go within the pool, there's no need to [`mmap`] (for example on Unix) a
/// new region and configure it with guard pages. By avoiding [`mmap`] this
/// also avoids whole-process virtual memory locks, which can improve
/// scalability and performance.
3103///
3104/// Additionally with pooled allocation it's possible to create "affine slots"
/// to a particular WebAssembly module or component over time. For example if
/// the same module is instantiated multiple times then the pooling allocator
/// will, by default, attempt to reuse the same slot. This means that the slot
/// has been pre-configured and can retain virtual memory mappings for a
/// copy-on-write image, for example (see [`Config::memory_init_cow`] for more
/// information).
3110/// This means that in a steady state instance deallocation is a single
3111/// [`madvise`] to reset linear memory to its original contents followed by a
3112/// single (optional) [`mprotect`] during the next instantiation to shrink
3113/// memory back to its original size. Compared to non-pooled allocation this
3114/// avoids the need to [`mmap`] a new region of memory, [`munmap`] it, and
3115/// [`mprotect`] regions too.
3116///
3117/// Another benefit of pooled allocation is that it's possible to configure
3118/// things such that no virtual memory management is required at all in a steady
3119/// state. For example a pooling allocator can be configured with:
3120///
3121/// * [`Config::memory_init_cow`] disabled
3122/// * [`Config::memory_guard_size`] disabled
3123/// * [`Config::memory_reservation`] shrunk to minimal size
3124/// * [`PoolingAllocationConfig::table_keep_resident`] sufficiently large
3125/// * [`PoolingAllocationConfig::linear_memory_keep_resident`] sufficiently large
3126///
3127/// With all these options in place no virtual memory tricks are used at all and
3128/// everything is manually managed by Wasmtime (for example resetting memory is
3129/// a `memset(0)`). This is not as fast in a single-threaded scenario but can
3130/// provide benefits in high-parallelism situations as no virtual memory locks
3131/// or IPIs need happen.
3132///
3133/// ## Disadvantages of Pooled Allocation
3134///
3135/// Despite the above advantages to instantiation performance the pooling
3136/// allocator is not enabled by default in Wasmtime. One reason is that the
3137/// performance advantages are not necessarily portable, for example while the
3138/// pooling allocator works on Windows it has not been tuned for performance on
3139/// Windows in the same way it has on Linux.
3140///
3141/// Additionally the main cost of the pooling allocator is that it requires a
3142/// very large reservation of virtual memory (on the order of most of the
3143/// addressable virtual address space). WebAssembly 32-bit linear memories in
3144/// Wasmtime are, by default 4G address space reservations with a small guard
3145/// region both before and after the linear memory. Memories in the pooling
3146/// allocator are contiguous which means that we only need a guard after linear
3147/// memory because the previous linear memory's slot post-guard is our own
3148/// pre-guard. This means that, by default, the pooling allocator uses roughly
3149/// 4G of virtual memory per WebAssembly linear memory slot. 4G of virtual
3150/// memory is 32 bits of a 64-bit address. Many 64-bit systems can only
3151/// actually use 48-bit addresses by default (although this can be extended on
3152/// architectures nowadays too), and of those 48 bits one of them is reserved
3153/// to indicate kernel-vs-userspace. This leaves 47-32=15 bits left,
3154/// meaning you can only have at most 32k slots of linear memories on many
3155/// systems by default. This is a relatively small number and shows how the
3156/// pooling allocator can quickly exhaust all of virtual memory.
3157///
3158/// Another disadvantage of the pooling allocator is that it may keep memory
3159/// alive when nothing is using it. A previously used slot for an instance might
3160/// have paged-in memory that will not get paged out until the
3161/// [`Engine`](crate::Engine) owning the pooling allocator is dropped. While
3162/// suitable for some applications this behavior may not be suitable for all
3163/// applications.
3164///
3165/// Finally the last disadvantage of the pooling allocator is that the
3166/// configuration values for the maximum number of instances, memories, tables,
3167/// etc, must all be fixed up-front. There's not always a clear answer as to
3168/// what these values should be so not all applications may be able to work
3169/// with this constraint.
3170///
3171/// [`madvise`]: https://man7.org/linux/man-pages/man2/madvise.2.html
3172/// [`mprotect`]: https://man7.org/linux/man-pages/man2/mprotect.2.html
3173/// [`mmap`]: https://man7.org/linux/man-pages/man2/mmap.2.html
3174/// [`munmap`]: https://man7.org/linux/man-pages/man2/munmap.2.html
#[cfg(feature = "pooling-allocator")]
#[derive(Debug, Clone, Default)]
pub struct PoolingAllocationConfig {
    // Thin builder over the runtime's internal allocator configuration; the
    // setter methods on this type write through to fields of this config.
    config: crate::runtime::vm::PoolingInstanceAllocatorConfig,
}
3180
3181#[cfg(feature = "pooling-allocator")]
3182impl PoolingAllocationConfig {
3183    /// Returns a new configuration builder with all default settings
3184    /// configured.
3185    pub fn new() -> PoolingAllocationConfig {
3186        PoolingAllocationConfig::default()
3187    }
3188
3189    /// Configures the maximum number of "unused warm slots" to retain in the
3190    /// pooling allocator.
3191    ///
3192    /// The pooling allocator operates over slots to allocate from, and each
3193    /// slot is considered "cold" if it's never been used before or "warm" if
3194    /// it's been used by some module in the past. Slots in the pooling
3195    /// allocator additionally track an "affinity" flag to a particular core
3196    /// wasm module. When a module is instantiated into a slot then the slot is
3197    /// considered affine to that module, even after the instance has been
3198    /// deallocated.
3199    ///
3200    /// When a new instance is created then a slot must be chosen, and the
3201    /// current algorithm for selecting a slot is:
3202    ///
3203    /// * If there are slots that are affine to the module being instantiated,
3204    ///   then the most recently used slot is selected to be allocated from.
3205    ///   This is done to improve reuse of resources such as memory mappings and
3206    ///   additionally try to benefit from temporal locality for things like
3207    ///   caches.
3208    ///
3209    /// * Otherwise if there are more than N affine slots to other modules, then
3210    ///   one of those affine slots is chosen to be allocated. The slot chosen
3211    ///   is picked on a least-recently-used basis.
3212    ///
3213    /// * Finally, if there are less than N affine slots to other modules, then
3214    ///   the non-affine slots are allocated from.
3215    ///
3216    /// This setting, `max_unused_warm_slots`, is the value for N in the above
3217    /// algorithm. The purpose of this setting is to have a knob over the RSS
3218    /// impact of "unused slots" for a long-running wasm server.
3219    ///
3220    /// If this setting is set to 0, for example, then affine slots are
3221    /// aggressively reused on a least-recently-used basis. A "cold" slot is
3222    /// only used if there are no affine slots available to allocate from. This
3223    /// means that the set of slots used over the lifetime of a program is the
3224    /// same as the maximum concurrent number of wasm instances.
3225    ///
3226    /// If this setting is set to infinity, however, then cold slots are
3227    /// prioritized to be allocated from. This means that the set of slots used
3228    /// over the lifetime of a program will approach
3229    /// [`PoolingAllocationConfig::total_memories`], or the maximum number of
3230    /// slots in the pooling allocator.
3231    ///
3232    /// Wasmtime does not aggressively decommit all resources associated with a
3233    /// slot when the slot is not in use. For example the
3234    /// [`PoolingAllocationConfig::linear_memory_keep_resident`] option can be
3235    /// used to keep memory associated with a slot, even when it's not in use.
3236    /// This means that the total set of used slots in the pooling instance
3237    /// allocator can impact the overall RSS usage of a program.
3238    ///
3239    /// The default value for this option is `100`.
    pub fn max_unused_warm_slots(&mut self, max: u32) -> &mut Self {
        // This is the "N" in the slot-selection algorithm documented above;
        // defaults to 100.
        self.config.max_unused_warm_slots = max;
        self
    }
3244
3245    /// The target number of decommits to do per batch.
3246    ///
3247    /// This is not precise, as we can queue up decommits at times when we
3248    /// aren't prepared to immediately flush them, and so we may go over this
3249    /// target size occasionally.
3250    ///
3251    /// A batch size of one effectively disables batching.
3252    ///
3253    /// Defaults to `1`.
    pub fn decommit_batch_size(&mut self, batch_size: usize) -> &mut Self {
        // A batch size of 1 (the default) effectively disables batching.
        self.config.decommit_batch_size = batch_size;
        self
    }
3258
3259    /// How much memory, in bytes, to keep resident for async stacks allocated
3260    /// with the pooling allocator.
3261    ///
3262    /// When [`PoolingAllocationConfig::async_stack_zeroing`] is enabled then
3263    /// Wasmtime will reset the contents of async stacks back to zero upon
3264    /// deallocation. This option can be used to perform the zeroing operation
3265    /// with `memset` up to a certain threshold of bytes instead of using system
3266    /// calls to reset the stack to zero.
3267    ///
3268    /// Note that when using this option the memory with async stacks will
3269    /// never be decommitted.
3270    #[cfg(feature = "async")]
3271    pub fn async_stack_keep_resident(&mut self, size: usize) -> &mut Self {
3272        self.config.async_stack_keep_resident = size;
3273        self
3274    }
3275
3276    /// How much memory, in bytes, to keep resident for each linear memory
3277    /// after deallocation.
3278    ///
3279    /// This option is only applicable on Linux and has no effect on other
3280    /// platforms.
3281    ///
3282    /// By default Wasmtime will use `madvise` to reset the entire contents of
3283    /// linear memory back to zero when a linear memory is deallocated. This
3284    /// option can be used to use `memset` instead to set memory back to zero
3285    /// which can, in some configurations, reduce the number of page faults
3286    /// taken when a slot is reused.
3287    pub fn linear_memory_keep_resident(&mut self, size: usize) -> &mut Self {
3288        self.config.linear_memory_keep_resident = size;
3289        self
3290    }
3291
3292    /// How much memory, in bytes, to keep resident for each table after
3293    /// deallocation.
3294    ///
3295    /// This option is only applicable on Linux and has no effect on other
3296    /// platforms.
3297    ///
3298    /// This option is the same as
3299    /// [`PoolingAllocationConfig::linear_memory_keep_resident`] except that it
3300    /// is applicable to tables instead.
3301    pub fn table_keep_resident(&mut self, size: usize) -> &mut Self {
3302        self.config.table_keep_resident = size;
3303        self
3304    }
3305
3306    /// The maximum number of concurrent component instances supported (default
3307    /// is `1000`).
3308    ///
3309    /// This provides an upper-bound on the total size of component
3310    /// metadata-related allocations, along with
3311    /// [`PoolingAllocationConfig::max_component_instance_size`]. The upper bound is
3312    ///
3313    /// ```text
3314    /// total_component_instances * max_component_instance_size
3315    /// ```
3316    ///
3317    /// where `max_component_instance_size` is rounded up to the size and alignment
3318    /// of the internal representation of the metadata.
3319    pub fn total_component_instances(&mut self, count: u32) -> &mut Self {
3320        self.config.limits.total_component_instances = count;
3321        self
3322    }
3323
3324    /// The maximum size, in bytes, allocated for a component instance's
3325    /// `VMComponentContext` metadata.
3326    ///
3327    /// The [`wasmtime::component::Instance`][crate::component::Instance] type
3328    /// has a static size but its internal `VMComponentContext` is dynamically
3329    /// sized depending on the component being instantiated. This size limit
3330    /// loosely correlates to the size of the component, taking into account
3331    /// factors such as:
3332    ///
3333    /// * number of lifted and lowered functions,
3334    /// * number of memories
3335    /// * number of inner instances
3336    /// * number of resources
3337    ///
3338    /// If the allocated size per instance is too small then instantiation of a
3339    /// module will fail at runtime with an error indicating how many bytes were
3340    /// needed.
3341    ///
3342    /// The default value for this is 1MiB.
3343    ///
3344    /// This provides an upper-bound on the total size of component
3345    /// metadata-related allocations, along with
3346    /// [`PoolingAllocationConfig::total_component_instances`]. The upper bound is
3347    ///
3348    /// ```text
3349    /// total_component_instances * max_component_instance_size
3350    /// ```
3351    ///
3352    /// where `max_component_instance_size` is rounded up to the size and alignment
3353    /// of the internal representation of the metadata.
3354    pub fn max_component_instance_size(&mut self, size: usize) -> &mut Self {
3355        self.config.limits.component_instance_size = size;
3356        self
3357    }
3358
3359    /// The maximum number of core instances a single component may contain
3360    /// (default is unlimited).
3361    ///
3362    /// This method (along with
3363    /// [`PoolingAllocationConfig::max_memories_per_component`],
3364    /// [`PoolingAllocationConfig::max_tables_per_component`], and
3365    /// [`PoolingAllocationConfig::max_component_instance_size`]) allows you to cap
3366    /// the amount of resources a single component allocation consumes.
3367    ///
3368    /// If a component will instantiate more core instances than `count`, then
3369    /// the component will fail to instantiate.
3370    pub fn max_core_instances_per_component(&mut self, count: u32) -> &mut Self {
3371        self.config.limits.max_core_instances_per_component = count;
3372        self
3373    }
3374
3375    /// The maximum number of Wasm linear memories that a single component may
3376    /// transitively contain (default is unlimited).
3377    ///
3378    /// This method (along with
3379    /// [`PoolingAllocationConfig::max_core_instances_per_component`],
3380    /// [`PoolingAllocationConfig::max_tables_per_component`], and
3381    /// [`PoolingAllocationConfig::max_component_instance_size`]) allows you to cap
3382    /// the amount of resources a single component allocation consumes.
3383    ///
3384    /// If a component transitively contains more linear memories than `count`,
3385    /// then the component will fail to instantiate.
3386    pub fn max_memories_per_component(&mut self, count: u32) -> &mut Self {
3387        self.config.limits.max_memories_per_component = count;
3388        self
3389    }
3390
    /// The maximum number of tables that a single component may transitively
    /// contain (default is unlimited).
    ///
    /// This method (along with
    /// [`PoolingAllocationConfig::max_core_instances_per_component`],
    /// [`PoolingAllocationConfig::max_memories_per_component`], and
    /// [`PoolingAllocationConfig::max_component_instance_size`]) allows you to cap
    /// the amount of resources a single component allocation consumes.
    ///
    /// If a component transitively contains more tables than `count`, then
    /// the component will fail to instantiate.
    pub fn max_tables_per_component(&mut self, count: u32) -> &mut Self {
        self.config.limits.max_tables_per_component = count;
        self
    }
3406
3407    /// The maximum number of concurrent Wasm linear memories supported (default
3408    /// is `1000`).
3409    ///
3410    /// This value has a direct impact on the amount of memory allocated by the pooling
3411    /// instance allocator.
3412    ///
3413    /// The pooling instance allocator allocates a memory pool, where each entry
3414    /// in the pool contains the reserved address space for each linear memory
3415    /// supported by an instance.
3416    ///
3417    /// The memory pool will reserve a large quantity of host process address
3418    /// space to elide the bounds checks required for correct WebAssembly memory
3419    /// semantics. Even with 64-bit address spaces, the address space is limited
3420    /// when dealing with a large number of linear memories.
3421    ///
3422    /// For example, on Linux x86_64, the userland address space limit is 128
3423    /// TiB. That might seem like a lot, but each linear memory will *reserve* 6
3424    /// GiB of space by default.
3425    pub fn total_memories(&mut self, count: u32) -> &mut Self {
3426        self.config.limits.total_memories = count;
3427        self
3428    }
3429
3430    /// The maximum number of concurrent tables supported (default is `1000`).
3431    ///
3432    /// This value has a direct impact on the amount of memory allocated by the
3433    /// pooling instance allocator.
3434    ///
3435    /// The pooling instance allocator allocates a table pool, where each entry
3436    /// in the pool contains the space needed for each WebAssembly table
3437    /// supported by an instance (see `table_elements` to control the size of
3438    /// each table).
3439    pub fn total_tables(&mut self, count: u32) -> &mut Self {
3440        self.config.limits.total_tables = count;
3441        self
3442    }
3443
3444    /// The maximum number of execution stacks allowed for asynchronous
3445    /// execution, when enabled (default is `1000`).
3446    ///
3447    /// This value has a direct impact on the amount of memory allocated by the
3448    /// pooling instance allocator.
3449    #[cfg(feature = "async")]
3450    pub fn total_stacks(&mut self, count: u32) -> &mut Self {
3451        self.config.limits.total_stacks = count;
3452        self
3453    }
3454
3455    /// The maximum number of concurrent core instances supported (default is
3456    /// `1000`).
3457    ///
3458    /// This provides an upper-bound on the total size of core instance
3459    /// metadata-related allocations, along with
3460    /// [`PoolingAllocationConfig::max_core_instance_size`]. The upper bound is
3461    ///
3462    /// ```text
3463    /// total_core_instances * max_core_instance_size
3464    /// ```
3465    ///
3466    /// where `max_core_instance_size` is rounded up to the size and alignment of
3467    /// the internal representation of the metadata.
3468    pub fn total_core_instances(&mut self, count: u32) -> &mut Self {
3469        self.config.limits.total_core_instances = count;
3470        self
3471    }
3472
3473    /// The maximum size, in bytes, allocated for a core instance's `VMContext`
3474    /// metadata.
3475    ///
3476    /// The [`Instance`][crate::Instance] type has a static size but its
3477    /// `VMContext` metadata is dynamically sized depending on the module being
3478    /// instantiated. This size limit loosely correlates to the size of the Wasm
3479    /// module, taking into account factors such as:
3480    ///
3481    /// * number of functions
3482    /// * number of globals
3483    /// * number of memories
3484    /// * number of tables
3485    /// * number of function types
3486    ///
3487    /// If the allocated size per instance is too small then instantiation of a
3488    /// module will fail at runtime with an error indicating how many bytes were
3489    /// needed.
3490    ///
3491    /// The default value for this is 1MiB.
3492    ///
3493    /// This provides an upper-bound on the total size of core instance
3494    /// metadata-related allocations, along with
3495    /// [`PoolingAllocationConfig::total_core_instances`]. The upper bound is
3496    ///
3497    /// ```text
3498    /// total_core_instances * max_core_instance_size
3499    /// ```
3500    ///
3501    /// where `max_core_instance_size` is rounded up to the size and alignment of
3502    /// the internal representation of the metadata.
3503    pub fn max_core_instance_size(&mut self, size: usize) -> &mut Self {
3504        self.config.limits.core_instance_size = size;
3505        self
3506    }
3507
3508    /// The maximum number of defined tables for a core module (default is `1`).
3509    ///
3510    /// This value controls the capacity of the `VMTableDefinition` table in
3511    /// each instance's `VMContext` structure.
3512    ///
3513    /// The allocated size of the table will be `tables *
3514    /// sizeof(VMTableDefinition)` for each instance regardless of how many
3515    /// tables are defined by an instance's module.
3516    pub fn max_tables_per_module(&mut self, tables: u32) -> &mut Self {
3517        self.config.limits.max_tables_per_module = tables;
3518        self
3519    }
3520
3521    /// The maximum table elements for any table defined in a module (default is
3522    /// `20000`).
3523    ///
3524    /// If a table's minimum element limit is greater than this value, the
3525    /// module will fail to instantiate.
3526    ///
3527    /// If a table's maximum element limit is unbounded or greater than this
3528    /// value, the maximum will be `table_elements` for the purpose of any
3529    /// `table.grow` instruction.
3530    ///
3531    /// This value is used to reserve the maximum space for each supported
3532    /// table; table elements are pointer-sized in the Wasmtime runtime.
3533    /// Therefore, the space reserved for each instance is `tables *
3534    /// table_elements * sizeof::<*const ()>`.
3535    pub fn table_elements(&mut self, elements: usize) -> &mut Self {
3536        self.config.limits.table_elements = elements;
3537        self
3538    }
3539
3540    /// The maximum number of defined linear memories for a module (default is
3541    /// `1`).
3542    ///
3543    /// This value controls the capacity of the `VMMemoryDefinition` table in
3544    /// each core instance's `VMContext` structure.
3545    ///
3546    /// The allocated size of the table will be `memories *
3547    /// sizeof(VMMemoryDefinition)` for each core instance regardless of how
3548    /// many memories are defined by the core instance's module.
3549    pub fn max_memories_per_module(&mut self, memories: u32) -> &mut Self {
3550        self.config.limits.max_memories_per_module = memories;
3551        self
3552    }
3553
    /// The maximum byte size that any WebAssembly linear memory may grow to.
    ///
    /// This option defaults to 4 GiB meaning that for 32-bit linear memories
    /// there are no restrictions. 64-bit linear memories will not be allowed to
    /// grow beyond 4 GiB by default.
    ///
    /// If a memory's minimum size is greater than this value, the module will
    /// fail to instantiate.
    ///
    /// If a memory's maximum size is unbounded or greater than this value, the
    /// maximum will be `max_memory_size` for the purpose of any `memory.grow`
    /// instruction.
    ///
    /// This value is used to control the maximum accessible space for each
    /// linear memory of a core instance. This can be thought of as a simple
    /// mechanism like [`Store::limiter`](crate::Store::limiter) to limit memory
    /// at runtime. This value can also affect striping/coloring behavior when
    /// used in conjunction with
    /// [`memory_protection_keys`](PoolingAllocationConfig::memory_protection_keys).
    ///
    /// The virtual memory reservation size of each linear memory is controlled
    /// by the [`Config::memory_reservation`] setting and this method's
    /// configuration cannot exceed [`Config::memory_reservation`].
    pub fn max_memory_size(&mut self, bytes: usize) -> &mut Self {
        self.config.limits.max_memory_size = bytes;
        self
    }
3581
    /// Configures whether memory protection keys (MPK) should be used for more
    /// efficient layout of pool-allocated memories.
    ///
    /// When using the pooling allocator (see [`Config::allocation_strategy`],
    /// [`InstanceAllocationStrategy::Pooling`]), memory protection keys can
    /// reduce the total amount of allocated virtual memory by eliminating guard
    /// regions between WebAssembly memories in the pool. It does so by
    /// "coloring" memory regions with different memory keys and setting which
    /// regions are accessible each time execution switches from host to guest
    /// (or vice versa).
    ///
    /// Leveraging MPK requires configuring a smaller-than-default
    /// [`max_memory_size`](PoolingAllocationConfig::max_memory_size) to enable
    /// this coloring/striping behavior. For example embeddings might want to
    /// reduce the default 4G allowance to 128M.
    ///
    /// MPK is only available on Linux (called `pku` there) and recent x86
    /// systems; we check for MPK support at runtime by examining the `CPUID`
    /// register. This configuration setting can be in three states:
    ///
    /// - `auto`: if MPK support is available the guard regions are removed; if
    ///   not, the guard regions remain
    /// - `yes`: use MPK to eliminate guard regions; fail if MPK is not
    ///   supported
    /// - `no`: never use MPK
    ///
    /// By default this value is `no`, but may become `auto` in future
    /// releases.
    ///
    /// __WARNING__: this configuration option is still experimental--use at
    /// your own risk! MPK uses kernel and CPU features to protect memory
    /// regions; you may observe segmentation faults if anything is
    /// misconfigured.
    #[cfg(feature = "memory-protection-keys")]
    pub fn memory_protection_keys(&mut self, enable: Enabled) -> &mut Self {
        self.config.memory_protection_keys = enable;
        self
    }
3620
3621    /// Sets an upper limit on how many memory protection keys (MPK) Wasmtime
3622    /// will use.
3623    ///
3624    /// This setting is only applicable when
3625    /// [`PoolingAllocationConfig::memory_protection_keys`] is set to `enable`
3626    /// or `auto`. Configuring this above the HW and OS limits (typically 15)
3627    /// has no effect.
3628    ///
3629    /// If multiple Wasmtime engines are used in the same process, note that all
3630    /// engines will share the same set of allocated keys; this setting will
3631    /// limit how many keys are allocated initially and thus available to all
3632    /// other engines.
3633    #[cfg(feature = "memory-protection-keys")]
3634    pub fn max_memory_protection_keys(&mut self, max: usize) -> &mut Self {
3635        self.config.max_memory_protection_keys = max;
3636        self
3637    }
3638
    /// Check if memory protection keys (MPK) are available on the current host.
    ///
    /// This is a convenience method for determining MPK availability using the
    /// same method that [`Enabled::Auto`] does. See
    /// [`PoolingAllocationConfig::memory_protection_keys`] for more
    /// information.
    #[cfg(feature = "memory-protection-keys")]
    pub fn are_memory_protection_keys_available() -> bool {
        // Defer to the runtime's probe for host MPK support.
        crate::runtime::vm::mpk::is_supported()
    }
3649
3650    /// The maximum number of concurrent GC heaps supported (default is `1000`).
3651    ///
3652    /// This value has a direct impact on the amount of memory allocated by the
3653    /// pooling instance allocator.
3654    ///
3655    /// The pooling instance allocator allocates a GC heap pool, where each
3656    /// entry in the pool contains the space needed for each GC heap used by a
3657    /// store.
3658    #[cfg(feature = "gc")]
3659    pub fn total_gc_heaps(&mut self, count: u32) -> &mut Self {
3660        self.config.limits.total_gc_heaps = count;
3661        self
3662    }
3663
3664    /// Configures whether the Linux-specific [`PAGEMAP_SCAN` ioctl][ioctl] is
3665    /// used to help reset linear memory.
3666    ///
3667    /// When [`Self::linear_memory_keep_resident`] or
3668    /// [`Self::table_keep_resident`] options are configured to nonzero values
3669    /// the default behavior is to `memset` the lowest addresses of a table or
3670    /// memory back to their original contents. With the `PAGEMAP_SCAN` ioctl on
3671    /// Linux this can be done to more intelligently scan for resident pages in
3672    /// the region and only reset those pages back to their original contents
3673    /// with `memset` rather than assuming the low addresses are all resident.
3674    ///
3675    /// This ioctl has the potential to provide a number of performance benefits
3676    /// in high-reuse and high concurrency scenarios. Notably this enables
3677    /// Wasmtime to scan the entire region of WebAssembly linear memory and
3678    /// manually reset memory back to its original contents, up to
3679    /// [`Self::linear_memory_keep_resident`] bytes, possibly skipping an
3680    /// `madvise` entirely. This can be more efficient by avoiding removing
3681    /// pages from the address space entirely and additionally ensuring that
3682    /// future use of the linear memory doesn't incur page faults as the pages
3683    /// remain resident.
3684    ///
3685    /// At this time this configuration option is still being evaluated as to
3686    /// how appropriate it is for all use cases. It currently defaults to
3687    /// `no` or disabled but may change to `auto`, enable if supported, in the
3688    /// future. This option is only supported on Linux and requires a kernel
3689    /// version of 6.7 or higher.
3690    ///
3691    /// [ioctl]: https://www.man7.org/linux/man-pages/man2/PAGEMAP_SCAN.2const.html
3692    pub fn pagemap_scan(&mut self, enable: Enabled) -> &mut Self {
3693        self.config.pagemap_scan = enable;
3694        self
3695    }
3696
    /// Tests whether [`Self::pagemap_scan`] is available or not on the host
    /// system.
    pub fn is_pagemap_scan_available() -> bool {
        // Delegate to the pooling allocator's platform-specific probe.
        crate::runtime::vm::PoolingInstanceAllocatorConfig::is_pagemap_scan_available()
    }
3702}
3703
/// Probes whether the named target feature is available on the host CPU.
///
/// Returns `Some(true)`/`Some(false)` when the feature's presence could be
/// determined, or `None` when `feature` is not one this function knows how to
/// detect on the current architecture.
#[cfg(feature = "std")]
fn detect_host_feature(feature: &str) -> Option<bool> {
    // aarch64: std's stable runtime feature detection covers what we need.
    #[cfg(target_arch = "aarch64")]
    {
        return match feature {
            "lse" => Some(std::arch::is_aarch64_feature_detected!("lse")),
            "paca" => Some(std::arch::is_aarch64_feature_detected!("paca")),
            "fp16" => Some(std::arch::is_aarch64_feature_detected!("fp16")),

            _ => None,
        };
    }

    // `is_s390x_feature_detected` is nightly only for now, so use the
    // STORE FACILITY LIST EXTENDED instruction as a temporary measure.
    #[cfg(target_arch = "s390x")]
    {
        let mut facility_list: [u64; 4] = [0; 4];
        // SAFETY: `facility_list` provides the output buffer and `r0` carries
        // its doubleword count minus one, per the STFLE calling convention,
        // so the store stays within the buffer.
        unsafe {
            core::arch::asm!(
                "stfle 0({})",
                in(reg_addr) facility_list.as_mut_ptr() ,
                inout("r0") facility_list.len() as u64 - 1 => _,
                options(nostack)
            );
        }
        let get_facility_bit = |n: usize| {
            // NOTE: bits are numbered from the left.
            facility_list[n / 64] & (1 << (63 - (n % 64))) != 0
        };

        return match feature {
            "mie3" => Some(get_facility_bit(61)),
            "mie4" => Some(get_facility_bit(84)),
            "vxrs_ext2" => Some(get_facility_bit(148)),
            "vxrs_ext3" => Some(get_facility_bit(198)),

            _ => None,
        };
    }

    #[cfg(target_arch = "riscv64")]
    {
        return match feature {
            // `is_riscv64_feature_detected` is not yet stable, so we cannot
            // use it. For now lie and say all features are always found to
            // keep tests working.
            _ => Some(true),
        };
    }

    // x86_64: std's stable runtime feature detection covers what we need.
    #[cfg(target_arch = "x86_64")]
    {
        return match feature {
            "cmpxchg16b" => Some(std::is_x86_feature_detected!("cmpxchg16b")),
            "sse3" => Some(std::is_x86_feature_detected!("sse3")),
            "ssse3" => Some(std::is_x86_feature_detected!("ssse3")),
            "sse4.1" => Some(std::is_x86_feature_detected!("sse4.1")),
            "sse4.2" => Some(std::is_x86_feature_detected!("sse4.2")),
            "popcnt" => Some(std::is_x86_feature_detected!("popcnt")),
            "avx" => Some(std::is_x86_feature_detected!("avx")),
            "avx2" => Some(std::is_x86_feature_detected!("avx2")),
            "fma" => Some(std::is_x86_feature_detected!("fma")),
            "bmi1" => Some(std::is_x86_feature_detected!("bmi1")),
            "bmi2" => Some(std::is_x86_feature_detected!("bmi2")),
            "avx512bitalg" => Some(std::is_x86_feature_detected!("avx512bitalg")),
            "avx512dq" => Some(std::is_x86_feature_detected!("avx512dq")),
            "avx512f" => Some(std::is_x86_feature_detected!("avx512f")),
            "avx512vl" => Some(std::is_x86_feature_detected!("avx512vl")),
            "avx512vbmi" => Some(std::is_x86_feature_detected!("avx512vbmi")),
            "lzcnt" => Some(std::is_x86_feature_detected!("lzcnt")),

            _ => None,
        };
    }

    // Fallback for architectures with no probe above: report "unknown".
    #[allow(
        unreachable_code,
        reason = "reachable or not depending on if a target above matches"
    )]
    {
        let _ = feature;
        return None;
    }
}