wasmtime/config.rs
1use crate::prelude::*;
2use alloc::sync::Arc;
3use bitflags::Flags;
4use core::fmt;
5use core::str::FromStr;
6#[cfg(any(feature = "cache", feature = "cranelift", feature = "winch"))]
7use std::path::Path;
8pub use wasmparser::WasmFeatures;
9use wasmtime_environ::{ConfigTunables, TripleExt, Tunables};
10
11#[cfg(feature = "runtime")]
12use crate::memory::MemoryCreator;
13#[cfg(feature = "runtime")]
14use crate::profiling_agent::{self, ProfilingAgent};
15#[cfg(feature = "runtime")]
16use crate::runtime::vm::{
17 GcRuntime, InstanceAllocator, OnDemandInstanceAllocator, RuntimeMemoryCreator,
18};
19#[cfg(feature = "runtime")]
20use crate::trampoline::MemoryCreatorProxy;
21
22#[cfg(feature = "async")]
23use crate::stack::{StackCreator, StackCreatorProxy};
24#[cfg(feature = "async")]
25use wasmtime_fiber::RuntimeFiberStackCreator;
26
27#[cfg(feature = "runtime")]
28pub use crate::runtime::code_memory::CustomCodeMemory;
29#[cfg(feature = "cache")]
30pub use wasmtime_cache::{Cache, CacheConfig};
31#[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
32pub use wasmtime_environ::CacheStore;
33
/// Represents the module instance allocation strategy to use.
#[derive(Clone)]
#[non_exhaustive]
pub enum InstanceAllocationStrategy {
    /// The on-demand instance allocation strategy.
    ///
    /// Resources related to a module instance are allocated at instantiation time and
    /// immediately deallocated when the `Store` referencing the instance is dropped.
    ///
    /// This is the default allocation strategy for Wasmtime.
    OnDemand,
    /// The pooling instance allocation strategy.
    ///
    /// A pool of resources is created in advance and module instantiation reuses resources
    /// from the pool. Resources are returned to the pool when the `Store` referencing the instance
    /// is dropped.
    ///
    /// A default-configured pooling strategy can be created with
    /// [`InstanceAllocationStrategy::pooling`].
    #[cfg(feature = "pooling-allocator")]
    Pooling(PoolingAllocationConfig),
}
53
54impl InstanceAllocationStrategy {
55 /// The default pooling instance allocation strategy.
56 #[cfg(feature = "pooling-allocator")]
57 pub fn pooling() -> Self {
58 Self::Pooling(Default::default())
59 }
60}
61
62impl Default for InstanceAllocationStrategy {
63 fn default() -> Self {
64 Self::OnDemand
65 }
66}
67
#[cfg(feature = "pooling-allocator")]
impl From<PoolingAllocationConfig> for InstanceAllocationStrategy {
    /// Wraps a pooling configuration in the corresponding strategy variant.
    fn from(cfg: PoolingAllocationConfig) -> InstanceAllocationStrategy {
        Self::Pooling(cfg)
    }
}
74
#[derive(Clone)]
/// Configure the strategy used for versioning in serializing and deserializing [`crate::Module`].
pub enum ModuleVersionStrategy {
    /// Use the wasmtime crate's Cargo package version.
    WasmtimeVersion,
    /// Use a custom version string. Must be at most 255 bytes.
    Custom(String),
    /// Emit no version string in serialization, and accept all version strings in deserialization.
    None,
}
85
86impl Default for ModuleVersionStrategy {
87 fn default() -> Self {
88 ModuleVersionStrategy::WasmtimeVersion
89 }
90}
91
92impl core::hash::Hash for ModuleVersionStrategy {
93 fn hash<H: core::hash::Hasher>(&self, hasher: &mut H) {
94 match self {
95 Self::WasmtimeVersion => env!("CARGO_PKG_VERSION").hash(hasher),
96 Self::Custom(s) => s.hash(hasher),
97 Self::None => {}
98 };
99 }
100}
101
/// Global configuration options used to create an [`Engine`](crate::Engine)
/// and customize its behavior.
///
/// This structure exposes a builder-like interface and is primarily consumed by
/// [`Engine::new()`](crate::Engine::new).
///
/// The validation of `Config` is deferred until the engine is being built, thus
/// a problematic config may cause `Engine::new` to fail.
///
/// # Defaults
///
/// The `Default` trait implementation and the return value from
/// [`Config::new()`] are the same and represent the default set of
/// configuration for an engine. The exact set of defaults will differ based on
/// properties such as enabled Cargo features at compile time and the configured
/// target (see [`Config::target`]). Configuration options document their
/// default values and what the conditional value of the default is where
/// applicable.
#[derive(Clone)]
pub struct Config {
    // Compiler configuration; `None` indicates the compiler has been disabled
    // at run-time via `Config::enable_compiler(false)`.
    #[cfg(any(feature = "cranelift", feature = "winch"))]
    compiler_config: Option<CompilerConfig>,
    // Compilation target triple; `None` means the host target is used.
    target: Option<target_lexicon::Triple>,
    #[cfg(feature = "gc")]
    collector: Collector,
    profiling_strategy: ProfilingStrategy,
    tunables: ConfigTunables,

    #[cfg(feature = "cache")]
    pub(crate) cache: Option<Cache>,
    #[cfg(feature = "runtime")]
    pub(crate) mem_creator: Option<Arc<dyn RuntimeMemoryCreator>>,
    #[cfg(feature = "runtime")]
    pub(crate) custom_code_memory: Option<Arc<dyn CustomCodeMemory>>,
    pub(crate) allocation_strategy: InstanceAllocationStrategy,
    pub(crate) max_wasm_stack: usize,
    /// Explicitly enabled features via `Config::wasm_*` methods. This is a
    /// signal that the embedder specifically wants something turned on
    /// regardless of the defaults that Wasmtime might otherwise have enabled.
    ///
    /// Note that this, and `disabled_features` below, start as the empty set of
    /// features to only track explicit user requests.
    pub(crate) enabled_features: WasmFeatures,
    /// Same as `enabled_features`, but for those that are explicitly disabled.
    pub(crate) disabled_features: WasmFeatures,
    pub(crate) wasm_backtrace: bool,
    // Whether `wasm_backtrace_details` was resolved from the
    // `WASMTIME_BACKTRACE_DETAILS` environment variable.
    pub(crate) wasm_backtrace_details_env_used: bool,
    pub(crate) native_unwind_info: Option<bool>,
    #[cfg(any(feature = "async", feature = "stack-switching"))]
    pub(crate) async_stack_size: usize,
    #[cfg(feature = "async")]
    pub(crate) async_stack_zeroing: bool,
    #[cfg(feature = "async")]
    pub(crate) stack_creator: Option<Arc<dyn RuntimeFiberStackCreator>>,
    pub(crate) async_support: bool,
    pub(crate) module_version: ModuleVersionStrategy,
    pub(crate) parallel_compilation: bool,
    pub(crate) memory_guaranteed_dense_image_size: u64,
    pub(crate) force_memory_init_memfd: bool,
    pub(crate) wmemcheck: bool,
    #[cfg(feature = "coredump")]
    pub(crate) coredump_on_trap: bool,
    pub(crate) macos_use_mach_ports: bool,
    // Optional host-CPU feature probe; `None` when feature detection is
    // unavailable (e.g. no_std builds).
    pub(crate) detect_host_feature: Option<fn(&str) -> Option<bool>>,
    pub(crate) x86_float_abi_ok: Option<bool>,
    pub(crate) shared_memory: bool,
}
169
/// User-provided configuration for the compiler.
#[cfg(any(feature = "cranelift", feature = "winch"))]
#[derive(Debug, Clone)]
struct CompilerConfig {
    // Explicitly selected compilation strategy; `None` appears to mean
    // "auto-select" (see `Strategy::Auto.not_auto()` in `new`) — confirm
    // against `Strategy`'s definition.
    strategy: Option<Strategy>,
    // Key/value compiler settings (e.g. Cranelift flags with values).
    settings: crate::hash_map::HashMap<String, String>,
    // Boolean compiler flags that are simply enabled by name.
    flags: crate::hash_set::HashSet<String>,
    #[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
    cache_store: Option<Arc<dyn CacheStore>>,
    // Directory to emit `.clif` IR into, if requested.
    clif_dir: Option<std::path::PathBuf>,
    wmemcheck: bool,
}
182
#[cfg(any(feature = "cranelift", feature = "winch"))]
impl CompilerConfig {
    fn new() -> Self {
        CompilerConfig {
            strategy: Strategy::Auto.not_auto(),
            settings: Default::default(),
            flags: Default::default(),
            #[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
            cache_store: None,
            clif_dir: None,
            wmemcheck: false,
        }
    }

    /// Ensures that the key is not set or equals to the given value.
    /// If the key is not set, it will be set to the given value.
    ///
    /// # Returns
    ///
    /// Returns true if successfully set or already had the given setting
    /// value, or false if the setting was explicitly set to something
    /// else previously.
    fn ensure_setting_unset_or_given(&mut self, k: &str, v: &str) -> bool {
        match self.settings.get(k) {
            // An existing entry is acceptable only if it already matches `v`.
            Some(existing) => existing == v,
            // Unset: record the requested value and report success.
            None => {
                self.settings.insert(k.to_string(), v.to_string());
                true
            }
        }
    }
}
216
#[cfg(any(feature = "cranelift", feature = "winch"))]
impl Default for CompilerConfig {
    /// Delegates to [`CompilerConfig::new`].
    fn default() -> Self {
        CompilerConfig::new()
    }
}
223
224impl Config {
    /// Creates a new configuration object with the default configuration
    /// specified.
    pub fn new() -> Self {
        let mut ret = Self {
            tunables: ConfigTunables::default(),
            #[cfg(any(feature = "cranelift", feature = "winch"))]
            compiler_config: Some(CompilerConfig::default()),
            target: None,
            #[cfg(feature = "gc")]
            collector: Collector::default(),
            #[cfg(feature = "cache")]
            cache: None,
            profiling_strategy: ProfilingStrategy::None,
            #[cfg(feature = "runtime")]
            mem_creator: None,
            #[cfg(feature = "runtime")]
            custom_code_memory: None,
            allocation_strategy: InstanceAllocationStrategy::OnDemand,
            // 512k of stack -- note that this is chosen currently to not be too
            // big, not be too small, and be a good default for most platforms.
            // One platform of particular note is Windows where the stack size
            // of the main thread seems to, by default, be smaller than that of
            // Linux and macOS. This 512k value at least lets our current test
            // suite pass on the main thread of Windows (using `--test-threads
            // 1` forces this), or at least it passed when this change was
            // committed.
            max_wasm_stack: 512 * 1024,
            wasm_backtrace: true,
            wasm_backtrace_details_env_used: false,
            native_unwind_info: None,
            enabled_features: WasmFeatures::empty(),
            disabled_features: WasmFeatures::empty(),
            // 2 MiB default stack for async fibers.
            #[cfg(any(feature = "async", feature = "stack-switching"))]
            async_stack_size: 2 << 20,
            #[cfg(feature = "async")]
            async_stack_zeroing: false,
            #[cfg(feature = "async")]
            stack_creator: None,
            async_support: false,
            module_version: ModuleVersionStrategy::default(),
            // Parallel compilation is disabled under miri.
            parallel_compilation: !cfg!(miri),
            // 16 MiB guaranteed dense memory image size.
            memory_guaranteed_dense_image_size: 16 << 20,
            force_memory_init_memfd: false,
            wmemcheck: false,
            #[cfg(feature = "coredump")]
            coredump_on_trap: false,
            macos_use_mach_ports: !cfg!(miri),
            // Host feature detection requires `std`; without it no probe is
            // available.
            #[cfg(feature = "std")]
            detect_host_feature: Some(detect_host_feature),
            #[cfg(not(feature = "std"))]
            detect_host_feature: None,
            x86_float_abi_ok: None,
            shared_memory: false,
        };
        // Resolve the backtrace-details default from the environment
        // (`WASMTIME_BACKTRACE_DETAILS`) when `std` is enabled.
        ret.wasm_backtrace_details(WasmBacktraceDetails::Environment);
        ret
    }
282
283 #[cfg(any(feature = "cranelift", feature = "winch"))]
284 pub(crate) fn has_compiler(&self) -> bool {
285 self.compiler_config.is_some()
286 }
287
288 #[track_caller]
289 #[cfg(any(feature = "cranelift", feature = "winch"))]
290 fn compiler_config_mut(&mut self) -> &mut CompilerConfig {
291 self.compiler_config.as_mut().expect(
292 "cannot configure compiler settings for `Config`s \
293 created by `Config::without_compiler`",
294 )
295 }
296
297 /// Configure whether Wasm compilation is enabled.
298 ///
299 /// Disabling Wasm compilation will allow you to load and run
300 /// [pre-compiled][crate::Engine::precompile_module] Wasm programs, but not
301 /// to compile and run new Wasm programs that have not already been
302 /// pre-compiled.
303 ///
304 /// Many compilation-related configuration methods will panic if compilation
305 /// has been disabled.
306 ///
307 /// Note that there are two ways to disable Wasm compilation:
308 ///
309 /// 1. Statically, by disabling the `"cranelift"` and `"winch"` cargo
310 /// features when building Wasmtime. These builds of Wasmtime will have
311 /// smaller code size, since they do not include any of the code to
312 /// compile Wasm.
313 ///
314 /// 2. Dynamically, by passing `false` to this method at run-time when
315 /// configuring Wasmtime. The Wasmtime binary will still include the code
316 /// for compiling Wasm, it just won't be executed, so code size is larger
317 /// than with the first approach.
318 ///
319 /// The static approach is better in most cases, however dynamically calling
320 /// `enable_compiler(false)` is useful whenever you create multiple
321 /// `Engine`s in the same process, some of which must be able to compile
322 /// Wasm and some of which should never do so. Tests are a common example of
323 /// such a situation, especially when there are multiple Rust binaries in
324 /// the same cargo workspace, and cargo's feature resolution enables the
325 /// `"cranelift"` or `"winch"` features across the whole workspace.
326 #[cfg(any(feature = "cranelift", feature = "winch"))]
327 pub fn enable_compiler(&mut self, enable: bool) -> &mut Self {
328 match (enable, &self.compiler_config) {
329 (true, Some(_)) | (false, None) => {}
330 (true, None) => {
331 self.compiler_config = Some(CompilerConfig::default());
332 }
333 (false, Some(_)) => {
334 self.compiler_config = None;
335 }
336 }
337 self
338 }
339
340 /// Configures the target platform of this [`Config`].
341 ///
342 /// This method is used to configure the output of compilation in an
343 /// [`Engine`](crate::Engine). This can be used, for example, to
344 /// cross-compile from one platform to another. By default, the host target
345 /// triple is used meaning compiled code is suitable to run on the host.
346 ///
347 /// Note that the [`Module`](crate::Module) type can only be created if the
348 /// target configured here matches the host. Otherwise if a cross-compile is
349 /// being performed where the host doesn't match the target then
350 /// [`Engine::precompile_module`](crate::Engine::precompile_module) must be
351 /// used instead.
352 ///
353 /// Target-specific flags (such as CPU features) will not be inferred by
354 /// default for the target when one is provided here. This means that this
355 /// can also be used, for example, with the host architecture to disable all
356 /// host-inferred feature flags. Configuring target-specific flags can be
357 /// done with [`Config::cranelift_flag_set`] and
358 /// [`Config::cranelift_flag_enable`].
359 ///
360 /// # Errors
361 ///
362 /// This method will error if the given target triple is not supported.
363 pub fn target(&mut self, target: &str) -> Result<&mut Self> {
364 self.target =
365 Some(target_lexicon::Triple::from_str(target).map_err(|e| crate::format_err!(e))?);
366
367 Ok(self)
368 }
369
370 /// Enables the incremental compilation cache in Cranelift, using the provided `CacheStore`
371 /// backend for storage.
372 ///
373 /// # Panics
374 ///
375 /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
376 #[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
377 pub fn enable_incremental_compilation(
378 &mut self,
379 cache_store: Arc<dyn CacheStore>,
380 ) -> Result<&mut Self> {
381 self.compiler_config_mut().cache_store = Some(cache_store);
382 Ok(self)
383 }
384
    /// Whether or not to enable support for asynchronous functions in Wasmtime.
    ///
    /// When enabled, the config can optionally define host functions with `async`.
    /// Instances created and functions called with this `Config` *must* be called
    /// through their asynchronous APIs, however. For example using
    /// [`Func::call`](crate::Func::call) will panic when used with this config.
    ///
    /// # Asynchronous Wasm
    ///
    /// WebAssembly does not currently have a way to specify at the bytecode
    /// level what is and isn't async. Host-defined functions, however, may be
    /// defined as `async`. WebAssembly imports always appear synchronous, which
    /// gives rise to a bit of an impedance mismatch here. To solve this
    /// Wasmtime supports "asynchronous configs" which enables calling these
    /// asynchronous functions in a way that looks synchronous to the executing
    /// WebAssembly code.
    ///
    /// An asynchronous config must always invoke wasm code asynchronously,
    /// meaning we'll always represent its computation as a
    /// [`Future`](std::future::Future). The `poll` method of the futures
    /// returned by Wasmtime will perform the actual work of calling the
    /// WebAssembly. Wasmtime won't manage its own thread pools or similar,
    /// that's left up to the embedder.
    ///
    /// To implement futures in a way that WebAssembly sees asynchronous host
    /// functions as synchronous, all async Wasmtime futures will execute on a
    /// separately allocated native stack from the thread otherwise executing
    /// Wasmtime. This separate native stack can then be switched to and from.
    /// Using this whenever an `async` host function returns a future that
    /// resolves to `Pending` we switch away from the temporary stack back to
    /// the main stack and propagate the `Pending` status.
    ///
    /// In general it's encouraged that the integration with `async` and
    /// wasmtime is designed early on in your embedding of Wasmtime to ensure
    /// that it's planned that WebAssembly executes in the right context of your
    /// application.
    ///
    /// # Execution in `poll`
    ///
    /// The [`Future::poll`](std::future::Future::poll) method is the main
    /// driving force behind Rust's futures. That method's own documentation
    /// states "an implementation of `poll` should strive to return quickly, and
    /// should not block". This, however, can be at odds with executing
    /// WebAssembly code as part of the `poll` method itself. If your
    /// WebAssembly is untrusted then this could allow the `poll` method to take
    /// arbitrarily long in the worst case, likely blocking all other
    /// asynchronous tasks.
    ///
    /// To remedy this situation you have a few possible ways to solve this:
    ///
    /// * The most efficient solution is to enable
    ///   [`Config::epoch_interruption`] in conjunction with
    ///   [`crate::Store::epoch_deadline_async_yield_and_update`]. Coupled with
    ///   periodic calls to [`crate::Engine::increment_epoch`] this will cause
    ///   executing WebAssembly to periodically yield back according to the
    ///   epoch configuration settings. This enables `Future::poll` to take at
    ///   most a certain amount of time according to epoch configuration
    ///   settings and when increments happen. The benefit of this approach is
    ///   that the instrumentation in compiled code is quite lightweight, but a
    ///   downside can be that the scheduling is somewhat nondeterministic since
    ///   increments are usually timer-based which are not always deterministic.
    ///
    ///   Note that to prevent infinite execution of wasm it's recommended to
    ///   place a timeout on the entire future representing executing wasm code
    ///   and the periodic yields with epochs should ensure that when the
    ///   timeout is reached it's appropriately recognized.
    ///
    /// * Alternatively you can enable the
    ///   [`Config::consume_fuel`](crate::Config::consume_fuel) method as well
    ///   as [`crate::Store::fuel_async_yield_interval`]. When doing so this will
    ///   configure Wasmtime futures to yield periodically while they're
    ///   executing WebAssembly code. After consuming the specified amount of
    ///   fuel wasm futures will return `Poll::Pending` from their `poll`
    ///   method, and will get automatically re-polled later. This enables the
    ///   `Future::poll` method to take roughly a fixed amount of time since
    ///   fuel is guaranteed to get consumed while wasm is executing. Unlike
    ///   epoch-based preemption this is deterministic since wasm always
    ///   consumes a fixed amount of fuel per-operation. The downside of this
    ///   approach, however, is that the compiled code instrumentation is
    ///   significantly more expensive than epoch checks.
    ///
    ///   Note that to prevent infinite execution of wasm it's recommended to
    ///   place a timeout on the entire future representing executing wasm code
    ///   and the periodic yields with epochs should ensure that when the
    ///   timeout is reached it's appropriately recognized.
    ///
    /// In all cases special care needs to be taken when integrating
    /// asynchronous wasm into your application. You should carefully plan where
    /// WebAssembly will execute and what compute resources will be allotted to
    /// it. If Wasmtime doesn't support exactly what you'd like just yet, please
    /// feel free to open an issue!
    #[cfg(feature = "async")]
    pub fn async_support(&mut self, enable: bool) -> &mut Self {
        self.async_support = enable;
        self
    }
481
    /// Configures whether DWARF debug information will be emitted
    /// during compilation for a native debugger on the Wasmtime
    /// process to consume.
    ///
    /// Note that the `debug-builtins` compile-time Cargo feature must also be
    /// enabled for native debuggers such as GDB or LLDB to be able to debug
    /// guest WebAssembly programs.
    ///
    /// By default this option is `false`.
    ///
    /// **Note** Enabling this option is not compatible with the Winch compiler.
    pub fn debug_info(&mut self, enable: bool) -> &mut Self {
        self.tunables.debug_native = Some(enable);
        self
    }
496
    /// Configures whether compiled guest code will be instrumented to
    /// provide debugging at the Wasm VM level.
    ///
    /// This is required in order to enable a guest-level debugging
    /// API that can precisely examine Wasm VM state and (eventually,
    /// once it is complete) set breakpoints and watchpoints and step
    /// through code.
    ///
    /// Without this enabled, debugging can only be done via a native
    /// debugger operating on the compiled guest code (see
    /// [`Config::debug_info`]) and is "best-effort": we may be able to
    /// recover some Wasm locals or operand stack values, but it is
    /// not guaranteed, even when optimizations are disabled.
    ///
    /// When this is enabled, additional instrumentation is inserted
    /// that directly tracks the Wasm VM state at every step. This has
    /// some performance impact, but allows perfect debugging
    /// fidelity.
    ///
    /// Breakpoints, watchpoints, and stepping are not yet supported,
    /// but will be added in a future version of Wasmtime.
    ///
    /// This enables use of the [`crate::DebugFrameCursor`] API which is
    /// provided by [`crate::Caller::debug_frames`] from within a
    /// hostcall context.
    ///
    /// ***Note*** Enabling this option is not compatible with the
    /// Winch compiler.
    #[cfg(feature = "debug")]
    pub fn guest_debug(&mut self, enable: bool) -> &mut Self {
        self.tunables.debug_guest = Some(enable);
        self
    }
530
    /// Configures whether [`WasmBacktrace`] will be present in the context of
    /// errors returned from Wasmtime.
    ///
    /// A backtrace may be collected whenever an error is returned from a host
    /// function call through to WebAssembly or when WebAssembly itself hits a
    /// trap condition, such as an out-of-bounds memory access. This flag
    /// indicates, in these conditions, whether the backtrace is collected or
    /// not.
    ///
    /// Currently wasm backtraces are implemented through frame pointer walking.
    /// This means that collecting a backtrace is expected to be a fast and
    /// relatively cheap operation. Additionally backtrace collection is
    /// suitable in concurrent environments since one thread capturing a
    /// backtrace won't block other threads.
    ///
    /// Collected backtraces are attached via [`wasmtime::Error::context`] to
    /// errors returned from host functions. The [`WasmBacktrace`] type can be
    /// acquired via [`wasmtime::Error::downcast_ref`] to inspect the backtrace.
    /// When this option is disabled then this context is never applied to
    /// errors coming out of wasm.
    ///
    /// This option is `true` by default.
    ///
    /// [`WasmBacktrace`]: crate::WasmBacktrace
    pub fn wasm_backtrace(&mut self, enable: bool) -> &mut Self {
        self.wasm_backtrace = enable;
        self
    }
559
560 /// Configures whether backtraces in `Trap` will parse debug info in the wasm file to
561 /// have filename/line number information.
562 ///
563 /// When enabled this will causes modules to retain debugging information
564 /// found in wasm binaries. This debug information will be used when a trap
565 /// happens to symbolicate each stack frame and attempt to print a
566 /// filename/line number for each wasm frame in the stack trace.
567 ///
568 /// By default this option is `WasmBacktraceDetails::Environment`, meaning
569 /// that wasm will read `WASMTIME_BACKTRACE_DETAILS` to indicate whether
570 /// details should be parsed. Note that the `std` feature of this crate must
571 /// be active to read environment variables, otherwise this is disabled by
572 /// default.
573 pub fn wasm_backtrace_details(&mut self, enable: WasmBacktraceDetails) -> &mut Self {
574 self.wasm_backtrace_details_env_used = false;
575 self.tunables.parse_wasm_debuginfo = match enable {
576 WasmBacktraceDetails::Enable => Some(true),
577 WasmBacktraceDetails::Disable => Some(false),
578 WasmBacktraceDetails::Environment => {
579 #[cfg(feature = "std")]
580 {
581 self.wasm_backtrace_details_env_used = true;
582 std::env::var("WASMTIME_BACKTRACE_DETAILS")
583 .map(|s| Some(s == "1"))
584 .unwrap_or(Some(false))
585 }
586 #[cfg(not(feature = "std"))]
587 {
588 Some(false)
589 }
590 }
591 };
592 self
593 }
594
    /// Configures whether to generate native unwind information
    /// (e.g. `.eh_frame` on Linux).
    ///
    /// This configuration option only exists to help third-party stack
    /// capturing mechanisms, such as the system's unwinder or the `backtrace`
    /// crate, determine how to unwind through Wasm frames. It does not affect
    /// whether Wasmtime can capture Wasm backtraces or not. The presence of
    /// [`WasmBacktrace`] is controlled by the [`Config::wasm_backtrace`]
    /// option.
    ///
    /// Native unwind information is included:
    /// - When targeting Windows, since the Windows ABI requires it.
    /// - By default.
    ///
    /// Note that systems loading many modules may wish to disable this
    /// configuration option instead of leaving it on-by-default. Some platforms
    /// exhibit quadratic behavior when registering/unregistering unwinding
    /// information which can greatly slow down the module loading/unloading
    /// process.
    ///
    /// [`WasmBacktrace`]: crate::WasmBacktrace
    pub fn native_unwind_info(&mut self, enable: bool) -> &mut Self {
        self.native_unwind_info = Some(enable);
        self
    }
620
    /// Configures whether execution of WebAssembly will "consume fuel" to
    /// either halt or yield execution as desired.
    ///
    /// This can be used to deterministically prevent infinitely-executing
    /// WebAssembly code by instrumenting generated code to consume fuel as it
    /// executes. When fuel runs out a trap is raised, however [`Store`] can be
    /// configured to yield execution periodically via
    /// [`crate::Store::fuel_async_yield_interval`].
    ///
    /// Note that a [`Store`] starts with no fuel, so if you enable this option
    /// you'll have to be sure to pour some fuel into [`Store`] before
    /// executing some code.
    ///
    /// By default this option is `false`.
    ///
    /// **Note** Enabling this option is not compatible with the Winch compiler.
    ///
    /// [`Store`]: crate::Store
    pub fn consume_fuel(&mut self, enable: bool) -> &mut Self {
        self.tunables.consume_fuel = Some(enable);
        self
    }
643
    /// Enables epoch-based interruption.
    ///
    /// When executing code in async mode, we sometimes want to
    /// implement a form of cooperative timeslicing: long-running Wasm
    /// guest code should periodically yield to the executor
    /// loop. This yielding could be implemented by using "fuel" (see
    /// [`consume_fuel`](Config::consume_fuel)). However, fuel
    /// instrumentation is somewhat expensive: it modifies the
    /// compiled form of the Wasm code so that it maintains a precise
    /// instruction count, frequently checking this count against the
    /// remaining fuel. If one does not need this precise count or
    /// deterministic interruptions, and only needs a periodic
    /// interrupt of some form, then it would be better to have a more
    /// lightweight mechanism.
    ///
    /// Epoch-based interruption is that mechanism. There is a global
    /// "epoch", which is a counter that divides time into arbitrary
    /// periods (or epochs). This counter lives on the
    /// [`Engine`](crate::Engine) and can be incremented by calling
    /// [`Engine::increment_epoch`](crate::Engine::increment_epoch).
    /// Epoch-based instrumentation works by setting a "deadline
    /// epoch". The compiled code knows the deadline, and at certain
    /// points, checks the current epoch against that deadline. It
    /// will yield if the deadline has been reached.
    ///
    /// The idea is that checking an infrequently-changing counter is
    /// cheaper than counting and frequently storing a precise metric
    /// (instructions executed) locally. The interruptions are not
    /// deterministic, but if the embedder increments the epoch in a
    /// periodic way (say, every regular timer tick by a thread or
    /// signal handler), then we can ensure that all async code will
    /// yield to the executor within a bounded time.
    ///
    /// The deadline check cannot be avoided by malicious wasm code. It is safe
    /// to use epoch deadlines to limit the execution time of untrusted
    /// code.
    ///
    /// The [`Store`](crate::Store) tracks the deadline, and controls
    /// what happens when the deadline is reached during
    /// execution. Several behaviors are possible:
    ///
    /// - Trap if code is executing when the epoch deadline is
    ///   met. See
    ///   [`Store::epoch_deadline_trap`](crate::Store::epoch_deadline_trap).
    ///
    /// - Call an arbitrary function. This function may choose to trap or
    ///   increment the epoch. See
    ///   [`Store::epoch_deadline_callback`](crate::Store::epoch_deadline_callback).
    ///
    /// - Yield to the executor loop, then resume when the future is
    ///   next polled. See
    ///   [`Store::epoch_deadline_async_yield_and_update`](crate::Store::epoch_deadline_async_yield_and_update).
    ///
    /// Trapping is the default. The yielding behaviour may be used for
    /// the timeslicing behavior described above.
    ///
    /// This feature is available with or without async support.
    /// However, without async support, the timeslicing behaviour is
    /// not available. This means epoch-based interruption can only
    /// serve as a simple external-interruption mechanism.
    ///
    /// An initial deadline must be set before executing code by calling
    /// [`Store::set_epoch_deadline`](crate::Store::set_epoch_deadline). If this
    /// deadline is not configured then wasm will immediately trap.
    ///
    /// ## Interaction with blocking host calls
    ///
    /// Epochs (and fuel) do not assist in handling WebAssembly code blocked in
    /// a call to the host. For example if the WebAssembly function calls
    /// `wasi:io/poll.poll` to sleep epochs will not assist in waking this up or
    /// timing it out. Epochs intentionally only affect running WebAssembly code
    /// itself and it's left to the embedder to determine how best to wake up
    /// indefinitely blocking code in the host.
    ///
    /// The typical solution for this, however, is to use
    /// [`Config::async_support(true)`](Config::async_support) and the `async`
    /// variant of WASI host functions. This models computation as a Rust
    /// `Future` which means that when blocking happens the future is only
    /// suspended and control yields back to the main event loop. This gives the
    /// embedder the opportunity to use `tokio::time::timeout` for example on a
    /// wasm computation and have the desired effect of cancelling a blocking
    /// operation when a timeout expires.
    ///
    /// ## When to use fuel vs. epochs
    ///
    /// In general, epoch-based interruption results in faster
    /// execution. This difference is sometimes significant: in some
    /// measurements, up to 2-3x. This is because epoch-based
    /// interruption does less work: it only watches for a global
    /// rarely-changing counter to increment, rather than keeping a
    /// local frequently-changing counter and comparing it to a
    /// deadline.
    ///
    /// Fuel, in contrast, should be used when *deterministic*
    /// yielding or trapping is needed. For example, if it is required
    /// that the same function call with the same starting state will
    /// always either complete or trap with an out-of-fuel error,
    /// deterministically, then fuel with a fixed bound should be
    /// used.
    ///
    /// **Note** Enabling this option is not compatible with the Winch compiler.
    ///
    /// # See Also
    ///
    /// - [`Engine::increment_epoch`](crate::Engine::increment_epoch)
    /// - [`Store::set_epoch_deadline`](crate::Store::set_epoch_deadline)
    /// - [`Store::epoch_deadline_trap`](crate::Store::epoch_deadline_trap)
    /// - [`Store::epoch_deadline_callback`](crate::Store::epoch_deadline_callback)
    /// - [`Store::epoch_deadline_async_yield_and_update`](crate::Store::epoch_deadline_async_yield_and_update)
    pub fn epoch_interruption(&mut self, enable: bool) -> &mut Self {
        self.tunables.epoch_interruption = Some(enable);
        self
    }
757
758 /// Configures the maximum amount of stack space available for
759 /// executing WebAssembly code.
760 ///
761 /// WebAssembly has well-defined semantics on stack overflow. This is
762 /// intended to be a knob which can help configure how much stack space
763 /// wasm execution is allowed to consume. Note that the number here is not
764 /// super-precise, but rather wasm will take at most "pretty close to this
765 /// much" stack space.
766 ///
767 /// If a wasm call (or series of nested wasm calls) take more stack space
768 /// than the `size` specified then a stack overflow trap will be raised.
769 ///
770 /// Caveat: this knob only limits the stack space consumed by wasm code.
771 /// More importantly, it does not ensure that this much stack space is
772 /// available on the calling thread stack. Exhausting the thread stack
773 /// typically leads to an **abort** of the process.
774 ///
775 /// Here are some examples of how that could happen:
776 ///
777 /// - Let's assume this option is set to 2 MiB and then a thread that has
778 /// a stack with 512 KiB left.
779 ///
780 /// If wasm code consumes more than 512 KiB then the process will be aborted.
781 ///
782 /// - Assuming the same conditions, but this time wasm code does not consume
783 /// any stack but calls into a host function. The host function consumes
784 /// more than 512 KiB of stack space. The process will be aborted.
785 ///
786 /// There's another gotcha related to recursive calling into wasm: the stack
787 /// space consumed by a host function is counted towards this limit. The
788 /// host functions are not prevented from consuming more than this limit.
789 /// However, if the host function that used more than this limit and called
790 /// back into wasm, then the execution will trap immediately because of
791 /// stack overflow.
792 ///
793 /// When the `async` feature is enabled, this value cannot exceed the
794 /// `async_stack_size` option. Be careful not to set this value too close
795 /// to `async_stack_size` as doing so may limit how much stack space
796 /// is available for host functions.
797 ///
798 /// By default this option is 512 KiB.
799 ///
800 /// # Errors
801 ///
802 /// The `Engine::new` method will fail if the `size` specified here is
803 /// either 0 or larger than the [`Config::async_stack_size`] configuration.
804 pub fn max_wasm_stack(&mut self, size: usize) -> &mut Self {
805 self.max_wasm_stack = size;
806 self
807 }
808
809 /// Configures the size of the stacks used for asynchronous execution.
810 ///
811 /// This setting configures the size of the stacks that are allocated for
812 /// asynchronous execution. The value cannot be less than `max_wasm_stack`.
813 ///
814 /// The amount of stack space guaranteed for host functions is
815 /// `async_stack_size - max_wasm_stack`, so take care not to set these two values
816 /// close to one another; doing so may cause host functions to overflow the
817 /// stack and abort the process.
818 ///
819 /// By default this option is 2 MiB.
820 ///
821 /// # Errors
822 ///
823 /// The `Engine::new` method will fail if the value for this option is
824 /// smaller than the [`Config::max_wasm_stack`] option.
825 #[cfg(any(feature = "async", feature = "stack-switching"))]
826 pub fn async_stack_size(&mut self, size: usize) -> &mut Self {
827 self.async_stack_size = size;
828 self
829 }
830
831 /// Configures whether or not stacks used for async futures are zeroed
832 /// before (re)use.
833 ///
834 /// When the [`async_support`](Config::async_support) method is enabled for
835 /// Wasmtime and the [`call_async`] variant of calling WebAssembly is used
836 /// then Wasmtime will create a separate runtime execution stack for each
837 /// future produced by [`call_async`]. By default upon allocation, depending
838 /// on the platform, these stacks might be filled with uninitialized
839 /// memory. This is safe and correct because, modulo bugs in Wasmtime,
840 /// compiled Wasm code will never read from a stack slot before it
841 /// initializes the stack slot.
842 ///
843 /// However, as a defense-in-depth mechanism, you may configure Wasmtime to
844 /// ensure that these stacks are zeroed before they are used. Notably, if
845 /// you are using the pooling allocator, stacks can be pooled and reused
846 /// across different Wasm guests; ensuring that stacks are zeroed can
847 /// prevent data leakage between Wasm guests even in the face of potential
848 /// read-of-stack-slot-before-initialization bugs in Wasmtime's compiler.
849 ///
850 /// Stack zeroing can be a costly operation in highly concurrent
851 /// environments due to modifications of the virtual address space requiring
852 /// process-wide synchronization. It can also be costly in `no-std`
853 /// environments that must manually zero memory, and cannot rely on an OS
854 /// and virtual memory to provide zeroed pages.
855 ///
856 /// This option defaults to `false`.
857 ///
858 /// [`call_async`]: crate::TypedFunc::call_async
859 #[cfg(feature = "async")]
860 pub fn async_stack_zeroing(&mut self, enable: bool) -> &mut Self {
861 self.async_stack_zeroing = enable;
862 self
863 }
864
865 /// Explicitly enables (and un-disables) a given set of [`WasmFeatures`].
866 ///
867 /// Note: this is a low-level method that does not necessarily imply that
868 /// wasmtime _supports_ a feature. It should only be used to _disable_
869 /// features that callers want to be rejected by the parser or _enable_
870 /// features callers are certain that the current configuration of wasmtime
871 /// supports.
872 ///
873 /// Feature validation is deferred until an engine is being built, thus by
874 /// enabling features here a caller may cause [`Engine::new`] to fail later,
875 /// if the feature configuration isn't supported.
876 pub fn wasm_features(&mut self, flag: WasmFeatures, enable: bool) -> &mut Self {
877 self.enabled_features.set(flag, enable);
878 self.disabled_features.set(flag, !enable);
879 self
880 }
881
882 /// Configures whether the WebAssembly tail calls proposal will be enabled
883 /// for compilation or not.
884 ///
885 /// The [WebAssembly tail calls proposal] introduces the `return_call` and
886 /// `return_call_indirect` instructions. These instructions allow for Wasm
887 /// programs to implement some recursive algorithms with *O(1)* stack space
888 /// usage.
889 ///
890 /// This is `true` by default except when the Winch compiler is enabled.
891 ///
892 /// [WebAssembly tail calls proposal]: https://github.com/WebAssembly/tail-call
893 pub fn wasm_tail_call(&mut self, enable: bool) -> &mut Self {
894 self.wasm_features(WasmFeatures::TAIL_CALL, enable);
895 self
896 }
897
898 /// Configures whether the WebAssembly custom-page-sizes proposal will be
899 /// enabled for compilation or not.
900 ///
901 /// The [WebAssembly custom-page-sizes proposal] allows a memory to
902 /// customize its page sizes. By default, Wasm page sizes are 64KiB
903 /// large. This proposal allows the memory to opt into smaller page sizes
904 /// instead, allowing Wasm to run in environments with less than 64KiB RAM
905 /// available, for example.
906 ///
907 /// Note that the page size is part of the memory's type, and because
908 /// different memories may have different types, they may also have
909 /// different page sizes.
910 ///
911 /// Currently the only valid page sizes are 64KiB (the default) and 1
912 /// byte. Future extensions may relax this constraint and allow all powers
913 /// of two.
914 ///
915 /// Support for this proposal is disabled by default.
916 ///
917 /// [WebAssembly custom-page-sizes proposal]: https://github.com/WebAssembly/custom-page-sizes
918 pub fn wasm_custom_page_sizes(&mut self, enable: bool) -> &mut Self {
919 self.wasm_features(WasmFeatures::CUSTOM_PAGE_SIZES, enable);
920 self
921 }
922
923 /// Configures whether the WebAssembly [threads] proposal will be enabled
924 /// for compilation.
925 ///
926 /// This feature gates items such as shared memories and atomic
927 /// instructions. Note that the threads feature depends on the bulk memory
928 /// feature, which is enabled by default. Additionally note that while the
929 /// wasm feature is called "threads" it does not actually include the
930 /// ability to spawn threads. Spawning threads is part of the [wasi-threads]
931 /// proposal which is a separately gated feature in Wasmtime.
932 ///
933 /// Embeddings of Wasmtime are able to build their own custom threading
934 /// scheme on top of the core wasm threads proposal, however.
935 ///
936 /// The default value for this option is whether the `threads`
937 /// crate feature of Wasmtime is enabled or not. By default this crate
938 /// feature is enabled.
939 ///
940 /// [threads]: https://github.com/webassembly/threads
941 /// [wasi-threads]: https://github.com/webassembly/wasi-threads
942 #[cfg(feature = "threads")]
943 pub fn wasm_threads(&mut self, enable: bool) -> &mut Self {
944 self.wasm_features(WasmFeatures::THREADS, enable);
945 self
946 }
947
948 /// Configures whether the WebAssembly [shared-everything-threads] proposal
949 /// will be enabled for compilation.
950 ///
951 /// This feature gates extended use of the `shared` attribute on items other
952 /// than memories, extra atomic instructions, and new component model
953 /// intrinsics for spawning threads. It depends on the
954 /// [`wasm_threads`][Self::wasm_threads] being enabled.
955 ///
956 /// [shared-everything-threads]:
957 /// https://github.com/webassembly/shared-everything-threads
958 pub fn wasm_shared_everything_threads(&mut self, enable: bool) -> &mut Self {
959 self.wasm_features(WasmFeatures::SHARED_EVERYTHING_THREADS, enable);
960 self
961 }
962
963 /// Configures whether the [WebAssembly reference types proposal][proposal]
964 /// will be enabled for compilation.
965 ///
966 /// This feature gates items such as the `externref` and `funcref` types as
967 /// well as allowing a module to define multiple tables.
968 ///
969 /// Note that the reference types proposal depends on the bulk memory proposal.
970 ///
971 /// This feature is `true` by default.
972 ///
973 /// # Errors
974 ///
975 /// The validation of this feature are deferred until the engine is being built,
976 /// and thus may cause `Engine::new` fail if the `bulk_memory` feature is disabled.
977 ///
978 /// [proposal]: https://github.com/webassembly/reference-types
979 #[cfg(feature = "gc")]
980 pub fn wasm_reference_types(&mut self, enable: bool) -> &mut Self {
981 self.wasm_features(WasmFeatures::REFERENCE_TYPES, enable);
982 self
983 }
984
985 /// Configures whether the [WebAssembly function references
986 /// proposal][proposal] will be enabled for compilation.
987 ///
988 /// This feature gates non-nullable reference types, function reference
989 /// types, `call_ref`, `ref.func`, and non-nullable reference related
990 /// instructions.
991 ///
992 /// Note that the function references proposal depends on the reference
993 /// types proposal.
994 ///
995 /// This feature is `false` by default.
996 ///
997 /// [proposal]: https://github.com/WebAssembly/function-references
998 #[cfg(feature = "gc")]
999 pub fn wasm_function_references(&mut self, enable: bool) -> &mut Self {
1000 self.wasm_features(WasmFeatures::FUNCTION_REFERENCES, enable);
1001 self
1002 }
1003
1004 /// Configures whether the [WebAssembly wide-arithmetic][proposal] will be
1005 /// enabled for compilation.
1006 ///
1007 /// This feature is `false` by default.
1008 ///
1009 /// [proposal]: https://github.com/WebAssembly/wide-arithmetic
1010 pub fn wasm_wide_arithmetic(&mut self, enable: bool) -> &mut Self {
1011 self.wasm_features(WasmFeatures::WIDE_ARITHMETIC, enable);
1012 self
1013 }
1014
1015 /// Configures whether the [WebAssembly Garbage Collection
1016 /// proposal][proposal] will be enabled for compilation.
1017 ///
1018 /// This feature gates `struct` and `array` type definitions and references,
1019 /// the `i31ref` type, and all related instructions.
1020 ///
1021 /// Note that the function references proposal depends on the typed function
1022 /// references proposal.
1023 ///
1024 /// This feature is `false` by default.
1025 ///
1026 /// **Warning: Wasmtime's implementation of the GC proposal is still in
1027 /// progress and generally not ready for primetime.**
1028 ///
1029 /// [proposal]: https://github.com/WebAssembly/gc
1030 #[cfg(feature = "gc")]
1031 pub fn wasm_gc(&mut self, enable: bool) -> &mut Self {
1032 self.wasm_features(WasmFeatures::GC, enable);
1033 self
1034 }
1035
1036 /// Configures whether the WebAssembly SIMD proposal will be
1037 /// enabled for compilation.
1038 ///
1039 /// The [WebAssembly SIMD proposal][proposal]. This feature gates items such
1040 /// as the `v128` type and all of its operators being in a module. Note that
1041 /// this does not enable the [relaxed simd proposal].
1042 ///
1043 /// **Note**
1044 ///
1045 /// On x86_64 platforms the base CPU feature requirement for SIMD
1046 /// is SSE2 for the Cranelift compiler and AVX for the Winch compiler.
1047 ///
1048 /// This is `true` by default.
1049 ///
1050 /// [proposal]: https://github.com/webassembly/simd
1051 /// [relaxed simd proposal]: https://github.com/WebAssembly/relaxed-simd
1052 pub fn wasm_simd(&mut self, enable: bool) -> &mut Self {
1053 self.wasm_features(WasmFeatures::SIMD, enable);
1054 self
1055 }
1056
1057 /// Configures whether the WebAssembly Relaxed SIMD proposal will be
1058 /// enabled for compilation.
1059 ///
1060 /// The relaxed SIMD proposal adds new instructions to WebAssembly which,
1061 /// for some specific inputs, are allowed to produce different results on
1062 /// different hosts. More-or-less this proposal enables exposing
1063 /// platform-specific semantics of SIMD instructions in a controlled
1064 /// fashion to a WebAssembly program. From an embedder's perspective this
1065 /// means that WebAssembly programs may execute differently depending on
1066 /// whether the host is x86_64 or AArch64, for example.
1067 ///
1068 /// By default Wasmtime lowers relaxed SIMD instructions to the fastest
1069 /// lowering for the platform it's running on. This means that, by default,
1070 /// some relaxed SIMD instructions may have different results for the same
1071 /// inputs across x86_64 and AArch64. This behavior can be disabled through
1072 /// the [`Config::relaxed_simd_deterministic`] option which will force
1073 /// deterministic behavior across all platforms, as classified by the
1074 /// specification, at the cost of performance.
1075 ///
1076 /// This is `true` by default.
1077 ///
1078 /// [proposal]: https://github.com/webassembly/relaxed-simd
1079 pub fn wasm_relaxed_simd(&mut self, enable: bool) -> &mut Self {
1080 self.wasm_features(WasmFeatures::RELAXED_SIMD, enable);
1081 self
1082 }
1083
1084 /// This option can be used to control the behavior of the [relaxed SIMD
1085 /// proposal's][proposal] instructions.
1086 ///
1087 /// The relaxed SIMD proposal introduces instructions that are allowed to
1088 /// have different behavior on different architectures, primarily to afford
1089 /// an efficient implementation on all architectures. This means, however,
1090 /// that the same module may execute differently on one host than another,
1091 /// which typically is not otherwise the case. This option is provided to
1092 /// force Wasmtime to generate deterministic code for all relaxed simd
1093 /// instructions, at the cost of performance, for all architectures. When
1094 /// this option is enabled then the deterministic behavior of all
1095 /// instructions in the relaxed SIMD proposal is selected.
1096 ///
1097 /// This is `false` by default.
1098 ///
1099 /// [proposal]: https://github.com/webassembly/relaxed-simd
1100 pub fn relaxed_simd_deterministic(&mut self, enable: bool) -> &mut Self {
1101 self.tunables.relaxed_simd_deterministic = Some(enable);
1102 self
1103 }
1104
1105 /// Configures whether the [WebAssembly bulk memory operations
1106 /// proposal][proposal] will be enabled for compilation.
1107 ///
1108 /// This feature gates items such as the `memory.copy` instruction, passive
1109 /// data/table segments, etc, being in a module.
1110 ///
1111 /// This is `true` by default.
1112 ///
1113 /// Feature `reference_types`, which is also `true` by default, requires
1114 /// this feature to be enabled. Thus disabling this feature must also disable
1115 /// `reference_types` as well using [`wasm_reference_types`](crate::Config::wasm_reference_types).
1116 ///
1117 /// # Errors
1118 ///
1119 /// Disabling this feature without disabling `reference_types` will cause
1120 /// `Engine::new` to fail.
1121 ///
1122 /// [proposal]: https://github.com/webassembly/bulk-memory-operations
1123 pub fn wasm_bulk_memory(&mut self, enable: bool) -> &mut Self {
1124 self.wasm_features(WasmFeatures::BULK_MEMORY, enable);
1125 self
1126 }
1127
1128 /// Configures whether the WebAssembly multi-value [proposal] will
1129 /// be enabled for compilation.
1130 ///
1131 /// This feature gates functions and blocks returning multiple values in a
1132 /// module, for example.
1133 ///
1134 /// This is `true` by default.
1135 ///
1136 /// [proposal]: https://github.com/webassembly/multi-value
1137 pub fn wasm_multi_value(&mut self, enable: bool) -> &mut Self {
1138 self.wasm_features(WasmFeatures::MULTI_VALUE, enable);
1139 self
1140 }
1141
1142 /// Configures whether the WebAssembly multi-memory [proposal] will
1143 /// be enabled for compilation.
1144 ///
1145 /// This feature gates modules having more than one linear memory
1146 /// declaration or import.
1147 ///
1148 /// This is `true` by default.
1149 ///
1150 /// [proposal]: https://github.com/webassembly/multi-memory
1151 pub fn wasm_multi_memory(&mut self, enable: bool) -> &mut Self {
1152 self.wasm_features(WasmFeatures::MULTI_MEMORY, enable);
1153 self
1154 }
1155
1156 /// Configures whether the WebAssembly memory64 [proposal] will
1157 /// be enabled for compilation.
1158 ///
1159 /// Note that this the upstream specification is not finalized and Wasmtime
1160 /// may also have bugs for this feature since it hasn't been exercised
1161 /// much.
1162 ///
1163 /// This is `false` by default.
1164 ///
1165 /// [proposal]: https://github.com/webassembly/memory64
1166 pub fn wasm_memory64(&mut self, enable: bool) -> &mut Self {
1167 self.wasm_features(WasmFeatures::MEMORY64, enable);
1168 self
1169 }
1170
1171 /// Configures whether the WebAssembly extended-const [proposal] will
1172 /// be enabled for compilation.
1173 ///
1174 /// This is `true` by default.
1175 ///
1176 /// [proposal]: https://github.com/webassembly/extended-const
1177 pub fn wasm_extended_const(&mut self, enable: bool) -> &mut Self {
1178 self.wasm_features(WasmFeatures::EXTENDED_CONST, enable);
1179 self
1180 }
1181
1182 /// Configures whether the [WebAssembly stack switching
1183 /// proposal][proposal] will be enabled for compilation.
1184 ///
1185 /// This feature gates the use of control tags.
1186 ///
1187 /// This feature depends on the `function_reference_types` and
1188 /// `exceptions` features.
1189 ///
1190 /// This feature is `false` by default.
1191 ///
1192 /// # Errors
1193 ///
1194 /// [proposal]: https://github.com/webassembly/stack-switching
1195 pub fn wasm_stack_switching(&mut self, enable: bool) -> &mut Self {
1196 self.wasm_features(WasmFeatures::STACK_SWITCHING, enable);
1197 self
1198 }
1199
1200 /// Configures whether the WebAssembly component-model [proposal] will
1201 /// be enabled for compilation.
1202 ///
1203 /// This flag can be used to blanket disable all components within Wasmtime.
1204 /// Otherwise usage of components requires statically using
1205 /// [`Component`](crate::component::Component) instead of
1206 /// [`Module`](crate::Module) for example anyway.
1207 ///
1208 /// The default value for this option is whether the `component-model`
1209 /// crate feature of Wasmtime is enabled or not. By default this crate
1210 /// feature is enabled.
1211 ///
1212 /// [proposal]: https://github.com/webassembly/component-model
1213 #[cfg(feature = "component-model")]
1214 pub fn wasm_component_model(&mut self, enable: bool) -> &mut Self {
1215 self.wasm_features(WasmFeatures::COMPONENT_MODEL, enable);
1216 self
1217 }
1218
1219 /// Configures whether components support the async ABI [proposal] for
1220 /// lifting and lowering functions, as well as `stream`, `future`, and
1221 /// `error-context` types.
1222 ///
1223 /// Please note that Wasmtime's support for this feature is _very_
1224 /// incomplete.
1225 ///
1226 /// [proposal]:
1227 /// https://github.com/WebAssembly/component-model/blob/main/design/mvp/Concurrency.md
1228 #[cfg(feature = "component-model-async")]
1229 pub fn wasm_component_model_async(&mut self, enable: bool) -> &mut Self {
1230 self.wasm_features(WasmFeatures::CM_ASYNC, enable);
1231 self
1232 }
1233
1234 /// This corresponds to the 🚝 emoji in the component model specification.
1235 ///
1236 /// Please note that Wasmtime's support for this feature is _very_
1237 /// incomplete.
1238 ///
1239 /// [proposal]:
1240 /// https://github.com/WebAssembly/component-model/blob/main/design/mvp/Concurrency.md
1241 #[cfg(feature = "component-model-async")]
1242 pub fn wasm_component_model_async_builtins(&mut self, enable: bool) -> &mut Self {
1243 self.wasm_features(WasmFeatures::CM_ASYNC_BUILTINS, enable);
1244 self
1245 }
1246
1247 /// This corresponds to the 🚟 emoji in the component model specification.
1248 ///
1249 /// Please note that Wasmtime's support for this feature is _very_
1250 /// incomplete.
1251 ///
1252 /// [proposal]: https://github.com/WebAssembly/component-model/blob/main/design/mvp/Concurrency.md
1253 #[cfg(feature = "component-model-async")]
1254 pub fn wasm_component_model_async_stackful(&mut self, enable: bool) -> &mut Self {
1255 self.wasm_features(WasmFeatures::CM_ASYNC_STACKFUL, enable);
1256 self
1257 }
1258
1259 /// This corresponds to the 🧵 emoji in the component model specification.
1260 ///
1261 /// Please note that Wasmtime's support for this feature is _very_
1262 /// incomplete.
1263 ///
1264 /// [proposal]:
1265 /// https://github.com/WebAssembly/component-model/pull/557
1266 #[cfg(feature = "component-model-async")]
1267 pub fn wasm_component_model_threading(&mut self, enable: bool) -> &mut Self {
1268 self.wasm_features(WasmFeatures::CM_THREADING, enable);
1269 self
1270 }
1271
1272 /// This corresponds to the 📝 emoji in the component model specification.
1273 ///
1274 /// Please note that Wasmtime's support for this feature is _very_
1275 /// incomplete.
1276 ///
1277 /// [proposal]: https://github.com/WebAssembly/component-model/blob/main/design/mvp/Concurrency.md
1278 #[cfg(feature = "component-model")]
1279 pub fn wasm_component_model_error_context(&mut self, enable: bool) -> &mut Self {
1280 self.wasm_features(WasmFeatures::CM_ERROR_CONTEXT, enable);
1281 self
1282 }
1283
1284 /// Configures whether the [GC extension to the component-model
1285 /// proposal][proposal] is enabled or not.
1286 ///
1287 /// This corresponds to the 🛸 emoji in the component model specification.
1288 ///
1289 /// Please note that Wasmtime's support for this feature is _very_
1290 /// incomplete.
1291 ///
1292 /// [proposal]: https://github.com/WebAssembly/component-model/issues/525
1293 #[cfg(feature = "component-model")]
1294 pub fn wasm_component_model_gc(&mut self, enable: bool) -> &mut Self {
1295 self.wasm_features(WasmFeatures::CM_GC, enable);
1296 self
1297 }
1298
1299 /// This corresponds to the 🔧 emoji in the component model specification.
1300 ///
1301 /// Please note that Wasmtime's support for this feature is _very_
1302 /// incomplete.
1303 #[cfg(feature = "component-model")]
1304 pub fn wasm_component_model_fixed_length_lists(&mut self, enable: bool) -> &mut Self {
1305 self.wasm_features(WasmFeatures::CM_FIXED_SIZE_LIST, enable);
1306 self
1307 }
1308
1309 /// Configures whether the [Exception-handling proposal][proposal] is enabled or not.
1310 ///
1311 /// [proposal]: https://github.com/WebAssembly/exception-handling
1312 #[cfg(feature = "gc")]
1313 pub fn wasm_exceptions(&mut self, enable: bool) -> &mut Self {
1314 self.wasm_features(WasmFeatures::EXCEPTIONS, enable);
1315 self
1316 }
1317
1318 #[doc(hidden)] // FIXME(#3427) - if/when implemented then un-hide this
1319 #[deprecated = "This configuration option only exists for internal \
1320 usage with the spec testsuite. It may be removed at \
1321 any time and without warning. Do not rely on it!"]
1322 pub fn wasm_legacy_exceptions(&mut self, enable: bool) -> &mut Self {
1323 self.wasm_features(WasmFeatures::LEGACY_EXCEPTIONS, enable);
1324 self
1325 }
1326
1327 /// Configures which compilation strategy will be used for wasm modules.
1328 ///
1329 /// This method can be used to configure which compiler is used for wasm
1330 /// modules, and for more documentation consult the [`Strategy`] enumeration
1331 /// and its documentation.
1332 ///
1333 /// The default value for this is `Strategy::Auto`.
1334 ///
1335 /// # Panics
1336 ///
1337 /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
1338 #[cfg(any(feature = "cranelift", feature = "winch"))]
1339 pub fn strategy(&mut self, strategy: Strategy) -> &mut Self {
1340 self.compiler_config_mut().strategy = strategy.not_auto();
1341 self
1342 }
1343
1344 /// Configures which garbage collector will be used for Wasm modules.
1345 ///
1346 /// This method can be used to configure which garbage collector
1347 /// implementation is used for Wasm modules. For more documentation, consult
1348 /// the [`Collector`] enumeration and its documentation.
1349 ///
1350 /// The default value for this is `Collector::Auto`.
1351 #[cfg(feature = "gc")]
1352 pub fn collector(&mut self, collector: Collector) -> &mut Self {
1353 self.collector = collector;
1354 self
1355 }
1356
1357 /// Creates a default profiler based on the profiling strategy chosen.
1358 ///
1359 /// Profiler creation calls the type's default initializer where the purpose is
1360 /// really just to put in place the type used for profiling.
1361 ///
1362 /// Some [`ProfilingStrategy`] require specific platforms or particular feature
1363 /// to be enabled, such as `ProfilingStrategy::JitDump` requires the `jitdump`
1364 /// feature.
1365 ///
1366 /// # Errors
1367 ///
1368 /// The validation of this field is deferred until the engine is being built, and thus may
1369 /// cause `Engine::new` fail if the required feature is disabled, or the platform is not
1370 /// supported.
1371 pub fn profiler(&mut self, profile: ProfilingStrategy) -> &mut Self {
1372 self.profiling_strategy = profile;
1373 self
1374 }
1375
1376 /// Configures whether the debug verifier of Cranelift is enabled or not.
1377 ///
1378 /// When Cranelift is used as a code generation backend this will configure
1379 /// it to have the `enable_verifier` flag which will enable a number of debug
1380 /// checks inside of Cranelift. This is largely only useful for the
1381 /// developers of wasmtime itself.
1382 ///
1383 /// The default value for this is `false`
1384 ///
1385 /// # Panics
1386 ///
1387 /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
1388 #[cfg(any(feature = "cranelift", feature = "winch"))]
1389 pub fn cranelift_debug_verifier(&mut self, enable: bool) -> &mut Self {
1390 let val = if enable { "true" } else { "false" };
1391 self.compiler_config_mut()
1392 .settings
1393 .insert("enable_verifier".to_string(), val.to_string());
1394 self
1395 }
1396
1397 /// Configures whether extra debug checks are inserted into
1398 /// Wasmtime-generated code by Cranelift.
1399 ///
1400 /// The default value for this is `false`
1401 ///
1402 /// # Panics
1403 ///
1404 /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
1405 #[cfg(any(feature = "cranelift", feature = "winch"))]
1406 pub fn cranelift_wasmtime_debug_checks(&mut self, enable: bool) -> &mut Self {
1407 unsafe { self.cranelift_flag_set("wasmtime_debug_checks", &enable.to_string()) }
1408 }
1409
1410 /// Configures the Cranelift code generator optimization level.
1411 ///
1412 /// When the Cranelift code generator is used you can configure the
1413 /// optimization level used for generated code in a few various ways. For
1414 /// more information see the documentation of [`OptLevel`].
1415 ///
1416 /// The default value for this is `OptLevel::Speed`.
1417 ///
1418 /// # Panics
1419 ///
1420 /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
1421 #[cfg(any(feature = "cranelift", feature = "winch"))]
1422 pub fn cranelift_opt_level(&mut self, level: OptLevel) -> &mut Self {
1423 let val = match level {
1424 OptLevel::None => "none",
1425 OptLevel::Speed => "speed",
1426 OptLevel::SpeedAndSize => "speed_and_size",
1427 };
1428 self.compiler_config_mut()
1429 .settings
1430 .insert("opt_level".to_string(), val.to_string());
1431 self
1432 }
1433
1434 /// Configures the regalloc algorithm used by the Cranelift code generator.
1435 ///
1436 /// Cranelift can select any of several register allocator algorithms. Each
1437 /// of these algorithms generates correct code, but they represent different
1438 /// tradeoffs between compile speed (how expensive the compilation process
1439 /// is) and run-time speed (how fast the generated code runs).
1440 /// For more information see the documentation of [`RegallocAlgorithm`].
1441 ///
1442 /// The default value for this is `RegallocAlgorithm::Backtracking`.
1443 ///
1444 /// # Panics
1445 ///
1446 /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
1447 #[cfg(any(feature = "cranelift", feature = "winch"))]
1448 pub fn cranelift_regalloc_algorithm(&mut self, algo: RegallocAlgorithm) -> &mut Self {
1449 let val = match algo {
1450 RegallocAlgorithm::Backtracking => "backtracking",
1451 RegallocAlgorithm::SinglePass => "single_pass",
1452 };
1453 self.compiler_config_mut()
1454 .settings
1455 .insert("regalloc_algorithm".to_string(), val.to_string());
1456 self
1457 }
1458
1459 /// Configures whether Cranelift should perform a NaN-canonicalization pass.
1460 ///
1461 /// When Cranelift is used as a code generation backend this will configure
1462 /// it to replace NaNs with a single canonical value. This is useful for
1463 /// users requiring entirely deterministic WebAssembly computation. This is
1464 /// not required by the WebAssembly spec, so it is not enabled by default.
1465 ///
1466 /// Note that this option affects not only WebAssembly's `f32` and `f64`
1467 /// types but additionally the `v128` type. This option will cause
1468 /// operations using any of these types to have extra checks placed after
1469 /// them to normalize NaN values as needed.
1470 ///
1471 /// The default value for this is `false`
1472 ///
1473 /// # Panics
1474 ///
1475 /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
1476 #[cfg(any(feature = "cranelift", feature = "winch"))]
1477 pub fn cranelift_nan_canonicalization(&mut self, enable: bool) -> &mut Self {
1478 let val = if enable { "true" } else { "false" };
1479 self.compiler_config_mut()
1480 .settings
1481 .insert("enable_nan_canonicalization".to_string(), val.to_string());
1482 self
1483 }
1484
1485 /// Controls whether proof-carrying code (PCC) is used to validate
1486 /// lowering of Wasm sandbox checks.
1487 ///
1488 /// Proof-carrying code carries "facts" about program values from
1489 /// the IR all the way to machine code, and checks those facts
1490 /// against known machine-instruction semantics. This guards
1491 /// against bugs in instruction lowering that might create holes
1492 /// in the Wasm sandbox.
1493 ///
1494 /// PCC is designed to be fast: it does not require complex
1495 /// solvers or logic engines to verify, but only a linear pass
1496 /// over a trail of "breadcrumbs" or facts at each intermediate
1497 /// value. Thus, it is appropriate to enable in production.
1498 ///
1499 /// # Panics
1500 ///
1501 /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
1502 #[cfg(any(feature = "cranelift", feature = "winch"))]
1503 pub fn cranelift_pcc(&mut self, enable: bool) -> &mut Self {
1504 let val = if enable { "true" } else { "false" };
1505 self.compiler_config_mut()
1506 .settings
1507 .insert("enable_pcc".to_string(), val.to_string());
1508 self
1509 }
1510
1511 /// Allows setting a Cranelift boolean flag or preset. This allows
1512 /// fine-tuning of Cranelift settings.
1513 ///
1514 /// Since Cranelift flags may be unstable, this method should not be considered to be stable
1515 /// either; other `Config` functions should be preferred for stability.
1516 ///
1517 /// # Safety
1518 ///
1519 /// This is marked as unsafe, because setting the wrong flag might break invariants,
1520 /// resulting in execution hazards.
1521 ///
1522 /// # Errors
1523 ///
1524 /// The validation of the flags are deferred until the engine is being built, and thus may
1525 /// cause `Engine::new` fail if the flag's name does not exist, or the value is not appropriate
1526 /// for the flag type.
1527 ///
1528 /// # Panics
1529 ///
1530 /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
1531 #[cfg(any(feature = "cranelift", feature = "winch"))]
1532 pub unsafe fn cranelift_flag_enable(&mut self, flag: &str) -> &mut Self {
1533 self.compiler_config_mut().flags.insert(flag.to_string());
1534 self
1535 }
1536
1537 /// Allows settings another Cranelift flag defined by a flag name and value. This allows
1538 /// fine-tuning of Cranelift settings.
1539 ///
1540 /// Since Cranelift flags may be unstable, this method should not be considered to be stable
1541 /// either; other `Config` functions should be preferred for stability.
1542 ///
1543 /// # Safety
1544 ///
1545 /// This is marked as unsafe, because setting the wrong flag might break invariants,
1546 /// resulting in execution hazards.
1547 ///
1548 /// # Errors
1549 ///
1550 /// The validation of the flags are deferred until the engine is being built, and thus may
1551 /// cause `Engine::new` fail if the flag's name does not exist, or incompatible with other
1552 /// settings.
1553 ///
1554 /// For example, feature `wasm_backtrace` will set `unwind_info` to `true`, but if it's
1555 /// manually set to false then it will fail.
1556 ///
1557 /// # Panics
1558 ///
1559 /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
1560 #[cfg(any(feature = "cranelift", feature = "winch"))]
1561 pub unsafe fn cranelift_flag_set(&mut self, name: &str, value: &str) -> &mut Self {
1562 self.compiler_config_mut()
1563 .settings
1564 .insert(name.to_string(), value.to_string());
1565 self
1566 }
1567
    /// Set a custom [`Cache`].
    ///
    /// To load a cache configuration from a file, use [`Cache::from_file`]. Otherwise, you can
    /// create a new cache config using [`CacheConfig::new`] and passing that to [`Cache::new`].
    ///
    /// If you want to disable the cache, you can call this method with `None`.
    ///
    /// By default, new configs do not have caching enabled.
    /// Every call to [`Module::new(my_wasm)`][crate::Module::new] will recompile `my_wasm`,
    /// even when it is unchanged, unless an enabled `CacheConfig` is provided.
    ///
    /// This method is only available when the `cache` feature of this crate is
    /// enabled.
    ///
    /// More information about the cache system can be found in the [docs].
    ///
    /// [docs]: https://bytecodealliance.github.io/wasmtime/cli-cache.html
    #[cfg(feature = "cache")]
    pub fn cache(&mut self, cache: Option<Cache>) -> &mut Self {
        self.cache = cache;
        self
    }
1588
1589 /// Sets a custom memory creator.
1590 ///
1591 /// Custom memory creators are used when creating host `Memory` objects or when
1592 /// creating instance linear memories for the on-demand instance allocation strategy.
1593 #[cfg(feature = "runtime")]
1594 pub fn with_host_memory(&mut self, mem_creator: Arc<dyn MemoryCreator>) -> &mut Self {
1595 self.mem_creator = Some(Arc::new(MemoryCreatorProxy(mem_creator)));
1596 self
1597 }
1598
1599 /// Sets a custom stack creator.
1600 ///
1601 /// Custom memory creators are used when creating creating async instance stacks for
1602 /// the on-demand instance allocation strategy.
1603 #[cfg(feature = "async")]
1604 pub fn with_host_stack(&mut self, stack_creator: Arc<dyn StackCreator>) -> &mut Self {
1605 self.stack_creator = Some(Arc::new(StackCreatorProxy(stack_creator)));
1606 self
1607 }
1608
    /// Sets a custom executable-memory publisher.
    ///
    /// Custom executable-memory publishers are hooks that allow
    /// Wasmtime to make certain regions of memory executable when
    /// loading precompiled modules or compiling new modules
    /// in-process. In most modern operating systems, memory allocated
    /// for heap usage is readable and writable by default but not
    /// executable. To jump to machine code stored in that memory, we
    /// need to make it executable. For security reasons, we usually
    /// also make it read-only at the same time, so the executing code
    /// can't be modified later.
    ///
    /// By default, Wasmtime will use the appropriate system calls on
    /// the host platform for this work. However, it also allows
    /// plugging in a custom implementation via this configuration
    /// option. This may be useful on custom or `no_std` platforms,
    /// for example, especially where virtual memory is not otherwise
    /// used by Wasmtime (no `signals-and-traps` feature).
    ///
    /// Passing `None` removes any previously configured publisher and
    /// restores the default behavior described above.
    #[cfg(feature = "runtime")]
    pub fn with_custom_code_memory(
        &mut self,
        custom_code_memory: Option<Arc<dyn CustomCodeMemory>>,
    ) -> &mut Self {
        self.custom_code_memory = custom_code_memory;
        self
    }
1635
1636 /// Sets the instance allocation strategy to use.
1637 ///
1638 /// This is notably used in conjunction with
1639 /// [`InstanceAllocationStrategy::Pooling`] and [`PoolingAllocationConfig`].
1640 pub fn allocation_strategy(
1641 &mut self,
1642 strategy: impl Into<InstanceAllocationStrategy>,
1643 ) -> &mut Self {
1644 self.allocation_strategy = strategy.into();
1645 self
1646 }
1647
    /// Specifies the capacity of linear memories, in bytes, in their initial
    /// allocation.
    ///
    /// > Note: this value has important performance ramifications, be sure to
    /// > benchmark when setting this to a non-default value and read over this
    /// > documentation.
    ///
    /// This function will change the size of the initial memory allocation made
    /// for linear memories. This setting is only applicable when the initial
    /// size of a linear memory is below this threshold. Linear memories are
    /// allocated in the virtual address space of the host process with OS APIs
    /// such as `mmap` and this setting affects how large the allocation will
    /// be.
    ///
    /// ## Background: WebAssembly Linear Memories
    ///
    /// WebAssembly linear memories always start with a minimum size and can
    /// possibly grow up to a maximum size. The minimum size is always specified
    /// in a WebAssembly module itself and the maximum size can either be
    /// optionally specified in the module or inherently limited by the index
    /// type. For example for this module:
    ///
    /// ```wasm
    /// (module
    ///     (memory $a 4)
    ///     (memory $b 4096 4096 (pagesize 1))
    ///     (memory $c i64 10)
    /// )
    /// ```
    ///
    /// * Memory `$a` initially allocates 4 WebAssembly pages (256KiB) and can
    ///   grow up to 4GiB, the limit of the 32-bit index space.
    /// * Memory `$b` initially allocates 4096 WebAssembly pages, but in this
    ///   case its page size is 1, so it's 4096 bytes. Memory can also grow no
    ///   further meaning that it will always be 4096 bytes.
    /// * Memory `$c` is a 64-bit linear memory which starts with 640KiB of
    ///   memory and can theoretically grow up to 2^64 bytes, although most
    ///   hosts will run out of memory long before that.
    ///
    /// All operations on linear memories done by wasm are required to be
    /// in-bounds. Any access beyond the end of a linear memory is considered a
    /// trap.
    ///
    /// ## What this setting affects: Virtual Memory
    ///
    /// This setting is used to configure the behavior of the size of the linear
    /// memory allocation performed for each of these memories. For example the
    /// initial linear memory allocation looks like this:
    ///
    /// ```text
    ///             memory_reservation
    ///                     │
    ///          ◄──────────┴─────────────►
    /// ┌───────┬─────────┬──────────────────┬───────┐
    /// │ guard │ initial │ ... capacity ... │ guard │
    /// └───────┴─────────┴──────────────────┴───────┘
    ///  ◄──┬──►                              ◄──┬──►
    ///     │                                    │
    ///     │                           memory_guard_size
    ///     │
    ///     │
    ///  memory_guard_size (if guard_before_linear_memory)
    /// ```
    ///
    /// Memory in the `initial` range is accessible to the instance and can be
    /// read/written by wasm code. Memory in the `guard` regions is never
    /// accessible to wasm code and memory in `capacity` is initially
    /// inaccessible but may become accessible through `memory.grow` instructions
    /// for example.
    ///
    /// This means that this setting is the size of the initial chunk of virtual
    /// memory that a linear memory may grow into.
    ///
    /// ## What this setting affects: Runtime Speed
    ///
    /// This is a performance-sensitive setting which is taken into account
    /// during the compilation process of a WebAssembly module. For example if a
    /// 32-bit WebAssembly linear memory has a `memory_reservation` size of 4GiB
    /// then bounds checks can be elided because `capacity` will be guaranteed
    /// to be unmapped for all addressable bytes that wasm can access (modulo a
    /// few details).
    ///
    /// If `memory_reservation` was something smaller like 256KiB then that
    /// would have a much smaller impact on virtual memory but the compiled code
    /// would then need to have explicit bounds checks to ensure that
    /// loads/stores are in-bounds.
    ///
    /// The goal of this setting is to enable skipping bounds checks in most
    /// modules by default. Some situations which require explicit bounds checks
    /// though are:
    ///
    /// * When `memory_reservation` is smaller than the addressable size of the
    ///   linear memory. For example 64-bit linear memories always need
    ///   bounds checks as they can address the entire virtual address space.
    ///   For 32-bit linear memories a `memory_reservation` minimum size of 4GiB
    ///   is required to elide bounds checks.
    ///
    /// * When linear memories have a page size of 1 then bounds checks are
    ///   required. In this situation virtual memory can't be relied upon
    ///   because that operates at the host page size granularity where wasm
    ///   requires a per-byte level granularity.
    ///
    /// * Configuration settings such as [`Config::signals_based_traps`] can be
    ///   used to disable the use of signal handlers and virtual memory so
    ///   explicit bounds checks are required.
    ///
    /// * When [`Config::memory_guard_size`] is too small a bounds check may be
    ///   required. For 32-bit wasm addresses are actually 33-bit effective
    ///   addresses because loads/stores have a 32-bit static offset to add to
    ///   the dynamic 32-bit address. If the static offset is larger than the
    ///   size of the guard region then an explicit bounds check is required.
    ///
    /// ## What this setting affects: Memory Growth Behavior
    ///
    /// In addition to affecting bounds checks emitted in compiled code this
    /// setting also affects how WebAssembly linear memories are grown. The
    /// `memory.grow` instruction can be used to make a linear memory larger and
    /// this is also affected by APIs such as
    /// [`Memory::grow`](crate::Memory::grow).
    ///
    /// In these situations when the amount being grown is small enough to fit
    /// within the remaining capacity then the linear memory doesn't have to be
    /// moved at runtime. If the capacity runs out though then a new linear
    /// memory allocation must be made and the contents of linear memory are
    /// copied over.
    ///
    /// For example here's a situation where a copy happens:
    ///
    /// * The `memory_reservation` setting is configured to 128KiB.
    /// * A WebAssembly linear memory starts with a single 64KiB page.
    /// * This memory can be grown by one page to contain the full 128KiB of
    ///   memory.
    /// * If grown by one more page, though, then a 192KiB allocation must be
    ///   made and the previous 128KiB of contents are copied into the new
    ///   allocation.
    ///
    /// This growth behavior can have a significant performance impact if lots
    /// of data needs to be copied on growth. Conversely if memory growth never
    /// needs to happen because the capacity will always be large enough then
    /// optimizations can be applied to cache the base pointer of linear memory.
    ///
    /// When memory is grown then the
    /// [`Config::memory_reservation_for_growth`] is used for the new
    /// memory allocation to have memory to grow into.
    ///
    /// When using the pooling allocator via [`PoolingAllocationConfig`] then
    /// memories are never allowed to move so requests for growth are instead
    /// rejected with an error.
    ///
    /// ## When this setting is not used
    ///
    /// This setting is ignored and unused when the initial size of linear
    /// memory is larger than this threshold. For example if this setting is set
    /// to 1MiB but a wasm module requires a 2MiB minimum allocation then this
    /// setting is ignored. In this situation the minimum size of memory will be
    /// allocated along with [`Config::memory_reservation_for_growth`]
    /// after it to grow into.
    ///
    /// That means that this value can be set to zero. That can be useful in
    /// benchmarking to see the overhead of bounds checks for example.
    /// Additionally it can be used to minimize the virtual memory allocated by
    /// Wasmtime.
    ///
    /// ## Default Value
    ///
    /// The default value for this property depends on the host platform. For
    /// 64-bit platforms there's lots of address space available, so the default
    /// configured here is 4GiB. When coupled with the default size of
    /// [`Config::memory_guard_size`] this means that 32-bit WebAssembly linear
    /// memories with 64KiB page sizes will skip almost all bounds checks by
    /// default.
    ///
    /// For 32-bit platforms this value defaults to 10MiB. This means that
    /// bounds checks will be required on 32-bit platforms.
    pub fn memory_reservation(&mut self, bytes: u64) -> &mut Self {
        self.tunables.memory_reservation = Some(bytes);
        self
    }
1826
    /// Indicates whether linear memories may relocate their base pointer at
    /// runtime.
    ///
    /// WebAssembly linear memories either have a maximum size that's explicitly
    /// listed in the type of a memory or inherently limited by the index type
    /// of the memory (e.g. 4GiB for 32-bit linear memories). Depending on how
    /// the linear memory is allocated (see [`Config::memory_reservation`]) it
    /// may be necessary to move the memory in the host's virtual address space
    /// during growth. This option controls whether this movement is allowed or
    /// not.
    ///
    /// An example of a linear memory needing to move is when
    /// [`Config::memory_reservation`] is 0 then a linear memory will be
    /// allocated as the minimum size of the memory plus
    /// [`Config::memory_reservation_for_growth`]. When memory grows beyond the
    /// reservation for growth then the memory needs to be relocated.
    ///
    /// When this option is set to `false` then it can have a number of impacts
    /// on how memories work at runtime:
    ///
    /// * Modules can be compiled with static knowledge that the base pointer of
    ///   linear memory never changes to enable optimizations such as
    ///   loop invariant code motion (hoisting the base pointer out of a loop).
    ///
    /// * Memories cannot grow in excess of their original allocation. This
    ///   means that [`Config::memory_reservation`] and
    ///   [`Config::memory_reservation_for_growth`] may need tuning to ensure
    ///   the memory configuration works at runtime.
    ///
    /// The default value for this option is `true`.
    pub fn memory_may_move(&mut self, enable: bool) -> &mut Self {
        self.tunables.memory_may_move = Some(enable);
        self
    }
1861
    /// Configures the size, in bytes, of the guard region used at the end of a
    /// linear memory's address space reservation.
    ///
    /// > Note: this value has important performance ramifications, be sure to
    /// > understand what this value does before tweaking it and benchmarking.
    ///
    /// This setting controls how many bytes are guaranteed to be unmapped after
    /// the virtual memory allocation of a linear memory. When
    /// combined with sufficiently large values of
    /// [`Config::memory_reservation`] (e.g. 4GiB for 32-bit linear memories)
    /// then a guard region can be used to eliminate bounds checks in generated
    /// code.
    ///
    /// This setting additionally can be used to help deduplicate bounds checks
    /// in code that otherwise requires bounds checks. For example, with a 4KiB
    /// guard region, a 64-bit linear memory which accesses addresses `x+8`
    /// and `x+16` only needs to perform a single bounds check on `x`. If that
    /// bounds check passes then the offset is guaranteed to either reside in
    /// linear memory or the guard region, resulting in deterministic behavior
    /// either way.
    ///
    /// ## How big should the guard be?
    ///
    /// In general, like with configuring [`Config::memory_reservation`], you
    /// probably don't want to change this value from the defaults. Removing
    /// bounds checks is dependent on a number of factors where the size of the
    /// guard region is only one piece of the equation. Other factors include:
    ///
    /// * [`Config::memory_reservation`]
    /// * The index type of the linear memory (e.g. 32-bit or 64-bit)
    /// * The page size of the linear memory
    /// * Other settings such as [`Config::signals_based_traps`]
    ///
    /// Embeddings using virtual memory almost always want at least some guard
    /// region, but otherwise changes from the default should be profiled
    /// locally to see the performance impact.
    ///
    /// ## Default
    ///
    /// The default value for this property is 32MiB on 64-bit platforms. This
    /// allows eliminating almost all bounds checks on loads/stores with an
    /// immediate offset of less than 32MiB. On 32-bit platforms this defaults
    /// to 64KiB.
    pub fn memory_guard_size(&mut self, bytes: u64) -> &mut Self {
        self.tunables.memory_guard_size = Some(bytes);
        self
    }
1909
    /// Configures the size, in bytes, of the extra virtual memory space
    /// reserved after a linear memory is relocated.
    ///
    /// This setting is used in conjunction with [`Config::memory_reservation`]
    /// to configure what happens after a linear memory is relocated in the host
    /// address space. If the initial size of a linear memory exceeds
    /// [`Config::memory_reservation`] or if it grows beyond that size
    /// throughout its lifetime then this setting will be used.
    ///
    /// When a linear memory is relocated it will initially look like this:
    ///
    /// ```text
    ///            memory.size
    ///                 │
    ///          ◄──────┴─────►
    /// ┌───────┬──────────────┬───────┐
    /// │ guard │  accessible  │ guard │
    /// └───────┴──────────────┴───────┘
    ///                         ◄──┬──►
    ///                            │
    ///                     memory_guard_size
    /// ```
    ///
    /// where `accessible` needs to be grown but there's no more memory to grow
    /// into. A new region of the virtual address space will be allocated that
    /// looks like this:
    ///
    /// ```text
    ///                          memory_reservation_for_growth
    ///                                        │
    ///            memory.size                 │
    ///                 │                      │
    ///          ◄──────┴─────► ◄──────────────┴────────────►
    /// ┌───────┬──────────────┬───────────────────────────┬───────┐
    /// │ guard │  accessible  │ .. reserved for growth .. │ guard │
    /// └───────┴──────────────┴───────────────────────────┴───────┘
    ///                                                     ◄──┬──►
    ///                                                        │
    ///                                                 memory_guard_size
    /// ```
    ///
    /// This means that up to `memory_reservation_for_growth` bytes can be
    /// allocated before the entire linear memory needs to be moved again, at
    /// which point another `memory_reservation_for_growth` bytes will be
    /// appended to the size of the allocation.
    ///
    /// Note that this is currently a simple heuristic for optimizing the growth
    /// of dynamic memories, primarily implemented for the memory64 proposal
    /// where the maximum size of memory is larger than 4GiB. This setting is
    /// unlikely to be a one-size-fits-all style approach and if you're an
    /// embedder running into issues with growth and are interested in having
    /// other growth strategies available here please feel free to [open an
    /// issue on the Wasmtime repository][issue]!
    ///
    /// [issue]: https://github.com/bytecodealliance/wasmtime/issues/new
    ///
    /// ## Default
    ///
    /// For 64-bit platforms this defaults to 2GiB, and for 32-bit platforms
    /// this defaults to 1MiB.
    pub fn memory_reservation_for_growth(&mut self, bytes: u64) -> &mut Self {
        self.tunables.memory_reservation_for_growth = Some(bytes);
        self
    }
1974
1975 /// Indicates whether a guard region is present before allocations of
1976 /// linear memory.
1977 ///
1978 /// Guard regions before linear memories are never used during normal
1979 /// operation of WebAssembly modules, even if they have out-of-bounds
1980 /// loads. The only purpose for a preceding guard region in linear memory
1981 /// is extra protection against possible bugs in code generators like
1982 /// Cranelift. This setting does not affect performance in any way, but will
1983 /// result in larger virtual memory reservations for linear memories (it
1984 /// won't actually ever use more memory, just use more of the address
1985 /// space).
1986 ///
1987 /// The size of the guard region before linear memory is the same as the
1988 /// guard size that comes after linear memory, which is configured by
1989 /// [`Config::memory_guard_size`].
1990 ///
1991 /// ## Default
1992 ///
1993 /// This value defaults to `true`.
1994 pub fn guard_before_linear_memory(&mut self, enable: bool) -> &mut Self {
1995 self.tunables.guard_before_linear_memory = Some(enable);
1996 self
1997 }
1998
1999 /// Indicates whether to initialize tables lazily, so that instantiation
2000 /// is fast but indirect calls are a little slower. If false, tables
2001 /// are initialized eagerly during instantiation from any active element
2002 /// segments that apply to them.
2003 ///
2004 /// **Note** Disabling this option is not compatible with the Winch compiler.
2005 ///
2006 /// ## Default
2007 ///
2008 /// This value defaults to `true`.
2009 pub fn table_lazy_init(&mut self, table_lazy_init: bool) -> &mut Self {
2010 self.tunables.table_lazy_init = Some(table_lazy_init);
2011 self
2012 }
2013
2014 /// Configure the version information used in serialized and deserialized [`crate::Module`]s.
2015 /// This effects the behavior of [`crate::Module::serialize()`], as well as
2016 /// [`crate::Module::deserialize()`] and related functions.
2017 ///
2018 /// The default strategy is to use the wasmtime crate's Cargo package version.
2019 pub fn module_version(&mut self, strategy: ModuleVersionStrategy) -> Result<&mut Self> {
2020 match strategy {
2021 // This case requires special precondition for assertion in SerializedModule::to_bytes
2022 ModuleVersionStrategy::Custom(ref v) => {
2023 if v.as_bytes().len() > 255 {
2024 bail!("custom module version cannot be more than 255 bytes: {v}");
2025 }
2026 }
2027 _ => {}
2028 }
2029 self.module_version = strategy;
2030 Ok(self)
2031 }
2032
2033 /// Configure whether wasmtime should compile a module using multiple
2034 /// threads.
2035 ///
2036 /// Disabling this will result in a single thread being used to compile
2037 /// the wasm bytecode.
2038 ///
2039 /// By default parallel compilation is enabled.
2040 #[cfg(feature = "parallel-compilation")]
2041 pub fn parallel_compilation(&mut self, parallel: bool) -> &mut Self {
2042 self.parallel_compilation = parallel;
2043 self
2044 }
2045
2046 /// Configures whether compiled artifacts will contain information to map
2047 /// native program addresses back to the original wasm module.
2048 ///
2049 /// This configuration option is `true` by default and, if enabled,
2050 /// generates the appropriate tables in compiled modules to map from native
2051 /// address back to wasm source addresses. This is used for displaying wasm
2052 /// program counters in backtraces as well as generating filenames/line
2053 /// numbers if so configured as well (and the original wasm module has DWARF
2054 /// debugging information present).
2055 pub fn generate_address_map(&mut self, generate: bool) -> &mut Self {
2056 self.tunables.generate_address_map = Some(generate);
2057 self
2058 }
2059
    /// Configures whether copy-on-write memory-mapped data is used to
    /// initialize a linear memory.
    ///
    /// Initializing linear memory via a copy-on-write mapping can drastically
    /// improve instantiation costs of a WebAssembly module because copying
    /// memory is deferred. Additionally if a page of memory is only ever read
    /// from WebAssembly and never written to then the same underlying page of
    /// data will be reused between all instantiations of a module meaning that
    /// if a module is instantiated many times this can lower the overall memory
    /// needed to run that module.
    ///
    /// The main disadvantage of copy-on-write initialization, however, is that
    /// it may be possible for highly-parallel scenarios to be less scalable. If
    /// a page is read initially by a WebAssembly module then that page will be
    /// mapped to a read-only copy shared between all WebAssembly instances. If
    /// the same page is then written, however, then a private copy is created
    /// and swapped out from the read-only version. This also requires an [IPI],
    /// however, which can be a significant bottleneck in high-parallelism
    /// situations.
    ///
    /// This feature is only applicable when a WebAssembly module meets specific
    /// criteria to be initialized in this fashion, such as:
    ///
    /// * Only memories defined in the module can be initialized this way.
    /// * Data segments for memory must use statically known offsets.
    /// * Data segments for memory must all be in-bounds.
    ///
    /// Modules which do not meet these criteria will fall back to
    /// initialization of linear memory based on copying memory.
    ///
    /// This feature of Wasmtime is also platform-specific:
    ///
    /// * Linux - this feature is supported for all instances of [`Module`].
    ///   Modules backed by an existing mmap (such as those created by
    ///   [`Module::deserialize_file`]) will reuse that mmap to cow-initialize
    ///   memory. Other instances of [`Module`] may use the `memfd_create`
    ///   syscall to create an initialization image to `mmap`.
    /// * Unix (not Linux) - this feature is only supported when loading modules
    ///   from a precompiled file via [`Module::deserialize_file`] where there
    ///   is a file descriptor to use to map data into the process. Note that
    ///   the module must have been compiled with this setting enabled as well.
    /// * Windows - there is no support for this feature at this time. Memory
    ///   initialization will always copy bytes.
    ///
    /// By default this option is enabled.
    ///
    /// [`Module::deserialize_file`]: crate::Module::deserialize_file
    /// [`Module`]: crate::Module
    /// [IPI]: https://en.wikipedia.org/wiki/Inter-processor_interrupt
    pub fn memory_init_cow(&mut self, enable: bool) -> &mut Self {
        self.tunables.memory_init_cow = Some(enable);
        self
    }
2113
2114 /// A configuration option to force the usage of `memfd_create` on Linux to
2115 /// be used as the backing source for a module's initial memory image.
2116 ///
2117 /// When [`Config::memory_init_cow`] is enabled, which is enabled by
2118 /// default, module memory initialization images are taken from a module's
2119 /// original mmap if possible. If a precompiled module was loaded from disk
2120 /// this means that the disk's file is used as an mmap source for the
2121 /// initial linear memory contents. This option can be used to force, on
2122 /// Linux, that instead of using the original file on disk a new in-memory
2123 /// file is created with `memfd_create` to hold the contents of the initial
2124 /// image.
2125 ///
2126 /// This option can be used to avoid possibly loading the contents of memory
2127 /// from disk through a page fault. Instead with `memfd_create` the contents
2128 /// of memory are always in RAM, meaning that even page faults which
2129 /// initially populate a wasm linear memory will only work with RAM instead
2130 /// of ever hitting the disk that the original precompiled module is stored
2131 /// on.
2132 ///
2133 /// This option is disabled by default.
2134 pub fn force_memory_init_memfd(&mut self, enable: bool) -> &mut Self {
2135 self.force_memory_init_memfd = enable;
2136 self
2137 }
2138
2139 /// Configures whether or not a coredump should be generated and attached to
2140 /// the [`wasmtime::Error`] when a trap is raised.
2141 ///
2142 /// This option is disabled by default.
2143 #[cfg(feature = "coredump")]
2144 pub fn coredump_on_trap(&mut self, enable: bool) -> &mut Self {
2145 self.coredump_on_trap = enable;
2146 self
2147 }
2148
2149 /// Enables memory error checking for wasm programs.
2150 ///
2151 /// This option is disabled by default.
2152 ///
2153 /// # Panics
2154 ///
2155 /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
2156 #[cfg(any(feature = "cranelift", feature = "winch"))]
2157 pub fn wmemcheck(&mut self, enable: bool) -> &mut Self {
2158 self.wmemcheck = enable;
2159 self.compiler_config_mut().wmemcheck = enable;
2160 self
2161 }
2162
2163 /// Configures the "guaranteed dense image size" for copy-on-write
2164 /// initialized memories.
2165 ///
2166 /// When using the [`Config::memory_init_cow`] feature to initialize memory
2167 /// efficiently (which is enabled by default), compiled modules contain an
2168 /// image of the module's initial heap. If the module has a fairly sparse
2169 /// initial heap, with just a few data segments at very different offsets,
2170 /// this could result in a large region of zero bytes in the image. In
2171 /// other words, it's not very memory-efficient.
2172 ///
2173 /// We normally use a heuristic to avoid this: if less than half
2174 /// of the initialized range (first non-zero to last non-zero
2175 /// byte) of any memory in the module has pages with nonzero
2176 /// bytes, then we avoid creating a memory image for the entire module.
2177 ///
2178 /// However, if the embedder always needs the instantiation-time efficiency
2179 /// of copy-on-write initialization, and is otherwise carefully controlling
2180 /// parameters of the modules (for example, by limiting the maximum heap
2181 /// size of the modules), then it may be desirable to ensure a memory image
2182 /// is created even if this could go against the heuristic above. Thus, we
2183 /// add another condition: there is a size of initialized data region up to
2184 /// which we *always* allow a memory image. The embedder can set this to a
2185 /// known maximum heap size if they desire to always get the benefits of
2186 /// copy-on-write images.
2187 ///
2188 /// In the future we may implement a "best of both worlds"
2189 /// solution where we have a dense image up to some limit, and
2190 /// then support a sparse list of initializers beyond that; this
2191 /// would get most of the benefit of copy-on-write and pay the incremental
2192 /// cost of eager initialization only for those bits of memory
2193 /// that are out-of-bounds. However, for now, an embedder desiring
2194 /// fast instantiation should ensure that this setting is as large
2195 /// as the maximum module initial memory content size.
2196 ///
2197 /// By default this value is 16 MiB.
2198 pub fn memory_guaranteed_dense_image_size(&mut self, size_in_bytes: u64) -> &mut Self {
2199 self.memory_guaranteed_dense_image_size = size_in_bytes;
2200 self
2201 }
2202
2203 /// Whether to enable function inlining during compilation or not.
2204 ///
2205 /// This may result in faster execution at runtime, but adds additional
2206 /// compilation time. Inlining may also enlarge the size of compiled
2207 /// artifacts (for example, the size of the result of
2208 /// [`Engine::precompile_component`](crate::Engine::precompile_component)).
2209 ///
2210 /// Inlining is not supported by all of Wasmtime's compilation strategies;
2211 /// currently, it only Cranelift supports it. This setting will be ignored
2212 /// when using a compilation strategy that does not support inlining, like
2213 /// Winch.
2214 ///
2215 /// Note that inlining is still somewhat experimental at the moment (as of
2216 /// the Wasmtime version 36).
2217 pub fn compiler_inlining(&mut self, inlining: bool) -> &mut Self {
2218 self.tunables.inlining = Some(inlining);
2219 self
2220 }
2221
    /// Returns the set of features that the currently selected compiler backend
    /// does not support at all and may panic on.
    ///
    /// Wasmtime strives to reject unknown modules or unsupported modules with
    /// first-class errors instead of panics. Not all compiler backends have the
    /// same level of feature support on all platforms as well. This method
    /// returns a set of features that the currently selected compiler
    /// configuration is known to not support and may panic on. This acts as a
    /// first-level filter on incoming wasm modules/configuration to fail-fast
    /// instead of panicking later on.
    ///
    /// Note that if a feature is not listed here it does not mean that the
    /// backend fully supports the proposal. Instead that means that the backend
    /// doesn't ever panic on the proposal, but errors during compilation may
    /// still be returned. This means that features listed here are definitely
    /// not supported at all, but features not listed here may still be
    /// partially supported. For example at the time of this writing the Winch
    /// backend partially supports simd so it's not listed here. Winch doesn't
    /// fully support simd but unimplemented instructions just return errors.
    fn compiler_panicking_wasm_features(&self) -> WasmFeatures {
        // First we compute the set of features that Wasmtime itself knows;
        // this is a sort of "maximal set" that we invert to create a set
        // of features we _definitely can't support_ because wasmtime
        // has never heard of them.
        let features_known_to_wasmtime = WasmFeatures::empty()
            | WasmFeatures::MUTABLE_GLOBAL
            | WasmFeatures::SATURATING_FLOAT_TO_INT
            | WasmFeatures::SIGN_EXTENSION
            | WasmFeatures::REFERENCE_TYPES
            | WasmFeatures::CALL_INDIRECT_OVERLONG
            | WasmFeatures::MULTI_VALUE
            | WasmFeatures::BULK_MEMORY
            | WasmFeatures::BULK_MEMORY_OPT
            | WasmFeatures::SIMD
            | WasmFeatures::RELAXED_SIMD
            | WasmFeatures::THREADS
            | WasmFeatures::SHARED_EVERYTHING_THREADS
            | WasmFeatures::TAIL_CALL
            | WasmFeatures::FLOATS
            | WasmFeatures::MULTI_MEMORY
            | WasmFeatures::EXCEPTIONS
            | WasmFeatures::MEMORY64
            | WasmFeatures::EXTENDED_CONST
            | WasmFeatures::COMPONENT_MODEL
            | WasmFeatures::FUNCTION_REFERENCES
            | WasmFeatures::GC
            | WasmFeatures::CUSTOM_PAGE_SIZES
            | WasmFeatures::GC_TYPES
            | WasmFeatures::STACK_SWITCHING
            | WasmFeatures::WIDE_ARITHMETIC
            | WasmFeatures::CM_ASYNC
            | WasmFeatures::CM_ASYNC_STACKFUL
            | WasmFeatures::CM_ASYNC_BUILTINS
            | WasmFeatures::CM_THREADING
            | WasmFeatures::CM_ERROR_CONTEXT
            | WasmFeatures::CM_GC
            | WasmFeatures::CM_FIXED_SIZE_LIST;

        // Start with the inverse of the known set; backend-specific
        // restrictions below only ever add to `unsupported`.
        #[allow(unused_mut, reason = "easier to avoid #[cfg]")]
        let mut unsupported = !features_known_to_wasmtime;

        #[cfg(any(feature = "cranelift", feature = "winch"))]
        match self.compiler_config.as_ref().and_then(|c| c.strategy) {
            // No explicit strategy is treated the same as Cranelift here.
            None | Some(Strategy::Cranelift) => {
                // Pulley at this time fundamentally doesn't support the
                // `threads` proposal, notably shared memory, because Rust can't
                // safely implement loads/stores in the face of shared memory.
                // Stack switching is not implemented, either.
                if self.compiler_target().is_pulley() {
                    unsupported |= WasmFeatures::THREADS;
                    unsupported |= WasmFeatures::STACK_SWITCHING;
                }

                use target_lexicon::*;
                match self.compiler_target() {
                    Triple {
                        architecture: Architecture::X86_64 | Architecture::X86_64h,
                        operating_system:
                            OperatingSystem::Linux
                            | OperatingSystem::MacOSX(_)
                            | OperatingSystem::Darwin(_),
                        ..
                    } => {
                        // Stack switching supported on (non-Pulley) Cranelift.
                    }

                    _ => {
                        // On platforms other than x64 Unix-like, we don't
                        // support stack switching.
                        unsupported |= WasmFeatures::STACK_SWITCHING;
                    }
                }
            }
            // Proposals Winch never supports, on any architecture.
            Some(Strategy::Winch) => {
                unsupported |= WasmFeatures::GC
                    | WasmFeatures::FUNCTION_REFERENCES
                    | WasmFeatures::RELAXED_SIMD
                    | WasmFeatures::TAIL_CALL
                    | WasmFeatures::GC_TYPES
                    | WasmFeatures::EXCEPTIONS
                    | WasmFeatures::LEGACY_EXCEPTIONS
                    | WasmFeatures::STACK_SWITCHING
                    | WasmFeatures::CM_ASYNC;
                match self.compiler_target().architecture {
                    target_lexicon::Architecture::Aarch64(_) => {
                        unsupported |= WasmFeatures::THREADS;
                        unsupported |= WasmFeatures::WIDE_ARITHMETIC;
                    }

                    // Winch doesn't support other non-x64 architectures at this
                    // time either but will return a first-class error for
                    // them.
                    _ => {}
                }
            }
            // `Auto` is expected to have been resolved to a concrete
            // strategy before this point.
            Some(Strategy::Auto) => unreachable!(),
        }
        unsupported
    }
2341
2342 /// Calculates the set of features that are enabled for this `Config`.
2343 ///
2344 /// This method internally will start with the an empty set of features to
2345 /// avoid being tied to wasmparser's defaults. Next Wasmtime's set of
2346 /// default features are added to this set, some of which are conditional
2347 /// depending on crate features. Finally explicitly requested features via
2348 /// `wasm_*` methods on `Config` are applied. Everything is then validated
2349 /// later in `Config::validate`.
2350 fn features(&self) -> WasmFeatures {
2351 // Wasmtime by default supports all of the wasm 2.0 version of the
2352 // specification.
2353 let mut features = WasmFeatures::WASM2;
2354
2355 // On-by-default features that wasmtime has. Note that these are all
2356 // subject to the criteria at
2357 // https://docs.wasmtime.dev/contributing-implementing-wasm-proposals.html
2358 // and
2359 // https://docs.wasmtime.dev/stability-wasm-proposals.html
2360 features |= WasmFeatures::MULTI_MEMORY;
2361 features |= WasmFeatures::RELAXED_SIMD;
2362 features |= WasmFeatures::TAIL_CALL;
2363 features |= WasmFeatures::EXTENDED_CONST;
2364 features |= WasmFeatures::MEMORY64;
2365 // NB: if you add a feature above this line please double-check
2366 // https://docs.wasmtime.dev/stability-wasm-proposals.html
2367 // to ensure all requirements are met and/or update the documentation
2368 // there too.
2369
2370 // Set some features to their conditionally-enabled defaults depending
2371 // on crate compile-time features.
2372 features.set(WasmFeatures::GC_TYPES, cfg!(feature = "gc"));
2373 features.set(WasmFeatures::THREADS, cfg!(feature = "threads"));
2374 features.set(
2375 WasmFeatures::COMPONENT_MODEL,
2376 cfg!(feature = "component-model"),
2377 );
2378
2379 // From the default set of proposals remove any that the current
2380 // compiler backend may panic on if the module contains them.
2381 features = features & !self.compiler_panicking_wasm_features();
2382
2383 // After wasmtime's defaults are configured then factor in user requests
2384 // and disable/enable features. Note that the enable/disable sets should
2385 // be disjoint.
2386 debug_assert!((self.enabled_features & self.disabled_features).is_empty());
2387 features &= !self.disabled_features;
2388 features |= self.enabled_features;
2389
2390 features
2391 }
2392
2393 /// Returns the configured compiler target for this `Config`.
2394 pub(crate) fn compiler_target(&self) -> target_lexicon::Triple {
2395 // If a target is explicitly configured, always use that.
2396 if let Some(target) = self.target.clone() {
2397 return target;
2398 }
2399
2400 // If the `build.rs` script determined that this platform uses pulley by
2401 // default, then use Pulley.
2402 if cfg!(default_target_pulley) {
2403 return target_lexicon::Triple::pulley_host();
2404 }
2405
2406 // And at this point the target is for sure the host.
2407 target_lexicon::Triple::host()
2408 }
2409
    /// Cross-checks this configuration for internal consistency, producing
    /// the final `Tunables` and `WasmFeatures` to compile and run with.
    ///
    /// Returns an error when the requested features or settings conflict with
    /// each other, with the selected compiler backend, or with what this
    /// build of Wasmtime was compiled to support.
    pub(crate) fn validate(&self) -> Result<(Tunables, WasmFeatures)> {
        let features = self.features();

        // First validate that the selected compiler backend and configuration
        // supports the set of `features` that are enabled. This will help
        // provide more first class errors instead of panics about unsupported
        // features and configurations.
        let unsupported = features & self.compiler_panicking_wasm_features();
        if !unsupported.is_empty() {
            // Report the first offending feature by name.
            for flag in WasmFeatures::FLAGS.iter() {
                if !unsupported.contains(*flag.value()) {
                    continue;
                }
                bail!(
                    "the wasm_{} feature is not supported on this compiler configuration",
                    flag.name().to_lowercase()
                );
            }

            // `unsupported` was non-empty, so the loop above must have bailed.
            panic!("should have returned an error by now")
        }

        // Sanity-check stack sizes and build-time feature requirements.
        #[cfg(any(feature = "async", feature = "stack-switching"))]
        if self.async_support && self.max_wasm_stack > self.async_stack_size {
            bail!("max_wasm_stack size cannot exceed the async_stack_size");
        }
        if self.max_wasm_stack == 0 {
            bail!("max_wasm_stack size cannot be zero");
        }
        if !cfg!(feature = "wmemcheck") && self.wmemcheck {
            bail!("wmemcheck (memory checker) was requested but is not enabled in this build");
        }

        if !cfg!(feature = "gc") && features.gc_types() {
            bail!("support for GC was disabled at compile time")
        }

        if !cfg!(feature = "gc") && features.contains(WasmFeatures::EXCEPTIONS) {
            bail!("exceptions support requires garbage collection (GC) to be enabled in the build");
        }

        let mut tunables = Tunables::default_for_target(&self.compiler_target())?;

        // If no target is explicitly specified then further refine `tunables`
        // for the configuration of this host depending on what platform
        // features were found available at compile time. This means that anyone
        // cross-compiling for a customized host will need to further refine
        // compilation options.
        if self.target.is_none() {
            // If this platform doesn't have native signals then change some
            // defaults to account for that. Note that VM guards are turned off
            // here because that's primarily a feature of eliding
            // bounds-checks.
            if !cfg!(has_native_signals) {
                tunables.signals_based_traps = cfg!(has_native_signals);
                tunables.memory_guard_size = 0;
            }

            // When virtual memory is not available use slightly different
            // defaults for tunables to be more amenable to `MallocMemory`.
            // Note that these can still be overridden by config options.
            if !cfg!(has_virtual_memory) {
                tunables.memory_reservation = 0;
                tunables.memory_reservation_for_growth = 1 << 20; // 1MB
                tunables.memory_init_cow = false;
            }
        }

        // If guest-debugging is enabled, we must disable
        // signals-based traps. Do this before we process the user's
        // provided tunables settings so we can detect a conflict with
        // an explicit request to use signals-based traps.
        #[cfg(feature = "debug")]
        if self.tunables.debug_guest == Some(true) {
            tunables.signals_based_traps = false;
        }

        // Layer the embedder's explicit tunables over the defaults.
        self.tunables.configure(&mut tunables);

        // If we're going to compile with winch, we must use the winch calling convention.
        #[cfg(any(feature = "cranelift", feature = "winch"))]
        {
            tunables.winch_callable = self
                .compiler_config
                .as_ref()
                .is_some_and(|c| c.strategy == Some(Strategy::Winch));
        }

        // Translate the configured collector into its environ-level
        // representation; only relevant when GC types are enabled.
        tunables.collector = if features.gc_types() {
            #[cfg(feature = "gc")]
            {
                use wasmtime_environ::Collector as EnvCollector;
                Some(match self.collector.try_not_auto()? {
                    Collector::DeferredReferenceCounting => EnvCollector::DeferredReferenceCounting,
                    Collector::Null => EnvCollector::Null,
                    Collector::Auto => unreachable!(),
                })
            }
            #[cfg(not(feature = "gc"))]
            bail!("cannot use GC types: the `gc` feature was disabled at compile time")
        } else {
            None
        };

        // Final guest-debugging invariants, checked after all tunables have
        // been resolved.
        if tunables.debug_guest {
            ensure!(
                cfg!(feature = "debug"),
                "debug instrumentation support was disabled at compile time"
            );
            ensure!(
                !tunables.signals_based_traps,
                "cannot use signals-based traps with guest debugging enabled"
            );
        }

        Ok((tunables, features))
    }
2527
    /// Constructs the instance allocator selected by
    /// `self.allocation_strategy` (on-demand or, when compiled in, pooling).
    #[cfg(feature = "runtime")]
    pub(crate) fn build_allocator(
        &self,
        tunables: &Tunables,
    ) -> Result<Box<dyn InstanceAllocator + Send + Sync>> {
        // Async stack parameters only exist when the `async` feature is
        // compiled in; otherwise inert placeholder values are used.
        #[cfg(feature = "async")]
        let (stack_size, stack_zeroing) = (self.async_stack_size, self.async_stack_zeroing);

        #[cfg(not(feature = "async"))]
        let (stack_size, stack_zeroing) = (0, false);

        // `tunables` is only consumed by the pooling branch below, which may
        // be compiled out; this silences the unused-variable warning.
        let _ = tunables;

        match &self.allocation_strategy {
            InstanceAllocationStrategy::OnDemand => {
                let mut _allocator = Box::new(OnDemandInstanceAllocator::new(
                    self.mem_creator.clone(),
                    stack_size,
                    stack_zeroing,
                ));
                // A custom stack creator, when configured, is only applicable
                // to async stacks.
                #[cfg(feature = "async")]
                if let Some(stack_creator) = &self.stack_creator {
                    _allocator.set_stack_creator(stack_creator.clone());
                }
                Ok(_allocator)
            }
            #[cfg(feature = "pooling-allocator")]
            InstanceAllocationStrategy::Pooling(config) => {
                // Propagate the async stack settings into the pooling
                // configuration before building the allocator.
                let mut config = config.config;
                config.stack_size = stack_size;
                config.async_stack_zeroing = stack_zeroing;
                Ok(Box::new(crate::runtime::vm::PoolingInstanceAllocator::new(
                    &config, tunables,
                )?))
            }
        }
    }
2565
    /// Creates the GC runtime matching the configured collector, returning
    /// `Ok(None)` when GC types are not enabled in this configuration.
    #[cfg(feature = "runtime")]
    pub(crate) fn build_gc_runtime(&self) -> Result<Option<Arc<dyn GcRuntime>>> {
        if !self.features().gc_types() {
            return Ok(None);
        }

        // GC types were requested but no GC implementation is compiled in.
        #[cfg(not(feature = "gc"))]
        bail!("cannot create a GC runtime: the `gc` feature was disabled at compile time");

        #[cfg(feature = "gc")]
        #[cfg_attr(
            not(any(feature = "gc-null", feature = "gc-drc")),
            expect(unreachable_code, reason = "definitions known to be dummy")
        )]
        {
            // NOTE(review): the `unreachable!()` arms below cover collectors
            // compiled out of this build — presumably `try_not_auto` rejects
            // those before reaching here; confirm against its implementation.
            Ok(Some(match self.collector.try_not_auto()? {
                #[cfg(feature = "gc-drc")]
                Collector::DeferredReferenceCounting => {
                    Arc::new(crate::runtime::vm::DrcCollector::default()) as Arc<dyn GcRuntime>
                }
                #[cfg(not(feature = "gc-drc"))]
                Collector::DeferredReferenceCounting => unreachable!(),

                #[cfg(feature = "gc-null")]
                Collector::Null => {
                    Arc::new(crate::runtime::vm::NullCollector::default()) as Arc<dyn GcRuntime>
                }
                #[cfg(not(feature = "gc-null"))]
                Collector::Null => unreachable!(),

                Collector::Auto => unreachable!(),
            }))
        }
    }
2600
2601 #[cfg(feature = "runtime")]
2602 pub(crate) fn build_profiler(&self) -> Result<Box<dyn ProfilingAgent>> {
2603 Ok(match self.profiling_strategy {
2604 ProfilingStrategy::PerfMap => profiling_agent::new_perfmap()?,
2605 ProfilingStrategy::JitDump => profiling_agent::new_jitdump()?,
2606 ProfilingStrategy::VTune => profiling_agent::new_vtune()?,
2607 ProfilingStrategy::None => profiling_agent::new_null(),
2608 ProfilingStrategy::Pulley => profiling_agent::new_pulley()?,
2609 })
2610 }
2611
2612 #[cfg(any(feature = "cranelift", feature = "winch"))]
2613 pub(crate) fn build_compiler(
2614 mut self,
2615 tunables: &mut Tunables,
2616 features: WasmFeatures,
2617 ) -> Result<(Self, Box<dyn wasmtime_environ::Compiler>)> {
2618 let target = self.compiler_target();
2619
2620 // The target passed to the builders below is an `Option<Triple>` where
2621 // `None` represents the current host with CPU features inferred from
2622 // the host's CPU itself. The `target` above is not an `Option`, so
2623 // switch it to `None` in the case that a target wasn't explicitly
2624 // specified (which indicates no feature inference) and the target
2625 // matches the host.
2626 let target_for_builder =
2627 if self.target.is_none() && target == target_lexicon::Triple::host() {
2628 None
2629 } else {
2630 Some(target.clone())
2631 };
2632
2633 let mut compiler = match self.compiler_config_mut().strategy {
2634 #[cfg(feature = "cranelift")]
2635 Some(Strategy::Cranelift) => wasmtime_cranelift::builder(target_for_builder)?,
2636 #[cfg(not(feature = "cranelift"))]
2637 Some(Strategy::Cranelift) => bail!("cranelift support not compiled in"),
2638 #[cfg(feature = "winch")]
2639 Some(Strategy::Winch) => wasmtime_winch::builder(target_for_builder)?,
2640 #[cfg(not(feature = "winch"))]
2641 Some(Strategy::Winch) => bail!("winch support not compiled in"),
2642
2643 None | Some(Strategy::Auto) => unreachable!(),
2644 };
2645
2646 if let Some(path) = &self.compiler_config_mut().clif_dir {
2647 compiler.clif_dir(path)?;
2648 }
2649
2650 // If probestack is enabled for a target, Wasmtime will always use the
2651 // inline strategy which doesn't require us to define a `__probestack`
2652 // function or similar.
2653 self.compiler_config_mut()
2654 .settings
2655 .insert("probestack_strategy".into(), "inline".into());
2656
2657 // We enable stack probing by default on all targets.
2658 // This is required on Windows because of the way Windows
2659 // commits its stacks, but it's also a good idea on other
2660 // platforms to ensure guard pages are hit for large frame
2661 // sizes.
2662 self.compiler_config_mut()
2663 .flags
2664 .insert("enable_probestack".into());
2665
2666 // The current wasm multivalue implementation depends on this.
2667 // FIXME(#9510) handle this in wasmtime-cranelift instead.
2668 self.compiler_config_mut()
2669 .flags
2670 .insert("enable_multi_ret_implicit_sret".into());
2671
2672 if let Some(unwind_requested) = self.native_unwind_info {
2673 if !self
2674 .compiler_config_mut()
2675 .ensure_setting_unset_or_given("unwind_info", &unwind_requested.to_string())
2676 {
2677 bail!(
2678 "incompatible settings requested for Cranelift and Wasmtime `unwind-info` settings"
2679 );
2680 }
2681 }
2682
2683 if target.operating_system == target_lexicon::OperatingSystem::Windows {
2684 if !self
2685 .compiler_config_mut()
2686 .ensure_setting_unset_or_given("unwind_info", "true")
2687 {
2688 bail!("`native_unwind_info` cannot be disabled on Windows");
2689 }
2690 }
2691
2692 // We require frame pointers for correct stack walking, which is safety
2693 // critical in the presence of reference types, and otherwise it is just
2694 // really bad developer experience to get wrong.
2695 self.compiler_config_mut()
2696 .settings
2697 .insert("preserve_frame_pointers".into(), "true".into());
2698
2699 if !tunables.signals_based_traps {
2700 let mut ok = self
2701 .compiler_config_mut()
2702 .ensure_setting_unset_or_given("enable_table_access_spectre_mitigation", "false");
2703 ok = ok
2704 && self.compiler_config_mut().ensure_setting_unset_or_given(
2705 "enable_heap_access_spectre_mitigation",
2706 "false",
2707 );
2708
2709 // Right now spectre-mitigated bounds checks will load from zero so
2710 // if host-based signal handlers are disabled then that's a mismatch
2711 // and doesn't work right now. Fixing this will require more thought
2712 // of how to implement the bounds check in spectre-only mode.
2713 if !ok {
2714 bail!(
2715 "when signals-based traps are disabled then spectre \
2716 mitigations must also be disabled"
2717 );
2718 }
2719 }
2720
2721 if features.contains(WasmFeatures::RELAXED_SIMD) && !features.contains(WasmFeatures::SIMD) {
2722 bail!("cannot disable the simd proposal but enable the relaxed simd proposal");
2723 }
2724
2725 if features.contains(WasmFeatures::STACK_SWITCHING) {
2726 use target_lexicon::OperatingSystem;
2727 let model = match target.operating_system {
2728 OperatingSystem::Windows => "update_windows_tib",
2729 OperatingSystem::Linux
2730 | OperatingSystem::MacOSX(_)
2731 | OperatingSystem::Darwin(_) => "basic",
2732 _ => bail!("stack-switching feature not supported on this platform "),
2733 };
2734
2735 if !self
2736 .compiler_config_mut()
2737 .ensure_setting_unset_or_given("stack_switch_model", model)
2738 {
2739 bail!(
2740 "compiler option 'stack_switch_model' must be set to '{model}' on this platform"
2741 );
2742 }
2743 }
2744
2745 // Apply compiler settings and flags
2746 compiler.set_tunables(tunables.clone())?;
2747 for (k, v) in self.compiler_config_mut().settings.iter() {
2748 compiler.set(k, v)?;
2749 }
2750 for flag in self.compiler_config_mut().flags.iter() {
2751 compiler.enable(flag)?;
2752 }
2753 *tunables = compiler.tunables().cloned().unwrap();
2754
2755 #[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
2756 if let Some(cache_store) = &self.compiler_config_mut().cache_store {
2757 compiler.enable_incremental_compilation(cache_store.clone())?;
2758 }
2759
2760 compiler.wmemcheck(self.compiler_config_mut().wmemcheck);
2761
2762 Ok((self, compiler.build()?))
2763 }
2764
2765 /// Internal setting for whether adapter modules for components will have
2766 /// extra WebAssembly instructions inserted performing more debug checks
2767 /// then are necessary.
2768 #[cfg(feature = "component-model")]
2769 pub fn debug_adapter_modules(&mut self, debug: bool) -> &mut Self {
2770 self.tunables.debug_adapter_modules = Some(debug);
2771 self
2772 }
2773
2774 /// Enables clif output when compiling a WebAssembly module.
2775 #[cfg(any(feature = "cranelift", feature = "winch"))]
2776 pub fn emit_clif(&mut self, path: &Path) -> &mut Self {
2777 self.compiler_config_mut().clif_dir = Some(path.to_path_buf());
2778 self
2779 }
2780
2781 /// Configures whether, when on macOS, Mach ports are used for exception
2782 /// handling instead of traditional Unix-based signal handling.
2783 ///
2784 /// WebAssembly traps in Wasmtime are implemented with native faults, for
2785 /// example a `SIGSEGV` will occur when a WebAssembly guest accesses
2786 /// out-of-bounds memory. Handling this can be configured to either use Unix
2787 /// signals or Mach ports on macOS. By default Mach ports are used.
2788 ///
2789 /// Mach ports enable Wasmtime to work by default with foreign
2790 /// error-handling systems such as breakpad which also use Mach ports to
2791 /// handle signals. In this situation Wasmtime will continue to handle guest
2792 /// faults gracefully while any non-guest faults will get forwarded to
2793 /// process-level handlers such as breakpad. Some more background on this
2794 /// can be found in #2456.
2795 ///
2796 /// A downside of using mach ports, however, is that they don't interact
2797 /// well with `fork()`. Forking a Wasmtime process on macOS will produce a
2798 /// child process that cannot successfully run WebAssembly. In this
2799 /// situation traditional Unix signal handling should be used as that's
2800 /// inherited and works across forks.
2801 ///
2802 /// If your embedding wants to use a custom error handler which leverages
2803 /// Mach ports and you additionally wish to `fork()` the process and use
2804 /// Wasmtime in the child process that's not currently possible. Please
2805 /// reach out to us if you're in this bucket!
2806 ///
2807 /// This option defaults to `true`, using Mach ports by default.
2808 pub fn macos_use_mach_ports(&mut self, mach_ports: bool) -> &mut Self {
2809 self.macos_use_mach_ports = mach_ports;
2810 self
2811 }
2812
2813 /// Configures an embedder-provided function, `detect`, which is used to
2814 /// determine if an ISA-specific feature is available on the current host.
2815 ///
2816 /// This function is used to verify that any features enabled for a compiler
2817 /// backend, such as AVX support on x86\_64, are also available on the host.
2818 /// It is undefined behavior to execute an AVX instruction on a host that
2819 /// doesn't support AVX instructions, for example.
2820 ///
2821 /// When the `std` feature is active on this crate then this function is
2822 /// configured to a default implementation that uses the standard library's
2823 /// feature detection. When the `std` feature is disabled then there is no
2824 /// default available and this method must be called to configure a feature
2825 /// probing function.
2826 ///
2827 /// The `detect` function provided is given a string name of an ISA feature.
2828 /// The function should then return:
2829 ///
2830 /// * `Some(true)` - indicates that the feature was found on the host and it
2831 /// is supported.
2832 /// * `Some(false)` - the feature name was recognized but it was not
2833 /// detected on the host, for example the CPU is too old.
2834 /// * `None` - the feature name was not recognized and it's not known
2835 /// whether it's on the host or not.
2836 ///
2837 /// Feature names passed to `detect` match the same feature name used in the
2838 /// Rust standard library. For example `"sse4.2"` is used on x86\_64.
2839 ///
2840 /// # Unsafety
2841 ///
2842 /// This function is `unsafe` because it is undefined behavior to execute
2843 /// instructions that a host does not support. This means that the result of
2844 /// `detect` must be correct for memory safe execution at runtime.
2845 pub unsafe fn detect_host_feature(&mut self, detect: fn(&str) -> Option<bool>) -> &mut Self {
2846 self.detect_host_feature = Some(detect);
2847 self
2848 }
2849
2850 /// Configures Wasmtime to not use signals-based trap handlers, for example
2851 /// disables `SIGILL` and `SIGSEGV` handler registration on Unix platforms.
2852 ///
2853 /// > **Note:** this option has important performance ramifications, be sure
2854 /// > to understand the implications. Wasm programs have been measured to
2855 /// > run up to 2x slower when signals-based traps are disabled.
2856 ///
2857 /// Wasmtime will by default leverage signals-based trap handlers (or the
2858 /// platform equivalent, for example "vectored exception handlers" on
2859 /// Windows) to make generated code more efficient. For example, when
2860 /// Wasmtime can use signals-based traps, it can elide explicit bounds
2861 /// checks for Wasm linear memory accesses, instead relying on virtual
2862 /// memory guard pages to raise a `SIGSEGV` (on Unix) for out-of-bounds
2863 /// accesses, which Wasmtime's runtime then catches and handles. Another
2864 /// example is divide-by-zero: with signals-based traps, Wasmtime can let
2865 /// the hardware raise a trap when the divisor is zero. Without
2866 /// signals-based traps, Wasmtime must explicitly emit additional
2867 /// instructions to check for zero and conditionally branch to a trapping
2868 /// code path.
2869 ///
2870 /// Some environments however may not have access to signal handlers. For
2871 /// example embedded scenarios may not support virtual memory. Other
2872 /// environments where Wasmtime is embedded within the surrounding
2873 /// environment may require that new signal handlers aren't registered due
2874 /// to the global nature of signal handlers. This option exists to disable
2875 /// the signal handler registration when required for these scenarios.
2876 ///
2877 /// When signals-based trap handlers are disabled, then Wasmtime and its
2878 /// generated code will *never* rely on segfaults or other
2879 /// signals. Generated code will be slower because bounds must be explicitly
2880 /// checked along with other conditions like division by zero.
2881 ///
2882 /// The following additional factors can also affect Wasmtime's ability to
2883 /// elide explicit bounds checks and leverage signals-based traps:
2884 ///
2885 /// * The [`Config::memory_reservation`] and [`Config::memory_guard_size`]
2886 /// settings
2887 /// * The index type of the linear memory (e.g. 32-bit or 64-bit)
2888 /// * The page size of the linear memory
2889 ///
2890 /// When this option is disabled, the
2891 /// `enable_heap_access_spectre_mitigation` and
2892 /// `enable_table_access_spectre_mitigation` Cranelift settings must also be
2893 /// disabled. This means that generated code must have spectre mitigations
2894 /// disabled. This is because spectre mitigations rely on faults from
2895 /// loading from the null address to implement bounds checks.
2896 ///
2897 /// This option defaults to `true`: signals-based trap handlers are enabled
2898 /// by default.
2899 ///
2900 /// > **Note:** Disabling this option is not compatible with the Winch
2901 /// > compiler.
2902 pub fn signals_based_traps(&mut self, enable: bool) -> &mut Self {
2903 self.tunables.signals_based_traps = Some(enable);
2904 self
2905 }
2906
2907 /// Enable/disable GC support in Wasmtime entirely.
2908 ///
2909 /// This flag can be used to gate whether GC infrastructure is enabled or
2910 /// initialized in Wasmtime at all. Wasmtime's GC implementation is required
2911 /// for the [`Self::wasm_gc`] proposal, [`Self::wasm_function_references`],
    /// and [`Self::wasm_exceptions`] at this time. None of those proposals can
2913 /// be enabled without also having this option enabled.
2914 ///
2915 /// This option defaults to whether the crate `gc` feature is enabled or
2916 /// not.
2917 pub fn gc_support(&mut self, enable: bool) -> &mut Self {
2918 self.wasm_features(WasmFeatures::GC_TYPES, enable)
2919 }
2920
2921 /// Explicitly indicate or not whether the host is using a hardware float
2922 /// ABI on x86 targets.
2923 ///
2924 /// This configuration option is only applicable on the
2925 /// `x86_64-unknown-none` Rust target and has no effect on other host
2926 /// targets. The `x86_64-unknown-none` Rust target does not support hardware
2927 /// floats by default and uses a "soft float" implementation and ABI. This
2928 /// means that `f32`, for example, is passed in a general-purpose register
2929 /// between functions instead of a floating-point register. This does not
2930 /// match Cranelift's ABI for `f32` where it's passed in floating-point
2931 /// registers. Cranelift does not have support for a "soft float"
2932 /// implementation where all floating-point operations are lowered to
2933 /// libcalls.
2934 ///
2935 /// This means that for the `x86_64-unknown-none` target the ABI between
2936 /// Wasmtime's libcalls and the host is incompatible when floats are used.
2937 /// This further means that, by default, Wasmtime is unable to load native
2938 /// code when compiled to the `x86_64-unknown-none` target. The purpose of
2939 /// this option is to explicitly allow loading code and bypass this check.
2940 ///
2941 /// Setting this configuration option to `true` indicates that either:
2942 /// (a) the Rust target is compiled with the hard-float ABI manually via
2943 /// `-Zbuild-std` and a custom target JSON configuration, or (b) sufficient
2944 /// x86 features have been enabled in the compiler such that float libcalls
2945 /// will not be used in Wasmtime. For (a) there is no way in Rust at this
2946 /// time to detect whether a hard-float or soft-float ABI is in use on
2947 /// stable Rust, so this manual opt-in is required. For (b) the only
2948 /// instance where Wasmtime passes a floating-point value in a register
2949 /// between the host and compiled wasm code is with libcalls.
2950 ///
2951 /// Float-based libcalls are only used when the compilation target for a
2952 /// wasm module has insufficient target features enabled for native
2953 /// support. For example SSE4.1 is required for the `f32.ceil` WebAssembly
2954 /// instruction to be compiled to a native instruction. If SSE4.1 is not
2955 /// enabled then `f32.ceil` is translated to a "libcall" which is
2956 /// implemented on the host. Float-based libcalls can be avoided with
2957 /// sufficient target features enabled, for example:
2958 ///
2959 /// * `self.cranelift_flag_enable("has_sse3")`
2960 /// * `self.cranelift_flag_enable("has_ssse3")`
2961 /// * `self.cranelift_flag_enable("has_sse41")`
2962 /// * `self.cranelift_flag_enable("has_sse42")`
2963 /// * `self.cranelift_flag_enable("has_fma")`
2964 ///
2965 /// Note that when these features are enabled Wasmtime will perform a
2966 /// runtime check to determine that the host actually has the feature
2967 /// present.
2968 ///
2969 /// For some more discussion see [#11506].
2970 ///
2971 /// [#11506]: https://github.com/bytecodealliance/wasmtime/issues/11506
2972 ///
2973 /// # Safety
2974 ///
2975 /// This method is not safe because it cannot be detected in Rust right now
2976 /// whether the host is compiled with a soft or hard float ABI. Additionally
2977 /// if the host is compiled with a soft float ABI disabling this check does
2978 /// not ensure that the wasm module in question has zero usage of floats
2979 /// in the boundary to the host.
2980 ///
2981 /// Safely using this method requires one of:
2982 ///
2983 /// * The host target is compiled to use hardware floats.
2984 /// * Wasm modules loaded are compiled with enough x86 Cranelift features
2985 /// enabled to avoid float-related hostcalls.
    pub unsafe fn x86_float_abi_ok(&mut self, enable: bool) -> &mut Self {
        // `Option` distinguishes an explicit opt-in/opt-out from the default
        // of "not configured".
        self.x86_float_abi_ok = Some(enable);
        self
    }
2990
2991 /// Enable or disable the ability to create a
2992 /// [`SharedMemory`](crate::SharedMemory).
2993 ///
2994 /// The WebAssembly threads proposal, configured by [`Config::wasm_threads`]
2995 /// is on-by-default but there are enough deficiencies in Wasmtime's
2996 /// implementation and API integration that creation of a shared memory is
    /// disabled by default. This configuration knob can be used to enable this.
2998 ///
2999 /// When enabling this method be aware that wasm threads are, at this time,
3000 /// a [tier 2
3001 /// feature](https://docs.wasmtime.dev/stability-tiers.html#tier-2) in
3002 /// Wasmtime meaning that it will not receive security updates or fixes to
3003 /// historical releases. Additionally security CVEs will not be issued for
3004 /// bugs in the implementation.
3005 ///
3006 /// This option is `false` by default.
    pub fn shared_memory(&mut self, enable: bool) -> &mut Self {
        // Plain `bool` rather than `Option`: shared memory creation is
        // strictly opt-in and disabled by default.
        self.shared_memory = enable;
        self
    }
3011}
3012
3013impl Default for Config {
3014 fn default() -> Config {
3015 Config::new()
3016 }
3017}
3018
impl fmt::Debug for Config {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Shadow `f` with the struct builder so the rest of the body can only
        // emit well-formed `Config { .. }` fields.
        let mut f = f.debug_struct("Config");

        // Not every flag in WasmFeatures can be enabled as part of creating
        // a Config. This impl gives a complete picture of all WasmFeatures
        // enabled, and doesn't require maintenance by hand (which has become out
        // of date in the past), at the cost of possible confusion for why
        // a flag in this set doesn't have a Config setter.
        let features = self.features();
        for flag in WasmFeatures::FLAGS.iter() {
            f.field(
                &format!("wasm_{}", flag.name().to_lowercase()),
                &features.contains(*flag.value()),
            );
        }

        f.field("parallel_compilation", &self.parallel_compilation);
        // `compiler_config` only exists when a compiler backend is compiled in.
        #[cfg(any(feature = "cranelift", feature = "winch"))]
        {
            f.field("compiler_config", &self.compiler_config);
        }

        // Tunables append their own fields to the same builder.
        self.tunables.format(&mut f);
        f.finish()
    }
}
3046
/// Possible compilation strategies for a wasm module.
///
/// This is used as an argument to the [`Config::strategy`] method.
#[non_exhaustive]
#[derive(PartialEq, Eq, Clone, Debug, Copy)]
pub enum Strategy {
    /// An indicator that the compilation strategy should be automatically
    /// selected.
    ///
    /// This is generally what you want for most projects and indicates that the
    /// `wasmtime` crate itself should make the decision about what the best
    /// code generator for a wasm module is.
    ///
    /// Currently this always defaults to Cranelift, but the default value may
    /// change over time.
    Auto,

    /// Currently the default backend, Cranelift aims to be a reasonably fast
    /// code generator which generates high quality machine code.
    Cranelift,

    /// A low-latency baseline compiler for WebAssembly.
    /// For more details regarding ISA support and Wasm proposals support see
    /// <https://docs.wasmtime.dev/stability-tiers.html#current-tier-status>.
    Winch,
}
3073
#[cfg(any(feature = "winch", feature = "cranelift"))]
impl Strategy {
    /// Resolves `Auto` into a concrete strategy based on which compiler
    /// backends were enabled at compile time, preferring Cranelift over
    /// Winch. Any non-`Auto` value is returned unchanged, and `None` is
    /// returned when `Auto` cannot be resolved because no backend is built in.
    fn not_auto(&self) -> Option<Strategy> {
        // Guard clause: concrete strategies pass straight through.
        if !matches!(self, Strategy::Auto) {
            return Some(*self);
        }
        if cfg!(feature = "cranelift") {
            Some(Strategy::Cranelift)
        } else if cfg!(feature = "winch") {
            Some(Strategy::Winch)
        } else {
            None
        }
    }
}
3091
3092/// Possible garbage collector implementations for Wasm.
3093///
3094/// This is used as an argument to the [`Config::collector`] method.
3095///
3096/// The properties of Wasmtime's available collectors are summarized in the
3097/// following table:
3098///
3099/// | Collector | Collects Garbage[^1] | Latency[^2] | Throughput[^3] | Allocation Speed[^4] | Heap Utilization[^5] |
3100/// |-----------------------------|----------------------|-------------|----------------|----------------------|----------------------|
3101/// | `DeferredReferenceCounting` | Yes, but not cycles | 🙂 | 🙁 | 😐 | 😐 |
3102/// | `Null` | No | 🙂 | 🙂 | 🙂 | 🙂 |
3103///
3104/// [^1]: Whether or not the collector is capable of collecting garbage and cyclic garbage.
3105///
3106/// [^2]: How long the Wasm program is paused during garbage
3107/// collections. Shorter is better. In general, better latency implies
3108/// worse throughput and vice versa.
3109///
3110/// [^3]: How fast the Wasm program runs when using this collector. Roughly
3111/// equivalent to the number of Wasm instructions executed per
3112/// second. Faster is better. In general, better throughput implies worse
3113/// latency and vice versa.
3114///
3115/// [^4]: How fast can individual objects be allocated?
3116///
3117/// [^5]: How many objects can the collector fit into N bytes of memory? That
3118/// is, how much space for bookkeeping and metadata does this collector
3119/// require? Less space taken up by metadata means more space for
3120/// additional objects. Reference counts are larger than mark bits and
3121/// free lists are larger than bump pointers, for example.
#[non_exhaustive]
#[derive(PartialEq, Eq, Clone, Debug, Copy)]
pub enum Collector {
    /// An indicator that the garbage collector should be automatically
    /// selected.
    ///
    /// This is generally what you want for most projects and indicates that the
    /// `wasmtime` crate itself should make the decision about what the best
    /// collector for a wasm module is.
    ///
    /// Currently this always defaults to the deferred reference-counting
    /// collector, but the default value may change over time.
    Auto,

    /// The deferred reference-counting collector.
    ///
    /// A reference-counting collector, generally trading improved latency for
    /// worsened throughput. However, to avoid the largest overheads of
    /// reference counting, it avoids manipulating reference counts for Wasm
    /// objects on the stack. Instead, it will hold a reference count for an
    /// over-approximation of all objects that are currently on the stack, trace
    /// the stack during collection to find the precise set of on-stack roots,
    /// and decrement the reference count of any object that was in the
    /// over-approximation but not the precise set. This improves throughput,
    /// compared to "pure" reference counting, by performing many fewer
    /// refcount-increment and -decrement operations. The cost is the increased
    /// latency associated with tracing the stack.
    ///
    /// This collector cannot currently collect cycles; they will leak until the
    /// GC heap's store is dropped.
    DeferredReferenceCounting,

    /// The null collector.
    ///
    /// This collector does not actually collect any garbage. It simply
    /// allocates objects until it runs out of memory, at which point further
    /// object allocation attempts will trap.
    ///
    /// This collector is useful for incredibly short-running Wasm instances
    /// where additionally you would rather halt an over-allocating Wasm program
    /// than spend time collecting its garbage to allow it to keep running. It
    /// is also useful for measuring the overheads associated with other
    /// collectors, as this collector imposes as close to zero throughput and
    /// latency overhead as possible.
    Null,
}
3168
3169impl Default for Collector {
3170 fn default() -> Collector {
3171 Collector::Auto
3172 }
3173}
3174
#[cfg(feature = "gc")]
impl Collector {
    /// Resolves `Auto` to a concrete collector based on which GC features
    /// were enabled at compile time, preferring the deferred
    /// reference-counting collector. Non-`Auto` values are returned as-is;
    /// `None` means no collector implementation is compiled in.
    fn not_auto(&self) -> Option<Collector> {
        match self {
            Collector::Auto => {
                if cfg!(feature = "gc-drc") {
                    Some(Collector::DeferredReferenceCounting)
                } else if cfg!(feature = "gc-null") {
                    Some(Collector::Null)
                } else {
                    None
                }
            }
            other => Some(*other),
        }
    }

    /// Like `not_auto`, but produces a descriptive error naming the Cargo
    /// feature to enable when the requested collector is unavailable.
    fn try_not_auto(&self) -> Result<Self> {
        match self.not_auto() {
            #[cfg(feature = "gc-drc")]
            Some(c @ Collector::DeferredReferenceCounting) => Ok(c),
            #[cfg(not(feature = "gc-drc"))]
            Some(Collector::DeferredReferenceCounting) => bail!(
                "cannot create an engine using the deferred reference-counting \
                 collector because the `gc-drc` feature was not enabled at \
                 compile time",
            ),

            #[cfg(feature = "gc-null")]
            Some(c @ Collector::Null) => Ok(c),
            #[cfg(not(feature = "gc-null"))]
            Some(Collector::Null) => bail!(
                "cannot create an engine using the null collector because \
                 the `gc-null` feature was not enabled at compile time",
            ),

            // `not_auto` never returns `Auto` itself.
            Some(Collector::Auto) => unreachable!(),

            None => bail!(
                "cannot create an engine with GC support when none of the \
                 collectors are available; enable one of the following \
                 features: `gc-drc`, `gc-null`",
            ),
        }
    }
}
3221
/// Possible optimization levels for the Cranelift codegen backend.
#[non_exhaustive]
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum OptLevel {
    /// No optimizations performed, minimizes compilation time by disabling most
    /// optimizations.
    None,
    /// Generates the fastest possible code, but may take longer.
    Speed,
    /// Similar to [`OptLevel::Speed`], but also performs transformations aimed
    /// at reducing code size.
    SpeedAndSize,
}
3235
/// Possible register allocator algorithms for the Cranelift codegen backend.
#[non_exhaustive]
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum RegallocAlgorithm {
    /// Generates the fastest possible code, but may take longer.
    ///
    /// This algorithm performs "backtracking", which means that it may
    /// undo its earlier work and retry as it discovers conflicts. This
    /// results in better register utilization, producing fewer spills
    /// and moves, but can cause super-linear compile runtime.
    Backtracking,
    /// Generates acceptable code very quickly.
    ///
    /// This algorithm performs a single pass through the code,
    /// guaranteed to work in linear time. (Note that the rest of
    /// Cranelift is not necessarily guaranteed to run in linear time,
    /// however.) It cannot undo earlier decisions, and it cannot
    /// foresee constraints or issues that may occur further ahead in
    /// the code, so the code may have more spills and moves as a
    /// result.
    ///
    /// > **Note**: This algorithm is not yet production-ready and has
    /// > historically had known problems. It is not recommended to enable this
    /// > algorithm for security-sensitive applications and the Wasmtime project
    /// > does not consider this configuration option for issuing security
    /// > advisories at this time.
    SinglePass,
}
3264
/// Select which profiling technique to support.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum ProfilingStrategy {
    /// No profiler support.
    None,

    /// Collect function name information as the "perf map" file format, used
    /// with `perf` on Linux.
    PerfMap,

    /// Collect profiling info for the "jitdump" file format, used with `perf`
    /// on Linux.
    JitDump,

    /// Collect profiling info using the "ittapi" interface, used with `VTune`
    /// on Linux.
    VTune,

    /// Support for profiling Pulley, Wasmtime's interpreter. Note that enabling
    /// this at runtime requires enabling the `profile-pulley` Cargo feature at
    /// compile time.
    Pulley,
}
3286
/// Select how wasm backtrace detailed information is handled.
#[derive(Debug, Clone, Copy)]
pub enum WasmBacktraceDetails {
    /// Support is unconditionally enabled and Wasmtime will parse and read
    /// debug information.
    Enable,

    /// Support is disabled, and Wasmtime will not parse debug information for
    /// backtrace details.
    Disable,

    /// Support for backtrace details is conditional on the
    /// `WASMTIME_BACKTRACE_DETAILS` environment variable.
    Environment,
}
3302
/// Describes the tri-state configuration of features such as MPK or
/// `PAGEMAP_SCAN`.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)]
pub enum Enabled {
    /// Enable this feature if it's detected on the host system, otherwise leave
    /// it disabled.
    Auto,
    /// Enable this feature and fail configuration if the feature is not
    /// detected on the host system.
    Yes,
    /// Do not enable this feature, even if the host system supports it.
    No,
}
3315
3316/// Configuration options used with [`InstanceAllocationStrategy::Pooling`] to
3317/// change the behavior of the pooling instance allocator.
3318///
3319/// This structure has a builder-style API in the same manner as [`Config`] and
3320/// is configured with [`Config::allocation_strategy`].
3321///
3322/// Note that usage of the pooling allocator does not affect compiled
3323/// WebAssembly code. Compiled `*.cwasm` files, for example, are usable both
3324/// with and without the pooling allocator.
3325///
3326/// ## Advantages of Pooled Allocation
3327///
3328/// The main benefit of the pooling allocator is to make WebAssembly
3329/// instantiation both faster and more scalable in terms of parallelism.
3330/// Allocation is faster because virtual memory is already configured and ready
3331/// to go within the pool, there's no need to [`mmap`] (for example on Unix) a
3332/// new region and configure it with guard pages. By avoiding [`mmap`] this
3333/// avoids whole-process virtual memory locks which can improve scalability and
3334/// performance through avoiding this.
3335///
3336/// Additionally with pooled allocation it's possible to create "affine slots"
3337/// to a particular WebAssembly module or component over time. For example if
/// the same module is instantiated multiple times over time the pooling
/// allocator will, by default, attempt to reuse the same slot. This means that
/// the slot has been pre-configured and can retain virtual memory mappings for
/// a copy-on-write image, for example (see [`Config::memory_init_cow`] for
/// more information).
3342/// This means that in a steady state instance deallocation is a single
3343/// [`madvise`] to reset linear memory to its original contents followed by a
3344/// single (optional) [`mprotect`] during the next instantiation to shrink
3345/// memory back to its original size. Compared to non-pooled allocation this
3346/// avoids the need to [`mmap`] a new region of memory, [`munmap`] it, and
3347/// [`mprotect`] regions too.
3348///
3349/// Another benefit of pooled allocation is that it's possible to configure
3350/// things such that no virtual memory management is required at all in a steady
3351/// state. For example a pooling allocator can be configured with:
3352///
3353/// * [`Config::memory_init_cow`] disabled
3354/// * [`Config::memory_guard_size`] disabled
3355/// * [`Config::memory_reservation`] shrunk to minimal size
3356/// * [`PoolingAllocationConfig::table_keep_resident`] sufficiently large
3357/// * [`PoolingAllocationConfig::linear_memory_keep_resident`] sufficiently large
3358///
3359/// With all these options in place no virtual memory tricks are used at all and
3360/// everything is manually managed by Wasmtime (for example resetting memory is
3361/// a `memset(0)`). This is not as fast in a single-threaded scenario but can
3362/// provide benefits in high-parallelism situations as no virtual memory locks
3363/// or IPIs need happen.
3364///
3365/// ## Disadvantages of Pooled Allocation
3366///
3367/// Despite the above advantages to instantiation performance the pooling
3368/// allocator is not enabled by default in Wasmtime. One reason is that the
3369/// performance advantages are not necessarily portable, for example while the
3370/// pooling allocator works on Windows it has not been tuned for performance on
3371/// Windows in the same way it has on Linux.
3372///
3373/// Additionally the main cost of the pooling allocator is that it requires a
3374/// very large reservation of virtual memory (on the order of most of the
3375/// addressable virtual address space). WebAssembly 32-bit linear memories in
3376/// Wasmtime are, by default 4G address space reservations with a small guard
3377/// region both before and after the linear memory. Memories in the pooling
3378/// allocator are contiguous which means that we only need a guard after linear
3379/// memory because the previous linear memory's slot post-guard is our own
3380/// pre-guard. This means that, by default, the pooling allocator uses roughly
3381/// 4G of virtual memory per WebAssembly linear memory slot. 4G of virtual
3382/// memory is 32 bits of a 64-bit address. Many 64-bit systems can only
3383/// actually use 48-bit addresses by default (although this can be extended on
3384/// architectures nowadays too), and of those 48 bits one of them is reserved
3385/// to indicate kernel-vs-userspace. This leaves 47-32=15 bits left,
3386/// meaning you can only have at most 32k slots of linear memories on many
3387/// systems by default. This is a relatively small number and shows how the
3388/// pooling allocator can quickly exhaust all of virtual memory.
3389///
3390/// Another disadvantage of the pooling allocator is that it may keep memory
3391/// alive when nothing is using it. A previously used slot for an instance might
3392/// have paged-in memory that will not get paged out until the
3393/// [`Engine`](crate::Engine) owning the pooling allocator is dropped. While
3394/// suitable for some applications this behavior may not be suitable for all
3395/// applications.
3396///
3397/// Finally the last disadvantage of the pooling allocator is that the
3398/// configuration values for the maximum number of instances, memories, tables,
3399/// etc, must all be fixed up-front. There's not always a clear answer as to
3400/// what these values should be so not all applications may be able to work
3401/// with this constraint.
3402///
3403/// [`madvise`]: https://man7.org/linux/man-pages/man2/madvise.2.html
3404/// [`mprotect`]: https://man7.org/linux/man-pages/man2/mprotect.2.html
3405/// [`mmap`]: https://man7.org/linux/man-pages/man2/mmap.2.html
3406/// [`munmap`]: https://man7.org/linux/man-pages/man2/munmap.2.html
#[cfg(feature = "pooling-allocator")]
#[derive(Debug, Clone, Default)]
pub struct PoolingAllocationConfig {
    // Builder methods on this type write directly into the runtime's internal
    // pooling-allocator configuration.
    config: crate::runtime::vm::PoolingInstanceAllocatorConfig,
}
3412
3413#[cfg(feature = "pooling-allocator")]
3414impl PoolingAllocationConfig {
3415 /// Returns a new configuration builder with all default settings
3416 /// configured.
3417 pub fn new() -> PoolingAllocationConfig {
3418 PoolingAllocationConfig::default()
3419 }
3420
3421 /// Configures the maximum number of "unused warm slots" to retain in the
3422 /// pooling allocator.
3423 ///
3424 /// The pooling allocator operates over slots to allocate from, and each
3425 /// slot is considered "cold" if it's never been used before or "warm" if
3426 /// it's been used by some module in the past. Slots in the pooling
3427 /// allocator additionally track an "affinity" flag to a particular core
3428 /// wasm module. When a module is instantiated into a slot then the slot is
3429 /// considered affine to that module, even after the instance has been
3430 /// deallocated.
3431 ///
3432 /// When a new instance is created then a slot must be chosen, and the
3433 /// current algorithm for selecting a slot is:
3434 ///
3435 /// * If there are slots that are affine to the module being instantiated,
3436 /// then the most recently used slot is selected to be allocated from.
3437 /// This is done to improve reuse of resources such as memory mappings and
3438 /// additionally try to benefit from temporal locality for things like
3439 /// caches.
3440 ///
3441 /// * Otherwise if there are more than N affine slots to other modules, then
3442 /// one of those affine slots is chosen to be allocated. The slot chosen
3443 /// is picked on a least-recently-used basis.
3444 ///
3445 /// * Finally, if there are less than N affine slots to other modules, then
3446 /// the non-affine slots are allocated from.
3447 ///
3448 /// This setting, `max_unused_warm_slots`, is the value for N in the above
3449 /// algorithm. The purpose of this setting is to have a knob over the RSS
3450 /// impact of "unused slots" for a long-running wasm server.
3451 ///
3452 /// If this setting is set to 0, for example, then affine slots are
3453 /// aggressively reused on a least-recently-used basis. A "cold" slot is
3454 /// only used if there are no affine slots available to allocate from. This
3455 /// means that the set of slots used over the lifetime of a program is the
3456 /// same as the maximum concurrent number of wasm instances.
3457 ///
3458 /// If this setting is set to infinity, however, then cold slots are
3459 /// prioritized to be allocated from. This means that the set of slots used
3460 /// over the lifetime of a program will approach
3461 /// [`PoolingAllocationConfig::total_memories`], or the maximum number of
3462 /// slots in the pooling allocator.
3463 ///
3464 /// Wasmtime does not aggressively decommit all resources associated with a
3465 /// slot when the slot is not in use. For example the
3466 /// [`PoolingAllocationConfig::linear_memory_keep_resident`] option can be
3467 /// used to keep memory associated with a slot, even when it's not in use.
3468 /// This means that the total set of used slots in the pooling instance
3469 /// allocator can impact the overall RSS usage of a program.
3470 ///
3471 /// The default value for this option is `100`.
    pub fn max_unused_warm_slots(&mut self, max: u32) -> &mut Self {
        // This is the "N" in the slot-selection algorithm documented above.
        self.config.max_unused_warm_slots = max;
        self
    }
3476
3477 /// The target number of decommits to do per batch.
3478 ///
3479 /// This is not precise, as we can queue up decommits at times when we
3480 /// aren't prepared to immediately flush them, and so we may go over this
3481 /// target size occasionally.
3482 ///
3483 /// A batch size of one effectively disables batching.
3484 ///
3485 /// Defaults to `1`.
    pub fn decommit_batch_size(&mut self, batch_size: usize) -> &mut Self {
        // A best-effort target, not a hard cap; batches may occasionally
        // exceed this size (see the docs above).
        self.config.decommit_batch_size = batch_size;
        self
    }
3490
3491 /// How much memory, in bytes, to keep resident for async stacks allocated
3492 /// with the pooling allocator.
3493 ///
3494 /// When [`Config::async_stack_zeroing`] is enabled then Wasmtime will reset
3495 /// the contents of async stacks back to zero upon deallocation. This option
3496 /// can be used to perform the zeroing operation with `memset` up to a
3497 /// certain threshold of bytes instead of using system calls to reset the
3498 /// stack to zero.
3499 ///
3500 /// Note that when using this option the memory with async stacks will
3501 /// never be decommitted.
    #[cfg(feature = "async")]
    pub fn async_stack_keep_resident(&mut self, size: usize) -> &mut Self {
        // Byte threshold below which stack zeroing uses `memset` instead of
        // system calls; memory under this threshold is never decommitted.
        self.config.async_stack_keep_resident = size;
        self
    }
3507
3508 /// How much memory, in bytes, to keep resident for each linear memory
3509 /// after deallocation.
3510 ///
3511 /// This option is only applicable on Linux and has no effect on other
3512 /// platforms.
3513 ///
3514 /// By default Wasmtime will use `madvise` to reset the entire contents of
3515 /// linear memory back to zero when a linear memory is deallocated. This
3516 /// option can be used to use `memset` instead to set memory back to zero
3517 /// which can, in some configurations, reduce the number of page faults
3518 /// taken when a slot is reused.
    pub fn linear_memory_keep_resident(&mut self, size: usize) -> &mut Self {
        // Bytes reset with `memset` rather than `madvise` on deallocation;
        // only takes effect on Linux (see the docs above).
        self.config.linear_memory_keep_resident = size;
        self
    }
3523
3524 /// How much memory, in bytes, to keep resident for each table after
3525 /// deallocation.
3526 ///
3527 /// This option is only applicable on Linux and has no effect on other
3528 /// platforms.
3529 ///
3530 /// This option is the same as
3531 /// [`PoolingAllocationConfig::linear_memory_keep_resident`] except that it
3532 /// is applicable to tables instead.
    pub fn table_keep_resident(&mut self, size: usize) -> &mut Self {
        // Table analogue of `linear_memory_keep_resident`; Linux-only.
        self.config.table_keep_resident = size;
        self
    }
3537
3538 /// The maximum number of concurrent component instances supported (default
3539 /// is `1000`).
3540 ///
3541 /// This provides an upper-bound on the total size of component
3542 /// metadata-related allocations, along with
3543 /// [`PoolingAllocationConfig::max_component_instance_size`]. The upper bound is
3544 ///
3545 /// ```text
3546 /// total_component_instances * max_component_instance_size
3547 /// ```
3548 ///
3549 /// where `max_component_instance_size` is rounded up to the size and alignment
3550 /// of the internal representation of the metadata.
    pub fn total_component_instances(&mut self, count: u32) -> &mut Self {
        // Caps how many component instances may be live concurrently.
        self.config.limits.total_component_instances = count;
        self
    }
3555
3556 /// The maximum size, in bytes, allocated for a component instance's
3557 /// `VMComponentContext` metadata.
3558 ///
3559 /// The [`wasmtime::component::Instance`][crate::component::Instance] type
3560 /// has a static size but its internal `VMComponentContext` is dynamically
3561 /// sized depending on the component being instantiated. This size limit
3562 /// loosely correlates to the size of the component, taking into account
3563 /// factors such as:
3564 ///
3565 /// * number of lifted and lowered functions,
3566 /// * number of memories
3567 /// * number of inner instances
3568 /// * number of resources
3569 ///
3570 /// If the allocated size per instance is too small then instantiation of a
3571 /// module will fail at runtime with an error indicating how many bytes were
3572 /// needed.
3573 ///
3574 /// The default value for this is 1MiB.
3575 ///
3576 /// This provides an upper-bound on the total size of component
3577 /// metadata-related allocations, along with
3578 /// [`PoolingAllocationConfig::total_component_instances`]. The upper bound is
3579 ///
3580 /// ```text
3581 /// total_component_instances * max_component_instance_size
3582 /// ```
3583 ///
3584 /// where `max_component_instance_size` is rounded up to the size and alignment
3585 /// of the internal representation of the metadata.
    pub fn max_component_instance_size(&mut self, size: usize) -> &mut Self {
        // Note: the underlying limits field is named `component_instance_size`
        // (no `max_` prefix), unlike this builder method.
        self.config.limits.component_instance_size = size;
        self
    }
3590
3591 /// The maximum number of core instances a single component may contain
3592 /// (default is unlimited).
3593 ///
3594 /// This method (along with
3595 /// [`PoolingAllocationConfig::max_memories_per_component`],
3596 /// [`PoolingAllocationConfig::max_tables_per_component`], and
3597 /// [`PoolingAllocationConfig::max_component_instance_size`]) allows you to cap
3598 /// the amount of resources a single component allocation consumes.
3599 ///
3600 /// If a component will instantiate more core instances than `count`, then
3601 /// the component will fail to instantiate.
    pub fn max_core_instances_per_component(&mut self, count: u32) -> &mut Self {
        // Per-component cap; instantiation fails when exceeded.
        self.config.limits.max_core_instances_per_component = count;
        self
    }
3606
3607 /// The maximum number of Wasm linear memories that a single component may
3608 /// transitively contain (default is unlimited).
3609 ///
3610 /// This method (along with
3611 /// [`PoolingAllocationConfig::max_core_instances_per_component`],
3612 /// [`PoolingAllocationConfig::max_tables_per_component`], and
3613 /// [`PoolingAllocationConfig::max_component_instance_size`]) allows you to cap
3614 /// the amount of resources a single component allocation consumes.
3615 ///
3616 /// If a component transitively contains more linear memories than `count`,
3617 /// then the component will fail to instantiate.
3618 pub fn max_memories_per_component(&mut self, count: u32) -> &mut Self {
3619 self.config.limits.max_memories_per_component = count;
3620 self
3621 }
3622
3623 /// The maximum number of tables that a single component may transitively
3624 /// contain (default is unlimited).
3625 ///
3626 /// This method (along with
3627 /// [`PoolingAllocationConfig::max_core_instances_per_component`],
3628 /// [`PoolingAllocationConfig::max_memories_per_component`],
3629 /// [`PoolingAllocationConfig::max_component_instance_size`]) allows you to cap
3630 /// the amount of resources a single component allocation consumes.
3631 ///
3632 /// If a component will transitively contains more tables than `count`, then
3633 /// the component will fail to instantiate.
3634 pub fn max_tables_per_component(&mut self, count: u32) -> &mut Self {
3635 self.config.limits.max_tables_per_component = count;
3636 self
3637 }
3638
3639 /// The maximum number of concurrent Wasm linear memories supported (default
3640 /// is `1000`).
3641 ///
3642 /// This value has a direct impact on the amount of memory allocated by the pooling
3643 /// instance allocator.
3644 ///
3645 /// The pooling instance allocator allocates a memory pool, where each entry
3646 /// in the pool contains the reserved address space for each linear memory
3647 /// supported by an instance.
3648 ///
3649 /// The memory pool will reserve a large quantity of host process address
3650 /// space to elide the bounds checks required for correct WebAssembly memory
3651 /// semantics. Even with 64-bit address spaces, the address space is limited
3652 /// when dealing with a large number of linear memories.
3653 ///
3654 /// For example, on Linux x86_64, the userland address space limit is 128
3655 /// TiB. That might seem like a lot, but each linear memory will *reserve* 6
3656 /// GiB of space by default.
3657 pub fn total_memories(&mut self, count: u32) -> &mut Self {
3658 self.config.limits.total_memories = count;
3659 self
3660 }
3661
3662 /// The maximum number of concurrent tables supported (default is `1000`).
3663 ///
3664 /// This value has a direct impact on the amount of memory allocated by the
3665 /// pooling instance allocator.
3666 ///
3667 /// The pooling instance allocator allocates a table pool, where each entry
3668 /// in the pool contains the space needed for each WebAssembly table
3669 /// supported by an instance (see `table_elements` to control the size of
3670 /// each table).
3671 pub fn total_tables(&mut self, count: u32) -> &mut Self {
3672 self.config.limits.total_tables = count;
3673 self
3674 }
3675
3676 /// The maximum number of execution stacks allowed for asynchronous
3677 /// execution, when enabled (default is `1000`).
3678 ///
3679 /// This value has a direct impact on the amount of memory allocated by the
3680 /// pooling instance allocator.
3681 #[cfg(feature = "async")]
3682 pub fn total_stacks(&mut self, count: u32) -> &mut Self {
3683 self.config.limits.total_stacks = count;
3684 self
3685 }
3686
3687 /// The maximum number of concurrent core instances supported (default is
3688 /// `1000`).
3689 ///
3690 /// This provides an upper-bound on the total size of core instance
3691 /// metadata-related allocations, along with
3692 /// [`PoolingAllocationConfig::max_core_instance_size`]. The upper bound is
3693 ///
3694 /// ```text
3695 /// total_core_instances * max_core_instance_size
3696 /// ```
3697 ///
3698 /// where `max_core_instance_size` is rounded up to the size and alignment of
3699 /// the internal representation of the metadata.
3700 pub fn total_core_instances(&mut self, count: u32) -> &mut Self {
3701 self.config.limits.total_core_instances = count;
3702 self
3703 }
3704
3705 /// The maximum size, in bytes, allocated for a core instance's `VMContext`
3706 /// metadata.
3707 ///
3708 /// The [`Instance`][crate::Instance] type has a static size but its
3709 /// `VMContext` metadata is dynamically sized depending on the module being
3710 /// instantiated. This size limit loosely correlates to the size of the Wasm
3711 /// module, taking into account factors such as:
3712 ///
3713 /// * number of functions
3714 /// * number of globals
3715 /// * number of memories
3716 /// * number of tables
3717 /// * number of function types
3718 ///
3719 /// If the allocated size per instance is too small then instantiation of a
3720 /// module will fail at runtime with an error indicating how many bytes were
3721 /// needed.
3722 ///
3723 /// The default value for this is 1MiB.
3724 ///
3725 /// This provides an upper-bound on the total size of core instance
3726 /// metadata-related allocations, along with
3727 /// [`PoolingAllocationConfig::total_core_instances`]. The upper bound is
3728 ///
3729 /// ```text
3730 /// total_core_instances * max_core_instance_size
3731 /// ```
3732 ///
3733 /// where `max_core_instance_size` is rounded up to the size and alignment of
3734 /// the internal representation of the metadata.
3735 pub fn max_core_instance_size(&mut self, size: usize) -> &mut Self {
3736 self.config.limits.core_instance_size = size;
3737 self
3738 }
3739
3740 /// The maximum number of defined tables for a core module (default is `1`).
3741 ///
3742 /// This value controls the capacity of the `VMTableDefinition` table in
3743 /// each instance's `VMContext` structure.
3744 ///
3745 /// The allocated size of the table will be `tables *
3746 /// sizeof(VMTableDefinition)` for each instance regardless of how many
3747 /// tables are defined by an instance's module.
3748 pub fn max_tables_per_module(&mut self, tables: u32) -> &mut Self {
3749 self.config.limits.max_tables_per_module = tables;
3750 self
3751 }
3752
3753 /// The maximum table elements for any table defined in a module (default is
3754 /// `20000`).
3755 ///
3756 /// If a table's minimum element limit is greater than this value, the
3757 /// module will fail to instantiate.
3758 ///
3759 /// If a table's maximum element limit is unbounded or greater than this
3760 /// value, the maximum will be `table_elements` for the purpose of any
3761 /// `table.grow` instruction.
3762 ///
3763 /// This value is used to reserve the maximum space for each supported
3764 /// table; table elements are pointer-sized in the Wasmtime runtime.
3765 /// Therefore, the space reserved for each instance is `tables *
3766 /// table_elements * sizeof::<*const ()>`.
3767 pub fn table_elements(&mut self, elements: usize) -> &mut Self {
3768 self.config.limits.table_elements = elements;
3769 self
3770 }
3771
3772 /// The maximum number of defined linear memories for a module (default is
3773 /// `1`).
3774 ///
3775 /// This value controls the capacity of the `VMMemoryDefinition` table in
3776 /// each core instance's `VMContext` structure.
3777 ///
3778 /// The allocated size of the table will be `memories *
3779 /// sizeof(VMMemoryDefinition)` for each core instance regardless of how
3780 /// many memories are defined by the core instance's module.
3781 pub fn max_memories_per_module(&mut self, memories: u32) -> &mut Self {
3782 self.config.limits.max_memories_per_module = memories;
3783 self
3784 }
3785
3786 /// The maximum byte size that any WebAssembly linear memory may grow to.
3787 ///
3788 /// This option defaults to 4 GiB meaning that for 32-bit linear memories
3789 /// there is no restrictions. 64-bit linear memories will not be allowed to
3790 /// grow beyond 4 GiB by default.
3791 ///
3792 /// If a memory's minimum size is greater than this value, the module will
3793 /// fail to instantiate.
3794 ///
3795 /// If a memory's maximum size is unbounded or greater than this value, the
3796 /// maximum will be `max_memory_size` for the purpose of any `memory.grow`
3797 /// instruction.
3798 ///
3799 /// This value is used to control the maximum accessible space for each
3800 /// linear memory of a core instance. This can be thought of as a simple
3801 /// mechanism like [`Store::limiter`](crate::Store::limiter) to limit memory
3802 /// at runtime. This value can also affect striping/coloring behavior when
3803 /// used in conjunction with
3804 /// [`memory_protection_keys`](PoolingAllocationConfig::memory_protection_keys).
3805 ///
3806 /// The virtual memory reservation size of each linear memory is controlled
3807 /// by the [`Config::memory_reservation`] setting and this method's
3808 /// configuration cannot exceed [`Config::memory_reservation`].
3809 pub fn max_memory_size(&mut self, bytes: usize) -> &mut Self {
3810 self.config.limits.max_memory_size = bytes;
3811 self
3812 }
3813
3814 /// Configures whether memory protection keys (MPK) should be used for more
3815 /// efficient layout of pool-allocated memories.
3816 ///
3817 /// When using the pooling allocator (see [`Config::allocation_strategy`],
3818 /// [`InstanceAllocationStrategy::Pooling`]), memory protection keys can
3819 /// reduce the total amount of allocated virtual memory by eliminating guard
3820 /// regions between WebAssembly memories in the pool. It does so by
3821 /// "coloring" memory regions with different memory keys and setting which
3822 /// regions are accessible each time executions switches from host to guest
3823 /// (or vice versa).
3824 ///
3825 /// Leveraging MPK requires configuring a smaller-than-default
3826 /// [`max_memory_size`](PoolingAllocationConfig::max_memory_size) to enable
3827 /// this coloring/striping behavior. For example embeddings might want to
3828 /// reduce the default 4G allowance to 128M.
3829 ///
3830 /// MPK is only available on Linux (called `pku` there) and recent x86
3831 /// systems; we check for MPK support at runtime by examining the `CPUID`
3832 /// register. This configuration setting can be in three states:
3833 ///
3834 /// - `auto`: if MPK support is available the guard regions are removed; if
3835 /// not, the guard regions remain
3836 /// - `yes`: use MPK to eliminate guard regions; fail if MPK is not
3837 /// supported
3838 /// - `no`: never use MPK
3839 ///
3840 /// By default this value is `no`, but may become `auto` in future
3841 /// releases.
3842 ///
3843 /// __WARNING__: this configuration options is still experimental--use at
3844 /// your own risk! MPK uses kernel and CPU features to protect memory
3845 /// regions; you may observe segmentation faults if anything is
3846 /// misconfigured.
3847 #[cfg(feature = "memory-protection-keys")]
3848 pub fn memory_protection_keys(&mut self, enable: Enabled) -> &mut Self {
3849 self.config.memory_protection_keys = enable;
3850 self
3851 }
3852
3853 /// Sets an upper limit on how many memory protection keys (MPK) Wasmtime
3854 /// will use.
3855 ///
3856 /// This setting is only applicable when
3857 /// [`PoolingAllocationConfig::memory_protection_keys`] is set to `enable`
3858 /// or `auto`. Configuring this above the HW and OS limits (typically 15)
3859 /// has no effect.
3860 ///
3861 /// If multiple Wasmtime engines are used in the same process, note that all
3862 /// engines will share the same set of allocated keys; this setting will
3863 /// limit how many keys are allocated initially and thus available to all
3864 /// other engines.
3865 #[cfg(feature = "memory-protection-keys")]
3866 pub fn max_memory_protection_keys(&mut self, max: usize) -> &mut Self {
3867 self.config.max_memory_protection_keys = max;
3868 self
3869 }
3870
    /// Check if memory protection keys (MPK) are available on the current host.
    ///
    /// This is a convenience method for determining MPK availability using the
    /// same method that [`Enabled::Auto`] does. See
    /// [`PoolingAllocationConfig::memory_protection_keys`] for more
    /// information.
    ///
    /// Returns `true` when the runtime's MPK probe reports that the host
    /// supports memory protection keys.
    #[cfg(feature = "memory-protection-keys")]
    pub fn are_memory_protection_keys_available() -> bool {
        crate::runtime::vm::mpk::is_supported()
    }
3881
3882 /// The maximum number of concurrent GC heaps supported (default is `1000`).
3883 ///
3884 /// This value has a direct impact on the amount of memory allocated by the
3885 /// pooling instance allocator.
3886 ///
3887 /// The pooling instance allocator allocates a GC heap pool, where each
3888 /// entry in the pool contains the space needed for each GC heap used by a
3889 /// store.
3890 #[cfg(feature = "gc")]
3891 pub fn total_gc_heaps(&mut self, count: u32) -> &mut Self {
3892 self.config.limits.total_gc_heaps = count;
3893 self
3894 }
3895
3896 /// Configures whether the Linux-specific [`PAGEMAP_SCAN` ioctl][ioctl] is
3897 /// used to help reset linear memory.
3898 ///
3899 /// When [`Self::linear_memory_keep_resident`] or
3900 /// [`Self::table_keep_resident`] options are configured to nonzero values
3901 /// the default behavior is to `memset` the lowest addresses of a table or
3902 /// memory back to their original contents. With the `PAGEMAP_SCAN` ioctl on
3903 /// Linux this can be done to more intelligently scan for resident pages in
3904 /// the region and only reset those pages back to their original contents
3905 /// with `memset` rather than assuming the low addresses are all resident.
3906 ///
3907 /// This ioctl has the potential to provide a number of performance benefits
3908 /// in high-reuse and high concurrency scenarios. Notably this enables
3909 /// Wasmtime to scan the entire region of WebAssembly linear memory and
3910 /// manually reset memory back to its original contents, up to
3911 /// [`Self::linear_memory_keep_resident`] bytes, possibly skipping an
3912 /// `madvise` entirely. This can be more efficient by avoiding removing
3913 /// pages from the address space entirely and additionally ensuring that
3914 /// future use of the linear memory doesn't incur page faults as the pages
3915 /// remain resident.
3916 ///
3917 /// At this time this configuration option is still being evaluated as to
3918 /// how appropriate it is for all use cases. It currently defaults to
3919 /// `no` or disabled but may change to `auto`, enable if supported, in the
3920 /// future. This option is only supported on Linux and requires a kernel
3921 /// version of 6.7 or higher.
3922 ///
3923 /// [ioctl]: https://www.man7.org/linux/man-pages/man2/PAGEMAP_SCAN.2const.html
3924 pub fn pagemap_scan(&mut self, enable: Enabled) -> &mut Self {
3925 self.config.pagemap_scan = enable;
3926 self
3927 }
3928
    /// Tests whether [`Self::pagemap_scan`] is available or not on the host
    /// system.
    ///
    /// Delegates to the runtime pooling-allocator configuration's probe for
    /// the `PAGEMAP_SCAN` ioctl.
    pub fn is_pagemap_scan_available() -> bool {
        crate::runtime::vm::PoolingInstanceAllocatorConfig::is_pagemap_scan_available()
    }
3934}
3935
/// Runtime detection of whether the named target CPU `feature` is present on
/// the host.
///
/// Returns `Some(true)`/`Some(false)` when the feature's presence could be
/// determined for the host architecture, and `None` when the feature name is
/// not recognized (or no detection logic exists here for this architecture).
#[cfg(feature = "std")]
fn detect_host_feature(feature: &str) -> Option<bool> {
    #[cfg(target_arch = "aarch64")]
    {
        return match feature {
            "lse" => Some(std::arch::is_aarch64_feature_detected!("lse")),
            "paca" => Some(std::arch::is_aarch64_feature_detected!("paca")),
            "fp16" => Some(std::arch::is_aarch64_feature_detected!("fp16")),

            _ => None,
        };
    }

    // `is_s390x_feature_detected` is nightly only for now, so use the
    // STORE FACILITY LIST EXTENDED (STFLE) instruction directly as a
    // temporary measure.
    #[cfg(target_arch = "s390x")]
    {
        let mut facility_list: [u64; 4] = [0; 4];
        // SAFETY: `stfle` stores at most `r0 + 1` doublewords at the given
        // address; `r0` is initialized to `facility_list.len() - 1` so the
        // stores stay within the 4-doubleword buffer.
        unsafe {
            core::arch::asm!(
                "stfle 0({})",
                in(reg_addr) facility_list.as_mut_ptr() ,
                inout("r0") facility_list.len() as u64 - 1 => _,
                options(nostack)
            );
        }
        // Test whether facility bit `n` is set in the list filled in above.
        let get_facility_bit = |n: usize| {
            // NOTE: bits are numbered from the left.
            facility_list[n / 64] & (1 << (63 - (n % 64))) != 0
        };

        return match feature {
            "mie3" => Some(get_facility_bit(61)),
            "mie4" => Some(get_facility_bit(84)),
            "vxrs_ext2" => Some(get_facility_bit(148)),
            "vxrs_ext3" => Some(get_facility_bit(198)),

            _ => None,
        };
    }

    #[cfg(target_arch = "riscv64")]
    {
        return match feature {
            // `is_riscv64_feature_detected` is not yet stable, so it cannot
            // be used here. For now lie and say all features are always
            // found to keep tests working.
            _ => Some(true),
        };
    }

    #[cfg(target_arch = "x86_64")]
    {
        return match feature {
            "cmpxchg16b" => Some(std::is_x86_feature_detected!("cmpxchg16b")),
            "sse3" => Some(std::is_x86_feature_detected!("sse3")),
            "ssse3" => Some(std::is_x86_feature_detected!("ssse3")),
            "sse4.1" => Some(std::is_x86_feature_detected!("sse4.1")),
            "sse4.2" => Some(std::is_x86_feature_detected!("sse4.2")),
            "popcnt" => Some(std::is_x86_feature_detected!("popcnt")),
            "avx" => Some(std::is_x86_feature_detected!("avx")),
            "avx2" => Some(std::is_x86_feature_detected!("avx2")),
            "fma" => Some(std::is_x86_feature_detected!("fma")),
            "bmi1" => Some(std::is_x86_feature_detected!("bmi1")),
            "bmi2" => Some(std::is_x86_feature_detected!("bmi2")),
            "avx512bitalg" => Some(std::is_x86_feature_detected!("avx512bitalg")),
            "avx512dq" => Some(std::is_x86_feature_detected!("avx512dq")),
            "avx512f" => Some(std::is_x86_feature_detected!("avx512f")),
            "avx512vl" => Some(std::is_x86_feature_detected!("avx512vl")),
            "avx512vbmi" => Some(std::is_x86_feature_detected!("avx512vbmi")),
            "lzcnt" => Some(std::is_x86_feature_detected!("lzcnt")),

            _ => None,
        };
    }

    // Fallback for architectures without any detection logic above.
    #[allow(
        unreachable_code,
        reason = "reachable or not depending on if a target above matches"
    )]
    {
        let _ = feature;
        return None;
    }
}