// wasmtime/config.rs
1use crate::prelude::*;
2use alloc::sync::Arc;
3use bitflags::Flags;
4use core::fmt;
5use core::str::FromStr;
6#[cfg(any(feature = "cranelift", feature = "winch"))]
7use std::path::Path;
8pub use wasmparser::WasmFeatures;
9use wasmtime_environ::{ConfigTunables, TripleExt, Tunables};
10
11#[cfg(feature = "runtime")]
12use crate::memory::MemoryCreator;
13#[cfg(feature = "runtime")]
14use crate::profiling_agent::{self, ProfilingAgent};
15#[cfg(feature = "runtime")]
16use crate::runtime::vm::{
17 GcRuntime, InstanceAllocator, OnDemandInstanceAllocator, RuntimeMemoryCreator,
18};
19#[cfg(feature = "runtime")]
20use crate::trampoline::MemoryCreatorProxy;
21
22#[cfg(feature = "async")]
23use crate::stack::{StackCreator, StackCreatorProxy};
24#[cfg(feature = "async")]
25use wasmtime_fiber::RuntimeFiberStackCreator;
26
27#[cfg(feature = "runtime")]
28pub use crate::runtime::code_memory::CustomCodeMemory;
29#[cfg(feature = "cache")]
30pub use wasmtime_cache::{Cache, CacheConfig};
31#[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
32pub use wasmtime_environ::CacheStore;
33
/// Represents the module instance allocation strategy to use.
#[derive(Clone)]
#[non_exhaustive]
pub enum InstanceAllocationStrategy {
    /// The on-demand instance allocation strategy.
    ///
    /// Resources related to a module instance are allocated at instantiation time and
    /// immediately deallocated when the `Store` referencing the instance is dropped.
    ///
    /// This is the default allocation strategy for Wasmtime.
    OnDemand,
    /// The pooling instance allocation strategy.
    ///
    /// A pool of resources is created in advance and module instantiation reuses resources
    /// from the pool. Resources are returned to the pool when the `Store` referencing the instance
    /// is dropped.
    ///
    /// Only available when the `pooling-allocator` Cargo feature is enabled.
    #[cfg(feature = "pooling-allocator")]
    Pooling(PoolingAllocationConfig),
}
53
54impl InstanceAllocationStrategy {
55 /// The default pooling instance allocation strategy.
56 #[cfg(feature = "pooling-allocator")]
57 pub fn pooling() -> Self {
58 Self::Pooling(Default::default())
59 }
60}
61
62impl Default for InstanceAllocationStrategy {
63 fn default() -> Self {
64 Self::OnDemand
65 }
66}
67
#[cfg(feature = "pooling-allocator")]
impl From<PoolingAllocationConfig> for InstanceAllocationStrategy {
    /// Wraps a pooling configuration in the corresponding strategy variant.
    fn from(cfg: PoolingAllocationConfig) -> InstanceAllocationStrategy {
        Self::Pooling(cfg)
    }
}
74
#[derive(Clone)]
/// Configure the strategy used for versioning in serializing and deserializing [`crate::Module`].
///
/// The default is [`ModuleVersionStrategy::WasmtimeVersion`].
pub enum ModuleVersionStrategy {
    /// Use the wasmtime crate's Cargo package version.
    WasmtimeVersion,
    /// Use a custom version string. Must be at most 255 bytes.
    Custom(String),
    /// Emit no version string in serialization, and accept all version strings in deserialization.
    None,
}
85
86impl Default for ModuleVersionStrategy {
87 fn default() -> Self {
88 ModuleVersionStrategy::WasmtimeVersion
89 }
90}
91
92impl core::hash::Hash for ModuleVersionStrategy {
93 fn hash<H: core::hash::Hasher>(&self, hasher: &mut H) {
94 match self {
95 Self::WasmtimeVersion => env!("CARGO_PKG_VERSION").hash(hasher),
96 Self::Custom(s) => s.hash(hasher),
97 Self::None => {}
98 };
99 }
100}
101
102impl ModuleVersionStrategy {
103 /// Get the string-encoding version of the module.
104 pub fn as_str(&self) -> &str {
105 match &self {
106 Self::WasmtimeVersion => env!("CARGO_PKG_VERSION_MAJOR"),
107 Self::Custom(c) => c,
108 Self::None => "",
109 }
110 }
111}
112
/// Configuration for record/replay.
#[derive(Clone)]
#[non_exhaustive]
pub enum RRConfig {
    #[cfg(feature = "rr")]
    /// Recording on store is enabled
    Recording,
    #[cfg(feature = "rr")]
    /// Replaying on store is enabled
    Replaying,
    /// No record/replay is enabled (the default; see `Config::new`)
    None,
}
126
/// Global configuration options used to create an [`Engine`](crate::Engine)
/// and customize its behavior.
///
/// This structure exposes a builder-like interface and is primarily consumed by
/// [`Engine::new()`](crate::Engine::new).
///
/// The validation of `Config` is deferred until the engine is being built, thus
/// a problematic config may cause `Engine::new` to fail.
///
/// # Defaults
///
/// The `Default` trait implementation and the return value from
/// [`Config::new()`] are the same and represent the default set of
/// configuration for an engine. The exact set of defaults will differ based on
/// properties such as enabled Cargo features at compile time and the configured
/// target (see [`Config::target`]). Configuration options document their
/// default values and what the conditional value of the default is where
/// applicable.
#[derive(Clone)]
pub struct Config {
    /// Compiler configuration; `None` when compilation was dynamically
    /// disabled via [`Config::enable_compiler`].
    #[cfg(any(feature = "cranelift", feature = "winch"))]
    compiler_config: Option<CompilerConfig>,
    /// Compilation target triple; `None` means the host triple is used.
    target: Option<target_lexicon::Triple>,
    /// Selected garbage-collector implementation.
    #[cfg(feature = "gc")]
    collector: Collector,
    /// Profiler integration selected for generated code.
    profiling_strategy: ProfilingStrategy,
    /// Partially-configured knobs later resolved into `Tunables`.
    tunables: ConfigTunables,

    /// Cache for compiled artifacts, if configured.
    #[cfg(feature = "cache")]
    pub(crate) cache: Option<Cache>,
    /// Custom creator for linear memories, if any.
    #[cfg(feature = "runtime")]
    pub(crate) mem_creator: Option<Arc<dyn RuntimeMemoryCreator>>,
    /// Custom manager for executable code memory, if any.
    #[cfg(feature = "runtime")]
    pub(crate) custom_code_memory: Option<Arc<dyn CustomCodeMemory>>,
    /// How instance resources are allocated (on-demand vs. pooling).
    pub(crate) allocation_strategy: InstanceAllocationStrategy,
    /// Maximum stack space for wasm execution; see [`Config::max_wasm_stack`].
    pub(crate) max_wasm_stack: usize,
    /// Explicitly enabled features via `Config::wasm_*` methods. This is a
    /// signal that the embedder specifically wants something turned on
    /// regardless of the defaults that Wasmtime might otherwise have enabled.
    ///
    /// Note that this, and `disabled_features` below, start as the empty set of
    /// features to only track explicit user requests.
    pub(crate) enabled_features: WasmFeatures,
    /// Same as `enabled_features`, but for those that are explicitly disabled.
    pub(crate) disabled_features: WasmFeatures,
    /// Whether backtraces are captured on traps/errors; see
    /// [`Config::wasm_backtrace`].
    pub(crate) wasm_backtrace: bool,
    /// Whether the `WASMTIME_BACKTRACE_DETAILS` environment variable was
    /// consulted; see [`Config::wasm_backtrace_details`].
    pub(crate) wasm_backtrace_details_env_used: bool,
    /// Whether native unwind info is generated; `None` means "use the
    /// default". See [`Config::native_unwind_info`].
    pub(crate) native_unwind_info: Option<bool>,
    /// Size of stacks allocated for async execution; see
    /// [`Config::async_stack_size`].
    #[cfg(any(feature = "async", feature = "stack-switching"))]
    pub(crate) async_stack_size: usize,
    /// Whether async stacks are zeroed before (re)use; see
    /// [`Config::async_stack_zeroing`].
    #[cfg(feature = "async")]
    pub(crate) async_stack_zeroing: bool,
    /// Custom creator for async fiber stacks, if any.
    #[cfg(feature = "async")]
    pub(crate) stack_creator: Option<Arc<dyn RuntimeFiberStackCreator>>,
    /// Versioning strategy for module serialization.
    pub(crate) module_version: ModuleVersionStrategy,
    /// Whether compilation may be parallelized.
    pub(crate) parallel_compilation: bool,
    pub(crate) memory_guaranteed_dense_image_size: u64,
    pub(crate) force_memory_init_memfd: bool,
    pub(crate) wmemcheck: bool,
    /// Whether a core dump is attached to traps; requires the `coredump`
    /// feature.
    #[cfg(feature = "coredump")]
    pub(crate) coredump_on_trap: bool,
    pub(crate) macos_use_mach_ports: bool,
    /// Optional callback used to probe for host CPU features by name.
    pub(crate) detect_host_feature: Option<fn(&str) -> Option<bool>>,
    pub(crate) x86_float_abi_ok: Option<bool>,
    pub(crate) shared_memory: bool,
    /// Record/replay mode; `RRConfig::None` by default.
    pub(crate) rr_config: RRConfig,
}
194
/// User-provided configuration for the compiler.
#[cfg(any(feature = "cranelift", feature = "winch"))]
#[derive(Debug, Clone)]
struct CompilerConfig {
    /// Selected compiler backend; `None` when no explicit choice was made
    /// (`Strategy::Auto` is normalized to `None` via `not_auto`).
    strategy: Option<Strategy>,
    /// Key/value compiler settings provided by the embedder.
    settings: crate::hash_map::HashMap<String, String>,
    /// Boolean compiler flags enabled by name.
    flags: crate::hash_set::HashSet<String>,
    /// Backend storage for the incremental-compilation cache, if enabled.
    #[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
    cache_store: Option<Arc<dyn CacheStore>>,
    /// Directory to emit CLIF output into, if configured — presumably for
    /// compiler debugging; the consumer is outside this file.
    clif_dir: Option<std::path::PathBuf>,
    /// Whether wmemcheck instrumentation is enabled for compiled code.
    wmemcheck: bool,
}
207
#[cfg(any(feature = "cranelift", feature = "winch"))]
impl CompilerConfig {
    /// Creates the default compiler configuration: no explicit strategy,
    /// no settings, no flags, and wmemcheck disabled.
    fn new() -> Self {
        CompilerConfig {
            // `Auto.not_auto()` normalizes `Strategy::Auto` to `None`.
            strategy: Strategy::Auto.not_auto(),
            settings: Default::default(),
            flags: Default::default(),
            #[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
            cache_store: None,
            clif_dir: None,
            wmemcheck: false,
        }
    }

    /// Ensures that the key is not set or equals to the given value.
    /// If the key is not set, it will be set to the given value.
    ///
    /// # Returns
    ///
    /// Returns true if successfully set or already had the given setting
    /// value, or false if the setting was explicitly set to something
    /// else previously.
    fn ensure_setting_unset_or_given(&mut self, k: &str, v: &str) -> bool {
        match self.settings.get(k) {
            // Already set: succeed only when the values agree.
            Some(existing) => existing == v,
            // Not set yet: record the requested value.
            None => {
                self.settings.insert(k.to_string(), v.to_string());
                true
            }
        }
    }
}
241
#[cfg(any(feature = "cranelift", feature = "winch"))]
impl Default for CompilerConfig {
    /// Delegates to [`CompilerConfig::new`].
    fn default() -> Self {
        CompilerConfig::new()
    }
}
248
249impl Config {
    /// Creates a new configuration object with the default configuration
    /// specified.
    pub fn new() -> Self {
        let mut ret = Self {
            tunables: ConfigTunables::default(),
            #[cfg(any(feature = "cranelift", feature = "winch"))]
            compiler_config: Some(CompilerConfig::default()),
            target: None,
            #[cfg(feature = "gc")]
            collector: Collector::default(),
            #[cfg(feature = "cache")]
            cache: None,
            profiling_strategy: ProfilingStrategy::None,
            #[cfg(feature = "runtime")]
            mem_creator: None,
            #[cfg(feature = "runtime")]
            custom_code_memory: None,
            allocation_strategy: InstanceAllocationStrategy::OnDemand,
            // 512k of stack -- note that this is chosen currently to not be too
            // big, not be too small, and be a good default for most platforms.
            // One platform of particular note is Windows where the stack size
            // of the main thread seems to, by default, be smaller than that of
            // Linux and macOS. This 512k value at least lets our current test
            // suite pass on the main thread of Windows (using `--test-threads
            // 1` forces this), or at least it passed when this change was
            // committed.
            max_wasm_stack: 512 * 1024,
            wasm_backtrace: true,
            wasm_backtrace_details_env_used: false,
            native_unwind_info: None,
            // Feature sets start empty so only explicit embedder requests are
            // tracked; engine-level defaults are applied elsewhere.
            enabled_features: WasmFeatures::empty(),
            disabled_features: WasmFeatures::empty(),
            // 2 MiB default stack for async execution.
            #[cfg(any(feature = "async", feature = "stack-switching"))]
            async_stack_size: 2 << 20,
            #[cfg(feature = "async")]
            async_stack_zeroing: false,
            #[cfg(feature = "async")]
            stack_creator: None,
            module_version: ModuleVersionStrategy::default(),
            // Disabled under Miri.
            parallel_compilation: !cfg!(miri),
            // 16 MiB.
            memory_guaranteed_dense_image_size: 16 << 20,
            force_memory_init_memfd: false,
            wmemcheck: false,
            #[cfg(feature = "coredump")]
            coredump_on_trap: false,
            // Disabled under Miri.
            macos_use_mach_ports: !cfg!(miri),
            // Host CPU feature detection requires `std`.
            #[cfg(feature = "std")]
            detect_host_feature: Some(detect_host_feature),
            #[cfg(not(feature = "std"))]
            detect_host_feature: None,
            x86_float_abi_ok: None,
            shared_memory: false,
            rr_config: RRConfig::None,
        };
        // Default backtrace details to consulting the environment variable;
        // see `wasm_backtrace_details`.
        ret.wasm_backtrace_details(WasmBacktraceDetails::Environment);
        ret
    }
307
308 #[cfg(any(feature = "cranelift", feature = "winch"))]
309 pub(crate) fn has_compiler(&self) -> bool {
310 self.compiler_config.is_some()
311 }
312
313 #[track_caller]
314 #[cfg(any(feature = "cranelift", feature = "winch"))]
315 fn compiler_config_mut(&mut self) -> &mut CompilerConfig {
316 self.compiler_config.as_mut().expect(
317 "cannot configure compiler settings for `Config`s \
318 created by `Config::without_compiler`",
319 )
320 }
321
322 /// Configure whether Wasm compilation is enabled.
323 ///
324 /// Disabling Wasm compilation will allow you to load and run
325 /// [pre-compiled][crate::Engine::precompile_module] Wasm programs, but not
326 /// to compile and run new Wasm programs that have not already been
327 /// pre-compiled.
328 ///
329 /// Many compilation-related configuration methods will panic if compilation
330 /// has been disabled.
331 ///
332 /// Note that there are two ways to disable Wasm compilation:
333 ///
334 /// 1. Statically, by disabling the `"cranelift"` and `"winch"` cargo
335 /// features when building Wasmtime. These builds of Wasmtime will have
336 /// smaller code size, since they do not include any of the code to
337 /// compile Wasm.
338 ///
339 /// 2. Dynamically, by passing `false` to this method at run-time when
340 /// configuring Wasmtime. The Wasmtime binary will still include the code
341 /// for compiling Wasm, it just won't be executed, so code size is larger
342 /// than with the first approach.
343 ///
344 /// The static approach is better in most cases, however dynamically calling
345 /// `enable_compiler(false)` is useful whenever you create multiple
346 /// `Engine`s in the same process, some of which must be able to compile
347 /// Wasm and some of which should never do so. Tests are a common example of
348 /// such a situation, especially when there are multiple Rust binaries in
349 /// the same cargo workspace, and cargo's feature resolution enables the
350 /// `"cranelift"` or `"winch"` features across the whole workspace.
351 #[cfg(any(feature = "cranelift", feature = "winch"))]
352 pub fn enable_compiler(&mut self, enable: bool) -> &mut Self {
353 match (enable, &self.compiler_config) {
354 (true, Some(_)) | (false, None) => {}
355 (true, None) => {
356 self.compiler_config = Some(CompilerConfig::default());
357 }
358 (false, Some(_)) => {
359 self.compiler_config = None;
360 }
361 }
362 self
363 }
364
365 /// Configures the target platform of this [`Config`].
366 ///
367 /// This method is used to configure the output of compilation in an
368 /// [`Engine`](crate::Engine). This can be used, for example, to
369 /// cross-compile from one platform to another. By default, the host target
370 /// triple is used meaning compiled code is suitable to run on the host.
371 ///
372 /// Note that the [`Module`](crate::Module) type can only be created if the
373 /// target configured here matches the host. Otherwise if a cross-compile is
374 /// being performed where the host doesn't match the target then
375 /// [`Engine::precompile_module`](crate::Engine::precompile_module) must be
376 /// used instead.
377 ///
378 /// Target-specific flags (such as CPU features) will not be inferred by
379 /// default for the target when one is provided here. This means that this
380 /// can also be used, for example, with the host architecture to disable all
381 /// host-inferred feature flags. Configuring target-specific flags can be
382 /// done with [`Config::cranelift_flag_set`] and
383 /// [`Config::cranelift_flag_enable`].
384 ///
385 /// # Errors
386 ///
387 /// This method will error if the given target triple is not supported.
388 pub fn target(&mut self, target: &str) -> Result<&mut Self> {
389 self.target =
390 Some(target_lexicon::Triple::from_str(target).map_err(|e| crate::format_err!(e))?);
391
392 Ok(self)
393 }
394
395 /// Enables the incremental compilation cache in Cranelift, using the provided `CacheStore`
396 /// backend for storage.
397 ///
398 /// # Panics
399 ///
400 /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
401 #[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
402 pub fn enable_incremental_compilation(
403 &mut self,
404 cache_store: Arc<dyn CacheStore>,
405 ) -> Result<&mut Self> {
406 self.compiler_config_mut().cache_store = Some(cache_store);
407 Ok(self)
408 }
409
    // Deprecated no-op retained only for backwards compatibility with older
    // embedder code; see the deprecation note.
    #[doc(hidden)]
    #[deprecated(note = "no longer has any effect")]
    #[cfg(feature = "async")]
    pub fn async_support(&mut self, _enable: bool) -> &mut Self {
        self
    }
416
    /// Configures whether DWARF debug information will be emitted
    /// during compilation for a native debugger on the Wasmtime
    /// process to consume.
    ///
    /// Note that the `debug-builtins` compile-time Cargo feature must also be
    /// enabled for native debuggers such as GDB or LLDB to be able to debug
    /// guest WebAssembly programs.
    ///
    /// By default this option is `false`.
    ///
    /// **Note** Enabling this option is not compatible with the Winch compiler.
    pub fn debug_info(&mut self, enable: bool) -> &mut Self {
        self.tunables.debug_native = Some(enable);
        self
    }
431
    /// Configures whether compiled guest code will be instrumented to
    /// provide debugging at the Wasm VM level.
    ///
    /// This is required in order to enable a guest-level debugging
    /// API that can precisely examine Wasm VM state and (eventually,
    /// once it is complete) set breakpoints and watchpoints and step
    /// through code.
    ///
    /// Without this enabled, debugging can only be done via a native
    /// debugger operating on the compiled guest code (see
    /// [`Config::debug_info`]) and is "best-effort": we may be able to
    /// recover some Wasm locals or operand stack values, but it is
    /// not guaranteed, even when optimizations are disabled.
    ///
    /// When this is enabled, additional instrumentation is inserted
    /// that directly tracks the Wasm VM state at every step. This has
    /// some performance impact, but allows perfect debugging
    /// fidelity.
    ///
    /// Breakpoints, watchpoints, and stepping are not yet supported,
    /// but will be added in a future version of Wasmtime.
    ///
    /// This enables use of the [`crate::DebugFrameCursor`] API which is
    /// provided by [`crate::Caller::debug_frames`] from within a
    /// hostcall context.
    ///
    /// ***Note*** Enabling this option is not compatible with the
    /// Winch compiler.
    #[cfg(feature = "debug")]
    pub fn guest_debug(&mut self, enable: bool) -> &mut Self {
        self.tunables.debug_guest = Some(enable);
        self
    }
465
    /// Configures whether [`WasmBacktrace`] will be present in the context of
    /// errors returned from Wasmtime.
    ///
    /// A backtrace may be collected whenever an error is returned from a host
    /// function call through to WebAssembly or when WebAssembly itself hits a
    /// trap condition, such as an out-of-bounds memory access. This flag
    /// indicates, in these conditions, whether the backtrace is collected or
    /// not.
    ///
    /// Currently wasm backtraces are implemented through frame pointer walking.
    /// This means that collecting a backtrace is expected to be a fast and
    /// relatively cheap operation. Additionally backtrace collection is
    /// suitable in concurrent environments since one thread capturing a
    /// backtrace won't block other threads.
    ///
    /// Collected backtraces are attached via
    /// [`Error::context`](crate::Error::context) to errors returned from host
    /// functions. The [`WasmBacktrace`] type can be acquired via
    /// [`Error::downcast_ref`](crate::Error::downcast_ref) to inspect the
    /// backtrace. When this option is disabled then this context is never
    /// applied to errors coming out of wasm.
    ///
    /// This option is `true` by default.
    ///
    /// See also [`Config::wasm_backtrace_details`] for controlling whether
    /// debug info is used to symbolicate frames.
    ///
    /// [`WasmBacktrace`]: crate::WasmBacktrace
    pub fn wasm_backtrace(&mut self, enable: bool) -> &mut Self {
        self.wasm_backtrace = enable;
        self
    }
495
496 /// Configures whether backtraces in `Trap` will parse debug info in the wasm file to
497 /// have filename/line number information.
498 ///
499 /// When enabled this will causes modules to retain debugging information
500 /// found in wasm binaries. This debug information will be used when a trap
501 /// happens to symbolicate each stack frame and attempt to print a
502 /// filename/line number for each wasm frame in the stack trace.
503 ///
504 /// By default this option is `WasmBacktraceDetails::Environment`, meaning
505 /// that wasm will read `WASMTIME_BACKTRACE_DETAILS` to indicate whether
506 /// details should be parsed. Note that the `std` feature of this crate must
507 /// be active to read environment variables, otherwise this is disabled by
508 /// default.
509 pub fn wasm_backtrace_details(&mut self, enable: WasmBacktraceDetails) -> &mut Self {
510 self.wasm_backtrace_details_env_used = false;
511 self.tunables.parse_wasm_debuginfo = match enable {
512 WasmBacktraceDetails::Enable => Some(true),
513 WasmBacktraceDetails::Disable => Some(false),
514 WasmBacktraceDetails::Environment => {
515 #[cfg(feature = "std")]
516 {
517 self.wasm_backtrace_details_env_used = true;
518 std::env::var("WASMTIME_BACKTRACE_DETAILS")
519 .map(|s| Some(s == "1"))
520 .unwrap_or(Some(false))
521 }
522 #[cfg(not(feature = "std"))]
523 {
524 Some(false)
525 }
526 }
527 };
528 self
529 }
530
    /// Configures whether to generate native unwind information
    /// (e.g. `.eh_frame` on Linux).
    ///
    /// This configuration option only exists to help third-party stack
    /// capturing mechanisms, such as the system's unwinder or the `backtrace`
    /// crate, determine how to unwind through Wasm frames. It does not affect
    /// whether Wasmtime can capture Wasm backtraces or not. The presence of
    /// [`WasmBacktrace`] is controlled by the [`Config::wasm_backtrace`]
    /// option.
    ///
    /// Native unwind information is included:
    /// - When targeting Windows, since the Windows ABI requires it.
    /// - By default.
    ///
    /// Note that systems loading many modules may wish to disable this
    /// configuration option instead of leaving it on-by-default. Some platforms
    /// exhibit quadratic behavior when registering/unregistering unwinding
    /// information which can greatly slow down the module loading/unloading
    /// process.
    ///
    /// [`WasmBacktrace`]: crate::WasmBacktrace
    pub fn native_unwind_info(&mut self, enable: bool) -> &mut Self {
        // `Some` here marks an explicit embedder choice; `None` (the initial
        // value) means "use the default".
        self.native_unwind_info = Some(enable);
        self
    }
556
    /// Configures whether execution of WebAssembly will "consume fuel" to
    /// either halt or yield execution as desired.
    ///
    /// This can be used to deterministically prevent infinitely-executing
    /// WebAssembly code by instrumenting generated code to consume fuel as it
    /// executes. When fuel runs out a trap is raised, however [`Store`] can be
    /// configured to yield execution periodically via
    /// [`crate::Store::fuel_async_yield_interval`].
    ///
    /// Note that a [`Store`] starts with no fuel, so if you enable this option
    /// you'll have to be sure to pour some fuel into [`Store`] before
    /// executing some code.
    ///
    /// By default this option is `false`.
    ///
    /// **Note** Enabling this option is not compatible with the Winch compiler.
    ///
    /// [`Store`]: crate::Store
    pub fn consume_fuel(&mut self, enable: bool) -> &mut Self {
        // Recorded in the tunables; `Config` validation is deferred until the
        // engine is built.
        self.tunables.consume_fuel = Some(enable);
        self
    }
579
    /// Enables epoch-based interruption.
    ///
    /// When executing code in async mode, we sometimes want to
    /// implement a form of cooperative timeslicing: long-running Wasm
    /// guest code should periodically yield to the executor
    /// loop. This yielding could be implemented by using "fuel" (see
    /// [`consume_fuel`](Config::consume_fuel)). However, fuel
    /// instrumentation is somewhat expensive: it modifies the
    /// compiled form of the Wasm code so that it maintains a precise
    /// instruction count, frequently checking this count against the
    /// remaining fuel. If one does not need this precise count or
    /// deterministic interruptions, and only needs a periodic
    /// interrupt of some form, then it would be better to have a more
    /// lightweight mechanism.
    ///
    /// Epoch-based interruption is that mechanism. There is a global
    /// "epoch", which is a counter that divides time into arbitrary
    /// periods (or epochs). This counter lives on the
    /// [`Engine`](crate::Engine) and can be incremented by calling
    /// [`Engine::increment_epoch`](crate::Engine::increment_epoch).
    /// Epoch-based instrumentation works by setting a "deadline
    /// epoch". The compiled code knows the deadline, and at certain
    /// points, checks the current epoch against that deadline. It
    /// will yield if the deadline has been reached.
    ///
    /// The idea is that checking an infrequently-changing counter is
    /// cheaper than counting and frequently storing a precise metric
    /// (instructions executed) locally. The interruptions are not
    /// deterministic, but if the embedder increments the epoch in a
    /// periodic way (say, every regular timer tick by a thread or
    /// signal handler), then we can ensure that all async code will
    /// yield to the executor within a bounded time.
    ///
    /// The deadline check cannot be avoided by malicious wasm code. It is safe
    /// to use epoch deadlines to limit the execution time of untrusted
    /// code.
    ///
    /// The [`Store`](crate::Store) tracks the deadline, and controls
    /// what happens when the deadline is reached during
    /// execution. Several behaviors are possible:
    ///
    /// - Trap if code is executing when the epoch deadline is
    ///   met. See
    ///   [`Store::epoch_deadline_trap`](crate::Store::epoch_deadline_trap).
    ///
    /// - Call an arbitrary function. This function may choose to trap or
    ///   increment the epoch. See
    ///   [`Store::epoch_deadline_callback`](crate::Store::epoch_deadline_callback).
    ///
    /// - Yield to the executor loop, then resume when the future is
    ///   next polled. See
    ///   [`Store::epoch_deadline_async_yield_and_update`](crate::Store::epoch_deadline_async_yield_and_update).
    ///
    /// Trapping is the default. The yielding behaviour may be used for
    /// the timeslicing behavior described above.
    ///
    /// This feature is available with or without async support.
    /// However, without async support, the timeslicing behaviour is
    /// not available. This means epoch-based interruption can only
    /// serve as a simple external-interruption mechanism.
    ///
    /// An initial deadline must be set before executing code by calling
    /// [`Store::set_epoch_deadline`](crate::Store::set_epoch_deadline). If this
    /// deadline is not configured then wasm will immediately trap.
    ///
    /// ## Interaction with blocking host calls
    ///
    /// Epochs (and fuel) do not assist in handling WebAssembly code blocked in
    /// a call to the host. For example if the WebAssembly function calls
    /// `wasi:io/poll.poll` to sleep epochs will not assist in waking this up or
    /// timing it out. Epochs intentionally only affect running WebAssembly code
    /// itself and it's left to the embedder to determine how best to wake up
    /// indefinitely blocking code in the host.
    ///
    /// The typical solution for this, however, is to use the `async` variant of
    /// WASI host functions. This models computation as a Rust `Future` which
    /// means that when blocking happens the future is only suspended and
    /// control yields back to the main event loop. This gives the embedder the
    /// opportunity to use `tokio::time::timeout` for example on a wasm
    /// computation and have the desired effect of cancelling a blocking
    /// operation when a timeout expires.
    ///
    /// ## When to use fuel vs. epochs
    ///
    /// In general, epoch-based interruption results in faster
    /// execution. This difference is sometimes significant: in some
    /// measurements, up to 2-3x. This is because epoch-based
    /// interruption does less work: it only watches for a global
    /// rarely-changing counter to increment, rather than keeping a
    /// local frequently-changing counter and comparing it to a
    /// deadline.
    ///
    /// Fuel, in contrast, should be used when *deterministic*
    /// yielding or trapping is needed. For example, if it is required
    /// that the same function call with the same starting state will
    /// always either complete or trap with an out-of-fuel error,
    /// deterministically, then fuel with a fixed bound should be
    /// used.
    ///
    /// **Note** Enabling this option is not compatible with the Winch compiler.
    ///
    /// # See Also
    ///
    /// - [`Engine::increment_epoch`](crate::Engine::increment_epoch)
    /// - [`Store::set_epoch_deadline`](crate::Store::set_epoch_deadline)
    /// - [`Store::epoch_deadline_trap`](crate::Store::epoch_deadline_trap)
    /// - [`Store::epoch_deadline_callback`](crate::Store::epoch_deadline_callback)
    /// - [`Store::epoch_deadline_async_yield_and_update`](crate::Store::epoch_deadline_async_yield_and_update)
    pub fn epoch_interruption(&mut self, enable: bool) -> &mut Self {
        self.tunables.epoch_interruption = Some(enable);
        self
    }
692
    /// Configures the maximum amount of stack space available for
    /// executing WebAssembly code.
    ///
    /// WebAssembly has well-defined semantics on stack overflow. This is
    /// intended to be a knob which can help configure how much stack space
    /// wasm execution is allowed to consume. Note that the number here is not
    /// super-precise, but rather wasm will take at most "pretty close to this
    /// much" stack space.
    ///
    /// If a wasm call (or series of nested wasm calls) takes more stack space
    /// than the `size` specified then a stack overflow trap will be raised.
    ///
    /// Caveat: this knob only limits the stack space consumed by wasm code.
    /// More importantly, it does not ensure that this much stack space is
    /// available on the calling thread stack. Exhausting the thread stack
    /// typically leads to an **abort** of the process.
    ///
    /// Here are some examples of how that could happen:
    ///
    /// - Let's assume this option is set to 2 MiB and then a thread that has
    ///   a stack with 512 KiB left.
    ///
    ///   If wasm code consumes more than 512 KiB then the process will be aborted.
    ///
    /// - Assuming the same conditions, but this time wasm code does not consume
    ///   any stack but calls into a host function. The host function consumes
    ///   more than 512 KiB of stack space. The process will be aborted.
    ///
    /// There's another gotcha related to recursive calling into wasm: the stack
    /// space consumed by a host function is counted towards this limit. The
    /// host functions are not prevented from consuming more than this limit.
    /// However, if the host function that used more than this limit and called
    /// back into wasm, then the execution will trap immediately because of
    /// stack overflow.
    ///
    /// When the `async` feature is enabled, this value cannot exceed the
    /// `async_stack_size` option. Be careful not to set this value too close
    /// to `async_stack_size` as doing so may limit how much stack space
    /// is available for host functions.
    ///
    /// By default this option is 512 KiB.
    ///
    /// # Errors
    ///
    /// The `Engine::new` method will fail if the `size` specified here is
    /// either 0 or larger than the [`Config::async_stack_size`] configuration.
    pub fn max_wasm_stack(&mut self, size: usize) -> &mut Self {
        // Validation (non-zero, <= async_stack_size) happens at engine build.
        self.max_wasm_stack = size;
        self
    }
743
    /// Configures the size of the stacks used for asynchronous execution.
    ///
    /// This setting configures the size of the stacks that are allocated for
    /// asynchronous execution. The value cannot be less than `max_wasm_stack`.
    ///
    /// The amount of stack space guaranteed for host functions is
    /// `async_stack_size - max_wasm_stack`, so take care not to set these two values
    /// close to one another; doing so may cause host functions to overflow the
    /// stack and abort the process.
    ///
    /// By default this option is 2 MiB (`2 << 20` bytes).
    ///
    /// # Errors
    ///
    /// The `Engine::new` method will fail if the value for this option is
    /// smaller than the [`Config::max_wasm_stack`] option.
    #[cfg(any(feature = "async", feature = "stack-switching"))]
    pub fn async_stack_size(&mut self, size: usize) -> &mut Self {
        // Validation against max_wasm_stack happens at engine build.
        self.async_stack_size = size;
        self
    }
765
    /// Configures whether or not stacks used for async futures are zeroed
    /// before (re)use.
    ///
    /// When the [`call_async`] variant of calling WebAssembly is used
    /// then Wasmtime will create a separate runtime execution stack for each
    /// future produced by [`call_async`]. By default upon allocation, depending
    /// on the platform, these stacks might be filled with uninitialized
    /// memory. This is safe and correct because, modulo bugs in Wasmtime,
    /// compiled Wasm code will never read from a stack slot before it
    /// initializes the stack slot.
    ///
    /// However, as a defense-in-depth mechanism, you may configure Wasmtime to
    /// ensure that these stacks are zeroed before they are used. Notably, if
    /// you are using the pooling allocator, stacks can be pooled and reused
    /// across different Wasm guests; ensuring that stacks are zeroed can
    /// prevent data leakage between Wasm guests even in the face of potential
    /// read-of-stack-slot-before-initialization bugs in Wasmtime's compiler.
    ///
    /// Stack zeroing can be a costly operation in highly concurrent
    /// environments due to modifications of the virtual address space requiring
    /// process-wide synchronization. It can also be costly in `no-std`
    /// environments that must manually zero memory, and cannot rely on an OS
    /// and virtual memory to provide zeroed pages.
    ///
    /// This option defaults to `false`.
    ///
    /// See also [`Config::async_stack_size`] for configuring the size of these
    /// stacks.
    ///
    /// [`call_async`]: crate::TypedFunc::call_async
    #[cfg(feature = "async")]
    pub fn async_stack_zeroing(&mut self, enable: bool) -> &mut Self {
        self.async_stack_zeroing = enable;
        self
    }
798
799 /// Explicitly enables (and un-disables) a given set of [`WasmFeatures`].
800 ///
801 /// Note: this is a low-level method that does not necessarily imply that
802 /// wasmtime _supports_ a feature. It should only be used to _disable_
803 /// features that callers want to be rejected by the parser or _enable_
804 /// features callers are certain that the current configuration of wasmtime
805 /// supports.
806 ///
807 /// Feature validation is deferred until an engine is being built, thus by
808 /// enabling features here a caller may cause
809 /// [`Engine::new`](crate::Engine::new) to fail later, if the feature
810 /// configuration isn't supported.
811 pub fn wasm_features(&mut self, flag: WasmFeatures, enable: bool) -> &mut Self {
812 self.enabled_features.set(flag, enable);
813 self.disabled_features.set(flag, !enable);
814 self
815 }
816
817 /// Configures whether the WebAssembly tail calls proposal will be enabled
818 /// for compilation or not.
819 ///
820 /// The [WebAssembly tail calls proposal] introduces the `return_call` and
821 /// `return_call_indirect` instructions. These instructions allow for Wasm
822 /// programs to implement some recursive algorithms with *O(1)* stack space
823 /// usage.
824 ///
825 /// This is `true` by default except when the Winch compiler is enabled.
826 ///
827 /// [WebAssembly tail calls proposal]: https://github.com/WebAssembly/tail-call
828 pub fn wasm_tail_call(&mut self, enable: bool) -> &mut Self {
829 self.wasm_features(WasmFeatures::TAIL_CALL, enable);
830 self
831 }
832
833 /// Configures whether the WebAssembly custom-page-sizes proposal will be
834 /// enabled for compilation or not.
835 ///
836 /// The [WebAssembly custom-page-sizes proposal] allows a memory to
837 /// customize its page sizes. By default, Wasm page sizes are 64KiB
838 /// large. This proposal allows the memory to opt into smaller page sizes
839 /// instead, allowing Wasm to run in environments with less than 64KiB RAM
840 /// available, for example.
841 ///
842 /// Note that the page size is part of the memory's type, and because
843 /// different memories may have different types, they may also have
844 /// different page sizes.
845 ///
846 /// Currently the only valid page sizes are 64KiB (the default) and 1
847 /// byte. Future extensions may relax this constraint and allow all powers
848 /// of two.
849 ///
850 /// Support for this proposal is disabled by default.
851 ///
852 /// [WebAssembly custom-page-sizes proposal]: https://github.com/WebAssembly/custom-page-sizes
853 pub fn wasm_custom_page_sizes(&mut self, enable: bool) -> &mut Self {
854 self.wasm_features(WasmFeatures::CUSTOM_PAGE_SIZES, enable);
855 self
856 }
857
858 /// Configures whether the WebAssembly [threads] proposal will be enabled
859 /// for compilation.
860 ///
861 /// This feature gates items such as shared memories and atomic
862 /// instructions. Note that the threads feature depends on the bulk memory
863 /// feature, which is enabled by default. Additionally note that while the
864 /// wasm feature is called "threads" it does not actually include the
865 /// ability to spawn threads. Spawning threads is part of the [wasi-threads]
866 /// proposal which is a separately gated feature in Wasmtime.
867 ///
868 /// Embeddings of Wasmtime are able to build their own custom threading
869 /// scheme on top of the core wasm threads proposal, however.
870 ///
871 /// The default value for this option is whether the `threads`
872 /// crate feature of Wasmtime is enabled or not. By default this crate
873 /// feature is enabled.
874 ///
875 /// [threads]: https://github.com/webassembly/threads
876 /// [wasi-threads]: https://github.com/webassembly/wasi-threads
877 #[cfg(feature = "threads")]
878 pub fn wasm_threads(&mut self, enable: bool) -> &mut Self {
879 self.wasm_features(WasmFeatures::THREADS, enable);
880 self
881 }
882
883 /// Configures whether the WebAssembly [shared-everything-threads] proposal
884 /// will be enabled for compilation.
885 ///
886 /// This feature gates extended use of the `shared` attribute on items other
887 /// than memories, extra atomic instructions, and new component model
888 /// intrinsics for spawning threads. It depends on the
889 /// [`wasm_threads`][Self::wasm_threads] being enabled.
890 ///
891 /// [shared-everything-threads]:
892 /// https://github.com/webassembly/shared-everything-threads
893 pub fn wasm_shared_everything_threads(&mut self, enable: bool) -> &mut Self {
894 self.wasm_features(WasmFeatures::SHARED_EVERYTHING_THREADS, enable);
895 self
896 }
897
    /// Configures whether the [WebAssembly reference types proposal][proposal]
    /// will be enabled for compilation.
    ///
    /// This feature gates items such as the `externref` and `funcref` types as
    /// well as allowing a module to define multiple tables.
    ///
    /// Note that the reference types proposal depends on the bulk memory proposal.
    ///
    /// This feature is `true` by default.
    ///
    /// # Errors
    ///
    /// The validation of this feature is deferred until the engine is being built,
    /// and thus may cause `Engine::new` to fail if the `bulk_memory` feature is disabled.
    ///
    /// [proposal]: https://github.com/webassembly/reference-types
    #[cfg(feature = "gc")]
    pub fn wasm_reference_types(&mut self, enable: bool) -> &mut Self {
        self.wasm_features(WasmFeatures::REFERENCE_TYPES, enable);
        self
    }
919
920 /// Configures whether the [WebAssembly function references
921 /// proposal][proposal] will be enabled for compilation.
922 ///
923 /// This feature gates non-nullable reference types, function reference
924 /// types, `call_ref`, `ref.func`, and non-nullable reference related
925 /// instructions.
926 ///
927 /// Note that the function references proposal depends on the reference
928 /// types proposal.
929 ///
930 /// This feature is `false` by default.
931 ///
932 /// [proposal]: https://github.com/WebAssembly/function-references
933 #[cfg(feature = "gc")]
934 pub fn wasm_function_references(&mut self, enable: bool) -> &mut Self {
935 self.wasm_features(WasmFeatures::FUNCTION_REFERENCES, enable);
936 self
937 }
938
939 /// Configures whether the [WebAssembly wide-arithmetic][proposal] will be
940 /// enabled for compilation.
941 ///
942 /// This feature is `false` by default.
943 ///
944 /// [proposal]: https://github.com/WebAssembly/wide-arithmetic
945 pub fn wasm_wide_arithmetic(&mut self, enable: bool) -> &mut Self {
946 self.wasm_features(WasmFeatures::WIDE_ARITHMETIC, enable);
947 self
948 }
949
    /// Configures whether the [WebAssembly Garbage Collection
    /// proposal][proposal] will be enabled for compilation.
    ///
    /// This feature gates `struct` and `array` type definitions and references,
    /// the `i31ref` type, and all related instructions.
    ///
    /// Note that the GC proposal depends on the typed function
    /// references proposal.
    ///
    /// This feature is `false` by default.
    ///
    /// **Warning: Wasmtime's implementation of the GC proposal is still in
    /// progress and generally not ready for primetime.**
    ///
    /// [proposal]: https://github.com/WebAssembly/gc
    #[cfg(feature = "gc")]
    pub fn wasm_gc(&mut self, enable: bool) -> &mut Self {
        self.wasm_features(WasmFeatures::GC, enable);
        self
    }
970
971 /// Configures whether the WebAssembly SIMD proposal will be
972 /// enabled for compilation.
973 ///
974 /// The [WebAssembly SIMD proposal][proposal]. This feature gates items such
975 /// as the `v128` type and all of its operators being in a module. Note that
976 /// this does not enable the [relaxed simd proposal].
977 ///
978 /// **Note**
979 ///
980 /// On x86_64 platforms the base CPU feature requirement for SIMD
981 /// is SSE2 for the Cranelift compiler and AVX for the Winch compiler.
982 ///
983 /// This is `true` by default.
984 ///
985 /// [proposal]: https://github.com/webassembly/simd
986 /// [relaxed simd proposal]: https://github.com/WebAssembly/relaxed-simd
987 pub fn wasm_simd(&mut self, enable: bool) -> &mut Self {
988 self.wasm_features(WasmFeatures::SIMD, enable);
989 self
990 }
991
992 /// Configures whether the WebAssembly Relaxed SIMD proposal will be
993 /// enabled for compilation.
994 ///
995 /// The relaxed SIMD proposal adds new instructions to WebAssembly which,
996 /// for some specific inputs, are allowed to produce different results on
997 /// different hosts. More-or-less this proposal enables exposing
998 /// platform-specific semantics of SIMD instructions in a controlled
999 /// fashion to a WebAssembly program. From an embedder's perspective this
1000 /// means that WebAssembly programs may execute differently depending on
1001 /// whether the host is x86_64 or AArch64, for example.
1002 ///
1003 /// By default Wasmtime lowers relaxed SIMD instructions to the fastest
1004 /// lowering for the platform it's running on. This means that, by default,
1005 /// some relaxed SIMD instructions may have different results for the same
1006 /// inputs across x86_64 and AArch64. This behavior can be disabled through
1007 /// the [`Config::relaxed_simd_deterministic`] option which will force
1008 /// deterministic behavior across all platforms, as classified by the
1009 /// specification, at the cost of performance.
1010 ///
1011 /// This is `true` by default.
1012 ///
1013 /// [proposal]: https://github.com/webassembly/relaxed-simd
1014 pub fn wasm_relaxed_simd(&mut self, enable: bool) -> &mut Self {
1015 self.wasm_features(WasmFeatures::RELAXED_SIMD, enable);
1016 self
1017 }
1018
    /// This option can be used to control the behavior of the [relaxed SIMD
    /// proposal's][proposal] instructions.
    ///
    /// The relaxed SIMD proposal introduces instructions that are allowed to
    /// have different behavior on different architectures, primarily to afford
    /// an efficient implementation on all architectures. This means, however,
    /// that the same module may execute differently on one host than another,
    /// which typically is not otherwise the case. This option is provided to
    /// force Wasmtime to generate deterministic code for all relaxed simd
    /// instructions, at the cost of performance, for all architectures. When
    /// this option is enabled then the deterministic behavior of all
    /// instructions in the relaxed SIMD proposal is selected.
    ///
    /// This is `false` by default.
    ///
    /// [proposal]: https://github.com/webassembly/relaxed-simd
    pub fn relaxed_simd_deterministic(&mut self, enable: bool) -> &mut Self {
        // `Some(..)` records an explicit choice, as opposed to the unset
        // default of the tunable.
        self.tunables.relaxed_simd_deterministic = Some(enable);
        self
    }
1039
1040 /// Configures whether the [WebAssembly bulk memory operations
1041 /// proposal][proposal] will be enabled for compilation.
1042 ///
1043 /// This feature gates items such as the `memory.copy` instruction, passive
1044 /// data/table segments, etc, being in a module.
1045 ///
1046 /// This is `true` by default.
1047 ///
1048 /// Feature `reference_types`, which is also `true` by default, requires
1049 /// this feature to be enabled. Thus disabling this feature must also disable
1050 /// `reference_types` as well using [`wasm_reference_types`](crate::Config::wasm_reference_types).
1051 ///
1052 /// # Errors
1053 ///
1054 /// Disabling this feature without disabling `reference_types` will cause
1055 /// `Engine::new` to fail.
1056 ///
1057 /// [proposal]: https://github.com/webassembly/bulk-memory-operations
1058 pub fn wasm_bulk_memory(&mut self, enable: bool) -> &mut Self {
1059 self.wasm_features(WasmFeatures::BULK_MEMORY, enable);
1060 self
1061 }
1062
1063 /// Configures whether the WebAssembly multi-value [proposal] will
1064 /// be enabled for compilation.
1065 ///
1066 /// This feature gates functions and blocks returning multiple values in a
1067 /// module, for example.
1068 ///
1069 /// This is `true` by default.
1070 ///
1071 /// [proposal]: https://github.com/webassembly/multi-value
1072 pub fn wasm_multi_value(&mut self, enable: bool) -> &mut Self {
1073 self.wasm_features(WasmFeatures::MULTI_VALUE, enable);
1074 self
1075 }
1076
1077 /// Configures whether the WebAssembly multi-memory [proposal] will
1078 /// be enabled for compilation.
1079 ///
1080 /// This feature gates modules having more than one linear memory
1081 /// declaration or import.
1082 ///
1083 /// This is `true` by default.
1084 ///
1085 /// [proposal]: https://github.com/webassembly/multi-memory
1086 pub fn wasm_multi_memory(&mut self, enable: bool) -> &mut Self {
1087 self.wasm_features(WasmFeatures::MULTI_MEMORY, enable);
1088 self
1089 }
1090
    /// Configures whether the WebAssembly memory64 [proposal] will
    /// be enabled for compilation.
    ///
    /// Note that the upstream specification is not finalized and Wasmtime
    /// may also have bugs for this feature since it hasn't been exercised
    /// much.
    ///
    /// This is `false` by default.
    ///
    /// [proposal]: https://github.com/webassembly/memory64
    pub fn wasm_memory64(&mut self, enable: bool) -> &mut Self {
        self.wasm_features(WasmFeatures::MEMORY64, enable);
        self
    }
1105
1106 /// Configures whether the WebAssembly extended-const [proposal] will
1107 /// be enabled for compilation.
1108 ///
1109 /// This is `true` by default.
1110 ///
1111 /// [proposal]: https://github.com/webassembly/extended-const
1112 pub fn wasm_extended_const(&mut self, enable: bool) -> &mut Self {
1113 self.wasm_features(WasmFeatures::EXTENDED_CONST, enable);
1114 self
1115 }
1116
    /// Configures whether the [WebAssembly stack switching
    /// proposal][proposal] will be enabled for compilation.
    ///
    /// This feature gates the use of control tags.
    ///
    /// This feature depends on the `function_reference_types` and
    /// `exceptions` features.
    ///
    /// This feature is `false` by default.
    ///
    /// # Errors
    ///
    /// Feature validation is deferred until the engine is being built, so
    /// `Engine::new` may fail if the `function_references` and `exceptions`
    /// features this proposal depends on are disabled.
    ///
    /// [proposal]: https://github.com/webassembly/stack-switching
    pub fn wasm_stack_switching(&mut self, enable: bool) -> &mut Self {
        self.wasm_features(WasmFeatures::STACK_SWITCHING, enable);
        self
    }
1134
1135 /// Configures whether the WebAssembly component-model [proposal] will
1136 /// be enabled for compilation.
1137 ///
1138 /// This flag can be used to blanket disable all components within Wasmtime.
1139 /// Otherwise usage of components requires statically using
1140 /// [`Component`](crate::component::Component) instead of
1141 /// [`Module`](crate::Module) for example anyway.
1142 ///
1143 /// The default value for this option is whether the `component-model`
1144 /// crate feature of Wasmtime is enabled or not. By default this crate
1145 /// feature is enabled.
1146 ///
1147 /// [proposal]: https://github.com/webassembly/component-model
1148 #[cfg(feature = "component-model")]
1149 pub fn wasm_component_model(&mut self, enable: bool) -> &mut Self {
1150 self.wasm_features(WasmFeatures::COMPONENT_MODEL, enable);
1151 self
1152 }
1153
1154 /// Configures whether components support the async ABI [proposal] for
1155 /// lifting and lowering functions, as well as `stream`, `future`, and
1156 /// `error-context` types.
1157 ///
1158 /// Please note that Wasmtime's support for this feature is _very_
1159 /// incomplete.
1160 ///
1161 /// [proposal]:
1162 /// https://github.com/WebAssembly/component-model/blob/main/design/mvp/Concurrency.md
1163 #[cfg(feature = "component-model-async")]
1164 pub fn wasm_component_model_async(&mut self, enable: bool) -> &mut Self {
1165 self.wasm_features(WasmFeatures::CM_ASYNC, enable);
1166 self
1167 }
1168
1169 /// This corresponds to the 🚝 emoji in the component model specification.
1170 ///
1171 /// Please note that Wasmtime's support for this feature is _very_
1172 /// incomplete.
1173 ///
1174 /// [proposal]:
1175 /// https://github.com/WebAssembly/component-model/blob/main/design/mvp/Concurrency.md
1176 #[cfg(feature = "component-model-async")]
1177 pub fn wasm_component_model_async_builtins(&mut self, enable: bool) -> &mut Self {
1178 self.wasm_features(WasmFeatures::CM_ASYNC_BUILTINS, enable);
1179 self
1180 }
1181
1182 /// This corresponds to the 🚟 emoji in the component model specification.
1183 ///
1184 /// Please note that Wasmtime's support for this feature is _very_
1185 /// incomplete.
1186 ///
1187 /// [proposal]: https://github.com/WebAssembly/component-model/blob/main/design/mvp/Concurrency.md
1188 #[cfg(feature = "component-model-async")]
1189 pub fn wasm_component_model_async_stackful(&mut self, enable: bool) -> &mut Self {
1190 self.wasm_features(WasmFeatures::CM_ASYNC_STACKFUL, enable);
1191 self
1192 }
1193
1194 /// This corresponds to the 🧵 emoji in the component model specification.
1195 ///
1196 /// Please note that Wasmtime's support for this feature is _very_
1197 /// incomplete.
1198 ///
1199 /// [proposal]:
1200 /// https://github.com/WebAssembly/component-model/pull/557
1201 #[cfg(feature = "component-model-async")]
1202 pub fn wasm_component_model_threading(&mut self, enable: bool) -> &mut Self {
1203 self.wasm_features(WasmFeatures::CM_THREADING, enable);
1204 self
1205 }
1206
1207 /// This corresponds to the 📝 emoji in the component model specification.
1208 ///
1209 /// Please note that Wasmtime's support for this feature is _very_
1210 /// incomplete.
1211 ///
1212 /// [proposal]: https://github.com/WebAssembly/component-model/blob/main/design/mvp/Concurrency.md
1213 #[cfg(feature = "component-model")]
1214 pub fn wasm_component_model_error_context(&mut self, enable: bool) -> &mut Self {
1215 self.wasm_features(WasmFeatures::CM_ERROR_CONTEXT, enable);
1216 self
1217 }
1218
1219 /// Configures whether the [GC extension to the component-model
1220 /// proposal][proposal] is enabled or not.
1221 ///
1222 /// This corresponds to the 🛸 emoji in the component model specification.
1223 ///
1224 /// Please note that Wasmtime's support for this feature is _very_
1225 /// incomplete.
1226 ///
1227 /// [proposal]: https://github.com/WebAssembly/component-model/issues/525
1228 #[cfg(feature = "component-model")]
1229 pub fn wasm_component_model_gc(&mut self, enable: bool) -> &mut Self {
1230 self.wasm_features(WasmFeatures::CM_GC, enable);
1231 self
1232 }
1233
1234 /// This corresponds to the 🔧 emoji in the component model specification.
1235 ///
1236 /// Please note that Wasmtime's support for this feature is _very_
1237 /// incomplete.
1238 #[cfg(feature = "component-model")]
1239 pub fn wasm_component_model_fixed_length_lists(&mut self, enable: bool) -> &mut Self {
1240 self.wasm_features(WasmFeatures::CM_FIXED_SIZE_LIST, enable);
1241 self
1242 }
1243
1244 /// Configures whether the [Exception-handling proposal][proposal] is enabled or not.
1245 ///
1246 /// [proposal]: https://github.com/WebAssembly/exception-handling
1247 #[cfg(feature = "gc")]
1248 pub fn wasm_exceptions(&mut self, enable: bool) -> &mut Self {
1249 self.wasm_features(WasmFeatures::EXCEPTIONS, enable);
1250 self
1251 }
1252
1253 #[doc(hidden)] // FIXME(#3427) - if/when implemented then un-hide this
1254 #[deprecated = "This configuration option only exists for internal \
1255 usage with the spec testsuite. It may be removed at \
1256 any time and without warning. Do not rely on it!"]
1257 pub fn wasm_legacy_exceptions(&mut self, enable: bool) -> &mut Self {
1258 self.wasm_features(WasmFeatures::LEGACY_EXCEPTIONS, enable);
1259 self
1260 }
1261
    /// Configures which compilation strategy will be used for wasm modules.
    ///
    /// This method can be used to configure which compiler is used for wasm
    /// modules, and for more documentation consult the [`Strategy`] enumeration
    /// and its documentation.
    ///
    /// The default value for this is `Strategy::Auto`.
    ///
    /// # Panics
    ///
    /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
    #[cfg(any(feature = "cranelift", feature = "winch"))]
    pub fn strategy(&mut self, strategy: Strategy) -> &mut Self {
        // NOTE(review): `not_auto()` appears to normalize `Strategy::Auto`
        // before storage — confirm against `Strategy::not_auto`.
        self.compiler_config_mut().strategy = strategy.not_auto();
        self
    }
1278
    /// Configures which garbage collector will be used for Wasm modules.
    ///
    /// This method can be used to configure which garbage collector
    /// implementation is used for Wasm modules. For more documentation, consult
    /// the [`Collector`] enumeration and its documentation.
    ///
    /// The default value for this is `Collector::Auto`.
    #[cfg(feature = "gc")]
    pub fn collector(&mut self, collector: Collector) -> &mut Self {
        // Record the collector choice.
        self.collector = collector;
        self
    }
1291
    /// Creates a default profiler based on the profiling strategy chosen.
    ///
    /// Profiler creation calls the type's default initializer where the purpose is
    /// really just to put in place the type used for profiling.
    ///
    /// Some [`ProfilingStrategy`] values require specific platforms or particular
    /// features to be enabled, such as `ProfilingStrategy::JitDump` requiring the
    /// `jitdump` feature.
    ///
    /// # Errors
    ///
    /// The validation of this field is deferred until the engine is being built, and thus may
    /// cause `Engine::new` to fail if the required feature is disabled, or the platform is not
    /// supported.
    pub fn profiler(&mut self, profile: ProfilingStrategy) -> &mut Self {
        // Record the strategy; the concrete profiling agent is created later.
        self.profiling_strategy = profile;
        self
    }
1310
1311 /// Configures whether the debug verifier of Cranelift is enabled or not.
1312 ///
1313 /// When Cranelift is used as a code generation backend this will configure
1314 /// it to have the `enable_verifier` flag which will enable a number of debug
1315 /// checks inside of Cranelift. This is largely only useful for the
1316 /// developers of wasmtime itself.
1317 ///
1318 /// The default value for this is `false`
1319 ///
1320 /// # Panics
1321 ///
1322 /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
1323 #[cfg(any(feature = "cranelift", feature = "winch"))]
1324 pub fn cranelift_debug_verifier(&mut self, enable: bool) -> &mut Self {
1325 let val = if enable { "true" } else { "false" };
1326 self.compiler_config_mut()
1327 .settings
1328 .insert("enable_verifier".to_string(), val.to_string());
1329 self
1330 }
1331
1332 /// Configures whether extra debug checks are inserted into
1333 /// Wasmtime-generated code by Cranelift.
1334 ///
1335 /// The default value for this is `false`
1336 ///
1337 /// # Panics
1338 ///
1339 /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
1340 #[cfg(any(feature = "cranelift", feature = "winch"))]
1341 pub fn cranelift_wasmtime_debug_checks(&mut self, enable: bool) -> &mut Self {
1342 unsafe { self.cranelift_flag_set("wasmtime_debug_checks", &enable.to_string()) }
1343 }
1344
1345 /// Configures the Cranelift code generator optimization level.
1346 ///
1347 /// When the Cranelift code generator is used you can configure the
1348 /// optimization level used for generated code in a few various ways. For
1349 /// more information see the documentation of [`OptLevel`].
1350 ///
1351 /// The default value for this is `OptLevel::Speed`.
1352 ///
1353 /// # Panics
1354 ///
1355 /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
1356 #[cfg(any(feature = "cranelift", feature = "winch"))]
1357 pub fn cranelift_opt_level(&mut self, level: OptLevel) -> &mut Self {
1358 let val = match level {
1359 OptLevel::None => "none",
1360 OptLevel::Speed => "speed",
1361 OptLevel::SpeedAndSize => "speed_and_size",
1362 };
1363 self.compiler_config_mut()
1364 .settings
1365 .insert("opt_level".to_string(), val.to_string());
1366 self
1367 }
1368
1369 /// Configures the regalloc algorithm used by the Cranelift code generator.
1370 ///
1371 /// Cranelift can select any of several register allocator algorithms. Each
1372 /// of these algorithms generates correct code, but they represent different
1373 /// tradeoffs between compile speed (how expensive the compilation process
1374 /// is) and run-time speed (how fast the generated code runs).
1375 /// For more information see the documentation of [`RegallocAlgorithm`].
1376 ///
1377 /// The default value for this is `RegallocAlgorithm::Backtracking`.
1378 ///
1379 /// # Panics
1380 ///
1381 /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
1382 #[cfg(any(feature = "cranelift", feature = "winch"))]
1383 pub fn cranelift_regalloc_algorithm(&mut self, algo: RegallocAlgorithm) -> &mut Self {
1384 let val = match algo {
1385 RegallocAlgorithm::Backtracking => "backtracking",
1386 RegallocAlgorithm::SinglePass => "single_pass",
1387 };
1388 self.compiler_config_mut()
1389 .settings
1390 .insert("regalloc_algorithm".to_string(), val.to_string());
1391 self
1392 }
1393
1394 /// Configures whether Cranelift should perform a NaN-canonicalization pass.
1395 ///
1396 /// When Cranelift is used as a code generation backend this will configure
1397 /// it to replace NaNs with a single canonical value. This is useful for
1398 /// users requiring entirely deterministic WebAssembly computation. This is
1399 /// not required by the WebAssembly spec, so it is not enabled by default.
1400 ///
1401 /// Note that this option affects not only WebAssembly's `f32` and `f64`
1402 /// types but additionally the `v128` type. This option will cause
1403 /// operations using any of these types to have extra checks placed after
1404 /// them to normalize NaN values as needed.
1405 ///
1406 /// The default value for this is `false`
1407 ///
1408 /// # Panics
1409 ///
1410 /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
1411 #[cfg(any(feature = "cranelift", feature = "winch"))]
1412 pub fn cranelift_nan_canonicalization(&mut self, enable: bool) -> &mut Self {
1413 let val = if enable { "true" } else { "false" };
1414 self.compiler_config_mut()
1415 .settings
1416 .insert("enable_nan_canonicalization".to_string(), val.to_string());
1417 self
1418 }
1419
1420 /// Controls whether proof-carrying code (PCC) is used to validate
1421 /// lowering of Wasm sandbox checks.
1422 ///
1423 /// Proof-carrying code carries "facts" about program values from
1424 /// the IR all the way to machine code, and checks those facts
1425 /// against known machine-instruction semantics. This guards
1426 /// against bugs in instruction lowering that might create holes
1427 /// in the Wasm sandbox.
1428 ///
1429 /// PCC is designed to be fast: it does not require complex
1430 /// solvers or logic engines to verify, but only a linear pass
1431 /// over a trail of "breadcrumbs" or facts at each intermediate
1432 /// value. Thus, it is appropriate to enable in production.
1433 ///
1434 /// # Panics
1435 ///
1436 /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
1437 #[cfg(any(feature = "cranelift", feature = "winch"))]
1438 pub fn cranelift_pcc(&mut self, enable: bool) -> &mut Self {
1439 let val = if enable { "true" } else { "false" };
1440 self.compiler_config_mut()
1441 .settings
1442 .insert("enable_pcc".to_string(), val.to_string());
1443 self
1444 }
1445
1446 /// Allows setting a Cranelift boolean flag or preset. This allows
1447 /// fine-tuning of Cranelift settings.
1448 ///
1449 /// Since Cranelift flags may be unstable, this method should not be considered to be stable
1450 /// either; other `Config` functions should be preferred for stability.
1451 ///
1452 /// # Safety
1453 ///
1454 /// This is marked as unsafe, because setting the wrong flag might break invariants,
1455 /// resulting in execution hazards.
1456 ///
1457 /// # Errors
1458 ///
1459 /// The validation of the flags are deferred until the engine is being built, and thus may
1460 /// cause `Engine::new` fail if the flag's name does not exist, or the value is not appropriate
1461 /// for the flag type.
1462 ///
1463 /// # Panics
1464 ///
1465 /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
1466 #[cfg(any(feature = "cranelift", feature = "winch"))]
1467 pub unsafe fn cranelift_flag_enable(&mut self, flag: &str) -> &mut Self {
1468 self.compiler_config_mut().flags.insert(flag.to_string());
1469 self
1470 }
1471
1472 /// Allows settings another Cranelift flag defined by a flag name and value. This allows
1473 /// fine-tuning of Cranelift settings.
1474 ///
1475 /// Since Cranelift flags may be unstable, this method should not be considered to be stable
1476 /// either; other `Config` functions should be preferred for stability.
1477 ///
1478 /// # Safety
1479 ///
1480 /// This is marked as unsafe, because setting the wrong flag might break invariants,
1481 /// resulting in execution hazards.
1482 ///
1483 /// # Errors
1484 ///
1485 /// The validation of the flags are deferred until the engine is being built, and thus may
1486 /// cause `Engine::new` fail if the flag's name does not exist, or incompatible with other
1487 /// settings.
1488 ///
1489 /// For example, feature `wasm_backtrace` will set `unwind_info` to `true`, but if it's
1490 /// manually set to false then it will fail.
1491 ///
1492 /// # Panics
1493 ///
1494 /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
1495 #[cfg(any(feature = "cranelift", feature = "winch"))]
1496 pub unsafe fn cranelift_flag_set(&mut self, name: &str, value: &str) -> &mut Self {
1497 self.compiler_config_mut()
1498 .settings
1499 .insert(name.to_string(), value.to_string());
1500 self
1501 }
1502
    /// Set a custom [`Cache`].
    ///
    /// To load a cache configuration from a file, use [`Cache::from_file`]. Otherwise, you can
    /// create a new cache config using [`CacheConfig::new`] and passing that to [`Cache::new`].
    ///
    /// If you want to disable the cache, you can call this method with `None`.
    ///
    /// By default, new configs do not have caching enabled.
    /// Every call to [`Module::new(my_wasm)`][crate::Module::new] will recompile `my_wasm`,
    /// even when it is unchanged, unless an enabled `CacheConfig` is provided.
    ///
    /// This method is only available when the `cache` feature of this crate is
    /// enabled.
    ///
    /// [docs]: https://bytecodealliance.github.io/wasmtime/cli-cache.html
    #[cfg(feature = "cache")]
    pub fn cache(&mut self, cache: Option<Cache>) -> &mut Self {
        // `None` disables caching entirely.
        self.cache = cache;
        self
    }
1523
1524 /// Sets a custom memory creator.
1525 ///
1526 /// Custom memory creators are used when creating host `Memory` objects or when
1527 /// creating instance linear memories for the on-demand instance allocation strategy.
1528 #[cfg(feature = "runtime")]
1529 pub fn with_host_memory(&mut self, mem_creator: Arc<dyn MemoryCreator>) -> &mut Self {
1530 self.mem_creator = Some(Arc::new(MemoryCreatorProxy(mem_creator)));
1531 self
1532 }
1533
1534 /// Sets a custom stack creator.
1535 ///
    /// Custom stack creators are used when creating async instance stacks for
    /// the on-demand instance allocation strategy.
1538 #[cfg(feature = "async")]
1539 pub fn with_host_stack(&mut self, stack_creator: Arc<dyn StackCreator>) -> &mut Self {
1540 self.stack_creator = Some(Arc::new(StackCreatorProxy(stack_creator)));
1541 self
1542 }
1543
1544 /// Sets a custom executable-memory publisher.
1545 ///
1546 /// Custom executable-memory publishers are hooks that allow
1547 /// Wasmtime to make certain regions of memory executable when
1548 /// loading precompiled modules or compiling new modules
1549 /// in-process. In most modern operating systems, memory allocated
1550 /// for heap usage is readable and writable by default but not
1551 /// executable. To jump to machine code stored in that memory, we
1552 /// need to make it executable. For security reasons, we usually
1553 /// also make it read-only at the same time, so the executing code
1554 /// can't be modified later.
1555 ///
1556 /// By default, Wasmtime will use the appropriate system calls on
1557 /// the host platform for this work. However, it also allows
1558 /// plugging in a custom implementation via this configuration
1559 /// option. This may be useful on custom or `no_std` platforms,
1560 /// for example, especially where virtual memory is not otherwise
1561 /// used by Wasmtime (no `signals-and-traps` feature).
1562 #[cfg(feature = "runtime")]
1563 pub fn with_custom_code_memory(
1564 &mut self,
1565 custom_code_memory: Option<Arc<dyn CustomCodeMemory>>,
1566 ) -> &mut Self {
1567 self.custom_code_memory = custom_code_memory;
1568 self
1569 }
1570
1571 /// Sets the instance allocation strategy to use.
1572 ///
1573 /// This is notably used in conjunction with
1574 /// [`InstanceAllocationStrategy::Pooling`] and [`PoolingAllocationConfig`].
1575 pub fn allocation_strategy(
1576 &mut self,
1577 strategy: impl Into<InstanceAllocationStrategy>,
1578 ) -> &mut Self {
1579 self.allocation_strategy = strategy.into();
1580 self
1581 }
1582
1583 /// Specifies the capacity of linear memories, in bytes, in their initial
1584 /// allocation.
1585 ///
1586 /// > Note: this value has important performance ramifications, be sure to
1587 /// > benchmark when setting this to a non-default value and read over this
1588 /// > documentation.
1589 ///
1590 /// This function will change the size of the initial memory allocation made
1591 /// for linear memories. This setting is only applicable when the initial
1592 /// size of a linear memory is below this threshold. Linear memories are
1593 /// allocated in the virtual address space of the host process with OS APIs
1594 /// such as `mmap` and this setting affects how large the allocation will
1595 /// be.
1596 ///
1597 /// ## Background: WebAssembly Linear Memories
1598 ///
1599 /// WebAssembly linear memories always start with a minimum size and can
1600 /// possibly grow up to a maximum size. The minimum size is always specified
1601 /// in a WebAssembly module itself and the maximum size can either be
1602 /// optionally specified in the module or inherently limited by the index
1603 /// type. For example for this module:
1604 ///
1605 /// ```wasm
1606 /// (module
1607 /// (memory $a 4)
1608 /// (memory $b 4096 4096 (pagesize 1))
1609 /// (memory $c i64 10)
1610 /// )
1611 /// ```
1612 ///
1613 /// * Memory `$a` initially allocates 4 WebAssembly pages (256KiB) and can
1614 /// grow up to 4GiB, the limit of the 32-bit index space.
1615 /// * Memory `$b` initially allocates 4096 WebAssembly pages, but in this
1616 /// case its page size is 1, so it's 4096 bytes. Memory can also grow no
1617 /// further meaning that it will always be 4096 bytes.
1618 /// * Memory `$c` is a 64-bit linear memory which starts with 640KiB of
1619 /// memory and can theoretically grow up to 2^64 bytes, although most
1620 /// hosts will run out of memory long before that.
1621 ///
1622 /// All operations on linear memories done by wasm are required to be
1623 /// in-bounds. Any access beyond the end of a linear memory is considered a
1624 /// trap.
1625 ///
1626 /// ## What this setting affects: Virtual Memory
1627 ///
1628 /// This setting is used to configure the behavior of the size of the linear
1629 /// memory allocation performed for each of these memories. For example the
1630 /// initial linear memory allocation looks like this:
1631 ///
1632 /// ```text
1633 /// memory_reservation
1634 /// |
1635 /// ◄─────────┴────────────────►
1636 /// ┌───────┬─────────┬──────────────────┬───────┐
1637 /// │ guard │ initial │ ... capacity ... │ guard │
1638 /// └───────┴─────────┴──────────────────┴───────┘
1639 /// ◄──┬──► ◄──┬──►
1640 /// │ │
1641 /// │ memory_guard_size
1642 /// │
1643 /// │
1644 /// memory_guard_size (if guard_before_linear_memory)
1645 /// ```
1646 ///
1647 /// Memory in the `initial` range is accessible to the instance and can be
1648 /// read/written by wasm code. Memory in the `guard` regions is never
1649 /// accessible to wasm code and memory in `capacity` is initially
1650 /// inaccessible but may become accessible through `memory.grow` instructions
1651 /// for example.
1652 ///
1653 /// This means that this setting is the size of the initial chunk of virtual
1654 /// memory that a linear memory may grow into.
1655 ///
1656 /// ## What this setting affects: Runtime Speed
1657 ///
1658 /// This is a performance-sensitive setting which is taken into account
1659 /// during the compilation process of a WebAssembly module. For example if a
1660 /// 32-bit WebAssembly linear memory has a `memory_reservation` size of 4GiB
1661 /// then bounds checks can be elided because `capacity` will be guaranteed
1662 /// to be unmapped for all addressable bytes that wasm can access (modulo a
1663 /// few details).
1664 ///
1665 /// If `memory_reservation` was something smaller like 256KiB then that
    /// would have a much smaller impact on virtual memory but the compiled code
1667 /// would then need to have explicit bounds checks to ensure that
1668 /// loads/stores are in-bounds.
1669 ///
1670 /// The goal of this setting is to enable skipping bounds checks in most
1671 /// modules by default. Some situations which require explicit bounds checks
1672 /// though are:
1673 ///
1674 /// * When `memory_reservation` is smaller than the addressable size of the
    ///   linear memory. For example, 64-bit linear memories always need
    ///   bounds checks as they can address the entire virtual address space.
1677 /// For 32-bit linear memories a `memory_reservation` minimum size of 4GiB
1678 /// is required to elide bounds checks.
1679 ///
1680 /// * When linear memories have a page size of 1 then bounds checks are
1681 /// required. In this situation virtual memory can't be relied upon
1682 /// because that operates at the host page size granularity where wasm
1683 /// requires a per-byte level granularity.
1684 ///
1685 /// * Configuration settings such as [`Config::signals_based_traps`] can be
1686 /// used to disable the use of signal handlers and virtual memory so
1687 /// explicit bounds checks are required.
1688 ///
1689 /// * When [`Config::memory_guard_size`] is too small a bounds check may be
1690 /// required. For 32-bit wasm addresses are actually 33-bit effective
1691 /// addresses because loads/stores have a 32-bit static offset to add to
1692 /// the dynamic 32-bit address. If the static offset is larger than the
1693 /// size of the guard region then an explicit bounds check is required.
1694 ///
1695 /// ## What this setting affects: Memory Growth Behavior
1696 ///
1697 /// In addition to affecting bounds checks emitted in compiled code this
1698 /// setting also affects how WebAssembly linear memories are grown. The
1699 /// `memory.grow` instruction can be used to make a linear memory larger and
1700 /// this is also affected by APIs such as
1701 /// [`Memory::grow`](crate::Memory::grow).
1702 ///
1703 /// In these situations when the amount being grown is small enough to fit
1704 /// within the remaining capacity then the linear memory doesn't have to be
1705 /// moved at runtime. If the capacity runs out though then a new linear
1706 /// memory allocation must be made and the contents of linear memory is
1707 /// copied over.
1708 ///
1709 /// For example here's a situation where a copy happens:
1710 ///
1711 /// * The `memory_reservation` setting is configured to 128KiB.
1712 /// * A WebAssembly linear memory starts with a single 64KiB page.
1713 /// * This memory can be grown by one page to contain the full 128KiB of
1714 /// memory.
1715 /// * If grown by one more page, though, then a 192KiB allocation must be
1716 /// made and the previous 128KiB of contents are copied into the new
1717 /// allocation.
1718 ///
1719 /// This growth behavior can have a significant performance impact if lots
1720 /// of data needs to be copied on growth. Conversely if memory growth never
1721 /// needs to happen because the capacity will always be large enough then
1722 /// optimizations can be applied to cache the base pointer of linear memory.
1723 ///
1724 /// When memory is grown then the
1725 /// [`Config::memory_reservation_for_growth`] is used for the new
1726 /// memory allocation to have memory to grow into.
1727 ///
1728 /// When using the pooling allocator via [`PoolingAllocationConfig`] then
1729 /// memories are never allowed to move so requests for growth are instead
1730 /// rejected with an error.
1731 ///
1732 /// ## When this setting is not used
1733 ///
1734 /// This setting is ignored and unused when the initial size of linear
1735 /// memory is larger than this threshold. For example if this setting is set
1736 /// to 1MiB but a wasm module requires a 2MiB minimum allocation then this
1737 /// setting is ignored. In this situation the minimum size of memory will be
1738 /// allocated along with [`Config::memory_reservation_for_growth`]
1739 /// after it to grow into.
1740 ///
1741 /// That means that this value can be set to zero. That can be useful in
1742 /// benchmarking to see the overhead of bounds checks for example.
1743 /// Additionally it can be used to minimize the virtual memory allocated by
1744 /// Wasmtime.
1745 ///
1746 /// ## Default Value
1747 ///
1748 /// The default value for this property depends on the host platform. For
1749 /// 64-bit platforms there's lots of address space available, so the default
1750 /// configured here is 4GiB. When coupled with the default size of
1751 /// [`Config::memory_guard_size`] this means that 32-bit WebAssembly linear
1752 /// memories with 64KiB page sizes will skip almost all bounds checks by
1753 /// default.
1754 ///
1755 /// For 32-bit platforms this value defaults to 10MiB. This means that
1756 /// bounds checks will be required on 32-bit platforms.
1757 pub fn memory_reservation(&mut self, bytes: u64) -> &mut Self {
1758 self.tunables.memory_reservation = Some(bytes);
1759 self
1760 }
1761
1762 /// Indicates whether linear memories may relocate their base pointer at
1763 /// runtime.
1764 ///
1765 /// WebAssembly linear memories either have a maximum size that's explicitly
1766 /// listed in the type of a memory or inherently limited by the index type
1767 /// of the memory (e.g. 4GiB for 32-bit linear memories). Depending on how
1768 /// the linear memory is allocated (see [`Config::memory_reservation`]) it
1769 /// may be necessary to move the memory in the host's virtual address space
1770 /// during growth. This option controls whether this movement is allowed or
1771 /// not.
1772 ///
1773 /// An example of a linear memory needing to move is when
1774 /// [`Config::memory_reservation`] is 0 then a linear memory will be
1775 /// allocated as the minimum size of the memory plus
1776 /// [`Config::memory_reservation_for_growth`]. When memory grows beyond the
1777 /// reservation for growth then the memory needs to be relocated.
1778 ///
1779 /// When this option is set to `false` then it can have a number of impacts
1780 /// on how memories work at runtime:
1781 ///
1782 /// * Modules can be compiled with static knowledge the base pointer of
1783 /// linear memory never changes to enable optimizations such as
1784 /// loop invariant code motion (hoisting the base pointer out of a loop).
1785 ///
1786 /// * Memories cannot grow in excess of their original allocation. This
1787 /// means that [`Config::memory_reservation`] and
1788 /// [`Config::memory_reservation_for_growth`] may need tuning to ensure
1789 /// the memory configuration works at runtime.
1790 ///
1791 /// The default value for this option is `true`.
1792 pub fn memory_may_move(&mut self, enable: bool) -> &mut Self {
1793 self.tunables.memory_may_move = Some(enable);
1794 self
1795 }
1796
1797 /// Configures the size, in bytes, of the guard region used at the end of a
1798 /// linear memory's address space reservation.
1799 ///
1800 /// > Note: this value has important performance ramifications, be sure to
1801 /// > understand what this value does before tweaking it and benchmarking.
1802 ///
1803 /// This setting controls how many bytes are guaranteed to be unmapped after
1804 /// the virtual memory allocation of a linear memory. When
1805 /// combined with sufficiently large values of
1806 /// [`Config::memory_reservation`] (e.g. 4GiB for 32-bit linear memories)
1807 /// then a guard region can be used to eliminate bounds checks in generated
1808 /// code.
1809 ///
1810 /// This setting additionally can be used to help deduplicate bounds checks
1811 /// in code that otherwise requires bounds checks. For example with a 4KiB
1812 /// guard region then a 64-bit linear memory which accesses addresses `x+8`
1813 /// and `x+16` only needs to perform a single bounds check on `x`. If that
1814 /// bounds check passes then the offset is guaranteed to either reside in
1815 /// linear memory or the guard region, resulting in deterministic behavior
1816 /// either way.
1817 ///
1818 /// ## How big should the guard be?
1819 ///
1820 /// In general, like with configuring [`Config::memory_reservation`], you
1821 /// probably don't want to change this value from the defaults. Removing
1822 /// bounds checks is dependent on a number of factors where the size of the
1823 /// guard region is only one piece of the equation. Other factors include:
1824 ///
1825 /// * [`Config::memory_reservation`]
1826 /// * The index type of the linear memory (e.g. 32-bit or 64-bit)
1827 /// * The page size of the linear memory
1828 /// * Other settings such as [`Config::signals_based_traps`]
1829 ///
1830 /// Embeddings using virtual memory almost always want at least some guard
1831 /// region, but otherwise changes from the default should be profiled
1832 /// locally to see the performance impact.
1833 ///
1834 /// ## Default
1835 ///
1836 /// The default value for this property is 32MiB on 64-bit platforms. This
1837 /// allows eliminating almost all bounds checks on loads/stores with an
1838 /// immediate offset of less than 32MiB. On 32-bit platforms this defaults
1839 /// to 64KiB.
1840 pub fn memory_guard_size(&mut self, bytes: u64) -> &mut Self {
1841 self.tunables.memory_guard_size = Some(bytes);
1842 self
1843 }
1844
1845 /// Configures the size, in bytes, of the extra virtual memory space
1846 /// reserved after a linear memory is relocated.
1847 ///
1848 /// This setting is used in conjunction with [`Config::memory_reservation`]
1849 /// to configure what happens after a linear memory is relocated in the host
1850 /// address space. If the initial size of a linear memory exceeds
1851 /// [`Config::memory_reservation`] or if it grows beyond that size
1852 /// throughout its lifetime then this setting will be used.
1853 ///
1854 /// When a linear memory is relocated it will initially look like this:
1855 ///
1856 /// ```text
1857 /// memory.size
1858 /// │
1859 /// ◄──────┴─────►
1860 /// ┌───────┬──────────────┬───────┐
1861 /// │ guard │ accessible │ guard │
1862 /// └───────┴──────────────┴───────┘
1863 /// ◄──┬──►
1864 /// │
1865 /// memory_guard_size
1866 /// ```
1867 ///
1868 /// where `accessible` needs to be grown but there's no more memory to grow
1869 /// into. A new region of the virtual address space will be allocated that
1870 /// looks like this:
1871 ///
1872 /// ```text
1873 /// memory_reservation_for_growth
1874 /// │
1875 /// memory.size │
1876 /// │ │
1877 /// ◄──────┴─────► ◄─────────────┴───────────►
1878 /// ┌───────┬──────────────┬───────────────────────────┬───────┐
1879 /// │ guard │ accessible │ .. reserved for growth .. │ guard │
1880 /// └───────┴──────────────┴───────────────────────────┴───────┘
1881 /// ◄──┬──►
1882 /// │
1883 /// memory_guard_size
1884 /// ```
1885 ///
1886 /// This means that up to `memory_reservation_for_growth` bytes can be
1887 /// allocated again before the entire linear memory needs to be moved again
1888 /// when another `memory_reservation_for_growth` bytes will be appended to
1889 /// the size of the allocation.
1890 ///
1891 /// Note that this is a currently simple heuristic for optimizing the growth
1892 /// of dynamic memories, primarily implemented for the memory64 proposal
1893 /// where the maximum size of memory is larger than 4GiB. This setting is
1894 /// unlikely to be a one-size-fits-all style approach and if you're an
1895 /// embedder running into issues with growth and are interested in having
1896 /// other growth strategies available here please feel free to [open an
1897 /// issue on the Wasmtime repository][issue]!
1898 ///
1899 /// [issue]: https://github.com/bytecodealliance/wasmtime/issues/new
1900 ///
1901 /// ## Default
1902 ///
1903 /// For 64-bit platforms this defaults to 2GiB, and for 32-bit platforms
1904 /// this defaults to 1MiB.
1905 pub fn memory_reservation_for_growth(&mut self, bytes: u64) -> &mut Self {
1906 self.tunables.memory_reservation_for_growth = Some(bytes);
1907 self
1908 }
1909
1910 /// Indicates whether a guard region is present before allocations of
1911 /// linear memory.
1912 ///
1913 /// Guard regions before linear memories are never used during normal
1914 /// operation of WebAssembly modules, even if they have out-of-bounds
1915 /// loads. The only purpose for a preceding guard region in linear memory
1916 /// is extra protection against possible bugs in code generators like
1917 /// Cranelift. This setting does not affect performance in any way, but will
1918 /// result in larger virtual memory reservations for linear memories (it
1919 /// won't actually ever use more memory, just use more of the address
1920 /// space).
1921 ///
1922 /// The size of the guard region before linear memory is the same as the
1923 /// guard size that comes after linear memory, which is configured by
1924 /// [`Config::memory_guard_size`].
1925 ///
1926 /// ## Default
1927 ///
1928 /// This value defaults to `true`.
1929 pub fn guard_before_linear_memory(&mut self, enable: bool) -> &mut Self {
1930 self.tunables.guard_before_linear_memory = Some(enable);
1931 self
1932 }
1933
1934 /// Indicates whether to initialize tables lazily, so that instantiation
1935 /// is fast but indirect calls are a little slower. If false, tables
1936 /// are initialized eagerly during instantiation from any active element
1937 /// segments that apply to them.
1938 ///
1939 /// **Note** Disabling this option is not compatible with the Winch compiler.
1940 ///
1941 /// ## Default
1942 ///
1943 /// This value defaults to `true`.
1944 pub fn table_lazy_init(&mut self, table_lazy_init: bool) -> &mut Self {
1945 self.tunables.table_lazy_init = Some(table_lazy_init);
1946 self
1947 }
1948
1949 /// Configure the version information used in serialized and deserialized [`crate::Module`]s.
    /// This affects the behavior of [`crate::Module::serialize()`], as well as
1951 /// [`crate::Module::deserialize()`] and related functions.
1952 ///
1953 /// The default strategy is to use the wasmtime crate's Cargo package version.
1954 pub fn module_version(&mut self, strategy: ModuleVersionStrategy) -> Result<&mut Self> {
1955 match strategy {
1956 // This case requires special precondition for assertion in SerializedModule::to_bytes
1957 ModuleVersionStrategy::Custom(ref v) => {
1958 if v.as_bytes().len() > 255 {
1959 bail!("custom module version cannot be more than 255 bytes: {v}");
1960 }
1961 }
1962 _ => {}
1963 }
1964 self.module_version = strategy;
1965 Ok(self)
1966 }
1967
1968 /// Configure whether wasmtime should compile a module using multiple
1969 /// threads.
1970 ///
1971 /// Disabling this will result in a single thread being used to compile
1972 /// the wasm bytecode.
1973 ///
1974 /// By default parallel compilation is enabled.
1975 #[cfg(feature = "parallel-compilation")]
1976 pub fn parallel_compilation(&mut self, parallel: bool) -> &mut Self {
1977 self.parallel_compilation = parallel;
1978 self
1979 }
1980
1981 /// Configures whether compiled artifacts will contain information to map
1982 /// native program addresses back to the original wasm module.
1983 ///
1984 /// This configuration option is `true` by default and, if enabled,
1985 /// generates the appropriate tables in compiled modules to map from native
1986 /// address back to wasm source addresses. This is used for displaying wasm
1987 /// program counters in backtraces as well as generating filenames/line
1988 /// numbers if so configured as well (and the original wasm module has DWARF
1989 /// debugging information present).
1990 pub fn generate_address_map(&mut self, generate: bool) -> &mut Self {
1991 self.tunables.generate_address_map = Some(generate);
1992 self
1993 }
1994
1995 /// Configures whether copy-on-write memory-mapped data is used to
1996 /// initialize a linear memory.
1997 ///
1998 /// Initializing linear memory via a copy-on-write mapping can drastically
1999 /// improve instantiation costs of a WebAssembly module because copying
2000 /// memory is deferred. Additionally if a page of memory is only ever read
    /// from WebAssembly and never written to then the same underlying page of
2002 /// data will be reused between all instantiations of a module meaning that
2003 /// if a module is instantiated many times this can lower the overall memory
    /// required to run that module.
2005 ///
2006 /// The main disadvantage of copy-on-write initialization, however, is that
2007 /// it may be possible for highly-parallel scenarios to be less scalable. If
2008 /// a page is read initially by a WebAssembly module then that page will be
2009 /// mapped to a read-only copy shared between all WebAssembly instances. If
2010 /// the same page is then written, however, then a private copy is created
2011 /// and swapped out from the read-only version. This also requires an [IPI],
2012 /// however, which can be a significant bottleneck in high-parallelism
2013 /// situations.
2014 ///
2015 /// This feature is only applicable when a WebAssembly module meets specific
2016 /// criteria to be initialized in this fashion, such as:
2017 ///
2018 /// * Only memories defined in the module can be initialized this way.
2019 /// * Data segments for memory must use statically known offsets.
2020 /// * Data segments for memory must all be in-bounds.
2021 ///
2022 /// Modules which do not meet these criteria will fall back to
2023 /// initialization of linear memory based on copying memory.
2024 ///
2025 /// This feature of Wasmtime is also platform-specific:
2026 ///
2027 /// * Linux - this feature is supported for all instances of [`Module`].
2028 /// Modules backed by an existing mmap (such as those created by
2029 /// [`Module::deserialize_file`]) will reuse that mmap to cow-initialize
    ///   memory. Other instances of [`Module`] may use the `memfd_create`
2031 /// syscall to create an initialization image to `mmap`.
2032 /// * Unix (not Linux) - this feature is only supported when loading modules
2033 /// from a precompiled file via [`Module::deserialize_file`] where there
2034 /// is a file descriptor to use to map data into the process. Note that
2035 /// the module must have been compiled with this setting enabled as well.
2036 /// * Windows - there is no support for this feature at this time. Memory
2037 /// initialization will always copy bytes.
2038 ///
2039 /// By default this option is enabled.
2040 ///
2041 /// [`Module::deserialize_file`]: crate::Module::deserialize_file
2042 /// [`Module`]: crate::Module
2043 /// [IPI]: https://en.wikipedia.org/wiki/Inter-processor_interrupt
2044 pub fn memory_init_cow(&mut self, enable: bool) -> &mut Self {
2045 self.tunables.memory_init_cow = Some(enable);
2046 self
2047 }
2048
2049 /// A configuration option to force the usage of `memfd_create` on Linux to
2050 /// be used as the backing source for a module's initial memory image.
2051 ///
2052 /// When [`Config::memory_init_cow`] is enabled, which is enabled by
2053 /// default, module memory initialization images are taken from a module's
2054 /// original mmap if possible. If a precompiled module was loaded from disk
2055 /// this means that the disk's file is used as an mmap source for the
2056 /// initial linear memory contents. This option can be used to force, on
2057 /// Linux, that instead of using the original file on disk a new in-memory
2058 /// file is created with `memfd_create` to hold the contents of the initial
2059 /// image.
2060 ///
2061 /// This option can be used to avoid possibly loading the contents of memory
2062 /// from disk through a page fault. Instead with `memfd_create` the contents
2063 /// of memory are always in RAM, meaning that even page faults which
2064 /// initially populate a wasm linear memory will only work with RAM instead
2065 /// of ever hitting the disk that the original precompiled module is stored
2066 /// on.
2067 ///
2068 /// This option is disabled by default.
2069 pub fn force_memory_init_memfd(&mut self, enable: bool) -> &mut Self {
2070 self.force_memory_init_memfd = enable;
2071 self
2072 }
2073
2074 /// Configures whether or not a coredump should be generated and attached to
2075 /// the [`Error`](crate::Error) when a trap is raised.
2076 ///
2077 /// This option is disabled by default.
2078 #[cfg(feature = "coredump")]
2079 pub fn coredump_on_trap(&mut self, enable: bool) -> &mut Self {
2080 self.coredump_on_trap = enable;
2081 self
2082 }
2083
2084 /// Enables memory error checking for wasm programs.
2085 ///
2086 /// This option is disabled by default.
2087 ///
2088 /// # Panics
2089 ///
2090 /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
2091 #[cfg(any(feature = "cranelift", feature = "winch"))]
2092 pub fn wmemcheck(&mut self, enable: bool) -> &mut Self {
2093 self.wmemcheck = enable;
2094 self.compiler_config_mut().wmemcheck = enable;
2095 self
2096 }
2097
2098 /// Configures the "guaranteed dense image size" for copy-on-write
2099 /// initialized memories.
2100 ///
2101 /// When using the [`Config::memory_init_cow`] feature to initialize memory
2102 /// efficiently (which is enabled by default), compiled modules contain an
2103 /// image of the module's initial heap. If the module has a fairly sparse
2104 /// initial heap, with just a few data segments at very different offsets,
2105 /// this could result in a large region of zero bytes in the image. In
2106 /// other words, it's not very memory-efficient.
2107 ///
2108 /// We normally use a heuristic to avoid this: if less than half
2109 /// of the initialized range (first non-zero to last non-zero
2110 /// byte) of any memory in the module has pages with nonzero
2111 /// bytes, then we avoid creating a memory image for the entire module.
2112 ///
2113 /// However, if the embedder always needs the instantiation-time efficiency
2114 /// of copy-on-write initialization, and is otherwise carefully controlling
2115 /// parameters of the modules (for example, by limiting the maximum heap
2116 /// size of the modules), then it may be desirable to ensure a memory image
2117 /// is created even if this could go against the heuristic above. Thus, we
2118 /// add another condition: there is a size of initialized data region up to
2119 /// which we *always* allow a memory image. The embedder can set this to a
2120 /// known maximum heap size if they desire to always get the benefits of
2121 /// copy-on-write images.
2122 ///
2123 /// In the future we may implement a "best of both worlds"
2124 /// solution where we have a dense image up to some limit, and
2125 /// then support a sparse list of initializers beyond that; this
2126 /// would get most of the benefit of copy-on-write and pay the incremental
2127 /// cost of eager initialization only for those bits of memory
2128 /// that are out-of-bounds. However, for now, an embedder desiring
2129 /// fast instantiation should ensure that this setting is as large
2130 /// as the maximum module initial memory content size.
2131 ///
2132 /// By default this value is 16 MiB.
2133 pub fn memory_guaranteed_dense_image_size(&mut self, size_in_bytes: u64) -> &mut Self {
2134 self.memory_guaranteed_dense_image_size = size_in_bytes;
2135 self
2136 }
2137
2138 /// Whether to enable function inlining during compilation or not.
2139 ///
2140 /// This may result in faster execution at runtime, but adds additional
2141 /// compilation time. Inlining may also enlarge the size of compiled
2142 /// artifacts (for example, the size of the result of
2143 /// [`Engine::precompile_component`](crate::Engine::precompile_component)).
2144 ///
2145 /// Inlining is not supported by all of Wasmtime's compilation strategies;
    /// currently, only Cranelift supports it. This setting will be ignored
2147 /// when using a compilation strategy that does not support inlining, like
2148 /// Winch.
2149 ///
2150 /// Note that inlining is still somewhat experimental at the moment (as of
2151 /// the Wasmtime version 36).
2152 pub fn compiler_inlining(&mut self, inlining: bool) -> &mut Self {
2153 self.tunables.inlining = Some(inlining);
2154 self
2155 }
2156
    /// Returns the set of features that the currently selected compiler backend
    /// does not support at all and may panic on.
    ///
    /// Wasmtime strives to reject unknown modules or unsupported modules with
    /// first-class errors instead of panics. Not all compiler backends have the
    /// same level of feature support on all platforms as well. This method
    /// returns a set of features that the currently selected compiler
    /// configuration is known to not support and may panic on. This acts as a
    /// first-level filter on incoming wasm modules/configuration to fail-fast
    /// instead of panicking later on.
    ///
    /// Note that if a feature is not listed here it does not mean that the
    /// backend fully supports the proposal. Instead that means that the backend
    /// doesn't ever panic on the proposal, but errors during compilation may
    /// still be returned. This means that features listed here are definitely
    /// not supported at all, but features not listed here may still be
    /// partially supported. For example at the time of this writing the Winch
    /// backend partially supports simd so it's not listed here. Winch doesn't
    /// fully support simd but unimplemented instructions just return errors.
    fn compiler_panicking_wasm_features(&self) -> WasmFeatures {
        // First we compute the set of features that Wasmtime itself knows;
        // this is a sort of "maximal set" that we invert to create a set
        // of features we _definitely can't support_ because wasmtime
        // has never heard of them.
        let features_known_to_wasmtime = WasmFeatures::empty()
            | WasmFeatures::MUTABLE_GLOBAL
            | WasmFeatures::SATURATING_FLOAT_TO_INT
            | WasmFeatures::SIGN_EXTENSION
            | WasmFeatures::REFERENCE_TYPES
            | WasmFeatures::CALL_INDIRECT_OVERLONG
            | WasmFeatures::MULTI_VALUE
            | WasmFeatures::BULK_MEMORY
            | WasmFeatures::BULK_MEMORY_OPT
            | WasmFeatures::SIMD
            | WasmFeatures::RELAXED_SIMD
            | WasmFeatures::THREADS
            | WasmFeatures::SHARED_EVERYTHING_THREADS
            | WasmFeatures::TAIL_CALL
            | WasmFeatures::FLOATS
            | WasmFeatures::MULTI_MEMORY
            | WasmFeatures::EXCEPTIONS
            | WasmFeatures::MEMORY64
            | WasmFeatures::EXTENDED_CONST
            | WasmFeatures::COMPONENT_MODEL
            | WasmFeatures::FUNCTION_REFERENCES
            | WasmFeatures::GC
            | WasmFeatures::CUSTOM_PAGE_SIZES
            | WasmFeatures::GC_TYPES
            | WasmFeatures::STACK_SWITCHING
            | WasmFeatures::WIDE_ARITHMETIC
            | WasmFeatures::CM_ASYNC
            | WasmFeatures::CM_ASYNC_STACKFUL
            | WasmFeatures::CM_ASYNC_BUILTINS
            | WasmFeatures::CM_THREADING
            | WasmFeatures::CM_ERROR_CONTEXT
            | WasmFeatures::CM_GC
            | WasmFeatures::CM_FIXED_SIZE_LIST;

        // Everything wasmparser defines that is absent from the list above is
        // unsupported by definition. Without a compiler backend compiled in
        // (neither `cranelift` nor `winch`), this is also the final answer.
        #[allow(unused_mut, reason = "easier to avoid #[cfg]")]
        let mut unsupported = !features_known_to_wasmtime;

        #[cfg(any(feature = "cranelift", feature = "winch"))]
        match self.compiler_config.as_ref().and_then(|c| c.strategy) {
            // An unset strategy (`None`) is treated the same as an explicit
            // Cranelift selection here.
            None | Some(Strategy::Cranelift) => {
                // Pulley at this time fundamentally doesn't support the
                // `threads` proposal, notably shared memory, because Rust can't
                // safely implement loads/stores in the face of shared memory.
                // Stack switching is not implemented, either.
                if self.compiler_target().is_pulley() {
                    unsupported |= WasmFeatures::THREADS;
                    unsupported |= WasmFeatures::STACK_SWITCHING;
                }

                use target_lexicon::*;
                match self.compiler_target() {
                    Triple {
                        architecture: Architecture::X86_64 | Architecture::X86_64h,
                        operating_system:
                            OperatingSystem::Linux
                            | OperatingSystem::MacOSX(_)
                            | OperatingSystem::Darwin(_),
                        ..
                    } => {
                        // Stack switching supported on (non-Pulley) Cranelift.
                    }

                    _ => {
                        // On platforms other than x64 Unix-like, we don't
                        // support stack switching.
                        unsupported |= WasmFeatures::STACK_SWITCHING;
                    }
                }
            }
            Some(Strategy::Winch) => {
                // Proposals Winch does not implement at all on any
                // architecture.
                unsupported |= WasmFeatures::GC
                    | WasmFeatures::FUNCTION_REFERENCES
                    | WasmFeatures::RELAXED_SIMD
                    | WasmFeatures::TAIL_CALL
                    | WasmFeatures::GC_TYPES
                    | WasmFeatures::EXCEPTIONS
                    | WasmFeatures::LEGACY_EXCEPTIONS
                    | WasmFeatures::STACK_SWITCHING
                    | WasmFeatures::CM_ASYNC;
                match self.compiler_target().architecture {
                    target_lexicon::Architecture::Aarch64(_) => {
                        unsupported |= WasmFeatures::THREADS;
                        unsupported |= WasmFeatures::WIDE_ARITHMETIC;
                    }

                    // Winch doesn't support other non-x64 architectures at this
                    // time either but will return a first-class error for
                    // them.
                    _ => {}
                }
            }
            // NOTE(review): `Auto` appears to be normalized to a concrete
            // strategy before this point (the `None` arm above covers an
            // unset strategy) — confirm against `Config`'s strategy setter.
            Some(Strategy::Auto) => unreachable!(),
        }
        unsupported
    }
2276
2277 /// Calculates the set of features that are enabled for this `Config`.
2278 ///
2279 /// This method internally will start with the an empty set of features to
2280 /// avoid being tied to wasmparser's defaults. Next Wasmtime's set of
2281 /// default features are added to this set, some of which are conditional
2282 /// depending on crate features. Finally explicitly requested features via
2283 /// `wasm_*` methods on `Config` are applied. Everything is then validated
2284 /// later in `Config::validate`.
2285 fn features(&self) -> WasmFeatures {
2286 // Wasmtime by default supports all of the wasm 2.0 version of the
2287 // specification.
2288 let mut features = WasmFeatures::WASM2;
2289
2290 // On-by-default features that wasmtime has. Note that these are all
2291 // subject to the criteria at
2292 // https://docs.wasmtime.dev/contributing-implementing-wasm-proposals.html
2293 // and
2294 // https://docs.wasmtime.dev/stability-wasm-proposals.html
2295 features |= WasmFeatures::MULTI_MEMORY;
2296 features |= WasmFeatures::RELAXED_SIMD;
2297 features |= WasmFeatures::TAIL_CALL;
2298 features |= WasmFeatures::EXTENDED_CONST;
2299 features |= WasmFeatures::MEMORY64;
2300 // NB: if you add a feature above this line please double-check
2301 // https://docs.wasmtime.dev/stability-wasm-proposals.html
2302 // to ensure all requirements are met and/or update the documentation
2303 // there too.
2304
2305 // Set some features to their conditionally-enabled defaults depending
2306 // on crate compile-time features.
2307 features.set(WasmFeatures::GC_TYPES, cfg!(feature = "gc"));
2308 features.set(WasmFeatures::THREADS, cfg!(feature = "threads"));
2309 features.set(
2310 WasmFeatures::COMPONENT_MODEL,
2311 cfg!(feature = "component-model"),
2312 );
2313
2314 // From the default set of proposals remove any that the current
2315 // compiler backend may panic on if the module contains them.
2316 features = features & !self.compiler_panicking_wasm_features();
2317
2318 // After wasmtime's defaults are configured then factor in user requests
2319 // and disable/enable features. Note that the enable/disable sets should
2320 // be disjoint.
2321 debug_assert!((self.enabled_features & self.disabled_features).is_empty());
2322 features &= !self.disabled_features;
2323 features |= self.enabled_features;
2324
2325 features
2326 }
2327
2328 /// Returns the configured compiler target for this `Config`.
2329 pub(crate) fn compiler_target(&self) -> target_lexicon::Triple {
2330 // If a target is explicitly configured, always use that.
2331 if let Some(target) = self.target.clone() {
2332 return target;
2333 }
2334
2335 // If the `build.rs` script determined that this platform uses pulley by
2336 // default, then use Pulley.
2337 if cfg!(default_target_pulley) {
2338 return target_lexicon::Triple::pulley_host();
2339 }
2340
2341 // And at this point the target is for sure the host.
2342 target_lexicon::Triple::host()
2343 }
2344
    /// Validates this `Config`, returning the finalized `Tunables` and the
    /// set of enabled `WasmFeatures` on success.
    ///
    /// This is where cross-cutting consistency checks between wasm features,
    /// tunables, and compile-time crate features are performed before an
    /// engine is built.
    pub(crate) fn validate(&self) -> Result<(Tunables, WasmFeatures)> {
        let features = self.features();

        // First validate that the selected compiler backend and configuration
        // supports the set of `features` that are enabled. This will help
        // provide more first class errors instead of panics about unsupported
        // features and configurations.
        let unsupported = features & self.compiler_panicking_wasm_features();
        if !unsupported.is_empty() {
            // Report the first offending feature by name for a readable
            // error message.
            for flag in WasmFeatures::FLAGS.iter() {
                if !unsupported.contains(*flag.value()) {
                    continue;
                }
                bail!(
                    "the wasm_{} feature is not supported on this compiler configuration",
                    flag.name().to_lowercase()
                );
            }

            // `unsupported` was non-empty, so the loop above must have bailed.
            panic!("should have returned an error by now")
        }

        // Sanity-check stack sizing and crate-feature/configuration
        // mismatches.
        #[cfg(any(feature = "async", feature = "stack-switching"))]
        if self.max_wasm_stack > self.async_stack_size {
            bail!("max_wasm_stack size cannot exceed the async_stack_size");
        }
        if self.max_wasm_stack == 0 {
            bail!("max_wasm_stack size cannot be zero");
        }
        if !cfg!(feature = "wmemcheck") && self.wmemcheck {
            bail!("wmemcheck (memory checker) was requested but is not enabled in this build");
        }

        if !cfg!(feature = "gc") && features.gc_types() {
            bail!("support for GC was disabled at compile time")
        }

        // Exceptions rely on Wasmtime's GC infrastructure (see
        // `Config::gc_support`), so they require the `gc` crate feature too.
        if !cfg!(feature = "gc") && features.contains(WasmFeatures::EXCEPTIONS) {
            bail!("exceptions support requires garbage collection (GC) to be enabled in the build");
        }

        // Record-and-replay modes have extra requirements which are checked
        // by a dedicated helper.
        match &self.rr_config {
            #[cfg(feature = "rr")]
            RRConfig::Recording | RRConfig::Replaying => {
                self.validate_rr_determinism_conflicts()?;
            }
            RRConfig::None => {}
        };

        // Start from per-target defaults, then layer host- and user-specific
        // refinements on top below.
        let mut tunables = Tunables::default_for_target(&self.compiler_target())?;

        // By default this is enabled with the Cargo feature, and if the feature
        // is missing this is disabled.
        tunables.concurrency_support = cfg!(feature = "component-model-async");

        // If no target is explicitly specified then further refine `tunables`
        // for the configuration of this host depending on what platform
        // features were found available at compile time. This means that anyone
        // cross-compiling for a customized host will need to further refine
        // compilation options.
        if self.target.is_none() {
            // If this platform doesn't have native signals then change some
            // defaults to account for that. Note that VM guards are turned off
            // here because that's primarily a feature of eliding
            // bounds-checks.
            if !cfg!(has_native_signals) {
                tunables.signals_based_traps = cfg!(has_native_signals);
                tunables.memory_guard_size = 0;
            }

            // When virtual memory is not available use slightly different
            // defaults for tunables to be more amenable to `MallocMemory`.
            // Note that these can still be overridden by config options.
            if !cfg!(has_virtual_memory) {
                tunables.memory_reservation = 0;
                tunables.memory_reservation_for_growth = 1 << 20; // 1MB
                tunables.memory_init_cow = false;
            }
        }

        // If guest-debugging is enabled, we must disable
        // signals-based traps. Do this before we process the user's
        // provided tunables settings so we can detect a conflict with
        // an explicit request to use signals-based traps.
        #[cfg(feature = "debug")]
        if self.tunables.debug_guest == Some(true) {
            tunables.signals_based_traps = false;
        }

        // Overlay all explicitly-configured `Config` tunables onto the
        // defaults computed above.
        self.tunables.configure(&mut tunables);

        // If we're going to compile with winch, we must use the winch calling convention.
        #[cfg(any(feature = "cranelift", feature = "winch"))]
        {
            tunables.winch_callable = self
                .compiler_config
                .as_ref()
                .is_some_and(|c| c.strategy == Some(Strategy::Winch));
        }

        // Resolve the collector selection; only meaningful (and only compiled
        // in) when GC types are enabled.
        tunables.collector = if features.gc_types() {
            #[cfg(feature = "gc")]
            {
                use wasmtime_environ::Collector as EnvCollector;
                // NOTE(review): `try_not_auto` presumably resolves the `Auto`
                // selection to a concrete collector, making that arm below
                // unreachable — confirm against its definition.
                Some(match self.collector.try_not_auto()? {
                    Collector::DeferredReferenceCounting => EnvCollector::DeferredReferenceCounting,
                    Collector::Null => EnvCollector::Null,
                    Collector::Auto => unreachable!(),
                })
            }
            #[cfg(not(feature = "gc"))]
            bail!("cannot use GC types: the `gc` feature was disabled at compile time")
        } else {
            None
        };

        // Guest debugging requires both the `debug` crate feature and
        // signals-based traps having been turned off (see above).
        if tunables.debug_guest {
            ensure!(
                cfg!(feature = "debug"),
                "debug instrumentation support was disabled at compile time"
            );
            ensure!(
                !tunables.signals_based_traps,
                "cannot use signals-based traps with guest debugging enabled"
            );
        }

        // Concurrency support is required for some component model features.
        let requires_concurrency = WasmFeatures::CM_ASYNC
            | WasmFeatures::CM_ASYNC_BUILTINS
            | WasmFeatures::CM_ASYNC_STACKFUL
            | WasmFeatures::CM_THREADING
            | WasmFeatures::CM_ERROR_CONTEXT;
        if tunables.concurrency_support && !cfg!(feature = "component-model-async") {
            bail!(
                "concurrency support was requested but was not \
                 compiled into this build of Wasmtime"
            )
        }
        if !tunables.concurrency_support && features.intersects(requires_concurrency) {
            bail!(
                "concurrency support must be enabled to use the component \
                 model async or threading features"
            )
        }

        Ok((tunables, features))
    }
2493
    /// Builds the instance allocator described by `self.allocation_strategy`,
    /// either on-demand or pooling.
    #[cfg(feature = "runtime")]
    pub(crate) fn build_allocator(
        &self,
        tunables: &Tunables,
    ) -> Result<Box<dyn InstanceAllocator + Send + Sync>> {
        // Async stack parameters only exist when the `async` feature is
        // enabled; fall back to dummy values otherwise.
        #[cfg(feature = "async")]
        let (stack_size, stack_zeroing) = (self.async_stack_size, self.async_stack_zeroing);

        #[cfg(not(feature = "async"))]
        let (stack_size, stack_zeroing) = (0, false);

        // `tunables` is only consumed by the pooling allocator below; this
        // suppresses an unused-parameter warning when `pooling-allocator`
        // is compiled out.
        let _ = tunables;

        match &self.allocation_strategy {
            InstanceAllocationStrategy::OnDemand => {
                // NOTE(review): the leading underscore presumably silences
                // unused/unused-`mut` warnings when the `async` feature (and
                // thus the `set_stack_creator` call) is compiled out.
                let mut _allocator = try_new::<Box<_>>(OnDemandInstanceAllocator::new(
                    self.mem_creator.clone(),
                    stack_size,
                    stack_zeroing,
                ))?;
                #[cfg(feature = "async")]
                if let Some(stack_creator) = &self.stack_creator {
                    _allocator.set_stack_creator(stack_creator.clone());
                }
                Ok(_allocator as _)
            }
            #[cfg(feature = "pooling-allocator")]
            InstanceAllocationStrategy::Pooling(config) => {
                // The async stack settings are plumbed through the pooling
                // config rather than passed separately.
                let mut config = config.config;
                config.stack_size = stack_size;
                config.async_stack_zeroing = stack_zeroing;
                let allocator = try_new::<Box<_>>(
                    crate::runtime::vm::PoolingInstanceAllocator::new(&config, tunables)?,
                )?;
                Ok(allocator as _)
            }
        }
    }
2532
    /// Constructs the GC runtime selected by this config, or `Ok(None)` when
    /// GC types are not enabled at all.
    #[cfg(feature = "runtime")]
    pub(crate) fn build_gc_runtime(&self) -> Result<Option<Arc<dyn GcRuntime>>> {
        // No GC runtime is needed when GC types are disabled.
        if !self.features().gc_types() {
            return Ok(None);
        }

        // GC types were requested but support was compiled out entirely.
        #[cfg(not(feature = "gc"))]
        bail!("cannot create a GC runtime: the `gc` feature was disabled at compile time");

        #[cfg(feature = "gc")]
        #[cfg_attr(
            not(any(feature = "gc-null", feature = "gc-drc")),
            expect(unreachable_code, reason = "definitions known to be dummy")
        )]
        {
            // NOTE(review): `try_not_auto` presumably resolves `Auto` to a
            // concrete collector choice, which is why the `Auto` arm below
            // is unreachable.
            Ok(Some(match self.collector.try_not_auto()? {
                #[cfg(feature = "gc-drc")]
                Collector::DeferredReferenceCounting => {
                    try_new::<Arc<_>>(crate::runtime::vm::DrcCollector::default())? as _
                }
                // Selecting a collector whose implementation is compiled out
                // is rejected before reaching here.
                #[cfg(not(feature = "gc-drc"))]
                Collector::DeferredReferenceCounting => unreachable!(),

                #[cfg(feature = "gc-null")]
                Collector::Null => {
                    try_new::<Arc<_>>(crate::runtime::vm::NullCollector::default())? as _
                }
                #[cfg(not(feature = "gc-null"))]
                Collector::Null => unreachable!(),

                Collector::Auto => unreachable!(),
            }))
        }
    }
2567
2568 #[cfg(feature = "runtime")]
2569 pub(crate) fn build_profiler(&self) -> Result<Box<dyn ProfilingAgent>> {
2570 Ok(match self.profiling_strategy {
2571 ProfilingStrategy::PerfMap => profiling_agent::new_perfmap()?,
2572 ProfilingStrategy::JitDump => profiling_agent::new_jitdump()?,
2573 ProfilingStrategy::VTune => profiling_agent::new_vtune()?,
2574 ProfilingStrategy::None => profiling_agent::new_null(),
2575 ProfilingStrategy::Pulley => profiling_agent::new_pulley()?,
2576 })
2577 }
2578
2579 #[cfg(any(feature = "cranelift", feature = "winch"))]
2580 pub(crate) fn build_compiler(
2581 mut self,
2582 tunables: &mut Tunables,
2583 features: WasmFeatures,
2584 ) -> Result<(Self, Box<dyn wasmtime_environ::Compiler>)> {
2585 let target = self.compiler_target();
2586
2587 // The target passed to the builders below is an `Option<Triple>` where
2588 // `None` represents the current host with CPU features inferred from
2589 // the host's CPU itself. The `target` above is not an `Option`, so
2590 // switch it to `None` in the case that a target wasn't explicitly
2591 // specified (which indicates no feature inference) and the target
2592 // matches the host.
2593 let target_for_builder =
2594 if self.target.is_none() && target == target_lexicon::Triple::host() {
2595 None
2596 } else {
2597 Some(target.clone())
2598 };
2599
2600 let mut compiler = match self.compiler_config_mut().strategy {
2601 #[cfg(feature = "cranelift")]
2602 Some(Strategy::Cranelift) => wasmtime_cranelift::builder(target_for_builder)?,
2603 #[cfg(not(feature = "cranelift"))]
2604 Some(Strategy::Cranelift) => bail!("cranelift support not compiled in"),
2605 #[cfg(feature = "winch")]
2606 Some(Strategy::Winch) => wasmtime_winch::builder(target_for_builder)?,
2607 #[cfg(not(feature = "winch"))]
2608 Some(Strategy::Winch) => bail!("winch support not compiled in"),
2609
2610 None | Some(Strategy::Auto) => unreachable!(),
2611 };
2612
2613 if let Some(path) = &self.compiler_config_mut().clif_dir {
2614 compiler.clif_dir(path)?;
2615 }
2616
2617 // If probestack is enabled for a target, Wasmtime will always use the
2618 // inline strategy which doesn't require us to define a `__probestack`
2619 // function or similar.
2620 self.compiler_config_mut()
2621 .settings
2622 .insert("probestack_strategy".into(), "inline".into());
2623
2624 // We enable stack probing by default on all targets.
2625 // This is required on Windows because of the way Windows
2626 // commits its stacks, but it's also a good idea on other
2627 // platforms to ensure guard pages are hit for large frame
2628 // sizes.
2629 self.compiler_config_mut()
2630 .flags
2631 .insert("enable_probestack".into());
2632
2633 // The current wasm multivalue implementation depends on this.
2634 // FIXME(#9510) handle this in wasmtime-cranelift instead.
2635 self.compiler_config_mut()
2636 .flags
2637 .insert("enable_multi_ret_implicit_sret".into());
2638
2639 if let Some(unwind_requested) = self.native_unwind_info {
2640 if !self
2641 .compiler_config_mut()
2642 .ensure_setting_unset_or_given("unwind_info", &unwind_requested.to_string())
2643 {
2644 bail!(
2645 "incompatible settings requested for Cranelift and Wasmtime `unwind-info` settings"
2646 );
2647 }
2648 }
2649
2650 if target.operating_system == target_lexicon::OperatingSystem::Windows {
2651 if !self
2652 .compiler_config_mut()
2653 .ensure_setting_unset_or_given("unwind_info", "true")
2654 {
2655 bail!("`native_unwind_info` cannot be disabled on Windows");
2656 }
2657 }
2658
2659 // We require frame pointers for correct stack walking, which is safety
2660 // critical in the presence of reference types, and otherwise it is just
2661 // really bad developer experience to get wrong.
2662 self.compiler_config_mut()
2663 .settings
2664 .insert("preserve_frame_pointers".into(), "true".into());
2665
2666 if !tunables.signals_based_traps {
2667 let mut ok = self
2668 .compiler_config_mut()
2669 .ensure_setting_unset_or_given("enable_table_access_spectre_mitigation", "false");
2670 ok = ok
2671 && self.compiler_config_mut().ensure_setting_unset_or_given(
2672 "enable_heap_access_spectre_mitigation",
2673 "false",
2674 );
2675
2676 // Right now spectre-mitigated bounds checks will load from zero so
2677 // if host-based signal handlers are disabled then that's a mismatch
2678 // and doesn't work right now. Fixing this will require more thought
2679 // of how to implement the bounds check in spectre-only mode.
2680 if !ok {
2681 bail!(
2682 "when signals-based traps are disabled then spectre \
2683 mitigations must also be disabled"
2684 );
2685 }
2686 }
2687
2688 if features.contains(WasmFeatures::RELAXED_SIMD) && !features.contains(WasmFeatures::SIMD) {
2689 bail!("cannot disable the simd proposal but enable the relaxed simd proposal");
2690 }
2691
2692 if features.contains(WasmFeatures::STACK_SWITCHING) {
2693 use target_lexicon::OperatingSystem;
2694 let model = match target.operating_system {
2695 OperatingSystem::Windows => "update_windows_tib",
2696 OperatingSystem::Linux
2697 | OperatingSystem::MacOSX(_)
2698 | OperatingSystem::Darwin(_) => "basic",
2699 _ => bail!("stack-switching feature not supported on this platform "),
2700 };
2701
2702 if !self
2703 .compiler_config_mut()
2704 .ensure_setting_unset_or_given("stack_switch_model", model)
2705 {
2706 bail!(
2707 "compiler option 'stack_switch_model' must be set to '{model}' on this platform"
2708 );
2709 }
2710 }
2711
2712 // Apply compiler settings and flags
2713 compiler.set_tunables(tunables.clone())?;
2714 for (k, v) in self.compiler_config_mut().settings.iter() {
2715 compiler.set(k, v)?;
2716 }
2717 for flag in self.compiler_config_mut().flags.iter() {
2718 compiler.enable(flag)?;
2719 }
2720 *tunables = compiler.tunables().cloned().unwrap();
2721
2722 #[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
2723 if let Some(cache_store) = &self.compiler_config_mut().cache_store {
2724 compiler.enable_incremental_compilation(cache_store.clone())?;
2725 }
2726
2727 compiler.wmemcheck(self.compiler_config_mut().wmemcheck);
2728
2729 Ok((self, compiler.build()?))
2730 }
2731
2732 /// Internal setting for whether adapter modules for components will have
2733 /// extra WebAssembly instructions inserted performing more debug checks
2734 /// then are necessary.
2735 #[cfg(feature = "component-model")]
2736 pub fn debug_adapter_modules(&mut self, debug: bool) -> &mut Self {
2737 self.tunables.debug_adapter_modules = Some(debug);
2738 self
2739 }
2740
2741 /// Enables clif output when compiling a WebAssembly module.
2742 #[cfg(any(feature = "cranelift", feature = "winch"))]
2743 pub fn emit_clif(&mut self, path: &Path) -> &mut Self {
2744 self.compiler_config_mut().clif_dir = Some(path.to_path_buf());
2745 self
2746 }
2747
2748 /// Configures whether, when on macOS, Mach ports are used for exception
2749 /// handling instead of traditional Unix-based signal handling.
2750 ///
2751 /// WebAssembly traps in Wasmtime are implemented with native faults, for
2752 /// example a `SIGSEGV` will occur when a WebAssembly guest accesses
2753 /// out-of-bounds memory. Handling this can be configured to either use Unix
2754 /// signals or Mach ports on macOS. By default Mach ports are used.
2755 ///
2756 /// Mach ports enable Wasmtime to work by default with foreign
2757 /// error-handling systems such as breakpad which also use Mach ports to
2758 /// handle signals. In this situation Wasmtime will continue to handle guest
2759 /// faults gracefully while any non-guest faults will get forwarded to
2760 /// process-level handlers such as breakpad. Some more background on this
2761 /// can be found in #2456.
2762 ///
2763 /// A downside of using mach ports, however, is that they don't interact
2764 /// well with `fork()`. Forking a Wasmtime process on macOS will produce a
2765 /// child process that cannot successfully run WebAssembly. In this
2766 /// situation traditional Unix signal handling should be used as that's
2767 /// inherited and works across forks.
2768 ///
2769 /// If your embedding wants to use a custom error handler which leverages
2770 /// Mach ports and you additionally wish to `fork()` the process and use
2771 /// Wasmtime in the child process that's not currently possible. Please
2772 /// reach out to us if you're in this bucket!
2773 ///
2774 /// This option defaults to `true`, using Mach ports by default.
2775 pub fn macos_use_mach_ports(&mut self, mach_ports: bool) -> &mut Self {
2776 self.macos_use_mach_ports = mach_ports;
2777 self
2778 }
2779
2780 /// Configures an embedder-provided function, `detect`, which is used to
2781 /// determine if an ISA-specific feature is available on the current host.
2782 ///
2783 /// This function is used to verify that any features enabled for a compiler
2784 /// backend, such as AVX support on x86\_64, are also available on the host.
2785 /// It is undefined behavior to execute an AVX instruction on a host that
2786 /// doesn't support AVX instructions, for example.
2787 ///
2788 /// When the `std` feature is active on this crate then this function is
2789 /// configured to a default implementation that uses the standard library's
2790 /// feature detection. When the `std` feature is disabled then there is no
2791 /// default available and this method must be called to configure a feature
2792 /// probing function.
2793 ///
2794 /// The `detect` function provided is given a string name of an ISA feature.
2795 /// The function should then return:
2796 ///
2797 /// * `Some(true)` - indicates that the feature was found on the host and it
2798 /// is supported.
2799 /// * `Some(false)` - the feature name was recognized but it was not
2800 /// detected on the host, for example the CPU is too old.
2801 /// * `None` - the feature name was not recognized and it's not known
2802 /// whether it's on the host or not.
2803 ///
2804 /// Feature names passed to `detect` match the same feature name used in the
2805 /// Rust standard library. For example `"sse4.2"` is used on x86\_64.
2806 ///
2807 /// # Unsafety
2808 ///
2809 /// This function is `unsafe` because it is undefined behavior to execute
2810 /// instructions that a host does not support. This means that the result of
2811 /// `detect` must be correct for memory safe execution at runtime.
2812 pub unsafe fn detect_host_feature(&mut self, detect: fn(&str) -> Option<bool>) -> &mut Self {
2813 self.detect_host_feature = Some(detect);
2814 self
2815 }
2816
2817 /// Configures Wasmtime to not use signals-based trap handlers, for example
2818 /// disables `SIGILL` and `SIGSEGV` handler registration on Unix platforms.
2819 ///
2820 /// > **Note:** this option has important performance ramifications, be sure
2821 /// > to understand the implications. Wasm programs have been measured to
2822 /// > run up to 2x slower when signals-based traps are disabled.
2823 ///
2824 /// Wasmtime will by default leverage signals-based trap handlers (or the
2825 /// platform equivalent, for example "vectored exception handlers" on
2826 /// Windows) to make generated code more efficient. For example, when
2827 /// Wasmtime can use signals-based traps, it can elide explicit bounds
2828 /// checks for Wasm linear memory accesses, instead relying on virtual
2829 /// memory guard pages to raise a `SIGSEGV` (on Unix) for out-of-bounds
2830 /// accesses, which Wasmtime's runtime then catches and handles. Another
2831 /// example is divide-by-zero: with signals-based traps, Wasmtime can let
2832 /// the hardware raise a trap when the divisor is zero. Without
2833 /// signals-based traps, Wasmtime must explicitly emit additional
2834 /// instructions to check for zero and conditionally branch to a trapping
2835 /// code path.
2836 ///
2837 /// Some environments however may not have access to signal handlers. For
2838 /// example embedded scenarios may not support virtual memory. Other
2839 /// environments where Wasmtime is embedded within the surrounding
2840 /// environment may require that new signal handlers aren't registered due
2841 /// to the global nature of signal handlers. This option exists to disable
2842 /// the signal handler registration when required for these scenarios.
2843 ///
2844 /// When signals-based trap handlers are disabled, then Wasmtime and its
2845 /// generated code will *never* rely on segfaults or other
2846 /// signals. Generated code will be slower because bounds must be explicitly
2847 /// checked along with other conditions like division by zero.
2848 ///
2849 /// The following additional factors can also affect Wasmtime's ability to
2850 /// elide explicit bounds checks and leverage signals-based traps:
2851 ///
2852 /// * The [`Config::memory_reservation`] and [`Config::memory_guard_size`]
2853 /// settings
2854 /// * The index type of the linear memory (e.g. 32-bit or 64-bit)
2855 /// * The page size of the linear memory
2856 ///
2857 /// When this option is disabled, the
2858 /// `enable_heap_access_spectre_mitigation` and
2859 /// `enable_table_access_spectre_mitigation` Cranelift settings must also be
2860 /// disabled. This means that generated code must have spectre mitigations
2861 /// disabled. This is because spectre mitigations rely on faults from
2862 /// loading from the null address to implement bounds checks.
2863 ///
2864 /// This option defaults to `true`: signals-based trap handlers are enabled
2865 /// by default.
2866 ///
2867 /// > **Note:** Disabling this option is not compatible with the Winch
2868 /// > compiler.
2869 pub fn signals_based_traps(&mut self, enable: bool) -> &mut Self {
2870 self.tunables.signals_based_traps = Some(enable);
2871 self
2872 }
2873
2874 /// Enable/disable GC support in Wasmtime entirely.
2875 ///
2876 /// This flag can be used to gate whether GC infrastructure is enabled or
2877 /// initialized in Wasmtime at all. Wasmtime's GC implementation is required
2878 /// for the [`Self::wasm_gc`] proposal, [`Self::wasm_function_references`],
2879 /// and [`Self::wasm_exceptions`] at this time. None of those proposal can
2880 /// be enabled without also having this option enabled.
2881 ///
2882 /// This option defaults to whether the crate `gc` feature is enabled or
2883 /// not.
2884 pub fn gc_support(&mut self, enable: bool) -> &mut Self {
2885 self.wasm_features(WasmFeatures::GC_TYPES, enable)
2886 }
2887
2888 /// Explicitly indicate or not whether the host is using a hardware float
2889 /// ABI on x86 targets.
2890 ///
2891 /// This configuration option is only applicable on the
2892 /// `x86_64-unknown-none` Rust target and has no effect on other host
2893 /// targets. The `x86_64-unknown-none` Rust target does not support hardware
2894 /// floats by default and uses a "soft float" implementation and ABI. This
2895 /// means that `f32`, for example, is passed in a general-purpose register
2896 /// between functions instead of a floating-point register. This does not
2897 /// match Cranelift's ABI for `f32` where it's passed in floating-point
2898 /// registers. Cranelift does not have support for a "soft float"
2899 /// implementation where all floating-point operations are lowered to
2900 /// libcalls.
2901 ///
2902 /// This means that for the `x86_64-unknown-none` target the ABI between
2903 /// Wasmtime's libcalls and the host is incompatible when floats are used.
2904 /// This further means that, by default, Wasmtime is unable to load native
2905 /// code when compiled to the `x86_64-unknown-none` target. The purpose of
2906 /// this option is to explicitly allow loading code and bypass this check.
2907 ///
2908 /// Setting this configuration option to `true` indicates that either:
2909 /// (a) the Rust target is compiled with the hard-float ABI manually via
2910 /// `-Zbuild-std` and a custom target JSON configuration, or (b) sufficient
2911 /// x86 features have been enabled in the compiler such that float libcalls
2912 /// will not be used in Wasmtime. For (a) there is no way in Rust at this
2913 /// time to detect whether a hard-float or soft-float ABI is in use on
2914 /// stable Rust, so this manual opt-in is required. For (b) the only
2915 /// instance where Wasmtime passes a floating-point value in a register
2916 /// between the host and compiled wasm code is with libcalls.
2917 ///
2918 /// Float-based libcalls are only used when the compilation target for a
2919 /// wasm module has insufficient target features enabled for native
2920 /// support. For example SSE4.1 is required for the `f32.ceil` WebAssembly
2921 /// instruction to be compiled to a native instruction. If SSE4.1 is not
2922 /// enabled then `f32.ceil` is translated to a "libcall" which is
2923 /// implemented on the host. Float-based libcalls can be avoided with
2924 /// sufficient target features enabled, for example:
2925 ///
2926 /// * `self.cranelift_flag_enable("has_sse3")`
2927 /// * `self.cranelift_flag_enable("has_ssse3")`
2928 /// * `self.cranelift_flag_enable("has_sse41")`
2929 /// * `self.cranelift_flag_enable("has_sse42")`
2930 /// * `self.cranelift_flag_enable("has_fma")`
2931 ///
2932 /// Note that when these features are enabled Wasmtime will perform a
2933 /// runtime check to determine that the host actually has the feature
2934 /// present.
2935 ///
2936 /// For some more discussion see [#11506].
2937 ///
2938 /// [#11506]: https://github.com/bytecodealliance/wasmtime/issues/11506
2939 ///
2940 /// # Safety
2941 ///
2942 /// This method is not safe because it cannot be detected in Rust right now
2943 /// whether the host is compiled with a soft or hard float ABI. Additionally
2944 /// if the host is compiled with a soft float ABI disabling this check does
2945 /// not ensure that the wasm module in question has zero usage of floats
2946 /// in the boundary to the host.
2947 ///
2948 /// Safely using this method requires one of:
2949 ///
2950 /// * The host target is compiled to use hardware floats.
2951 /// * Wasm modules loaded are compiled with enough x86 Cranelift features
2952 /// enabled to avoid float-related hostcalls.
    pub unsafe fn x86_float_abi_ok(&mut self, enable: bool) -> &mut Self {
        // Builder-style setter: record the embedder's explicit opt-in/opt-out
        // (the field is an `Option`, so an explicit setting is distinguishable
        // from "never configured") and return `self` for chaining.
        self.x86_float_abi_ok = Some(enable);
        self
    }
2957
2958 /// Enable or disable the ability to create a
2959 /// [`SharedMemory`](crate::SharedMemory).
2960 ///
2961 /// The WebAssembly threads proposal, configured by [`Config::wasm_threads`]
2962 /// is on-by-default but there are enough deficiencies in Wasmtime's
2963 /// implementation and API integration that creation of a shared memory is
    /// disabled by default. This configuration knob can be used to enable this.
2965 ///
2966 /// When enabling this method be aware that wasm threads are, at this time,
2967 /// a [tier 2
2968 /// feature](https://docs.wasmtime.dev/stability-tiers.html#tier-2) in
2969 /// Wasmtime meaning that it will not receive security updates or fixes to
2970 /// historical releases. Additionally security CVEs will not be issued for
2971 /// bugs in the implementation.
2972 ///
2973 /// This option is `false` by default.
    pub fn shared_memory(&mut self, enable: bool) -> &mut Self {
        // Gates creation of `SharedMemory` independently of whether the wasm
        // threads proposal itself is enabled (see doc comment above).
        self.shared_memory = enable;
        self
    }
2978
2979 /// Specifies whether support for concurrent execution of WebAssembly is
2980 /// supported within this store.
2981 ///
2982 /// This configuration option affects whether runtime data structures are
2983 /// initialized within a `Store` on creation to support concurrent execution
2984 /// of WebAssembly guests. This is primarily applicable to the
2985 /// [`Config::wasm_component_model_async`] configuration which is the first
2986 /// time Wasmtime has supported concurrent execution of guests. This
2987 /// configuration option, for example, enables usage of
2988 /// [`Store::run_concurrent`], [`Func::call_concurrent`], [`StreamReader`],
2989 /// etc.
2990 ///
2991 /// This configuration option can be manually disabled to avoid initializing
2992 /// data structures in the [`Store`] related to concurrent execution. When
2993 /// this option is disabled then APIs related to concurrency will all fail
2994 /// with a panic. For example [`Store::run_concurrent`] will panic, creating
2995 /// a [`StreamReader`] will panic, etc.
2996 ///
2997 /// The value of this option additionally affects whether a [`Config`] is
2998 /// valid and the default set of enabled WebAssembly features. If this
2999 /// option is disabled then component-model features related to concurrency
3000 /// will all be disabled. If this option is enabled, then the options will
3001 /// retain their normal defaults. It is not valid to create a [`Config`]
3002 /// with component-model-async explicitly enabled and this option explicitly
3003 /// disabled, however.
3004 ///
3005 /// This option defaults to `true`.
3006 ///
3007 /// [`Store`]: crate::Store
3008 /// [`Store::run_concurrent`]: crate::Store::run_concurrent
3009 /// [`Func::call_concurrent`]: crate::component::Func::call_concurrent
3010 /// [`StreamReader`]: crate::component::StreamReader
    pub fn concurrency_support(&mut self, enable: bool) -> &mut Self {
        // Stored in tunables as `Some(..)` so an explicit setting can be
        // distinguished from the unset default.
        self.tunables.concurrency_support = Some(enable);
        self
    }
3015
3016 /// Validate if the current configuration has conflicting overrides that prevent
3017 /// execution determinism. Returns an error if a conflict exists.
3018 ///
3019 /// Note: Keep this in sync with [`Config::enforce_determinism`].
3020 #[inline]
3021 #[cfg(feature = "rr")]
3022 pub(crate) fn validate_rr_determinism_conflicts(&self) -> Result<()> {
3023 if let Some(v) = self.tunables.relaxed_simd_deterministic {
3024 if v == false {
3025 bail!("Relaxed deterministic SIMD cannot be disabled when determinism is enforced");
3026 }
3027 }
3028 #[cfg(any(feature = "cranelift", feature = "winch"))]
3029 if let Some(v) = self
3030 .compiler_config
3031 .as_ref()
3032 .and_then(|c| c.settings.get("enable_nan_canonicalization"))
3033 {
3034 if v != "true" {
3035 bail!("NaN canonicalization cannot be disabled when determinism is enforced");
3036 }
3037 }
3038 Ok(())
3039 }
3040
3041 /// Enable execution trace recording or replaying to the configuration.
3042 ///
    /// When either recording or replaying is enabled, validation fails if settings
3044 /// that control determinism are not set appropriately. In particular, RR requires
3045 /// doing the following:
3046 /// * Enabling NaN canonicalization with [`Config::cranelift_nan_canonicalization`].
3047 /// * Enabling deterministic relaxed SIMD with [`Config::relaxed_simd_deterministic`].
    #[inline]
    pub fn rr(&mut self, cfg: RRConfig) -> &mut Self {
        // Replace any previously-configured record/replay settings wholesale.
        self.rr_config = cfg;
        self
    }
3053}
3054
impl Default for Config {
    /// Returns the same configuration as [`Config::new`].
    fn default() -> Config {
        Config::new()
    }
}
3060
3061impl fmt::Debug for Config {
3062 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
3063 let mut f = f.debug_struct("Config");
3064
3065 // Not every flag in WasmFeatures can be enabled as part of creating
3066 // a Config. This impl gives a complete picture of all WasmFeatures
3067 // enabled, and doesn't require maintenance by hand (which has become out
3068 // of date in the past), at the cost of possible confusion for why
3069 // a flag in this set doesn't have a Config setter.
3070 let features = self.features();
3071 for flag in WasmFeatures::FLAGS.iter() {
3072 f.field(
3073 &format!("wasm_{}", flag.name().to_lowercase()),
3074 &features.contains(*flag.value()),
3075 );
3076 }
3077
3078 f.field("parallel_compilation", &self.parallel_compilation);
3079 #[cfg(any(feature = "cranelift", feature = "winch"))]
3080 {
3081 f.field("compiler_config", &self.compiler_config);
3082 }
3083
3084 self.tunables.format(&mut f);
3085 f.finish()
3086 }
3087}
3088
3089/// Possible Compilation strategies for a wasm module.
3090///
3091/// This is used as an argument to the [`Config::strategy`] method.
#[non_exhaustive]
#[derive(PartialEq, Eq, Clone, Debug, Copy)]
pub enum Strategy {
    /// An indicator that the compilation strategy should be automatically
    /// selected.
    ///
    /// This is generally what you want for most projects and indicates that the
    /// `wasmtime` crate itself should make the decision about what the best
    /// code generator for a wasm module is.
    ///
    /// Currently this always defaults to Cranelift, but the default value may
    /// change over time.
    ///
    /// (If the `cranelift` feature is disabled at compile time then Winch is
    /// selected instead, when available.)
    Auto,

    /// Currently the default backend, Cranelift aims to be a reasonably fast
    /// code generator which generates high quality machine code.
    Cranelift,

    /// A low-latency baseline compiler for WebAssembly.
    /// For more details regarding ISA support and Wasm proposals support
    /// see <https://docs.wasmtime.dev/stability-tiers.html#current-tier-status>
    Winch,
}
3115
#[cfg(any(feature = "winch", feature = "cranelift"))]
impl Strategy {
    /// Resolves `Auto` to a concrete strategy based on which compiler
    /// backends were enabled at compile time, preferring Cranelift over
    /// Winch. Returns `None` when `Auto` cannot be resolved; non-`Auto`
    /// values are passed through unchanged.
    fn not_auto(&self) -> Option<Strategy> {
        match self {
            Strategy::Auto if cfg!(feature = "cranelift") => Some(Strategy::Cranelift),
            Strategy::Auto if cfg!(feature = "winch") => Some(Strategy::Winch),
            Strategy::Auto => None,
            other => Some(*other),
        }
    }
}
3133
3134/// Possible garbage collector implementations for Wasm.
3135///
3136/// This is used as an argument to the [`Config::collector`] method.
3137///
3138/// The properties of Wasmtime's available collectors are summarized in the
3139/// following table:
3140///
3141/// | Collector | Collects Garbage[^1] | Latency[^2] | Throughput[^3] | Allocation Speed[^4] | Heap Utilization[^5] |
3142/// |-----------------------------|----------------------|-------------|----------------|----------------------|----------------------|
3143/// | `DeferredReferenceCounting` | Yes, but not cycles | 🙂 | 🙁 | 😐 | 😐 |
3144/// | `Null` | No | 🙂 | 🙂 | 🙂 | 🙂 |
3145///
3146/// [^1]: Whether or not the collector is capable of collecting garbage and cyclic garbage.
3147///
3148/// [^2]: How long the Wasm program is paused during garbage
3149/// collections. Shorter is better. In general, better latency implies
3150/// worse throughput and vice versa.
3151///
3152/// [^3]: How fast the Wasm program runs when using this collector. Roughly
3153/// equivalent to the number of Wasm instructions executed per
3154/// second. Faster is better. In general, better throughput implies worse
3155/// latency and vice versa.
3156///
3157/// [^4]: How fast can individual objects be allocated?
3158///
3159/// [^5]: How many objects can the collector fit into N bytes of memory? That
3160/// is, how much space for bookkeeping and metadata does this collector
3161/// require? Less space taken up by metadata means more space for
3162/// additional objects. Reference counts are larger than mark bits and
3163/// free lists are larger than bump pointers, for example.
#[non_exhaustive]
#[derive(PartialEq, Eq, Clone, Debug, Copy)]
pub enum Collector {
    /// An indicator that the garbage collector should be automatically
    /// selected.
    ///
    /// This is generally what you want for most projects and indicates that the
    /// `wasmtime` crate itself should make the decision about what the best
    /// collector for a wasm module is.
    ///
    /// Currently this always defaults to the deferred reference-counting
    /// collector, but the default value may change over time.
    Auto,

    /// The deferred reference-counting collector.
    ///
    /// A reference-counting collector, generally trading improved latency for
    /// worsened throughput. However, to avoid the largest overheads of
    /// reference counting, it avoids manipulating reference counts for Wasm
    /// objects on the stack. Instead, it will hold a reference count for an
    /// over-approximation of all objects that are currently on the stack, trace
    /// the stack during collection to find the precise set of on-stack roots,
    /// and decrement the reference count of any object that was in the
    /// over-approximation but not the precise set. This improves throughput,
    /// compared to "pure" reference counting, by performing many fewer
    /// refcount-increment and -decrement operations. The cost is the increased
    /// latency associated with tracing the stack.
    ///
    /// This collector cannot currently collect cycles; they will leak until the
    /// GC heap's store is dropped.
    DeferredReferenceCounting,

    /// The null collector.
    ///
    /// This collector does not actually collect any garbage. It simply
    /// allocates objects until it runs out of memory, at which point further
    /// object allocation attempts will trap.
    ///
    /// This collector is useful for incredibly short-running Wasm instances
    /// where additionally you would rather halt an over-allocating Wasm program
    /// than spend time collecting its garbage to allow it to keep running. It
    /// is also useful for measuring the overheads associated with other
    /// collectors, as this collector imposes as close to zero throughput and
    /// latency overhead as possible.
    Null,
}
3210
impl Default for Collector {
    /// Defaults to [`Collector::Auto`], letting Wasmtime pick the collector.
    fn default() -> Collector {
        Collector::Auto
    }
}
3216
#[cfg(feature = "gc")]
impl Collector {
    /// Resolves `Auto` to a concrete collector based on the GC features
    /// enabled at compile time, preferring the deferred reference-counting
    /// collector over the null collector. Returns `None` only when `Auto`
    /// cannot be resolved; explicit selections are passed through unchanged,
    /// even if their supporting feature is disabled (see `try_not_auto`).
    fn not_auto(&self) -> Option<Collector> {
        match self {
            Collector::Auto => {
                if cfg!(feature = "gc-drc") {
                    Some(Collector::DeferredReferenceCounting)
                } else if cfg!(feature = "gc-null") {
                    Some(Collector::Null)
                } else {
                    None
                }
            }
            other => Some(*other),
        }
    }

    /// Like `not_auto`, but returns a descriptive error when the resolved
    /// collector's supporting Cargo feature was not enabled at compile time,
    /// or when `Auto` cannot resolve to any collector at all.
    fn try_not_auto(&self) -> Result<Self> {
        match self.not_auto() {
            // Feature enabled at compile time: the resolved collector is
            // usable as-is.
            #[cfg(feature = "gc-drc")]
            Some(c @ Collector::DeferredReferenceCounting) => Ok(c),
            // An explicit DRC selection was passed through by `not_auto`, but
            // support for it was compiled out.
            #[cfg(not(feature = "gc-drc"))]
            Some(Collector::DeferredReferenceCounting) => bail!(
                "cannot create an engine using the deferred reference-counting \
                collector because the `gc-drc` feature was not enabled at \
                compile time",
            ),

            #[cfg(feature = "gc-null")]
            Some(c @ Collector::Null) => Ok(c),
            #[cfg(not(feature = "gc-null"))]
            Some(Collector::Null) => bail!(
                "cannot create an engine using the null collector because \
                the `gc-null` feature was not enabled at compile time",
            ),

            // `not_auto` never returns `Auto`.
            Some(Collector::Auto) => unreachable!(),

            // `Auto` with no collector feature enabled at all.
            None => bail!(
                "cannot create an engine with GC support when none of the \
                collectors are available; enable one of the following \
                features: `gc-drc`, `gc-null`",
            ),
        }
    }
}
3263
/// Possible optimization levels for the Cranelift codegen backend.
///
/// Note that this enum is `#[non_exhaustive]`: more levels may be added in
/// future releases.
#[non_exhaustive]
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum OptLevel {
    /// No optimizations performed, minimizes compilation time by disabling most
    /// optimizations.
    None,
    /// Generates the fastest possible code, but may take longer.
    Speed,
    /// Similar to `speed`, but also performs transformations aimed at reducing
    /// code size.
    SpeedAndSize,
}
3277
/// Possible register allocator algorithms for the Cranelift codegen backend.
///
/// Note that this enum is `#[non_exhaustive]`: more algorithms may be added in
/// future releases.
#[non_exhaustive]
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum RegallocAlgorithm {
    /// Generates the fastest possible code, but may take longer.
    ///
    /// This algorithm performs "backtracking", which means that it may
    /// undo its earlier work and retry as it discovers conflicts. This
    /// results in better register utilization, producing fewer spills
    /// and moves, but can cause super-linear compile runtime.
    Backtracking,
    /// Generates acceptable code very quickly.
    ///
    /// This algorithm performs a single pass through the code,
    /// guaranteed to work in linear time. (Note that the rest of
    /// Cranelift is not necessarily guaranteed to run in linear time,
    /// however.) It cannot undo earlier decisions, however, and it
    /// cannot foresee constraints or issues that may occur further
    /// ahead in the code, so the code may have more spills and moves as
    /// a result.
    ///
    /// > **Note**: This algorithm is not yet production-ready and has
    /// > historically had known problems. It is not recommended to enable this
    /// > algorithm for security-sensitive applications and the Wasmtime project
    /// > does not consider this configuration option for issuing security
    /// > advisories at this time.
    SinglePass,
}
3306
/// Select which profiling technique to support.
//
// `Eq` is derived alongside `PartialEq` since the equality here is a full
// equivalence relation (clippy: `derive_partial_eq_without_eq`); this is a
// backward-compatible addition for callers.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ProfilingStrategy {
    /// No profiler support.
    None,

    /// Collect function name information as the "perf map" file format, used with `perf` on Linux.
    PerfMap,

    /// Collect profiling info for "jitdump" file format, used with `perf` on
    /// Linux.
    JitDump,

    /// Collect profiling info using the "ittapi", used with `VTune` on Linux.
    VTune,

    /// Support for profiling Pulley, Wasmtime's interpreter. Note that enabling
    /// this at runtime requires enabling the `profile-pulley` Cargo feature at
    /// compile time.
    Pulley,
}
3328
/// Select how wasm backtrace detailed information is handled.
#[derive(Debug, Clone, Copy)]
pub enum WasmBacktraceDetails {
    /// Support is unconditionally enabled and Wasmtime will parse and read
    /// debug information.
    Enable,

    /// Support is disabled, and Wasmtime will not parse debug information for
    /// backtrace details.
    Disable,

    /// Support for backtrace details is conditional on the
    /// `WASMTIME_BACKTRACE_DETAILS` environment variable.
    Environment,
}
3344
/// Describes the tri-state configuration of features such as MPK or
/// `PAGEMAP_SCAN`.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)]
pub enum Enabled {
    /// Enable this feature if it's detected on the host system, otherwise leave
    /// it disabled.
    Auto,
    /// Enable this feature and fail configuration if the feature is not
    /// detected on the host system.
    Yes,
    /// Do not enable this feature, even if the host system supports it.
    No,
}
3357
3358/// Configuration options used with [`InstanceAllocationStrategy::Pooling`] to
3359/// change the behavior of the pooling instance allocator.
3360///
3361/// This structure has a builder-style API in the same manner as [`Config`] and
3362/// is configured with [`Config::allocation_strategy`].
3363///
3364/// Note that usage of the pooling allocator does not affect compiled
3365/// WebAssembly code. Compiled `*.cwasm` files, for example, are usable both
3366/// with and without the pooling allocator.
3367///
3368/// ## Advantages of Pooled Allocation
3369///
3370/// The main benefit of the pooling allocator is to make WebAssembly
3371/// instantiation both faster and more scalable in terms of parallelism.
3372/// Allocation is faster because virtual memory is already configured and ready
3373/// to go within the pool, there's no need to [`mmap`] (for example on Unix) a
3374/// new region and configure it with guard pages. By avoiding [`mmap`] this
3375/// avoids whole-process virtual memory locks which can improve scalability and
3376/// performance through avoiding this.
3377///
3378/// Additionally with pooled allocation it's possible to create "affine slots"
3379/// to a particular WebAssembly module or component over time. For example if
/// the same module is instantiated multiple times over time the pooling
/// allocator will, by default, attempt to reuse the same slot. This means
/// that the slot has been
3382/// pre-configured and can retain virtual memory mappings for a copy-on-write
/// image, for example (see [`Config::memory_init_cow`] for more information).
3384/// This means that in a steady state instance deallocation is a single
3385/// [`madvise`] to reset linear memory to its original contents followed by a
3386/// single (optional) [`mprotect`] during the next instantiation to shrink
3387/// memory back to its original size. Compared to non-pooled allocation this
3388/// avoids the need to [`mmap`] a new region of memory, [`munmap`] it, and
3389/// [`mprotect`] regions too.
3390///
3391/// Another benefit of pooled allocation is that it's possible to configure
3392/// things such that no virtual memory management is required at all in a steady
3393/// state. For example a pooling allocator can be configured with:
3394///
3395/// * [`Config::memory_init_cow`] disabled
3396/// * [`Config::memory_guard_size`] disabled
3397/// * [`Config::memory_reservation`] shrunk to minimal size
3398/// * [`PoolingAllocationConfig::table_keep_resident`] sufficiently large
3399/// * [`PoolingAllocationConfig::linear_memory_keep_resident`] sufficiently large
3400///
3401/// With all these options in place no virtual memory tricks are used at all and
3402/// everything is manually managed by Wasmtime (for example resetting memory is
3403/// a `memset(0)`). This is not as fast in a single-threaded scenario but can
3404/// provide benefits in high-parallelism situations as no virtual memory locks
3405/// or IPIs need happen.
3406///
3407/// ## Disadvantages of Pooled Allocation
3408///
3409/// Despite the above advantages to instantiation performance the pooling
3410/// allocator is not enabled by default in Wasmtime. One reason is that the
3411/// performance advantages are not necessarily portable, for example while the
3412/// pooling allocator works on Windows it has not been tuned for performance on
3413/// Windows in the same way it has on Linux.
3414///
3415/// Additionally the main cost of the pooling allocator is that it requires a
3416/// very large reservation of virtual memory (on the order of most of the
3417/// addressable virtual address space). WebAssembly 32-bit linear memories in
3418/// Wasmtime are, by default 4G address space reservations with a small guard
3419/// region both before and after the linear memory. Memories in the pooling
3420/// allocator are contiguous which means that we only need a guard after linear
3421/// memory because the previous linear memory's slot post-guard is our own
3422/// pre-guard. This means that, by default, the pooling allocator uses roughly
3423/// 4G of virtual memory per WebAssembly linear memory slot. 4G of virtual
3424/// memory is 32 bits of a 64-bit address. Many 64-bit systems can only
3425/// actually use 48-bit addresses by default (although this can be extended on
3426/// architectures nowadays too), and of those 48 bits one of them is reserved
3427/// to indicate kernel-vs-userspace. This leaves 47-32=15 bits left,
3428/// meaning you can only have at most 32k slots of linear memories on many
3429/// systems by default. This is a relatively small number and shows how the
3430/// pooling allocator can quickly exhaust all of virtual memory.
3431///
3432/// Another disadvantage of the pooling allocator is that it may keep memory
3433/// alive when nothing is using it. A previously used slot for an instance might
3434/// have paged-in memory that will not get paged out until the
3435/// [`Engine`](crate::Engine) owning the pooling allocator is dropped. While
3436/// suitable for some applications this behavior may not be suitable for all
3437/// applications.
3438///
3439/// Finally the last disadvantage of the pooling allocator is that the
3440/// configuration values for the maximum number of instances, memories, tables,
3441/// etc, must all be fixed up-front. There's not always a clear answer as to
3442/// what these values should be so not all applications may be able to work
3443/// with this constraint.
3444///
3445/// [`madvise`]: https://man7.org/linux/man-pages/man2/madvise.2.html
3446/// [`mprotect`]: https://man7.org/linux/man-pages/man2/mprotect.2.html
3447/// [`mmap`]: https://man7.org/linux/man-pages/man2/mmap.2.html
3448/// [`munmap`]: https://man7.org/linux/man-pages/man2/munmap.2.html
#[cfg(feature = "pooling-allocator")]
#[derive(Debug, Clone, Default)]
pub struct PoolingAllocationConfig {
    // Thin builder over the runtime allocator's own configuration; the setter
    // methods below forward their values directly into this.
    config: crate::runtime::vm::PoolingInstanceAllocatorConfig,
}
3454
3455#[cfg(feature = "pooling-allocator")]
3456impl PoolingAllocationConfig {
3457 /// Returns a new configuration builder with all default settings
3458 /// configured.
    pub fn new() -> PoolingAllocationConfig {
        // Identical to `PoolingAllocationConfig::default()`; provided so the
        // builder can be constructed with the conventional `new()` name.
        PoolingAllocationConfig::default()
    }
3462
3463 /// Configures the maximum number of "unused warm slots" to retain in the
3464 /// pooling allocator.
3465 ///
3466 /// The pooling allocator operates over slots to allocate from, and each
3467 /// slot is considered "cold" if it's never been used before or "warm" if
3468 /// it's been used by some module in the past. Slots in the pooling
3469 /// allocator additionally track an "affinity" flag to a particular core
3470 /// wasm module. When a module is instantiated into a slot then the slot is
3471 /// considered affine to that module, even after the instance has been
3472 /// deallocated.
3473 ///
3474 /// When a new instance is created then a slot must be chosen, and the
3475 /// current algorithm for selecting a slot is:
3476 ///
3477 /// * If there are slots that are affine to the module being instantiated,
3478 /// then the most recently used slot is selected to be allocated from.
3479 /// This is done to improve reuse of resources such as memory mappings and
3480 /// additionally try to benefit from temporal locality for things like
3481 /// caches.
3482 ///
3483 /// * Otherwise if there are more than N affine slots to other modules, then
3484 /// one of those affine slots is chosen to be allocated. The slot chosen
3485 /// is picked on a least-recently-used basis.
3486 ///
3487 /// * Finally, if there are less than N affine slots to other modules, then
3488 /// the non-affine slots are allocated from.
3489 ///
3490 /// This setting, `max_unused_warm_slots`, is the value for N in the above
3491 /// algorithm. The purpose of this setting is to have a knob over the RSS
3492 /// impact of "unused slots" for a long-running wasm server.
3493 ///
3494 /// If this setting is set to 0, for example, then affine slots are
3495 /// aggressively reused on a least-recently-used basis. A "cold" slot is
3496 /// only used if there are no affine slots available to allocate from. This
3497 /// means that the set of slots used over the lifetime of a program is the
3498 /// same as the maximum concurrent number of wasm instances.
3499 ///
3500 /// If this setting is set to infinity, however, then cold slots are
3501 /// prioritized to be allocated from. This means that the set of slots used
3502 /// over the lifetime of a program will approach
3503 /// [`PoolingAllocationConfig::total_memories`], or the maximum number of
3504 /// slots in the pooling allocator.
3505 ///
3506 /// Wasmtime does not aggressively decommit all resources associated with a
3507 /// slot when the slot is not in use. For example the
3508 /// [`PoolingAllocationConfig::linear_memory_keep_resident`] option can be
3509 /// used to keep memory associated with a slot, even when it's not in use.
3510 /// This means that the total set of used slots in the pooling instance
3511 /// allocator can impact the overall RSS usage of a program.
3512 ///
3513 /// The default value for this option is `100`.
    pub fn max_unused_warm_slots(&mut self, max: u32) -> &mut Self {
        // `max` is the "N" in the slot-selection algorithm documented above.
        self.config.max_unused_warm_slots = max;
        self
    }
3518
3519 /// The target number of decommits to do per batch.
3520 ///
3521 /// This is not precise, as we can queue up decommits at times when we
3522 /// aren't prepared to immediately flush them, and so we may go over this
3523 /// target size occasionally.
3524 ///
3525 /// A batch size of one effectively disables batching.
3526 ///
3527 /// Defaults to `1`.
    pub fn decommit_batch_size(&mut self, batch_size: usize) -> &mut Self {
        // Forwarded verbatim to the runtime allocator configuration; the
        // batching behavior itself lives in the allocator.
        self.config.decommit_batch_size = batch_size;
        self
    }
3532
3533 /// How much memory, in bytes, to keep resident for async stacks allocated
3534 /// with the pooling allocator.
3535 ///
3536 /// When [`Config::async_stack_zeroing`] is enabled then Wasmtime will reset
3537 /// the contents of async stacks back to zero upon deallocation. This option
3538 /// can be used to perform the zeroing operation with `memset` up to a
3539 /// certain threshold of bytes instead of using system calls to reset the
3540 /// stack to zero.
3541 ///
3542 /// Note that when using this option the memory with async stacks will
3543 /// never be decommitted.
    #[cfg(feature = "async")]
    pub fn async_stack_keep_resident(&mut self, size: usize) -> &mut Self {
        // Byte threshold below which async-stack zeroing uses `memset` rather
        // than virtual-memory system calls (see doc comment above).
        self.config.async_stack_keep_resident = size;
        self
    }
3549
3550 /// How much memory, in bytes, to keep resident for each linear memory
3551 /// after deallocation.
3552 ///
3553 /// This option is only applicable on Linux and has no effect on other
3554 /// platforms.
3555 ///
3556 /// By default Wasmtime will use `madvise` to reset the entire contents of
3557 /// linear memory back to zero when a linear memory is deallocated. This
3558 /// option can be used to use `memset` instead to set memory back to zero
3559 /// which can, in some configurations, reduce the number of page faults
3560 /// taken when a slot is reused.
    pub fn linear_memory_keep_resident(&mut self, size: usize) -> &mut Self {
        // Number of bytes reset with `memset` instead of `madvise` when a
        // linear memory is deallocated (Linux-only; see doc comment above).
        self.config.linear_memory_keep_resident = size;
        self
    }
3565
3566 /// How much memory, in bytes, to keep resident for each table after
3567 /// deallocation.
3568 ///
3569 /// This option is only applicable on Linux and has no effect on other
3570 /// platforms.
3571 ///
3572 /// This option is the same as
3573 /// [`PoolingAllocationConfig::linear_memory_keep_resident`] except that it
3574 /// is applicable to tables instead.
    pub fn table_keep_resident(&mut self, size: usize) -> &mut Self {
        // Same knob as `linear_memory_keep_resident`, applied to table memory.
        self.config.table_keep_resident = size;
        self
    }
3579
3580 /// The maximum number of concurrent component instances supported (default
3581 /// is `1000`).
3582 ///
3583 /// This provides an upper-bound on the total size of component
3584 /// metadata-related allocations, along with
3585 /// [`PoolingAllocationConfig::max_component_instance_size`]. The upper bound is
3586 ///
3587 /// ```text
3588 /// total_component_instances * max_component_instance_size
3589 /// ```
3590 ///
3591 /// where `max_component_instance_size` is rounded up to the size and alignment
3592 /// of the internal representation of the metadata.
    pub fn total_component_instances(&mut self, count: u32) -> &mut Self {
        // Pool-wide cap on concurrently-live component instances, recorded in
        // the allocator's limits.
        self.config.limits.total_component_instances = count;
        self
    }
3597
3598 /// The maximum size, in bytes, allocated for a component instance's
3599 /// `VMComponentContext` metadata.
3600 ///
3601 /// The [`wasmtime::component::Instance`][crate::component::Instance] type
3602 /// has a static size but its internal `VMComponentContext` is dynamically
3603 /// sized depending on the component being instantiated. This size limit
3604 /// loosely correlates to the size of the component, taking into account
3605 /// factors such as:
3606 ///
3607 /// * number of lifted and lowered functions,
3608 /// * number of memories
3609 /// * number of inner instances
3610 /// * number of resources
3611 ///
3612 /// If the allocated size per instance is too small then instantiation of a
3613 /// module will fail at runtime with an error indicating how many bytes were
3614 /// needed.
3615 ///
3616 /// The default value for this is 1MiB.
3617 ///
3618 /// This provides an upper-bound on the total size of component
3619 /// metadata-related allocations, along with
3620 /// [`PoolingAllocationConfig::total_component_instances`]. The upper bound is
3621 ///
3622 /// ```text
3623 /// total_component_instances * max_component_instance_size
3624 /// ```
3625 ///
3626 /// where `max_component_instance_size` is rounded up to the size and alignment
3627 /// of the internal representation of the metadata.
    pub fn max_component_instance_size(&mut self, size: usize) -> &mut Self {
        // Note: the underlying limit field is named `component_instance_size`
        // (no `max_` prefix) but holds the maximum described above.
        self.config.limits.component_instance_size = size;
        self
    }
3632
3633 /// The maximum number of core instances a single component may contain
3634 /// (default is unlimited).
3635 ///
3636 /// This method (along with
3637 /// [`PoolingAllocationConfig::max_memories_per_component`],
3638 /// [`PoolingAllocationConfig::max_tables_per_component`], and
3639 /// [`PoolingAllocationConfig::max_component_instance_size`]) allows you to cap
3640 /// the amount of resources a single component allocation consumes.
3641 ///
3642 /// If a component will instantiate more core instances than `count`, then
3643 /// the component will fail to instantiate.
    pub fn max_core_instances_per_component(&mut self, count: u32) -> &mut Self {
        // Per-component limit, as opposed to the pool-wide `total_*` limits.
        self.config.limits.max_core_instances_per_component = count;
        self
    }
3648
3649 /// The maximum number of Wasm linear memories that a single component may
3650 /// transitively contain (default is unlimited).
3651 ///
3652 /// This method (along with
3653 /// [`PoolingAllocationConfig::max_core_instances_per_component`],
3654 /// [`PoolingAllocationConfig::max_tables_per_component`], and
3655 /// [`PoolingAllocationConfig::max_component_instance_size`]) allows you to cap
3656 /// the amount of resources a single component allocation consumes.
3657 ///
3658 /// If a component transitively contains more linear memories than `count`,
3659 /// then the component will fail to instantiate.
    pub fn max_memories_per_component(&mut self, count: u32) -> &mut Self {
        // Transitive per-component limit on linear memories.
        self.config.limits.max_memories_per_component = count;
        self
    }
3664
    /// The maximum number of tables that a single component may transitively
    /// contain (default is unlimited).
    ///
    /// This method (along with
    /// [`PoolingAllocationConfig::max_core_instances_per_component`],
    /// [`PoolingAllocationConfig::max_memories_per_component`], and
    /// [`PoolingAllocationConfig::max_component_instance_size`]) allows you to cap
    /// the amount of resources a single component allocation consumes.
    ///
    /// If a component transitively contains more tables than `count`, then
    /// the component will fail to instantiate.
    pub fn max_tables_per_component(&mut self, count: u32) -> &mut Self {
        self.config.limits.max_tables_per_component = count;
        self
    }
3680
3681 /// The maximum number of concurrent Wasm linear memories supported (default
3682 /// is `1000`).
3683 ///
3684 /// This value has a direct impact on the amount of memory allocated by the pooling
3685 /// instance allocator.
3686 ///
3687 /// The pooling instance allocator allocates a memory pool, where each entry
3688 /// in the pool contains the reserved address space for each linear memory
3689 /// supported by an instance.
3690 ///
3691 /// The memory pool will reserve a large quantity of host process address
3692 /// space to elide the bounds checks required for correct WebAssembly memory
3693 /// semantics. Even with 64-bit address spaces, the address space is limited
3694 /// when dealing with a large number of linear memories.
3695 ///
3696 /// For example, on Linux x86_64, the userland address space limit is 128
3697 /// TiB. That might seem like a lot, but each linear memory will *reserve* 6
3698 /// GiB of space by default.
3699 pub fn total_memories(&mut self, count: u32) -> &mut Self {
3700 self.config.limits.total_memories = count;
3701 self
3702 }
3703
3704 /// The maximum number of concurrent tables supported (default is `1000`).
3705 ///
3706 /// This value has a direct impact on the amount of memory allocated by the
3707 /// pooling instance allocator.
3708 ///
3709 /// The pooling instance allocator allocates a table pool, where each entry
3710 /// in the pool contains the space needed for each WebAssembly table
3711 /// supported by an instance (see `table_elements` to control the size of
3712 /// each table).
3713 pub fn total_tables(&mut self, count: u32) -> &mut Self {
3714 self.config.limits.total_tables = count;
3715 self
3716 }
3717
3718 /// The maximum number of execution stacks allowed for asynchronous
3719 /// execution, when enabled (default is `1000`).
3720 ///
3721 /// This value has a direct impact on the amount of memory allocated by the
3722 /// pooling instance allocator.
3723 #[cfg(feature = "async")]
3724 pub fn total_stacks(&mut self, count: u32) -> &mut Self {
3725 self.config.limits.total_stacks = count;
3726 self
3727 }
3728
3729 /// The maximum number of concurrent core instances supported (default is
3730 /// `1000`).
3731 ///
3732 /// This provides an upper-bound on the total size of core instance
3733 /// metadata-related allocations, along with
3734 /// [`PoolingAllocationConfig::max_core_instance_size`]. The upper bound is
3735 ///
3736 /// ```text
3737 /// total_core_instances * max_core_instance_size
3738 /// ```
3739 ///
3740 /// where `max_core_instance_size` is rounded up to the size and alignment of
3741 /// the internal representation of the metadata.
3742 pub fn total_core_instances(&mut self, count: u32) -> &mut Self {
3743 self.config.limits.total_core_instances = count;
3744 self
3745 }
3746
3747 /// The maximum size, in bytes, allocated for a core instance's `VMContext`
3748 /// metadata.
3749 ///
3750 /// The [`Instance`][crate::Instance] type has a static size but its
3751 /// `VMContext` metadata is dynamically sized depending on the module being
3752 /// instantiated. This size limit loosely correlates to the size of the Wasm
3753 /// module, taking into account factors such as:
3754 ///
3755 /// * number of functions
3756 /// * number of globals
3757 /// * number of memories
3758 /// * number of tables
3759 /// * number of function types
3760 ///
3761 /// If the allocated size per instance is too small then instantiation of a
3762 /// module will fail at runtime with an error indicating how many bytes were
3763 /// needed.
3764 ///
3765 /// The default value for this is 1MiB.
3766 ///
3767 /// This provides an upper-bound on the total size of core instance
3768 /// metadata-related allocations, along with
3769 /// [`PoolingAllocationConfig::total_core_instances`]. The upper bound is
3770 ///
3771 /// ```text
3772 /// total_core_instances * max_core_instance_size
3773 /// ```
3774 ///
3775 /// where `max_core_instance_size` is rounded up to the size and alignment of
3776 /// the internal representation of the metadata.
3777 pub fn max_core_instance_size(&mut self, size: usize) -> &mut Self {
3778 self.config.limits.core_instance_size = size;
3779 self
3780 }
3781
3782 /// The maximum number of defined tables for a core module (default is `1`).
3783 ///
3784 /// This value controls the capacity of the `VMTableDefinition` table in
3785 /// each instance's `VMContext` structure.
3786 ///
3787 /// The allocated size of the table will be `tables *
3788 /// sizeof(VMTableDefinition)` for each instance regardless of how many
3789 /// tables are defined by an instance's module.
3790 pub fn max_tables_per_module(&mut self, tables: u32) -> &mut Self {
3791 self.config.limits.max_tables_per_module = tables;
3792 self
3793 }
3794
3795 /// The maximum table elements for any table defined in a module (default is
3796 /// `20000`).
3797 ///
3798 /// If a table's minimum element limit is greater than this value, the
3799 /// module will fail to instantiate.
3800 ///
3801 /// If a table's maximum element limit is unbounded or greater than this
3802 /// value, the maximum will be `table_elements` for the purpose of any
3803 /// `table.grow` instruction.
3804 ///
3805 /// This value is used to reserve the maximum space for each supported
3806 /// table; table elements are pointer-sized in the Wasmtime runtime.
3807 /// Therefore, the space reserved for each instance is `tables *
3808 /// table_elements * sizeof::<*const ()>`.
3809 pub fn table_elements(&mut self, elements: usize) -> &mut Self {
3810 self.config.limits.table_elements = elements;
3811 self
3812 }
3813
3814 /// The maximum number of defined linear memories for a module (default is
3815 /// `1`).
3816 ///
3817 /// This value controls the capacity of the `VMMemoryDefinition` table in
3818 /// each core instance's `VMContext` structure.
3819 ///
3820 /// The allocated size of the table will be `memories *
3821 /// sizeof(VMMemoryDefinition)` for each core instance regardless of how
3822 /// many memories are defined by the core instance's module.
3823 pub fn max_memories_per_module(&mut self, memories: u32) -> &mut Self {
3824 self.config.limits.max_memories_per_module = memories;
3825 self
3826 }
3827
3828 /// The maximum byte size that any WebAssembly linear memory may grow to.
3829 ///
3830 /// This option defaults to 4 GiB meaning that for 32-bit linear memories
3831 /// there is no restrictions. 64-bit linear memories will not be allowed to
3832 /// grow beyond 4 GiB by default.
3833 ///
3834 /// If a memory's minimum size is greater than this value, the module will
3835 /// fail to instantiate.
3836 ///
3837 /// If a memory's maximum size is unbounded or greater than this value, the
3838 /// maximum will be `max_memory_size` for the purpose of any `memory.grow`
3839 /// instruction.
3840 ///
3841 /// This value is used to control the maximum accessible space for each
3842 /// linear memory of a core instance. This can be thought of as a simple
3843 /// mechanism like [`Store::limiter`](crate::Store::limiter) to limit memory
3844 /// at runtime. This value can also affect striping/coloring behavior when
3845 /// used in conjunction with
3846 /// [`memory_protection_keys`](PoolingAllocationConfig::memory_protection_keys).
3847 ///
3848 /// The virtual memory reservation size of each linear memory is controlled
3849 /// by the [`Config::memory_reservation`] setting and this method's
3850 /// configuration cannot exceed [`Config::memory_reservation`].
3851 pub fn max_memory_size(&mut self, bytes: usize) -> &mut Self {
3852 self.config.limits.max_memory_size = bytes;
3853 self
3854 }
3855
    /// Configures whether memory protection keys (MPK) should be used for more
    /// efficient layout of pool-allocated memories.
    ///
    /// When using the pooling allocator (see [`Config::allocation_strategy`],
    /// [`InstanceAllocationStrategy::Pooling`]), memory protection keys can
    /// reduce the total amount of allocated virtual memory by eliminating guard
    /// regions between WebAssembly memories in the pool. It does so by
    /// "coloring" memory regions with different memory keys and setting which
    /// regions are accessible each time execution switches from host to guest
    /// (or vice versa).
    ///
    /// Leveraging MPK requires configuring a smaller-than-default
    /// [`max_memory_size`](PoolingAllocationConfig::max_memory_size) to enable
    /// this coloring/striping behavior. For example embeddings might want to
    /// reduce the default 4G allowance to 128M.
    ///
    /// MPK is only available on Linux (called `pku` there) and recent x86
    /// systems; we check for MPK support at runtime by examining the `CPUID`
    /// register. This configuration setting can be in three states:
    ///
    /// - `auto`: if MPK support is available the guard regions are removed; if
    ///   not, the guard regions remain
    /// - `yes`: use MPK to eliminate guard regions; fail if MPK is not
    ///   supported
    /// - `no`: never use MPK
    ///
    /// By default this value is `no`, but may become `auto` in future
    /// releases.
    ///
    /// __WARNING__: this configuration option is still experimental--use at
    /// your own risk! MPK uses kernel and CPU features to protect memory
    /// regions; you may observe segmentation faults if anything is
    /// misconfigured.
    #[cfg(feature = "memory-protection-keys")]
    pub fn memory_protection_keys(&mut self, enable: Enabled) -> &mut Self {
        self.config.memory_protection_keys = enable;
        self
    }
3894
3895 /// Sets an upper limit on how many memory protection keys (MPK) Wasmtime
3896 /// will use.
3897 ///
3898 /// This setting is only applicable when
3899 /// [`PoolingAllocationConfig::memory_protection_keys`] is set to `enable`
3900 /// or `auto`. Configuring this above the HW and OS limits (typically 15)
3901 /// has no effect.
3902 ///
3903 /// If multiple Wasmtime engines are used in the same process, note that all
3904 /// engines will share the same set of allocated keys; this setting will
3905 /// limit how many keys are allocated initially and thus available to all
3906 /// other engines.
3907 #[cfg(feature = "memory-protection-keys")]
3908 pub fn max_memory_protection_keys(&mut self, max: usize) -> &mut Self {
3909 self.config.max_memory_protection_keys = max;
3910 self
3911 }
3912
    /// Check if memory protection keys (MPK) are available on the current host.
    ///
    /// This is a convenience method for determining MPK availability using the
    /// same method that [`Enabled::Auto`] does. See
    /// [`PoolingAllocationConfig::memory_protection_keys`] for more
    /// information.
    #[cfg(feature = "memory-protection-keys")]
    pub fn are_memory_protection_keys_available() -> bool {
        // Probe the host at runtime for MPK support; see the docs on
        // `memory_protection_keys` for how availability is determined.
        crate::runtime::vm::mpk::is_supported()
    }
3923
3924 /// The maximum number of concurrent GC heaps supported (default is `1000`).
3925 ///
3926 /// This value has a direct impact on the amount of memory allocated by the
3927 /// pooling instance allocator.
3928 ///
3929 /// The pooling instance allocator allocates a GC heap pool, where each
3930 /// entry in the pool contains the space needed for each GC heap used by a
3931 /// store.
3932 #[cfg(feature = "gc")]
3933 pub fn total_gc_heaps(&mut self, count: u32) -> &mut Self {
3934 self.config.limits.total_gc_heaps = count;
3935 self
3936 }
3937
3938 /// Configures whether the Linux-specific [`PAGEMAP_SCAN` ioctl][ioctl] is
3939 /// used to help reset linear memory.
3940 ///
3941 /// When [`Self::linear_memory_keep_resident`] or
3942 /// [`Self::table_keep_resident`] options are configured to nonzero values
3943 /// the default behavior is to `memset` the lowest addresses of a table or
3944 /// memory back to their original contents. With the `PAGEMAP_SCAN` ioctl on
3945 /// Linux this can be done to more intelligently scan for resident pages in
3946 /// the region and only reset those pages back to their original contents
3947 /// with `memset` rather than assuming the low addresses are all resident.
3948 ///
3949 /// This ioctl has the potential to provide a number of performance benefits
3950 /// in high-reuse and high concurrency scenarios. Notably this enables
3951 /// Wasmtime to scan the entire region of WebAssembly linear memory and
3952 /// manually reset memory back to its original contents, up to
3953 /// [`Self::linear_memory_keep_resident`] bytes, possibly skipping an
3954 /// `madvise` entirely. This can be more efficient by avoiding removing
3955 /// pages from the address space entirely and additionally ensuring that
3956 /// future use of the linear memory doesn't incur page faults as the pages
3957 /// remain resident.
3958 ///
3959 /// At this time this configuration option is still being evaluated as to
3960 /// how appropriate it is for all use cases. It currently defaults to
3961 /// `no` or disabled but may change to `auto`, enable if supported, in the
3962 /// future. This option is only supported on Linux and requires a kernel
3963 /// version of 6.7 or higher.
3964 ///
3965 /// [ioctl]: https://www.man7.org/linux/man-pages/man2/PAGEMAP_SCAN.2const.html
3966 pub fn pagemap_scan(&mut self, enable: Enabled) -> &mut Self {
3967 self.config.pagemap_scan = enable;
3968 self
3969 }
3970
    /// Tests whether [`Self::pagemap_scan`] is available or not on the host
    /// system.
    pub fn is_pagemap_scan_available() -> bool {
        // Delegates to the runtime's platform-specific probe; per the docs on
        // `Self::pagemap_scan` this requires Linux with kernel 6.7+.
        crate::runtime::vm::PoolingInstanceAllocatorConfig::is_pagemap_scan_available()
    }
3976}
3977
/// Detects whether the named target `feature` is available on the host CPU.
///
/// Returns `Some(true)`/`Some(false)` when the feature name is recognized for
/// the host architecture and its presence could be determined, and `None` when
/// the name is unknown (or the host architecture has no detection support
/// here).
#[cfg(feature = "std")]
fn detect_host_feature(feature: &str) -> Option<bool> {
    // aarch64: stable `std::arch` detection macros exist, so use them directly.
    #[cfg(target_arch = "aarch64")]
    {
        return match feature {
            "lse" => Some(std::arch::is_aarch64_feature_detected!("lse")),
            "paca" => Some(std::arch::is_aarch64_feature_detected!("paca")),
            "fp16" => Some(std::arch::is_aarch64_feature_detected!("fp16")),

            _ => None,
        };
    }

    // `is_s390x_feature_detected` is nightly only for now, so use the
    // STORE FACILITY LIST EXTENDED instruction as a temporary measure.
    #[cfg(target_arch = "s390x")]
    {
        let mut facility_list: [u64; 4] = [0; 4];
        // SAFETY: STFLE stores at most `r0 + 1` doublewords at the given
        // address. `r0` is initialized to `facility_list.len() - 1` (i.e. 3),
        // so the instruction writes at most 4 doublewords into the 4-element
        // buffer and stays in bounds.
        unsafe {
            core::arch::asm!(
                "stfle 0({})",
                in(reg_addr) facility_list.as_mut_ptr() ,
                inout("r0") facility_list.len() as u64 - 1 => _,
                options(nostack)
            );
        }
        // Test a single facility bit out of the stored facility list.
        let get_facility_bit = |n: usize| {
            // NOTE: bits are numbered from the left.
            facility_list[n / 64] & (1 << (63 - (n % 64))) != 0
        };

        // Facility bit numbers here map the requested feature names onto the
        // corresponding z/Architecture facility indications.
        return match feature {
            "mie3" => Some(get_facility_bit(61)),
            "mie4" => Some(get_facility_bit(84)),
            "vxrs_ext2" => Some(get_facility_bit(148)),
            "vxrs_ext3" => Some(get_facility_bit(198)),

            _ => None,
        };
    }

    #[cfg(target_arch = "riscv64")]
    {
        return match feature {
            // `is_riscv64_feature_detected` is not yet stable, so we cannot
            // use it. For now lie and say all features are always found to
            // keep tests working.
            _ => Some(true),
        };
    }

    // x86_64: stable `std::is_x86_feature_detected!` covers everything needed.
    #[cfg(target_arch = "x86_64")]
    {
        return match feature {
            "cmpxchg16b" => Some(std::is_x86_feature_detected!("cmpxchg16b")),
            "sse3" => Some(std::is_x86_feature_detected!("sse3")),
            "ssse3" => Some(std::is_x86_feature_detected!("ssse3")),
            "sse4.1" => Some(std::is_x86_feature_detected!("sse4.1")),
            "sse4.2" => Some(std::is_x86_feature_detected!("sse4.2")),
            "popcnt" => Some(std::is_x86_feature_detected!("popcnt")),
            "avx" => Some(std::is_x86_feature_detected!("avx")),
            "avx2" => Some(std::is_x86_feature_detected!("avx2")),
            "fma" => Some(std::is_x86_feature_detected!("fma")),
            "bmi1" => Some(std::is_x86_feature_detected!("bmi1")),
            "bmi2" => Some(std::is_x86_feature_detected!("bmi2")),
            "avx512bitalg" => Some(std::is_x86_feature_detected!("avx512bitalg")),
            "avx512dq" => Some(std::is_x86_feature_detected!("avx512dq")),
            "avx512f" => Some(std::is_x86_feature_detected!("avx512f")),
            "avx512vl" => Some(std::is_x86_feature_detected!("avx512vl")),
            "avx512vbmi" => Some(std::is_x86_feature_detected!("avx512vbmi")),
            "lzcnt" => Some(std::is_x86_feature_detected!("lzcnt")),

            _ => None,
        };
    }

    // Fallback for architectures without any detection logic above.
    #[allow(
        unreachable_code,
        reason = "reachable or not depending on if a target above matches"
    )]
    {
        let _ = feature;
        return None;
    }
}