// wasmtime/config.rs
1use crate::prelude::*;
2use alloc::sync::Arc;
3use bitflags::Flags;
4use core::fmt;
5use core::num::{NonZeroU32, NonZeroUsize};
6use core::str::FromStr;
7#[cfg(any(feature = "cranelift", feature = "winch"))]
8use std::path::Path;
9pub use wasmparser::WasmFeatures;
10use wasmtime_environ::{ConfigTunables, OperatorCost, OperatorCostStrategy, TripleExt, Tunables};
11
12#[cfg(feature = "runtime")]
13use crate::memory::MemoryCreator;
14#[cfg(feature = "runtime")]
15use crate::profiling_agent::{self, ProfilingAgent};
16#[cfg(feature = "runtime")]
17use crate::runtime::vm::{
18 GcRuntime, InstanceAllocator, OnDemandInstanceAllocator, RuntimeMemoryCreator,
19};
20#[cfg(feature = "runtime")]
21use crate::trampoline::MemoryCreatorProxy;
22
23#[cfg(feature = "async")]
24use crate::stack::{StackCreator, StackCreatorProxy};
25#[cfg(feature = "async")]
26use wasmtime_fiber::RuntimeFiberStackCreator;
27
28#[cfg(feature = "runtime")]
29pub use crate::runtime::code_memory::CustomCodeMemory;
30#[cfg(feature = "cache")]
31pub use wasmtime_cache::{Cache, CacheConfig};
32#[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
33pub use wasmtime_environ::CacheStore;
34
/// Default maximum number of Wasm frames captured in a backtrace; see
/// [`Config::wasm_backtrace_max_frames`].
pub(crate) const DEFAULT_WASM_BACKTRACE_MAX_FRAMES: NonZeroUsize = NonZeroUsize::new(20).unwrap();
36
/// Represents the module instance allocation strategy to use.
#[derive(Clone)]
#[non_exhaustive]
pub enum InstanceAllocationStrategy {
    /// The on-demand instance allocation strategy.
    ///
    /// Resources related to a module instance are allocated at instantiation time and
    /// immediately deallocated when the `Store` referencing the instance is dropped.
    ///
    /// This is the default allocation strategy for Wasmtime.
    OnDemand,
    /// The pooling instance allocation strategy.
    ///
    /// A pool of resources is created in advance and module instantiation reuses resources
    /// from the pool. Resources are returned to the pool when the `Store` referencing the instance
    /// is dropped.
    ///
    /// Only available when the `pooling-allocator` crate feature is enabled.
    #[cfg(feature = "pooling-allocator")]
    Pooling(PoolingAllocationConfig),
}
56
57impl InstanceAllocationStrategy {
58 /// The default pooling instance allocation strategy.
59 #[cfg(feature = "pooling-allocator")]
60 pub fn pooling() -> Self {
61 Self::Pooling(Default::default())
62 }
63}
64
65impl Default for InstanceAllocationStrategy {
66 fn default() -> Self {
67 Self::OnDemand
68 }
69}
70
#[cfg(feature = "pooling-allocator")]
impl From<PoolingAllocationConfig> for InstanceAllocationStrategy {
    /// Wraps a pooling configuration in
    /// [`InstanceAllocationStrategy::Pooling`].
    fn from(cfg: PoolingAllocationConfig) -> InstanceAllocationStrategy {
        Self::Pooling(cfg)
    }
}
77
#[derive(Clone)]
/// Configure the strategy used for versioning in serializing and deserializing [`crate::Module`].
///
/// The default strategy is [`ModuleVersionStrategy::WasmtimeVersion`].
pub enum ModuleVersionStrategy {
    /// Use the wasmtime crate's Cargo package version.
    WasmtimeVersion,
    /// Use a custom version string. Must be at most 255 bytes.
    Custom(String),
    /// Emit no version string in serialization, and accept all version strings in deserialization.
    None,
}
88
89impl Default for ModuleVersionStrategy {
90 fn default() -> Self {
91 ModuleVersionStrategy::WasmtimeVersion
92 }
93}
94
95impl core::hash::Hash for ModuleVersionStrategy {
96 fn hash<H: core::hash::Hasher>(&self, hasher: &mut H) {
97 match self {
98 Self::WasmtimeVersion => env!("CARGO_PKG_VERSION").hash(hasher),
99 Self::Custom(s) => s.hash(hasher),
100 Self::None => {}
101 };
102 }
103}
104
impl ModuleVersionStrategy {
    /// Get the string-encoding version of the module.
    ///
    /// Returns the empty string for `None`. NOTE(review): `WasmtimeVersion`
    /// yields only the crate's *major* version here, while the `Hash` impl
    /// above hashes the full `CARGO_PKG_VERSION` — presumably intentional
    /// (compatibility tracked per major version), but worth confirming.
    pub fn as_str(&self) -> &str {
        match &self {
            Self::WasmtimeVersion => env!("CARGO_PKG_VERSION_MAJOR"),
            Self::Custom(c) => c,
            Self::None => "",
        }
    }
}
115
/// Configuration for record/replay.
///
/// The `Recording`/`Replaying` variants are only available when the `rr`
/// crate feature is enabled; `None` disables record/replay entirely.
#[derive(Clone)]
#[non_exhaustive]
pub enum RRConfig {
    #[cfg(feature = "rr")]
    /// Recording on store is enabled.
    Recording,
    #[cfg(feature = "rr")]
    /// Replaying on store is enabled.
    Replaying,
    /// No record/replay is enabled.
    None,
}
129
130/// Global configuration options used to create an [`Engine`](crate::Engine)
131/// and customize its behavior.
132///
133/// This structure exposed a builder-like interface and is primarily consumed by
134/// [`Engine::new()`](crate::Engine::new).
135///
136/// The validation of `Config` is deferred until the engine is being built, thus
137/// a problematic config may cause `Engine::new` to fail.
138///
139/// # Defaults
140///
141/// The `Default` trait implementation and the return value from
142/// [`Config::new()`] are the same and represent the default set of
143/// configuration for an engine. The exact set of defaults will differ based on
144/// properties such as enabled Cargo features at compile time and the configured
145/// target (see [`Config::target`]). Configuration options document their
146/// default values and what the conditional value of the default is where
147/// applicable.
#[derive(Clone)]
pub struct Config {
    // Compiler configuration; `None` when compilation has been disabled via
    // `enable_compiler(false)` (or `Config::without_compiler`).
    #[cfg(any(feature = "cranelift", feature = "winch"))]
    compiler_config: Option<CompilerConfig>,
    // Compilation target override; `None` means "compile for the host".
    target: Option<target_lexicon::Triple>,
    #[cfg(feature = "gc")]
    collector: Collector,
    profiling_strategy: ProfilingStrategy,
    // Tri-state (`Option`-per-knob) tunables — presumably resolved into final
    // `Tunables` when the engine is built; TODO confirm against engine code.
    tunables: ConfigTunables,

    #[cfg(feature = "cache")]
    pub(crate) cache: Option<Cache>,
    #[cfg(feature = "runtime")]
    pub(crate) mem_creator: Option<Arc<dyn RuntimeMemoryCreator>>,
    #[cfg(feature = "runtime")]
    pub(crate) custom_code_memory: Option<Arc<dyn CustomCodeMemory>>,
    pub(crate) allocation_strategy: InstanceAllocationStrategy,
    // See `Config::max_wasm_stack`; defaults to 512 KiB.
    pub(crate) max_wasm_stack: usize,
    /// Explicitly enabled features via `Config::wasm_*` methods. This is a
    /// signal that the embedder specifically wants something turned on
    /// regardless of the defaults that Wasmtime might otherwise have enabled.
    ///
    /// Note that this, and `disabled_features` below, start as the empty set of
    /// features to only track explicit user requests.
    pub(crate) enabled_features: WasmFeatures,
    /// Same as `enabled_features`, but for those that are explicitly disabled.
    pub(crate) disabled_features: WasmFeatures,
    pub(crate) wasm_backtrace_details_env_used: bool,
    // `None` disables Wasm backtrace collection entirely; see
    // `Config::wasm_backtrace_max_frames`.
    pub(crate) wasm_backtrace_max_frames: Option<NonZeroUsize>,
    // `None` = unset; see `Config::native_unwind_info`.
    pub(crate) native_unwind_info: Option<bool>,
    #[cfg(any(feature = "async", feature = "stack-switching"))]
    pub(crate) async_stack_size: usize,
    #[cfg(feature = "async")]
    pub(crate) async_stack_zeroing: bool,
    #[cfg(feature = "async")]
    pub(crate) stack_creator: Option<Arc<dyn RuntimeFiberStackCreator>>,
    pub(crate) module_version: ModuleVersionStrategy,
    pub(crate) parallel_compilation: bool,
    pub(crate) memory_guaranteed_dense_image_size: u64,
    pub(crate) force_memory_init_memfd: bool,
    pub(crate) wmemcheck: bool,
    #[cfg(feature = "coredump")]
    pub(crate) coredump_on_trap: bool,
    pub(crate) macos_use_mach_ports: bool,
    pub(crate) detect_host_feature: Option<fn(&str) -> Option<bool>>,
    pub(crate) x86_float_abi_ok: Option<bool>,
    pub(crate) shared_memory: bool,
    pub(crate) rr_config: RRConfig,
}
197
/// User-provided configuration for the compiler.
#[cfg(any(feature = "cranelift", feature = "winch"))]
#[derive(Debug, Clone)]
struct CompilerConfig {
    // Explicitly-selected compilation strategy, if any.
    strategy: Option<Strategy>,
    // Named compiler settings (key -> value), e.g. those set via
    // `Config::cranelift_flag_set`.
    settings: crate::hash_map::HashMap<String, String>,
    // Boolean flags enabled by name, e.g. via `Config::cranelift_flag_enable`.
    flags: crate::hash_set::HashSet<String>,
    // Backend storage for Cranelift's incremental-compilation cache; see
    // `Config::enable_incremental_compilation`.
    #[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
    cache_store: Option<Arc<dyn CacheStore>>,
    // When set, looks like a directory for CLIF output — TODO confirm.
    clif_dir: Option<std::path::PathBuf>,
    // Whether wmemcheck instrumentation is requested of the compiler.
    wmemcheck: bool,
}
210
#[cfg(any(feature = "cranelift", feature = "winch"))]
impl CompilerConfig {
    /// Creates an empty compiler configuration with no settings, flags, or
    /// strategy selected.
    fn new() -> Self {
        CompilerConfig {
            strategy: Strategy::Auto.not_auto(),
            settings: Default::default(),
            flags: Default::default(),
            #[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
            cache_store: None,
            clif_dir: None,
            wmemcheck: false,
        }
    }

    /// Ensures that the key is not set or equals the given value; when the
    /// key is unset it is set to the given value.
    ///
    /// # Returns
    ///
    /// `true` when the setting was applied or already matched; `false` when
    /// the setting was previously and explicitly set to a different value.
    fn ensure_setting_unset_or_given(&mut self, k: &str, v: &str) -> bool {
        match self.settings.get(k) {
            Some(existing) => existing.as_str() == v,
            None => {
                self.settings.insert(k.to_string(), v.to_string());
                true
            }
        }
    }
}
244
#[cfg(any(feature = "cranelift", feature = "winch"))]
impl Default for CompilerConfig {
    /// Equivalent to [`CompilerConfig::new`].
    fn default() -> Self {
        CompilerConfig::new()
    }
}
251
252impl Config {
253 /// Creates a new configuration object with the default configuration
254 /// specified.
    pub fn new() -> Self {
        let mut ret = Self {
            tunables: ConfigTunables::default(),
            #[cfg(any(feature = "cranelift", feature = "winch"))]
            compiler_config: Some(CompilerConfig::default()),
            target: None,
            #[cfg(feature = "gc")]
            collector: Collector::default(),
            #[cfg(feature = "cache")]
            cache: None,
            profiling_strategy: ProfilingStrategy::None,
            #[cfg(feature = "runtime")]
            mem_creator: None,
            #[cfg(feature = "runtime")]
            custom_code_memory: None,
            allocation_strategy: InstanceAllocationStrategy::OnDemand,
            // 512k of stack -- note that this is chosen currently to not be too
            // big, not be too small, and be a good default for most platforms.
            // One platform of particular note is Windows where the stack size
            // of the main thread seems to, by default, be smaller than that of
            // Linux and macOS. This 512k value at least lets our current test
            // suite pass on the main thread of Windows (using `--test-threads
            // 1` forces this), or at least it passed when this change was
            // committed.
            max_wasm_stack: 512 * 1024,
            wasm_backtrace_details_env_used: false,
            wasm_backtrace_max_frames: Some(DEFAULT_WASM_BACKTRACE_MAX_FRAMES),
            native_unwind_info: None,
            // Feature sets start empty: they record only *explicit* embedder
            // requests (see the field docs on `Config`).
            enabled_features: WasmFeatures::empty(),
            disabled_features: WasmFeatures::empty(),
            // 2 MiB of async stack by default.
            #[cfg(any(feature = "async", feature = "stack-switching"))]
            async_stack_size: 2 << 20,
            #[cfg(feature = "async")]
            async_stack_zeroing: false,
            #[cfg(feature = "async")]
            stack_creator: None,
            module_version: ModuleVersionStrategy::default(),
            // Disabled under Miri.
            parallel_compilation: !cfg!(miri),
            // 16 MiB.
            memory_guaranteed_dense_image_size: 16 << 20,
            force_memory_init_memfd: false,
            wmemcheck: false,
            #[cfg(feature = "coredump")]
            coredump_on_trap: false,
            // Disabled under Miri.
            macos_use_mach_ports: !cfg!(miri),
            // Host feature detection requires `std` to query the platform.
            #[cfg(feature = "std")]
            detect_host_feature: Some(detect_host_feature),
            #[cfg(not(feature = "std"))]
            detect_host_feature: None,
            x86_float_abi_ok: None,
            shared_memory: false,
            rr_config: RRConfig::None,
        };
        // The default backtrace-details behavior consults the
        // `WASMTIME_BACKTRACE_DETAILS` environment variable (when `std` is
        // enabled); see `wasm_backtrace_details`.
        ret.wasm_backtrace_details(WasmBacktraceDetails::Environment);
        ret
    }
310
311 #[cfg(any(feature = "cranelift", feature = "winch"))]
312 pub(crate) fn has_compiler(&self) -> bool {
313 self.compiler_config.is_some()
314 }
315
316 #[track_caller]
317 #[cfg(any(feature = "cranelift", feature = "winch"))]
318 fn compiler_config_mut(&mut self) -> &mut CompilerConfig {
319 self.compiler_config.as_mut().expect(
320 "cannot configure compiler settings for `Config`s \
321 created by `Config::without_compiler`",
322 )
323 }
324
325 /// Configure whether Wasm compilation is enabled.
326 ///
327 /// Disabling Wasm compilation will allow you to load and run
328 /// [pre-compiled][crate::Engine::precompile_module] Wasm programs, but not
329 /// to compile and run new Wasm programs that have not already been
330 /// pre-compiled.
331 ///
332 /// Many compilation-related configuration methods will panic if compilation
333 /// has been disabled.
334 ///
335 /// Note that there are two ways to disable Wasm compilation:
336 ///
337 /// 1. Statically, by disabling the `"cranelift"` and `"winch"` cargo
338 /// features when building Wasmtime. These builds of Wasmtime will have
339 /// smaller code size, since they do not include any of the code to
340 /// compile Wasm.
341 ///
342 /// 2. Dynamically, by passing `false` to this method at run-time when
343 /// configuring Wasmtime. The Wasmtime binary will still include the code
344 /// for compiling Wasm, it just won't be executed, so code size is larger
345 /// than with the first approach.
346 ///
347 /// The static approach is better in most cases, however dynamically calling
348 /// `enable_compiler(false)` is useful whenever you create multiple
349 /// `Engine`s in the same process, some of which must be able to compile
350 /// Wasm and some of which should never do so. Tests are a common example of
351 /// such a situation, especially when there are multiple Rust binaries in
352 /// the same cargo workspace, and cargo's feature resolution enables the
353 /// `"cranelift"` or `"winch"` features across the whole workspace.
354 #[cfg(any(feature = "cranelift", feature = "winch"))]
355 pub fn enable_compiler(&mut self, enable: bool) -> &mut Self {
356 match (enable, &self.compiler_config) {
357 (true, Some(_)) | (false, None) => {}
358 (true, None) => {
359 self.compiler_config = Some(CompilerConfig::default());
360 }
361 (false, Some(_)) => {
362 self.compiler_config = None;
363 }
364 }
365 self
366 }
367
368 /// Configures the target platform of this [`Config`].
369 ///
370 /// This method is used to configure the output of compilation in an
371 /// [`Engine`](crate::Engine). This can be used, for example, to
372 /// cross-compile from one platform to another. By default, the host target
373 /// triple is used meaning compiled code is suitable to run on the host.
374 ///
375 /// Note that the [`Module`](crate::Module) type can only be created if the
376 /// target configured here matches the host. Otherwise if a cross-compile is
377 /// being performed where the host doesn't match the target then
378 /// [`Engine::precompile_module`](crate::Engine::precompile_module) must be
379 /// used instead.
380 ///
381 /// Target-specific flags (such as CPU features) will not be inferred by
382 /// default for the target when one is provided here. This means that this
383 /// can also be used, for example, with the host architecture to disable all
384 /// host-inferred feature flags. Configuring target-specific flags can be
385 /// done with [`Config::cranelift_flag_set`] and
386 /// [`Config::cranelift_flag_enable`].
387 ///
388 /// # Errors
389 ///
390 /// This method will error if the given target triple is not supported.
391 pub fn target(&mut self, target: &str) -> Result<&mut Self> {
392 self.target =
393 Some(target_lexicon::Triple::from_str(target).map_err(|e| crate::format_err!(e))?);
394
395 Ok(self)
396 }
397
398 /// Enables the incremental compilation cache in Cranelift, using the provided `CacheStore`
399 /// backend for storage.
400 ///
401 /// # Panics
402 ///
403 /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
404 #[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
405 pub fn enable_incremental_compilation(
406 &mut self,
407 cache_store: Arc<dyn CacheStore>,
408 ) -> Result<&mut Self> {
409 self.compiler_config_mut().cache_store = Some(cache_store);
410 Ok(self)
411 }
412
    /// Deprecated no-op kept for backwards compatibility; async support no
    /// longer needs to be enabled ahead of time.
    #[doc(hidden)]
    #[deprecated(note = "no longer has any effect")]
    #[cfg(feature = "async")]
    pub fn async_support(&mut self, _enable: bool) -> &mut Self {
        self
    }
419
420 /// Configures whether DWARF debug information will be emitted
421 /// during compilation for a native debugger on the Wasmtime
422 /// process to consume.
423 ///
424 /// Note that the `debug-builtins` compile-time Cargo feature must also be
425 /// enabled for native debuggers such as GDB or LLDB to be able to debug
426 /// guest WebAssembly programs.
427 ///
428 /// By default this option is `false`.
429 /// **Note** Enabling this option is not compatible with the Winch compiler.
    pub fn debug_info(&mut self, enable: bool) -> &mut Self {
        // `Some` records an explicit embedder choice; `None` would mean
        // "use the default".
        self.tunables.debug_native = Some(enable);
        self
    }
434
435 /// Configures whether compiled guest code will be instrumented to
436 /// provide debugging at the Wasm VM level.
437 ///
438 /// This is required in order to enable a guest-level debugging
439 /// API that can precisely examine Wasm VM state and (eventually,
440 /// once it is complete) set breakpoints and watchpoints and step
441 /// through code.
442 ///
443 /// Without this enabled, debugging can only be done via a native
444 /// debugger operating on the compiled guest code (see
445 /// [`Config::debug_info`] and is "best-effort": we may be able to
446 /// recover some Wasm locals or operand stack values, but it is
447 /// not guaranteed, even when optimizations are disabled.
448 ///
449 /// When this is enabled, additional instrumentation is inserted
450 /// that directly tracks the Wasm VM state at every step. This has
451 /// some performance impact, but allows perfect debugging
452 /// fidelity.
453 ///
454 /// Breakpoints, watchpoints, and stepping are not yet supported,
455 /// but will be added in a future version of Wasmtime.
456 ///
457 /// This enables use of the [`crate::FrameHandle`] API which is
458 /// provided by [`crate::Caller::debug_exit_frames`] or
459 /// [`crate::Store::debug_exit_frames`].
460 ///
461 /// ***Note*** Enabling this option is not compatible with the
462 /// Winch compiler.
463 #[cfg(feature = "debug")]
464 pub fn guest_debug(&mut self, enable: bool) -> &mut Self {
465 self.tunables.debug_guest = Some(enable);
466 self
467 }
468
469 /// Configures whether [`WasmBacktrace`] will be present in the context of
470 /// errors returned from Wasmtime.
471 ///
472 /// This method is deprecated in favor of
473 /// [`Config::wasm_backtrace_max_frames`]. Calling `wasm_backtrace(false)`
474 /// is equivalent to `wasm_backtrace_max_frames(None)`, and
475 /// `wasm_backtrace(true)` will leave `wasm_backtrace_max_frames` unchanged
476 /// if the value is `Some` and will otherwise restore the default `Some`
477 /// value.
478 ///
479 /// [`WasmBacktrace`]: crate::WasmBacktrace
480 #[deprecated = "use `wasm_backtrace_max_frames` instead"]
481 pub fn wasm_backtrace(&mut self, enable: bool) -> &mut Self {
482 match (enable, self.wasm_backtrace_max_frames) {
483 (false, _) => self.wasm_backtrace_max_frames = None,
484 // Wasm backtraces were disabled; enable them with the
485 // default maximum number of frames to capture.
486 (true, None) => {
487 self.wasm_backtrace_max_frames = Some(DEFAULT_WASM_BACKTRACE_MAX_FRAMES)
488 }
489 // Wasm backtraces are already enabled; keep the existing
490 // max-frames configuration.
491 (true, Some(_)) => {}
492 }
493 self
494 }
495
496 /// Configures whether backtraces in `Trap` will parse debug info in the wasm file to
497 /// have filename/line number information.
498 ///
499 /// When enabled this will causes modules to retain debugging information
500 /// found in wasm binaries. This debug information will be used when a trap
501 /// happens to symbolicate each stack frame and attempt to print a
502 /// filename/line number for each wasm frame in the stack trace.
503 ///
504 /// By default this option is `WasmBacktraceDetails::Environment`, meaning
505 /// that wasm will read `WASMTIME_BACKTRACE_DETAILS` to indicate whether
506 /// details should be parsed. Note that the `std` feature of this crate must
507 /// be active to read environment variables, otherwise this is disabled by
508 /// default.
509 pub fn wasm_backtrace_details(&mut self, enable: WasmBacktraceDetails) -> &mut Self {
510 self.wasm_backtrace_details_env_used = false;
511 self.tunables.parse_wasm_debuginfo = match enable {
512 WasmBacktraceDetails::Enable => Some(true),
513 WasmBacktraceDetails::Disable => Some(false),
514 WasmBacktraceDetails::Environment => {
515 #[cfg(feature = "std")]
516 {
517 self.wasm_backtrace_details_env_used = true;
518 std::env::var("WASMTIME_BACKTRACE_DETAILS")
519 .map(|s| Some(s == "1"))
520 .unwrap_or(Some(false))
521 }
522 #[cfg(not(feature = "std"))]
523 {
524 Some(false)
525 }
526 }
527 };
528 self
529 }
530
531 /// Configures the maximum number of WebAssembly frames to collect in
532 /// backtraces.
533 ///
534 /// A backtrace may be collected whenever an error is returned from a host
535 /// function call through to WebAssembly or when WebAssembly itself hits a
536 /// trap condition, such as an out-of-bounds memory access. This flag
537 /// indicates, in these conditions, whether the backtrace is collected or
538 /// not and how many frames should be collected.
539 ///
540 /// Currently wasm backtraces are implemented through frame pointer walking.
541 /// This means that collecting a backtrace is expected to be a fast and
542 /// relatively cheap operation. Additionally backtrace collection is
543 /// suitable in concurrent environments since one thread capturing a
544 /// backtrace won't block other threads.
545 ///
546 /// Collected backtraces are attached via
547 /// [`Error::context`](crate::Error::context) to errors returned from host
548 /// functions. The [`WasmBacktrace`] type can be acquired via
549 /// [`Error::downcast_ref`](crate::Error::downcast_ref) to inspect the
550 /// backtrace. When this option is set to `None` then this context is never
551 /// applied to errors coming out of wasm.
552 ///
553 /// The default value is 20.
554 ///
555 /// [`WasmBacktrace`]: crate::WasmBacktrace
    pub fn wasm_backtrace_max_frames(&mut self, limit: Option<NonZeroUsize>) -> &mut Self {
        // `None` disables backtrace collection entirely (see doc comment above).
        self.wasm_backtrace_max_frames = limit;
        self
    }
560
561 /// Configures whether to generate native unwind information
562 /// (e.g. `.eh_frame` on Linux).
563 ///
564 /// This configuration option only exists to help third-party stack
565 /// capturing mechanisms, such as the system's unwinder or the `backtrace`
566 /// crate, determine how to unwind through Wasm frames. It does not affect
567 /// whether Wasmtime can capture Wasm backtraces or not. The presence of
568 /// [`WasmBacktrace`] is controlled by the
569 /// [`Config::wasm_backtrace_max_frames`] option.
570 ///
571 /// Native unwind information is included:
572 /// - When targeting Windows, since the Windows ABI requires it.
573 /// - By default.
574 ///
575 /// Note that systems loading many modules may wish to disable this
576 /// configuration option instead of leaving it on-by-default. Some platforms
577 /// exhibit quadratic behavior when registering/unregistering unwinding
578 /// information which can greatly slow down the module loading/unloading
579 /// process.
580 ///
581 /// [`WasmBacktrace`]: crate::WasmBacktrace
    pub fn native_unwind_info(&mut self, enable: bool) -> &mut Self {
        // `Some` records an explicit choice. Per the doc comment above,
        // Windows requires unwind info regardless — presumably enforced at
        // engine-build time; confirm against validation code.
        self.native_unwind_info = Some(enable);
        self
    }
586
587 /// Configures whether execution of WebAssembly will "consume fuel" to
588 /// either halt or yield execution as desired.
589 ///
590 /// This can be used to deterministically prevent infinitely-executing
591 /// WebAssembly code by instrumenting generated code to consume fuel as it
592 /// executes. When fuel runs out a trap is raised, however [`Store`] can be
593 /// configured to yield execution periodically via
594 /// [`crate::Store::fuel_async_yield_interval`].
595 ///
596 /// Note that a [`Store`] starts with no fuel, so if you enable this option
597 /// you'll have to be sure to pour some fuel into [`Store`] before
598 /// executing some code.
599 ///
600 /// By default this option is `false`.
601 ///
602 /// **Note** Enabling this option is not compatible with the Winch compiler.
603 ///
604 /// [`Store`]: crate::Store
    pub fn consume_fuel(&mut self, enable: bool) -> &mut Self {
        // Tri-state tunable: `None` means "use the default", `Some` records
        // this explicit setting.
        self.tunables.consume_fuel = Some(enable);
        self
    }
609
610 /// Configures the fuel cost of each WebAssembly operator.
611 ///
612 /// This is only relevant when [`Config::consume_fuel`] is enabled.
    pub fn operator_cost(&mut self, cost: OperatorCost) -> &mut Self {
        // Wrap the per-operator costs in a table-lookup strategy; only
        // relevant when `consume_fuel` is enabled (see doc comment above).
        self.tunables.operator_cost = Some(OperatorCostStrategy::table(cost));
        self
    }
617
618 /// Enables epoch-based interruption.
619 ///
620 /// When executing code in async mode, we sometimes want to
621 /// implement a form of cooperative timeslicing: long-running Wasm
622 /// guest code should periodically yield to the executor
623 /// loop. This yielding could be implemented by using "fuel" (see
624 /// [`consume_fuel`](Config::consume_fuel)). However, fuel
625 /// instrumentation is somewhat expensive: it modifies the
626 /// compiled form of the Wasm code so that it maintains a precise
627 /// instruction count, frequently checking this count against the
628 /// remaining fuel. If one does not need this precise count or
629 /// deterministic interruptions, and only needs a periodic
630 /// interrupt of some form, then It would be better to have a more
631 /// lightweight mechanism.
632 ///
633 /// Epoch-based interruption is that mechanism. There is a global
634 /// "epoch", which is a counter that divides time into arbitrary
635 /// periods (or epochs). This counter lives on the
636 /// [`Engine`](crate::Engine) and can be incremented by calling
637 /// [`Engine::increment_epoch`](crate::Engine::increment_epoch).
638 /// Epoch-based instrumentation works by setting a "deadline
639 /// epoch". The compiled code knows the deadline, and at certain
640 /// points, checks the current epoch against that deadline. It
641 /// will yield if the deadline has been reached.
642 ///
643 /// The idea is that checking an infrequently-changing counter is
644 /// cheaper than counting and frequently storing a precise metric
645 /// (instructions executed) locally. The interruptions are not
646 /// deterministic, but if the embedder increments the epoch in a
647 /// periodic way (say, every regular timer tick by a thread or
648 /// signal handler), then we can ensure that all async code will
649 /// yield to the executor within a bounded time.
650 ///
651 /// The deadline check cannot be avoided by malicious wasm code. It is safe
652 /// to use epoch deadlines to limit the execution time of untrusted
653 /// code.
654 ///
655 /// The [`Store`](crate::Store) tracks the deadline, and controls
656 /// what happens when the deadline is reached during
657 /// execution. Several behaviors are possible:
658 ///
659 /// - Trap if code is executing when the epoch deadline is
660 /// met. See
661 /// [`Store::epoch_deadline_trap`](crate::Store::epoch_deadline_trap).
662 ///
663 /// - Call an arbitrary function. This function may chose to trap or
664 /// increment the epoch. See
665 /// [`Store::epoch_deadline_callback`](crate::Store::epoch_deadline_callback).
666 ///
667 /// - Yield to the executor loop, then resume when the future is
668 /// next polled. See
669 /// [`Store::epoch_deadline_async_yield_and_update`](crate::Store::epoch_deadline_async_yield_and_update).
670 ///
671 /// Trapping is the default. The yielding behaviour may be used for
672 /// the timeslicing behavior described above.
673 ///
674 /// This feature is available with or without async support.
675 /// However, without async support, the timeslicing behaviour is
676 /// not available. This means epoch-based interruption can only
677 /// serve as a simple external-interruption mechanism.
678 ///
679 /// An initial deadline must be set before executing code by calling
680 /// [`Store::set_epoch_deadline`](crate::Store::set_epoch_deadline). If this
681 /// deadline is not configured then wasm will immediately trap.
682 ///
683 /// ## Interaction with blocking host calls
684 ///
685 /// Epochs (and fuel) do not assist in handling WebAssembly code blocked in
686 /// a call to the host. For example if the WebAssembly function calls
687 /// `wasi:io/poll.poll` to sleep epochs will not assist in waking this up or
688 /// timing it out. Epochs intentionally only affect running WebAssembly code
689 /// itself and it's left to the embedder to determine how best to wake up
690 /// indefinitely blocking code in the host.
691 ///
692 /// The typical solution for this, however, is to use the `async` variant of
693 /// WASI host functions. This models computation as a Rust `Future` which
694 /// means that when blocking happens the future is only suspended and
695 /// control yields back to the main event loop. This gives the embedder the
696 /// opportunity to use `tokio::time::timeout` for example on a wasm
697 /// computation and have the desired effect of cancelling a blocking
698 /// operation when a timeout expires.
699 ///
700 /// ## When to use fuel vs. epochs
701 ///
702 /// In general, epoch-based interruption results in faster
703 /// execution. This difference is sometimes significant: in some
704 /// measurements, up to 2-3x. This is because epoch-based
705 /// interruption does less work: it only watches for a global
706 /// rarely-changing counter to increment, rather than keeping a
707 /// local frequently-changing counter and comparing it to a
708 /// deadline.
709 ///
710 /// Fuel, in contrast, should be used when *deterministic*
711 /// yielding or trapping is needed. For example, if it is required
712 /// that the same function call with the same starting state will
713 /// always either complete or trap with an out-of-fuel error,
714 /// deterministically, then fuel with a fixed bound should be
715 /// used.
716 ///
717 /// **Note** Enabling this option is not compatible with the Winch compiler.
718 ///
719 /// # See Also
720 ///
721 /// - [`Engine::increment_epoch`](crate::Engine::increment_epoch)
722 /// - [`Store::set_epoch_deadline`](crate::Store::set_epoch_deadline)
723 /// - [`Store::epoch_deadline_trap`](crate::Store::epoch_deadline_trap)
724 /// - [`Store::epoch_deadline_callback`](crate::Store::epoch_deadline_callback)
725 /// - [`Store::epoch_deadline_async_yield_and_update`](crate::Store::epoch_deadline_async_yield_and_update)
    pub fn epoch_interruption(&mut self, enable: bool) -> &mut Self {
        // `Some` records an explicit setting; see the extensive doc comment
        // above for the semantics of epoch-based interruption.
        self.tunables.epoch_interruption = Some(enable);
        self
    }
730
    /// XXX: For internal fuzzing and debugging use only!
    ///
    /// Configures a "GC zeal" allocation counter. Only available in builds
    /// compiled with `cfg(gc_zeal)`; otherwise an error is returned.
    #[doc(hidden)]
    pub fn gc_zeal_alloc_counter(&mut self, counter: Option<NonZeroU32>) -> Result<&mut Self> {
        // Without `cfg(gc_zeal)` the tunable does not exist, so report an
        // error rather than silently ignoring the request.
        #[cfg(not(gc_zeal))]
        {
            let _ = counter;
            bail!(
                "cannot set `gc_zeal_alloc_counter` because Wasmtime was not built with `cfg(gc_zeal)`"
            );
        }

        #[cfg(gc_zeal)]
        {
            self.tunables.gc_zeal_alloc_counter = Some(counter);
            Ok(self)
        }
    }
748
    /// Configures the maximum amount of stack space available for
    /// executing WebAssembly code.
    ///
    /// WebAssembly has well-defined semantics on stack overflow. This is
    /// intended to be a knob which can help configure how much stack space
    /// wasm execution is allowed to consume. Note that the number here is not
    /// super-precise, but rather wasm will take at most "pretty close to this
    /// much" stack space.
    ///
    /// If a wasm call (or series of nested wasm calls) takes more stack space
    /// than the `size` specified then a stack overflow trap will be raised.
    ///
    /// Caveat: this knob only limits the stack space consumed by wasm code.
    /// More importantly, it does not ensure that this much stack space is
    /// available on the calling thread stack. Exhausting the thread stack
    /// typically leads to an **abort** of the process.
    ///
    /// Here are some examples of how that could happen:
    ///
    /// - Let's assume this option is set to 2 MiB and then a thread that has
    ///   a stack with 512 KiB left.
    ///
    ///   If wasm code consumes more than 512 KiB then the process will be aborted.
    ///
    /// - Assuming the same conditions, but this time wasm code does not consume
    ///   any stack but calls into a host function. The host function consumes
    ///   more than 512 KiB of stack space. The process will be aborted.
    ///
    /// There's another gotcha related to recursive calling into wasm: the stack
    /// space consumed by a host function is counted towards this limit. The
    /// host functions are not prevented from consuming more than this limit.
    /// However, if the host function that used more than this limit and called
    /// back into wasm, then the execution will trap immediately because of
    /// stack overflow.
    ///
    /// When the `async` feature is enabled, this value cannot exceed the
    /// `async_stack_size` option. Be careful not to set this value too close
    /// to `async_stack_size` as doing so may limit how much stack space
    /// is available for host functions.
    ///
    /// By default this option is 512 KiB.
    ///
    /// # Errors
    ///
    /// The `Engine::new` method will fail if the `size` specified here is
    /// either 0 or larger than the [`Config::async_stack_size`] configuration.
    pub fn max_wasm_stack(&mut self, size: usize) -> &mut Self {
        // Not validated here: `Engine::new` rejects 0 or values larger than
        // `async_stack_size` (see "# Errors" above).
        self.max_wasm_stack = size;
        self
    }
799
    /// Configures the size of the stacks used for asynchronous execution.
    ///
    /// This setting configures the size of the stacks that are allocated for
    /// asynchronous execution. The value cannot be less than `max_wasm_stack`.
    ///
    /// The amount of stack space guaranteed for host functions is
    /// `async_stack_size - max_wasm_stack`, so take care not to set these two values
    /// close to one another; doing so may cause host functions to overflow the
    /// stack and abort the process.
    ///
    /// By default this option is 2 MiB.
    ///
    /// # Errors
    ///
    /// The `Engine::new` method will fail if the value for this option is
    /// smaller than the [`Config::max_wasm_stack`] option.
    #[cfg(any(feature = "async", feature = "stack-switching"))]
    pub fn async_stack_size(&mut self, size: usize) -> &mut Self {
        // Checked against `max_wasm_stack` when the engine is built, not here.
        self.async_stack_size = size;
        self
    }
821
    /// Configures whether or not stacks used for async futures are zeroed
    /// before (re)use.
    ///
    /// When the [`call_async`] variant of calling WebAssembly is used
    /// then Wasmtime will create a separate runtime execution stack for each
    /// future produced by [`call_async`]. By default upon allocation, depending
    /// on the platform, these stacks might be filled with uninitialized
    /// memory. This is safe and correct because, modulo bugs in Wasmtime,
    /// compiled Wasm code will never read from a stack slot before it
    /// initializes the stack slot.
    ///
    /// However, as a defense-in-depth mechanism, you may configure Wasmtime to
    /// ensure that these stacks are zeroed before they are used. Notably, if
    /// you are using the pooling allocator, stacks can be pooled and reused
    /// across different Wasm guests; ensuring that stacks are zeroed can
    /// prevent data leakage between Wasm guests even in the face of potential
    /// read-of-stack-slot-before-initialization bugs in Wasmtime's compiler.
    ///
    /// Stack zeroing can be a costly operation in highly concurrent
    /// environments due to modifications of the virtual address space requiring
    /// process-wide synchronization. It can also be costly in `no-std`
    /// environments that must manually zero memory, and cannot rely on an OS
    /// and virtual memory to provide zeroed pages.
    ///
    /// This option defaults to `false`.
    ///
    /// [`call_async`]: crate::TypedFunc::call_async
    #[cfg(feature = "async")]
    pub fn async_stack_zeroing(&mut self, enable: bool) -> &mut Self {
        // Plain flag; the stack allocator consults it when handing out stacks.
        self.async_stack_zeroing = enable;
        self
    }
854
855 /// Explicitly enables (and un-disables) a given set of [`WasmFeatures`].
856 ///
857 /// Note: this is a low-level method that does not necessarily imply that
858 /// wasmtime _supports_ a feature. It should only be used to _disable_
859 /// features that callers want to be rejected by the parser or _enable_
860 /// features callers are certain that the current configuration of wasmtime
861 /// supports.
862 ///
863 /// Feature validation is deferred until an engine is being built, thus by
864 /// enabling features here a caller may cause
865 /// [`Engine::new`](crate::Engine::new) to fail later, if the feature
866 /// configuration isn't supported.
867 pub fn wasm_features(&mut self, flag: WasmFeatures, enable: bool) -> &mut Self {
868 self.enabled_features.set(flag, enable);
869 self.disabled_features.set(flag, !enable);
870 self
871 }
872
873 /// Configures whether the WebAssembly tail calls proposal will be enabled
874 /// for compilation or not.
875 ///
876 /// The [WebAssembly tail calls proposal] introduces the `return_call` and
877 /// `return_call_indirect` instructions. These instructions allow for Wasm
878 /// programs to implement some recursive algorithms with *O(1)* stack space
879 /// usage.
880 ///
881 /// This is `true` by default except when the Winch compiler is enabled.
882 ///
883 /// [WebAssembly tail calls proposal]: https://github.com/WebAssembly/tail-call
884 pub fn wasm_tail_call(&mut self, enable: bool) -> &mut Self {
885 self.wasm_features(WasmFeatures::TAIL_CALL, enable);
886 self
887 }
888
889 /// Configures whether the WebAssembly custom-page-sizes proposal will be
890 /// enabled for compilation or not.
891 ///
892 /// The [WebAssembly custom-page-sizes proposal] allows a memory to
893 /// customize its page sizes. By default, Wasm page sizes are 64KiB
894 /// large. This proposal allows the memory to opt into smaller page sizes
895 /// instead, allowing Wasm to run in environments with less than 64KiB RAM
896 /// available, for example.
897 ///
898 /// Note that the page size is part of the memory's type, and because
899 /// different memories may have different types, they may also have
900 /// different page sizes.
901 ///
902 /// Currently the only valid page sizes are 64KiB (the default) and 1
903 /// byte. Future extensions may relax this constraint and allow all powers
904 /// of two.
905 ///
906 /// Support for this proposal is disabled by default.
907 ///
908 /// [WebAssembly custom-page-sizes proposal]: https://github.com/WebAssembly/custom-page-sizes
909 pub fn wasm_custom_page_sizes(&mut self, enable: bool) -> &mut Self {
910 self.wasm_features(WasmFeatures::CUSTOM_PAGE_SIZES, enable);
911 self
912 }
913
914 /// Configures whether the WebAssembly [threads] proposal will be enabled
915 /// for compilation.
916 ///
917 /// This feature gates items such as shared memories and atomic
918 /// instructions. Note that the threads feature depends on the bulk memory
919 /// feature, which is enabled by default. Additionally note that while the
920 /// wasm feature is called "threads" it does not actually include the
921 /// ability to spawn threads. Spawning threads is part of the [wasi-threads]
922 /// proposal which is a separately gated feature in Wasmtime.
923 ///
924 /// Embeddings of Wasmtime are able to build their own custom threading
925 /// scheme on top of the core wasm threads proposal, however.
926 ///
927 /// The default value for this option is whether the `threads`
928 /// crate feature of Wasmtime is enabled or not. By default this crate
929 /// feature is enabled.
930 ///
931 /// [threads]: https://github.com/webassembly/threads
932 /// [wasi-threads]: https://github.com/webassembly/wasi-threads
933 #[cfg(feature = "threads")]
934 pub fn wasm_threads(&mut self, enable: bool) -> &mut Self {
935 self.wasm_features(WasmFeatures::THREADS, enable);
936 self
937 }
938
939 /// Configures whether the WebAssembly [shared-everything-threads] proposal
940 /// will be enabled for compilation.
941 ///
942 /// This feature gates extended use of the `shared` attribute on items other
943 /// than memories, extra atomic instructions, and new component model
944 /// intrinsics for spawning threads. It depends on the
945 /// [`wasm_threads`][Self::wasm_threads] being enabled.
946 ///
947 /// [shared-everything-threads]:
948 /// https://github.com/webassembly/shared-everything-threads
949 pub fn wasm_shared_everything_threads(&mut self, enable: bool) -> &mut Self {
950 self.wasm_features(WasmFeatures::SHARED_EVERYTHING_THREADS, enable);
951 self
952 }
953
954 /// Configures whether the [WebAssembly reference types proposal][proposal]
955 /// will be enabled for compilation.
956 ///
957 /// This feature gates items such as the `externref` and `funcref` types as
958 /// well as allowing a module to define multiple tables.
959 ///
960 /// Note that the reference types proposal depends on the bulk memory proposal.
961 ///
962 /// This feature is `true` by default.
963 ///
964 /// # Errors
965 ///
966 /// The validation of this feature are deferred until the engine is being built,
967 /// and thus may cause `Engine::new` fail if the `bulk_memory` feature is disabled.
968 ///
969 /// [proposal]: https://github.com/webassembly/reference-types
970 #[cfg(feature = "gc")]
971 pub fn wasm_reference_types(&mut self, enable: bool) -> &mut Self {
972 self.wasm_features(WasmFeatures::REFERENCE_TYPES, enable);
973 self
974 }
975
976 /// Configures whether the [WebAssembly function references
977 /// proposal][proposal] will be enabled for compilation.
978 ///
979 /// This feature gates non-nullable reference types, function reference
980 /// types, `call_ref`, `ref.func`, and non-nullable reference related
981 /// instructions.
982 ///
983 /// Note that the function references proposal depends on the reference
984 /// types proposal.
985 ///
986 /// This feature is `false` by default.
987 ///
988 /// [proposal]: https://github.com/WebAssembly/function-references
989 #[cfg(feature = "gc")]
990 pub fn wasm_function_references(&mut self, enable: bool) -> &mut Self {
991 self.wasm_features(WasmFeatures::FUNCTION_REFERENCES, enable);
992 self
993 }
994
995 /// Configures whether the [WebAssembly wide-arithmetic][proposal] will be
996 /// enabled for compilation.
997 ///
998 /// This feature is `false` by default.
999 ///
1000 /// [proposal]: https://github.com/WebAssembly/wide-arithmetic
1001 pub fn wasm_wide_arithmetic(&mut self, enable: bool) -> &mut Self {
1002 self.wasm_features(WasmFeatures::WIDE_ARITHMETIC, enable);
1003 self
1004 }
1005
1006 /// Configures whether the [WebAssembly Garbage Collection
1007 /// proposal][proposal] will be enabled for compilation.
1008 ///
1009 /// This feature gates `struct` and `array` type definitions and references,
1010 /// the `i31ref` type, and all related instructions.
1011 ///
1012 /// Note that the function references proposal depends on the typed function
1013 /// references proposal.
1014 ///
1015 /// This feature is `false` by default.
1016 ///
1017 /// **Warning: Wasmtime's implementation of the GC proposal is still in
1018 /// progress and generally not ready for primetime.**
1019 ///
1020 /// [proposal]: https://github.com/WebAssembly/gc
1021 #[cfg(feature = "gc")]
1022 pub fn wasm_gc(&mut self, enable: bool) -> &mut Self {
1023 self.wasm_features(WasmFeatures::GC, enable);
1024 self
1025 }
1026
1027 /// Configures whether the WebAssembly SIMD proposal will be
1028 /// enabled for compilation.
1029 ///
1030 /// The [WebAssembly SIMD proposal][proposal]. This feature gates items such
1031 /// as the `v128` type and all of its operators being in a module. Note that
1032 /// this does not enable the [relaxed simd proposal].
1033 ///
1034 /// **Note**
1035 ///
1036 /// On x86_64 platforms the base CPU feature requirement for SIMD
1037 /// is SSE2 for the Cranelift compiler and AVX for the Winch compiler.
1038 ///
1039 /// This is `true` by default.
1040 ///
1041 /// [proposal]: https://github.com/webassembly/simd
1042 /// [relaxed simd proposal]: https://github.com/WebAssembly/relaxed-simd
1043 pub fn wasm_simd(&mut self, enable: bool) -> &mut Self {
1044 self.wasm_features(WasmFeatures::SIMD, enable);
1045 self
1046 }
1047
1048 /// Configures whether the WebAssembly Relaxed SIMD proposal will be
1049 /// enabled for compilation.
1050 ///
1051 /// The relaxed SIMD proposal adds new instructions to WebAssembly which,
1052 /// for some specific inputs, are allowed to produce different results on
1053 /// different hosts. More-or-less this proposal enables exposing
1054 /// platform-specific semantics of SIMD instructions in a controlled
1055 /// fashion to a WebAssembly program. From an embedder's perspective this
1056 /// means that WebAssembly programs may execute differently depending on
1057 /// whether the host is x86_64 or AArch64, for example.
1058 ///
1059 /// By default Wasmtime lowers relaxed SIMD instructions to the fastest
1060 /// lowering for the platform it's running on. This means that, by default,
1061 /// some relaxed SIMD instructions may have different results for the same
1062 /// inputs across x86_64 and AArch64. This behavior can be disabled through
1063 /// the [`Config::relaxed_simd_deterministic`] option which will force
1064 /// deterministic behavior across all platforms, as classified by the
1065 /// specification, at the cost of performance.
1066 ///
1067 /// This is `true` by default.
1068 ///
1069 /// [proposal]: https://github.com/webassembly/relaxed-simd
1070 pub fn wasm_relaxed_simd(&mut self, enable: bool) -> &mut Self {
1071 self.wasm_features(WasmFeatures::RELAXED_SIMD, enable);
1072 self
1073 }
1074
    /// This option can be used to control the behavior of the [relaxed SIMD
    /// proposal's][proposal] instructions.
    ///
    /// The relaxed SIMD proposal introduces instructions that are allowed to
    /// have different behavior on different architectures, primarily to afford
    /// an efficient implementation on all architectures. This means, however,
    /// that the same module may execute differently on one host than another,
    /// which typically is not otherwise the case. This option is provided to
    /// force Wasmtime to generate deterministic code for all relaxed simd
    /// instructions, at the cost of performance, for all architectures. When
    /// this option is enabled then the deterministic behavior of all
    /// instructions in the relaxed SIMD proposal is selected.
    ///
    /// This is `false` by default.
    ///
    /// [proposal]: https://github.com/webassembly/relaxed-simd
    pub fn relaxed_simd_deterministic(&mut self, enable: bool) -> &mut Self {
        // `Some(..)` records an explicit override of the tunable's default.
        self.tunables.relaxed_simd_deterministic = Some(enable);
        self
    }
1095
1096 /// Configures whether the [WebAssembly bulk memory operations
1097 /// proposal][proposal] will be enabled for compilation.
1098 ///
1099 /// This feature gates items such as the `memory.copy` instruction, passive
1100 /// data/table segments, etc, being in a module.
1101 ///
1102 /// This is `true` by default.
1103 ///
1104 /// Feature `reference_types`, which is also `true` by default, requires
1105 /// this feature to be enabled. Thus disabling this feature must also disable
1106 /// `reference_types` as well using [`wasm_reference_types`](crate::Config::wasm_reference_types).
1107 ///
1108 /// # Errors
1109 ///
1110 /// Disabling this feature without disabling `reference_types` will cause
1111 /// `Engine::new` to fail.
1112 ///
1113 /// [proposal]: https://github.com/webassembly/bulk-memory-operations
1114 pub fn wasm_bulk_memory(&mut self, enable: bool) -> &mut Self {
1115 self.wasm_features(WasmFeatures::BULK_MEMORY, enable);
1116 self
1117 }
1118
1119 /// Configures whether the WebAssembly multi-value [proposal] will
1120 /// be enabled for compilation.
1121 ///
1122 /// This feature gates functions and blocks returning multiple values in a
1123 /// module, for example.
1124 ///
1125 /// This is `true` by default.
1126 ///
1127 /// [proposal]: https://github.com/webassembly/multi-value
1128 pub fn wasm_multi_value(&mut self, enable: bool) -> &mut Self {
1129 self.wasm_features(WasmFeatures::MULTI_VALUE, enable);
1130 self
1131 }
1132
1133 /// Configures whether the WebAssembly multi-memory [proposal] will
1134 /// be enabled for compilation.
1135 ///
1136 /// This feature gates modules having more than one linear memory
1137 /// declaration or import.
1138 ///
1139 /// This is `true` by default.
1140 ///
1141 /// [proposal]: https://github.com/webassembly/multi-memory
1142 pub fn wasm_multi_memory(&mut self, enable: bool) -> &mut Self {
1143 self.wasm_features(WasmFeatures::MULTI_MEMORY, enable);
1144 self
1145 }
1146
1147 /// Configures whether the WebAssembly memory64 [proposal] will
1148 /// be enabled for compilation.
1149 ///
1150 /// Note that this the upstream specification is not finalized and Wasmtime
1151 /// may also have bugs for this feature since it hasn't been exercised
1152 /// much.
1153 ///
1154 /// This is `false` by default.
1155 ///
1156 /// [proposal]: https://github.com/webassembly/memory64
1157 pub fn wasm_memory64(&mut self, enable: bool) -> &mut Self {
1158 self.wasm_features(WasmFeatures::MEMORY64, enable);
1159 self
1160 }
1161
1162 /// Configures whether the WebAssembly extended-const [proposal] will
1163 /// be enabled for compilation.
1164 ///
1165 /// This is `true` by default.
1166 ///
1167 /// [proposal]: https://github.com/webassembly/extended-const
1168 pub fn wasm_extended_const(&mut self, enable: bool) -> &mut Self {
1169 self.wasm_features(WasmFeatures::EXTENDED_CONST, enable);
1170 self
1171 }
1172
1173 /// Configures whether the [WebAssembly stack switching
1174 /// proposal][proposal] will be enabled for compilation.
1175 ///
1176 /// This feature gates the use of control tags.
1177 ///
1178 /// This feature depends on the `function_reference_types` and
1179 /// `exceptions` features.
1180 ///
1181 /// This feature is `false` by default.
1182 ///
1183 /// # Errors
1184 ///
1185 /// [proposal]: https://github.com/webassembly/stack-switching
1186 pub fn wasm_stack_switching(&mut self, enable: bool) -> &mut Self {
1187 self.wasm_features(WasmFeatures::STACK_SWITCHING, enable);
1188 self
1189 }
1190
1191 /// Configures whether the WebAssembly component-model [proposal] will
1192 /// be enabled for compilation.
1193 ///
1194 /// This flag can be used to blanket disable all components within Wasmtime.
1195 /// Otherwise usage of components requires statically using
1196 /// [`Component`](crate::component::Component) instead of
1197 /// [`Module`](crate::Module) for example anyway.
1198 ///
1199 /// The default value for this option is whether the `component-model`
1200 /// crate feature of Wasmtime is enabled or not. By default this crate
1201 /// feature is enabled.
1202 ///
1203 /// [proposal]: https://github.com/webassembly/component-model
1204 #[cfg(feature = "component-model")]
1205 pub fn wasm_component_model(&mut self, enable: bool) -> &mut Self {
1206 self.wasm_features(WasmFeatures::COMPONENT_MODEL, enable);
1207 self
1208 }
1209
1210 /// Configures whether components support the async ABI [proposal] for
1211 /// lifting and lowering functions, as well as `stream`, `future`, and
1212 /// `error-context` types.
1213 ///
1214 /// Please note that Wasmtime's support for this feature is _very_
1215 /// incomplete.
1216 ///
1217 /// [proposal]:
1218 /// https://github.com/WebAssembly/component-model/blob/main/design/mvp/Concurrency.md
1219 #[cfg(feature = "component-model-async")]
1220 pub fn wasm_component_model_async(&mut self, enable: bool) -> &mut Self {
1221 self.wasm_features(WasmFeatures::CM_ASYNC, enable);
1222 self
1223 }
1224
1225 /// This corresponds to the 🚝 emoji in the component model specification.
1226 ///
1227 /// Please note that Wasmtime's support for this feature is _very_
1228 /// incomplete.
1229 ///
1230 /// [proposal]:
1231 /// https://github.com/WebAssembly/component-model/blob/main/design/mvp/Concurrency.md
1232 #[cfg(feature = "component-model-async")]
1233 pub fn wasm_component_model_async_builtins(&mut self, enable: bool) -> &mut Self {
1234 self.wasm_features(WasmFeatures::CM_ASYNC_BUILTINS, enable);
1235 self
1236 }
1237
1238 /// This corresponds to the 🚟 emoji in the component model specification.
1239 ///
1240 /// Please note that Wasmtime's support for this feature is _very_
1241 /// incomplete.
1242 ///
1243 /// [proposal]: https://github.com/WebAssembly/component-model/blob/main/design/mvp/Concurrency.md
1244 #[cfg(feature = "component-model-async")]
1245 pub fn wasm_component_model_async_stackful(&mut self, enable: bool) -> &mut Self {
1246 self.wasm_features(WasmFeatures::CM_ASYNC_STACKFUL, enable);
1247 self
1248 }
1249
1250 /// This corresponds to the 🧵 emoji in the component model specification.
1251 ///
1252 /// Please note that Wasmtime's support for this feature is _very_
1253 /// incomplete.
1254 ///
1255 /// [proposal]:
1256 /// https://github.com/WebAssembly/component-model/pull/557
1257 #[cfg(feature = "component-model-async")]
1258 pub fn wasm_component_model_threading(&mut self, enable: bool) -> &mut Self {
1259 self.wasm_features(WasmFeatures::CM_THREADING, enable);
1260 self
1261 }
1262
1263 /// This corresponds to the 📝 emoji in the component model specification.
1264 ///
1265 /// Please note that Wasmtime's support for this feature is _very_
1266 /// incomplete.
1267 ///
1268 /// [proposal]: https://github.com/WebAssembly/component-model/blob/main/design/mvp/Concurrency.md
1269 #[cfg(feature = "component-model")]
1270 pub fn wasm_component_model_error_context(&mut self, enable: bool) -> &mut Self {
1271 self.wasm_features(WasmFeatures::CM_ERROR_CONTEXT, enable);
1272 self
1273 }
1274
1275 /// Configures whether the [GC extension to the component-model
1276 /// proposal][proposal] is enabled or not.
1277 ///
1278 /// This corresponds to the 🛸 emoji in the component model specification.
1279 ///
1280 /// Please note that Wasmtime's support for this feature is _very_
1281 /// incomplete.
1282 ///
1283 /// [proposal]: https://github.com/WebAssembly/component-model/issues/525
1284 #[cfg(feature = "component-model")]
1285 pub fn wasm_component_model_gc(&mut self, enable: bool) -> &mut Self {
1286 self.wasm_features(WasmFeatures::CM_GC, enable);
1287 self
1288 }
1289
1290 /// Configures whether the component model map type is enabled or not.
1291 ///
1292 /// This is part of the component model specification and enables the
1293 /// `map<k, v>` type in WIT and the component binary format.
1294 #[cfg(feature = "component-model")]
1295 pub fn wasm_component_model_map(&mut self, enable: bool) -> &mut Self {
1296 self.wasm_features(WasmFeatures::CM_MAP, enable);
1297 self
1298 }
1299
1300 /// This corresponds to the 🔧 emoji in the component model specification.
1301 ///
1302 /// Please note that Wasmtime's support for this feature is _very_
1303 /// incomplete.
1304 #[cfg(feature = "component-model")]
1305 pub fn wasm_component_model_fixed_length_lists(&mut self, enable: bool) -> &mut Self {
1306 self.wasm_features(WasmFeatures::CM_FIXED_LENGTH_LISTS, enable);
1307 self
1308 }
1309
1310 /// Configures whether the [Exception-handling proposal][proposal] is enabled or not.
1311 ///
1312 /// [proposal]: https://github.com/WebAssembly/exception-handling
1313 #[cfg(feature = "gc")]
1314 pub fn wasm_exceptions(&mut self, enable: bool) -> &mut Self {
1315 self.wasm_features(WasmFeatures::EXCEPTIONS, enable);
1316 self
1317 }
1318
1319 #[doc(hidden)] // FIXME(#3427) - if/when implemented then un-hide this
1320 #[deprecated = "This configuration option only exists for internal \
1321 usage with the spec testsuite. It may be removed at \
1322 any time and without warning. Do not rely on it!"]
1323 pub fn wasm_legacy_exceptions(&mut self, enable: bool) -> &mut Self {
1324 self.wasm_features(WasmFeatures::LEGACY_EXCEPTIONS, enable);
1325 self
1326 }
1327
1328 /// Configures which compilation strategy will be used for wasm modules.
1329 ///
1330 /// This method can be used to configure which compiler is used for wasm
1331 /// modules, and for more documentation consult the [`Strategy`] enumeration
1332 /// and its documentation.
1333 ///
1334 /// The default value for this is `Strategy::Auto`.
1335 ///
1336 /// # Panics
1337 ///
1338 /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
1339 #[cfg(any(feature = "cranelift", feature = "winch"))]
1340 pub fn strategy(&mut self, strategy: Strategy) -> &mut Self {
1341 self.compiler_config_mut().strategy = strategy.not_auto();
1342 self
1343 }
1344
    /// Configures which garbage collector will be used for Wasm modules.
    ///
    /// This method can be used to configure which garbage collector
    /// implementation is used for Wasm modules. For more documentation, consult
    /// the [`Collector`] enumeration and its documentation.
    ///
    /// The default value for this is `Collector::Auto`.
    #[cfg(feature = "gc")]
    pub fn collector(&mut self, collector: Collector) -> &mut Self {
        // Plain setter; the choice is acted upon when the engine is built.
        self.collector = collector;
        self
    }
1357
    /// Creates a default profiler based on the profiling strategy chosen.
    ///
    /// Profiler creation calls the type's default initializer where the purpose is
    /// really just to put in place the type used for profiling.
    ///
    /// Some [`ProfilingStrategy`] require specific platforms or particular feature
    /// to be enabled, such as `ProfilingStrategy::JitDump` requires the `jitdump`
    /// feature.
    ///
    /// # Errors
    ///
    /// The validation of this field is deferred until the engine is being built, and thus may
    /// cause `Engine::new` fail if the required feature is disabled, or the platform is not
    /// supported.
    pub fn profiler(&mut self, profile: ProfilingStrategy) -> &mut Self {
        // Only the strategy is recorded here; platform/feature validation is
        // deferred to `Engine::new` (see "# Errors" above).
        self.profiling_strategy = profile;
        self
    }
1376
1377 /// Configures whether the debug verifier of Cranelift is enabled or not.
1378 ///
1379 /// When Cranelift is used as a code generation backend this will configure
1380 /// it to have the `enable_verifier` flag which will enable a number of debug
1381 /// checks inside of Cranelift. This is largely only useful for the
1382 /// developers of wasmtime itself.
1383 ///
1384 /// The default value for this is `false`
1385 ///
1386 /// # Panics
1387 ///
1388 /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
1389 #[cfg(any(feature = "cranelift", feature = "winch"))]
1390 pub fn cranelift_debug_verifier(&mut self, enable: bool) -> &mut Self {
1391 let val = if enable { "true" } else { "false" };
1392 self.compiler_config_mut()
1393 .settings
1394 .insert("enable_verifier".to_string(), val.to_string());
1395 self
1396 }
1397
1398 /// Configures whether extra debug checks are inserted into
1399 /// Wasmtime-generated code by Cranelift.
1400 ///
1401 /// The default value for this is `false`
1402 ///
1403 /// # Panics
1404 ///
1405 /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
1406 #[cfg(any(feature = "cranelift", feature = "winch"))]
1407 pub fn cranelift_wasmtime_debug_checks(&mut self, enable: bool) -> &mut Self {
1408 unsafe { self.cranelift_flag_set("wasmtime_debug_checks", &enable.to_string()) }
1409 }
1410
1411 /// Configures the Cranelift code generator optimization level.
1412 ///
1413 /// When the Cranelift code generator is used you can configure the
1414 /// optimization level used for generated code in a few various ways. For
1415 /// more information see the documentation of [`OptLevel`].
1416 ///
1417 /// The default value for this is `OptLevel::Speed`.
1418 ///
1419 /// # Panics
1420 ///
1421 /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
1422 #[cfg(any(feature = "cranelift", feature = "winch"))]
1423 pub fn cranelift_opt_level(&mut self, level: OptLevel) -> &mut Self {
1424 let val = match level {
1425 OptLevel::None => "none",
1426 OptLevel::Speed => "speed",
1427 OptLevel::SpeedAndSize => "speed_and_size",
1428 };
1429 self.compiler_config_mut()
1430 .settings
1431 .insert("opt_level".to_string(), val.to_string());
1432 self
1433 }
1434
1435 /// Configures the regalloc algorithm used by the Cranelift code generator.
1436 ///
1437 /// Cranelift can select any of several register allocator algorithms. Each
1438 /// of these algorithms generates correct code, but they represent different
1439 /// tradeoffs between compile speed (how expensive the compilation process
1440 /// is) and run-time speed (how fast the generated code runs).
1441 /// For more information see the documentation of [`RegallocAlgorithm`].
1442 ///
1443 /// The default value for this is `RegallocAlgorithm::Backtracking`.
1444 ///
1445 /// # Panics
1446 ///
1447 /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
1448 #[cfg(any(feature = "cranelift", feature = "winch"))]
1449 pub fn cranelift_regalloc_algorithm(&mut self, algo: RegallocAlgorithm) -> &mut Self {
1450 let val = match algo {
1451 RegallocAlgorithm::Backtracking => "backtracking",
1452 RegallocAlgorithm::SinglePass => "single_pass",
1453 };
1454 self.compiler_config_mut()
1455 .settings
1456 .insert("regalloc_algorithm".to_string(), val.to_string());
1457 self
1458 }
1459
1460 /// Configures whether Cranelift should perform a NaN-canonicalization pass.
1461 ///
1462 /// When Cranelift is used as a code generation backend this will configure
1463 /// it to replace NaNs with a single canonical value. This is useful for
1464 /// users requiring entirely deterministic WebAssembly computation. This is
1465 /// not required by the WebAssembly spec, so it is not enabled by default.
1466 ///
1467 /// Note that this option affects not only WebAssembly's `f32` and `f64`
1468 /// types but additionally the `v128` type. This option will cause
1469 /// operations using any of these types to have extra checks placed after
1470 /// them to normalize NaN values as needed.
1471 ///
1472 /// The default value for this is `false`
1473 ///
1474 /// # Panics
1475 ///
1476 /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
1477 #[cfg(any(feature = "cranelift", feature = "winch"))]
1478 pub fn cranelift_nan_canonicalization(&mut self, enable: bool) -> &mut Self {
1479 let val = if enable { "true" } else { "false" };
1480 self.compiler_config_mut()
1481 .settings
1482 .insert("enable_nan_canonicalization".to_string(), val.to_string());
1483 self
1484 }
1485
1486 /// Allows setting a Cranelift boolean flag or preset. This allows
1487 /// fine-tuning of Cranelift settings.
1488 ///
1489 /// Since Cranelift flags may be unstable, this method should not be considered to be stable
1490 /// either; other `Config` functions should be preferred for stability.
1491 ///
1492 /// # Safety
1493 ///
1494 /// This is marked as unsafe, because setting the wrong flag might break invariants,
1495 /// resulting in execution hazards.
1496 ///
1497 /// # Errors
1498 ///
1499 /// The validation of the flags are deferred until the engine is being built, and thus may
1500 /// cause `Engine::new` fail if the flag's name does not exist, or the value is not appropriate
1501 /// for the flag type.
1502 ///
1503 /// # Panics
1504 ///
1505 /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
1506 #[cfg(any(feature = "cranelift", feature = "winch"))]
1507 pub unsafe fn cranelift_flag_enable(&mut self, flag: &str) -> &mut Self {
1508 self.compiler_config_mut().flags.insert(flag.to_string());
1509 self
1510 }
1511
    /// Allows setting another Cranelift flag defined by a flag name and value. This allows
    /// fine-tuning of Cranelift settings.
    ///
    /// Since Cranelift flags may be unstable, this method should not be considered to be stable
    /// either; other `Config` functions should be preferred for stability.
    ///
    /// # Safety
    ///
    /// This is marked as unsafe, because setting the wrong flag might break invariants,
    /// resulting in execution hazards.
    ///
    /// # Errors
    ///
    /// The validation of the flags is deferred until the engine is being built, and thus may
    /// cause `Engine::new` to fail if the flag's name does not exist, or the value is
    /// incompatible with other settings.
    ///
    /// For example, feature `wasm_backtrace` will set `unwind_info` to `true`, but if it's
    /// manually set to false then it will fail.
    ///
    /// # Panics
    ///
    /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
    #[cfg(any(feature = "cranelift", feature = "winch"))]
    pub unsafe fn cranelift_flag_set(&mut self, name: &str, value: &str) -> &mut Self {
        self.compiler_config_mut()
            .settings
            .insert(name.to_string(), value.to_string());
        self
    }
1542
1543 /// Set a custom [`Cache`].
1544 ///
1545 /// To load a cache configuration from a file, use [`Cache::from_file`]. Otherwise, you can
1546 /// create a new cache config using [`CacheConfig::new`] and passing that to [`Cache::new`].
1547 ///
1548 /// If you want to disable the cache, you can call this method with `None`.
1549 ///
1550 /// By default, new configs do not have caching enabled.
1551 /// Every call to [`Module::new(my_wasm)`][crate::Module::new] will recompile `my_wasm`,
1552 /// even when it is unchanged, unless an enabled `CacheConfig` is provided.
1553 ///
1554 /// This method is only available when the `cache` feature of this crate is
1555 /// enabled.
1556 ///
1557 /// [docs]: https://bytecodealliance.github.io/wasmtime/cli-cache.html
1558 #[cfg(feature = "cache")]
1559 pub fn cache(&mut self, cache: Option<Cache>) -> &mut Self {
1560 self.cache = cache;
1561 self
1562 }
1563
1564 /// Sets a custom memory creator.
1565 ///
1566 /// Custom memory creators are used when creating host `Memory` objects or when
1567 /// creating instance linear memories for the on-demand instance allocation strategy.
1568 #[cfg(feature = "runtime")]
1569 pub fn with_host_memory(&mut self, mem_creator: Arc<dyn MemoryCreator>) -> &mut Self {
1570 self.mem_creator = Some(Arc::new(MemoryCreatorProxy(mem_creator)));
1571 self
1572 }
1573
    /// Sets a custom stack creator.
    ///
    /// Custom stack creators are used when creating async instance stacks for
    /// the on-demand instance allocation strategy.
    #[cfg(feature = "async")]
    pub fn with_host_stack(&mut self, stack_creator: Arc<dyn StackCreator>) -> &mut Self {
        self.stack_creator = Some(Arc::new(StackCreatorProxy(stack_creator)));
        self
    }
1583
1584 /// Sets a custom executable-memory publisher.
1585 ///
1586 /// Custom executable-memory publishers are hooks that allow
1587 /// Wasmtime to make certain regions of memory executable when
1588 /// loading precompiled modules or compiling new modules
1589 /// in-process. In most modern operating systems, memory allocated
1590 /// for heap usage is readable and writable by default but not
1591 /// executable. To jump to machine code stored in that memory, we
1592 /// need to make it executable. For security reasons, we usually
1593 /// also make it read-only at the same time, so the executing code
1594 /// can't be modified later.
1595 ///
1596 /// By default, Wasmtime will use the appropriate system calls on
1597 /// the host platform for this work. However, it also allows
1598 /// plugging in a custom implementation via this configuration
1599 /// option. This may be useful on custom or `no_std` platforms,
1600 /// for example, especially where virtual memory is not otherwise
1601 /// used by Wasmtime (no `signals-and-traps` feature).
1602 #[cfg(feature = "runtime")]
1603 pub fn with_custom_code_memory(
1604 &mut self,
1605 custom_code_memory: Option<Arc<dyn CustomCodeMemory>>,
1606 ) -> &mut Self {
1607 self.custom_code_memory = custom_code_memory;
1608 self
1609 }
1610
1611 /// Sets the instance allocation strategy to use.
1612 ///
1613 /// This is notably used in conjunction with
1614 /// [`InstanceAllocationStrategy::Pooling`] and [`PoolingAllocationConfig`].
1615 pub fn allocation_strategy(
1616 &mut self,
1617 strategy: impl Into<InstanceAllocationStrategy>,
1618 ) -> &mut Self {
1619 self.allocation_strategy = strategy.into();
1620 self
1621 }
1622
    /// Specifies the capacity of linear memories, in bytes, in their initial
    /// allocation.
    ///
    /// > Note: this value has important performance ramifications, be sure to
    /// > benchmark when setting this to a non-default value and read over this
    /// > documentation.
    ///
    /// This function will change the size of the initial memory allocation made
    /// for linear memories. This setting is only applicable when the initial
    /// size of a linear memory is below this threshold. Linear memories are
    /// allocated in the virtual address space of the host process with OS APIs
    /// such as `mmap` and this setting affects how large the allocation will
    /// be.
    ///
    /// ## Background: WebAssembly Linear Memories
    ///
    /// WebAssembly linear memories always start with a minimum size and can
    /// possibly grow up to a maximum size. The minimum size is always specified
    /// in a WebAssembly module itself and the maximum size can either be
    /// optionally specified in the module or inherently limited by the index
    /// type. For example for this module:
    ///
    /// ```wasm
    /// (module
    ///     (memory $a 4)
    ///     (memory $b 4096 4096 (pagesize 1))
    ///     (memory $c i64 10)
    /// )
    /// ```
    ///
    /// * Memory `$a` initially allocates 4 WebAssembly pages (256KiB) and can
    ///   grow up to 4GiB, the limit of the 32-bit index space.
    /// * Memory `$b` initially allocates 4096 WebAssembly pages, but in this
    ///   case its page size is 1, so it's 4096 bytes. Memory can also grow no
    ///   further meaning that it will always be 4096 bytes.
    /// * Memory `$c` is a 64-bit linear memory which starts with 640KiB of
    ///   memory and can theoretically grow up to 2^64 bytes, although most
    ///   hosts will run out of memory long before that.
    ///
    /// All operations on linear memories done by wasm are required to be
    /// in-bounds. Any access beyond the end of a linear memory is considered a
    /// trap.
    ///
    /// ## What this setting affects: Virtual Memory
    ///
    /// This setting is used to configure the behavior of the size of the linear
    /// memory allocation performed for each of these memories. For example the
    /// initial linear memory allocation looks like this:
    ///
    /// ```text
    ///            memory_reservation
    ///                    |
    ///          ◄─────────┴────────────────►
    /// ┌───────┬─────────┬──────────────────┬───────┐
    /// │ guard │ initial │ ... capacity ... │ guard │
    /// └───────┴─────────┴──────────────────┴───────┘
    ///  ◄──┬──►                              ◄──┬──►
    ///     │                                    │
    ///     │                             memory_guard_size
    ///     │
    ///     │
    ///  memory_guard_size (if guard_before_linear_memory)
    /// ```
    ///
    /// Memory in the `initial` range is accessible to the instance and can be
    /// read/written by wasm code. Memory in the `guard` regions is never
    /// accessible to wasm code and memory in `capacity` is initially
    /// inaccessible but may become accessible through `memory.grow` instructions
    /// for example.
    ///
    /// This means that this setting is the size of the initial chunk of virtual
    /// memory that a linear memory may grow into.
    ///
    /// ## What this setting affects: Runtime Speed
    ///
    /// This is a performance-sensitive setting which is taken into account
    /// during the compilation process of a WebAssembly module. For example if a
    /// 32-bit WebAssembly linear memory has a `memory_reservation` size of 4GiB
    /// then bounds checks can be elided because `capacity` will be guaranteed
    /// to be unmapped for all addressable bytes that wasm can access (modulo a
    /// few details).
    ///
    /// If `memory_reservation` was something smaller like 256KiB then that
    /// would have a much smaller impact on virtual memory but the compiled code
    /// would then need to have explicit bounds checks to ensure that
    /// loads/stores are in-bounds.
    ///
    /// The goal of this setting is to enable skipping bounds checks in most
    /// modules by default. Some situations which require explicit bounds checks
    /// though are:
    ///
    /// * When `memory_reservation` is smaller than the addressable size of the
    ///   linear memory. For example 64-bit linear memories always need
    ///   bounds checks as they can address the entire virtual address space.
    ///   For 32-bit linear memories a `memory_reservation` minimum size of 4GiB
    ///   is required to elide bounds checks.
    ///
    /// * When linear memories have a page size of 1 then bounds checks are
    ///   required. In this situation virtual memory can't be relied upon
    ///   because that operates at the host page size granularity where wasm
    ///   requires a per-byte level granularity.
    ///
    /// * Configuration settings such as [`Config::signals_based_traps`] can be
    ///   used to disable the use of signal handlers and virtual memory so
    ///   explicit bounds checks are required.
    ///
    /// * When [`Config::memory_guard_size`] is too small a bounds check may be
    ///   required. For 32-bit wasm addresses are actually 33-bit effective
    ///   addresses because loads/stores have a 32-bit static offset to add to
    ///   the dynamic 32-bit address. If the static offset is larger than the
    ///   size of the guard region then an explicit bounds check is required.
    ///
    /// ## What this setting affects: Memory Growth Behavior
    ///
    /// In addition to affecting bounds checks emitted in compiled code this
    /// setting also affects how WebAssembly linear memories are grown. The
    /// `memory.grow` instruction can be used to make a linear memory larger and
    /// this is also affected by APIs such as
    /// [`Memory::grow`](crate::Memory::grow).
    ///
    /// In these situations when the amount being grown is small enough to fit
    /// within the remaining capacity then the linear memory doesn't have to be
    /// moved at runtime. If the capacity runs out though then a new linear
    /// memory allocation must be made and the contents of linear memory is
    /// copied over.
    ///
    /// For example here's a situation where a copy happens:
    ///
    /// * The `memory_reservation` setting is configured to 128KiB.
    /// * A WebAssembly linear memory starts with a single 64KiB page.
    /// * This memory can be grown by one page to contain the full 128KiB of
    ///   memory.
    /// * If grown by one more page, though, then a 192KiB allocation must be
    ///   made and the previous 128KiB of contents are copied into the new
    ///   allocation.
    ///
    /// This growth behavior can have a significant performance impact if lots
    /// of data needs to be copied on growth. Conversely if memory growth never
    /// needs to happen because the capacity will always be large enough then
    /// optimizations can be applied to cache the base pointer of linear memory.
    ///
    /// When memory is grown then the
    /// [`Config::memory_reservation_for_growth`] is used for the new
    /// memory allocation to have memory to grow into.
    ///
    /// When using the pooling allocator via [`PoolingAllocationConfig`] then
    /// memories are never allowed to move so requests for growth are instead
    /// rejected with an error.
    ///
    /// ## When this setting is not used
    ///
    /// This setting is ignored and unused when the initial size of linear
    /// memory is larger than this threshold. For example if this setting is set
    /// to 1MiB but a wasm module requires a 2MiB minimum allocation then this
    /// setting is ignored. In this situation the minimum size of memory will be
    /// allocated along with [`Config::memory_reservation_for_growth`]
    /// after it to grow into.
    ///
    /// That means that this value can be set to zero. That can be useful in
    /// benchmarking to see the overhead of bounds checks for example.
    /// Additionally it can be used to minimize the virtual memory allocated by
    /// Wasmtime.
    ///
    /// ## Default Value
    ///
    /// The default value for this property depends on the host platform. For
    /// 64-bit platforms there's lots of address space available, so the default
    /// configured here is 4GiB. When coupled with the default size of
    /// [`Config::memory_guard_size`] this means that 32-bit WebAssembly linear
    /// memories with 64KiB page sizes will skip almost all bounds checks by
    /// default.
    ///
    /// For 32-bit platforms this value defaults to 10MiB. This means that
    /// bounds checks will be required on 32-bit platforms.
    pub fn memory_reservation(&mut self, bytes: u64) -> &mut Self {
        self.tunables.memory_reservation = Some(bytes);
        self
    }
1801
    /// Indicates whether linear memories may relocate their base pointer at
    /// runtime.
    ///
    /// WebAssembly linear memories either have a maximum size that's explicitly
    /// listed in the type of a memory or inherently limited by the index type
    /// of the memory (e.g. 4GiB for 32-bit linear memories). Depending on how
    /// the linear memory is allocated (see [`Config::memory_reservation`]) it
    /// may be necessary to move the memory in the host's virtual address space
    /// during growth. This option controls whether this movement is allowed or
    /// not.
    ///
    /// An example of a linear memory needing to move is when
    /// [`Config::memory_reservation`] is 0 then a linear memory will be
    /// allocated as the minimum size of the memory plus
    /// [`Config::memory_reservation_for_growth`]. When memory grows beyond the
    /// reservation for growth then the memory needs to be relocated.
    ///
    /// When this option is set to `false` then it can have a number of impacts
    /// on how memories work at runtime:
    ///
    /// * Modules can be compiled with static knowledge that the base pointer of
    ///   linear memory never changes to enable optimizations such as
    ///   loop invariant code motion (hoisting the base pointer out of a loop).
    ///
    /// * Memories cannot grow in excess of their original allocation. This
    ///   means that [`Config::memory_reservation`] and
    ///   [`Config::memory_reservation_for_growth`] may need tuning to ensure
    ///   the memory configuration works at runtime.
    ///
    /// The default value for this option is `true`.
    pub fn memory_may_move(&mut self, enable: bool) -> &mut Self {
        self.tunables.memory_may_move = Some(enable);
        self
    }
1836
    /// Configures the size, in bytes, of the guard region used at the end of a
    /// linear memory's address space reservation.
    ///
    /// > Note: this value has important performance ramifications, be sure to
    /// > understand what this value does before tweaking it and benchmarking.
    ///
    /// This setting controls how many bytes are guaranteed to be unmapped after
    /// the virtual memory allocation of a linear memory. When
    /// combined with sufficiently large values of
    /// [`Config::memory_reservation`] (e.g. 4GiB for 32-bit linear memories)
    /// then a guard region can be used to eliminate bounds checks in generated
    /// code.
    ///
    /// This setting additionally can be used to help deduplicate bounds checks
    /// in code that otherwise requires bounds checks. For example with a 4KiB
    /// guard region a 64-bit linear memory which accesses addresses `x+8`
    /// and `x+16` only needs to perform a single bounds check on `x`. If that
    /// bounds check passes then the offset is guaranteed to either reside in
    /// linear memory or the guard region, resulting in deterministic behavior
    /// either way.
    ///
    /// ## How big should the guard be?
    ///
    /// In general, like with configuring [`Config::memory_reservation`], you
    /// probably don't want to change this value from the defaults. Removing
    /// bounds checks is dependent on a number of factors where the size of the
    /// guard region is only one piece of the equation. Other factors include:
    ///
    /// * [`Config::memory_reservation`]
    /// * The index type of the linear memory (e.g. 32-bit or 64-bit)
    /// * The page size of the linear memory
    /// * Other settings such as [`Config::signals_based_traps`]
    ///
    /// Embeddings using virtual memory almost always want at least some guard
    /// region, but otherwise changes from the default should be profiled
    /// locally to see the performance impact.
    ///
    /// ## Default
    ///
    /// The default value for this property is 32MiB on 64-bit platforms. This
    /// allows eliminating almost all bounds checks on loads/stores with an
    /// immediate offset of less than 32MiB. On 32-bit platforms this defaults
    /// to 64KiB.
    pub fn memory_guard_size(&mut self, bytes: u64) -> &mut Self {
        self.tunables.memory_guard_size = Some(bytes);
        self
    }
1884
    /// Configures the size, in bytes, of the extra virtual memory space
    /// reserved after a linear memory is relocated.
    ///
    /// This setting is used in conjunction with [`Config::memory_reservation`]
    /// to configure what happens after a linear memory is relocated in the host
    /// address space. If the initial size of a linear memory exceeds
    /// [`Config::memory_reservation`] or if it grows beyond that size
    /// throughout its lifetime then this setting will be used.
    ///
    /// When a linear memory is relocated it will initially look like this:
    ///
    /// ```text
    ///            memory.size
    ///                 │
    ///          ◄──────┴─────►
    /// ┌───────┬──────────────┬───────┐
    /// │ guard │  accessible  │ guard │
    /// └───────┴──────────────┴───────┘
    ///                         ◄──┬──►
    ///                            │
    ///                     memory_guard_size
    /// ```
    ///
    /// where `accessible` needs to be grown but there's no more memory to grow
    /// into. A new region of the virtual address space will be allocated that
    /// looks like this:
    ///
    /// ```text
    ///                             memory_reservation_for_growth
    ///                                          │
    ///            memory.size                   │
    ///                 │                        │
    ///          ◄──────┴─────► ◄────────────────┴────────►
    /// ┌───────┬──────────────┬───────────────────────────┬───────┐
    /// │ guard │  accessible  │ .. reserved for growth .. │ guard │
    /// └───────┴──────────────┴───────────────────────────┴───────┘
    ///                                                     ◄──┬──►
    ///                                                        │
    ///                                                memory_guard_size
    /// ```
    ///
    /// This means that up to `memory_reservation_for_growth` bytes can be
    /// grown into before the entire linear memory needs to be moved again,
    /// at which point another `memory_reservation_for_growth` bytes will be
    /// appended to the size of the allocation.
    ///
    /// Note that this is a currently simple heuristic for optimizing the growth
    /// of dynamic memories, primarily implemented for the memory64 proposal
    /// where the maximum size of memory is larger than 4GiB. This setting is
    /// unlikely to be a one-size-fits-all style approach and if you're an
    /// embedder running into issues with growth and are interested in having
    /// other growth strategies available here please feel free to [open an
    /// issue on the Wasmtime repository][issue]!
    ///
    /// [issue]: https://github.com/bytecodealliance/wasmtime/issues/new
    ///
    /// ## Default
    ///
    /// For 64-bit platforms this defaults to 2GiB, and for 32-bit platforms
    /// this defaults to 1MiB.
    pub fn memory_reservation_for_growth(&mut self, bytes: u64) -> &mut Self {
        self.tunables.memory_reservation_for_growth = Some(bytes);
        self
    }
1949
1950 /// Indicates whether a guard region is present before allocations of
1951 /// linear memory.
1952 ///
1953 /// Guard regions before linear memories are never used during normal
1954 /// operation of WebAssembly modules, even if they have out-of-bounds
1955 /// loads. The only purpose for a preceding guard region in linear memory
1956 /// is extra protection against possible bugs in code generators like
1957 /// Cranelift. This setting does not affect performance in any way, but will
1958 /// result in larger virtual memory reservations for linear memories (it
1959 /// won't actually ever use more memory, just use more of the address
1960 /// space).
1961 ///
1962 /// The size of the guard region before linear memory is the same as the
1963 /// guard size that comes after linear memory, which is configured by
1964 /// [`Config::memory_guard_size`].
1965 ///
1966 /// ## Default
1967 ///
1968 /// This value defaults to `true`.
1969 pub fn guard_before_linear_memory(&mut self, enable: bool) -> &mut Self {
1970 self.tunables.guard_before_linear_memory = Some(enable);
1971 self
1972 }
1973
1974 /// Indicates whether to initialize tables lazily, so that instantiation
1975 /// is fast but indirect calls are a little slower. If false, tables
1976 /// are initialized eagerly during instantiation from any active element
1977 /// segments that apply to them.
1978 ///
1979 /// **Note** Disabling this option is not compatible with the Winch compiler.
1980 ///
1981 /// ## Default
1982 ///
1983 /// This value defaults to `true`.
1984 pub fn table_lazy_init(&mut self, table_lazy_init: bool) -> &mut Self {
1985 self.tunables.table_lazy_init = Some(table_lazy_init);
1986 self
1987 }
1988
1989 /// Configure the version information used in serialized and deserialized [`crate::Module`]s.
1990 /// This effects the behavior of [`crate::Module::serialize()`], as well as
1991 /// [`crate::Module::deserialize()`] and related functions.
1992 ///
1993 /// The default strategy is to use the wasmtime crate's Cargo package version.
1994 pub fn module_version(&mut self, strategy: ModuleVersionStrategy) -> Result<&mut Self> {
1995 match strategy {
1996 // This case requires special precondition for assertion in SerializedModule::to_bytes
1997 ModuleVersionStrategy::Custom(ref v) => {
1998 if v.as_bytes().len() > 255 {
1999 bail!("custom module version cannot be more than 255 bytes: {v}");
2000 }
2001 }
2002 _ => {}
2003 }
2004 self.module_version = strategy;
2005 Ok(self)
2006 }
2007
2008 /// Configure whether wasmtime should compile a module using multiple
2009 /// threads.
2010 ///
2011 /// Disabling this will result in a single thread being used to compile
2012 /// the wasm bytecode.
2013 ///
2014 /// By default parallel compilation is enabled.
2015 #[cfg(feature = "parallel-compilation")]
2016 pub fn parallel_compilation(&mut self, parallel: bool) -> &mut Self {
2017 self.parallel_compilation = parallel;
2018 self
2019 }
2020
2021 /// Configures whether compiled artifacts will contain information to map
2022 /// native program addresses back to the original wasm module.
2023 ///
2024 /// This configuration option is `true` by default and, if enabled,
2025 /// generates the appropriate tables in compiled modules to map from native
2026 /// address back to wasm source addresses. This is used for displaying wasm
2027 /// program counters in backtraces as well as generating filenames/line
2028 /// numbers if so configured as well (and the original wasm module has DWARF
2029 /// debugging information present).
2030 pub fn generate_address_map(&mut self, generate: bool) -> &mut Self {
2031 self.tunables.generate_address_map = Some(generate);
2032 self
2033 }
2034
    /// Configures whether copy-on-write memory-mapped data is used to
    /// initialize a linear memory.
    ///
    /// Initializing linear memory via a copy-on-write mapping can drastically
    /// improve instantiation costs of a WebAssembly module because copying
    /// memory is deferred. Additionally if a page of memory is only ever read
    /// from WebAssembly and never written to then the same underlying page of
    /// data will be reused between all instantiations of a module meaning that
    /// if a module is instantiated many times this can lower the overall memory
    /// needed to run that module.
    ///
    /// The main disadvantage of copy-on-write initialization, however, is that
    /// it may be possible for highly-parallel scenarios to be less scalable. If
    /// a page is read initially by a WebAssembly module then that page will be
    /// mapped to a read-only copy shared between all WebAssembly instances. If
    /// the same page is then written, however, then a private copy is created
    /// and swapped out from the read-only version. This also requires an [IPI],
    /// however, which can be a significant bottleneck in high-parallelism
    /// situations.
    ///
    /// This feature is only applicable when a WebAssembly module meets specific
    /// criteria to be initialized in this fashion, such as:
    ///
    /// * Only memories defined in the module can be initialized this way.
    /// * Data segments for memory must use statically known offsets.
    /// * Data segments for memory must all be in-bounds.
    ///
    /// Modules which do not meet these criteria will fall back to
    /// initialization of linear memory based on copying memory.
    ///
    /// This feature of Wasmtime is also platform-specific:
    ///
    /// * Linux - this feature is supported for all instances of [`Module`].
    ///   Modules backed by an existing mmap (such as those created by
    ///   [`Module::deserialize_file`]) will reuse that mmap to cow-initialize
    ///   memory. Other instances of [`Module`] may use the `memfd_create`
    ///   syscall to create an initialization image to `mmap`.
    /// * Unix (not Linux) - this feature is only supported when loading modules
    ///   from a precompiled file via [`Module::deserialize_file`] where there
    ///   is a file descriptor to use to map data into the process. Note that
    ///   the module must have been compiled with this setting enabled as well.
    /// * Windows - there is no support for this feature at this time. Memory
    ///   initialization will always copy bytes.
    ///
    /// By default this option is enabled.
    ///
    /// [`Module::deserialize_file`]: crate::Module::deserialize_file
    /// [`Module`]: crate::Module
    /// [IPI]: https://en.wikipedia.org/wiki/Inter-processor_interrupt
    pub fn memory_init_cow(&mut self, enable: bool) -> &mut Self {
        self.tunables.memory_init_cow = Some(enable);
        self
    }
2088
2089 /// A configuration option to force the usage of `memfd_create` on Linux to
2090 /// be used as the backing source for a module's initial memory image.
2091 ///
2092 /// When [`Config::memory_init_cow`] is enabled, which is enabled by
2093 /// default, module memory initialization images are taken from a module's
2094 /// original mmap if possible. If a precompiled module was loaded from disk
2095 /// this means that the disk's file is used as an mmap source for the
2096 /// initial linear memory contents. This option can be used to force, on
2097 /// Linux, that instead of using the original file on disk a new in-memory
2098 /// file is created with `memfd_create` to hold the contents of the initial
2099 /// image.
2100 ///
2101 /// This option can be used to avoid possibly loading the contents of memory
2102 /// from disk through a page fault. Instead with `memfd_create` the contents
2103 /// of memory are always in RAM, meaning that even page faults which
2104 /// initially populate a wasm linear memory will only work with RAM instead
2105 /// of ever hitting the disk that the original precompiled module is stored
2106 /// on.
2107 ///
2108 /// This option is disabled by default.
2109 pub fn force_memory_init_memfd(&mut self, enable: bool) -> &mut Self {
2110 self.force_memory_init_memfd = enable;
2111 self
2112 }
2113
2114 /// Configures whether or not a coredump should be generated and attached to
2115 /// the [`Error`](crate::Error) when a trap is raised.
2116 ///
2117 /// This option is disabled by default.
2118 #[cfg(feature = "coredump")]
2119 pub fn coredump_on_trap(&mut self, enable: bool) -> &mut Self {
2120 self.coredump_on_trap = enable;
2121 self
2122 }
2123
2124 /// Enables memory error checking for wasm programs.
2125 ///
2126 /// This option is disabled by default.
2127 ///
2128 /// # Panics
2129 ///
2130 /// Panics if this configuration's compiler was [disabled][Config::enable_compiler].
2131 #[cfg(any(feature = "cranelift", feature = "winch"))]
2132 pub fn wmemcheck(&mut self, enable: bool) -> &mut Self {
2133 self.wmemcheck = enable;
2134 self.compiler_config_mut().wmemcheck = enable;
2135 self
2136 }
2137
    /// Configures the "guaranteed dense image size" for copy-on-write
    /// initialized memories.
    ///
    /// When using the [`Config::memory_init_cow`] feature to initialize memory
    /// efficiently (which is enabled by default), compiled modules contain an
    /// image of the module's initial heap. If the module has a fairly sparse
    /// initial heap, with just a few data segments at very different offsets,
    /// this could result in a large region of zero bytes in the image. In
    /// other words, it's not very memory-efficient.
    ///
    /// We normally use a heuristic to avoid this: if less than half
    /// of the initialized range (first non-zero to last non-zero
    /// byte) of any memory in the module has pages with non-zero
    /// bytes, then we avoid creating a memory image for the entire module.
    ///
    /// However, if the embedder always needs the instantiation-time efficiency
    /// of copy-on-write initialization, and is otherwise carefully controlling
    /// parameters of the modules (for example, by limiting the maximum heap
    /// size of the modules), then it may be desirable to ensure a memory image
    /// is created even if this could go against the heuristic above. Thus, we
    /// add another condition: there is a size of initialized data region up to
    /// which we *always* allow a memory image. The embedder can set this to a
    /// known maximum heap size if they desire to always get the benefits of
    /// copy-on-write images.
    ///
    /// In the future we may implement a "best of both worlds"
    /// solution where we have a dense image up to some limit, and
    /// then support a sparse list of initializers beyond that; this
    /// would get most of the benefit of copy-on-write and pay the incremental
    /// cost of eager initialization only for those bits of memory
    /// that are out-of-bounds. However, for now, an embedder desiring
    /// fast instantiation should ensure that this setting is as large
    /// as the maximum module initial memory content size.
    ///
    /// By default this value is 16 MiB.
    pub fn memory_guaranteed_dense_image_size(&mut self, size_in_bytes: u64) -> &mut Self {
        self.memory_guaranteed_dense_image_size = size_in_bytes;
        self
    }
2177
    /// Whether to enable function inlining during compilation or not.
    ///
    /// This may result in faster execution at runtime, but adds additional
    /// compilation time. Inlining may also enlarge the size of compiled
    /// artifacts (for example, the size of the result of
    /// [`Engine::precompile_component`](crate::Engine::precompile_component)).
    ///
    /// Inlining is not supported by all of Wasmtime's compilation strategies;
    /// currently, only Cranelift supports it. This setting will be ignored
    /// when using a compilation strategy that does not support inlining, like
    /// Winch.
    ///
    /// Note that inlining is still somewhat experimental at the moment (as of
    /// Wasmtime version 36).
    pub fn compiler_inlining(&mut self, inlining: bool) -> &mut Self {
        self.tunables.inlining = Some(inlining);
        self
    }
2196
    /// Returns the set of features that the currently selected compiler backend
    /// does not support at all and may panic on.
    ///
    /// Wasmtime strives to reject unknown modules or unsupported modules with
    /// first-class errors instead of panics. Not all compiler backends have the
    /// same level of feature support on all platforms as well. This method
    /// returns a set of features that the currently selected compiler
    /// configuration is known to not support and may panic on. This acts as a
    /// first-level filter on incoming wasm modules/configuration to fail-fast
    /// instead of panicking later on.
    ///
    /// Note that if a feature is not listed here it does not mean that the
    /// backend fully supports the proposal. Instead that means that the backend
    /// doesn't ever panic on the proposal, but errors during compilation may
    /// still be returned. This means that features listed here are definitely
    /// not supported at all, but features not listed here may still be
    /// partially supported. For example at the time of this writing the Winch
    /// backend partially supports simd so it's not listed here. Winch doesn't
    /// fully support simd but unimplemented instructions just return errors.
    fn compiler_panicking_wasm_features(&self) -> WasmFeatures {
        // First we compute the set of features that Wasmtime itself knows;
        // this is a sort of "maximal set" that we invert to create a set
        // of features we _definitely can't support_ because wasmtime
        // has never heard of them.
        let features_known_to_wasmtime = WasmFeatures::empty()
            | WasmFeatures::MUTABLE_GLOBAL
            | WasmFeatures::SATURATING_FLOAT_TO_INT
            | WasmFeatures::SIGN_EXTENSION
            | WasmFeatures::REFERENCE_TYPES
            | WasmFeatures::CALL_INDIRECT_OVERLONG
            | WasmFeatures::MULTI_VALUE
            | WasmFeatures::BULK_MEMORY
            | WasmFeatures::BULK_MEMORY_OPT
            | WasmFeatures::SIMD
            | WasmFeatures::RELAXED_SIMD
            | WasmFeatures::THREADS
            | WasmFeatures::SHARED_EVERYTHING_THREADS
            | WasmFeatures::TAIL_CALL
            | WasmFeatures::FLOATS
            | WasmFeatures::MULTI_MEMORY
            | WasmFeatures::EXCEPTIONS
            | WasmFeatures::MEMORY64
            | WasmFeatures::EXTENDED_CONST
            | WasmFeatures::COMPONENT_MODEL
            | WasmFeatures::FUNCTION_REFERENCES
            | WasmFeatures::GC
            | WasmFeatures::CUSTOM_PAGE_SIZES
            | WasmFeatures::GC_TYPES
            | WasmFeatures::STACK_SWITCHING
            | WasmFeatures::WIDE_ARITHMETIC
            | WasmFeatures::CM_ASYNC
            | WasmFeatures::CM_ASYNC_STACKFUL
            | WasmFeatures::CM_ASYNC_BUILTINS
            | WasmFeatures::CM_THREADING
            | WasmFeatures::CM_ERROR_CONTEXT
            | WasmFeatures::CM_GC
            | WasmFeatures::CM_MAP
            | WasmFeatures::CM_FIXED_LENGTH_LISTS;

        #[allow(unused_mut, reason = "easier to avoid #[cfg]")]
        let mut unsupported = !features_known_to_wasmtime;

        // Further refine `unsupported` per-backend and per-target below; this
        // whole refinement only applies when a compiler is built in at all.
        #[cfg(any(feature = "cranelift", feature = "winch"))]
        match self.compiler_config.as_ref().and_then(|c| c.strategy) {
            None | Some(Strategy::Cranelift) => {
                // Pulley at this time fundamentally doesn't support the
                // `threads` proposal, notably shared memory, because Rust can't
                // safely implement loads/stores in the face of shared memory.
                // Stack switching is not implemented, either.
                if self.compiler_target().is_pulley() {
                    unsupported |= WasmFeatures::THREADS;
                    unsupported |= WasmFeatures::STACK_SWITCHING;
                }

                use target_lexicon::*;
                match self.compiler_target() {
                    Triple {
                        architecture: Architecture::X86_64 | Architecture::X86_64h,
                        operating_system:
                            OperatingSystem::Linux
                            | OperatingSystem::MacOSX(_)
                            | OperatingSystem::Darwin(_),
                        ..
                    } => {
                        // Stack switching supported on (non-Pulley) Cranelift.
                    }

                    _ => {
                        // On platforms other than x64 Unix-like, we don't
                        // support stack switching.
                        unsupported |= WasmFeatures::STACK_SWITCHING;
                    }
                }
            }
            Some(Strategy::Winch) => {
                unsupported |= WasmFeatures::GC
                    | WasmFeatures::FUNCTION_REFERENCES
                    | WasmFeatures::RELAXED_SIMD
                    | WasmFeatures::TAIL_CALL
                    | WasmFeatures::GC_TYPES
                    | WasmFeatures::EXCEPTIONS
                    | WasmFeatures::LEGACY_EXCEPTIONS
                    | WasmFeatures::STACK_SWITCHING
                    | WasmFeatures::CM_ASYNC;
                match self.compiler_target().architecture {
                    target_lexicon::Architecture::Aarch64(_) => {
                        unsupported |= WasmFeatures::THREADS;
                        unsupported |= WasmFeatures::WIDE_ARITHMETIC;
                    }

                    // Winch doesn't support other non-x64 architectures at this
                    // time either but will return a first-class error for
                    // them.
                    _ => {}
                }
            }
            Some(Strategy::Auto) => unreachable!(),
        }
        unsupported
    }
2317
    /// Calculates the set of features that are enabled for this `Config`.
    ///
    /// This method internally will start with an empty set of features to
    /// avoid being tied to wasmparser's defaults. Next Wasmtime's set of
    /// default features are added to this set, some of which are conditional
    /// depending on crate features. Finally explicitly requested features via
    /// `wasm_*` methods on `Config` are applied. Everything is then validated
    /// later in `Config::validate`.
    fn features(&self) -> WasmFeatures {
        // Wasmtime by default supports all of the wasm 2.0 version of the
        // specification.
        let mut features = WasmFeatures::WASM2;

        // On-by-default features that wasmtime has. Note that these are all
        // subject to the criteria at
        // https://docs.wasmtime.dev/contributing-implementing-wasm-proposals.html
        // and
        // https://docs.wasmtime.dev/stability-wasm-proposals.html
        features |= WasmFeatures::MULTI_MEMORY;
        features |= WasmFeatures::RELAXED_SIMD;
        features |= WasmFeatures::TAIL_CALL;
        features |= WasmFeatures::EXTENDED_CONST;
        features |= WasmFeatures::MEMORY64;
        // NB: if you add a feature above this line please double-check
        // https://docs.wasmtime.dev/stability-wasm-proposals.html
        // to ensure all requirements are met and/or update the documentation
        // there too.

        // Set some features to their conditionally-enabled defaults depending
        // on crate compile-time features.
        features.set(WasmFeatures::GC_TYPES, cfg!(feature = "gc"));
        features.set(WasmFeatures::THREADS, cfg!(feature = "threads"));
        features.set(
            WasmFeatures::COMPONENT_MODEL,
            cfg!(feature = "component-model"),
        );

        // From the default set of proposals remove any that the current
        // compiler backend may panic on if the module contains them.
        features = features & !self.compiler_panicking_wasm_features();

        // After wasmtime's defaults are configured then factor in user requests
        // and disable/enable features. Note that the enable/disable sets should
        // be disjoint.
        debug_assert!((self.enabled_features & self.disabled_features).is_empty());
        features &= !self.disabled_features;
        features |= self.enabled_features;

        features
    }
2368
2369 /// Returns the configured compiler target for this `Config`.
2370 pub(crate) fn compiler_target(&self) -> target_lexicon::Triple {
2371 // If a target is explicitly configured, always use that.
2372 if let Some(target) = self.target.clone() {
2373 return target;
2374 }
2375
2376 // If the `build.rs` script determined that this platform uses pulley by
2377 // default, then use Pulley.
2378 if cfg!(default_target_pulley) {
2379 return target_lexicon::Triple::pulley_host();
2380 }
2381
2382 // And at this point the target is for sure the host.
2383 target_lexicon::Triple::host()
2384 }
2385
    /// Validates this `Config`, computing the final `Tunables` and
    /// `WasmFeatures` to use for compilation.
    ///
    /// Returns an error when the requested configuration is internally
    /// inconsistent or unsupported by the selected compiler backend or by
    /// the compile-time features of this build of Wasmtime.
    pub(crate) fn validate(&self) -> Result<(Tunables, WasmFeatures)> {
        let features = self.features();

        // First validate that the selected compiler backend and configuration
        // supports the set of `features` that are enabled. This will help
        // provide more first class errors instead of panics about unsupported
        // features and configurations.
        let unsupported = features & self.compiler_panicking_wasm_features();
        if !unsupported.is_empty() {
            // Report the first offending feature by name for a precise error.
            for flag in WasmFeatures::FLAGS.iter() {
                if !unsupported.contains(*flag.value()) {
                    continue;
                }
                bail!(
                    "the wasm_{} feature is not supported on this compiler configuration",
                    flag.name().to_lowercase()
                );
            }

            // `unsupported` was non-empty so the loop above must have bailed.
            panic!("should have returned an error by now")
        }

        #[cfg(any(feature = "async", feature = "stack-switching"))]
        if self.max_wasm_stack > self.async_stack_size {
            bail!("max_wasm_stack size cannot exceed the async_stack_size");
        }
        if self.max_wasm_stack == 0 {
            bail!("max_wasm_stack size cannot be zero");
        }
        if !cfg!(feature = "wmemcheck") && self.wmemcheck {
            bail!("wmemcheck (memory checker) was requested but is not enabled in this build");
        }

        if !cfg!(feature = "gc") && features.gc_types() {
            bail!("support for GC was disabled at compile time")
        }

        if !cfg!(feature = "gc") && features.contains(WasmFeatures::EXCEPTIONS) {
            bail!("exceptions support requires garbage collection (GC) to be enabled in the build");
        }

        match &self.rr_config {
            #[cfg(feature = "rr")]
            RRConfig::Recording | RRConfig::Replaying => {
                self.validate_rr_determinism_conflicts()?;
            }
            RRConfig::None => {}
        };

        let mut tunables = Tunables::default_for_target(&self.compiler_target())?;

        // By default this is enabled with the Cargo feature, and if the feature
        // is missing this is disabled.
        tunables.concurrency_support = cfg!(feature = "component-model-async");

        #[cfg(feature = "rr")]
        {
            tunables.recording = matches!(self.rr_config, RRConfig::Recording);
        }

        // If no target is explicitly specified then further refine `tunables`
        // for the configuration of this host depending on what platform
        // features were found available at compile time. This means that anyone
        // cross-compiling for a customized host will need to further refine
        // compilation options.
        if self.target.is_none() {
            // If this platform doesn't have native signals then change some
            // defaults to account for that. Note that VM guards are turned off
            // here because that's primarily a feature of eliding
            // bounds-checks.
            if !cfg!(has_native_signals) {
                tunables.signals_based_traps = cfg!(has_native_signals);
                tunables.memory_guard_size = 0;
            }

            // When virtual memory is not available use slightly different
            // defaults for tunables to be more amenable to `MallocMemory`.
            // Note that these can still be overridden by config options.
            if !cfg!(has_virtual_memory) {
                tunables.memory_reservation = 0;
                tunables.memory_reservation_for_growth = 1 << 20; // 1MB
                tunables.memory_init_cow = false;
            }
        }

        // If guest-debugging is enabled, we must disable
        // signals-based traps. Do this before we process the user's
        // provided tunables settings so we can detect a conflict with
        // an explicit request to use signals-based traps.
        #[cfg(feature = "debug")]
        if self.tunables.debug_guest == Some(true) {
            tunables.signals_based_traps = false;
        }

        self.tunables.configure(&mut tunables);

        // If we're going to compile with winch, we must use the winch calling convention.
        #[cfg(any(feature = "cranelift", feature = "winch"))]
        {
            tunables.winch_callable = self
                .compiler_config
                .as_ref()
                .is_some_and(|c| c.strategy == Some(Strategy::Winch));
        }

        tunables.collector = if features.gc_types() {
            #[cfg(feature = "gc")]
            {
                use wasmtime_environ::Collector as EnvCollector;
                Some(match self.collector.try_not_auto()? {
                    Collector::DeferredReferenceCounting => EnvCollector::DeferredReferenceCounting,
                    Collector::Null => EnvCollector::Null,
                    Collector::Auto => unreachable!(),
                })
            }
            #[cfg(not(feature = "gc"))]
            bail!("cannot use GC types: the `gc` feature was disabled at compile time")
        } else {
            None
        };

        if tunables.debug_guest {
            ensure!(
                cfg!(feature = "debug"),
                "debug instrumentation support was disabled at compile time"
            );
            ensure!(
                !tunables.signals_based_traps,
                "cannot use signals-based traps with guest debugging enabled"
            );
        }

        // Concurrency support is required for some component model features.
        let requires_concurrency = WasmFeatures::CM_ASYNC
            | WasmFeatures::CM_ASYNC_BUILTINS
            | WasmFeatures::CM_ASYNC_STACKFUL
            | WasmFeatures::CM_THREADING
            | WasmFeatures::CM_ERROR_CONTEXT;
        if tunables.concurrency_support && !cfg!(feature = "component-model-async") {
            bail!(
                "concurrency support was requested but was not \
                 compiled into this build of Wasmtime"
            )
        }
        if !tunables.concurrency_support && features.intersects(requires_concurrency) {
            bail!(
                "concurrency support must be enabled to use the component \
                 model async or threading features"
            )
        }

        Ok((tunables, features))
    }
2539
    /// Constructs the instance allocator described by
    /// `self.allocation_strategy`: either an on-demand allocator or, when the
    /// `pooling-allocator` feature is enabled, a pooling allocator.
    #[cfg(feature = "runtime")]
    pub(crate) fn build_allocator(
        &self,
        tunables: &Tunables,
    ) -> Result<Box<dyn InstanceAllocator + Send + Sync>> {
        // Async stack parameters only exist with the `async` feature; fall
        // back to "no stacks" defaults otherwise.
        #[cfg(feature = "async")]
        let (stack_size, stack_zeroing) = (self.async_stack_size, self.async_stack_zeroing);

        #[cfg(not(feature = "async"))]
        let (stack_size, stack_zeroing) = (0, false);

        // `tunables` is only consumed by the pooling arm below, which may be
        // compiled out; this silences the unused-variable warning in that case.
        let _ = tunables;

        match &self.allocation_strategy {
            InstanceAllocationStrategy::OnDemand => {
                let mut _allocator = try_new::<Box<_>>(OnDemandInstanceAllocator::new(
                    self.mem_creator.clone(),
                    stack_size,
                    stack_zeroing,
                ))?;
                // Propagate a custom fiber-stack creator, if one was supplied.
                #[cfg(feature = "async")]
                if let Some(stack_creator) = &self.stack_creator {
                    _allocator.set_stack_creator(stack_creator.clone());
                }
                Ok(_allocator as _)
            }
            #[cfg(feature = "pooling-allocator")]
            InstanceAllocationStrategy::Pooling(config) => {
                // Overlay the async stack settings onto the user's pooling
                // configuration before constructing the allocator.
                let mut config = config.config;
                config.stack_size = stack_size;
                config.async_stack_zeroing = stack_zeroing;
                let allocator = try_new::<Box<_>>(
                    crate::runtime::vm::PoolingInstanceAllocator::new(&config, tunables)?,
                )?;
                Ok(allocator as _)
            }
        }
    }
2578
    /// Constructs the GC runtime matching the configured collector, or
    /// returns `Ok(None)` when GC types are not enabled for this `Config`.
    ///
    /// Errors if GC types are enabled but GC support was compiled out.
    #[cfg(feature = "runtime")]
    pub(crate) fn build_gc_runtime(&self) -> Result<Option<Arc<dyn GcRuntime>>> {
        if !self.features().gc_types() {
            return Ok(None);
        }

        #[cfg(not(feature = "gc"))]
        bail!("cannot create a GC runtime: the `gc` feature was disabled at compile time");

        #[cfg(feature = "gc")]
        #[cfg_attr(
            not(any(feature = "gc-null", feature = "gc-drc")),
            expect(unreachable_code, reason = "definitions known to be dummy")
        )]
        {
            // Each collector variant is only constructible when its
            // corresponding crate feature is enabled; otherwise the
            // `try_not_auto` call above is expected to have errored first.
            Ok(Some(match self.collector.try_not_auto()? {
                #[cfg(feature = "gc-drc")]
                Collector::DeferredReferenceCounting => {
                    try_new::<Arc<_>>(crate::runtime::vm::DrcCollector::default())? as _
                }
                #[cfg(not(feature = "gc-drc"))]
                Collector::DeferredReferenceCounting => unreachable!(),

                #[cfg(feature = "gc-null")]
                Collector::Null => {
                    try_new::<Arc<_>>(crate::runtime::vm::NullCollector::default())? as _
                }
                #[cfg(not(feature = "gc-null"))]
                Collector::Null => unreachable!(),

                Collector::Auto => unreachable!(),
            }))
        }
    }
2613
2614 #[cfg(feature = "runtime")]
2615 pub(crate) fn build_profiler(&self) -> Result<Box<dyn ProfilingAgent>> {
2616 Ok(match self.profiling_strategy {
2617 ProfilingStrategy::PerfMap => profiling_agent::new_perfmap()?,
2618 ProfilingStrategy::JitDump => profiling_agent::new_jitdump()?,
2619 ProfilingStrategy::VTune => profiling_agent::new_vtune()?,
2620 ProfilingStrategy::None => profiling_agent::new_null(),
2621 ProfilingStrategy::Pulley => profiling_agent::new_pulley()?,
2622 })
2623 }
2624
2625 #[cfg(any(feature = "cranelift", feature = "winch"))]
2626 pub(crate) fn build_compiler(
2627 mut self,
2628 tunables: &mut Tunables,
2629 features: WasmFeatures,
2630 ) -> Result<(Self, Box<dyn wasmtime_environ::Compiler>)> {
2631 let target = self.compiler_target();
2632
2633 // The target passed to the builders below is an `Option<Triple>` where
2634 // `None` represents the current host with CPU features inferred from
2635 // the host's CPU itself. The `target` above is not an `Option`, so
2636 // switch it to `None` in the case that a target wasn't explicitly
2637 // specified (which indicates no feature inference) and the target
2638 // matches the host.
2639 let target_for_builder =
2640 if self.target.is_none() && target == target_lexicon::Triple::host() {
2641 None
2642 } else {
2643 Some(target.clone())
2644 };
2645
2646 let mut compiler = match self.compiler_config_mut().strategy {
2647 #[cfg(feature = "cranelift")]
2648 Some(Strategy::Cranelift) => wasmtime_cranelift::builder(target_for_builder)?,
2649 #[cfg(not(feature = "cranelift"))]
2650 Some(Strategy::Cranelift) => bail!("cranelift support not compiled in"),
2651 #[cfg(feature = "winch")]
2652 Some(Strategy::Winch) => wasmtime_winch::builder(target_for_builder)?,
2653 #[cfg(not(feature = "winch"))]
2654 Some(Strategy::Winch) => bail!("winch support not compiled in"),
2655
2656 None | Some(Strategy::Auto) => unreachable!(),
2657 };
2658
2659 if let Some(path) = &self.compiler_config_mut().clif_dir {
2660 compiler.clif_dir(path)?;
2661 }
2662
2663 // If probestack is enabled for a target, Wasmtime will always use the
2664 // inline strategy which doesn't require us to define a `__probestack`
2665 // function or similar.
2666 self.compiler_config_mut()
2667 .settings
2668 .insert("probestack_strategy".into(), "inline".into());
2669
2670 // We enable stack probing by default on all targets.
2671 // This is required on Windows because of the way Windows
2672 // commits its stacks, but it's also a good idea on other
2673 // platforms to ensure guard pages are hit for large frame
2674 // sizes.
2675 self.compiler_config_mut()
2676 .flags
2677 .insert("enable_probestack".into());
2678
2679 // The current wasm multivalue implementation depends on this.
2680 // FIXME(#9510) handle this in wasmtime-cranelift instead.
2681 self.compiler_config_mut()
2682 .flags
2683 .insert("enable_multi_ret_implicit_sret".into());
2684
2685 if let Some(unwind_requested) = self.native_unwind_info {
2686 if !self
2687 .compiler_config_mut()
2688 .ensure_setting_unset_or_given("unwind_info", &unwind_requested.to_string())
2689 {
2690 bail!(
2691 "incompatible settings requested for Cranelift and Wasmtime `unwind-info` settings"
2692 );
2693 }
2694 }
2695
2696 if target.operating_system == target_lexicon::OperatingSystem::Windows {
2697 if !self
2698 .compiler_config_mut()
2699 .ensure_setting_unset_or_given("unwind_info", "true")
2700 {
2701 bail!("`native_unwind_info` cannot be disabled on Windows");
2702 }
2703 }
2704
2705 // We require frame pointers for correct stack walking, which is safety
2706 // critical in the presence of reference types, and otherwise it is just
2707 // really bad developer experience to get wrong.
2708 self.compiler_config_mut()
2709 .settings
2710 .insert("preserve_frame_pointers".into(), "true".into());
2711
2712 if !tunables.signals_based_traps {
2713 let mut ok = self
2714 .compiler_config_mut()
2715 .ensure_setting_unset_or_given("enable_table_access_spectre_mitigation", "false");
2716 ok = ok
2717 && self.compiler_config_mut().ensure_setting_unset_or_given(
2718 "enable_heap_access_spectre_mitigation",
2719 "false",
2720 );
2721
2722 // Right now spectre-mitigated bounds checks will load from zero so
2723 // if host-based signal handlers are disabled then that's a mismatch
2724 // and doesn't work right now. Fixing this will require more thought
2725 // of how to implement the bounds check in spectre-only mode.
2726 if !ok {
2727 bail!(
2728 "when signals-based traps are disabled then spectre \
2729 mitigations must also be disabled"
2730 );
2731 }
2732 }
2733
2734 if features.contains(WasmFeatures::RELAXED_SIMD) && !features.contains(WasmFeatures::SIMD) {
2735 bail!("cannot disable the simd proposal but enable the relaxed simd proposal");
2736 }
2737
2738 if features.contains(WasmFeatures::STACK_SWITCHING) {
2739 use target_lexicon::OperatingSystem;
2740 let model = match target.operating_system {
2741 OperatingSystem::Windows => "update_windows_tib",
2742 OperatingSystem::Linux
2743 | OperatingSystem::MacOSX(_)
2744 | OperatingSystem::Darwin(_) => "basic",
2745 _ => bail!("stack-switching feature not supported on this platform "),
2746 };
2747
2748 if !self
2749 .compiler_config_mut()
2750 .ensure_setting_unset_or_given("stack_switch_model", model)
2751 {
2752 bail!(
2753 "compiler option 'stack_switch_model' must be set to '{model}' on this platform"
2754 );
2755 }
2756 }
2757
2758 // Apply compiler settings and flags
2759 compiler.set_tunables(tunables.clone())?;
2760 for (k, v) in self.compiler_config_mut().settings.iter() {
2761 compiler.set(k, v)?;
2762 }
2763 for flag in self.compiler_config_mut().flags.iter() {
2764 compiler.enable(flag)?;
2765 }
2766 *tunables = compiler.tunables().cloned().unwrap();
2767
2768 #[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
2769 if let Some(cache_store) = &self.compiler_config_mut().cache_store {
2770 compiler.enable_incremental_compilation(cache_store.clone())?;
2771 }
2772
2773 compiler.wmemcheck(self.compiler_config_mut().wmemcheck);
2774
2775 Ok((self, compiler.build()?))
2776 }
2777
    /// Internal setting for whether adapter modules for components will have
    /// extra WebAssembly instructions inserted performing more debug checks
    /// than are necessary.
    #[cfg(feature = "component-model")]
    pub fn debug_adapter_modules(&mut self, debug: bool) -> &mut Self {
        self.tunables.debug_adapter_modules = Some(debug);
        self
    }
2786
    /// Enables clif output when compiling a WebAssembly module.
    ///
    /// The configured directory is later handed to the compiler builder
    /// (see the `clif_dir` handling in `build_compiler`).
    #[cfg(any(feature = "cranelift", feature = "winch"))]
    pub fn emit_clif(&mut self, path: &Path) -> &mut Self {
        self.compiler_config_mut().clif_dir = Some(path.to_path_buf());
        self
    }
2793
    /// Configures whether, when on macOS, Mach ports are used for exception
    /// handling instead of traditional Unix-based signal handling.
    ///
    /// WebAssembly traps in Wasmtime are implemented with native faults, for
    /// example a `SIGSEGV` will occur when a WebAssembly guest accesses
    /// out-of-bounds memory. Handling this can be configured to either use Unix
    /// signals or Mach ports on macOS. By default Mach ports are used.
    ///
    /// Mach ports enable Wasmtime to work by default with foreign
    /// error-handling systems such as breakpad which also use Mach ports to
    /// handle signals. In this situation Wasmtime will continue to handle guest
    /// faults gracefully while any non-guest faults will get forwarded to
    /// process-level handlers such as breakpad. Some more background on this
    /// can be found in #2456.
    ///
    /// A downside of using mach ports, however, is that they don't interact
    /// well with `fork()`. Forking a Wasmtime process on macOS will produce a
    /// child process that cannot successfully run WebAssembly. In this
    /// situation traditional Unix signal handling should be used as that's
    /// inherited and works across forks.
    ///
    /// If your embedding wants to use a custom error handler which leverages
    /// Mach ports and you additionally wish to `fork()` the process and use
    /// Wasmtime in the child process that's not currently possible. Please
    /// reach out to us if you're in this bucket!
    ///
    /// This option defaults to `true`, using Mach ports by default.
    pub fn macos_use_mach_ports(&mut self, mach_ports: bool) -> &mut Self {
        // Plain setter; the flag is consumed elsewhere when trap handling is
        // installed (not visible in this file).
        self.macos_use_mach_ports = mach_ports;
        self
    }
2825
    /// Configures an embedder-provided function, `detect`, which is used to
    /// determine if an ISA-specific feature is available on the current host.
    ///
    /// This function is used to verify that any features enabled for a compiler
    /// backend, such as AVX support on x86\_64, are also available on the host.
    /// It is undefined behavior to execute an AVX instruction on a host that
    /// doesn't support AVX instructions, for example.
    ///
    /// When the `std` feature is active on this crate then this function is
    /// configured to a default implementation that uses the standard library's
    /// feature detection. When the `std` feature is disabled then there is no
    /// default available and this method must be called to configure a feature
    /// probing function.
    ///
    /// The `detect` function provided is given a string name of an ISA feature.
    /// The function should then return:
    ///
    /// * `Some(true)` - indicates that the feature was found on the host and it
    ///   is supported.
    /// * `Some(false)` - the feature name was recognized but it was not
    ///   detected on the host, for example the CPU is too old.
    /// * `None` - the feature name was not recognized and it's not known
    ///   whether it's on the host or not.
    ///
    /// Feature names passed to `detect` match the same feature name used in the
    /// Rust standard library. For example `"sse4.2"` is used on x86\_64.
    ///
    /// # Unsafety
    ///
    /// This function is `unsafe` because it is undefined behavior to execute
    /// instructions that a host does not support. This means that the result of
    /// `detect` must be correct for memory safe execution at runtime.
    pub unsafe fn detect_host_feature(&mut self, detect: fn(&str) -> Option<bool>) -> &mut Self {
        // Store the embedder's probe; callers elsewhere consult this when
        // validating compiler-enabled ISA features against the host.
        self.detect_host_feature = Some(detect);
        self
    }
2862
    /// Configures Wasmtime to not use signals-based trap handlers, for example
    /// disables `SIGILL` and `SIGSEGV` handler registration on Unix platforms.
    ///
    /// > **Note:** this option has important performance ramifications, be sure
    /// > to understand the implications. Wasm programs have been measured to
    /// > run up to 2x slower when signals-based traps are disabled.
    ///
    /// Wasmtime will by default leverage signals-based trap handlers (or the
    /// platform equivalent, for example "vectored exception handlers" on
    /// Windows) to make generated code more efficient. For example, when
    /// Wasmtime can use signals-based traps, it can elide explicit bounds
    /// checks for Wasm linear memory accesses, instead relying on virtual
    /// memory guard pages to raise a `SIGSEGV` (on Unix) for out-of-bounds
    /// accesses, which Wasmtime's runtime then catches and handles. Another
    /// example is divide-by-zero: with signals-based traps, Wasmtime can let
    /// the hardware raise a trap when the divisor is zero. Without
    /// signals-based traps, Wasmtime must explicitly emit additional
    /// instructions to check for zero and conditionally branch to a trapping
    /// code path.
    ///
    /// Some environments however may not have access to signal handlers. For
    /// example embedded scenarios may not support virtual memory. Other
    /// environments where Wasmtime is embedded within the surrounding
    /// environment may require that new signal handlers aren't registered due
    /// to the global nature of signal handlers. This option exists to disable
    /// the signal handler registration when required for these scenarios.
    ///
    /// When signals-based trap handlers are disabled, then Wasmtime and its
    /// generated code will *never* rely on segfaults or other
    /// signals. Generated code will be slower because bounds must be explicitly
    /// checked along with other conditions like division by zero.
    ///
    /// The following additional factors can also affect Wasmtime's ability to
    /// elide explicit bounds checks and leverage signals-based traps:
    ///
    /// * The [`Config::memory_reservation`] and [`Config::memory_guard_size`]
    ///   settings
    /// * The index type of the linear memory (e.g. 32-bit or 64-bit)
    /// * The page size of the linear memory
    ///
    /// When this option is disabled, the
    /// `enable_heap_access_spectre_mitigation` and
    /// `enable_table_access_spectre_mitigation` Cranelift settings must also be
    /// disabled. This means that generated code must have spectre mitigations
    /// disabled. This is because spectre mitigations rely on faults from
    /// loading from the null address to implement bounds checks.
    ///
    /// This option defaults to `true`: signals-based trap handlers are enabled
    /// by default.
    ///
    /// > **Note:** Disabling this option is not compatible with the Winch
    /// > compiler.
    pub fn signals_based_traps(&mut self, enable: bool) -> &mut Self {
        // Recorded as a tunable override; validated and applied in
        // `Config::validate`.
        self.tunables.signals_based_traps = Some(enable);
        self
    }
2919
    /// Enable/disable GC support in Wasmtime entirely.
    ///
    /// This flag can be used to gate whether GC infrastructure is enabled or
    /// initialized in Wasmtime at all. Wasmtime's GC implementation is required
    /// for the [`Self::wasm_gc`] proposal, [`Self::wasm_function_references`],
    /// and [`Self::wasm_exceptions`] at this time. None of those proposal can
    /// be enabled without also having this option enabled.
    ///
    /// This option defaults to whether the crate `gc` feature is enabled or
    /// not.
    pub fn gc_support(&mut self, enable: bool) -> &mut Self {
        // Implemented in terms of `wasm_features` with the `GC_TYPES` flag so
        // the enable/disable bookkeeping lives in one place.
        self.wasm_features(WasmFeatures::GC_TYPES, enable)
    }
2933
    /// Explicitly indicate or not whether the host is using a hardware float
    /// ABI on x86 targets.
    ///
    /// This configuration option is only applicable on the
    /// `x86_64-unknown-none` Rust target and has no effect on other host
    /// targets. The `x86_64-unknown-none` Rust target does not support hardware
    /// floats by default and uses a "soft float" implementation and ABI. This
    /// means that `f32`, for example, is passed in a general-purpose register
    /// between functions instead of a floating-point register. This does not
    /// match Cranelift's ABI for `f32` where it's passed in floating-point
    /// registers. Cranelift does not have support for a "soft float"
    /// implementation where all floating-point operations are lowered to
    /// libcalls.
    ///
    /// This means that for the `x86_64-unknown-none` target the ABI between
    /// Wasmtime's libcalls and the host is incompatible when floats are used.
    /// This further means that, by default, Wasmtime is unable to load native
    /// code when compiled to the `x86_64-unknown-none` target. The purpose of
    /// this option is to explicitly allow loading code and bypass this check.
    ///
    /// Setting this configuration option to `true` indicates that either:
    /// (a) the Rust target is compiled with the hard-float ABI manually via
    /// `-Zbuild-std` and a custom target JSON configuration, or (b) sufficient
    /// x86 features have been enabled in the compiler such that float libcalls
    /// will not be used in Wasmtime. For (a) there is no way in Rust at this
    /// time to detect whether a hard-float or soft-float ABI is in use on
    /// stable Rust, so this manual opt-in is required. For (b) the only
    /// instance where Wasmtime passes a floating-point value in a register
    /// between the host and compiled wasm code is with libcalls.
    ///
    /// Float-based libcalls are only used when the compilation target for a
    /// wasm module has insufficient target features enabled for native
    /// support. For example SSE4.1 is required for the `f32.ceil` WebAssembly
    /// instruction to be compiled to a native instruction. If SSE4.1 is not
    /// enabled then `f32.ceil` is translated to a "libcall" which is
    /// implemented on the host. Float-based libcalls can be avoided with
    /// sufficient target features enabled, for example:
    ///
    /// * `self.cranelift_flag_enable("has_sse3")`
    /// * `self.cranelift_flag_enable("has_ssse3")`
    /// * `self.cranelift_flag_enable("has_sse41")`
    /// * `self.cranelift_flag_enable("has_sse42")`
    /// * `self.cranelift_flag_enable("has_fma")`
    ///
    /// Note that when these features are enabled Wasmtime will perform a
    /// runtime check to determine that the host actually has the feature
    /// present.
    ///
    /// For some more discussion see [#11506].
    ///
    /// [#11506]: https://github.com/bytecodealliance/wasmtime/issues/11506
    ///
    /// # Safety
    ///
    /// This method is not safe because it cannot be detected in Rust right now
    /// whether the host is compiled with a soft or hard float ABI. Additionally
    /// if the host is compiled with a soft float ABI disabling this check does
    /// not ensure that the wasm module in question has zero usage of floats
    /// in the boundary to the host.
    ///
    /// Safely using this method requires one of:
    ///
    /// * The host target is compiled to use hardware floats.
    /// * Wasm modules loaded are compiled with enough x86 Cranelift features
    ///   enabled to avoid float-related hostcalls.
    pub unsafe fn x86_float_abi_ok(&mut self, enable: bool) -> &mut Self {
        // Plain setter; the recorded opt-in is checked elsewhere when loading
        // compiled code (not visible in this file).
        self.x86_float_abi_ok = Some(enable);
        self
    }
3003
3004 /// Enable or disable the ability to create a
3005 /// [`SharedMemory`](crate::SharedMemory).
3006 ///
3007 /// The WebAssembly threads proposal, configured by [`Config::wasm_threads`]
3008 /// is on-by-default but there are enough deficiencies in Wasmtime's
3009 /// implementation and API integration that creation of a shared memory is
3010 /// disabled by default. This configuration knob can be used to enable this.
3011 ///
3012 /// When enabling this method be aware that wasm threads are, at this time,
3013 /// a [tier 2
3014 /// feature](https://docs.wasmtime.dev/stability-tiers.html#tier-2) in
3015 /// Wasmtime meaning that it will not receive security updates or fixes to
3016 /// historical releases. Additionally security CVEs will not be issued for
3017 /// bugs in the implementation.
3018 ///
3019 /// This option is `false` by default.
3020 pub fn shared_memory(&mut self, enable: bool) -> &mut Self {
3021 self.shared_memory = enable;
3022 self
3023 }
3024
3025 /// Specifies whether support for concurrent execution of WebAssembly is
3026 /// supported within this store.
3027 ///
3028 /// This configuration option affects whether runtime data structures are
3029 /// initialized within a `Store` on creation to support concurrent execution
3030 /// of WebAssembly guests. This is primarily applicable to the
3031 /// [`Config::wasm_component_model_async`] configuration which is the first
3032 /// time Wasmtime has supported concurrent execution of guests. This
3033 /// configuration option, for example, enables usage of
3034 /// [`Store::run_concurrent`], [`Func::call_concurrent`], [`StreamReader`],
3035 /// etc.
3036 ///
3037 /// This configuration option can be manually disabled to avoid initializing
3038 /// data structures in the [`Store`] related to concurrent execution. When
3039 /// this option is disabled then APIs related to concurrency will all fail
3040 /// with a panic. For example [`Store::run_concurrent`] will panic, creating
3041 /// a [`StreamReader`] will panic, etc.
3042 ///
3043 /// The value of this option additionally affects whether a [`Config`] is
3044 /// valid and the default set of enabled WebAssembly features. If this
3045 /// option is disabled then component-model features related to concurrency
3046 /// will all be disabled. If this option is enabled, then the options will
3047 /// retain their normal defaults. It is not valid to create a [`Config`]
3048 /// with component-model-async explicitly enabled and this option explicitly
3049 /// disabled, however.
3050 ///
3051 /// This option defaults to `true`.
3052 ///
3053 /// [`Store`]: crate::Store
3054 /// [`Store::run_concurrent`]: crate::Store::run_concurrent
3055 /// [`Func::call_concurrent`]: crate::component::Func::call_concurrent
3056 /// [`StreamReader`]: crate::component::StreamReader
3057 pub fn concurrency_support(&mut self, enable: bool) -> &mut Self {
3058 self.tunables.concurrency_support = Some(enable);
3059 self
3060 }
3061
3062 /// Validate if the current configuration has conflicting overrides that prevent
3063 /// execution determinism. Returns an error if a conflict exists.
3064 ///
3065 /// Note: Keep this in sync with [`Config::enforce_determinism`].
3066 #[inline]
3067 #[cfg(feature = "rr")]
3068 pub(crate) fn validate_rr_determinism_conflicts(&self) -> Result<()> {
3069 if let Some(v) = self.tunables.relaxed_simd_deterministic {
3070 if v == false {
3071 bail!("Relaxed deterministic SIMD cannot be disabled when determinism is enforced");
3072 }
3073 }
3074 #[cfg(any(feature = "cranelift", feature = "winch"))]
3075 if let Some(v) = self
3076 .compiler_config
3077 .as_ref()
3078 .and_then(|c| c.settings.get("enable_nan_canonicalization"))
3079 {
3080 if v != "true" {
3081 bail!("NaN canonicalization cannot be disabled when determinism is enforced");
3082 }
3083 }
3084 Ok(())
3085 }
3086
3087 /// Enable execution trace recording or replaying to the configuration.
3088 ///
3089 /// When either recording/replaying are enabled, validation fails if settings
3090 /// that control determinism are not set appropriately. In particular, RR requires
3091 /// doing the following:
3092 /// * Enabling NaN canonicalization with [`Config::cranelift_nan_canonicalization`].
3093 /// * Enabling deterministic relaxed SIMD with [`Config::relaxed_simd_deterministic`].
3094 #[inline]
3095 pub fn rr(&mut self, cfg: RRConfig) -> &mut Self {
3096 self.rr_config = cfg;
3097 self
3098 }
3099}
3100
3101impl Default for Config {
3102 fn default() -> Config {
3103 Config::new()
3104 }
3105}
3106
3107impl fmt::Debug for Config {
3108 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
3109 let mut f = f.debug_struct("Config");
3110
3111 // Not every flag in WasmFeatures can be enabled as part of creating
3112 // a Config. This impl gives a complete picture of all WasmFeatures
3113 // enabled, and doesn't require maintenance by hand (which has become out
3114 // of date in the past), at the cost of possible confusion for why
3115 // a flag in this set doesn't have a Config setter.
3116 let features = self.features();
3117 for flag in WasmFeatures::FLAGS.iter() {
3118 f.field(
3119 &format!("wasm_{}", flag.name().to_lowercase()),
3120 &features.contains(*flag.value()),
3121 );
3122 }
3123
3124 f.field("parallel_compilation", &self.parallel_compilation);
3125 #[cfg(any(feature = "cranelift", feature = "winch"))]
3126 {
3127 f.field("compiler_config", &self.compiler_config);
3128 }
3129
3130 self.tunables.format(&mut f);
3131 f.finish()
3132 }
3133}
3134
3135/// Possible Compilation strategies for a wasm module.
3136///
3137/// This is used as an argument to the [`Config::strategy`] method.
#[non_exhaustive]
#[derive(PartialEq, Eq, Clone, Debug, Copy)]
pub enum Strategy {
    /// An indicator that the compilation strategy should be automatically
    /// selected.
    ///
    /// This is generally what you want for most projects and indicates that the
    /// `wasmtime` crate itself should make the decision about what the best
    /// code generator for a wasm module is.
    ///
    /// Currently this always defaults to Cranelift, but the default value may
    /// change over time.
    ///
    /// Note that which backend `Auto` resolves to additionally depends on
    /// which compiler backends (`cranelift`, `winch`) were enabled at compile
    /// time.
    Auto,

    /// Currently the default backend, Cranelift aims to be a reasonably fast
    /// code generator which generates high quality machine code.
    Cranelift,

    /// A low-latency baseline compiler for WebAssembly.
    /// For more details regarding ISA support and Wasm proposals support
    /// see <https://docs.wasmtime.dev/stability-tiers.html#current-tier-status>
    Winch,
}
3161
#[cfg(any(feature = "winch", feature = "cranelift"))]
impl Strategy {
    /// Resolves `Auto` into a concrete strategy based on which compiler
    /// backends were enabled at compile time, preferring Cranelift, or
    /// `None` when neither backend is available.
    fn not_auto(&self) -> Option<Strategy> {
        match self {
            Strategy::Auto if cfg!(feature = "cranelift") => Some(Strategy::Cranelift),
            Strategy::Auto if cfg!(feature = "winch") => Some(Strategy::Winch),
            Strategy::Auto => None,
            other => Some(*other),
        }
    }
}
3179
3180/// Possible garbage collector implementations for Wasm.
3181///
3182/// This is used as an argument to the [`Config::collector`] method.
3183///
3184/// The properties of Wasmtime's available collectors are summarized in the
3185/// following table:
3186///
3187/// | Collector | Collects Garbage[^1] | Latency[^2] | Throughput[^3] | Allocation Speed[^4] | Heap Utilization[^5] |
3188/// |-----------------------------|----------------------|-------------|----------------|----------------------|----------------------|
3189/// | `DeferredReferenceCounting` | Yes, but not cycles | 🙂 | 🙁 | 😐 | 😐 |
3190/// | `Null` | No | 🙂 | 🙂 | 🙂 | 🙂 |
3191///
3192/// [^1]: Whether or not the collector is capable of collecting garbage and cyclic garbage.
3193///
3194/// [^2]: How long the Wasm program is paused during garbage
3195/// collections. Shorter is better. In general, better latency implies
3196/// worse throughput and vice versa.
3197///
3198/// [^3]: How fast the Wasm program runs when using this collector. Roughly
3199/// equivalent to the number of Wasm instructions executed per
3200/// second. Faster is better. In general, better throughput implies worse
3201/// latency and vice versa.
3202///
3203/// [^4]: How fast can individual objects be allocated?
3204///
3205/// [^5]: How many objects can the collector fit into N bytes of memory? That
3206/// is, how much space for bookkeeping and metadata does this collector
3207/// require? Less space taken up by metadata means more space for
3208/// additional objects. Reference counts are larger than mark bits and
3209/// free lists are larger than bump pointers, for example.
#[non_exhaustive]
#[derive(PartialEq, Eq, Clone, Debug, Copy)]
pub enum Collector {
    /// An indicator that the garbage collector should be automatically
    /// selected.
    ///
    /// This is generally what you want for most projects and indicates that the
    /// `wasmtime` crate itself should make the decision about what the best
    /// collector for a wasm module is.
    ///
    /// Currently this always defaults to the deferred reference-counting
    /// collector, but the default value may change over time.
    ///
    /// Note that which collector `Auto` resolves to additionally depends on
    /// which collectors (`gc-drc`, `gc-null`) were enabled at compile time.
    Auto,

    /// The deferred reference-counting collector.
    ///
    /// A reference-counting collector, generally trading improved latency for
    /// worsened throughput. However, to avoid the largest overheads of
    /// reference counting, it avoids manipulating reference counts for Wasm
    /// objects on the stack. Instead, it will hold a reference count for an
    /// over-approximation of all objects that are currently on the stack, trace
    /// the stack during collection to find the precise set of on-stack roots,
    /// and decrement the reference count of any object that was in the
    /// over-approximation but not the precise set. This improves throughput,
    /// compared to "pure" reference counting, by performing many fewer
    /// refcount-increment and -decrement operations. The cost is the increased
    /// latency associated with tracing the stack.
    ///
    /// This collector cannot currently collect cycles; they will leak until the
    /// GC heap's store is dropped.
    DeferredReferenceCounting,

    /// The null collector.
    ///
    /// This collector does not actually collect any garbage. It simply
    /// allocates objects until it runs out of memory, at which point further
    /// object allocation attempts will trap.
    ///
    /// This collector is useful for incredibly short-running Wasm instances
    /// where additionally you would rather halt an over-allocating Wasm program
    /// than spend time collecting its garbage to allow it to keep running. It
    /// is also useful for measuring the overheads associated with other
    /// collectors, as this collector imposes as close to zero throughput and
    /// latency overhead as possible.
    Null,
}
3256
3257impl Default for Collector {
3258 fn default() -> Collector {
3259 Collector::Auto
3260 }
3261}
3262
#[cfg(feature = "gc")]
impl Collector {
    /// Resolves `Auto` to a concrete collector based on which GC feature
    /// flags were enabled at compile time (preferring the deferred
    /// reference-counting collector), returning `None` when no collector
    /// support was compiled in.
    fn not_auto(&self) -> Option<Collector> {
        match self {
            Collector::Auto => {
                if cfg!(feature = "gc-drc") {
                    Some(Collector::DeferredReferenceCounting)
                } else if cfg!(feature = "gc-null") {
                    Some(Collector::Null)
                } else {
                    None
                }
            }
            other => Some(*other),
        }
    }

    /// Like `not_auto`, but produces a descriptive error instead of `None`
    /// when the resolved collector's support wasn't compiled in.
    fn try_not_auto(&self) -> Result<Self> {
        // Each concrete collector has a pair of `cfg`-selected arms: the
        // "supported" arm when its feature is enabled and an error arm
        // otherwise. Exactly one of each pair exists per build.
        match self.not_auto() {
            #[cfg(feature = "gc-drc")]
            Some(c @ Collector::DeferredReferenceCounting) => Ok(c),
            #[cfg(not(feature = "gc-drc"))]
            Some(Collector::DeferredReferenceCounting) => bail!(
                "cannot create an engine using the deferred reference-counting \
                 collector because the `gc-drc` feature was not enabled at \
                 compile time",
            ),

            #[cfg(feature = "gc-null")]
            Some(c @ Collector::Null) => Ok(c),
            #[cfg(not(feature = "gc-null"))]
            Some(Collector::Null) => bail!(
                "cannot create an engine using the null collector because \
                 the `gc-null` feature was not enabled at compile time",
            ),

            // `not_auto` never returns `Auto` itself.
            Some(Collector::Auto) => unreachable!(),

            None => bail!(
                "cannot create an engine with GC support when none of the \
                 collectors are available; enable one of the following \
                 features: `gc-drc`, `gc-null`",
            ),
        }
    }
}
3309
/// Possible optimization levels for the Cranelift codegen backend.
#[non_exhaustive]
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum OptLevel {
    /// No optimizations performed, minimizes compilation time by disabling most
    /// optimizations.
    None,
    /// Generates the fastest possible code, but may take longer.
    Speed,
    /// Similar to [`OptLevel::Speed`], but also performs transformations aimed
    /// at reducing code size.
    SpeedAndSize,
}
3323
/// Possible register allocator algorithms for the Cranelift codegen backend.
///
/// The trade-off is between code quality ([`RegallocAlgorithm::Backtracking`])
/// and compilation speed ([`RegallocAlgorithm::SinglePass`]); see each
/// variant's documentation for details.
#[non_exhaustive]
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum RegallocAlgorithm {
    /// Generates the fastest possible code, but may take longer.
    ///
    /// This algorithm performs "backtracking", which means that it may
    /// undo its earlier work and retry as it discovers conflicts. This
    /// results in better register utilization, producing fewer spills
    /// and moves, but can cause super-linear compile runtime.
    Backtracking,
    /// Generates acceptable code very quickly.
    ///
    /// This algorithm performs a single pass through the code,
    /// guaranteed to work in linear time. (Note that the rest of
    /// Cranelift is not necessarily guaranteed to run in linear time,
    /// however.) It cannot undo earlier decisions, however, and it
    /// cannot foresee constraints or issues that may occur further
    /// ahead in the code, so the code may have more spills and moves as
    /// a result.
    ///
    /// > **Note**: This algorithm is not yet production-ready and has
    /// > historically had known problems. It is not recommended to enable this
    /// > algorithm for security-sensitive applications and the Wasmtime project
    /// > does not consider this configuration option for issuing security
    /// > advisories at this time.
    SinglePass,
}
3352
/// Select which profiling technique to support.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum ProfilingStrategy {
    /// No profiler support.
    None,

    /// Collect function name information as the "perf map" file format, used with `perf` on Linux.
    PerfMap,

    /// Collect profiling info for "jitdump" file format, used with `perf` on
    /// Linux.
    JitDump,

    /// Collect profiling info using the `ittapi` library, used with `VTune` on
    /// Linux.
    VTune,

    /// Support for profiling Pulley, Wasmtime's interpreter. Note that enabling
    /// this at runtime requires enabling the `profile-pulley` Cargo feature at
    /// compile time.
    Pulley,
}
3374
/// Selects how detailed information for wasm backtraces is handled.
#[derive(Debug, Clone, Copy)]
pub enum WasmBacktraceDetails {
    /// Support is unconditionally enabled and wasmtime will parse and read
    /// debug information.
    Enable,

    /// Support is disabled, and wasmtime will not parse debug information for
    /// backtrace details.
    Disable,

    /// Support for backtrace details is conditional on the
    /// `WASMTIME_BACKTRACE_DETAILS` environment variable.
    Environment,
}
3390
/// Describes the tri-state configuration of optional host features such as MPK
/// (memory protection keys) or `PAGEMAP_SCAN`.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)]
pub enum Enabled {
    /// Enable this feature if it's detected on the host system, otherwise leave
    /// it disabled.
    Auto,
    /// Enable this feature and fail configuration if the feature is not
    /// detected on the host system.
    Yes,
    /// Do not enable this feature, even if the host system supports it.
    No,
}
3403
3404/// Configuration options used with [`InstanceAllocationStrategy::Pooling`] to
3405/// change the behavior of the pooling instance allocator.
3406///
3407/// This structure has a builder-style API in the same manner as [`Config`] and
3408/// is configured with [`Config::allocation_strategy`].
3409///
3410/// Note that usage of the pooling allocator does not affect compiled
3411/// WebAssembly code. Compiled `*.cwasm` files, for example, are usable both
3412/// with and without the pooling allocator.
3413///
3414/// ## Advantages of Pooled Allocation
3415///
3416/// The main benefit of the pooling allocator is to make WebAssembly
3417/// instantiation both faster and more scalable in terms of parallelism.
3418/// Allocation is faster because virtual memory is already configured and ready
3419/// to go within the pool, there's no need to [`mmap`] (for example on Unix) a
3420/// new region and configure it with guard pages. By avoiding [`mmap`] this
3421/// avoids whole-process virtual memory locks which can improve scalability and
3422/// performance through avoiding this.
3423///
/// Additionally with pooled allocation it's possible to create "affine slots"
/// to a particular WebAssembly module or component over time. For example if
/// the same module is instantiated multiple times over time the pooling
/// allocator will, by default, attempt to reuse the same slot. This means that
/// the slot has been pre-configured and can retain virtual memory mappings for
/// a copy-on-write image, for example (see [`Config::memory_init_cow`] for
/// more information).
3430/// This means that in a steady state instance deallocation is a single
3431/// [`madvise`] to reset linear memory to its original contents followed by a
3432/// single (optional) [`mprotect`] during the next instantiation to shrink
3433/// memory back to its original size. Compared to non-pooled allocation this
3434/// avoids the need to [`mmap`] a new region of memory, [`munmap`] it, and
3435/// [`mprotect`] regions too.
3436///
3437/// Another benefit of pooled allocation is that it's possible to configure
3438/// things such that no virtual memory management is required at all in a steady
3439/// state. For example a pooling allocator can be configured with:
3440///
3441/// * [`Config::memory_init_cow`] disabled
3442/// * [`Config::memory_guard_size`] disabled
3443/// * [`Config::memory_reservation`] shrunk to minimal size
3444/// * [`PoolingAllocationConfig::table_keep_resident`] sufficiently large
3445/// * [`PoolingAllocationConfig::linear_memory_keep_resident`] sufficiently large
3446///
3447/// With all these options in place no virtual memory tricks are used at all and
3448/// everything is manually managed by Wasmtime (for example resetting memory is
3449/// a `memset(0)`). This is not as fast in a single-threaded scenario but can
3450/// provide benefits in high-parallelism situations as no virtual memory locks
/// or IPIs need to happen.
3452///
3453/// ## Disadvantages of Pooled Allocation
3454///
3455/// Despite the above advantages to instantiation performance the pooling
3456/// allocator is not enabled by default in Wasmtime. One reason is that the
3457/// performance advantages are not necessarily portable, for example while the
3458/// pooling allocator works on Windows it has not been tuned for performance on
3459/// Windows in the same way it has on Linux.
3460///
3461/// Additionally the main cost of the pooling allocator is that it requires a
3462/// very large reservation of virtual memory (on the order of most of the
3463/// addressable virtual address space). WebAssembly 32-bit linear memories in
3464/// Wasmtime are, by default 4G address space reservations with a small guard
3465/// region both before and after the linear memory. Memories in the pooling
3466/// allocator are contiguous which means that we only need a guard after linear
3467/// memory because the previous linear memory's slot post-guard is our own
3468/// pre-guard. This means that, by default, the pooling allocator uses roughly
3469/// 4G of virtual memory per WebAssembly linear memory slot. 4G of virtual
3470/// memory is 32 bits of a 64-bit address. Many 64-bit systems can only
3471/// actually use 48-bit addresses by default (although this can be extended on
3472/// architectures nowadays too), and of those 48 bits one of them is reserved
3473/// to indicate kernel-vs-userspace. This leaves 47-32=15 bits left,
3474/// meaning you can only have at most 32k slots of linear memories on many
3475/// systems by default. This is a relatively small number and shows how the
3476/// pooling allocator can quickly exhaust all of virtual memory.
3477///
3478/// Another disadvantage of the pooling allocator is that it may keep memory
3479/// alive when nothing is using it. A previously used slot for an instance might
3480/// have paged-in memory that will not get paged out until the
3481/// [`Engine`](crate::Engine) owning the pooling allocator is dropped. While
3482/// suitable for some applications this behavior may not be suitable for all
3483/// applications.
3484///
3485/// Finally the last disadvantage of the pooling allocator is that the
3486/// configuration values for the maximum number of instances, memories, tables,
3487/// etc, must all be fixed up-front. There's not always a clear answer as to
3488/// what these values should be so not all applications may be able to work
3489/// with this constraint.
3490///
3491/// [`madvise`]: https://man7.org/linux/man-pages/man2/madvise.2.html
3492/// [`mprotect`]: https://man7.org/linux/man-pages/man2/mprotect.2.html
3493/// [`mmap`]: https://man7.org/linux/man-pages/man2/mmap.2.html
3494/// [`munmap`]: https://man7.org/linux/man-pages/man2/munmap.2.html
#[cfg(feature = "pooling-allocator")]
#[derive(Debug, Clone, Default)]
pub struct PoolingAllocationConfig {
    // The raw runtime configuration this builder populates; handed to the
    // pooling instance allocator when the engine is created.
    config: crate::runtime::vm::PoolingInstanceAllocatorConfig,
}
3500
3501#[cfg(feature = "pooling-allocator")]
3502impl PoolingAllocationConfig {
3503 /// Returns a new configuration builder with all default settings
3504 /// configured.
3505 pub fn new() -> PoolingAllocationConfig {
3506 PoolingAllocationConfig::default()
3507 }
3508
3509 /// Configures the maximum number of "unused warm slots" to retain in the
3510 /// pooling allocator.
3511 ///
3512 /// The pooling allocator operates over slots to allocate from, and each
3513 /// slot is considered "cold" if it's never been used before or "warm" if
3514 /// it's been used by some module in the past. Slots in the pooling
3515 /// allocator additionally track an "affinity" flag to a particular core
3516 /// wasm module. When a module is instantiated into a slot then the slot is
3517 /// considered affine to that module, even after the instance has been
3518 /// deallocated.
3519 ///
3520 /// When a new instance is created then a slot must be chosen, and the
3521 /// current algorithm for selecting a slot is:
3522 ///
3523 /// * If there are slots that are affine to the module being instantiated,
3524 /// then the most recently used slot is selected to be allocated from.
3525 /// This is done to improve reuse of resources such as memory mappings and
3526 /// additionally try to benefit from temporal locality for things like
3527 /// caches.
3528 ///
3529 /// * Otherwise if there are more than N affine slots to other modules, then
3530 /// one of those affine slots is chosen to be allocated. The slot chosen
3531 /// is picked on a least-recently-used basis.
3532 ///
3533 /// * Finally, if there are less than N affine slots to other modules, then
3534 /// the non-affine slots are allocated from.
3535 ///
3536 /// This setting, `max_unused_warm_slots`, is the value for N in the above
3537 /// algorithm. The purpose of this setting is to have a knob over the RSS
3538 /// impact of "unused slots" for a long-running wasm server.
3539 ///
3540 /// If this setting is set to 0, for example, then affine slots are
3541 /// aggressively reused on a least-recently-used basis. A "cold" slot is
3542 /// only used if there are no affine slots available to allocate from. This
3543 /// means that the set of slots used over the lifetime of a program is the
3544 /// same as the maximum concurrent number of wasm instances.
3545 ///
3546 /// If this setting is set to infinity, however, then cold slots are
3547 /// prioritized to be allocated from. This means that the set of slots used
3548 /// over the lifetime of a program will approach
3549 /// [`PoolingAllocationConfig::total_memories`], or the maximum number of
3550 /// slots in the pooling allocator.
3551 ///
3552 /// Wasmtime does not aggressively decommit all resources associated with a
3553 /// slot when the slot is not in use. For example the
3554 /// [`PoolingAllocationConfig::linear_memory_keep_resident`] option can be
3555 /// used to keep memory associated with a slot, even when it's not in use.
3556 /// This means that the total set of used slots in the pooling instance
3557 /// allocator can impact the overall RSS usage of a program.
3558 ///
3559 /// The default value for this option is `100`.
3560 pub fn max_unused_warm_slots(&mut self, max: u32) -> &mut Self {
3561 self.config.max_unused_warm_slots = max;
3562 self
3563 }
3564
3565 /// The target number of decommits to do per batch.
3566 ///
3567 /// This is not precise, as we can queue up decommits at times when we
3568 /// aren't prepared to immediately flush them, and so we may go over this
3569 /// target size occasionally.
3570 ///
3571 /// A batch size of one effectively disables batching.
3572 ///
3573 /// Defaults to `1`.
3574 pub fn decommit_batch_size(&mut self, batch_size: usize) -> &mut Self {
3575 self.config.decommit_batch_size = batch_size;
3576 self
3577 }
3578
3579 /// How much memory, in bytes, to keep resident for async stacks allocated
3580 /// with the pooling allocator.
3581 ///
3582 /// When [`Config::async_stack_zeroing`] is enabled then Wasmtime will reset
3583 /// the contents of async stacks back to zero upon deallocation. This option
3584 /// can be used to perform the zeroing operation with `memset` up to a
3585 /// certain threshold of bytes instead of using system calls to reset the
3586 /// stack to zero.
3587 ///
3588 /// Note that when using this option the memory with async stacks will
3589 /// never be decommitted.
3590 #[cfg(feature = "async")]
3591 pub fn async_stack_keep_resident(&mut self, size: usize) -> &mut Self {
3592 self.config.async_stack_keep_resident = size;
3593 self
3594 }
3595
3596 /// How much memory, in bytes, to keep resident for each linear memory
3597 /// after deallocation.
3598 ///
3599 /// This option is only applicable on Linux and has no effect on other
3600 /// platforms.
3601 ///
3602 /// By default Wasmtime will use `madvise` to reset the entire contents of
3603 /// linear memory back to zero when a linear memory is deallocated. This
3604 /// option can be used to use `memset` instead to set memory back to zero
3605 /// which can, in some configurations, reduce the number of page faults
3606 /// taken when a slot is reused.
3607 pub fn linear_memory_keep_resident(&mut self, size: usize) -> &mut Self {
3608 self.config.linear_memory_keep_resident = size;
3609 self
3610 }
3611
3612 /// How much memory, in bytes, to keep resident for each table after
3613 /// deallocation.
3614 ///
3615 /// This option is only applicable on Linux and has no effect on other
3616 /// platforms.
3617 ///
3618 /// This option is the same as
3619 /// [`PoolingAllocationConfig::linear_memory_keep_resident`] except that it
3620 /// is applicable to tables instead.
3621 pub fn table_keep_resident(&mut self, size: usize) -> &mut Self {
3622 self.config.table_keep_resident = size;
3623 self
3624 }
3625
3626 /// The maximum number of concurrent component instances supported (default
3627 /// is `1000`).
3628 ///
3629 /// This provides an upper-bound on the total size of component
3630 /// metadata-related allocations, along with
3631 /// [`PoolingAllocationConfig::max_component_instance_size`]. The upper bound is
3632 ///
3633 /// ```text
3634 /// total_component_instances * max_component_instance_size
3635 /// ```
3636 ///
3637 /// where `max_component_instance_size` is rounded up to the size and alignment
3638 /// of the internal representation of the metadata.
3639 pub fn total_component_instances(&mut self, count: u32) -> &mut Self {
3640 self.config.limits.total_component_instances = count;
3641 self
3642 }
3643
3644 /// The maximum size, in bytes, allocated for a component instance's
3645 /// `VMComponentContext` metadata as well as the aggregate size of this
3646 /// component's core instances `VMContext` metadata.
3647 ///
3648 /// The [`wasmtime::component::Instance`][crate::component::Instance] type
3649 /// has a static size but its internal `VMComponentContext` is dynamically
3650 /// sized depending on the component being instantiated. This size limit
3651 /// loosely correlates to the size of the component, taking into account
3652 /// factors such as:
3653 ///
3654 /// * number of lifted and lowered functions,
3655 /// * number of memories
3656 /// * number of inner instances
3657 /// * number of resources
3658 ///
3659 /// If the allocated size per instance is too small then instantiation of a
3660 /// module will fail at runtime with an error indicating how many bytes were
3661 /// needed.
3662 ///
3663 /// In addition to the memory in the runtime for the component itself,
3664 /// components contain one or more core module instances. Each of these
3665 /// require some memory in the runtime as described in
3666 /// [`PoolingAllocationConfig::max_core_instance_size`]. The limit here
3667 /// applies against the sum of all of these individual allocations.
3668 ///
3669 /// The default value for this is 1MiB.
3670 ///
3671 /// This provides an upper-bound on the total size of all component's
3672 /// metadata-related allocations (for both the component and its embedded
3673 /// core module instances), along with
3674 /// [`PoolingAllocationConfig::total_component_instances`]. The upper bound is
3675 ///
3676 /// ```text
3677 /// total_component_instances * max_component_instance_size
3678 /// ```
3679 ///
3680 /// where `max_component_instance_size` is rounded up to the size and alignment
3681 /// of the internal representation of the metadata.
3682 pub fn max_component_instance_size(&mut self, size: usize) -> &mut Self {
3683 self.config.limits.component_instance_size = size;
3684 self
3685 }
3686
3687 /// The maximum number of core instances a single component may contain
3688 /// (default is unlimited).
3689 ///
3690 /// This method (along with
3691 /// [`PoolingAllocationConfig::max_memories_per_component`],
3692 /// [`PoolingAllocationConfig::max_tables_per_component`], and
3693 /// [`PoolingAllocationConfig::max_component_instance_size`]) allows you to cap
3694 /// the amount of resources a single component allocation consumes.
3695 ///
3696 /// If a component will instantiate more core instances than `count`, then
3697 /// the component will fail to instantiate.
3698 pub fn max_core_instances_per_component(&mut self, count: u32) -> &mut Self {
3699 self.config.limits.max_core_instances_per_component = count;
3700 self
3701 }
3702
3703 /// The maximum number of Wasm linear memories that a single component may
3704 /// transitively contain (default is unlimited).
3705 ///
3706 /// This method (along with
3707 /// [`PoolingAllocationConfig::max_core_instances_per_component`],
3708 /// [`PoolingAllocationConfig::max_tables_per_component`], and
3709 /// [`PoolingAllocationConfig::max_component_instance_size`]) allows you to cap
3710 /// the amount of resources a single component allocation consumes.
3711 ///
3712 /// If a component transitively contains more linear memories than `count`,
3713 /// then the component will fail to instantiate.
3714 pub fn max_memories_per_component(&mut self, count: u32) -> &mut Self {
3715 self.config.limits.max_memories_per_component = count;
3716 self
3717 }
3718
    /// The maximum number of tables that a single component may transitively
    /// contain (default is unlimited).
    ///
    /// This method (along with
    /// [`PoolingAllocationConfig::max_core_instances_per_component`],
    /// [`PoolingAllocationConfig::max_memories_per_component`], and
    /// [`PoolingAllocationConfig::max_component_instance_size`]) allows you to cap
    /// the amount of resources a single component allocation consumes.
    ///
    /// If a component transitively contains more tables than `count`, then
    /// the component will fail to instantiate.
    pub fn max_tables_per_component(&mut self, count: u32) -> &mut Self {
        self.config.limits.max_tables_per_component = count;
        self
    }
3734
3735 /// The maximum number of concurrent Wasm linear memories supported (default
3736 /// is `1000`).
3737 ///
3738 /// This value has a direct impact on the amount of memory allocated by the pooling
3739 /// instance allocator.
3740 ///
3741 /// The pooling instance allocator allocates a memory pool, where each entry
3742 /// in the pool contains the reserved address space for each linear memory
3743 /// supported by an instance.
3744 ///
3745 /// The memory pool will reserve a large quantity of host process address
3746 /// space to elide the bounds checks required for correct WebAssembly memory
3747 /// semantics. Even with 64-bit address spaces, the address space is limited
3748 /// when dealing with a large number of linear memories.
3749 ///
3750 /// For example, on Linux x86_64, the userland address space limit is 128
3751 /// TiB. That might seem like a lot, but each linear memory will *reserve* 6
3752 /// GiB of space by default.
3753 pub fn total_memories(&mut self, count: u32) -> &mut Self {
3754 self.config.limits.total_memories = count;
3755 self
3756 }
3757
3758 /// The maximum number of concurrent tables supported (default is `1000`).
3759 ///
3760 /// This value has a direct impact on the amount of memory allocated by the
3761 /// pooling instance allocator.
3762 ///
3763 /// The pooling instance allocator allocates a table pool, where each entry
3764 /// in the pool contains the space needed for each WebAssembly table
3765 /// supported by an instance (see `table_elements` to control the size of
3766 /// each table).
3767 pub fn total_tables(&mut self, count: u32) -> &mut Self {
3768 self.config.limits.total_tables = count;
3769 self
3770 }
3771
3772 /// The maximum number of execution stacks allowed for asynchronous
3773 /// execution, when enabled (default is `1000`).
3774 ///
3775 /// This value has a direct impact on the amount of memory allocated by the
3776 /// pooling instance allocator.
3777 #[cfg(feature = "async")]
3778 pub fn total_stacks(&mut self, count: u32) -> &mut Self {
3779 self.config.limits.total_stacks = count;
3780 self
3781 }
3782
3783 /// The maximum number of concurrent core instances supported (default is
3784 /// `1000`).
3785 ///
3786 /// This provides an upper-bound on the total size of core instance
3787 /// metadata-related allocations, along with
3788 /// [`PoolingAllocationConfig::max_core_instance_size`]. The upper bound is
3789 ///
3790 /// ```text
3791 /// total_core_instances * max_core_instance_size
3792 /// ```
3793 ///
3794 /// where `max_core_instance_size` is rounded up to the size and alignment of
3795 /// the internal representation of the metadata.
3796 pub fn total_core_instances(&mut self, count: u32) -> &mut Self {
3797 self.config.limits.total_core_instances = count;
3798 self
3799 }
3800
3801 /// The maximum size, in bytes, allocated for a core instance's `VMContext`
3802 /// metadata.
3803 ///
3804 /// The [`Instance`][crate::Instance] type has a static size but its
3805 /// `VMContext` metadata is dynamically sized depending on the module being
3806 /// instantiated. This size limit loosely correlates to the size of the Wasm
3807 /// module, taking into account factors such as:
3808 ///
3809 /// * number of functions
3810 /// * number of globals
3811 /// * number of memories
3812 /// * number of tables
3813 /// * number of function types
3814 ///
3815 /// If the allocated size per instance is too small then instantiation of a
3816 /// module will fail at runtime with an error indicating how many bytes were
3817 /// needed.
3818 ///
3819 /// The default value for this is 1MiB.
3820 ///
3821 /// This provides an upper-bound on the total size of core instance
3822 /// metadata-related allocations, along with
3823 /// [`PoolingAllocationConfig::total_core_instances`]. The upper bound is
3824 ///
3825 /// ```text
3826 /// total_core_instances * max_core_instance_size
3827 /// ```
3828 ///
3829 /// where `max_core_instance_size` is rounded up to the size and alignment of
3830 /// the internal representation of the metadata.
3831 pub fn max_core_instance_size(&mut self, size: usize) -> &mut Self {
3832 self.config.limits.core_instance_size = size;
3833 self
3834 }
3835
3836 /// The maximum number of defined tables for a core module (default is `1`).
3837 ///
3838 /// This value controls the capacity of the `VMTableDefinition` table in
3839 /// each instance's `VMContext` structure.
3840 ///
3841 /// The allocated size of the table will be `tables *
3842 /// sizeof(VMTableDefinition)` for each instance regardless of how many
3843 /// tables are defined by an instance's module.
3844 pub fn max_tables_per_module(&mut self, tables: u32) -> &mut Self {
3845 self.config.limits.max_tables_per_module = tables;
3846 self
3847 }
3848
3849 /// The maximum table elements for any table defined in a module (default is
3850 /// `20000`).
3851 ///
3852 /// If a table's minimum element limit is greater than this value, the
3853 /// module will fail to instantiate.
3854 ///
3855 /// If a table's maximum element limit is unbounded or greater than this
3856 /// value, the maximum will be `table_elements` for the purpose of any
3857 /// `table.grow` instruction.
3858 ///
3859 /// This value is used to reserve the maximum space for each supported
3860 /// table; table elements are pointer-sized in the Wasmtime runtime.
3861 /// Therefore, the space reserved for each instance is `tables *
3862 /// table_elements * sizeof::<*const ()>`.
3863 pub fn table_elements(&mut self, elements: usize) -> &mut Self {
3864 self.config.limits.table_elements = elements;
3865 self
3866 }
3867
3868 /// The maximum number of defined linear memories for a module (default is
3869 /// `1`).
3870 ///
3871 /// This value controls the capacity of the `VMMemoryDefinition` table in
3872 /// each core instance's `VMContext` structure.
3873 ///
3874 /// The allocated size of the table will be `memories *
3875 /// sizeof(VMMemoryDefinition)` for each core instance regardless of how
3876 /// many memories are defined by the core instance's module.
3877 pub fn max_memories_per_module(&mut self, memories: u32) -> &mut Self {
3878 self.config.limits.max_memories_per_module = memories;
3879 self
3880 }
3881
    /// The maximum byte size that any WebAssembly linear memory may grow to.
    ///
    /// This option defaults to 4 GiB meaning that for 32-bit linear memories
    /// there are no restrictions. 64-bit linear memories will not be allowed to
    /// grow beyond 4 GiB by default.
    ///
    /// If a memory's minimum size is greater than this value, the module will
    /// fail to instantiate.
    ///
    /// If a memory's maximum size is unbounded or greater than this value, the
    /// maximum will be `max_memory_size` for the purpose of any `memory.grow`
    /// instruction.
    ///
    /// This value is used to control the maximum accessible space for each
    /// linear memory of a core instance. This can be thought of as a simple
    /// mechanism like [`Store::limiter`](crate::Store::limiter) to limit memory
    /// at runtime. This value can also affect striping/coloring behavior when
    /// used in conjunction with
    /// [`memory_protection_keys`](PoolingAllocationConfig::memory_protection_keys).
    ///
    /// The virtual memory reservation size of each linear memory is controlled
    /// by the [`Config::memory_reservation`] setting and this method's
    /// configuration cannot exceed [`Config::memory_reservation`].
    pub fn max_memory_size(&mut self, bytes: usize) -> &mut Self {
        self.config.limits.max_memory_size = bytes;
        self
    }
3909
    /// Configures whether memory protection keys (MPK) should be used for more
    /// efficient layout of pool-allocated memories.
    ///
    /// When using the pooling allocator (see [`Config::allocation_strategy`],
    /// [`InstanceAllocationStrategy::Pooling`]), memory protection keys can
    /// reduce the total amount of allocated virtual memory by eliminating guard
    /// regions between WebAssembly memories in the pool. It does so by
    /// "coloring" memory regions with different memory keys and setting which
    /// regions are accessible each time execution switches from host to guest
    /// (or vice versa).
    ///
    /// Leveraging MPK requires configuring a smaller-than-default
    /// [`max_memory_size`](PoolingAllocationConfig::max_memory_size) to enable
    /// this coloring/striping behavior. For example embeddings might want to
    /// reduce the default 4G allowance to 128M.
    ///
    /// MPK is only available on Linux (called `pku` there) and recent x86
    /// systems; we check for MPK support at runtime by examining the `CPUID`
    /// register. This configuration setting can be in three states:
    ///
    /// - `auto`: if MPK support is available the guard regions are removed; if
    ///   not, the guard regions remain
    /// - `yes`: use MPK to eliminate guard regions; fail if MPK is not
    ///   supported
    /// - `no`: never use MPK
    ///
    /// By default this value is `no`, but may become `auto` in future
    /// releases.
    ///
    /// __WARNING__: this configuration option is still experimental--use at
    /// your own risk! MPK uses kernel and CPU features to protect memory
    /// regions; you may observe segmentation faults if anything is
    /// misconfigured.
    #[cfg(feature = "memory-protection-keys")]
    pub fn memory_protection_keys(&mut self, enable: Enabled) -> &mut Self {
        self.config.memory_protection_keys = enable;
        self
    }
3948
3949 /// Sets an upper limit on how many memory protection keys (MPK) Wasmtime
3950 /// will use.
3951 ///
3952 /// This setting is only applicable when
3953 /// [`PoolingAllocationConfig::memory_protection_keys`] is set to `enable`
3954 /// or `auto`. Configuring this above the HW and OS limits (typically 15)
3955 /// has no effect.
3956 ///
3957 /// If multiple Wasmtime engines are used in the same process, note that all
3958 /// engines will share the same set of allocated keys; this setting will
3959 /// limit how many keys are allocated initially and thus available to all
3960 /// other engines.
3961 #[cfg(feature = "memory-protection-keys")]
3962 pub fn max_memory_protection_keys(&mut self, max: usize) -> &mut Self {
3963 self.config.max_memory_protection_keys = max;
3964 self
3965 }
3966
3967 /// Check if memory protection keys (MPK) are available on the current host.
3968 ///
3969 /// This is a convenience method for determining MPK availability using the
3970 /// same method that [`Enabled::Auto`] does. See
3971 /// [`PoolingAllocationConfig::memory_protection_keys`] for more
3972 /// information.
3973 #[cfg(feature = "memory-protection-keys")]
3974 pub fn are_memory_protection_keys_available() -> bool {
3975 crate::runtime::vm::mpk::is_supported()
3976 }
3977
3978 /// The maximum number of concurrent GC heaps supported (default is `1000`).
3979 ///
3980 /// This value has a direct impact on the amount of memory allocated by the
3981 /// pooling instance allocator.
3982 ///
3983 /// The pooling instance allocator allocates a GC heap pool, where each
3984 /// entry in the pool contains the space needed for each GC heap used by a
3985 /// store.
3986 #[cfg(feature = "gc")]
3987 pub fn total_gc_heaps(&mut self, count: u32) -> &mut Self {
3988 self.config.limits.total_gc_heaps = count;
3989 self
3990 }
3991
3992 /// Configures whether the Linux-specific [`PAGEMAP_SCAN` ioctl][ioctl] is
3993 /// used to help reset linear memory.
3994 ///
3995 /// When [`Self::linear_memory_keep_resident`] or
3996 /// [`Self::table_keep_resident`] options are configured to nonzero values
3997 /// the default behavior is to `memset` the lowest addresses of a table or
3998 /// memory back to their original contents. With the `PAGEMAP_SCAN` ioctl on
3999 /// Linux this can be done to more intelligently scan for resident pages in
4000 /// the region and only reset those pages back to their original contents
4001 /// with `memset` rather than assuming the low addresses are all resident.
4002 ///
4003 /// This ioctl has the potential to provide a number of performance benefits
4004 /// in high-reuse and high concurrency scenarios. Notably this enables
4005 /// Wasmtime to scan the entire region of WebAssembly linear memory and
4006 /// manually reset memory back to its original contents, up to
4007 /// [`Self::linear_memory_keep_resident`] bytes, possibly skipping an
4008 /// `madvise` entirely. This can be more efficient by avoiding removing
4009 /// pages from the address space entirely and additionally ensuring that
4010 /// future use of the linear memory doesn't incur page faults as the pages
4011 /// remain resident.
4012 ///
4013 /// At this time this configuration option is still being evaluated as to
4014 /// how appropriate it is for all use cases. It currently defaults to
4015 /// `no` or disabled but may change to `auto`, enable if supported, in the
4016 /// future. This option is only supported on Linux and requires a kernel
4017 /// version of 6.7 or higher.
4018 ///
4019 /// [ioctl]: https://www.man7.org/linux/man-pages/man2/PAGEMAP_SCAN.2const.html
4020 pub fn pagemap_scan(&mut self, enable: Enabled) -> &mut Self {
4021 self.config.pagemap_scan = enable;
4022 self
4023 }
4024
4025 /// Tests whether [`Self::pagemap_scan`] is available or not on the host
4026 /// system.
4027 pub fn is_pagemap_scan_available() -> bool {
4028 crate::runtime::vm::PoolingInstanceAllocatorConfig::is_pagemap_scan_available()
4029 }
4030}
4031
/// Attempts to detect whether the host CPU supports the named target
/// `feature`.
///
/// Returns `Some(enabled)` when detection for `feature` is implemented on the
/// current host architecture, and `None` when the feature name is not
/// recognized by the detection logic below.
#[cfg(feature = "std")]
fn detect_host_feature(feature: &str) -> Option<bool> {
    // aarch64: stable std feature detection macros are available.
    #[cfg(target_arch = "aarch64")]
    {
        return match feature {
            "lse" => Some(std::arch::is_aarch64_feature_detected!("lse")),
            "paca" => Some(std::arch::is_aarch64_feature_detected!("paca")),
            "fp16" => Some(std::arch::is_aarch64_feature_detected!("fp16")),

            _ => None,
        };
    }

    // `is_s390x_feature_detected` is nightly only for now, so use the
    // STORE FACILITY LIST EXTENDED instruction as a temporary measure.
    #[cfg(target_arch = "s390x")]
    {
        // STFLE fills `facility_list` with a bitmap of installed facilities;
        // r0 holds the number of doublewords available minus one.
        let mut facility_list: [u64; 4] = [0; 4];
        unsafe {
            core::arch::asm!(
                "stfle 0({})",
                in(reg_addr) facility_list.as_mut_ptr() ,
                inout("r0") facility_list.len() as u64 - 1 => _,
                options(nostack)
            );
        }
        let get_facility_bit = |n: usize| {
            // NOTE: bits are numbered from the left.
            facility_list[n / 64] & (1 << (63 - (n % 64))) != 0
        };

        return match feature {
            "mie3" => Some(get_facility_bit(61)),
            "mie4" => Some(get_facility_bit(84)),
            "vxrs_ext2" => Some(get_facility_bit(148)),
            "vxrs_ext3" => Some(get_facility_bit(198)),

            _ => None,
        };
    }

    #[cfg(target_arch = "riscv64")]
    {
        return match feature {
            // `is_riscv64_feature_detected` is not yet stable, so we cannot
            // use it. For now lie and say all features are always found to
            // keep tests working.
            _ => Some(true),
        };
    }

    // x86_64: stable std feature detection macros are available.
    #[cfg(target_arch = "x86_64")]
    {
        return match feature {
            "cmpxchg16b" => Some(std::is_x86_feature_detected!("cmpxchg16b")),
            "sse3" => Some(std::is_x86_feature_detected!("sse3")),
            "ssse3" => Some(std::is_x86_feature_detected!("ssse3")),
            "sse4.1" => Some(std::is_x86_feature_detected!("sse4.1")),
            "sse4.2" => Some(std::is_x86_feature_detected!("sse4.2")),
            "popcnt" => Some(std::is_x86_feature_detected!("popcnt")),
            "avx" => Some(std::is_x86_feature_detected!("avx")),
            "avx2" => Some(std::is_x86_feature_detected!("avx2")),
            "fma" => Some(std::is_x86_feature_detected!("fma")),
            "bmi1" => Some(std::is_x86_feature_detected!("bmi1")),
            "bmi2" => Some(std::is_x86_feature_detected!("bmi2")),
            "avx512bitalg" => Some(std::is_x86_feature_detected!("avx512bitalg")),
            "avx512dq" => Some(std::is_x86_feature_detected!("avx512dq")),
            "avx512f" => Some(std::is_x86_feature_detected!("avx512f")),
            "avx512vl" => Some(std::is_x86_feature_detected!("avx512vl")),
            "avx512vbmi" => Some(std::is_x86_feature_detected!("avx512vbmi")),
            "lzcnt" => Some(std::is_x86_feature_detected!("lzcnt")),

            _ => None,
        };
    }

    // Fallback for host architectures with no detection support above.
    #[allow(
        unreachable_code,
        reason = "reachable or not depending on if a target above matches"
    )]
    {
        let _ = feature;
        return None;
    }
}