// wasmtime/config.rs
1use crate::prelude::*;
2use alloc::sync::Arc;
3use bitflags::Flags;
4use core::fmt;
5use core::str::FromStr;
6#[cfg(any(feature = "cache", feature = "cranelift", feature = "winch"))]
7use std::path::Path;
8use wasmparser::WasmFeatures;
9use wasmtime_environ::{ConfigTunables, TripleExt, Tunables};
10
11#[cfg(feature = "runtime")]
12use crate::memory::MemoryCreator;
13#[cfg(feature = "runtime")]
14use crate::profiling_agent::{self, ProfilingAgent};
15#[cfg(feature = "runtime")]
16use crate::runtime::vm::{
17 GcRuntime, InstanceAllocator, OnDemandInstanceAllocator, RuntimeMemoryCreator,
18};
19#[cfg(feature = "runtime")]
20use crate::trampoline::MemoryCreatorProxy;
21
22#[cfg(feature = "async")]
23use crate::stack::{StackCreator, StackCreatorProxy};
24#[cfg(feature = "async")]
25use wasmtime_fiber::RuntimeFiberStackCreator;
26
27#[cfg(feature = "runtime")]
28pub use crate::runtime::code_memory::CustomCodeMemory;
29#[cfg(feature = "cache")]
30pub use wasmtime_cache::{Cache, CacheConfig};
31#[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
32pub use wasmtime_environ::CacheStore;
33
/// Represents the module instance allocation strategy to use.
///
/// This enum is `#[non_exhaustive]`: additional strategies may be added in
/// future releases without a semver-breaking change.
#[derive(Clone)]
#[non_exhaustive]
pub enum InstanceAllocationStrategy {
    /// The on-demand instance allocation strategy.
    ///
    /// Resources related to a module instance are allocated at instantiation time and
    /// immediately deallocated when the `Store` referencing the instance is dropped.
    ///
    /// This is the default allocation strategy for Wasmtime.
    OnDemand,
    /// The pooling instance allocation strategy.
    ///
    /// A pool of resources is created in advance and module instantiation reuses resources
    /// from the pool. Resources are returned to the pool when the `Store` referencing the instance
    /// is dropped.
    #[cfg(feature = "pooling-allocator")]
    Pooling(PoolingAllocationConfig),
}
53
54impl InstanceAllocationStrategy {
55 /// The default pooling instance allocation strategy.
56 #[cfg(feature = "pooling-allocator")]
57 pub fn pooling() -> Self {
58 Self::Pooling(Default::default())
59 }
60}
61
62impl Default for InstanceAllocationStrategy {
63 fn default() -> Self {
64 Self::OnDemand
65 }
66}
67
#[cfg(feature = "pooling-allocator")]
impl From<PoolingAllocationConfig> for InstanceAllocationStrategy {
    /// Wraps a pooling configuration in [`InstanceAllocationStrategy::Pooling`].
    fn from(cfg: PoolingAllocationConfig) -> Self {
        Self::Pooling(cfg)
    }
}
74
/// Configure the strategy used for versioning in serializing and deserializing [`crate::Module`].
#[derive(Clone)]
pub enum ModuleVersionStrategy {
    /// Use the wasmtime crate's Cargo package version.
    WasmtimeVersion,
    /// Use a custom version string. Must be at most 255 bytes.
    Custom(String),
    /// Emit no version string in serialization, and accept all version strings in deserialization.
    None,
}
85
86impl Default for ModuleVersionStrategy {
87 fn default() -> Self {
88 ModuleVersionStrategy::WasmtimeVersion
89 }
90}
91
92impl core::hash::Hash for ModuleVersionStrategy {
93 fn hash<H: core::hash::Hasher>(&self, hasher: &mut H) {
94 match self {
95 Self::WasmtimeVersion => env!("CARGO_PKG_VERSION").hash(hasher),
96 Self::Custom(s) => s.hash(hasher),
97 Self::None => {}
98 };
99 }
100}
101
/// Global configuration options used to create an [`Engine`](crate::Engine)
/// and customize its behavior.
///
/// This structure exposes a builder-like interface and is primarily consumed by
/// [`Engine::new()`](crate::Engine::new).
///
/// The validation of `Config` is deferred until the engine is being built, thus
/// a problematic config may cause `Engine::new` to fail.
///
/// # Defaults
///
/// The `Default` trait implementation and the return value from
/// [`Config::new()`] are the same and represent the default set of
/// configuration for an engine. The exact set of defaults will differ based on
/// properties such as enabled Cargo features at compile time and the configured
/// target (see [`Config::target`]). Configuration options document their
/// default values and what the conditional value of the default is where
/// applicable.
#[derive(Clone)]
pub struct Config {
    /// Compiler-specific configuration: strategy, settings, and flags.
    #[cfg(any(feature = "cranelift", feature = "winch"))]
    compiler_config: CompilerConfig,
    /// Compilation target triple; `None` means the host triple is used
    /// (see [`Config::target`]).
    target: Option<target_lexicon::Triple>,
    /// Which garbage-collector implementation to use.
    #[cfg(feature = "gc")]
    collector: Collector,
    /// Profiling agent selection; `ProfilingStrategy::None` by default.
    profiling_strategy: ProfilingStrategy,
    /// Tunable knobs set by various `Config` methods (e.g.
    /// [`Config::consume_fuel`], [`Config::epoch_interruption`]).
    tunables: ConfigTunables,

    /// Compilation cache, if one has been configured.
    #[cfg(feature = "cache")]
    pub(crate) cache: Option<Cache>,
    /// Embedder-provided linear-memory creator, if any.
    #[cfg(feature = "runtime")]
    pub(crate) mem_creator: Option<Arc<dyn RuntimeMemoryCreator>>,
    /// Embedder-provided executable-code memory provider, if any.
    #[cfg(feature = "runtime")]
    pub(crate) custom_code_memory: Option<Arc<dyn CustomCodeMemory>>,
    /// How instance resources are allocated: on-demand or pooled.
    pub(crate) allocation_strategy: InstanceAllocationStrategy,
    /// Maximum stack space, in bytes, available to executing wasm
    /// (see [`Config::max_wasm_stack`]); 512 KiB by default.
    pub(crate) max_wasm_stack: usize,
    /// Explicitly enabled features via `Config::wasm_*` methods. This is a
    /// signal that the embedder specifically wants something turned on
    /// regardless of the defaults that Wasmtime might otherwise have enabled.
    ///
    /// Note that this, and `disabled_features` below, start as the empty set of
    /// features to only track explicit user requests.
    pub(crate) enabled_features: WasmFeatures,
    /// Same as `enabled_features`, but for those that are explicitly disabled.
    pub(crate) disabled_features: WasmFeatures,
    /// Whether backtraces are captured for errors/traps
    /// (see [`Config::wasm_backtrace`]); `true` by default.
    pub(crate) wasm_backtrace: bool,
    /// Whether `WASMTIME_BACKTRACE_DETAILS` was consulted to decide backtrace
    /// detail parsing (see [`Config::wasm_backtrace_details`]).
    pub(crate) wasm_backtrace_details_env_used: bool,
    /// Whether native unwind info (e.g. `.eh_frame`) is generated; `None`
    /// means the default applies (see [`Config::native_unwind_info`]).
    pub(crate) native_unwind_info: Option<bool>,
    /// Size, in bytes, of stacks allocated for async execution
    /// (see [`Config::async_stack_size`]); 2 MiB by default.
    #[cfg(any(feature = "async", feature = "stack-switching"))]
    pub(crate) async_stack_size: usize,
    /// Whether async stacks are zeroed before (re)use.
    #[cfg(feature = "async")]
    pub(crate) async_stack_zeroing: bool,
    /// Embedder-provided fiber-stack creator, if any.
    #[cfg(feature = "async")]
    pub(crate) stack_creator: Option<Arc<dyn RuntimeFiberStackCreator>>,
    /// Whether the async APIs must be used with this config
    /// (see [`Config::async_support`]).
    pub(crate) async_support: bool,
    /// Versioning scheme used when (de)serializing modules.
    pub(crate) module_version: ModuleVersionStrategy,
    /// Whether compilation may proceed in parallel (disabled under MIRI).
    pub(crate) parallel_compilation: bool,
    // NOTE(review): name suggests a size threshold below which memory init
    // images are guaranteed dense — confirm against the consumer of this field.
    pub(crate) memory_guaranteed_dense_image_size: u64,
    // NOTE(review): name suggests forcing memfd-based memory initialization
    // when `true` — confirm with runtime usage.
    pub(crate) force_memory_init_memfd: bool,
    /// Toggle for wmemcheck instrumentation.
    pub(crate) wmemcheck: bool,
    /// Whether a core dump is produced on trap.
    #[cfg(feature = "coredump")]
    pub(crate) coredump_on_trap: bool,
    /// macOS Mach-ports usage toggle (disabled under MIRI).
    pub(crate) macos_use_mach_ports: bool,
    /// Host CPU feature probe; `None` when unavailable (e.g. `no_std` builds).
    pub(crate) detect_host_feature: Option<fn(&str) -> Option<bool>>,
}
167
/// User-provided configuration for the compiler.
#[cfg(any(feature = "cranelift", feature = "winch"))]
#[derive(Debug, Clone)]
struct CompilerConfig {
    /// Explicitly-requested compilation strategy, if any; `None` means no
    /// explicit choice was made (initialized from `Strategy::Auto.not_auto()`).
    strategy: Option<Strategy>,
    /// Named key/value compiler settings.
    settings: crate::hash_map::HashMap<String, String>,
    /// Boolean compiler flags that have been enabled.
    flags: crate::hash_set::HashSet<String>,
    /// Storage backend for Cranelift's incremental-compilation cache
    /// (see [`Config::enable_incremental_compilation`]).
    #[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
    cache_store: Option<Arc<dyn CacheStore>>,
    // NOTE(review): name suggests a directory where CLIF output is written —
    // confirm with the compiler build code.
    clif_dir: Option<std::path::PathBuf>,
    /// Whether wmemcheck instrumentation is enabled for compilation.
    wmemcheck: bool,
}
180
#[cfg(any(feature = "cranelift", feature = "winch"))]
impl CompilerConfig {
    /// Creates an empty compiler configuration with no explicit strategy,
    /// settings, or flags.
    fn new() -> Self {
        CompilerConfig {
            // `Strategy::Auto.not_auto()` yields `None`, i.e. "no explicit choice".
            strategy: Strategy::Auto.not_auto(),
            settings: Default::default(),
            flags: Default::default(),
            #[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
            cache_store: None,
            clif_dir: None,
            wmemcheck: false,
        }
    }

    /// Ensures that the key is not set or equals the given value; when the
    /// key is unset, records `v` as its value.
    ///
    /// # Returns
    ///
    /// `true` if the setting was successfully recorded or already had the
    /// given value; `false` if a different value was explicitly configured
    /// for `k` previously.
    fn ensure_setting_unset_or_given(&mut self, k: &str, v: &str) -> bool {
        match self.settings.get(k) {
            Some(existing) => existing == v,
            None => {
                self.settings.insert(k.to_string(), v.to_string());
                true
            }
        }
    }
}
214
#[cfg(any(feature = "cranelift", feature = "winch"))]
impl Default for CompilerConfig {
    /// Equivalent to [`CompilerConfig::new`].
    fn default() -> Self {
        CompilerConfig::new()
    }
}
221
222impl Config {
    /// Creates a new configuration object with the default configuration
    /// specified.
    pub fn new() -> Self {
        let mut ret = Self {
            tunables: ConfigTunables::default(),
            #[cfg(any(feature = "cranelift", feature = "winch"))]
            compiler_config: CompilerConfig::default(),
            target: None,
            #[cfg(feature = "gc")]
            collector: Collector::default(),
            #[cfg(feature = "cache")]
            cache: None,
            profiling_strategy: ProfilingStrategy::None,
            #[cfg(feature = "runtime")]
            mem_creator: None,
            #[cfg(feature = "runtime")]
            custom_code_memory: None,
            allocation_strategy: InstanceAllocationStrategy::OnDemand,
            // 512k of stack -- note that this is chosen currently to not be too
            // big, not be too small, and be a good default for most platforms.
            // One platform of particular note is Windows where the stack size
            // of the main thread seems to, by default, be smaller than that of
            // Linux and macOS. This 512k value at least lets our current test
            // suite pass on the main thread of Windows (using `--test-threads
            // 1` forces this), or at least it passed when this change was
            // committed.
            max_wasm_stack: 512 * 1024,
            wasm_backtrace: true,
            wasm_backtrace_details_env_used: false,
            native_unwind_info: None,
            // Feature sets start empty: they record only explicit
            // `Config::wasm_*` requests from the embedder.
            enabled_features: WasmFeatures::empty(),
            disabled_features: WasmFeatures::empty(),
            // 2 MiB default, documented on `Config::async_stack_size`.
            #[cfg(any(feature = "async", feature = "stack-switching"))]
            async_stack_size: 2 << 20,
            #[cfg(feature = "async")]
            async_stack_zeroing: false,
            #[cfg(feature = "async")]
            stack_creator: None,
            async_support: false,
            module_version: ModuleVersionStrategy::default(),
            // Parallel compilation is disabled under MIRI.
            parallel_compilation: !cfg!(miri),
            // 16 MiB.
            memory_guaranteed_dense_image_size: 16 << 20,
            force_memory_init_memfd: false,
            wmemcheck: false,
            #[cfg(feature = "coredump")]
            coredump_on_trap: false,
            // Mach ports are disabled under MIRI.
            macos_use_mach_ports: !cfg!(miri),
            // Host feature detection requires `std`.
            #[cfg(feature = "std")]
            detect_host_feature: Some(detect_host_feature),
            #[cfg(not(feature = "std"))]
            detect_host_feature: None,
        };
        #[cfg(any(feature = "cranelift", feature = "winch"))]
        {
            ret.cranelift_debug_verifier(false);
            ret.cranelift_opt_level(OptLevel::Speed);

            // When running under MIRI try to optimize for compile time of wasm
            // code itself as much as possible. Disable optimizations by
            // default.
            if cfg!(miri) {
                ret.cranelift_opt_level(OptLevel::None);
            }
        }

        ret.wasm_backtrace_details(WasmBacktraceDetails::Environment);

        ret
    }
292
293 /// Configures the target platform of this [`Config`].
294 ///
295 /// This method is used to configure the output of compilation in an
296 /// [`Engine`](crate::Engine). This can be used, for example, to
297 /// cross-compile from one platform to another. By default, the host target
298 /// triple is used meaning compiled code is suitable to run on the host.
299 ///
300 /// Note that the [`Module`](crate::Module) type can only be created if the
301 /// target configured here matches the host. Otherwise if a cross-compile is
302 /// being performed where the host doesn't match the target then
303 /// [`Engine::precompile_module`](crate::Engine::precompile_module) must be
304 /// used instead.
305 ///
306 /// Target-specific flags (such as CPU features) will not be inferred by
307 /// default for the target when one is provided here. This means that this
308 /// can also be used, for example, with the host architecture to disable all
309 /// host-inferred feature flags. Configuring target-specific flags can be
310 /// done with [`Config::cranelift_flag_set`] and
311 /// [`Config::cranelift_flag_enable`].
312 ///
313 /// # Errors
314 ///
315 /// This method will error if the given target triple is not supported.
316 pub fn target(&mut self, target: &str) -> Result<&mut Self> {
317 self.target =
318 Some(target_lexicon::Triple::from_str(target).map_err(|e| anyhow::anyhow!(e))?);
319
320 Ok(self)
321 }
322
323 /// Enables the incremental compilation cache in Cranelift, using the provided `CacheStore`
324 /// backend for storage.
325 #[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
326 pub fn enable_incremental_compilation(
327 &mut self,
328 cache_store: Arc<dyn CacheStore>,
329 ) -> Result<&mut Self> {
330 self.compiler_config.cache_store = Some(cache_store);
331 Ok(self)
332 }
333
    /// Whether or not to enable support for asynchronous functions in Wasmtime.
    ///
    /// When enabled, the config can optionally define host functions with `async`.
    /// Instances created and functions called with this `Config` *must* be called
    /// through their asynchronous APIs, however. For example using
    /// [`Func::call`](crate::Func::call) will panic when used with this config.
    ///
    /// # Asynchronous Wasm
    ///
    /// WebAssembly does not currently have a way to specify at the bytecode
    /// level what is and isn't async. Host-defined functions, however, may be
    /// defined as `async`. WebAssembly imports always appear synchronous, which
    /// gives rise to a bit of an impedance mismatch here. To solve this
    /// Wasmtime supports "asynchronous configs" which enables calling these
    /// asynchronous functions in a way that looks synchronous to the executing
    /// WebAssembly code.
    ///
    /// An asynchronous config must always invoke wasm code asynchronously,
    /// meaning we'll always represent its computation as a
    /// [`Future`](std::future::Future). The `poll` method of the futures
    /// returned by Wasmtime will perform the actual work of calling the
    /// WebAssembly. Wasmtime won't manage its own thread pools or similar,
    /// that's left up to the embedder.
    ///
    /// To implement futures in a way that WebAssembly sees asynchronous host
    /// functions as synchronous, all async Wasmtime futures will execute on a
    /// separately allocated native stack from the thread otherwise executing
    /// Wasmtime. This separate native stack can then be switched to and from.
    /// Using this, whenever an `async` host function returns a future that
    /// resolves to `Pending`, we switch away from the temporary stack back to
    /// the main stack and propagate the `Pending` status.
    ///
    /// In general it's encouraged that the integration with `async` and
    /// wasmtime is designed early on in your embedding of Wasmtime to ensure
    /// that it's planned that WebAssembly executes in the right context of your
    /// application.
    ///
    /// # Execution in `poll`
    ///
    /// The [`Future::poll`](std::future::Future::poll) method is the main
    /// driving force behind Rust's futures. That method's own documentation
    /// states "an implementation of `poll` should strive to return quickly, and
    /// should not block". This, however, can be at odds with executing
    /// WebAssembly code as part of the `poll` method itself. If your
    /// WebAssembly is untrusted then this could allow the `poll` method to take
    /// arbitrarily long in the worst case, likely blocking all other
    /// asynchronous tasks.
    ///
    /// To remedy this situation you have a few possible ways to solve this:
    ///
    /// * The most efficient solution is to enable
    ///   [`Config::epoch_interruption`] in conjunction with
    ///   [`crate::Store::epoch_deadline_async_yield_and_update`]. Coupled with
    ///   periodic calls to [`crate::Engine::increment_epoch`] this will cause
    ///   executing WebAssembly to periodically yield back according to the
    ///   epoch configuration settings. This enables `Future::poll` to take at
    ///   most a certain amount of time according to epoch configuration
    ///   settings and when increments happen. The benefit of this approach is
    ///   that the instrumentation in compiled code is quite lightweight, but a
    ///   downside can be that the scheduling is somewhat nondeterministic since
    ///   increments are usually timer-based which are not always deterministic.
    ///
    ///   Note that to prevent infinite execution of wasm it's recommended to
    ///   place a timeout on the entire future representing executing wasm code
    ///   and the periodic yields with epochs should ensure that when the
    ///   timeout is reached it's appropriately recognized.
    ///
    /// * Alternatively you can enable the
    ///   [`Config::consume_fuel`](crate::Config::consume_fuel) method as well
    ///   as [`crate::Store::fuel_async_yield_interval`]. When doing so this will
    ///   configure Wasmtime futures to yield periodically while they're
    ///   executing WebAssembly code. After consuming the specified amount of
    ///   fuel wasm futures will return `Poll::Pending` from their `poll`
    ///   method, and will get automatically re-polled later. This enables the
    ///   `Future::poll` method to take roughly a fixed amount of time since
    ///   fuel is guaranteed to get consumed while wasm is executing. Unlike
    ///   epoch-based preemption this is deterministic since wasm always
    ///   consumes a fixed amount of fuel per-operation. The downside of this
    ///   approach, however, is that the compiled code instrumentation is
    ///   significantly more expensive than epoch checks.
    ///
    ///   Note that to prevent infinite execution of wasm it's recommended to
    ///   place a timeout on the entire future representing executing wasm code
    ///   and the periodic yields with epochs should ensure that when the
    ///   timeout is reached it's appropriately recognized.
    ///
    /// In all cases special care needs to be taken when integrating
    /// asynchronous wasm into your application. You should carefully plan where
    /// WebAssembly will execute and what compute resources will be allotted to
    /// it. If Wasmtime doesn't support exactly what you'd like just yet, please
    /// feel free to open an issue!
    #[cfg(feature = "async")]
    pub fn async_support(&mut self, enable: bool) -> &mut Self {
        self.async_support = enable;
        self
    }
430
431 /// Configures whether DWARF debug information will be emitted during
432 /// compilation.
433 ///
434 /// Note that the `debug-builtins` compile-time Cargo feature must also be
435 /// enabled for native debuggers such as GDB or LLDB to be able to debug
436 /// guest WebAssembly programs.
437 ///
438 /// By default this option is `false`.
439 /// **Note** Enabling this option is not compatible with the Winch compiler.
440 pub fn debug_info(&mut self, enable: bool) -> &mut Self {
441 self.tunables.generate_native_debuginfo = Some(enable);
442 self
443 }
444
445 /// Configures whether [`WasmBacktrace`] will be present in the context of
446 /// errors returned from Wasmtime.
447 ///
448 /// A backtrace may be collected whenever an error is returned from a host
449 /// function call through to WebAssembly or when WebAssembly itself hits a
450 /// trap condition, such as an out-of-bounds memory access. This flag
451 /// indicates, in these conditions, whether the backtrace is collected or
452 /// not.
453 ///
454 /// Currently wasm backtraces are implemented through frame pointer walking.
455 /// This means that collecting a backtrace is expected to be a fast and
456 /// relatively cheap operation. Additionally backtrace collection is
457 /// suitable in concurrent environments since one thread capturing a
458 /// backtrace won't block other threads.
459 ///
460 /// Collected backtraces are attached via [`anyhow::Error::context`] to
461 /// errors returned from host functions. The [`WasmBacktrace`] type can be
462 /// acquired via [`anyhow::Error::downcast_ref`] to inspect the backtrace.
463 /// When this option is disabled then this context is never applied to
464 /// errors coming out of wasm.
465 ///
466 /// This option is `true` by default.
467 ///
468 /// [`WasmBacktrace`]: crate::WasmBacktrace
469 pub fn wasm_backtrace(&mut self, enable: bool) -> &mut Self {
470 self.wasm_backtrace = enable;
471 self
472 }
473
474 /// Configures whether backtraces in `Trap` will parse debug info in the wasm file to
475 /// have filename/line number information.
476 ///
477 /// When enabled this will causes modules to retain debugging information
478 /// found in wasm binaries. This debug information will be used when a trap
479 /// happens to symbolicate each stack frame and attempt to print a
480 /// filename/line number for each wasm frame in the stack trace.
481 ///
482 /// By default this option is `WasmBacktraceDetails::Environment`, meaning
483 /// that wasm will read `WASMTIME_BACKTRACE_DETAILS` to indicate whether
484 /// details should be parsed. Note that the `std` feature of this crate must
485 /// be active to read environment variables, otherwise this is disabled by
486 /// default.
487 pub fn wasm_backtrace_details(&mut self, enable: WasmBacktraceDetails) -> &mut Self {
488 self.wasm_backtrace_details_env_used = false;
489 self.tunables.parse_wasm_debuginfo = match enable {
490 WasmBacktraceDetails::Enable => Some(true),
491 WasmBacktraceDetails::Disable => Some(false),
492 WasmBacktraceDetails::Environment => {
493 #[cfg(feature = "std")]
494 {
495 self.wasm_backtrace_details_env_used = true;
496 std::env::var("WASMTIME_BACKTRACE_DETAILS")
497 .map(|s| Some(s == "1"))
498 .unwrap_or(Some(false))
499 }
500 #[cfg(not(feature = "std"))]
501 {
502 Some(false)
503 }
504 }
505 };
506 self
507 }
508
509 /// Configures whether to generate native unwind information
510 /// (e.g. `.eh_frame` on Linux).
511 ///
512 /// This configuration option only exists to help third-party stack
513 /// capturing mechanisms, such as the system's unwinder or the `backtrace`
514 /// crate, determine how to unwind through Wasm frames. It does not affect
515 /// whether Wasmtime can capture Wasm backtraces or not. The presence of
516 /// [`WasmBacktrace`] is controlled by the [`Config::wasm_backtrace`]
517 /// option.
518 ///
519 /// Native unwind information is included:
520 /// - When targeting Windows, since the Windows ABI requires it.
521 /// - By default.
522 ///
523 /// Note that systems loading many modules may wish to disable this
524 /// configuration option instead of leaving it on-by-default. Some platforms
525 /// exhibit quadratic behavior when registering/unregistering unwinding
526 /// information which can greatly slow down the module loading/unloading
527 /// process.
528 ///
529 /// [`WasmBacktrace`]: crate::WasmBacktrace
530 pub fn native_unwind_info(&mut self, enable: bool) -> &mut Self {
531 self.native_unwind_info = Some(enable);
532 self
533 }
534
535 /// Configures whether execution of WebAssembly will "consume fuel" to
536 /// either halt or yield execution as desired.
537 ///
538 /// This can be used to deterministically prevent infinitely-executing
539 /// WebAssembly code by instrumenting generated code to consume fuel as it
540 /// executes. When fuel runs out a trap is raised, however [`Store`] can be
541 /// configured to yield execution periodically via
542 /// [`crate::Store::fuel_async_yield_interval`].
543 ///
544 /// Note that a [`Store`] starts with no fuel, so if you enable this option
545 /// you'll have to be sure to pour some fuel into [`Store`] before
546 /// executing some code.
547 ///
548 /// By default this option is `false`.
549 ///
550 /// **Note** Enabling this option is not compatible with the Winch compiler.
551 ///
552 /// [`Store`]: crate::Store
553 pub fn consume_fuel(&mut self, enable: bool) -> &mut Self {
554 self.tunables.consume_fuel = Some(enable);
555 self
556 }
557
    /// Enables epoch-based interruption.
    ///
    /// When executing code in async mode, we sometimes want to
    /// implement a form of cooperative timeslicing: long-running Wasm
    /// guest code should periodically yield to the executor
    /// loop. This yielding could be implemented by using "fuel" (see
    /// [`consume_fuel`](Config::consume_fuel)). However, fuel
    /// instrumentation is somewhat expensive: it modifies the
    /// compiled form of the Wasm code so that it maintains a precise
    /// instruction count, frequently checking this count against the
    /// remaining fuel. If one does not need this precise count or
    /// deterministic interruptions, and only needs a periodic
    /// interrupt of some form, then it would be better to have a more
    /// lightweight mechanism.
    ///
    /// Epoch-based interruption is that mechanism. There is a global
    /// "epoch", which is a counter that divides time into arbitrary
    /// periods (or epochs). This counter lives on the
    /// [`Engine`](crate::Engine) and can be incremented by calling
    /// [`Engine::increment_epoch`](crate::Engine::increment_epoch).
    /// Epoch-based instrumentation works by setting a "deadline
    /// epoch". The compiled code knows the deadline, and at certain
    /// points, checks the current epoch against that deadline. It
    /// will yield if the deadline has been reached.
    ///
    /// The idea is that checking an infrequently-changing counter is
    /// cheaper than counting and frequently storing a precise metric
    /// (instructions executed) locally. The interruptions are not
    /// deterministic, but if the embedder increments the epoch in a
    /// periodic way (say, every regular timer tick by a thread or
    /// signal handler), then we can ensure that all async code will
    /// yield to the executor within a bounded time.
    ///
    /// The deadline check cannot be avoided by malicious wasm code. It is safe
    /// to use epoch deadlines to limit the execution time of untrusted
    /// code.
    ///
    /// The [`Store`](crate::Store) tracks the deadline, and controls
    /// what happens when the deadline is reached during
    /// execution. Several behaviors are possible:
    ///
    /// - Trap if code is executing when the epoch deadline is
    ///   met. See
    ///   [`Store::epoch_deadline_trap`](crate::Store::epoch_deadline_trap).
    ///
    /// - Call an arbitrary function. This function may choose to trap or
    ///   increment the epoch. See
    ///   [`Store::epoch_deadline_callback`](crate::Store::epoch_deadline_callback).
    ///
    /// - Yield to the executor loop, then resume when the future is
    ///   next polled. See
    ///   [`Store::epoch_deadline_async_yield_and_update`](crate::Store::epoch_deadline_async_yield_and_update).
    ///
    /// Trapping is the default. The yielding behavior may be used for
    /// the timeslicing behavior described above.
    ///
    /// This feature is available with or without async support.
    /// However, without async support, the timeslicing behavior is
    /// not available. This means epoch-based interruption can only
    /// serve as a simple external-interruption mechanism.
    ///
    /// An initial deadline must be set before executing code by calling
    /// [`Store::set_epoch_deadline`](crate::Store::set_epoch_deadline). If this
    /// deadline is not configured then wasm will immediately trap.
    ///
    /// ## Interaction with blocking host calls
    ///
    /// Epochs (and fuel) do not assist in handling WebAssembly code blocked in
    /// a call to the host. For example if the WebAssembly function calls
    /// `wasi:io/poll/poll` to sleep epochs will not assist in waking this up or
    /// timing it out. Epochs intentionally only affect running WebAssembly code
    /// itself and it's left to the embedder to determine how best to wake up
    /// indefinitely blocking code in the host.
    ///
    /// The typical solution for this, however, is to use
    /// [`Config::async_support(true)`](Config::async_support) and the `async`
    /// variant of WASI host functions. This models computation as a Rust
    /// `Future` which means that when blocking happens the future is only
    /// suspended and control yields back to the main event loop. This gives the
    /// embedder the opportunity to use `tokio::time::timeout` for example on a
    /// wasm computation and have the desired effect of cancelling a blocking
    /// operation when a timeout expires.
    ///
    /// ## When to use fuel vs. epochs
    ///
    /// In general, epoch-based interruption results in faster
    /// execution. This difference is sometimes significant: in some
    /// measurements, up to 2-3x. This is because epoch-based
    /// interruption does less work: it only watches for a global
    /// rarely-changing counter to increment, rather than keeping a
    /// local frequently-changing counter and comparing it to a
    /// deadline.
    ///
    /// Fuel, in contrast, should be used when *deterministic*
    /// yielding or trapping is needed. For example, if it is required
    /// that the same function call with the same starting state will
    /// always either complete or trap with an out-of-fuel error,
    /// deterministically, then fuel with a fixed bound should be
    /// used.
    ///
    /// **Note** Enabling this option is not compatible with the Winch compiler.
    ///
    /// # See Also
    ///
    /// - [`Engine::increment_epoch`](crate::Engine::increment_epoch)
    /// - [`Store::set_epoch_deadline`](crate::Store::set_epoch_deadline)
    /// - [`Store::epoch_deadline_trap`](crate::Store::epoch_deadline_trap)
    /// - [`Store::epoch_deadline_callback`](crate::Store::epoch_deadline_callback)
    /// - [`Store::epoch_deadline_async_yield_and_update`](crate::Store::epoch_deadline_async_yield_and_update)
    pub fn epoch_interruption(&mut self, enable: bool) -> &mut Self {
        self.tunables.epoch_interruption = Some(enable);
        self
    }
671
    /// Configures the maximum amount of stack space available for
    /// executing WebAssembly code.
    ///
    /// WebAssembly has well-defined semantics on stack overflow. This is
    /// intended to be a knob which can help configure how much stack space
    /// wasm execution is allowed to consume. Note that the number here is not
    /// super-precise, but rather wasm will take at most "pretty close to this
    /// much" stack space.
    ///
    /// If a wasm call (or series of nested wasm calls) takes more stack space
    /// than the `size` specified then a stack overflow trap will be raised.
    ///
    /// Caveat: this knob only limits the stack space consumed by wasm code.
    /// More importantly, it does not ensure that this much stack space is
    /// available on the calling thread stack. Exhausting the thread stack
    /// typically leads to an **abort** of the process.
    ///
    /// Here are some examples of how that could happen:
    ///
    /// - Let's assume this option is set to 2 MiB and then a thread that has
    ///   a stack with 512 KiB left.
    ///
    ///   If wasm code consumes more than 512 KiB then the process will be aborted.
    ///
    /// - Assuming the same conditions, but this time wasm code does not consume
    ///   any stack but calls into a host function. The host function consumes
    ///   more than 512 KiB of stack space. The process will be aborted.
    ///
    /// There's another gotcha related to recursive calling into wasm: the stack
    /// space consumed by a host function is counted towards this limit. The
    /// host functions are not prevented from consuming more than this limit.
    /// However, if a host function uses more than this limit and then calls
    /// back into wasm, the execution will trap immediately because of
    /// stack overflow.
    ///
    /// When the `async` feature is enabled, this value cannot exceed the
    /// `async_stack_size` option. Be careful not to set this value too close
    /// to `async_stack_size` as doing so may limit how much stack space
    /// is available for host functions.
    ///
    /// By default this option is 512 KiB.
    ///
    /// # Errors
    ///
    /// The `Engine::new` method will fail if the `size` specified here is
    /// either 0 or larger than the [`Config::async_stack_size`] configuration.
    pub fn max_wasm_stack(&mut self, size: usize) -> &mut Self {
        self.max_wasm_stack = size;
        self
    }
722
723 /// Configures the size of the stacks used for asynchronous execution.
724 ///
725 /// This setting configures the size of the stacks that are allocated for
726 /// asynchronous execution. The value cannot be less than `max_wasm_stack`.
727 ///
728 /// The amount of stack space guaranteed for host functions is
729 /// `async_stack_size - max_wasm_stack`, so take care not to set these two values
730 /// close to one another; doing so may cause host functions to overflow the
731 /// stack and abort the process.
732 ///
733 /// By default this option is 2 MiB.
734 ///
735 /// # Errors
736 ///
737 /// The `Engine::new` method will fail if the value for this option is
738 /// smaller than the [`Config::max_wasm_stack`] option.
739 #[cfg(any(feature = "async", feature = "stack-switching"))]
    pub fn async_stack_size(&mut self, size: usize) -> &mut Self {
        // Validation against `max_wasm_stack` happens in `Engine::new`, not here.
        self.async_stack_size = size;
        self
    }
744
745 /// Configures whether or not stacks used for async futures are zeroed
746 /// before (re)use.
747 ///
748 /// When the [`async_support`](Config::async_support) method is enabled for
749 /// Wasmtime and the [`call_async`] variant of calling WebAssembly is used
750 /// then Wasmtime will create a separate runtime execution stack for each
751 /// future produced by [`call_async`]. By default upon allocation, depending
752 /// on the platform, these stacks might be filled with uninitialized
753 /// memory. This is safe and correct because, modulo bugs in Wasmtime,
754 /// compiled Wasm code will never read from a stack slot before it
755 /// initializes the stack slot.
756 ///
757 /// However, as a defense-in-depth mechanism, you may configure Wasmtime to
758 /// ensure that these stacks are zeroed before they are used. Notably, if
759 /// you are using the pooling allocator, stacks can be pooled and reused
760 /// across different Wasm guests; ensuring that stacks are zeroed can
761 /// prevent data leakage between Wasm guests even in the face of potential
762 /// read-of-stack-slot-before-initialization bugs in Wasmtime's compiler.
763 ///
764 /// Stack zeroing can be a costly operation in highly concurrent
765 /// environments due to modifications of the virtual address space requiring
766 /// process-wide synchronization. It can also be costly in `no-std`
767 /// environments that must manually zero memory, and cannot rely on an OS
768 /// and virtual memory to provide zeroed pages.
769 ///
770 /// This option defaults to `false`.
771 ///
772 /// [`call_async`]: crate::TypedFunc::call_async
773 #[cfg(feature = "async")]
    pub fn async_stack_zeroing(&mut self, enable: bool) -> &mut Self {
        // Record the defense-in-depth zeroing preference (defaults to `false`).
        self.async_stack_zeroing = enable;
        self
    }
778
779 fn wasm_feature(&mut self, flag: WasmFeatures, enable: bool) -> &mut Self {
780 self.enabled_features.set(flag, enable);
781 self.disabled_features.set(flag, !enable);
782 self
783 }
784
785 /// Configures whether the WebAssembly tail calls proposal will be enabled
786 /// for compilation or not.
787 ///
788 /// The [WebAssembly tail calls proposal] introduces the `return_call` and
789 /// `return_call_indirect` instructions. These instructions allow for Wasm
790 /// programs to implement some recursive algorithms with *O(1)* stack space
791 /// usage.
792 ///
793 /// This is `true` by default except when the Winch compiler is enabled.
794 ///
795 /// [WebAssembly tail calls proposal]: https://github.com/WebAssembly/tail-call
796 pub fn wasm_tail_call(&mut self, enable: bool) -> &mut Self {
797 self.wasm_feature(WasmFeatures::TAIL_CALL, enable);
798 self
799 }
800
801 /// Configures whether the WebAssembly custom-page-sizes proposal will be
802 /// enabled for compilation or not.
803 ///
804 /// The [WebAssembly custom-page-sizes proposal] allows a memory to
805 /// customize its page sizes. By default, Wasm page sizes are 64KiB
806 /// large. This proposal allows the memory to opt into smaller page sizes
807 /// instead, allowing Wasm to run in environments with less than 64KiB RAM
808 /// available, for example.
809 ///
810 /// Note that the page size is part of the memory's type, and because
811 /// different memories may have different types, they may also have
812 /// different page sizes.
813 ///
814 /// Currently the only valid page sizes are 64KiB (the default) and 1
815 /// byte. Future extensions may relax this constraint and allow all powers
816 /// of two.
817 ///
818 /// Support for this proposal is disabled by default.
819 ///
820 /// [WebAssembly custom-page-sizes proposal]: https://github.com/WebAssembly/custom-page-sizes
821 pub fn wasm_custom_page_sizes(&mut self, enable: bool) -> &mut Self {
822 self.wasm_feature(WasmFeatures::CUSTOM_PAGE_SIZES, enable);
823 self
824 }
825
826 /// Configures whether the WebAssembly [threads] proposal will be enabled
827 /// for compilation.
828 ///
829 /// This feature gates items such as shared memories and atomic
830 /// instructions. Note that the threads feature depends on the bulk memory
831 /// feature, which is enabled by default. Additionally note that while the
832 /// wasm feature is called "threads" it does not actually include the
833 /// ability to spawn threads. Spawning threads is part of the [wasi-threads]
834 /// proposal which is a separately gated feature in Wasmtime.
835 ///
836 /// Embeddings of Wasmtime are able to build their own custom threading
837 /// scheme on top of the core wasm threads proposal, however.
838 ///
839 /// The default value for this option is whether the `threads`
840 /// crate feature of Wasmtime is enabled or not. By default this crate
841 /// feature is enabled.
842 ///
843 /// [threads]: https://github.com/webassembly/threads
844 /// [wasi-threads]: https://github.com/webassembly/wasi-threads
845 #[cfg(feature = "threads")]
846 pub fn wasm_threads(&mut self, enable: bool) -> &mut Self {
847 self.wasm_feature(WasmFeatures::THREADS, enable);
848 self
849 }
850
851 /// Configures whether the WebAssembly [shared-everything-threads] proposal
852 /// will be enabled for compilation.
853 ///
854 /// This feature gates extended use of the `shared` attribute on items other
855 /// than memories, extra atomic instructions, and new component model
    /// intrinsics for spawning threads. It depends on
    /// [`wasm_threads`][Self::wasm_threads] being enabled.
858 ///
859 /// [shared-everything-threads]:
860 /// https://github.com/webassembly/shared-everything-threads
861 pub fn wasm_shared_everything_threads(&mut self, enable: bool) -> &mut Self {
862 self.wasm_feature(WasmFeatures::SHARED_EVERYTHING_THREADS, enable);
863 self
864 }
865
866 /// Configures whether the [WebAssembly reference types proposal][proposal]
867 /// will be enabled for compilation.
868 ///
869 /// This feature gates items such as the `externref` and `funcref` types as
870 /// well as allowing a module to define multiple tables.
871 ///
872 /// Note that the reference types proposal depends on the bulk memory proposal.
873 ///
874 /// This feature is `true` by default.
875 ///
876 /// # Errors
877 ///
    /// The validation of this feature is deferred until the engine is being built,
    /// and thus may cause `Engine::new` to fail if the `bulk_memory` feature is disabled.
880 ///
881 /// [proposal]: https://github.com/webassembly/reference-types
882 #[cfg(feature = "gc")]
883 pub fn wasm_reference_types(&mut self, enable: bool) -> &mut Self {
884 self.wasm_feature(WasmFeatures::REFERENCE_TYPES, enable);
885 self
886 }
887
888 /// Configures whether the [WebAssembly function references
889 /// proposal][proposal] will be enabled for compilation.
890 ///
891 /// This feature gates non-nullable reference types, function reference
892 /// types, `call_ref`, `ref.func`, and non-nullable reference related
893 /// instructions.
894 ///
895 /// Note that the function references proposal depends on the reference
896 /// types proposal.
897 ///
898 /// This feature is `false` by default.
899 ///
900 /// [proposal]: https://github.com/WebAssembly/function-references
901 #[cfg(feature = "gc")]
902 pub fn wasm_function_references(&mut self, enable: bool) -> &mut Self {
903 self.wasm_feature(WasmFeatures::FUNCTION_REFERENCES, enable);
904 self
905 }
906
907 /// Configures whether the [WebAssembly wide-arithmetic][proposal] will be
908 /// enabled for compilation.
909 ///
910 /// This feature is `false` by default.
911 ///
912 /// [proposal]: https://github.com/WebAssembly/wide-arithmetic
913 pub fn wasm_wide_arithmetic(&mut self, enable: bool) -> &mut Self {
914 self.wasm_feature(WasmFeatures::WIDE_ARITHMETIC, enable);
915 self
916 }
917
918 /// Configures whether the [WebAssembly Garbage Collection
919 /// proposal][proposal] will be enabled for compilation.
920 ///
921 /// This feature gates `struct` and `array` type definitions and references,
922 /// the `i31ref` type, and all related instructions.
923 ///
    /// Note that the GC proposal depends on the typed function
    /// references proposal.
926 ///
927 /// This feature is `false` by default.
928 ///
929 /// **Warning: Wasmtime's implementation of the GC proposal is still in
930 /// progress and generally not ready for primetime.**
931 ///
932 /// [proposal]: https://github.com/WebAssembly/gc
933 #[cfg(feature = "gc")]
934 pub fn wasm_gc(&mut self, enable: bool) -> &mut Self {
935 self.wasm_feature(WasmFeatures::GC, enable);
936 self
937 }
938
939 /// Configures whether the WebAssembly SIMD proposal will be
940 /// enabled for compilation.
941 ///
942 /// The [WebAssembly SIMD proposal][proposal]. This feature gates items such
943 /// as the `v128` type and all of its operators being in a module. Note that
944 /// this does not enable the [relaxed simd proposal].
945 ///
946 /// **Note**
947 ///
948 /// On x86_64 platforms the base CPU feature requirement for SIMD
949 /// is SSE2 for the Cranelift compiler and AVX for the Winch compiler.
950 ///
951 /// This is `true` by default.
952 ///
953 /// [proposal]: https://github.com/webassembly/simd
954 /// [relaxed simd proposal]: https://github.com/WebAssembly/relaxed-simd
955 pub fn wasm_simd(&mut self, enable: bool) -> &mut Self {
956 self.wasm_feature(WasmFeatures::SIMD, enable);
957 self
958 }
959
960 /// Configures whether the WebAssembly Relaxed SIMD proposal will be
961 /// enabled for compilation.
962 ///
963 /// The relaxed SIMD proposal adds new instructions to WebAssembly which,
964 /// for some specific inputs, are allowed to produce different results on
965 /// different hosts. More-or-less this proposal enables exposing
966 /// platform-specific semantics of SIMD instructions in a controlled
967 /// fashion to a WebAssembly program. From an embedder's perspective this
968 /// means that WebAssembly programs may execute differently depending on
969 /// whether the host is x86_64 or AArch64, for example.
970 ///
971 /// By default Wasmtime lowers relaxed SIMD instructions to the fastest
972 /// lowering for the platform it's running on. This means that, by default,
973 /// some relaxed SIMD instructions may have different results for the same
974 /// inputs across x86_64 and AArch64. This behavior can be disabled through
975 /// the [`Config::relaxed_simd_deterministic`] option which will force
976 /// deterministic behavior across all platforms, as classified by the
977 /// specification, at the cost of performance.
978 ///
979 /// This is `true` by default.
980 ///
981 /// [proposal]: https://github.com/webassembly/relaxed-simd
982 pub fn wasm_relaxed_simd(&mut self, enable: bool) -> &mut Self {
983 self.wasm_feature(WasmFeatures::RELAXED_SIMD, enable);
984 self
985 }
986
987 /// This option can be used to control the behavior of the [relaxed SIMD
988 /// proposal's][proposal] instructions.
989 ///
990 /// The relaxed SIMD proposal introduces instructions that are allowed to
991 /// have different behavior on different architectures, primarily to afford
992 /// an efficient implementation on all architectures. This means, however,
993 /// that the same module may execute differently on one host than another,
994 /// which typically is not otherwise the case. This option is provided to
995 /// force Wasmtime to generate deterministic code for all relaxed simd
996 /// instructions, at the cost of performance, for all architectures. When
997 /// this option is enabled then the deterministic behavior of all
998 /// instructions in the relaxed SIMD proposal is selected.
999 ///
1000 /// This is `false` by default.
1001 ///
1002 /// [proposal]: https://github.com/webassembly/relaxed-simd
    pub fn relaxed_simd_deterministic(&mut self, enable: bool) -> &mut Self {
        // NOTE(review): wrapped in `Some` presumably so an explicit setting can
        // be distinguished from the unset default — confirm against `Tunables`.
        self.tunables.relaxed_simd_deterministic = Some(enable);
        self
    }
1007
1008 /// Configures whether the [WebAssembly bulk memory operations
1009 /// proposal][proposal] will be enabled for compilation.
1010 ///
1011 /// This feature gates items such as the `memory.copy` instruction, passive
1012 /// data/table segments, etc, being in a module.
1013 ///
1014 /// This is `true` by default.
1015 ///
1016 /// Feature `reference_types`, which is also `true` by default, requires
1017 /// this feature to be enabled. Thus disabling this feature must also disable
1018 /// `reference_types` as well using [`wasm_reference_types`](crate::Config::wasm_reference_types).
1019 ///
1020 /// # Errors
1021 ///
1022 /// Disabling this feature without disabling `reference_types` will cause
1023 /// `Engine::new` to fail.
1024 ///
1025 /// [proposal]: https://github.com/webassembly/bulk-memory-operations
1026 pub fn wasm_bulk_memory(&mut self, enable: bool) -> &mut Self {
1027 self.wasm_feature(WasmFeatures::BULK_MEMORY, enable);
1028 self
1029 }
1030
1031 /// Configures whether the WebAssembly multi-value [proposal] will
1032 /// be enabled for compilation.
1033 ///
1034 /// This feature gates functions and blocks returning multiple values in a
1035 /// module, for example.
1036 ///
1037 /// This is `true` by default.
1038 ///
1039 /// [proposal]: https://github.com/webassembly/multi-value
1040 pub fn wasm_multi_value(&mut self, enable: bool) -> &mut Self {
1041 self.wasm_feature(WasmFeatures::MULTI_VALUE, enable);
1042 self
1043 }
1044
1045 /// Configures whether the WebAssembly multi-memory [proposal] will
1046 /// be enabled for compilation.
1047 ///
1048 /// This feature gates modules having more than one linear memory
1049 /// declaration or import.
1050 ///
1051 /// This is `true` by default.
1052 ///
1053 /// [proposal]: https://github.com/webassembly/multi-memory
1054 pub fn wasm_multi_memory(&mut self, enable: bool) -> &mut Self {
1055 self.wasm_feature(WasmFeatures::MULTI_MEMORY, enable);
1056 self
1057 }
1058
1059 /// Configures whether the WebAssembly memory64 [proposal] will
1060 /// be enabled for compilation.
1061 ///
    /// Note that the upstream specification is not finalized and Wasmtime
    /// may also have bugs for this feature since it hasn't been exercised
    /// much.
1065 ///
1066 /// This is `false` by default.
1067 ///
1068 /// [proposal]: https://github.com/webassembly/memory64
1069 pub fn wasm_memory64(&mut self, enable: bool) -> &mut Self {
1070 self.wasm_feature(WasmFeatures::MEMORY64, enable);
1071 self
1072 }
1073
1074 /// Configures whether the WebAssembly extended-const [proposal] will
1075 /// be enabled for compilation.
1076 ///
1077 /// This is `true` by default.
1078 ///
1079 /// [proposal]: https://github.com/webassembly/extended-const
1080 pub fn wasm_extended_const(&mut self, enable: bool) -> &mut Self {
1081 self.wasm_feature(WasmFeatures::EXTENDED_CONST, enable);
1082 self
1083 }
1084
1085 /// Configures whether the [WebAssembly stack switching
1086 /// proposal][proposal] will be enabled for compilation.
1087 ///
1088 /// This feature gates the use of control tags.
1089 ///
1090 /// This feature depends on the `function_reference_types` and
1091 /// `exceptions` features.
1092 ///
1093 /// This feature is `false` by default.
1094 ///
1095 /// # Errors
1096 ///
1097 /// [proposal]: https://github.com/webassembly/stack-switching
1098 pub fn wasm_stack_switching(&mut self, enable: bool) -> &mut Self {
1099 self.wasm_feature(WasmFeatures::STACK_SWITCHING, enable);
1100 self
1101 }
1102
1103 /// Configures whether the WebAssembly component-model [proposal] will
1104 /// be enabled for compilation.
1105 ///
1106 /// This flag can be used to blanket disable all components within Wasmtime.
1107 /// Otherwise usage of components requires statically using
1108 /// [`Component`](crate::component::Component) instead of
1109 /// [`Module`](crate::Module) for example anyway.
1110 ///
1111 /// The default value for this option is whether the `component-model`
1112 /// crate feature of Wasmtime is enabled or not. By default this crate
1113 /// feature is enabled.
1114 ///
1115 /// [proposal]: https://github.com/webassembly/component-model
1116 #[cfg(feature = "component-model")]
1117 pub fn wasm_component_model(&mut self, enable: bool) -> &mut Self {
1118 self.wasm_feature(WasmFeatures::COMPONENT_MODEL, enable);
1119 self
1120 }
1121
1122 /// Configures whether components support the async ABI [proposal] for
1123 /// lifting and lowering functions, as well as `stream`, `future`, and
1124 /// `error-context` types.
1125 ///
1126 /// Please note that Wasmtime's support for this feature is _very_
1127 /// incomplete.
1128 ///
1129 /// [proposal]:
1130 /// https://github.com/WebAssembly/component-model/blob/main/design/mvp/Async.md
1131 #[cfg(feature = "component-model-async")]
1132 pub fn wasm_component_model_async(&mut self, enable: bool) -> &mut Self {
1133 self.wasm_feature(WasmFeatures::CM_ASYNC, enable);
1134 self
1135 }
1136
1137 /// This corresponds to the 🚝 emoji in the component model specification.
1138 ///
1139 /// Please note that Wasmtime's support for this feature is _very_
1140 /// incomplete.
1141 ///
1142 /// [proposal]:
1143 /// https://github.com/WebAssembly/component-model/blob/main/design/mvp/Async.md
1144 #[cfg(feature = "component-model-async")]
1145 pub fn wasm_component_model_async_builtins(&mut self, enable: bool) -> &mut Self {
1146 self.wasm_feature(WasmFeatures::CM_ASYNC_BUILTINS, enable);
1147 self
1148 }
1149
1150 /// This corresponds to the 🚟 emoji in the component model specification.
1151 ///
1152 /// Please note that Wasmtime's support for this feature is _very_
1153 /// incomplete.
1154 ///
1155 /// [proposal]: https://github.com/WebAssembly/component-model/blob/main/design/mvp/Async.md
1156 #[cfg(feature = "component-model-async")]
1157 pub fn wasm_component_model_async_stackful(&mut self, enable: bool) -> &mut Self {
1158 self.wasm_feature(WasmFeatures::CM_ASYNC_STACKFUL, enable);
1159 self
1160 }
1161
1162 /// This corresponds to the 📝 emoji in the component model specification.
1163 ///
1164 /// Please note that Wasmtime's support for this feature is _very_
1165 /// incomplete.
1166 ///
1167 /// [proposal]: https://github.com/WebAssembly/component-model/blob/main/design/mvp/Async.md
1168 #[cfg(feature = "component-model")]
1169 pub fn wasm_component_model_error_context(&mut self, enable: bool) -> &mut Self {
1170 self.wasm_feature(WasmFeatures::CM_ERROR_CONTEXT, enable);
1171 self
1172 }
1173
1174 #[doc(hidden)] // FIXME(#3427) - if/when implemented then un-hide this
1175 pub fn wasm_exceptions(&mut self, enable: bool) -> &mut Self {
1176 self.wasm_feature(WasmFeatures::EXCEPTIONS, enable);
1177 self
1178 }
1179
1180 #[doc(hidden)] // FIXME(#3427) - if/when implemented then un-hide this
1181 #[deprecated = "This configuration option only exists for internal \
1182 usage with the spec testsuite. It may be removed at \
1183 any time and without warning. Do not rely on it!"]
1184 pub fn wasm_legacy_exceptions(&mut self, enable: bool) -> &mut Self {
1185 self.wasm_feature(WasmFeatures::LEGACY_EXCEPTIONS, enable);
1186 self
1187 }
1188
1189 /// Configures which compilation strategy will be used for wasm modules.
1190 ///
1191 /// This method can be used to configure which compiler is used for wasm
1192 /// modules, and for more documentation consult the [`Strategy`] enumeration
1193 /// and its documentation.
1194 ///
1195 /// The default value for this is `Strategy::Auto`.
1196 #[cfg(any(feature = "cranelift", feature = "winch"))]
    pub fn strategy(&mut self, strategy: Strategy) -> &mut Self {
        // `not_auto()` presumably resolves `Strategy::Auto` into "no explicit
        // choice" so the engine picks a default — TODO confirm against the
        // `Strategy` definition.
        self.compiler_config.strategy = strategy.not_auto();
        self
    }
1201
1202 /// Configures which garbage collector will be used for Wasm modules.
1203 ///
1204 /// This method can be used to configure which garbage collector
1205 /// implementation is used for Wasm modules. For more documentation, consult
1206 /// the [`Collector`] enumeration and its documentation.
1207 ///
1208 /// The default value for this is `Collector::Auto`.
1209 #[cfg(feature = "gc")]
    pub fn collector(&mut self, collector: Collector) -> &mut Self {
        // Plain store of the requested GC implementation.
        self.collector = collector;
        self
    }
1214
1215 /// Creates a default profiler based on the profiling strategy chosen.
1216 ///
1217 /// Profiler creation calls the type's default initializer where the purpose is
1218 /// really just to put in place the type used for profiling.
1219 ///
1220 /// Some [`ProfilingStrategy`] require specific platforms or particular feature
1221 /// to be enabled, such as `ProfilingStrategy::JitDump` requires the `jitdump`
1222 /// feature.
1223 ///
1224 /// # Errors
1225 ///
    /// The validation of this field is deferred until the engine is being built, and thus may
    /// cause `Engine::new` to fail if the required feature is disabled, or the platform is not
    /// supported.
    pub fn profiler(&mut self, profile: ProfilingStrategy) -> &mut Self {
        // Strategy is validated later in `Engine::new` (see Errors above).
        self.profiling_strategy = profile;
        self
    }
1233
1234 /// Configures whether the debug verifier of Cranelift is enabled or not.
1235 ///
1236 /// When Cranelift is used as a code generation backend this will configure
1237 /// it to have the `enable_verifier` flag which will enable a number of debug
1238 /// checks inside of Cranelift. This is largely only useful for the
1239 /// developers of wasmtime itself.
1240 ///
1241 /// The default value for this is `false`
1242 #[cfg(any(feature = "cranelift", feature = "winch"))]
1243 pub fn cranelift_debug_verifier(&mut self, enable: bool) -> &mut Self {
1244 let val = if enable { "true" } else { "false" };
1245 self.compiler_config
1246 .settings
1247 .insert("enable_verifier".to_string(), val.to_string());
1248 self
1249 }
1250
1251 /// Configures the Cranelift code generator optimization level.
1252 ///
1253 /// When the Cranelift code generator is used you can configure the
1254 /// optimization level used for generated code in a few various ways. For
1255 /// more information see the documentation of [`OptLevel`].
1256 ///
1257 /// The default value for this is `OptLevel::Speed`.
1258 #[cfg(any(feature = "cranelift", feature = "winch"))]
1259 pub fn cranelift_opt_level(&mut self, level: OptLevel) -> &mut Self {
1260 let val = match level {
1261 OptLevel::None => "none",
1262 OptLevel::Speed => "speed",
1263 OptLevel::SpeedAndSize => "speed_and_size",
1264 };
1265 self.compiler_config
1266 .settings
1267 .insert("opt_level".to_string(), val.to_string());
1268 self
1269 }
1270
1271 /// Configures the regalloc algorithm used by the Cranelift code generator.
1272 ///
1273 /// Cranelift can select any of several register allocator algorithms. Each
1274 /// of these algorithms generates correct code, but they represent different
1275 /// tradeoffs between compile speed (how expensive the compilation process
1276 /// is) and run-time speed (how fast the generated code runs).
1277 /// For more information see the documentation of [`RegallocAlgorithm`].
1278 ///
1279 /// The default value for this is `RegallocAlgorithm::Backtracking`.
1280 #[cfg(any(feature = "cranelift", feature = "winch"))]
1281 pub fn cranelift_regalloc_algorithm(&mut self, algo: RegallocAlgorithm) -> &mut Self {
1282 let val = match algo {
1283 RegallocAlgorithm::Backtracking => "backtracking",
1284 };
1285 self.compiler_config
1286 .settings
1287 .insert("regalloc_algorithm".to_string(), val.to_string());
1288 self
1289 }
1290
1291 /// Configures whether Cranelift should perform a NaN-canonicalization pass.
1292 ///
1293 /// When Cranelift is used as a code generation backend this will configure
1294 /// it to replace NaNs with a single canonical value. This is useful for
1295 /// users requiring entirely deterministic WebAssembly computation. This is
1296 /// not required by the WebAssembly spec, so it is not enabled by default.
1297 ///
1298 /// Note that this option affects not only WebAssembly's `f32` and `f64`
1299 /// types but additionally the `v128` type. This option will cause
1300 /// operations using any of these types to have extra checks placed after
1301 /// them to normalize NaN values as needed.
1302 ///
1303 /// The default value for this is `false`
1304 #[cfg(any(feature = "cranelift", feature = "winch"))]
1305 pub fn cranelift_nan_canonicalization(&mut self, enable: bool) -> &mut Self {
1306 let val = if enable { "true" } else { "false" };
1307 self.compiler_config
1308 .settings
1309 .insert("enable_nan_canonicalization".to_string(), val.to_string());
1310 self
1311 }
1312
1313 /// Controls whether proof-carrying code (PCC) is used to validate
1314 /// lowering of Wasm sandbox checks.
1315 ///
1316 /// Proof-carrying code carries "facts" about program values from
1317 /// the IR all the way to machine code, and checks those facts
1318 /// against known machine-instruction semantics. This guards
1319 /// against bugs in instruction lowering that might create holes
1320 /// in the Wasm sandbox.
1321 ///
1322 /// PCC is designed to be fast: it does not require complex
1323 /// solvers or logic engines to verify, but only a linear pass
1324 /// over a trail of "breadcrumbs" or facts at each intermediate
1325 /// value. Thus, it is appropriate to enable in production.
1326 #[cfg(any(feature = "cranelift", feature = "winch"))]
1327 pub fn cranelift_pcc(&mut self, enable: bool) -> &mut Self {
1328 let val = if enable { "true" } else { "false" };
1329 self.compiler_config
1330 .settings
1331 .insert("enable_pcc".to_string(), val.to_string());
1332 self
1333 }
1334
1335 /// Allows setting a Cranelift boolean flag or preset. This allows
1336 /// fine-tuning of Cranelift settings.
1337 ///
1338 /// Since Cranelift flags may be unstable, this method should not be considered to be stable
1339 /// either; other `Config` functions should be preferred for stability.
1340 ///
1341 /// # Safety
1342 ///
1343 /// This is marked as unsafe, because setting the wrong flag might break invariants,
1344 /// resulting in execution hazards.
1345 ///
1346 /// # Errors
1347 ///
    /// The validation of the flags is deferred until the engine is being built, and thus may
    /// cause `Engine::new` to fail if the flag's name does not exist, or the value is not
    /// appropriate for the flag type.
1351 #[cfg(any(feature = "cranelift", feature = "winch"))]
    pub unsafe fn cranelift_flag_enable(&mut self, flag: &str) -> &mut Self {
        // Name is recorded verbatim; its existence is only checked when the
        // engine is built (see Errors above).
        self.compiler_config.flags.insert(flag.to_string());
        self
    }
1356
1357 /// Allows settings another Cranelift flag defined by a flag name and value. This allows
1358 /// fine-tuning of Cranelift settings.
1359 ///
1360 /// Since Cranelift flags may be unstable, this method should not be considered to be stable
1361 /// either; other `Config` functions should be preferred for stability.
1362 ///
1363 /// # Safety
1364 ///
1365 /// This is marked as unsafe, because setting the wrong flag might break invariants,
1366 /// resulting in execution hazards.
1367 ///
1368 /// # Errors
1369 ///
    /// The validation of the flags is deferred until the engine is being built, and thus may
    /// cause `Engine::new` to fail if the flag's name does not exist, or is incompatible with
    /// other settings.
1373 ///
1374 /// For example, feature `wasm_backtrace` will set `unwind_info` to `true`, but if it's
1375 /// manually set to false then it will fail.
1376 #[cfg(any(feature = "cranelift", feature = "winch"))]
1377 pub unsafe fn cranelift_flag_set(&mut self, name: &str, value: &str) -> &mut Self {
1378 self.compiler_config
1379 .settings
1380 .insert(name.to_string(), value.to_string());
1381 self
1382 }
1383
1384 /// Set a custom [`Cache`].
1385 ///
1386 /// To load a cache from a file, use [`Cache::from_file`]. Otherwise, you can create a new
1387 /// cache config using [`CacheConfig::new`] and passing that to [`Cache::new`].
1388 ///
1389 /// If you want to disable the cache, you can call this method with `None`.
1390 ///
1391 /// By default, new configs do not have caching enabled.
1392 /// Every call to [`Module::new(my_wasm)`][crate::Module::new] will recompile `my_wasm`,
1393 /// even when it is unchanged, unless an enabled `CacheConfig` is provided.
1394 ///
1395 /// This method is only available when the `cache` feature of this crate is
1396 /// enabled.
1397 ///
1398 /// [docs]: https://bytecodealliance.github.io/wasmtime/cli-cache.html
1399 #[cfg(feature = "cache")]
    pub fn cache(&mut self, cache: Option<Cache>) -> &mut Self {
        // `None` disables caching entirely.
        self.cache = cache;
        self
    }
1404
1405 /// Sets a custom memory creator.
1406 ///
1407 /// Custom memory creators are used when creating host `Memory` objects or when
1408 /// creating instance linear memories for the on-demand instance allocation strategy.
1409 #[cfg(feature = "runtime")]
1410 pub fn with_host_memory(&mut self, mem_creator: Arc<dyn MemoryCreator>) -> &mut Self {
1411 self.mem_creator = Some(Arc::new(MemoryCreatorProxy(mem_creator)));
1412 self
1413 }
1414
1415 /// Sets a custom stack creator.
1416 ///
    /// Custom stack creators are used when creating async instance stacks for
    /// the on-demand instance allocation strategy.
1419 #[cfg(feature = "async")]
1420 pub fn with_host_stack(&mut self, stack_creator: Arc<dyn StackCreator>) -> &mut Self {
1421 self.stack_creator = Some(Arc::new(StackCreatorProxy(stack_creator)));
1422 self
1423 }
1424
1425 /// Sets a custom executable-memory publisher.
1426 ///
1427 /// Custom executable-memory publishers are hooks that allow
1428 /// Wasmtime to make certain regions of memory executable when
1429 /// loading precompiled modules or compiling new modules
1430 /// in-process. In most modern operating systems, memory allocated
1431 /// for heap usage is readable and writable by default but not
1432 /// executable. To jump to machine code stored in that memory, we
1433 /// need to make it executable. For security reasons, we usually
1434 /// also make it read-only at the same time, so the executing code
1435 /// can't be modified later.
1436 ///
1437 /// By default, Wasmtime will use the appropriate system calls on
1438 /// the host platform for this work. However, it also allows
1439 /// plugging in a custom implementation via this configuration
1440 /// option. This may be useful on custom or `no_std` platforms,
1441 /// for example, especially where virtual memory is not otherwise
1442 /// used by Wasmtime (no `signals-and-traps` feature).
1443 #[cfg(feature = "runtime")]
    pub fn with_custom_code_memory(
        &mut self,
        custom_code_memory: Option<Arc<dyn CustomCodeMemory>>,
    ) -> &mut Self {
        // `None` restores the default, OS-syscall-based publisher.
        self.custom_code_memory = custom_code_memory;
        self
    }
1451
1452 /// Sets the instance allocation strategy to use.
1453 ///
1454 /// This is notably used in conjunction with
1455 /// [`InstanceAllocationStrategy::Pooling`] and [`PoolingAllocationConfig`].
1456 pub fn allocation_strategy(
1457 &mut self,
1458 strategy: impl Into<InstanceAllocationStrategy>,
1459 ) -> &mut Self {
1460 self.allocation_strategy = strategy.into();
1461 self
1462 }
1463
1464 /// Specifies the capacity of linear memories, in bytes, in their initial
1465 /// allocation.
1466 ///
1467 /// > Note: this value has important performance ramifications, be sure to
1468 /// > benchmark when setting this to a non-default value and read over this
1469 /// > documentation.
1470 ///
1471 /// This function will change the size of the initial memory allocation made
1472 /// for linear memories. This setting is only applicable when the initial
1473 /// size of a linear memory is below this threshold. Linear memories are
1474 /// allocated in the virtual address space of the host process with OS APIs
1475 /// such as `mmap` and this setting affects how large the allocation will
1476 /// be.
1477 ///
1478 /// ## Background: WebAssembly Linear Memories
1479 ///
1480 /// WebAssembly linear memories always start with a minimum size and can
1481 /// possibly grow up to a maximum size. The minimum size is always specified
1482 /// in a WebAssembly module itself and the maximum size can either be
1483 /// optionally specified in the module or inherently limited by the index
1484 /// type. For example for this module:
1485 ///
1486 /// ```wasm
1487 /// (module
1488 /// (memory $a 4)
1489 /// (memory $b 4096 4096 (pagesize 1))
1490 /// (memory $c i64 10)
1491 /// )
1492 /// ```
1493 ///
1494 /// * Memory `$a` initially allocates 4 WebAssembly pages (256KiB) and can
1495 /// grow up to 4GiB, the limit of the 32-bit index space.
1496 /// * Memory `$b` initially allocates 4096 WebAssembly pages, but in this
1497 /// case its page size is 1, so it's 4096 bytes. Memory can also grow no
1498 /// further meaning that it will always be 4096 bytes.
1499 /// * Memory `$c` is a 64-bit linear memory which starts with 640KiB of
1500 /// memory and can theoretically grow up to 2^64 bytes, although most
1501 /// hosts will run out of memory long before that.
1502 ///
1503 /// All operations on linear memories done by wasm are required to be
1504 /// in-bounds. Any access beyond the end of a linear memory is considered a
1505 /// trap.
1506 ///
1507 /// ## What this setting affects: Virtual Memory
1508 ///
1509 /// This setting is used to configure the behavior of the size of the linear
1510 /// memory allocation performed for each of these memories. For example the
1511 /// initial linear memory allocation looks like this:
1512 ///
1513 /// ```text
1514 /// memory_reservation
1515 /// |
1516 /// ◄─────────┴────────────────►
1517 /// ┌───────┬─────────┬──────────────────┬───────┐
1518 /// │ guard │ initial │ ... capacity ... │ guard │
1519 /// └───────┴─────────┴──────────────────┴───────┘
1520 /// ◄──┬──► ◄──┬──►
1521 /// │ │
1522 /// │ memory_guard_size
1523 /// │
1524 /// │
1525 /// memory_guard_size (if guard_before_linear_memory)
1526 /// ```
1527 ///
1528 /// Memory in the `initial` range is accessible to the instance and can be
1529 /// read/written by wasm code. Memory in the `guard` regions is never
1530 /// accessible to wasm code and memory in `capacity` is initially
1531 /// inaccessible but may become accessible through `memory.grow` instructions
1532 /// for example.
1533 ///
1534 /// This means that this setting is the size of the initial chunk of virtual
1535 /// memory that a linear memory may grow into.
1536 ///
1537 /// ## What this setting affects: Runtime Speed
1538 ///
1539 /// This is a performance-sensitive setting which is taken into account
1540 /// during the compilation process of a WebAssembly module. For example if a
1541 /// 32-bit WebAssembly linear memory has a `memory_reservation` size of 4GiB
1542 /// then bounds checks can be elided because `capacity` will be guaranteed
1543 /// to be unmapped for all addressable bytes that wasm can access (modulo a
1544 /// few details).
1545 ///
1546 /// If `memory_reservation` was something smaller like 256KiB then that
    /// would have a much smaller impact on virtual memory but the compiled code
1548 /// would then need to have explicit bounds checks to ensure that
1549 /// loads/stores are in-bounds.
1550 ///
1551 /// The goal of this setting is to enable skipping bounds checks in most
1552 /// modules by default. Some situations which require explicit bounds checks
1553 /// though are:
1554 ///
1555 /// * When `memory_reservation` is smaller than the addressable size of the
    /// linear memory. For example 64-bit linear memories always need
    /// bounds checks as they can address the entire virtual address space.
1558 /// For 32-bit linear memories a `memory_reservation` minimum size of 4GiB
1559 /// is required to elide bounds checks.
1560 ///
1561 /// * When linear memories have a page size of 1 then bounds checks are
1562 /// required. In this situation virtual memory can't be relied upon
1563 /// because that operates at the host page size granularity where wasm
1564 /// requires a per-byte level granularity.
1565 ///
1566 /// * Configuration settings such as [`Config::signals_based_traps`] can be
1567 /// used to disable the use of signal handlers and virtual memory so
1568 /// explicit bounds checks are required.
1569 ///
1570 /// * When [`Config::memory_guard_size`] is too small a bounds check may be
1571 /// required. For 32-bit wasm addresses are actually 33-bit effective
1572 /// addresses because loads/stores have a 32-bit static offset to add to
1573 /// the dynamic 32-bit address. If the static offset is larger than the
1574 /// size of the guard region then an explicit bounds check is required.
1575 ///
1576 /// ## What this setting affects: Memory Growth Behavior
1577 ///
1578 /// In addition to affecting bounds checks emitted in compiled code this
1579 /// setting also affects how WebAssembly linear memories are grown. The
1580 /// `memory.grow` instruction can be used to make a linear memory larger and
1581 /// this is also affected by APIs such as
1582 /// [`Memory::grow`](crate::Memory::grow).
1583 ///
1584 /// In these situations when the amount being grown is small enough to fit
1585 /// within the remaining capacity then the linear memory doesn't have to be
1586 /// moved at runtime. If the capacity runs out though then a new linear
1587 /// memory allocation must be made and the contents of linear memory is
1588 /// copied over.
1589 ///
1590 /// For example here's a situation where a copy happens:
1591 ///
1592 /// * The `memory_reservation` setting is configured to 128KiB.
1593 /// * A WebAssembly linear memory starts with a single 64KiB page.
1594 /// * This memory can be grown by one page to contain the full 128KiB of
1595 /// memory.
1596 /// * If grown by one more page, though, then a 192KiB allocation must be
1597 /// made and the previous 128KiB of contents are copied into the new
1598 /// allocation.
1599 ///
1600 /// This growth behavior can have a significant performance impact if lots
1601 /// of data needs to be copied on growth. Conversely if memory growth never
1602 /// needs to happen because the capacity will always be large enough then
1603 /// optimizations can be applied to cache the base pointer of linear memory.
1604 ///
1605 /// When memory is grown then the
1606 /// [`Config::memory_reservation_for_growth`] is used for the new
1607 /// memory allocation to have memory to grow into.
1608 ///
1609 /// When using the pooling allocator via [`PoolingAllocationConfig`] then
1610 /// memories are never allowed to move so requests for growth are instead
1611 /// rejected with an error.
1612 ///
1613 /// ## When this setting is not used
1614 ///
1615 /// This setting is ignored and unused when the initial size of linear
1616 /// memory is larger than this threshold. For example if this setting is set
1617 /// to 1MiB but a wasm module requires a 2MiB minimum allocation then this
1618 /// setting is ignored. In this situation the minimum size of memory will be
1619 /// allocated along with [`Config::memory_reservation_for_growth`]
1620 /// after it to grow into.
1621 ///
1622 /// That means that this value can be set to zero. That can be useful in
1623 /// benchmarking to see the overhead of bounds checks for example.
1624 /// Additionally it can be used to minimize the virtual memory allocated by
1625 /// Wasmtime.
1626 ///
1627 /// ## Default Value
1628 ///
1629 /// The default value for this property depends on the host platform. For
1630 /// 64-bit platforms there's lots of address space available, so the default
1631 /// configured here is 4GiB. When coupled with the default size of
1632 /// [`Config::memory_guard_size`] this means that 32-bit WebAssembly linear
1633 /// memories with 64KiB page sizes will skip almost all bounds checks by
1634 /// default.
1635 ///
1636 /// For 32-bit platforms this value defaults to 10MiB. This means that
1637 /// bounds checks will be required on 32-bit platforms.
    pub fn memory_reservation(&mut self, bytes: u64) -> &mut Self {
        // Stored as `Some` so that an explicit configuration can be
        // distinguished from `None`, the platform-dependent default.
        self.tunables.memory_reservation = Some(bytes);
        self
    }
1642
1643 /// Indicates whether linear memories may relocate their base pointer at
1644 /// runtime.
1645 ///
1646 /// WebAssembly linear memories either have a maximum size that's explicitly
1647 /// listed in the type of a memory or inherently limited by the index type
1648 /// of the memory (e.g. 4GiB for 32-bit linear memories). Depending on how
1649 /// the linear memory is allocated (see [`Config::memory_reservation`]) it
1650 /// may be necessary to move the memory in the host's virtual address space
1651 /// during growth. This option controls whether this movement is allowed or
1652 /// not.
1653 ///
1654 /// An example of a linear memory needing to move is when
1655 /// [`Config::memory_reservation`] is 0 then a linear memory will be
1656 /// allocated as the minimum size of the memory plus
1657 /// [`Config::memory_reservation_for_growth`]. When memory grows beyond the
1658 /// reservation for growth then the memory needs to be relocated.
1659 ///
1660 /// When this option is set to `false` then it can have a number of impacts
1661 /// on how memories work at runtime:
1662 ///
1663 /// * Modules can be compiled with static knowledge the base pointer of
1664 /// linear memory never changes to enable optimizations such as
1665 /// loop invariant code motion (hoisting the base pointer out of a loop).
1666 ///
1667 /// * Memories cannot grow in excess of their original allocation. This
1668 /// means that [`Config::memory_reservation`] and
1669 /// [`Config::memory_reservation_for_growth`] may need tuning to ensure
1670 /// the memory configuration works at runtime.
1671 ///
1672 /// The default value for this option is `true`.
    pub fn memory_may_move(&mut self, enable: bool) -> &mut Self {
        // Explicit override; leaving the tunable `None` keeps the default
        // (`true`) in effect.
        self.tunables.memory_may_move = Some(enable);
        self
    }
1677
1678 /// Configures the size, in bytes, of the guard region used at the end of a
1679 /// linear memory's address space reservation.
1680 ///
1681 /// > Note: this value has important performance ramifications, be sure to
1682 /// > understand what this value does before tweaking it and benchmarking.
1683 ///
1684 /// This setting controls how many bytes are guaranteed to be unmapped after
1685 /// the virtual memory allocation of a linear memory. When
1686 /// combined with sufficiently large values of
1687 /// [`Config::memory_reservation`] (e.g. 4GiB for 32-bit linear memories)
1688 /// then a guard region can be used to eliminate bounds checks in generated
1689 /// code.
1690 ///
1691 /// This setting additionally can be used to help deduplicate bounds checks
1692 /// in code that otherwise requires bounds checks. For example with a 4KiB
1693 /// guard region then a 64-bit linear memory which accesses addresses `x+8`
1694 /// and `x+16` only needs to perform a single bounds check on `x`. If that
1695 /// bounds check passes then the offset is guaranteed to either reside in
1696 /// linear memory or the guard region, resulting in deterministic behavior
1697 /// either way.
1698 ///
1699 /// ## How big should the guard be?
1700 ///
1701 /// In general, like with configuring [`Config::memory_reservation`], you
1702 /// probably don't want to change this value from the defaults. Removing
1703 /// bounds checks is dependent on a number of factors where the size of the
1704 /// guard region is only one piece of the equation. Other factors include:
1705 ///
1706 /// * [`Config::memory_reservation`]
1707 /// * The index type of the linear memory (e.g. 32-bit or 64-bit)
1708 /// * The page size of the linear memory
1709 /// * Other settings such as [`Config::signals_based_traps`]
1710 ///
1711 /// Embeddings using virtual memory almost always want at least some guard
1712 /// region, but otherwise changes from the default should be profiled
1713 /// locally to see the performance impact.
1714 ///
1715 /// ## Default
1716 ///
1717 /// The default value for this property is 32MiB on 64-bit platforms. This
1718 /// allows eliminating almost all bounds checks on loads/stores with an
1719 /// immediate offset of less than 32MiB. On 32-bit platforms this defaults
1720 /// to 64KiB.
    pub fn memory_guard_size(&mut self, bytes: u64) -> &mut Self {
        // Stored as `Some` so that an explicit configuration can be
        // distinguished from `None`, the platform-dependent default.
        self.tunables.memory_guard_size = Some(bytes);
        self
    }
1725
1726 /// Configures the size, in bytes, of the extra virtual memory space
1727 /// reserved after a linear memory is relocated.
1728 ///
1729 /// This setting is used in conjunction with [`Config::memory_reservation`]
1730 /// to configure what happens after a linear memory is relocated in the host
1731 /// address space. If the initial size of a linear memory exceeds
1732 /// [`Config::memory_reservation`] or if it grows beyond that size
1733 /// throughout its lifetime then this setting will be used.
1734 ///
1735 /// When a linear memory is relocated it will initially look like this:
1736 ///
1737 /// ```text
1738 /// memory.size
1739 /// │
1740 /// ◄──────┴─────►
1741 /// ┌───────┬──────────────┬───────┐
1742 /// │ guard │ accessible │ guard │
1743 /// └───────┴──────────────┴───────┘
1744 /// ◄──┬──►
1745 /// │
1746 /// memory_guard_size
1747 /// ```
1748 ///
1749 /// where `accessible` needs to be grown but there's no more memory to grow
1750 /// into. A new region of the virtual address space will be allocated that
1751 /// looks like this:
1752 ///
1753 /// ```text
1754 /// memory_reservation_for_growth
1755 /// │
1756 /// memory.size │
1757 /// │ │
1758 /// ◄──────┴─────► ◄─────────────┴───────────►
1759 /// ┌───────┬──────────────┬───────────────────────────┬───────┐
1760 /// │ guard │ accessible │ .. reserved for growth .. │ guard │
1761 /// └───────┴──────────────┴───────────────────────────┴───────┘
1762 /// ◄──┬──►
1763 /// │
1764 /// memory_guard_size
1765 /// ```
1766 ///
1767 /// This means that up to `memory_reservation_for_growth` bytes can be
1768 /// allocated again before the entire linear memory needs to be moved again
1769 /// when another `memory_reservation_for_growth` bytes will be appended to
1770 /// the size of the allocation.
1771 ///
1772 /// Note that this is a currently simple heuristic for optimizing the growth
1773 /// of dynamic memories, primarily implemented for the memory64 proposal
1774 /// where the maximum size of memory is larger than 4GiB. This setting is
1775 /// unlikely to be a one-size-fits-all style approach and if you're an
1776 /// embedder running into issues with growth and are interested in having
1777 /// other growth strategies available here please feel free to [open an
1778 /// issue on the Wasmtime repository][issue]!
1779 ///
1780 /// [issue]: https://github.com/bytecodealliance/wasmtime/issues/new
1781 ///
1782 /// ## Default
1783 ///
1784 /// For 64-bit platforms this defaults to 2GiB, and for 32-bit platforms
1785 /// this defaults to 1MiB.
    pub fn memory_reservation_for_growth(&mut self, bytes: u64) -> &mut Self {
        // Stored as `Some` so that an explicit configuration can be
        // distinguished from `None`, the platform-dependent default.
        self.tunables.memory_reservation_for_growth = Some(bytes);
        self
    }
1790
1791 /// Indicates whether a guard region is present before allocations of
1792 /// linear memory.
1793 ///
1794 /// Guard regions before linear memories are never used during normal
1795 /// operation of WebAssembly modules, even if they have out-of-bounds
1796 /// loads. The only purpose for a preceding guard region in linear memory
1797 /// is extra protection against possible bugs in code generators like
1798 /// Cranelift. This setting does not affect performance in any way, but will
1799 /// result in larger virtual memory reservations for linear memories (it
1800 /// won't actually ever use more memory, just use more of the address
1801 /// space).
1802 ///
1803 /// The size of the guard region before linear memory is the same as the
1804 /// guard size that comes after linear memory, which is configured by
1805 /// [`Config::memory_guard_size`].
1806 ///
1807 /// ## Default
1808 ///
1809 /// This value defaults to `true`.
    pub fn guard_before_linear_memory(&mut self, enable: bool) -> &mut Self {
        // Explicit override; leaving the tunable `None` keeps the default
        // (`true`) in effect.
        self.tunables.guard_before_linear_memory = Some(enable);
        self
    }
1814
1815 /// Indicates whether to initialize tables lazily, so that instantiation
1816 /// is fast but indirect calls are a little slower. If false, tables
1817 /// are initialized eagerly during instantiation from any active element
1818 /// segments that apply to them.
1819 ///
1820 /// **Note** Disabling this option is not compatible with the Winch compiler.
1821 ///
1822 /// ## Default
1823 ///
1824 /// This value defaults to `true`.
    pub fn table_lazy_init(&mut self, table_lazy_init: bool) -> &mut Self {
        // Explicit override; leaving the tunable `None` keeps the default
        // (`true`) in effect.
        self.tunables.table_lazy_init = Some(table_lazy_init);
        self
    }
1829
1830 /// Configure the version information used in serialized and deserialized [`crate::Module`]s.
1831 /// This effects the behavior of [`crate::Module::serialize()`], as well as
1832 /// [`crate::Module::deserialize()`] and related functions.
1833 ///
1834 /// The default strategy is to use the wasmtime crate's Cargo package version.
1835 pub fn module_version(&mut self, strategy: ModuleVersionStrategy) -> Result<&mut Self> {
1836 match strategy {
1837 // This case requires special precondition for assertion in SerializedModule::to_bytes
1838 ModuleVersionStrategy::Custom(ref v) => {
1839 if v.as_bytes().len() > 255 {
1840 bail!("custom module version cannot be more than 255 bytes: {}", v);
1841 }
1842 }
1843 _ => {}
1844 }
1845 self.module_version = strategy;
1846 Ok(self)
1847 }
1848
1849 /// Configure whether wasmtime should compile a module using multiple
1850 /// threads.
1851 ///
1852 /// Disabling this will result in a single thread being used to compile
1853 /// the wasm bytecode.
1854 ///
1855 /// By default parallel compilation is enabled.
    #[cfg(feature = "parallel-compilation")]
    pub fn parallel_compilation(&mut self, parallel: bool) -> &mut Self {
        // Toggles whether module compilation may fan out across multiple
        // threads; `false` forces single-threaded compilation.
        self.parallel_compilation = parallel;
        self
    }
1861
1862 /// Configures whether compiled artifacts will contain information to map
1863 /// native program addresses back to the original wasm module.
1864 ///
1865 /// This configuration option is `true` by default and, if enabled,
1866 /// generates the appropriate tables in compiled modules to map from native
1867 /// address back to wasm source addresses. This is used for displaying wasm
1868 /// program counters in backtraces as well as generating filenames/line
1869 /// numbers if so configured as well (and the original wasm module has DWARF
1870 /// debugging information present).
    pub fn generate_address_map(&mut self, generate: bool) -> &mut Self {
        // Explicit override; leaving the tunable `None` keeps the default
        // (`true`) in effect.
        self.tunables.generate_address_map = Some(generate);
        self
    }
1875
1876 /// Configures whether copy-on-write memory-mapped data is used to
1877 /// initialize a linear memory.
1878 ///
1879 /// Initializing linear memory via a copy-on-write mapping can drastically
1880 /// improve instantiation costs of a WebAssembly module because copying
1881 /// memory is deferred. Additionally if a page of memory is only ever read
1882 /// from WebAssembly and never written too then the same underlying page of
1883 /// data will be reused between all instantiations of a module meaning that
1884 /// if a module is instantiated many times this can lower the overall memory
1885 /// required needed to run that module.
1886 ///
1887 /// The main disadvantage of copy-on-write initialization, however, is that
1888 /// it may be possible for highly-parallel scenarios to be less scalable. If
1889 /// a page is read initially by a WebAssembly module then that page will be
1890 /// mapped to a read-only copy shared between all WebAssembly instances. If
1891 /// the same page is then written, however, then a private copy is created
1892 /// and swapped out from the read-only version. This also requires an [IPI],
1893 /// however, which can be a significant bottleneck in high-parallelism
1894 /// situations.
1895 ///
1896 /// This feature is only applicable when a WebAssembly module meets specific
1897 /// criteria to be initialized in this fashion, such as:
1898 ///
1899 /// * Only memories defined in the module can be initialized this way.
1900 /// * Data segments for memory must use statically known offsets.
1901 /// * Data segments for memory must all be in-bounds.
1902 ///
1903 /// Modules which do not meet these criteria will fall back to
1904 /// initialization of linear memory based on copying memory.
1905 ///
1906 /// This feature of Wasmtime is also platform-specific:
1907 ///
1908 /// * Linux - this feature is supported for all instances of [`Module`].
1909 /// Modules backed by an existing mmap (such as those created by
1910 /// [`Module::deserialize_file`]) will reuse that mmap to cow-initialize
1911 /// memory. Other instance of [`Module`] may use the `memfd_create`
1912 /// syscall to create an initialization image to `mmap`.
1913 /// * Unix (not Linux) - this feature is only supported when loading modules
1914 /// from a precompiled file via [`Module::deserialize_file`] where there
1915 /// is a file descriptor to use to map data into the process. Note that
1916 /// the module must have been compiled with this setting enabled as well.
1917 /// * Windows - there is no support for this feature at this time. Memory
1918 /// initialization will always copy bytes.
1919 ///
1920 /// By default this option is enabled.
1921 ///
1922 /// [`Module::deserialize_file`]: crate::Module::deserialize_file
1923 /// [`Module`]: crate::Module
1924 /// [IPI]: https://en.wikipedia.org/wiki/Inter-processor_interrupt
    pub fn memory_init_cow(&mut self, enable: bool) -> &mut Self {
        // Explicit override; leaving the tunable `None` keeps the default
        // (enabled) in effect.
        self.tunables.memory_init_cow = Some(enable);
        self
    }
1929
1930 /// A configuration option to force the usage of `memfd_create` on Linux to
1931 /// be used as the backing source for a module's initial memory image.
1932 ///
1933 /// When [`Config::memory_init_cow`] is enabled, which is enabled by
1934 /// default, module memory initialization images are taken from a module's
1935 /// original mmap if possible. If a precompiled module was loaded from disk
1936 /// this means that the disk's file is used as an mmap source for the
1937 /// initial linear memory contents. This option can be used to force, on
1938 /// Linux, that instead of using the original file on disk a new in-memory
1939 /// file is created with `memfd_create` to hold the contents of the initial
1940 /// image.
1941 ///
1942 /// This option can be used to avoid possibly loading the contents of memory
1943 /// from disk through a page fault. Instead with `memfd_create` the contents
1944 /// of memory are always in RAM, meaning that even page faults which
1945 /// initially populate a wasm linear memory will only work with RAM instead
1946 /// of ever hitting the disk that the original precompiled module is stored
1947 /// on.
1948 ///
1949 /// This option is disabled by default.
    pub fn force_memory_init_memfd(&mut self, enable: bool) -> &mut Self {
        // Linux-specific behavior (see the doc comment above); stored
        // directly on the config rather than in `tunables`.
        self.force_memory_init_memfd = enable;
        self
    }
1954
1955 /// Configures whether or not a coredump should be generated and attached to
1956 /// the anyhow::Error when a trap is raised.
1957 ///
1958 /// This option is disabled by default.
    #[cfg(feature = "coredump")]
    pub fn coredump_on_trap(&mut self, enable: bool) -> &mut Self {
        // Disabled by default; only available when the `coredump` crate
        // feature is compiled in.
        self.coredump_on_trap = enable;
        self
    }
1964
1965 /// Enables memory error checking for wasm programs.
1966 ///
1967 /// This option is disabled by default.
    #[cfg(any(feature = "cranelift", feature = "winch"))]
    pub fn wmemcheck(&mut self, enable: bool) -> &mut Self {
        // The flag is mirrored into `compiler_config` so the compiler
        // backend's copy stays in sync with the runtime's copy.
        self.wmemcheck = enable;
        self.compiler_config.wmemcheck = enable;
        self
    }
1974
1975 /// Configures the "guaranteed dense image size" for copy-on-write
1976 /// initialized memories.
1977 ///
1978 /// When using the [`Config::memory_init_cow`] feature to initialize memory
1979 /// efficiently (which is enabled by default), compiled modules contain an
1980 /// image of the module's initial heap. If the module has a fairly sparse
1981 /// initial heap, with just a few data segments at very different offsets,
1982 /// this could result in a large region of zero bytes in the image. In
1983 /// other words, it's not very memory-efficient.
1984 ///
1985 /// We normally use a heuristic to avoid this: if less than half
1986 /// of the initialized range (first non-zero to last non-zero
1987 /// byte) of any memory in the module has pages with nonzero
1988 /// bytes, then we avoid creating a memory image for the entire module.
1989 ///
1990 /// However, if the embedder always needs the instantiation-time efficiency
1991 /// of copy-on-write initialization, and is otherwise carefully controlling
1992 /// parameters of the modules (for example, by limiting the maximum heap
1993 /// size of the modules), then it may be desirable to ensure a memory image
1994 /// is created even if this could go against the heuristic above. Thus, we
1995 /// add another condition: there is a size of initialized data region up to
1996 /// which we *always* allow a memory image. The embedder can set this to a
1997 /// known maximum heap size if they desire to always get the benefits of
1998 /// copy-on-write images.
1999 ///
2000 /// In the future we may implement a "best of both worlds"
2001 /// solution where we have a dense image up to some limit, and
2002 /// then support a sparse list of initializers beyond that; this
2003 /// would get most of the benefit of copy-on-write and pay the incremental
2004 /// cost of eager initialization only for those bits of memory
2005 /// that are out-of-bounds. However, for now, an embedder desiring
2006 /// fast instantiation should ensure that this setting is as large
2007 /// as the maximum module initial memory content size.
2008 ///
2009 /// By default this value is 16 MiB.
    pub fn memory_guaranteed_dense_image_size(&mut self, size_in_bytes: u64) -> &mut Self {
        // Threshold below which a copy-on-write memory image is always built,
        // even when the sparseness heuristic described above would otherwise
        // reject it.
        self.memory_guaranteed_dense_image_size = size_in_bytes;
        self
    }
2014
2015 /// Returns the set of features that the currently selected compiler backend
2016 /// does not support at all and may panic on.
2017 ///
2018 /// Wasmtime strives to reject unknown modules or unsupported modules with
2019 /// first-class errors instead of panics. Not all compiler backends have the
2020 /// same level of feature support on all platforms as well. This method
2021 /// returns a set of features that the currently selected compiler
2022 /// configuration is known to not support and may panic on. This acts as a
2023 /// first-level filter on incoming wasm modules/configuration to fail-fast
2024 /// instead of panicking later on.
2025 ///
2026 /// Note that if a feature is not listed here it does not mean that the
2027 /// backend fully supports the proposal. Instead that means that the backend
2028 /// doesn't ever panic on the proposal, but errors during compilation may
2029 /// still be returned. This means that features listed here are definitely
2030 /// not supported at all, but features not listed here may still be
2031 /// partially supported. For example at the time of this writing the Winch
2032 /// backend partially supports simd so it's not listed here. Winch doesn't
2033 /// fully support simd but unimplemented instructions just return errors.
    fn compiler_panicking_wasm_features(&self) -> WasmFeatures {
        // Without either compiler backend compiled in there's nothing that
        // can panic on a feature, so the no-backend case (the trailing
        // cfg'd `return` below) reports the empty set.
        #[cfg(any(feature = "cranelift", feature = "winch"))]
        match self.compiler_config.strategy {
            None | Some(Strategy::Cranelift) => {
                let mut unsupported = WasmFeatures::empty();

                // Pulley at this time fundamentally doesn't support the
                // `threads` proposal, notably shared memory, because Rust can't
                // safely implement loads/stores in the face of shared memory.
                // Stack switching is not implemented, either.
                if self.compiler_target().is_pulley() {
                    unsupported |= WasmFeatures::THREADS;
                    unsupported |= WasmFeatures::STACK_SWITCHING;
                }

                use target_lexicon::*;
                match self.compiler_target() {
                    Triple {
                        architecture: Architecture::X86_64 | Architecture::X86_64h,
                        operating_system:
                            OperatingSystem::Linux
                            | OperatingSystem::MacOSX(_)
                            | OperatingSystem::Darwin(_),
                        ..
                    } => {
                        // Stack switching supported on (non-Pulley) Cranelift.
                    }

                    _ => {
                        // On platforms other than x64 Unix-like, we don't
                        // support stack switching.
                        unsupported |= WasmFeatures::STACK_SWITCHING;
                    }
                }
                unsupported
            }
            Some(Strategy::Winch) => {
                // Winch's baseline set of never-supported proposals,
                // independent of target architecture.
                let mut unsupported = WasmFeatures::GC
                    | WasmFeatures::FUNCTION_REFERENCES
                    | WasmFeatures::RELAXED_SIMD
                    | WasmFeatures::TAIL_CALL
                    | WasmFeatures::GC_TYPES
                    | WasmFeatures::EXCEPTIONS
                    | WasmFeatures::LEGACY_EXCEPTIONS
                    | WasmFeatures::STACK_SWITCHING;
                match self.compiler_target().architecture {
                    target_lexicon::Architecture::Aarch64(_) => {
                        unsupported |= WasmFeatures::SIMD;
                        unsupported |= WasmFeatures::THREADS;
                        unsupported |= WasmFeatures::WIDE_ARITHMETIC;
                    }

                    // Winch doesn't support other non-x64 architectures at this
                    // time either but will return a first-class error for
                    // them.
                    _ => {}
                }
                unsupported
            }
            // NOTE(review): `Auto` appears to be resolved to a concrete
            // strategy before this method runs — confirm against the
            // `strategy` setter / validation path.
            Some(Strategy::Auto) => unreachable!(),
        }
        #[cfg(not(any(feature = "cranelift", feature = "winch")))]
        return WasmFeatures::empty();
    }
2098
2099 /// Calculates the set of features that are enabled for this `Config`.
2100 ///
2101 /// This method internally will start with the an empty set of features to
2102 /// avoid being tied to wasmparser's defaults. Next Wasmtime's set of
2103 /// default features are added to this set, some of which are conditional
2104 /// depending on crate features. Finally explicitly requested features via
2105 /// `wasm_*` methods on `Config` are applied. Everything is then validated
2106 /// later in `Config::validate`.
2107 fn features(&self) -> WasmFeatures {
2108 // Wasmtime by default supports all of the wasm 2.0 version of the
2109 // specification.
2110 let mut features = WasmFeatures::WASM2;
2111
2112 // On-by-default features that wasmtime has. Note that these are all
2113 // subject to the criteria at
2114 // https://docs.wasmtime.dev/contributing-implementing-wasm-proposals.html
2115 // and
2116 // https://docs.wasmtime.dev/stability-wasm-proposals.html
2117 features |= WasmFeatures::MULTI_MEMORY;
2118 features |= WasmFeatures::RELAXED_SIMD;
2119 features |= WasmFeatures::TAIL_CALL;
2120 features |= WasmFeatures::EXTENDED_CONST;
2121 features |= WasmFeatures::MEMORY64;
2122 // NB: if you add a feature above this line please double-check
2123 // https://docs.wasmtime.dev/stability-wasm-proposals.html
2124 // to ensure all requirements are met and/or update the documentation
2125 // there too.
2126
2127 // Set some features to their conditionally-enabled defaults depending
2128 // on crate compile-time features.
2129 features.set(WasmFeatures::GC_TYPES, cfg!(feature = "gc"));
2130 features.set(WasmFeatures::THREADS, cfg!(feature = "threads"));
2131 features.set(
2132 WasmFeatures::COMPONENT_MODEL,
2133 cfg!(feature = "component-model"),
2134 );
2135
2136 // From the default set of proposals remove any that the current
2137 // compiler backend may panic on if the module contains them.
2138 features = features & !self.compiler_panicking_wasm_features();
2139
2140 // After wasmtime's defaults are configured then factor in user requests
2141 // and disable/enable features. Note that the enable/disable sets should
2142 // be disjoint.
2143 debug_assert!((self.enabled_features & self.disabled_features).is_empty());
2144 features &= !self.disabled_features;
2145 features |= self.enabled_features;
2146
2147 features
2148 }
2149
2150 /// Returns the configured compiler target for this `Config`.
2151 pub(crate) fn compiler_target(&self) -> target_lexicon::Triple {
2152 // If a target is explicitly configured, always use that.
2153 if let Some(target) = self.target.clone() {
2154 return target;
2155 }
2156
2157 // If the `build.rs` script determined that this platform uses pulley by
2158 // default, then use Pulley.
2159 if cfg!(default_target_pulley) {
2160 return target_lexicon::Triple::pulley_host();
2161 }
2162
2163 // And at this point the target is for sure the host.
2164 target_lexicon::Triple::host()
2165 }
2166
    /// Validates this configuration, returning the `Tunables` and
    /// `WasmFeatures` that engine construction should proceed with.
    ///
    /// Performs cross-cutting consistency checks between the compiler
    /// strategy, enabled wasm proposals, stack sizes, and target-specific
    /// tunables, producing first-class errors rather than later panics.
    pub(crate) fn validate(&self) -> Result<(Tunables, WasmFeatures)> {
        let features = self.features();

        // First validate that the selected compiler backend and configuration
        // supports the set of `features` that are enabled. This will help
        // provide more first class errors instead of panics about unsupported
        // features and configurations.
        let unsupported = features & self.compiler_panicking_wasm_features();
        if !unsupported.is_empty() {
            // Report the first offending feature by name.
            for flag in WasmFeatures::FLAGS.iter() {
                if !unsupported.contains(*flag.value()) {
                    continue;
                }
                bail!(
                    "the wasm_{} feature is not supported on this compiler configuration",
                    flag.name().to_lowercase()
                );
            }

            // `unsupported` was non-empty so the loop must have bailed.
            panic!("should have returned an error by now")
        }

        // The async stack must be large enough to contain the wasm stack
        // plus room for the host.
        #[cfg(any(feature = "async", feature = "stack-switching"))]
        if self.async_support && self.max_wasm_stack > self.async_stack_size {
            bail!("max_wasm_stack size cannot exceed the async_stack_size");
        }
        if self.max_wasm_stack == 0 {
            bail!("max_wasm_stack size cannot be zero");
        }
        #[cfg(not(feature = "wmemcheck"))]
        if self.wmemcheck {
            bail!("wmemcheck (memory checker) was requested but is not enabled in this build");
        }

        let mut tunables = Tunables::default_for_target(&self.compiler_target())?;

        // If no target is explicitly specified then further refine `tunables`
        // for the configuration of this host depending on what platform
        // features were found available at compile time. This means that anyone
        // cross-compiling for a customized host will need to further refine
        // compilation options.
        if self.target.is_none() {
            // If this platform doesn't have native signals then change some
            // defaults to account for that. Note that VM guards are turned off
            // here because that's primarily a feature of eliding
            // bounds-checks.
            if !cfg!(has_native_signals) {
                tunables.signals_based_traps = cfg!(has_native_signals);
                tunables.memory_guard_size = 0;
            }

            // When virtual memory is not available use slightly different
            // defaults for tunables to be more amenable to `MallocMemory`.
            // Note that these can still be overridden by config options.
            if !cfg!(has_virtual_memory) {
                tunables.memory_reservation = 0;
                tunables.memory_reservation_for_growth = 1 << 20; // 1MB
                tunables.memory_init_cow = false;
            }
        }

        // Layer any user-requested tunable overrides on top of the defaults.
        self.tunables.configure(&mut tunables);

        // If we're going to compile with winch, we must use the winch calling convention.
        #[cfg(any(feature = "cranelift", feature = "winch"))]
        {
            tunables.winch_callable = self.compiler_config.strategy == Some(Strategy::Winch);
        }

        // Resolve which GC collector, if any, is in use. GC types require
        // the `gc` crate feature to have been enabled at compile time.
        tunables.collector = if features.gc_types() {
            #[cfg(feature = "gc")]
            {
                use wasmtime_environ::Collector as EnvCollector;
                Some(match self.collector.try_not_auto()? {
                    Collector::DeferredReferenceCounting => EnvCollector::DeferredReferenceCounting,
                    Collector::Null => EnvCollector::Null,
                    Collector::Auto => unreachable!(),
                })
            }
            #[cfg(not(feature = "gc"))]
            bail!("cannot use GC types: the `gc` feature was disabled at compile time")
        } else {
            None
        };

        Ok((tunables, features))
    }
2254
    /// Constructs the instance allocator configured for this `Config`:
    /// either the on-demand allocator or the pooling allocator.
    #[cfg(feature = "runtime")]
    pub(crate) fn build_allocator(
        &self,
        tunables: &Tunables,
    ) -> Result<Box<dyn InstanceAllocator + Send + Sync>> {
        // Async stack parameters only exist when the `async` feature is
        // enabled; otherwise substitute inert placeholder values.
        #[cfg(feature = "async")]
        let (stack_size, stack_zeroing) = (self.async_stack_size, self.async_stack_zeroing);

        #[cfg(not(feature = "async"))]
        let (stack_size, stack_zeroing) = (0, false);

        // `tunables` is only consumed by the pooling branch below; this
        // silences the unused-variable warning when that feature is off.
        let _ = tunables;

        match &self.allocation_strategy {
            InstanceAllocationStrategy::OnDemand => {
                #[allow(unused_mut)]
                let mut allocator = Box::new(OnDemandInstanceAllocator::new(
                    self.mem_creator.clone(),
                    stack_size,
                    stack_zeroing,
                ));
                // Wire up a custom fiber-stack creator if one was configured.
                #[cfg(feature = "async")]
                if let Some(stack_creator) = &self.stack_creator {
                    allocator.set_stack_creator(stack_creator.clone());
                }
                Ok(allocator)
            }
            #[cfg(feature = "pooling-allocator")]
            InstanceAllocationStrategy::Pooling(config) => {
                // Propagate async-stack settings into the pooling config
                // before the allocator is built.
                let mut config = config.config;
                config.stack_size = stack_size;
                config.async_stack_zeroing = stack_zeroing;
                Ok(Box::new(crate::runtime::vm::PoolingInstanceAllocator::new(
                    &config, tunables,
                )?))
            }
        }
    }
2293
    /// Constructs the GC runtime for this `Config`, returning `Ok(None)`
    /// when GC types are not part of the enabled wasm feature set.
    #[cfg(feature = "runtime")]
    pub(crate) fn build_gc_runtime(&self) -> Result<Option<Arc<dyn GcRuntime>>> {
        if !self.features().gc_types() {
            return Ok(None);
        }

        // GC types are enabled but this build has no GC support at all.
        #[cfg(not(feature = "gc"))]
        bail!("cannot create a GC runtime: the `gc` feature was disabled at compile time");

        #[cfg(feature = "gc")]
        #[cfg_attr(
            not(any(feature = "gc-null", feature = "gc-drc")),
            allow(unused_variables, unreachable_code)
        )]
        {
            // `try_not_auto` has already resolved `Auto` and errored out on
            // collectors that were compiled out of this build, making the
            // `unreachable!` arms below genuinely unreachable.
            Ok(Some(match self.collector.try_not_auto()? {
                #[cfg(feature = "gc-drc")]
                Collector::DeferredReferenceCounting => {
                    Arc::new(crate::runtime::vm::DrcCollector::default()) as Arc<dyn GcRuntime>
                }
                #[cfg(not(feature = "gc-drc"))]
                Collector::DeferredReferenceCounting => unreachable!(),

                #[cfg(feature = "gc-null")]
                Collector::Null => {
                    Arc::new(crate::runtime::vm::NullCollector::default()) as Arc<dyn GcRuntime>
                }
                #[cfg(not(feature = "gc-null"))]
                Collector::Null => unreachable!(),

                Collector::Auto => unreachable!(),
            }))
        }
    }
2328
2329 #[cfg(feature = "runtime")]
2330 pub(crate) fn build_profiler(&self) -> Result<Box<dyn ProfilingAgent>> {
2331 Ok(match self.profiling_strategy {
2332 ProfilingStrategy::PerfMap => profiling_agent::new_perfmap()?,
2333 ProfilingStrategy::JitDump => profiling_agent::new_jitdump()?,
2334 ProfilingStrategy::VTune => profiling_agent::new_vtune()?,
2335 ProfilingStrategy::None => profiling_agent::new_null(),
2336 ProfilingStrategy::Pulley => profiling_agent::new_pulley()?,
2337 })
2338 }
2339
2340 #[cfg(any(feature = "cranelift", feature = "winch"))]
2341 pub(crate) fn build_compiler(
2342 mut self,
2343 tunables: &Tunables,
2344 features: WasmFeatures,
2345 ) -> Result<(Self, Box<dyn wasmtime_environ::Compiler>)> {
2346 let target = self.compiler_target();
2347
2348 // The target passed to the builders below is an `Option<Triple>` where
2349 // `None` represents the current host with CPU features inferred from
2350 // the host's CPU itself. The `target` above is not an `Option`, so
2351 // switch it to `None` in the case that a target wasn't explicitly
2352 // specified (which indicates no feature inference) and the target
2353 // matches the host.
2354 let target_for_builder =
2355 if self.target.is_none() && target == target_lexicon::Triple::host() {
2356 None
2357 } else {
2358 Some(target.clone())
2359 };
2360
2361 let mut compiler = match self.compiler_config.strategy {
2362 #[cfg(feature = "cranelift")]
2363 Some(Strategy::Cranelift) => wasmtime_cranelift::builder(target_for_builder)?,
2364 #[cfg(not(feature = "cranelift"))]
2365 Some(Strategy::Cranelift) => bail!("cranelift support not compiled in"),
2366 #[cfg(feature = "winch")]
2367 Some(Strategy::Winch) => wasmtime_winch::builder(target_for_builder)?,
2368 #[cfg(not(feature = "winch"))]
2369 Some(Strategy::Winch) => bail!("winch support not compiled in"),
2370
2371 None | Some(Strategy::Auto) => unreachable!(),
2372 };
2373
2374 if let Some(path) = &self.compiler_config.clif_dir {
2375 compiler.clif_dir(path)?;
2376 }
2377
2378 // If probestack is enabled for a target, Wasmtime will always use the
2379 // inline strategy which doesn't require us to define a `__probestack`
2380 // function or similar.
2381 self.compiler_config
2382 .settings
2383 .insert("probestack_strategy".into(), "inline".into());
2384
2385 // We enable stack probing by default on all targets.
2386 // This is required on Windows because of the way Windows
2387 // commits its stacks, but it's also a good idea on other
2388 // platforms to ensure guard pages are hit for large frame
2389 // sizes.
2390 self.compiler_config
2391 .flags
2392 .insert("enable_probestack".into());
2393
2394 // The current wasm multivalue implementation depends on this.
2395 // FIXME(#9510) handle this in wasmtime-cranelift instead.
2396 self.compiler_config
2397 .flags
2398 .insert("enable_multi_ret_implicit_sret".into());
2399
2400 if let Some(unwind_requested) = self.native_unwind_info {
2401 if !self
2402 .compiler_config
2403 .ensure_setting_unset_or_given("unwind_info", &unwind_requested.to_string())
2404 {
2405 bail!(
2406 "incompatible settings requested for Cranelift and Wasmtime `unwind-info` settings"
2407 );
2408 }
2409 }
2410
2411 if target.operating_system == target_lexicon::OperatingSystem::Windows {
2412 if !self
2413 .compiler_config
2414 .ensure_setting_unset_or_given("unwind_info", "true")
2415 {
2416 bail!("`native_unwind_info` cannot be disabled on Windows");
2417 }
2418 }
2419
2420 // We require frame pointers for correct stack walking, which is safety
2421 // critical in the presence of reference types, and otherwise it is just
2422 // really bad developer experience to get wrong.
2423 self.compiler_config
2424 .settings
2425 .insert("preserve_frame_pointers".into(), "true".into());
2426
2427 if !tunables.signals_based_traps {
2428 let mut ok = self
2429 .compiler_config
2430 .ensure_setting_unset_or_given("enable_table_access_spectre_mitigation", "false");
2431 ok = ok
2432 && self.compiler_config.ensure_setting_unset_or_given(
2433 "enable_heap_access_spectre_mitigation",
2434 "false",
2435 );
2436
2437 // Right now spectre-mitigated bounds checks will load from zero so
2438 // if host-based signal handlers are disabled then that's a mismatch
2439 // and doesn't work right now. Fixing this will require more thought
2440 // of how to implement the bounds check in spectre-only mode.
2441 if !ok {
2442 bail!(
2443 "when signals-based traps are disabled then spectre \
2444 mitigations must also be disabled"
2445 );
2446 }
2447 }
2448
2449 // check for incompatible compiler options and set required values
2450 if features.contains(WasmFeatures::REFERENCE_TYPES) {
2451 if !self
2452 .compiler_config
2453 .ensure_setting_unset_or_given("enable_safepoints", "true")
2454 {
2455 bail!(
2456 "compiler option 'enable_safepoints' must be enabled when 'reference types' is enabled"
2457 );
2458 }
2459 }
2460
2461 if features.contains(WasmFeatures::RELAXED_SIMD) && !features.contains(WasmFeatures::SIMD) {
2462 bail!("cannot disable the simd proposal but enable the relaxed simd proposal");
2463 }
2464
2465 if features.contains(WasmFeatures::STACK_SWITCHING) {
2466 use target_lexicon::OperatingSystem;
2467 let model = match target.operating_system {
2468 OperatingSystem::Windows => "update_windows_tib",
2469 OperatingSystem::Linux
2470 | OperatingSystem::MacOSX(_)
2471 | OperatingSystem::Darwin(_) => "basic",
2472 _ => bail!("stack-switching feature not supported on this platform "),
2473 };
2474
2475 if !self
2476 .compiler_config
2477 .ensure_setting_unset_or_given("stack_switch_model", model)
2478 {
2479 bail!(
2480 "compiler option 'stack_switch_model' must be set to '{}' on this platform",
2481 model
2482 );
2483 }
2484 }
2485
2486 // Apply compiler settings and flags
2487 for (k, v) in self.compiler_config.settings.iter() {
2488 compiler.set(k, v)?;
2489 }
2490 for flag in self.compiler_config.flags.iter() {
2491 compiler.enable(flag)?;
2492 }
2493
2494 #[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
2495 if let Some(cache_store) = &self.compiler_config.cache_store {
2496 compiler.enable_incremental_compilation(cache_store.clone())?;
2497 }
2498
2499 compiler.set_tunables(tunables.clone())?;
2500 compiler.wmemcheck(self.compiler_config.wmemcheck);
2501
2502 Ok((self, compiler.build()?))
2503 }
2504
    /// Internal setting for whether adapter modules for components will have
    /// extra WebAssembly instructions inserted performing more debug checks
    /// than are necessary.
    #[cfg(feature = "component-model")]
    pub fn debug_adapter_modules(&mut self, debug: bool) -> &mut Self {
        self.tunables.debug_adapter_modules = Some(debug);
        self
    }
2513
2514 /// Enables clif output when compiling a WebAssembly module.
2515 #[cfg(any(feature = "cranelift", feature = "winch"))]
2516 pub fn emit_clif(&mut self, path: &Path) -> &mut Self {
2517 self.compiler_config.clif_dir = Some(path.to_path_buf());
2518 self
2519 }
2520
    /// Configures whether, when on macOS, Mach ports are used for exception
    /// handling instead of traditional Unix-based signal handling.
    ///
    /// WebAssembly traps in Wasmtime are implemented with native faults, for
    /// example a `SIGSEGV` will occur when a WebAssembly guest accesses
    /// out-of-bounds memory. Handling this can be configured to either use Unix
    /// signals or Mach ports on macOS. By default Mach ports are used.
    ///
    /// Mach ports enable Wasmtime to work by default with foreign
    /// error-handling systems such as breakpad which also use Mach ports to
    /// handle signals. In this situation Wasmtime will continue to handle guest
    /// faults gracefully while any non-guest faults will get forwarded to
    /// process-level handlers such as breakpad. Some more background on this
    /// can be found in #2456.
    ///
    /// A downside of using mach ports, however, is that they don't interact
    /// well with `fork()`. Forking a Wasmtime process on macOS will produce a
    /// child process that cannot successfully run WebAssembly. In this
    /// situation traditional Unix signal handling should be used as that's
    /// inherited and works across forks.
    ///
    /// If your embedding wants to use a custom error handler which leverages
    /// Mach ports and you additionally wish to `fork()` the process and use
    /// Wasmtime in the child process that's not currently possible. Please
    /// reach out to us if you're in this bucket!
    ///
    /// This option defaults to `true`, using Mach ports by default.
    pub fn macos_use_mach_ports(&mut self, mach_ports: bool) -> &mut Self {
        // Record the preference; the field's default is `true` as documented.
        self.macos_use_mach_ports = mach_ports;
        self
    }
2552
    /// Configures an embedder-provided function, `detect`, which is used to
    /// determine if an ISA-specific feature is available on the current host.
    ///
    /// This function is used to verify that any features enabled for a compiler
    /// backend, such as AVX support on x86\_64, are also available on the host.
    /// It is undefined behavior to execute an AVX instruction on a host that
    /// doesn't support AVX instructions, for example.
    ///
    /// When the `std` feature is active on this crate then this function is
    /// configured to a default implementation that uses the standard library's
    /// feature detection. When the `std` feature is disabled then there is no
    /// default available and this method must be called to configure a feature
    /// probing function.
    ///
    /// The `detect` function provided is given a string name of an ISA feature.
    /// The function should then return:
    ///
    /// * `Some(true)` - indicates that the feature was found on the host and it
    ///   is supported.
    /// * `Some(false)` - the feature name was recognized but it was not
    ///   detected on the host, for example the CPU is too old.
    /// * `None` - the feature name was not recognized and it's not known
    ///   whether it's on the host or not.
    ///
    /// Feature names passed to `detect` match the same feature name used in the
    /// Rust standard library. For example `"sse4.2"` is used on x86\_64.
    ///
    /// Calling this method again replaces any previously configured detection
    /// function.
    ///
    /// # Unsafety
    ///
    /// This function is `unsafe` because it is undefined behavior to execute
    /// instructions that a host does not support. This means that the result of
    /// `detect` must be correct for memory safe execution at runtime.
    pub unsafe fn detect_host_feature(&mut self, detect: fn(&str) -> Option<bool>) -> &mut Self {
        self.detect_host_feature = Some(detect);
        self
    }
2589
    /// Configures Wasmtime to not use signals-based trap handlers, for example
    /// disables `SIGILL` and `SIGSEGV` handler registration on Unix platforms.
    ///
    /// Wasmtime will by default leverage signals-based trap handlers (or the
    /// platform equivalent, for example "vectored exception handlers" on
    /// Windows) to make generated code more efficient. For example an
    /// out-of-bounds load in WebAssembly will result in a `SIGSEGV` on Unix
    /// that is caught by a signal handler in Wasmtime by default. Another
    /// example is divide-by-zero is reported by hardware rather than
    /// explicitly checked and Wasmtime turns that into a trap.
    ///
    /// Some environments however may not have easy access to signal handlers.
    /// For example embedded scenarios may not support virtual memory. Other
    /// environments where Wasmtime is embedded within the surrounding
    /// environment may require that new signal handlers aren't registered due
    /// to the global nature of signal handlers. This option exists to disable
    /// the signal handler registration when required.
    ///
    /// When signals-based trap handlers are disabled then generated code will
    /// never rely on segfaults or other signals. Generated code will be slower
    /// because bounds checks must be explicit along with other operations like
    /// integer division which must check for zero.
    ///
    /// When this option is disabled it additionally requires that the
    /// `enable_heap_access_spectre_mitigation` and
    /// `enable_table_access_spectre_mitigation` Cranelift settings are
    /// disabled. This means that generated code must have spectre mitigations
    /// disabled. This is because spectre mitigations rely on faults from
    /// loading from the null address to implement bounds checks.
    ///
    /// This option defaults to `true` meaning that signals-based trap handlers
    /// are enabled by default.
    ///
    /// **Note** Disabling this option is not compatible with the Winch compiler.
    pub fn signals_based_traps(&mut self, enable: bool) -> &mut Self {
        self.tunables.signals_based_traps = Some(enable);
        self
    }
2628}
2629
impl Default for Config {
    fn default() -> Config {
        // `Config::new` already encodes all of Wasmtime's default settings.
        Config::new()
    }
}
2635
2636impl fmt::Debug for Config {
2637 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2638 let mut f = f.debug_struct("Config");
2639
2640 // Not every flag in WasmFeatures can be enabled as part of creating
2641 // a Config. This impl gives a complete picture of all WasmFeatures
2642 // enabled, and doesn't require maintenance by hand (which has become out
2643 // of date in the past), at the cost of possible confusion for why
2644 // a flag in this set doesn't have a Config setter.
2645 let features = self.features();
2646 for flag in WasmFeatures::FLAGS.iter() {
2647 f.field(
2648 &format!("wasm_{}", flag.name().to_lowercase()),
2649 &features.contains(*flag.value()),
2650 );
2651 }
2652
2653 f.field("parallel_compilation", &self.parallel_compilation);
2654 #[cfg(any(feature = "cranelift", feature = "winch"))]
2655 {
2656 f.field("compiler_config", &self.compiler_config);
2657 }
2658
2659 self.tunables.format(&mut f);
2660 f.finish()
2661 }
2662}
2663
/// Possible compilation strategies for a wasm module.
///
/// This is used as an argument to the [`Config::strategy`] method.
#[non_exhaustive]
#[derive(PartialEq, Eq, Clone, Debug, Copy)]
pub enum Strategy {
    /// An indicator that the compilation strategy should be automatically
    /// selected.
    ///
    /// This is generally what you want for most projects and indicates that the
    /// `wasmtime` crate itself should make the decision about what the best
    /// code generator for a wasm module is.
    ///
    /// Currently this always defaults to Cranelift, but the default value may
    /// change over time. (If the `cranelift` feature is disabled at compile
    /// time then Winch is selected instead, when available.)
    Auto,

    /// Currently the default backend, Cranelift aims to be a reasonably fast
    /// code generator which generates high quality machine code.
    Cranelift,

    /// A baseline compiler for WebAssembly, currently under active development and not ready for
    /// production applications.
    Winch,
}
2689
#[cfg(any(feature = "winch", feature = "cranelift"))]
impl Strategy {
    /// Resolves `Auto` to a concrete backend based on which compiler crate
    /// features were enabled at build time, returning `None` when neither
    /// backend is available. Non-`Auto` values are returned unchanged.
    fn not_auto(&self) -> Option<Strategy> {
        if !matches!(self, Strategy::Auto) {
            return Some(*self);
        }
        if cfg!(feature = "cranelift") {
            Some(Strategy::Cranelift)
        } else if cfg!(feature = "winch") {
            Some(Strategy::Winch)
        } else {
            None
        }
    }
}
2707
/// Possible garbage collector implementations for Wasm.
///
/// This is used as an argument to the [`Config::collector`] method.
///
/// The properties of Wasmtime's available collectors are summarized in the
/// following table:
///
/// | Collector                   | Collects Garbage[^1] | Latency[^2] | Throughput[^3] | Allocation Speed[^4] | Heap Utilization[^5] |
/// |-----------------------------|----------------------|-------------|----------------|----------------------|----------------------|
/// | `DeferredReferenceCounting` | Yes, but not cycles  | 🙂          | 🙁             | 😐                   | 😐                   |
/// | `Null`                      | No                   | 🙂          | 🙂             | 🙂                   | 🙂                   |
///
/// [^1]: Whether or not the collector is capable of collecting garbage and cyclic garbage.
///
/// [^2]: How long the Wasm program is paused during garbage
///       collections. Shorter is better. In general, better latency implies
///       worse throughput and vice versa.
///
/// [^3]: How fast the Wasm program runs when using this collector. Roughly
///       equivalent to the number of Wasm instructions executed per
///       second. Faster is better. In general, better throughput implies worse
///       latency and vice versa.
///
/// [^4]: How fast can individual objects be allocated?
///
/// [^5]: How many objects can the collector fit into N bytes of memory? That
///       is, how much space for bookkeeping and metadata does this collector
///       require? Less space taken up by metadata means more space for
///       additional objects. Reference counts are larger than mark bits and
///       free lists are larger than bump pointers, for example.
#[non_exhaustive]
#[derive(PartialEq, Eq, Clone, Debug, Copy)]
pub enum Collector {
    /// An indicator that the garbage collector should be automatically
    /// selected.
    ///
    /// This is generally what you want for most projects and indicates that the
    /// `wasmtime` crate itself should make the decision about what the best
    /// collector for a wasm module is.
    ///
    /// Currently this always defaults to the deferred reference-counting
    /// collector, but the default value may change over time. (When the
    /// `gc-drc` feature is disabled at compile time the null collector is
    /// selected instead, when available.)
    Auto,

    /// The deferred reference-counting collector.
    ///
    /// A reference-counting collector, generally trading improved latency for
    /// worsened throughput. However, to avoid the largest overheads of
    /// reference counting, it avoids manipulating reference counts for Wasm
    /// objects on the stack. Instead, it will hold a reference count for an
    /// over-approximation of all objects that are currently on the stack, trace
    /// the stack during collection to find the precise set of on-stack roots,
    /// and decrement the reference count of any object that was in the
    /// over-approximation but not the precise set. This improves throughput,
    /// compared to "pure" reference counting, by performing many fewer
    /// refcount-increment and -decrement operations. The cost is the increased
    /// latency associated with tracing the stack.
    ///
    /// This collector cannot currently collect cycles; they will leak until the
    /// GC heap's store is dropped.
    DeferredReferenceCounting,

    /// The null collector.
    ///
    /// This collector does not actually collect any garbage. It simply
    /// allocates objects until it runs out of memory, at which point further
    /// objects allocation attempts will trap.
    ///
    /// This collector is useful for incredibly short-running Wasm instances
    /// where additionally you would rather halt an over-allocating Wasm program
    /// than spend time collecting its garbage to allow it to keep running. It
    /// is also useful for measuring the overheads associated with other
    /// collectors, as this collector imposes as close to zero throughput and
    /// latency overhead as possible.
    Null,
}
2784
impl Default for Collector {
    fn default() -> Collector {
        // `Auto` defers the concrete collector choice to Wasmtime itself.
        Collector::Auto
    }
}
2790
#[cfg(feature = "gc")]
impl Collector {
    /// Resolves `Auto` to a concrete collector based on which GC crate
    /// features were enabled at compile time, returning `None` when no
    /// collector is available. Non-`Auto` values are returned unchanged.
    fn not_auto(&self) -> Option<Collector> {
        match self {
            Collector::Auto => {
                if cfg!(feature = "gc-drc") {
                    Some(Collector::DeferredReferenceCounting)
                } else if cfg!(feature = "gc-null") {
                    Some(Collector::Null)
                } else {
                    None
                }
            }
            other => Some(*other),
        }
    }

    /// Like `not_auto`, but converts unavailable or unresolvable collector
    /// selections into descriptive errors instead of `None`.
    fn try_not_auto(&self) -> Result<Self> {
        match self.not_auto() {
            #[cfg(feature = "gc-drc")]
            Some(c @ Collector::DeferredReferenceCounting) => Ok(c),
            #[cfg(not(feature = "gc-drc"))]
            Some(Collector::DeferredReferenceCounting) => bail!(
                "cannot create an engine using the deferred reference-counting \
                 collector because the `gc-drc` feature was not enabled at \
                 compile time",
            ),

            #[cfg(feature = "gc-null")]
            Some(c @ Collector::Null) => Ok(c),
            #[cfg(not(feature = "gc-null"))]
            Some(Collector::Null) => bail!(
                "cannot create an engine using the null collector because \
                 the `gc-null` feature was not enabled at compile time",
            ),

            // `not_auto` never returns `Auto` itself.
            Some(Collector::Auto) => unreachable!(),

            None => bail!(
                "cannot create an engine with GC support when none of the \
                 collectors are available; enable one of the following \
                 features: `gc-drc`, `gc-null`",
            ),
        }
    }
}
2837
/// Possible optimization levels for the Cranelift codegen backend.
#[non_exhaustive]
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum OptLevel {
    /// No optimizations performed, minimizes compilation time by disabling most
    /// optimizations.
    None,
    /// Generates the fastest possible code, but compilation may take longer.
    Speed,
    /// Similar to `speed`, but also performs transformations aimed at reducing
    /// code size.
    SpeedAndSize,
}
2851
/// Possible register allocator algorithms for the Cranelift codegen backend.
#[non_exhaustive]
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum RegallocAlgorithm {
    /// Generates the fastest possible code, but compilation may take longer.
    ///
    /// This algorithm performs "backtracking", which means that it may
    /// undo its earlier work and retry as it discovers conflicts. This
    /// results in better register utilization, producing fewer spills
    /// and moves, but can cause super-linear compile runtime.
    Backtracking,
}
2864
/// Selects which profiling technique to support.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum ProfilingStrategy {
    /// No profiler support.
    None,

    /// Collect function name information as the "perf map" file format, used with `perf` on Linux.
    PerfMap,

    /// Collect profiling info for the "jitdump" file format, used with `perf` on
    /// Linux.
    JitDump,

    /// Collect profiling info using the "ittapi" interface, used with `VTune` on Linux.
    VTune,

    /// Support for profiling Pulley, Wasmtime's interpreter. Note that enabling
    /// this at runtime requires enabling the `profile-pulley` Cargo feature at
    /// compile time.
    Pulley,
}
2886
/// Selects how wasm backtrace detailed information is handled.
#[derive(Debug, Clone, Copy)]
pub enum WasmBacktraceDetails {
    /// Support is unconditionally enabled and wasmtime will parse and read
    /// debug information.
    Enable,

    /// Support is disabled, and wasmtime will not parse debug information for
    /// backtrace details.
    Disable,

    /// Support for backtrace details is conditional on the
    /// `WASMTIME_BACKTRACE_DETAILS` environment variable.
    Environment,
}
2902
/// Describes the tri-state configuration of memory protection keys (MPK).
#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)]
pub enum MpkEnabled {
    /// Use MPK if supported by the current system; fall back to guard regions
    /// otherwise.
    Auto,
    /// Use MPK, or fail if it is not supported.
    Enable,
    /// Do not use MPK.
    Disable,
}
2914
2915/// Configuration options used with [`InstanceAllocationStrategy::Pooling`] to
2916/// change the behavior of the pooling instance allocator.
2917///
2918/// This structure has a builder-style API in the same manner as [`Config`] and
2919/// is configured with [`Config::allocation_strategy`].
2920///
2921/// Note that usage of the pooling allocator does not affect compiled
2922/// WebAssembly code. Compiled `*.cwasm` files, for example, are usable both
2923/// with and without the pooling allocator.
2924///
2925/// ## Advantages of Pooled Allocation
2926///
2927/// The main benefit of the pooling allocator is to make WebAssembly
2928/// instantiation both faster and more scalable in terms of parallelism.
2929/// Allocation is faster because virtual memory is already configured and ready
2930/// to go within the pool, there's no need to [`mmap`] (for example on Unix) a
2931/// new region and configure it with guard pages. By avoiding [`mmap`] this
2932/// avoids whole-process virtual memory locks which can improve scalability and
2933/// performance through avoiding this.
2934///
2935/// Additionally with pooled allocation it's possible to create "affine slots"
2936/// to a particular WebAssembly module or component over time. For example if
/// the same module is instantiated multiple times over time the pooling allocator will, by
/// default, attempt to reuse the same slot. This means that the slot has been
2939/// pre-configured and can retain virtual memory mappings for a copy-on-write
/// image, for example (see [`Config::memory_init_cow`] for more information).
2941/// This means that in a steady state instance deallocation is a single
2942/// [`madvise`] to reset linear memory to its original contents followed by a
2943/// single (optional) [`mprotect`] during the next instantiation to shrink
2944/// memory back to its original size. Compared to non-pooled allocation this
2945/// avoids the need to [`mmap`] a new region of memory, [`munmap`] it, and
2946/// [`mprotect`] regions too.
2947///
2948/// Another benefit of pooled allocation is that it's possible to configure
2949/// things such that no virtual memory management is required at all in a steady
2950/// state. For example a pooling allocator can be configured with:
2951///
2952/// * [`Config::memory_init_cow`] disabled
2953/// * [`Config::memory_guard_size`] disabled
2954/// * [`Config::memory_reservation`] shrunk to minimal size
2955/// * [`PoolingAllocationConfig::table_keep_resident`] sufficiently large
2956/// * [`PoolingAllocationConfig::linear_memory_keep_resident`] sufficiently large
2957///
2958/// With all these options in place no virtual memory tricks are used at all and
2959/// everything is manually managed by Wasmtime (for example resetting memory is
2960/// a `memset(0)`). This is not as fast in a single-threaded scenario but can
2961/// provide benefits in high-parallelism situations as no virtual memory locks
2962/// or IPIs need happen.
2963///
2964/// ## Disadvantages of Pooled Allocation
2965///
2966/// Despite the above advantages to instantiation performance the pooling
2967/// allocator is not enabled by default in Wasmtime. One reason is that the
2968/// performance advantages are not necessarily portable, for example while the
2969/// pooling allocator works on Windows it has not been tuned for performance on
2970/// Windows in the same way it has on Linux.
2971///
2972/// Additionally the main cost of the pooling allocator is that it requires a
2973/// very large reservation of virtual memory (on the order of most of the
2974/// addressable virtual address space). WebAssembly 32-bit linear memories in
2975/// Wasmtime are, by default 4G address space reservations with a small guard
2976/// region both before and after the linear memory. Memories in the pooling
2977/// allocator are contiguous which means that we only need a guard after linear
2978/// memory because the previous linear memory's slot post-guard is our own
2979/// pre-guard. This means that, by default, the pooling allocator uses roughly
2980/// 4G of virtual memory per WebAssembly linear memory slot. 4G of virtual
2981/// memory is 32 bits of a 64-bit address. Many 64-bit systems can only
2982/// actually use 48-bit addresses by default (although this can be extended on
2983/// architectures nowadays too), and of those 48 bits one of them is reserved
2984/// to indicate kernel-vs-userspace. This leaves 47-32=15 bits left,
2985/// meaning you can only have at most 32k slots of linear memories on many
2986/// systems by default. This is a relatively small number and shows how the
2987/// pooling allocator can quickly exhaust all of virtual memory.
2988///
2989/// Another disadvantage of the pooling allocator is that it may keep memory
2990/// alive when nothing is using it. A previously used slot for an instance might
2991/// have paged-in memory that will not get paged out until the
2992/// [`Engine`](crate::Engine) owning the pooling allocator is dropped. While
2993/// suitable for some applications this behavior may not be suitable for all
2994/// applications.
2995///
2996/// Finally the last disadvantage of the pooling allocator is that the
2997/// configuration values for the maximum number of instances, memories, tables,
2998/// etc, must all be fixed up-front. There's not always a clear answer as to
2999/// what these values should be so not all applications may be able to work
3000/// with this constraint.
3001///
3002/// [`madvise`]: https://man7.org/linux/man-pages/man2/madvise.2.html
3003/// [`mprotect`]: https://man7.org/linux/man-pages/man2/mprotect.2.html
3004/// [`mmap`]: https://man7.org/linux/man-pages/man2/mmap.2.html
3005/// [`munmap`]: https://man7.org/linux/man-pages/man2/munmap.2.html
3006#[cfg(feature = "pooling-allocator")]
3007#[derive(Debug, Clone, Default)]
3008pub struct PoolingAllocationConfig {
3009 config: crate::runtime::vm::PoolingInstanceAllocatorConfig,
3010}
3011
#[cfg(feature = "pooling-allocator")]
impl PoolingAllocationConfig {
    /// Returns a new configuration builder with all default settings
    /// configured.
    pub fn new() -> PoolingAllocationConfig {
        PoolingAllocationConfig::default()
    }

    /// Configures the maximum number of "unused warm slots" to retain in the
    /// pooling allocator.
    ///
    /// The pooling allocator operates over slots to allocate from, and each
    /// slot is considered "cold" if it's never been used before or "warm" if
    /// it's been used by some module in the past. Slots in the pooling
    /// allocator additionally track an "affinity" flag to a particular core
    /// wasm module. When a module is instantiated into a slot then the slot is
    /// considered affine to that module, even after the instance has been
    /// deallocated.
    ///
    /// When a new instance is created then a slot must be chosen, and the
    /// current algorithm for selecting a slot is:
    ///
    /// * If there are slots that are affine to the module being instantiated,
    ///   then the most recently used slot is selected to be allocated from.
    ///   This is done to improve reuse of resources such as memory mappings and
    ///   additionally try to benefit from temporal locality for things like
    ///   caches.
    ///
    /// * Otherwise if there are more than N affine slots to other modules, then
    ///   one of those affine slots is chosen to be allocated. The slot chosen
    ///   is picked on a least-recently-used basis.
    ///
    /// * Finally, if there are fewer than N affine slots to other modules, then
    ///   the non-affine slots are allocated from.
    ///
    /// This setting, `max_unused_warm_slots`, is the value for N in the above
    /// algorithm. The purpose of this setting is to have a knob over the RSS
    /// impact of "unused slots" for a long-running wasm server.
    ///
    /// If this setting is set to 0, for example, then affine slots are
    /// aggressively reused on a least-recently-used basis. A "cold" slot is
    /// only used if there are no affine slots available to allocate from. This
    /// means that the set of slots used over the lifetime of a program is the
    /// same as the maximum concurrent number of wasm instances.
    ///
    /// If this setting is set to infinity, however, then cold slots are
    /// prioritized to be allocated from. This means that the set of slots used
    /// over the lifetime of a program will approach
    /// [`PoolingAllocationConfig::total_memories`], or the maximum number of
    /// slots in the pooling allocator.
    ///
    /// Wasmtime does not aggressively decommit all resources associated with a
    /// slot when the slot is not in use. For example the
    /// [`PoolingAllocationConfig::linear_memory_keep_resident`] option can be
    /// used to keep memory associated with a slot, even when it's not in use.
    /// This means that the total set of used slots in the pooling instance
    /// allocator can impact the overall RSS usage of a program.
    ///
    /// The default value for this option is `100`.
    pub fn max_unused_warm_slots(&mut self, max: u32) -> &mut Self {
        self.config.max_unused_warm_slots = max;
        self
    }

    /// The target number of decommits to do per batch.
    ///
    /// This is not precise, as we can queue up decommits at times when we
    /// aren't prepared to immediately flush them, and so we may go over this
    /// target size occasionally.
    ///
    /// A batch size of one effectively disables batching.
    ///
    /// Defaults to `1`.
    pub fn decommit_batch_size(&mut self, batch_size: usize) -> &mut Self {
        self.config.decommit_batch_size = batch_size;
        self
    }

    /// How much memory, in bytes, to keep resident for async stacks allocated
    /// with the pooling allocator.
    ///
    /// When [`PoolingAllocationConfig::async_stack_zeroing`] is enabled then
    /// Wasmtime will reset the contents of async stacks back to zero upon
    /// deallocation. This option can be used to perform the zeroing operation
    /// with `memset` up to a certain threshold of bytes instead of using system
    /// calls to reset the stack to zero.
    ///
    /// Note that when using this option the memory with async stacks will
    /// never be decommitted.
    #[cfg(feature = "async")]
    pub fn async_stack_keep_resident(&mut self, size: usize) -> &mut Self {
        self.config.async_stack_keep_resident = size;
        self
    }

    /// How much memory, in bytes, to keep resident for each linear memory
    /// after deallocation.
    ///
    /// This option is only applicable on Linux and has no effect on other
    /// platforms.
    ///
    /// By default Wasmtime will use `madvise` to reset the entire contents of
    /// linear memory back to zero when a linear memory is deallocated. This
    /// option can be used to use `memset` instead to set memory back to zero
    /// which can, in some configurations, reduce the number of page faults
    /// taken when a slot is reused.
    pub fn linear_memory_keep_resident(&mut self, size: usize) -> &mut Self {
        self.config.linear_memory_keep_resident = size;
        self
    }

    /// How much memory, in bytes, to keep resident for each table after
    /// deallocation.
    ///
    /// This option is only applicable on Linux and has no effect on other
    /// platforms.
    ///
    /// This option is the same as
    /// [`PoolingAllocationConfig::linear_memory_keep_resident`] except that it
    /// is applicable to tables instead.
    pub fn table_keep_resident(&mut self, size: usize) -> &mut Self {
        self.config.table_keep_resident = size;
        self
    }

    /// The maximum number of concurrent component instances supported (default
    /// is `1000`).
    ///
    /// This provides an upper-bound on the total size of component
    /// metadata-related allocations, along with
    /// [`PoolingAllocationConfig::max_component_instance_size`]. The upper bound is
    ///
    /// ```text
    /// total_component_instances * max_component_instance_size
    /// ```
    ///
    /// where `max_component_instance_size` is rounded up to the size and alignment
    /// of the internal representation of the metadata.
    pub fn total_component_instances(&mut self, count: u32) -> &mut Self {
        self.config.limits.total_component_instances = count;
        self
    }

    /// The maximum size, in bytes, allocated for a component instance's
    /// `VMComponentContext` metadata.
    ///
    /// The [`wasmtime::component::Instance`][crate::component::Instance] type
    /// has a static size but its internal `VMComponentContext` is dynamically
    /// sized depending on the component being instantiated. This size limit
    /// loosely correlates to the size of the component, taking into account
    /// factors such as:
    ///
    /// * number of lifted and lowered functions,
    /// * number of memories
    /// * number of inner instances
    /// * number of resources
    ///
    /// If the allocated size per instance is too small then instantiation of a
    /// module will fail at runtime with an error indicating how many bytes were
    /// needed.
    ///
    /// The default value for this is 1MiB.
    ///
    /// This provides an upper-bound on the total size of component
    /// metadata-related allocations, along with
    /// [`PoolingAllocationConfig::total_component_instances`]. The upper bound is
    ///
    /// ```text
    /// total_component_instances * max_component_instance_size
    /// ```
    ///
    /// where `max_component_instance_size` is rounded up to the size and alignment
    /// of the internal representation of the metadata.
    pub fn max_component_instance_size(&mut self, size: usize) -> &mut Self {
        self.config.limits.component_instance_size = size;
        self
    }

    /// The maximum number of core instances a single component may contain
    /// (default is unlimited).
    ///
    /// This method (along with
    /// [`PoolingAllocationConfig::max_memories_per_component`],
    /// [`PoolingAllocationConfig::max_tables_per_component`], and
    /// [`PoolingAllocationConfig::max_component_instance_size`]) allows you to cap
    /// the amount of resources a single component allocation consumes.
    ///
    /// If a component will instantiate more core instances than `count`, then
    /// the component will fail to instantiate.
    pub fn max_core_instances_per_component(&mut self, count: u32) -> &mut Self {
        self.config.limits.max_core_instances_per_component = count;
        self
    }

    /// The maximum number of Wasm linear memories that a single component may
    /// transitively contain (default is unlimited).
    ///
    /// This method (along with
    /// [`PoolingAllocationConfig::max_core_instances_per_component`],
    /// [`PoolingAllocationConfig::max_tables_per_component`], and
    /// [`PoolingAllocationConfig::max_component_instance_size`]) allows you to cap
    /// the amount of resources a single component allocation consumes.
    ///
    /// If a component transitively contains more linear memories than `count`,
    /// then the component will fail to instantiate.
    pub fn max_memories_per_component(&mut self, count: u32) -> &mut Self {
        self.config.limits.max_memories_per_component = count;
        self
    }

    /// The maximum number of tables that a single component may transitively
    /// contain (default is unlimited).
    ///
    /// This method (along with
    /// [`PoolingAllocationConfig::max_core_instances_per_component`],
    /// [`PoolingAllocationConfig::max_memories_per_component`], and
    /// [`PoolingAllocationConfig::max_component_instance_size`]) allows you to cap
    /// the amount of resources a single component allocation consumes.
    ///
    /// If a component transitively contains more tables than `count`, then
    /// the component will fail to instantiate.
    pub fn max_tables_per_component(&mut self, count: u32) -> &mut Self {
        self.config.limits.max_tables_per_component = count;
        self
    }

    /// The maximum number of concurrent Wasm linear memories supported (default
    /// is `1000`).
    ///
    /// This value has a direct impact on the amount of memory allocated by the pooling
    /// instance allocator.
    ///
    /// The pooling instance allocator allocates a memory pool, where each entry
    /// in the pool contains the reserved address space for each linear memory
    /// supported by an instance.
    ///
    /// The memory pool will reserve a large quantity of host process address
    /// space to elide the bounds checks required for correct WebAssembly memory
    /// semantics. Even with 64-bit address spaces, the address space is limited
    /// when dealing with a large number of linear memories.
    ///
    /// For example, on Linux x86_64, the userland address space limit is 128
    /// TiB. That might seem like a lot, but each linear memory will *reserve* 6
    /// GiB of space by default.
    pub fn total_memories(&mut self, count: u32) -> &mut Self {
        self.config.limits.total_memories = count;
        self
    }

    /// The maximum number of concurrent tables supported (default is `1000`).
    ///
    /// This value has a direct impact on the amount of memory allocated by the
    /// pooling instance allocator.
    ///
    /// The pooling instance allocator allocates a table pool, where each entry
    /// in the pool contains the space needed for each WebAssembly table
    /// supported by an instance (see `table_elements` to control the size of
    /// each table).
    pub fn total_tables(&mut self, count: u32) -> &mut Self {
        self.config.limits.total_tables = count;
        self
    }

    /// The maximum number of execution stacks allowed for asynchronous
    /// execution, when enabled (default is `1000`).
    ///
    /// This value has a direct impact on the amount of memory allocated by the
    /// pooling instance allocator.
    #[cfg(feature = "async")]
    pub fn total_stacks(&mut self, count: u32) -> &mut Self {
        self.config.limits.total_stacks = count;
        self
    }

    /// The maximum number of concurrent core instances supported (default is
    /// `1000`).
    ///
    /// This provides an upper-bound on the total size of core instance
    /// metadata-related allocations, along with
    /// [`PoolingAllocationConfig::max_core_instance_size`]. The upper bound is
    ///
    /// ```text
    /// total_core_instances * max_core_instance_size
    /// ```
    ///
    /// where `max_core_instance_size` is rounded up to the size and alignment of
    /// the internal representation of the metadata.
    pub fn total_core_instances(&mut self, count: u32) -> &mut Self {
        self.config.limits.total_core_instances = count;
        self
    }

    /// The maximum size, in bytes, allocated for a core instance's `VMContext`
    /// metadata.
    ///
    /// The [`Instance`][crate::Instance] type has a static size but its
    /// `VMContext` metadata is dynamically sized depending on the module being
    /// instantiated. This size limit loosely correlates to the size of the Wasm
    /// module, taking into account factors such as:
    ///
    /// * number of functions
    /// * number of globals
    /// * number of memories
    /// * number of tables
    /// * number of function types
    ///
    /// If the allocated size per instance is too small then instantiation of a
    /// module will fail at runtime with an error indicating how many bytes were
    /// needed.
    ///
    /// The default value for this is 1MiB.
    ///
    /// This provides an upper-bound on the total size of core instance
    /// metadata-related allocations, along with
    /// [`PoolingAllocationConfig::total_core_instances`]. The upper bound is
    ///
    /// ```text
    /// total_core_instances * max_core_instance_size
    /// ```
    ///
    /// where `max_core_instance_size` is rounded up to the size and alignment of
    /// the internal representation of the metadata.
    pub fn max_core_instance_size(&mut self, size: usize) -> &mut Self {
        self.config.limits.core_instance_size = size;
        self
    }

    /// The maximum number of defined tables for a core module (default is `1`).
    ///
    /// This value controls the capacity of the `VMTableDefinition` table in
    /// each instance's `VMContext` structure.
    ///
    /// The allocated size of the table will be `tables *
    /// sizeof(VMTableDefinition)` for each instance regardless of how many
    /// tables are defined by an instance's module.
    pub fn max_tables_per_module(&mut self, tables: u32) -> &mut Self {
        self.config.limits.max_tables_per_module = tables;
        self
    }

    /// The maximum table elements for any table defined in a module (default is
    /// `20000`).
    ///
    /// If a table's minimum element limit is greater than this value, the
    /// module will fail to instantiate.
    ///
    /// If a table's maximum element limit is unbounded or greater than this
    /// value, the maximum will be `table_elements` for the purpose of any
    /// `table.grow` instruction.
    ///
    /// This value is used to reserve the maximum space for each supported
    /// table; table elements are pointer-sized in the Wasmtime runtime.
    /// Therefore, the space reserved for each instance is `tables *
    /// table_elements * sizeof::<*const ()>`.
    pub fn table_elements(&mut self, elements: usize) -> &mut Self {
        self.config.limits.table_elements = elements;
        self
    }

    /// The maximum number of defined linear memories for a module (default is
    /// `1`).
    ///
    /// This value controls the capacity of the `VMMemoryDefinition` table in
    /// each core instance's `VMContext` structure.
    ///
    /// The allocated size of the table will be `memories *
    /// sizeof(VMMemoryDefinition)` for each core instance regardless of how
    /// many memories are defined by the core instance's module.
    pub fn max_memories_per_module(&mut self, memories: u32) -> &mut Self {
        self.config.limits.max_memories_per_module = memories;
        self
    }

    /// The maximum byte size that any WebAssembly linear memory may grow to.
    ///
    /// This option defaults to 4 GiB meaning that for 32-bit linear memories
    /// there are no restrictions. 64-bit linear memories will not be allowed to
    /// grow beyond 4 GiB by default.
    ///
    /// If a memory's minimum size is greater than this value, the module will
    /// fail to instantiate.
    ///
    /// If a memory's maximum size is unbounded or greater than this value, the
    /// maximum will be `max_memory_size` for the purpose of any `memory.grow`
    /// instruction.
    ///
    /// This value is used to control the maximum accessible space for each
    /// linear memory of a core instance. This can be thought of as a simple
    /// mechanism like [`Store::limiter`](crate::Store::limiter) to limit memory
    /// at runtime. This value can also affect striping/coloring behavior when
    /// used in conjunction with
    /// [`memory_protection_keys`](PoolingAllocationConfig::memory_protection_keys).
    ///
    /// The virtual memory reservation size of each linear memory is controlled
    /// by the [`Config::memory_reservation`] setting and this method's
    /// configuration cannot exceed [`Config::memory_reservation`].
    pub fn max_memory_size(&mut self, bytes: usize) -> &mut Self {
        self.config.limits.max_memory_size = bytes;
        self
    }

    /// Configures whether memory protection keys (MPK) should be used for more
    /// efficient layout of pool-allocated memories.
    ///
    /// When using the pooling allocator (see [`Config::allocation_strategy`],
    /// [`InstanceAllocationStrategy::Pooling`]), memory protection keys can
    /// reduce the total amount of allocated virtual memory by eliminating guard
    /// regions between WebAssembly memories in the pool. It does so by
    /// "coloring" memory regions with different memory keys and setting which
    /// regions are accessible each time executions switches from host to guest
    /// (or vice versa).
    ///
    /// Leveraging MPK requires configuring a smaller-than-default
    /// [`max_memory_size`](PoolingAllocationConfig::max_memory_size) to enable
    /// this coloring/striping behavior. For example embeddings might want to
    /// reduce the default 4G allowance to 128M.
    ///
    /// MPK is only available on Linux (called `pku` there) and recent x86
    /// systems; we check for MPK support at runtime by examining the `CPUID`
    /// register. This configuration setting can be in three states:
    ///
    /// - `auto`: if MPK support is available the guard regions are removed; if
    ///   not, the guard regions remain
    /// - `enable`: use MPK to eliminate guard regions; fail if MPK is not
    ///   supported
    /// - `disable`: never use MPK
    ///
    /// By default this value is `disabled`, but may become `auto` in future
    /// releases.
    ///
    /// __WARNING__: this configuration option is still experimental--use at
    /// your own risk! MPK uses kernel and CPU features to protect memory
    /// regions; you may observe segmentation faults if anything is
    /// misconfigured.
    #[cfg(feature = "memory-protection-keys")]
    pub fn memory_protection_keys(&mut self, enable: MpkEnabled) -> &mut Self {
        self.config.memory_protection_keys = enable;
        self
    }

    /// Sets an upper limit on how many memory protection keys (MPK) Wasmtime
    /// will use.
    ///
    /// This setting is only applicable when
    /// [`PoolingAllocationConfig::memory_protection_keys`] is set to `enable`
    /// or `auto`. Configuring this above the HW and OS limits (typically 15)
    /// has no effect.
    ///
    /// If multiple Wasmtime engines are used in the same process, note that all
    /// engines will share the same set of allocated keys; this setting will
    /// limit how many keys are allocated initially and thus available to all
    /// other engines.
    #[cfg(feature = "memory-protection-keys")]
    pub fn max_memory_protection_keys(&mut self, max: usize) -> &mut Self {
        self.config.max_memory_protection_keys = max;
        self
    }

    /// Check if memory protection keys (MPK) are available on the current host.
    ///
    /// This is a convenience method for determining MPK availability using the
    /// same method that [`MpkEnabled::Auto`] does. See
    /// [`PoolingAllocationConfig::memory_protection_keys`] for more
    /// information.
    #[cfg(feature = "memory-protection-keys")]
    pub fn are_memory_protection_keys_available() -> bool {
        crate::runtime::vm::mpk::is_supported()
    }

    /// The maximum number of concurrent GC heaps supported (default is `1000`).
    ///
    /// This value has a direct impact on the amount of memory allocated by the
    /// pooling instance allocator.
    ///
    /// The pooling instance allocator allocates a GC heap pool, where each
    /// entry in the pool contains the space needed for each GC heap used by a
    /// store.
    #[cfg(feature = "gc")]
    pub fn total_gc_heaps(&mut self, count: u32) -> &mut Self {
        self.config.limits.total_gc_heaps = count;
        self
    }
}
3495
/// Detects whether the named target `feature` is present on the host CPU.
///
/// Returns `Some(true)`/`Some(false)` when the feature's presence can be
/// determined on this architecture, or `None` when the feature name is not
/// recognized (or detection is unsupported on this architecture).
#[cfg(feature = "std")]
fn detect_host_feature(feature: &str) -> Option<bool> {
    #[cfg(target_arch = "aarch64")]
    {
        return match feature {
            "lse" => Some(std::arch::is_aarch64_feature_detected!("lse")),
            "paca" => Some(std::arch::is_aarch64_feature_detected!("paca")),
            "fp16" => Some(std::arch::is_aarch64_feature_detected!("fp16")),

            _ => None,
        };
    }

    // There is no is_s390x_feature_detected macro yet, so for now
    // we use getauxval from the libc crate directly.
    #[cfg(all(target_arch = "s390x", target_os = "linux"))]
    {
        let v = unsafe { libc::getauxval(libc::AT_HWCAP) };
        const HWCAP_S390X_VXRS_EXT2: libc::c_ulong = 32768;

        return match feature {
            // There is no separate HWCAP bit for mie2, so assume
            // that any machine with vxrs_ext2 also has mie2.
            "vxrs_ext2" | "mie2" => Some((v & HWCAP_S390X_VXRS_EXT2) != 0),

            _ => None,
        };
    }

    #[cfg(target_arch = "riscv64")]
    {
        // `is_riscv64_feature_detected` is not yet stable, so it cannot be
        // used here. For now lie and say all features are always found to
        // keep tests working.
        let _ = feature;
        return Some(true);
    }

    #[cfg(target_arch = "x86_64")]
    {
        return match feature {
            "cmpxchg16b" => Some(std::is_x86_feature_detected!("cmpxchg16b")),
            "sse3" => Some(std::is_x86_feature_detected!("sse3")),
            "ssse3" => Some(std::is_x86_feature_detected!("ssse3")),
            "sse4.1" => Some(std::is_x86_feature_detected!("sse4.1")),
            "sse4.2" => Some(std::is_x86_feature_detected!("sse4.2")),
            "popcnt" => Some(std::is_x86_feature_detected!("popcnt")),
            "avx" => Some(std::is_x86_feature_detected!("avx")),
            "avx2" => Some(std::is_x86_feature_detected!("avx2")),
            "fma" => Some(std::is_x86_feature_detected!("fma")),
            "bmi1" => Some(std::is_x86_feature_detected!("bmi1")),
            "bmi2" => Some(std::is_x86_feature_detected!("bmi2")),
            "avx512bitalg" => Some(std::is_x86_feature_detected!("avx512bitalg")),
            "avx512dq" => Some(std::is_x86_feature_detected!("avx512dq")),
            "avx512f" => Some(std::is_x86_feature_detected!("avx512f")),
            "avx512vl" => Some(std::is_x86_feature_detected!("avx512vl")),
            "avx512vbmi" => Some(std::is_x86_feature_detected!("avx512vbmi")),
            "lzcnt" => Some(std::is_x86_feature_detected!("lzcnt")),

            _ => None,
        };
    }

    // Fallback for architectures without a detection path above; the `feature`
    // argument is intentionally unused on those targets.
    #[allow(unreachable_code)]
    {
        let _ = feature;
        return None;
    }
}