wasmtime/config.rs
1use crate::prelude::*;
2use alloc::sync::Arc;
3use bitflags::Flags;
4use core::fmt;
5use core::str::FromStr;
6#[cfg(any(feature = "cache", feature = "cranelift", feature = "winch"))]
7use std::path::Path;
8use wasmparser::WasmFeatures;
9use wasmtime_environ::{ConfigTunables, TripleExt, Tunables};
10
11#[cfg(feature = "runtime")]
12use crate::memory::MemoryCreator;
13#[cfg(feature = "runtime")]
14use crate::profiling_agent::{self, ProfilingAgent};
15#[cfg(feature = "runtime")]
16use crate::runtime::vm::{
17 GcRuntime, InstanceAllocator, OnDemandInstanceAllocator, RuntimeMemoryCreator,
18};
19#[cfg(feature = "runtime")]
20use crate::trampoline::MemoryCreatorProxy;
21
22#[cfg(feature = "async")]
23use crate::stack::{StackCreator, StackCreatorProxy};
24#[cfg(feature = "async")]
25use wasmtime_fiber::RuntimeFiberStackCreator;
26
27#[cfg(feature = "runtime")]
28pub use crate::runtime::code_memory::CustomCodeMemory;
29#[cfg(feature = "cache")]
30pub use wasmtime_cache::{Cache, CacheConfig};
31#[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
32pub use wasmtime_environ::CacheStore;
33
/// Represents the module instance allocation strategy to use.
#[derive(Clone)]
#[non_exhaustive]
pub enum InstanceAllocationStrategy {
    /// The on-demand instance allocation strategy.
    ///
    /// Resources related to a module instance are allocated at instantiation time and
    /// immediately deallocated when the `Store` referencing the instance is dropped.
    ///
    /// This is the default allocation strategy for Wasmtime.
    OnDemand,
    /// The pooling instance allocation strategy.
    ///
    /// A pool of resources is created in advance and module instantiation reuses resources
    /// from the pool. Resources are returned to the pool when the `Store` referencing the instance
    /// is dropped.
    ///
    /// Only available when the `pooling-allocator` Cargo feature is enabled.
    #[cfg(feature = "pooling-allocator")]
    Pooling(PoolingAllocationConfig),
}
53
impl InstanceAllocationStrategy {
    /// The default pooling instance allocation strategy.
    ///
    /// Shorthand for [`InstanceAllocationStrategy::Pooling`] with a default
    /// `PoolingAllocationConfig`.
    #[cfg(feature = "pooling-allocator")]
    pub fn pooling() -> Self {
        Self::Pooling(Default::default())
    }
}
61
62impl Default for InstanceAllocationStrategy {
63 fn default() -> Self {
64 Self::OnDemand
65 }
66}
67
#[cfg(feature = "pooling-allocator")]
impl From<PoolingAllocationConfig> for InstanceAllocationStrategy {
    /// Wraps a pooling configuration in the corresponding allocation strategy.
    fn from(cfg: PoolingAllocationConfig) -> InstanceAllocationStrategy {
        Self::Pooling(cfg)
    }
}
74
#[derive(Clone)]
/// Configure the strategy used for versioning in serializing and deserializing [`crate::Module`].
pub enum ModuleVersionStrategy {
    /// Use the wasmtime crate's Cargo package version.
    WasmtimeVersion,
    /// Use a custom version string. Must be at most 255 bytes.
    Custom(String),
    /// Emit no version string in serialization, and accept all version strings in deserialization.
    None,
}
85
86impl Default for ModuleVersionStrategy {
87 fn default() -> Self {
88 ModuleVersionStrategy::WasmtimeVersion
89 }
90}
91
92impl core::hash::Hash for ModuleVersionStrategy {
93 fn hash<H: core::hash::Hasher>(&self, hasher: &mut H) {
94 match self {
95 Self::WasmtimeVersion => env!("CARGO_PKG_VERSION").hash(hasher),
96 Self::Custom(s) => s.hash(hasher),
97 Self::None => {}
98 };
99 }
100}
101
/// Global configuration options used to create an [`Engine`](crate::Engine)
/// and customize its behavior.
///
/// This structure exposes a builder-like interface and is primarily consumed by
/// [`Engine::new()`](crate::Engine::new).
///
/// The validation of `Config` is deferred until the engine is being built, thus
/// a problematic config may cause `Engine::new` to fail.
///
/// # Defaults
///
/// The `Default` trait implementation and the return value from
/// [`Config::new()`] are the same and represent the default set of
/// configuration for an engine. The exact set of defaults will differ based on
/// properties such as enabled Cargo features at compile time and the configured
/// target (see [`Config::target`]). Configuration options document their
/// default values and what the conditional value of the default is where
/// applicable.
#[derive(Clone)]
pub struct Config {
    // Compiler-backend settings; only present when a compiler is built in.
    #[cfg(any(feature = "cranelift", feature = "winch"))]
    compiler_config: CompilerConfig,
    // Compilation target triple; `None` means the host (see `Config::target`).
    target: Option<target_lexicon::Triple>,
    #[cfg(feature = "gc")]
    collector: Collector,
    profiling_strategy: ProfilingStrategy,
    // Values destined for `wasmtime_environ::Tunables`; setters store
    // `Some(..)` so unset options can fall back to defaults later.
    tunables: ConfigTunables,

    #[cfg(feature = "cache")]
    pub(crate) cache: Option<Cache>,
    // Custom creator for linear memories, if configured by the embedder.
    #[cfg(feature = "runtime")]
    pub(crate) mem_creator: Option<Arc<dyn RuntimeMemoryCreator>>,
    // Custom allocator/publisher for compiled-code memory, if configured.
    #[cfg(feature = "runtime")]
    pub(crate) custom_code_memory: Option<Arc<dyn CustomCodeMemory>>,
    pub(crate) allocation_strategy: InstanceAllocationStrategy,
    // Maximum stack space for wasm code, in bytes (see `Config::max_wasm_stack`).
    pub(crate) max_wasm_stack: usize,
    /// Explicitly enabled features via `Config::wasm_*` methods. This is a
    /// signal that the embedder specifically wants something turned on
    /// regardless of the defaults that Wasmtime might otherwise have enabled.
    ///
    /// Note that this, and `disabled_features` below, start as the empty set of
    /// features to only track explicit user requests.
    pub(crate) enabled_features: WasmFeatures,
    /// Same as `enabled_features`, but for those that are explicitly disabled.
    pub(crate) disabled_features: WasmFeatures,
    // Whether to attach `WasmBacktrace` context to errors (default true).
    pub(crate) wasm_backtrace: bool,
    // Whether `WASMTIME_BACKTRACE_DETAILS` was consulted for backtrace details.
    pub(crate) wasm_backtrace_details_env_used: bool,
    // Tri-state: `None` means "use the target's default" for unwind info.
    pub(crate) native_unwind_info: Option<bool>,
    // Size in bytes of stacks allocated for async execution.
    #[cfg(any(feature = "async", feature = "stack-switching"))]
    pub(crate) async_stack_size: usize,
    #[cfg(feature = "async")]
    pub(crate) async_stack_zeroing: bool,
    #[cfg(feature = "async")]
    pub(crate) stack_creator: Option<Arc<dyn RuntimeFiberStackCreator>>,
    pub(crate) async_support: bool,
    pub(crate) module_version: ModuleVersionStrategy,
    pub(crate) parallel_compilation: bool,
    pub(crate) memory_guaranteed_dense_image_size: u64,
    pub(crate) force_memory_init_memfd: bool,
    pub(crate) wmemcheck: bool,
    #[cfg(feature = "coredump")]
    pub(crate) coredump_on_trap: bool,
    pub(crate) macos_use_mach_ports: bool,
    // Host CPU feature probe; `None` when no probe is available (no_std).
    pub(crate) detect_host_feature: Option<fn(&str) -> Option<bool>>,
    pub(crate) x86_float_abi_ok: Option<bool>,
}
168
/// User-provided configuration for the compiler.
#[cfg(any(feature = "cranelift", feature = "winch"))]
#[derive(Debug, Clone)]
struct CompilerConfig {
    // Which compiler backend was explicitly selected, if any.
    strategy: Option<Strategy>,
    // Key/value compiler settings (e.g. cranelift flags with values).
    settings: crate::hash_map::HashMap<String, String>,
    // Boolean compiler flags, enabled by presence in the set.
    flags: crate::hash_set::HashSet<String>,
    // Storage backend for incremental-compilation artifacts, if enabled.
    #[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
    cache_store: Option<Arc<dyn CacheStore>>,
    // Directory for emitted CLIF output, when set — presumably used by a
    // `Config` method outside this view; TODO confirm.
    clif_dir: Option<std::path::PathBuf>,
    // Whether wmemcheck instrumentation is enabled in generated code.
    wmemcheck: bool,
}
181
#[cfg(any(feature = "cranelift", feature = "winch"))]
impl CompilerConfig {
    /// Creates a compiler configuration with no explicit settings or flags.
    fn new() -> Self {
        CompilerConfig {
            strategy: Strategy::Auto.not_auto(),
            settings: Default::default(),
            flags: Default::default(),
            #[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
            cache_store: None,
            clif_dir: None,
            wmemcheck: false,
        }
    }

    /// Ensures that the key is not set or equals to the given value.
    /// If the key is not set, it will be set to the given value.
    ///
    /// # Returns
    ///
    /// Returns true if successfully set or already had the given setting
    /// value, or false if the setting was explicitly set to something
    /// else previously.
    fn ensure_setting_unset_or_given(&mut self, k: &str, v: &str) -> bool {
        match self.settings.get(k) {
            // Previously set: succeed only when the existing value matches.
            Some(existing) => existing == v,
            // Not set yet: record the requested value.
            None => {
                self.settings.insert(k.to_string(), v.to_string());
                true
            }
        }
    }
}
215
#[cfg(any(feature = "cranelift", feature = "winch"))]
impl Default for CompilerConfig {
    /// Equivalent to [`CompilerConfig::new`].
    fn default() -> Self {
        CompilerConfig::new()
    }
}
222
223impl Config {
    /// Creates a new configuration object with the default configuration
    /// specified.
    pub fn new() -> Self {
        let mut ret = Self {
            tunables: ConfigTunables::default(),
            #[cfg(any(feature = "cranelift", feature = "winch"))]
            compiler_config: CompilerConfig::default(),
            target: None,
            #[cfg(feature = "gc")]
            collector: Collector::default(),
            #[cfg(feature = "cache")]
            cache: None,
            profiling_strategy: ProfilingStrategy::None,
            #[cfg(feature = "runtime")]
            mem_creator: None,
            #[cfg(feature = "runtime")]
            custom_code_memory: None,
            allocation_strategy: InstanceAllocationStrategy::OnDemand,
            // 512k of stack -- note that this is chosen currently to not be too
            // big, not be too small, and be a good default for most platforms.
            // One platform of particular note is Windows where the stack size
            // of the main thread seems to, by default, be smaller than that of
            // Linux and macOS. This 512k value at least lets our current test
            // suite pass on the main thread of Windows (using `--test-threads
            // 1` forces this), or at least it passed when this change was
            // committed.
            max_wasm_stack: 512 * 1024,
            wasm_backtrace: true,
            wasm_backtrace_details_env_used: false,
            native_unwind_info: None,
            enabled_features: WasmFeatures::empty(),
            disabled_features: WasmFeatures::empty(),
            // 2 MiB default async stack (documented on `async_stack_size`).
            #[cfg(any(feature = "async", feature = "stack-switching"))]
            async_stack_size: 2 << 20,
            #[cfg(feature = "async")]
            async_stack_zeroing: false,
            #[cfg(feature = "async")]
            stack_creator: None,
            async_support: false,
            module_version: ModuleVersionStrategy::default(),
            // Parallel compilation interacts poorly with MIRI; disable there.
            parallel_compilation: !cfg!(miri),
            // 16 MiB.
            memory_guaranteed_dense_image_size: 16 << 20,
            force_memory_init_memfd: false,
            wmemcheck: false,
            #[cfg(feature = "coredump")]
            coredump_on_trap: false,
            macos_use_mach_ports: !cfg!(miri),
            #[cfg(feature = "std")]
            detect_host_feature: Some(detect_host_feature),
            #[cfg(not(feature = "std"))]
            detect_host_feature: None,
            x86_float_abi_ok: None,
        };
        #[cfg(any(feature = "cranelift", feature = "winch"))]
        {
            ret.cranelift_debug_verifier(false);
            ret.cranelift_opt_level(OptLevel::Speed);

            // When running under MIRI try to optimize for compile time of wasm
            // code itself as much as possible. Disable optimizations by
            // default and use the fastest regalloc available to us.
            if cfg!(miri) {
                ret.cranelift_opt_level(OptLevel::None);
                ret.cranelift_regalloc_algorithm(RegallocAlgorithm::SinglePass);
            }
        }

        ret.wasm_backtrace_details(WasmBacktraceDetails::Environment);

        ret
    }
295
296 /// Configures the target platform of this [`Config`].
297 ///
298 /// This method is used to configure the output of compilation in an
299 /// [`Engine`](crate::Engine). This can be used, for example, to
300 /// cross-compile from one platform to another. By default, the host target
301 /// triple is used meaning compiled code is suitable to run on the host.
302 ///
303 /// Note that the [`Module`](crate::Module) type can only be created if the
304 /// target configured here matches the host. Otherwise if a cross-compile is
305 /// being performed where the host doesn't match the target then
306 /// [`Engine::precompile_module`](crate::Engine::precompile_module) must be
307 /// used instead.
308 ///
309 /// Target-specific flags (such as CPU features) will not be inferred by
310 /// default for the target when one is provided here. This means that this
311 /// can also be used, for example, with the host architecture to disable all
312 /// host-inferred feature flags. Configuring target-specific flags can be
313 /// done with [`Config::cranelift_flag_set`] and
314 /// [`Config::cranelift_flag_enable`].
315 ///
316 /// # Errors
317 ///
318 /// This method will error if the given target triple is not supported.
319 pub fn target(&mut self, target: &str) -> Result<&mut Self> {
320 self.target =
321 Some(target_lexicon::Triple::from_str(target).map_err(|e| anyhow::anyhow!(e))?);
322
323 Ok(self)
324 }
325
    /// Enables the incremental compilation cache in Cranelift, using the provided `CacheStore`
    /// backend for storage.
    ///
    /// Currently this method always returns `Ok`.
    #[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
    pub fn enable_incremental_compilation(
        &mut self,
        cache_store: Arc<dyn CacheStore>,
    ) -> Result<&mut Self> {
        self.compiler_config.cache_store = Some(cache_store);
        Ok(self)
    }
336
    /// Whether or not to enable support for asynchronous functions in Wasmtime.
    ///
    /// When enabled, the config can optionally define host functions with `async`.
    /// Instances created and functions called with this `Config` *must* be called
    /// through their asynchronous APIs, however. For example using
    /// [`Func::call`](crate::Func::call) will panic when used with this config.
    ///
    /// # Asynchronous Wasm
    ///
    /// WebAssembly does not currently have a way to specify at the bytecode
    /// level what is and isn't async. Host-defined functions, however, may be
    /// defined as `async`. WebAssembly imports always appear synchronous, which
    /// gives rise to a bit of an impedance mismatch here. To solve this
    /// Wasmtime supports "asynchronous configs" which enables calling these
    /// asynchronous functions in a way that looks synchronous to the executing
    /// WebAssembly code.
    ///
    /// An asynchronous config must always invoke wasm code asynchronously,
    /// meaning we'll always represent its computation as a
    /// [`Future`](std::future::Future). The `poll` method of the futures
    /// returned by Wasmtime will perform the actual work of calling the
    /// WebAssembly. Wasmtime won't manage its own thread pools or similar,
    /// that's left up to the embedder.
    ///
    /// To implement futures in a way that WebAssembly sees asynchronous host
    /// functions as synchronous, all async Wasmtime futures will execute on a
    /// separately allocated native stack from the thread otherwise executing
    /// Wasmtime. This separate native stack can then be switched to and from.
    /// Using this, whenever an `async` host function returns a future that
    /// resolves to `Pending` we switch away from the temporary stack back to
    /// the main stack and propagate the `Pending` status.
    ///
    /// In general it's encouraged that the integration with `async` and
    /// wasmtime is designed early on in your embedding of Wasmtime to ensure
    /// that it's planned that WebAssembly executes in the right context of your
    /// application.
    ///
    /// # Execution in `poll`
    ///
    /// The [`Future::poll`](std::future::Future::poll) method is the main
    /// driving force behind Rust's futures. That method's own documentation
    /// states "an implementation of `poll` should strive to return quickly, and
    /// should not block". This, however, can be at odds with executing
    /// WebAssembly code as part of the `poll` method itself. If your
    /// WebAssembly is untrusted then this could allow the `poll` method to take
    /// arbitrarily long in the worst case, likely blocking all other
    /// asynchronous tasks.
    ///
    /// To remedy this situation you have a few possible ways to solve this:
    ///
    /// * The most efficient solution is to enable
    ///   [`Config::epoch_interruption`] in conjunction with
    ///   [`crate::Store::epoch_deadline_async_yield_and_update`]. Coupled with
    ///   periodic calls to [`crate::Engine::increment_epoch`] this will cause
    ///   executing WebAssembly to periodically yield back according to the
    ///   epoch configuration settings. This enables `Future::poll` to take at
    ///   most a certain amount of time according to epoch configuration
    ///   settings and when increments happen. The benefit of this approach is
    ///   that the instrumentation in compiled code is quite lightweight, but a
    ///   downside can be that the scheduling is somewhat nondeterministic since
    ///   increments are usually timer-based which are not always deterministic.
    ///
    ///   Note that to prevent infinite execution of wasm it's recommended to
    ///   place a timeout on the entire future representing executing wasm code
    ///   and the periodic yields with epochs should ensure that when the
    ///   timeout is reached it's appropriately recognized.
    ///
    /// * Alternatively you can enable the
    ///   [`Config::consume_fuel`](crate::Config::consume_fuel) method as well
    ///   as [`crate::Store::fuel_async_yield_interval`]. When doing so this will
    ///   configure Wasmtime futures to yield periodically while they're
    ///   executing WebAssembly code. After consuming the specified amount of
    ///   fuel wasm futures will return `Poll::Pending` from their `poll`
    ///   method, and will get automatically re-polled later. This enables the
    ///   `Future::poll` method to take roughly a fixed amount of time since
    ///   fuel is guaranteed to get consumed while wasm is executing. Unlike
    ///   epoch-based preemption this is deterministic since wasm always
    ///   consumes a fixed amount of fuel per-operation. The downside of this
    ///   approach, however, is that the compiled code instrumentation is
    ///   significantly more expensive than epoch checks.
    ///
    ///   Note that to prevent infinite execution of wasm it's recommended to
    ///   place a timeout on the entire future representing executing wasm code
    ///   and the periodic yields with fuel should ensure that when the
    ///   timeout is reached it's appropriately recognized.
    ///
    /// In all cases special care needs to be taken when integrating
    /// asynchronous wasm into your application. You should carefully plan where
    /// WebAssembly will execute and what compute resources will be allotted to
    /// it. If Wasmtime doesn't support exactly what you'd like just yet, please
    /// feel free to open an issue!
    #[cfg(feature = "async")]
    pub fn async_support(&mut self, enable: bool) -> &mut Self {
        self.async_support = enable;
        self
    }
433
    /// Configures whether DWARF debug information will be emitted during
    /// compilation.
    ///
    /// Note that the `debug-builtins` compile-time Cargo feature must also be
    /// enabled for native debuggers such as GDB or LLDB to be able to debug
    /// guest WebAssembly programs.
    ///
    /// By default this option is `false`.
    ///
    /// **Note** Enabling this option is not compatible with the Winch compiler.
    pub fn debug_info(&mut self, enable: bool) -> &mut Self {
        self.tunables.generate_native_debuginfo = Some(enable);
        self
    }
447
    /// Configures whether [`WasmBacktrace`] will be present in the context of
    /// errors returned from Wasmtime.
    ///
    /// A backtrace may be collected whenever an error is returned from a host
    /// function call through to WebAssembly or when WebAssembly itself hits a
    /// trap condition, such as an out-of-bounds memory access. This flag
    /// indicates, in these conditions, whether the backtrace is collected or
    /// not.
    ///
    /// Currently wasm backtraces are implemented through frame pointer walking.
    /// This means that collecting a backtrace is expected to be a fast and
    /// relatively cheap operation. Additionally backtrace collection is
    /// suitable in concurrent environments since one thread capturing a
    /// backtrace won't block other threads.
    ///
    /// Collected backtraces are attached via [`anyhow::Error::context`] to
    /// errors returned from host functions. The [`WasmBacktrace`] type can be
    /// acquired via [`anyhow::Error::downcast_ref`] to inspect the backtrace.
    /// When this option is disabled then this context is never applied to
    /// errors coming out of wasm.
    ///
    /// This option is `true` by default.
    ///
    /// [`WasmBacktrace`]: crate::WasmBacktrace
    pub fn wasm_backtrace(&mut self, enable: bool) -> &mut Self {
        self.wasm_backtrace = enable;
        self
    }
476
477 /// Configures whether backtraces in `Trap` will parse debug info in the wasm file to
478 /// have filename/line number information.
479 ///
480 /// When enabled this will causes modules to retain debugging information
481 /// found in wasm binaries. This debug information will be used when a trap
482 /// happens to symbolicate each stack frame and attempt to print a
483 /// filename/line number for each wasm frame in the stack trace.
484 ///
485 /// By default this option is `WasmBacktraceDetails::Environment`, meaning
486 /// that wasm will read `WASMTIME_BACKTRACE_DETAILS` to indicate whether
487 /// details should be parsed. Note that the `std` feature of this crate must
488 /// be active to read environment variables, otherwise this is disabled by
489 /// default.
490 pub fn wasm_backtrace_details(&mut self, enable: WasmBacktraceDetails) -> &mut Self {
491 self.wasm_backtrace_details_env_used = false;
492 self.tunables.parse_wasm_debuginfo = match enable {
493 WasmBacktraceDetails::Enable => Some(true),
494 WasmBacktraceDetails::Disable => Some(false),
495 WasmBacktraceDetails::Environment => {
496 #[cfg(feature = "std")]
497 {
498 self.wasm_backtrace_details_env_used = true;
499 std::env::var("WASMTIME_BACKTRACE_DETAILS")
500 .map(|s| Some(s == "1"))
501 .unwrap_or(Some(false))
502 }
503 #[cfg(not(feature = "std"))]
504 {
505 Some(false)
506 }
507 }
508 };
509 self
510 }
511
    /// Configures whether to generate native unwind information
    /// (e.g. `.eh_frame` on Linux).
    ///
    /// This configuration option only exists to help third-party stack
    /// capturing mechanisms, such as the system's unwinder or the `backtrace`
    /// crate, determine how to unwind through Wasm frames. It does not affect
    /// whether Wasmtime can capture Wasm backtraces or not. The presence of
    /// [`WasmBacktrace`] is controlled by the [`Config::wasm_backtrace`]
    /// option.
    ///
    /// Native unwind information is included:
    /// - When targeting Windows, since the Windows ABI requires it.
    /// - By default.
    ///
    /// Note that systems loading many modules may wish to disable this
    /// configuration option instead of leaving it on-by-default. Some platforms
    /// exhibit quadratic behavior when registering/unregistering unwinding
    /// information which can greatly slow down the module loading/unloading
    /// process.
    ///
    /// [`WasmBacktrace`]: crate::WasmBacktrace
    pub fn native_unwind_info(&mut self, enable: bool) -> &mut Self {
        self.native_unwind_info = Some(enable);
        self
    }
537
    /// Configures whether execution of WebAssembly will "consume fuel" to
    /// either halt or yield execution as desired.
    ///
    /// This can be used to deterministically prevent infinitely-executing
    /// WebAssembly code by instrumenting generated code to consume fuel as it
    /// executes. When fuel runs out a trap is raised, however [`Store`] can be
    /// configured to yield execution periodically via
    /// [`crate::Store::fuel_async_yield_interval`].
    ///
    /// Note that a [`Store`] starts with no fuel, so if you enable this option
    /// you'll have to be sure to pour some fuel into [`Store`] before
    /// executing some code.
    ///
    /// By default this option is `false`.
    ///
    /// **Note** Enabling this option is not compatible with the Winch compiler.
    ///
    /// [`Store`]: crate::Store
    pub fn consume_fuel(&mut self, enable: bool) -> &mut Self {
        self.tunables.consume_fuel = Some(enable);
        self
    }
560
    /// Enables epoch-based interruption.
    ///
    /// When executing code in async mode, we sometimes want to
    /// implement a form of cooperative timeslicing: long-running Wasm
    /// guest code should periodically yield to the executor
    /// loop. This yielding could be implemented by using "fuel" (see
    /// [`consume_fuel`](Config::consume_fuel)). However, fuel
    /// instrumentation is somewhat expensive: it modifies the
    /// compiled form of the Wasm code so that it maintains a precise
    /// instruction count, frequently checking this count against the
    /// remaining fuel. If one does not need this precise count or
    /// deterministic interruptions, and only needs a periodic
    /// interrupt of some form, then it would be better to have a more
    /// lightweight mechanism.
    ///
    /// Epoch-based interruption is that mechanism. There is a global
    /// "epoch", which is a counter that divides time into arbitrary
    /// periods (or epochs). This counter lives on the
    /// [`Engine`](crate::Engine) and can be incremented by calling
    /// [`Engine::increment_epoch`](crate::Engine::increment_epoch).
    /// Epoch-based instrumentation works by setting a "deadline
    /// epoch". The compiled code knows the deadline, and at certain
    /// points, checks the current epoch against that deadline. It
    /// will yield if the deadline has been reached.
    ///
    /// The idea is that checking an infrequently-changing counter is
    /// cheaper than counting and frequently storing a precise metric
    /// (instructions executed) locally. The interruptions are not
    /// deterministic, but if the embedder increments the epoch in a
    /// periodic way (say, every regular timer tick by a thread or
    /// signal handler), then we can ensure that all async code will
    /// yield to the executor within a bounded time.
    ///
    /// The deadline check cannot be avoided by malicious wasm code. It is safe
    /// to use epoch deadlines to limit the execution time of untrusted
    /// code.
    ///
    /// The [`Store`](crate::Store) tracks the deadline, and controls
    /// what happens when the deadline is reached during
    /// execution. Several behaviors are possible:
    ///
    /// - Trap if code is executing when the epoch deadline is
    ///   met. See
    ///   [`Store::epoch_deadline_trap`](crate::Store::epoch_deadline_trap).
    ///
    /// - Call an arbitrary function. This function may choose to trap or
    ///   increment the epoch. See
    ///   [`Store::epoch_deadline_callback`](crate::Store::epoch_deadline_callback).
    ///
    /// - Yield to the executor loop, then resume when the future is
    ///   next polled. See
    ///   [`Store::epoch_deadline_async_yield_and_update`](crate::Store::epoch_deadline_async_yield_and_update).
    ///
    /// Trapping is the default. The yielding behavior may be used for
    /// the timeslicing behavior described above.
    ///
    /// This feature is available with or without async support.
    /// However, without async support, the timeslicing behavior is
    /// not available. This means epoch-based interruption can only
    /// serve as a simple external-interruption mechanism.
    ///
    /// An initial deadline must be set before executing code by calling
    /// [`Store::set_epoch_deadline`](crate::Store::set_epoch_deadline). If this
    /// deadline is not configured then wasm will immediately trap.
    ///
    /// ## Interaction with blocking host calls
    ///
    /// Epochs (and fuel) do not assist in handling WebAssembly code blocked in
    /// a call to the host. For example if the WebAssembly function calls
    /// `wasi:io/poll/poll` to sleep epochs will not assist in waking this up or
    /// timing it out. Epochs intentionally only affect running WebAssembly code
    /// itself and it's left to the embedder to determine how best to wake up
    /// indefinitely blocking code in the host.
    ///
    /// The typical solution for this, however, is to use
    /// [`Config::async_support(true)`](Config::async_support) and the `async`
    /// variant of WASI host functions. This models computation as a Rust
    /// `Future` which means that when blocking happens the future is only
    /// suspended and control yields back to the main event loop. This gives the
    /// embedder the opportunity to use `tokio::time::timeout` for example on a
    /// wasm computation and have the desired effect of cancelling a blocking
    /// operation when a timeout expires.
    ///
    /// ## When to use fuel vs. epochs
    ///
    /// In general, epoch-based interruption results in faster
    /// execution. This difference is sometimes significant: in some
    /// measurements, up to 2-3x. This is because epoch-based
    /// interruption does less work: it only watches for a global
    /// rarely-changing counter to increment, rather than keeping a
    /// local frequently-changing counter and comparing it to a
    /// deadline.
    ///
    /// Fuel, in contrast, should be used when *deterministic*
    /// yielding or trapping is needed. For example, if it is required
    /// that the same function call with the same starting state will
    /// always either complete or trap with an out-of-fuel error,
    /// deterministically, then fuel with a fixed bound should be
    /// used.
    ///
    /// **Note** Enabling this option is not compatible with the Winch compiler.
    ///
    /// # See Also
    ///
    /// - [`Engine::increment_epoch`](crate::Engine::increment_epoch)
    /// - [`Store::set_epoch_deadline`](crate::Store::set_epoch_deadline)
    /// - [`Store::epoch_deadline_trap`](crate::Store::epoch_deadline_trap)
    /// - [`Store::epoch_deadline_callback`](crate::Store::epoch_deadline_callback)
    /// - [`Store::epoch_deadline_async_yield_and_update`](crate::Store::epoch_deadline_async_yield_and_update)
    pub fn epoch_interruption(&mut self, enable: bool) -> &mut Self {
        self.tunables.epoch_interruption = Some(enable);
        self
    }
674
675 /// Configures the maximum amount of stack space available for
676 /// executing WebAssembly code.
677 ///
678 /// WebAssembly has well-defined semantics on stack overflow. This is
679 /// intended to be a knob which can help configure how much stack space
680 /// wasm execution is allowed to consume. Note that the number here is not
681 /// super-precise, but rather wasm will take at most "pretty close to this
682 /// much" stack space.
683 ///
684 /// If a wasm call (or series of nested wasm calls) take more stack space
685 /// than the `size` specified then a stack overflow trap will be raised.
686 ///
687 /// Caveat: this knob only limits the stack space consumed by wasm code.
688 /// More importantly, it does not ensure that this much stack space is
689 /// available on the calling thread stack. Exhausting the thread stack
690 /// typically leads to an **abort** of the process.
691 ///
692 /// Here are some examples of how that could happen:
693 ///
694 /// - Let's assume this option is set to 2 MiB and then a thread that has
695 /// a stack with 512 KiB left.
696 ///
697 /// If wasm code consumes more than 512 KiB then the process will be aborted.
698 ///
699 /// - Assuming the same conditions, but this time wasm code does not consume
700 /// any stack but calls into a host function. The host function consumes
701 /// more than 512 KiB of stack space. The process will be aborted.
702 ///
703 /// There's another gotcha related to recursive calling into wasm: the stack
704 /// space consumed by a host function is counted towards this limit. The
705 /// host functions are not prevented from consuming more than this limit.
706 /// However, if the host function that used more than this limit and called
707 /// back into wasm, then the execution will trap immediately because of
708 /// stack overflow.
709 ///
710 /// When the `async` feature is enabled, this value cannot exceed the
711 /// `async_stack_size` option. Be careful not to set this value too close
712 /// to `async_stack_size` as doing so may limit how much stack space
713 /// is available for host functions.
714 ///
715 /// By default this option is 512 KiB.
716 ///
717 /// # Errors
718 ///
719 /// The `Engine::new` method will fail if the `size` specified here is
720 /// either 0 or larger than the [`Config::async_stack_size`] configuration.
    pub fn max_wasm_stack(&mut self, size: usize) -> &mut Self {
        // Stored as-is; `Engine::new` validates that the value is non-zero
        // and (with the `async` feature) does not exceed `async_stack_size`.
        self.max_wasm_stack = size;
        self
    }
725
726 /// Configures the size of the stacks used for asynchronous execution.
727 ///
728 /// This setting configures the size of the stacks that are allocated for
729 /// asynchronous execution. The value cannot be less than `max_wasm_stack`.
730 ///
731 /// The amount of stack space guaranteed for host functions is
732 /// `async_stack_size - max_wasm_stack`, so take care not to set these two values
733 /// close to one another; doing so may cause host functions to overflow the
734 /// stack and abort the process.
735 ///
736 /// By default this option is 2 MiB.
737 ///
738 /// # Errors
739 ///
740 /// The `Engine::new` method will fail if the value for this option is
741 /// smaller than the [`Config::max_wasm_stack`] option.
742 #[cfg(any(feature = "async", feature = "stack-switching"))]
743 pub fn async_stack_size(&mut self, size: usize) -> &mut Self {
744 self.async_stack_size = size;
745 self
746 }
747
748 /// Configures whether or not stacks used for async futures are zeroed
749 /// before (re)use.
750 ///
751 /// When the [`async_support`](Config::async_support) method is enabled for
752 /// Wasmtime and the [`call_async`] variant of calling WebAssembly is used
753 /// then Wasmtime will create a separate runtime execution stack for each
754 /// future produced by [`call_async`]. By default upon allocation, depending
755 /// on the platform, these stacks might be filled with uninitialized
756 /// memory. This is safe and correct because, modulo bugs in Wasmtime,
757 /// compiled Wasm code will never read from a stack slot before it
758 /// initializes the stack slot.
759 ///
760 /// However, as a defense-in-depth mechanism, you may configure Wasmtime to
761 /// ensure that these stacks are zeroed before they are used. Notably, if
762 /// you are using the pooling allocator, stacks can be pooled and reused
763 /// across different Wasm guests; ensuring that stacks are zeroed can
764 /// prevent data leakage between Wasm guests even in the face of potential
765 /// read-of-stack-slot-before-initialization bugs in Wasmtime's compiler.
766 ///
767 /// Stack zeroing can be a costly operation in highly concurrent
768 /// environments due to modifications of the virtual address space requiring
769 /// process-wide synchronization. It can also be costly in `no-std`
770 /// environments that must manually zero memory, and cannot rely on an OS
771 /// and virtual memory to provide zeroed pages.
772 ///
773 /// This option defaults to `false`.
774 ///
775 /// [`call_async`]: crate::TypedFunc::call_async
776 #[cfg(feature = "async")]
777 pub fn async_stack_zeroing(&mut self, enable: bool) -> &mut Self {
778 self.async_stack_zeroing = enable;
779 self
780 }
781
782 fn wasm_feature(&mut self, flag: WasmFeatures, enable: bool) -> &mut Self {
783 self.enabled_features.set(flag, enable);
784 self.disabled_features.set(flag, !enable);
785 self
786 }
787
788 /// Configures whether the WebAssembly tail calls proposal will be enabled
789 /// for compilation or not.
790 ///
791 /// The [WebAssembly tail calls proposal] introduces the `return_call` and
792 /// `return_call_indirect` instructions. These instructions allow for Wasm
793 /// programs to implement some recursive algorithms with *O(1)* stack space
794 /// usage.
795 ///
796 /// This is `true` by default except when the Winch compiler is enabled.
797 ///
798 /// [WebAssembly tail calls proposal]: https://github.com/WebAssembly/tail-call
799 pub fn wasm_tail_call(&mut self, enable: bool) -> &mut Self {
800 self.wasm_feature(WasmFeatures::TAIL_CALL, enable);
801 self
802 }
803
804 /// Configures whether the WebAssembly custom-page-sizes proposal will be
805 /// enabled for compilation or not.
806 ///
807 /// The [WebAssembly custom-page-sizes proposal] allows a memory to
808 /// customize its page sizes. By default, Wasm page sizes are 64KiB
809 /// large. This proposal allows the memory to opt into smaller page sizes
810 /// instead, allowing Wasm to run in environments with less than 64KiB RAM
811 /// available, for example.
812 ///
813 /// Note that the page size is part of the memory's type, and because
814 /// different memories may have different types, they may also have
815 /// different page sizes.
816 ///
817 /// Currently the only valid page sizes are 64KiB (the default) and 1
818 /// byte. Future extensions may relax this constraint and allow all powers
819 /// of two.
820 ///
821 /// Support for this proposal is disabled by default.
822 ///
823 /// [WebAssembly custom-page-sizes proposal]: https://github.com/WebAssembly/custom-page-sizes
824 pub fn wasm_custom_page_sizes(&mut self, enable: bool) -> &mut Self {
825 self.wasm_feature(WasmFeatures::CUSTOM_PAGE_SIZES, enable);
826 self
827 }
828
829 /// Configures whether the WebAssembly [threads] proposal will be enabled
830 /// for compilation.
831 ///
832 /// This feature gates items such as shared memories and atomic
833 /// instructions. Note that the threads feature depends on the bulk memory
834 /// feature, which is enabled by default. Additionally note that while the
835 /// wasm feature is called "threads" it does not actually include the
836 /// ability to spawn threads. Spawning threads is part of the [wasi-threads]
837 /// proposal which is a separately gated feature in Wasmtime.
838 ///
839 /// Embeddings of Wasmtime are able to build their own custom threading
840 /// scheme on top of the core wasm threads proposal, however.
841 ///
842 /// The default value for this option is whether the `threads`
843 /// crate feature of Wasmtime is enabled or not. By default this crate
844 /// feature is enabled.
845 ///
846 /// [threads]: https://github.com/webassembly/threads
847 /// [wasi-threads]: https://github.com/webassembly/wasi-threads
848 #[cfg(feature = "threads")]
849 pub fn wasm_threads(&mut self, enable: bool) -> &mut Self {
850 self.wasm_feature(WasmFeatures::THREADS, enable);
851 self
852 }
853
854 /// Configures whether the WebAssembly [shared-everything-threads] proposal
855 /// will be enabled for compilation.
856 ///
857 /// This feature gates extended use of the `shared` attribute on items other
858 /// than memories, extra atomic instructions, and new component model
859 /// intrinsics for spawning threads. It depends on the
860 /// [`wasm_threads`][Self::wasm_threads] being enabled.
861 ///
862 /// [shared-everything-threads]:
863 /// https://github.com/webassembly/shared-everything-threads
864 pub fn wasm_shared_everything_threads(&mut self, enable: bool) -> &mut Self {
865 self.wasm_feature(WasmFeatures::SHARED_EVERYTHING_THREADS, enable);
866 self
867 }
868
869 /// Configures whether the [WebAssembly reference types proposal][proposal]
870 /// will be enabled for compilation.
871 ///
872 /// This feature gates items such as the `externref` and `funcref` types as
873 /// well as allowing a module to define multiple tables.
874 ///
875 /// Note that the reference types proposal depends on the bulk memory proposal.
876 ///
877 /// This feature is `true` by default.
878 ///
879 /// # Errors
880 ///
881 /// The validation of this feature are deferred until the engine is being built,
882 /// and thus may cause `Engine::new` fail if the `bulk_memory` feature is disabled.
883 ///
884 /// [proposal]: https://github.com/webassembly/reference-types
885 #[cfg(feature = "gc")]
886 pub fn wasm_reference_types(&mut self, enable: bool) -> &mut Self {
887 self.wasm_feature(WasmFeatures::REFERENCE_TYPES, enable);
888 self
889 }
890
891 /// Configures whether the [WebAssembly function references
892 /// proposal][proposal] will be enabled for compilation.
893 ///
894 /// This feature gates non-nullable reference types, function reference
895 /// types, `call_ref`, `ref.func`, and non-nullable reference related
896 /// instructions.
897 ///
898 /// Note that the function references proposal depends on the reference
899 /// types proposal.
900 ///
901 /// This feature is `false` by default.
902 ///
903 /// [proposal]: https://github.com/WebAssembly/function-references
904 #[cfg(feature = "gc")]
905 pub fn wasm_function_references(&mut self, enable: bool) -> &mut Self {
906 self.wasm_feature(WasmFeatures::FUNCTION_REFERENCES, enable);
907 self
908 }
909
910 /// Configures whether the [WebAssembly wide-arithmetic][proposal] will be
911 /// enabled for compilation.
912 ///
913 /// This feature is `false` by default.
914 ///
915 /// [proposal]: https://github.com/WebAssembly/wide-arithmetic
916 pub fn wasm_wide_arithmetic(&mut self, enable: bool) -> &mut Self {
917 self.wasm_feature(WasmFeatures::WIDE_ARITHMETIC, enable);
918 self
919 }
920
921 /// Configures whether the [WebAssembly Garbage Collection
922 /// proposal][proposal] will be enabled for compilation.
923 ///
924 /// This feature gates `struct` and `array` type definitions and references,
925 /// the `i31ref` type, and all related instructions.
926 ///
927 /// Note that the function references proposal depends on the typed function
928 /// references proposal.
929 ///
930 /// This feature is `false` by default.
931 ///
932 /// **Warning: Wasmtime's implementation of the GC proposal is still in
933 /// progress and generally not ready for primetime.**
934 ///
935 /// [proposal]: https://github.com/WebAssembly/gc
936 #[cfg(feature = "gc")]
937 pub fn wasm_gc(&mut self, enable: bool) -> &mut Self {
938 self.wasm_feature(WasmFeatures::GC, enable);
939 self
940 }
941
942 /// Configures whether the WebAssembly SIMD proposal will be
943 /// enabled for compilation.
944 ///
945 /// The [WebAssembly SIMD proposal][proposal]. This feature gates items such
946 /// as the `v128` type and all of its operators being in a module. Note that
947 /// this does not enable the [relaxed simd proposal].
948 ///
949 /// **Note**
950 ///
951 /// On x86_64 platforms the base CPU feature requirement for SIMD
952 /// is SSE2 for the Cranelift compiler and AVX for the Winch compiler.
953 ///
954 /// This is `true` by default.
955 ///
956 /// [proposal]: https://github.com/webassembly/simd
957 /// [relaxed simd proposal]: https://github.com/WebAssembly/relaxed-simd
958 pub fn wasm_simd(&mut self, enable: bool) -> &mut Self {
959 self.wasm_feature(WasmFeatures::SIMD, enable);
960 self
961 }
962
963 /// Configures whether the WebAssembly Relaxed SIMD proposal will be
964 /// enabled for compilation.
965 ///
966 /// The relaxed SIMD proposal adds new instructions to WebAssembly which,
967 /// for some specific inputs, are allowed to produce different results on
968 /// different hosts. More-or-less this proposal enables exposing
969 /// platform-specific semantics of SIMD instructions in a controlled
970 /// fashion to a WebAssembly program. From an embedder's perspective this
971 /// means that WebAssembly programs may execute differently depending on
972 /// whether the host is x86_64 or AArch64, for example.
973 ///
974 /// By default Wasmtime lowers relaxed SIMD instructions to the fastest
975 /// lowering for the platform it's running on. This means that, by default,
976 /// some relaxed SIMD instructions may have different results for the same
977 /// inputs across x86_64 and AArch64. This behavior can be disabled through
978 /// the [`Config::relaxed_simd_deterministic`] option which will force
979 /// deterministic behavior across all platforms, as classified by the
980 /// specification, at the cost of performance.
981 ///
982 /// This is `true` by default.
983 ///
984 /// [proposal]: https://github.com/webassembly/relaxed-simd
985 pub fn wasm_relaxed_simd(&mut self, enable: bool) -> &mut Self {
986 self.wasm_feature(WasmFeatures::RELAXED_SIMD, enable);
987 self
988 }
989
990 /// This option can be used to control the behavior of the [relaxed SIMD
991 /// proposal's][proposal] instructions.
992 ///
993 /// The relaxed SIMD proposal introduces instructions that are allowed to
994 /// have different behavior on different architectures, primarily to afford
995 /// an efficient implementation on all architectures. This means, however,
996 /// that the same module may execute differently on one host than another,
997 /// which typically is not otherwise the case. This option is provided to
998 /// force Wasmtime to generate deterministic code for all relaxed simd
999 /// instructions, at the cost of performance, for all architectures. When
1000 /// this option is enabled then the deterministic behavior of all
1001 /// instructions in the relaxed SIMD proposal is selected.
1002 ///
1003 /// This is `false` by default.
1004 ///
1005 /// [proposal]: https://github.com/webassembly/relaxed-simd
1006 pub fn relaxed_simd_deterministic(&mut self, enable: bool) -> &mut Self {
1007 self.tunables.relaxed_simd_deterministic = Some(enable);
1008 self
1009 }
1010
1011 /// Configures whether the [WebAssembly bulk memory operations
1012 /// proposal][proposal] will be enabled for compilation.
1013 ///
1014 /// This feature gates items such as the `memory.copy` instruction, passive
1015 /// data/table segments, etc, being in a module.
1016 ///
1017 /// This is `true` by default.
1018 ///
1019 /// Feature `reference_types`, which is also `true` by default, requires
1020 /// this feature to be enabled. Thus disabling this feature must also disable
1021 /// `reference_types` as well using [`wasm_reference_types`](crate::Config::wasm_reference_types).
1022 ///
1023 /// # Errors
1024 ///
1025 /// Disabling this feature without disabling `reference_types` will cause
1026 /// `Engine::new` to fail.
1027 ///
1028 /// [proposal]: https://github.com/webassembly/bulk-memory-operations
1029 pub fn wasm_bulk_memory(&mut self, enable: bool) -> &mut Self {
1030 self.wasm_feature(WasmFeatures::BULK_MEMORY, enable);
1031 self
1032 }
1033
1034 /// Configures whether the WebAssembly multi-value [proposal] will
1035 /// be enabled for compilation.
1036 ///
1037 /// This feature gates functions and blocks returning multiple values in a
1038 /// module, for example.
1039 ///
1040 /// This is `true` by default.
1041 ///
1042 /// [proposal]: https://github.com/webassembly/multi-value
1043 pub fn wasm_multi_value(&mut self, enable: bool) -> &mut Self {
1044 self.wasm_feature(WasmFeatures::MULTI_VALUE, enable);
1045 self
1046 }
1047
1048 /// Configures whether the WebAssembly multi-memory [proposal] will
1049 /// be enabled for compilation.
1050 ///
1051 /// This feature gates modules having more than one linear memory
1052 /// declaration or import.
1053 ///
1054 /// This is `true` by default.
1055 ///
1056 /// [proposal]: https://github.com/webassembly/multi-memory
1057 pub fn wasm_multi_memory(&mut self, enable: bool) -> &mut Self {
1058 self.wasm_feature(WasmFeatures::MULTI_MEMORY, enable);
1059 self
1060 }
1061
1062 /// Configures whether the WebAssembly memory64 [proposal] will
1063 /// be enabled for compilation.
1064 ///
1065 /// Note that this the upstream specification is not finalized and Wasmtime
1066 /// may also have bugs for this feature since it hasn't been exercised
1067 /// much.
1068 ///
1069 /// This is `false` by default.
1070 ///
1071 /// [proposal]: https://github.com/webassembly/memory64
1072 pub fn wasm_memory64(&mut self, enable: bool) -> &mut Self {
1073 self.wasm_feature(WasmFeatures::MEMORY64, enable);
1074 self
1075 }
1076
1077 /// Configures whether the WebAssembly extended-const [proposal] will
1078 /// be enabled for compilation.
1079 ///
1080 /// This is `true` by default.
1081 ///
1082 /// [proposal]: https://github.com/webassembly/extended-const
1083 pub fn wasm_extended_const(&mut self, enable: bool) -> &mut Self {
1084 self.wasm_feature(WasmFeatures::EXTENDED_CONST, enable);
1085 self
1086 }
1087
1088 /// Configures whether the [WebAssembly stack switching
1089 /// proposal][proposal] will be enabled for compilation.
1090 ///
1091 /// This feature gates the use of control tags.
1092 ///
1093 /// This feature depends on the `function_reference_types` and
1094 /// `exceptions` features.
1095 ///
1096 /// This feature is `false` by default.
1097 ///
1098 /// # Errors
1099 ///
1100 /// [proposal]: https://github.com/webassembly/stack-switching
1101 pub fn wasm_stack_switching(&mut self, enable: bool) -> &mut Self {
1102 self.wasm_feature(WasmFeatures::STACK_SWITCHING, enable);
1103 self
1104 }
1105
1106 /// Configures whether the WebAssembly component-model [proposal] will
1107 /// be enabled for compilation.
1108 ///
1109 /// This flag can be used to blanket disable all components within Wasmtime.
1110 /// Otherwise usage of components requires statically using
1111 /// [`Component`](crate::component::Component) instead of
1112 /// [`Module`](crate::Module) for example anyway.
1113 ///
1114 /// The default value for this option is whether the `component-model`
1115 /// crate feature of Wasmtime is enabled or not. By default this crate
1116 /// feature is enabled.
1117 ///
1118 /// [proposal]: https://github.com/webassembly/component-model
1119 #[cfg(feature = "component-model")]
1120 pub fn wasm_component_model(&mut self, enable: bool) -> &mut Self {
1121 self.wasm_feature(WasmFeatures::COMPONENT_MODEL, enable);
1122 self
1123 }
1124
1125 /// Configures whether components support the async ABI [proposal] for
1126 /// lifting and lowering functions, as well as `stream`, `future`, and
1127 /// `error-context` types.
1128 ///
1129 /// Please note that Wasmtime's support for this feature is _very_
1130 /// incomplete.
1131 ///
1132 /// [proposal]:
1133 /// https://github.com/WebAssembly/component-model/blob/main/design/mvp/Async.md
1134 #[cfg(feature = "component-model-async")]
1135 pub fn wasm_component_model_async(&mut self, enable: bool) -> &mut Self {
1136 self.wasm_feature(WasmFeatures::CM_ASYNC, enable);
1137 self
1138 }
1139
1140 /// This corresponds to the 🚝 emoji in the component model specification.
1141 ///
1142 /// Please note that Wasmtime's support for this feature is _very_
1143 /// incomplete.
1144 ///
1145 /// [proposal]:
1146 /// https://github.com/WebAssembly/component-model/blob/main/design/mvp/Async.md
1147 #[cfg(feature = "component-model-async")]
1148 pub fn wasm_component_model_async_builtins(&mut self, enable: bool) -> &mut Self {
1149 self.wasm_feature(WasmFeatures::CM_ASYNC_BUILTINS, enable);
1150 self
1151 }
1152
1153 /// This corresponds to the 🚟 emoji in the component model specification.
1154 ///
1155 /// Please note that Wasmtime's support for this feature is _very_
1156 /// incomplete.
1157 ///
1158 /// [proposal]: https://github.com/WebAssembly/component-model/blob/main/design/mvp/Async.md
1159 #[cfg(feature = "component-model-async")]
1160 pub fn wasm_component_model_async_stackful(&mut self, enable: bool) -> &mut Self {
1161 self.wasm_feature(WasmFeatures::CM_ASYNC_STACKFUL, enable);
1162 self
1163 }
1164
1165 /// This corresponds to the 📝 emoji in the component model specification.
1166 ///
1167 /// Please note that Wasmtime's support for this feature is _very_
1168 /// incomplete.
1169 ///
1170 /// [proposal]: https://github.com/WebAssembly/component-model/blob/main/design/mvp/Async.md
1171 #[cfg(feature = "component-model")]
1172 pub fn wasm_component_model_error_context(&mut self, enable: bool) -> &mut Self {
1173 self.wasm_feature(WasmFeatures::CM_ERROR_CONTEXT, enable);
1174 self
1175 }
1176
1177 /// Configures whether the [GC extension to the component-model
1178 /// proposal][proposal] is enabled or not.
1179 ///
1180 /// This corresponds to the 🛸 emoji in the component model specification.
1181 ///
1182 /// Please note that Wasmtime's support for this feature is _very_
1183 /// incomplete.
1184 ///
1185 /// [proposal]: https://github.com/WebAssembly/component-model/issues/525
1186 #[cfg(feature = "component-model")]
1187 pub fn wasm_component_model_gc(&mut self, enable: bool) -> &mut Self {
1188 self.wasm_feature(WasmFeatures::CM_GC, enable);
1189 self
1190 }
1191
1192 /// Configures whether the [Exception-handling proposal][proposal] is enabled or not.
1193 ///
1194 /// [proposal]: https://github.com/WebAssembly/exception-handling
1195 #[cfg(feature = "gc")]
1196 pub fn wasm_exceptions(&mut self, enable: bool) -> &mut Self {
1197 self.wasm_feature(WasmFeatures::EXCEPTIONS, enable);
1198 self
1199 }
1200
1201 #[doc(hidden)] // FIXME(#3427) - if/when implemented then un-hide this
1202 #[deprecated = "This configuration option only exists for internal \
1203 usage with the spec testsuite. It may be removed at \
1204 any time and without warning. Do not rely on it!"]
1205 pub fn wasm_legacy_exceptions(&mut self, enable: bool) -> &mut Self {
1206 self.wasm_feature(WasmFeatures::LEGACY_EXCEPTIONS, enable);
1207 self
1208 }
1209
1210 /// Configures which compilation strategy will be used for wasm modules.
1211 ///
1212 /// This method can be used to configure which compiler is used for wasm
1213 /// modules, and for more documentation consult the [`Strategy`] enumeration
1214 /// and its documentation.
1215 ///
1216 /// The default value for this is `Strategy::Auto`.
1217 #[cfg(any(feature = "cranelift", feature = "winch"))]
1218 pub fn strategy(&mut self, strategy: Strategy) -> &mut Self {
1219 self.compiler_config.strategy = strategy.not_auto();
1220 self
1221 }
1222
1223 /// Configures which garbage collector will be used for Wasm modules.
1224 ///
1225 /// This method can be used to configure which garbage collector
1226 /// implementation is used for Wasm modules. For more documentation, consult
1227 /// the [`Collector`] enumeration and its documentation.
1228 ///
1229 /// The default value for this is `Collector::Auto`.
1230 #[cfg(feature = "gc")]
1231 pub fn collector(&mut self, collector: Collector) -> &mut Self {
1232 self.collector = collector;
1233 self
1234 }
1235
1236 /// Creates a default profiler based on the profiling strategy chosen.
1237 ///
1238 /// Profiler creation calls the type's default initializer where the purpose is
1239 /// really just to put in place the type used for profiling.
1240 ///
1241 /// Some [`ProfilingStrategy`] require specific platforms or particular feature
1242 /// to be enabled, such as `ProfilingStrategy::JitDump` requires the `jitdump`
1243 /// feature.
1244 ///
1245 /// # Errors
1246 ///
1247 /// The validation of this field is deferred until the engine is being built, and thus may
1248 /// cause `Engine::new` fail if the required feature is disabled, or the platform is not
1249 /// supported.
1250 pub fn profiler(&mut self, profile: ProfilingStrategy) -> &mut Self {
1251 self.profiling_strategy = profile;
1252 self
1253 }
1254
1255 /// Configures whether the debug verifier of Cranelift is enabled or not.
1256 ///
1257 /// When Cranelift is used as a code generation backend this will configure
1258 /// it to have the `enable_verifier` flag which will enable a number of debug
1259 /// checks inside of Cranelift. This is largely only useful for the
1260 /// developers of wasmtime itself.
1261 ///
1262 /// The default value for this is `false`
1263 #[cfg(any(feature = "cranelift", feature = "winch"))]
1264 pub fn cranelift_debug_verifier(&mut self, enable: bool) -> &mut Self {
1265 let val = if enable { "true" } else { "false" };
1266 self.compiler_config
1267 .settings
1268 .insert("enable_verifier".to_string(), val.to_string());
1269 self
1270 }
1271
1272 /// Configures the Cranelift code generator optimization level.
1273 ///
1274 /// When the Cranelift code generator is used you can configure the
1275 /// optimization level used for generated code in a few various ways. For
1276 /// more information see the documentation of [`OptLevel`].
1277 ///
1278 /// The default value for this is `OptLevel::Speed`.
1279 #[cfg(any(feature = "cranelift", feature = "winch"))]
1280 pub fn cranelift_opt_level(&mut self, level: OptLevel) -> &mut Self {
1281 let val = match level {
1282 OptLevel::None => "none",
1283 OptLevel::Speed => "speed",
1284 OptLevel::SpeedAndSize => "speed_and_size",
1285 };
1286 self.compiler_config
1287 .settings
1288 .insert("opt_level".to_string(), val.to_string());
1289 self
1290 }
1291
1292 /// Configures the regalloc algorithm used by the Cranelift code generator.
1293 ///
1294 /// Cranelift can select any of several register allocator algorithms. Each
1295 /// of these algorithms generates correct code, but they represent different
1296 /// tradeoffs between compile speed (how expensive the compilation process
1297 /// is) and run-time speed (how fast the generated code runs).
1298 /// For more information see the documentation of [`RegallocAlgorithm`].
1299 ///
1300 /// The default value for this is `RegallocAlgorithm::Backtracking`.
1301 #[cfg(any(feature = "cranelift", feature = "winch"))]
1302 pub fn cranelift_regalloc_algorithm(&mut self, algo: RegallocAlgorithm) -> &mut Self {
1303 let val = match algo {
1304 RegallocAlgorithm::Backtracking => "backtracking",
1305 RegallocAlgorithm::SinglePass => "single_pass",
1306 };
1307 self.compiler_config
1308 .settings
1309 .insert("regalloc_algorithm".to_string(), val.to_string());
1310 self
1311 }
1312
1313 /// Configures whether Cranelift should perform a NaN-canonicalization pass.
1314 ///
1315 /// When Cranelift is used as a code generation backend this will configure
1316 /// it to replace NaNs with a single canonical value. This is useful for
1317 /// users requiring entirely deterministic WebAssembly computation. This is
1318 /// not required by the WebAssembly spec, so it is not enabled by default.
1319 ///
1320 /// Note that this option affects not only WebAssembly's `f32` and `f64`
1321 /// types but additionally the `v128` type. This option will cause
1322 /// operations using any of these types to have extra checks placed after
1323 /// them to normalize NaN values as needed.
1324 ///
1325 /// The default value for this is `false`
1326 #[cfg(any(feature = "cranelift", feature = "winch"))]
1327 pub fn cranelift_nan_canonicalization(&mut self, enable: bool) -> &mut Self {
1328 let val = if enable { "true" } else { "false" };
1329 self.compiler_config
1330 .settings
1331 .insert("enable_nan_canonicalization".to_string(), val.to_string());
1332 self
1333 }
1334
1335 /// Controls whether proof-carrying code (PCC) is used to validate
1336 /// lowering of Wasm sandbox checks.
1337 ///
1338 /// Proof-carrying code carries "facts" about program values from
1339 /// the IR all the way to machine code, and checks those facts
1340 /// against known machine-instruction semantics. This guards
1341 /// against bugs in instruction lowering that might create holes
1342 /// in the Wasm sandbox.
1343 ///
1344 /// PCC is designed to be fast: it does not require complex
1345 /// solvers or logic engines to verify, but only a linear pass
1346 /// over a trail of "breadcrumbs" or facts at each intermediate
1347 /// value. Thus, it is appropriate to enable in production.
1348 #[cfg(any(feature = "cranelift", feature = "winch"))]
1349 pub fn cranelift_pcc(&mut self, enable: bool) -> &mut Self {
1350 let val = if enable { "true" } else { "false" };
1351 self.compiler_config
1352 .settings
1353 .insert("enable_pcc".to_string(), val.to_string());
1354 self
1355 }
1356
1357 /// Allows setting a Cranelift boolean flag or preset. This allows
1358 /// fine-tuning of Cranelift settings.
1359 ///
1360 /// Since Cranelift flags may be unstable, this method should not be considered to be stable
1361 /// either; other `Config` functions should be preferred for stability.
1362 ///
1363 /// # Safety
1364 ///
1365 /// This is marked as unsafe, because setting the wrong flag might break invariants,
1366 /// resulting in execution hazards.
1367 ///
1368 /// # Errors
1369 ///
1370 /// The validation of the flags are deferred until the engine is being built, and thus may
1371 /// cause `Engine::new` fail if the flag's name does not exist, or the value is not appropriate
1372 /// for the flag type.
1373 #[cfg(any(feature = "cranelift", feature = "winch"))]
1374 pub unsafe fn cranelift_flag_enable(&mut self, flag: &str) -> &mut Self {
1375 self.compiler_config.flags.insert(flag.to_string());
1376 self
1377 }
1378
1379 /// Allows settings another Cranelift flag defined by a flag name and value. This allows
1380 /// fine-tuning of Cranelift settings.
1381 ///
1382 /// Since Cranelift flags may be unstable, this method should not be considered to be stable
1383 /// either; other `Config` functions should be preferred for stability.
1384 ///
1385 /// # Safety
1386 ///
1387 /// This is marked as unsafe, because setting the wrong flag might break invariants,
1388 /// resulting in execution hazards.
1389 ///
1390 /// # Errors
1391 ///
1392 /// The validation of the flags are deferred until the engine is being built, and thus may
1393 /// cause `Engine::new` fail if the flag's name does not exist, or incompatible with other
1394 /// settings.
1395 ///
1396 /// For example, feature `wasm_backtrace` will set `unwind_info` to `true`, but if it's
1397 /// manually set to false then it will fail.
1398 #[cfg(any(feature = "cranelift", feature = "winch"))]
1399 pub unsafe fn cranelift_flag_set(&mut self, name: &str, value: &str) -> &mut Self {
1400 self.compiler_config
1401 .settings
1402 .insert(name.to_string(), value.to_string());
1403 self
1404 }
1405
1406 /// Set a custom [`Cache`].
1407 ///
1408 /// To load a cache configuration from a file, use [`Cache::from_file`]. Otherwise, you can
1409 /// create a new cache config using [`CacheConfig::new`] and passing that to [`Cache::new`].
1410 ///
1411 /// If you want to disable the cache, you can call this method with `None`.
1412 ///
1413 /// By default, new configs do not have caching enabled.
1414 /// Every call to [`Module::new(my_wasm)`][crate::Module::new] will recompile `my_wasm`,
1415 /// even when it is unchanged, unless an enabled `CacheConfig` is provided.
1416 ///
1417 /// This method is only available when the `cache` feature of this crate is
1418 /// enabled.
1419 ///
1420 /// [docs]: https://bytecodealliance.github.io/wasmtime/cli-cache.html
1421 #[cfg(feature = "cache")]
1422 pub fn cache(&mut self, cache: Option<Cache>) -> &mut Self {
1423 self.cache = cache;
1424 self
1425 }
1426
1427 /// Sets a custom memory creator.
1428 ///
1429 /// Custom memory creators are used when creating host `Memory` objects or when
1430 /// creating instance linear memories for the on-demand instance allocation strategy.
1431 #[cfg(feature = "runtime")]
1432 pub fn with_host_memory(&mut self, mem_creator: Arc<dyn MemoryCreator>) -> &mut Self {
1433 self.mem_creator = Some(Arc::new(MemoryCreatorProxy(mem_creator)));
1434 self
1435 }
1436
1437 /// Sets a custom stack creator.
1438 ///
1439 /// Custom memory creators are used when creating creating async instance stacks for
1440 /// the on-demand instance allocation strategy.
1441 #[cfg(feature = "async")]
1442 pub fn with_host_stack(&mut self, stack_creator: Arc<dyn StackCreator>) -> &mut Self {
1443 self.stack_creator = Some(Arc::new(StackCreatorProxy(stack_creator)));
1444 self
1445 }
1446
1447 /// Sets a custom executable-memory publisher.
1448 ///
1449 /// Custom executable-memory publishers are hooks that allow
1450 /// Wasmtime to make certain regions of memory executable when
1451 /// loading precompiled modules or compiling new modules
1452 /// in-process. In most modern operating systems, memory allocated
1453 /// for heap usage is readable and writable by default but not
1454 /// executable. To jump to machine code stored in that memory, we
1455 /// need to make it executable. For security reasons, we usually
1456 /// also make it read-only at the same time, so the executing code
1457 /// can't be modified later.
1458 ///
1459 /// By default, Wasmtime will use the appropriate system calls on
1460 /// the host platform for this work. However, it also allows
1461 /// plugging in a custom implementation via this configuration
1462 /// option. This may be useful on custom or `no_std` platforms,
1463 /// for example, especially where virtual memory is not otherwise
1464 /// used by Wasmtime (no `signals-and-traps` feature).
    #[cfg(feature = "runtime")]
    pub fn with_custom_code_memory(
        &mut self,
        custom_code_memory: Option<Arc<dyn CustomCodeMemory>>,
    ) -> &mut Self {
        // `None` restores the default behavior of using the host platform's
        // native mechanism for publishing executable code.
        self.custom_code_memory = custom_code_memory;
        self
    }
1473
1474 /// Sets the instance allocation strategy to use.
1475 ///
1476 /// This is notably used in conjunction with
1477 /// [`InstanceAllocationStrategy::Pooling`] and [`PoolingAllocationConfig`].
1478 pub fn allocation_strategy(
1479 &mut self,
1480 strategy: impl Into<InstanceAllocationStrategy>,
1481 ) -> &mut Self {
1482 self.allocation_strategy = strategy.into();
1483 self
1484 }
1485
1486 /// Specifies the capacity of linear memories, in bytes, in their initial
1487 /// allocation.
1488 ///
1489 /// > Note: this value has important performance ramifications, be sure to
1490 /// > benchmark when setting this to a non-default value and read over this
1491 /// > documentation.
1492 ///
1493 /// This function will change the size of the initial memory allocation made
1494 /// for linear memories. This setting is only applicable when the initial
1495 /// size of a linear memory is below this threshold. Linear memories are
1496 /// allocated in the virtual address space of the host process with OS APIs
1497 /// such as `mmap` and this setting affects how large the allocation will
1498 /// be.
1499 ///
1500 /// ## Background: WebAssembly Linear Memories
1501 ///
1502 /// WebAssembly linear memories always start with a minimum size and can
1503 /// possibly grow up to a maximum size. The minimum size is always specified
1504 /// in a WebAssembly module itself and the maximum size can either be
1505 /// optionally specified in the module or inherently limited by the index
1506 /// type. For example for this module:
1507 ///
1508 /// ```wasm
1509 /// (module
1510 /// (memory $a 4)
1511 /// (memory $b 4096 4096 (pagesize 1))
1512 /// (memory $c i64 10)
1513 /// )
1514 /// ```
1515 ///
1516 /// * Memory `$a` initially allocates 4 WebAssembly pages (256KiB) and can
1517 /// grow up to 4GiB, the limit of the 32-bit index space.
1518 /// * Memory `$b` initially allocates 4096 WebAssembly pages, but in this
1519 /// case its page size is 1, so it's 4096 bytes. Memory can also grow no
1520 /// further meaning that it will always be 4096 bytes.
1521 /// * Memory `$c` is a 64-bit linear memory which starts with 640KiB of
1522 /// memory and can theoretically grow up to 2^64 bytes, although most
1523 /// hosts will run out of memory long before that.
1524 ///
1525 /// All operations on linear memories done by wasm are required to be
1526 /// in-bounds. Any access beyond the end of a linear memory is considered a
1527 /// trap.
1528 ///
1529 /// ## What this setting affects: Virtual Memory
1530 ///
1531 /// This setting is used to configure the behavior of the size of the linear
1532 /// memory allocation performed for each of these memories. For example the
1533 /// initial linear memory allocation looks like this:
1534 ///
1535 /// ```text
1536 /// memory_reservation
1537 /// |
1538 /// ◄─────────┴────────────────►
1539 /// ┌───────┬─────────┬──────────────────┬───────┐
1540 /// │ guard │ initial │ ... capacity ... │ guard │
1541 /// └───────┴─────────┴──────────────────┴───────┘
1542 /// ◄──┬──► ◄──┬──►
1543 /// │ │
1544 /// │ memory_guard_size
1545 /// │
1546 /// │
1547 /// memory_guard_size (if guard_before_linear_memory)
1548 /// ```
1549 ///
1550 /// Memory in the `initial` range is accessible to the instance and can be
1551 /// read/written by wasm code. Memory in the `guard` regions is never
1552 /// accessible to wasm code and memory in `capacity` is initially
1553 /// inaccessible but may become accessible through `memory.grow` instructions
1554 /// for example.
1555 ///
1556 /// This means that this setting is the size of the initial chunk of virtual
1557 /// memory that a linear memory may grow into.
1558 ///
1559 /// ## What this setting affects: Runtime Speed
1560 ///
1561 /// This is a performance-sensitive setting which is taken into account
1562 /// during the compilation process of a WebAssembly module. For example if a
1563 /// 32-bit WebAssembly linear memory has a `memory_reservation` size of 4GiB
1564 /// then bounds checks can be elided because `capacity` will be guaranteed
1565 /// to be unmapped for all addressable bytes that wasm can access (modulo a
1566 /// few details).
1567 ///
1568 /// If `memory_reservation` was something smaller like 256KiB then that
    /// would have a much smaller impact on virtual memory but the compiled code
1570 /// would then need to have explicit bounds checks to ensure that
1571 /// loads/stores are in-bounds.
1572 ///
1573 /// The goal of this setting is to enable skipping bounds checks in most
1574 /// modules by default. Some situations which require explicit bounds checks
1575 /// though are:
1576 ///
    /// * When `memory_reservation` is smaller than the addressable size of the
    ///   linear memory. For example 64-bit linear memories always need
    ///   bounds checks as they can address the entire virtual address space.
1580 /// For 32-bit linear memories a `memory_reservation` minimum size of 4GiB
1581 /// is required to elide bounds checks.
1582 ///
1583 /// * When linear memories have a page size of 1 then bounds checks are
1584 /// required. In this situation virtual memory can't be relied upon
1585 /// because that operates at the host page size granularity where wasm
1586 /// requires a per-byte level granularity.
1587 ///
1588 /// * Configuration settings such as [`Config::signals_based_traps`] can be
1589 /// used to disable the use of signal handlers and virtual memory so
1590 /// explicit bounds checks are required.
1591 ///
1592 /// * When [`Config::memory_guard_size`] is too small a bounds check may be
1593 /// required. For 32-bit wasm addresses are actually 33-bit effective
1594 /// addresses because loads/stores have a 32-bit static offset to add to
1595 /// the dynamic 32-bit address. If the static offset is larger than the
1596 /// size of the guard region then an explicit bounds check is required.
1597 ///
1598 /// ## What this setting affects: Memory Growth Behavior
1599 ///
1600 /// In addition to affecting bounds checks emitted in compiled code this
1601 /// setting also affects how WebAssembly linear memories are grown. The
1602 /// `memory.grow` instruction can be used to make a linear memory larger and
1603 /// this is also affected by APIs such as
1604 /// [`Memory::grow`](crate::Memory::grow).
1605 ///
1606 /// In these situations when the amount being grown is small enough to fit
1607 /// within the remaining capacity then the linear memory doesn't have to be
1608 /// moved at runtime. If the capacity runs out though then a new linear
1609 /// memory allocation must be made and the contents of linear memory is
1610 /// copied over.
1611 ///
1612 /// For example here's a situation where a copy happens:
1613 ///
1614 /// * The `memory_reservation` setting is configured to 128KiB.
1615 /// * A WebAssembly linear memory starts with a single 64KiB page.
1616 /// * This memory can be grown by one page to contain the full 128KiB of
1617 /// memory.
1618 /// * If grown by one more page, though, then a 192KiB allocation must be
1619 /// made and the previous 128KiB of contents are copied into the new
1620 /// allocation.
1621 ///
1622 /// This growth behavior can have a significant performance impact if lots
1623 /// of data needs to be copied on growth. Conversely if memory growth never
1624 /// needs to happen because the capacity will always be large enough then
1625 /// optimizations can be applied to cache the base pointer of linear memory.
1626 ///
1627 /// When memory is grown then the
1628 /// [`Config::memory_reservation_for_growth`] is used for the new
1629 /// memory allocation to have memory to grow into.
1630 ///
1631 /// When using the pooling allocator via [`PoolingAllocationConfig`] then
1632 /// memories are never allowed to move so requests for growth are instead
1633 /// rejected with an error.
1634 ///
1635 /// ## When this setting is not used
1636 ///
1637 /// This setting is ignored and unused when the initial size of linear
1638 /// memory is larger than this threshold. For example if this setting is set
1639 /// to 1MiB but a wasm module requires a 2MiB minimum allocation then this
1640 /// setting is ignored. In this situation the minimum size of memory will be
1641 /// allocated along with [`Config::memory_reservation_for_growth`]
1642 /// after it to grow into.
1643 ///
1644 /// That means that this value can be set to zero. That can be useful in
1645 /// benchmarking to see the overhead of bounds checks for example.
1646 /// Additionally it can be used to minimize the virtual memory allocated by
1647 /// Wasmtime.
1648 ///
1649 /// ## Default Value
1650 ///
1651 /// The default value for this property depends on the host platform. For
1652 /// 64-bit platforms there's lots of address space available, so the default
1653 /// configured here is 4GiB. When coupled with the default size of
1654 /// [`Config::memory_guard_size`] this means that 32-bit WebAssembly linear
1655 /// memories with 64KiB page sizes will skip almost all bounds checks by
1656 /// default.
1657 ///
1658 /// For 32-bit platforms this value defaults to 10MiB. This means that
1659 /// bounds checks will be required on 32-bit platforms.
1660 pub fn memory_reservation(&mut self, bytes: u64) -> &mut Self {
1661 self.tunables.memory_reservation = Some(bytes);
1662 self
1663 }
1664
1665 /// Indicates whether linear memories may relocate their base pointer at
1666 /// runtime.
1667 ///
1668 /// WebAssembly linear memories either have a maximum size that's explicitly
1669 /// listed in the type of a memory or inherently limited by the index type
1670 /// of the memory (e.g. 4GiB for 32-bit linear memories). Depending on how
1671 /// the linear memory is allocated (see [`Config::memory_reservation`]) it
1672 /// may be necessary to move the memory in the host's virtual address space
1673 /// during growth. This option controls whether this movement is allowed or
1674 /// not.
1675 ///
1676 /// An example of a linear memory needing to move is when
1677 /// [`Config::memory_reservation`] is 0 then a linear memory will be
1678 /// allocated as the minimum size of the memory plus
1679 /// [`Config::memory_reservation_for_growth`]. When memory grows beyond the
1680 /// reservation for growth then the memory needs to be relocated.
1681 ///
1682 /// When this option is set to `false` then it can have a number of impacts
1683 /// on how memories work at runtime:
1684 ///
1685 /// * Modules can be compiled with static knowledge the base pointer of
1686 /// linear memory never changes to enable optimizations such as
1687 /// loop invariant code motion (hoisting the base pointer out of a loop).
1688 ///
1689 /// * Memories cannot grow in excess of their original allocation. This
1690 /// means that [`Config::memory_reservation`] and
1691 /// [`Config::memory_reservation_for_growth`] may need tuning to ensure
1692 /// the memory configuration works at runtime.
1693 ///
1694 /// The default value for this option is `true`.
1695 pub fn memory_may_move(&mut self, enable: bool) -> &mut Self {
1696 self.tunables.memory_may_move = Some(enable);
1697 self
1698 }
1699
1700 /// Configures the size, in bytes, of the guard region used at the end of a
1701 /// linear memory's address space reservation.
1702 ///
1703 /// > Note: this value has important performance ramifications, be sure to
1704 /// > understand what this value does before tweaking it and benchmarking.
1705 ///
1706 /// This setting controls how many bytes are guaranteed to be unmapped after
1707 /// the virtual memory allocation of a linear memory. When
1708 /// combined with sufficiently large values of
1709 /// [`Config::memory_reservation`] (e.g. 4GiB for 32-bit linear memories)
1710 /// then a guard region can be used to eliminate bounds checks in generated
1711 /// code.
1712 ///
1713 /// This setting additionally can be used to help deduplicate bounds checks
1714 /// in code that otherwise requires bounds checks. For example with a 4KiB
1715 /// guard region then a 64-bit linear memory which accesses addresses `x+8`
1716 /// and `x+16` only needs to perform a single bounds check on `x`. If that
1717 /// bounds check passes then the offset is guaranteed to either reside in
1718 /// linear memory or the guard region, resulting in deterministic behavior
1719 /// either way.
1720 ///
1721 /// ## How big should the guard be?
1722 ///
1723 /// In general, like with configuring [`Config::memory_reservation`], you
1724 /// probably don't want to change this value from the defaults. Removing
1725 /// bounds checks is dependent on a number of factors where the size of the
1726 /// guard region is only one piece of the equation. Other factors include:
1727 ///
1728 /// * [`Config::memory_reservation`]
1729 /// * The index type of the linear memory (e.g. 32-bit or 64-bit)
1730 /// * The page size of the linear memory
1731 /// * Other settings such as [`Config::signals_based_traps`]
1732 ///
1733 /// Embeddings using virtual memory almost always want at least some guard
1734 /// region, but otherwise changes from the default should be profiled
1735 /// locally to see the performance impact.
1736 ///
1737 /// ## Default
1738 ///
1739 /// The default value for this property is 32MiB on 64-bit platforms. This
1740 /// allows eliminating almost all bounds checks on loads/stores with an
1741 /// immediate offset of less than 32MiB. On 32-bit platforms this defaults
1742 /// to 64KiB.
1743 pub fn memory_guard_size(&mut self, bytes: u64) -> &mut Self {
1744 self.tunables.memory_guard_size = Some(bytes);
1745 self
1746 }
1747
1748 /// Configures the size, in bytes, of the extra virtual memory space
1749 /// reserved after a linear memory is relocated.
1750 ///
1751 /// This setting is used in conjunction with [`Config::memory_reservation`]
1752 /// to configure what happens after a linear memory is relocated in the host
1753 /// address space. If the initial size of a linear memory exceeds
1754 /// [`Config::memory_reservation`] or if it grows beyond that size
1755 /// throughout its lifetime then this setting will be used.
1756 ///
1757 /// When a linear memory is relocated it will initially look like this:
1758 ///
1759 /// ```text
1760 /// memory.size
1761 /// │
1762 /// ◄──────┴─────►
1763 /// ┌───────┬──────────────┬───────┐
1764 /// │ guard │ accessible │ guard │
1765 /// └───────┴──────────────┴───────┘
1766 /// ◄──┬──►
1767 /// │
1768 /// memory_guard_size
1769 /// ```
1770 ///
1771 /// where `accessible` needs to be grown but there's no more memory to grow
1772 /// into. A new region of the virtual address space will be allocated that
1773 /// looks like this:
1774 ///
1775 /// ```text
1776 /// memory_reservation_for_growth
1777 /// │
1778 /// memory.size │
1779 /// │ │
1780 /// ◄──────┴─────► ◄─────────────┴───────────►
1781 /// ┌───────┬──────────────┬───────────────────────────┬───────┐
1782 /// │ guard │ accessible │ .. reserved for growth .. │ guard │
1783 /// └───────┴──────────────┴───────────────────────────┴───────┘
1784 /// ◄──┬──►
1785 /// │
1786 /// memory_guard_size
1787 /// ```
1788 ///
1789 /// This means that up to `memory_reservation_for_growth` bytes can be
1790 /// allocated again before the entire linear memory needs to be moved again
1791 /// when another `memory_reservation_for_growth` bytes will be appended to
1792 /// the size of the allocation.
1793 ///
1794 /// Note that this is a currently simple heuristic for optimizing the growth
1795 /// of dynamic memories, primarily implemented for the memory64 proposal
1796 /// where the maximum size of memory is larger than 4GiB. This setting is
1797 /// unlikely to be a one-size-fits-all style approach and if you're an
1798 /// embedder running into issues with growth and are interested in having
1799 /// other growth strategies available here please feel free to [open an
1800 /// issue on the Wasmtime repository][issue]!
1801 ///
1802 /// [issue]: https://github.com/bytecodealliance/wasmtime/issues/new
1803 ///
1804 /// ## Default
1805 ///
1806 /// For 64-bit platforms this defaults to 2GiB, and for 32-bit platforms
1807 /// this defaults to 1MiB.
1808 pub fn memory_reservation_for_growth(&mut self, bytes: u64) -> &mut Self {
1809 self.tunables.memory_reservation_for_growth = Some(bytes);
1810 self
1811 }
1812
1813 /// Indicates whether a guard region is present before allocations of
1814 /// linear memory.
1815 ///
1816 /// Guard regions before linear memories are never used during normal
1817 /// operation of WebAssembly modules, even if they have out-of-bounds
1818 /// loads. The only purpose for a preceding guard region in linear memory
1819 /// is extra protection against possible bugs in code generators like
1820 /// Cranelift. This setting does not affect performance in any way, but will
1821 /// result in larger virtual memory reservations for linear memories (it
1822 /// won't actually ever use more memory, just use more of the address
1823 /// space).
1824 ///
1825 /// The size of the guard region before linear memory is the same as the
1826 /// guard size that comes after linear memory, which is configured by
1827 /// [`Config::memory_guard_size`].
1828 ///
1829 /// ## Default
1830 ///
1831 /// This value defaults to `true`.
1832 pub fn guard_before_linear_memory(&mut self, enable: bool) -> &mut Self {
1833 self.tunables.guard_before_linear_memory = Some(enable);
1834 self
1835 }
1836
1837 /// Indicates whether to initialize tables lazily, so that instantiation
1838 /// is fast but indirect calls are a little slower. If false, tables
1839 /// are initialized eagerly during instantiation from any active element
1840 /// segments that apply to them.
1841 ///
1842 /// **Note** Disabling this option is not compatible with the Winch compiler.
1843 ///
1844 /// ## Default
1845 ///
1846 /// This value defaults to `true`.
1847 pub fn table_lazy_init(&mut self, table_lazy_init: bool) -> &mut Self {
1848 self.tunables.table_lazy_init = Some(table_lazy_init);
1849 self
1850 }
1851
1852 /// Configure the version information used in serialized and deserialized [`crate::Module`]s.
    /// This affects the behavior of [`crate::Module::serialize()`], as well as
1854 /// [`crate::Module::deserialize()`] and related functions.
1855 ///
1856 /// The default strategy is to use the wasmtime crate's Cargo package version.
1857 pub fn module_version(&mut self, strategy: ModuleVersionStrategy) -> Result<&mut Self> {
1858 match strategy {
1859 // This case requires special precondition for assertion in SerializedModule::to_bytes
1860 ModuleVersionStrategy::Custom(ref v) => {
1861 if v.as_bytes().len() > 255 {
1862 bail!("custom module version cannot be more than 255 bytes: {}", v);
1863 }
1864 }
1865 _ => {}
1866 }
1867 self.module_version = strategy;
1868 Ok(self)
1869 }
1870
1871 /// Configure whether wasmtime should compile a module using multiple
1872 /// threads.
1873 ///
1874 /// Disabling this will result in a single thread being used to compile
1875 /// the wasm bytecode.
1876 ///
1877 /// By default parallel compilation is enabled.
    #[cfg(feature = "parallel-compilation")]
    pub fn parallel_compilation(&mut self, parallel: bool) -> &mut Self {
        // When `false`, compilation uses a single thread even though the
        // `parallel-compilation` crate feature is enabled.
        self.parallel_compilation = parallel;
        self
    }
1883
1884 /// Configures whether compiled artifacts will contain information to map
1885 /// native program addresses back to the original wasm module.
1886 ///
1887 /// This configuration option is `true` by default and, if enabled,
1888 /// generates the appropriate tables in compiled modules to map from native
1889 /// address back to wasm source addresses. This is used for displaying wasm
1890 /// program counters in backtraces as well as generating filenames/line
1891 /// numbers if so configured as well (and the original wasm module has DWARF
1892 /// debugging information present).
1893 pub fn generate_address_map(&mut self, generate: bool) -> &mut Self {
1894 self.tunables.generate_address_map = Some(generate);
1895 self
1896 }
1897
1898 /// Configures whether copy-on-write memory-mapped data is used to
1899 /// initialize a linear memory.
1900 ///
1901 /// Initializing linear memory via a copy-on-write mapping can drastically
1902 /// improve instantiation costs of a WebAssembly module because copying
1903 /// memory is deferred. Additionally if a page of memory is only ever read
    /// from WebAssembly and never written to then the same underlying page of
1905 /// data will be reused between all instantiations of a module meaning that
1906 /// if a module is instantiated many times this can lower the overall memory
    /// required to run that module.
1908 ///
1909 /// The main disadvantage of copy-on-write initialization, however, is that
1910 /// it may be possible for highly-parallel scenarios to be less scalable. If
1911 /// a page is read initially by a WebAssembly module then that page will be
1912 /// mapped to a read-only copy shared between all WebAssembly instances. If
1913 /// the same page is then written, however, then a private copy is created
1914 /// and swapped out from the read-only version. This also requires an [IPI],
1915 /// however, which can be a significant bottleneck in high-parallelism
1916 /// situations.
1917 ///
1918 /// This feature is only applicable when a WebAssembly module meets specific
1919 /// criteria to be initialized in this fashion, such as:
1920 ///
1921 /// * Only memories defined in the module can be initialized this way.
1922 /// * Data segments for memory must use statically known offsets.
1923 /// * Data segments for memory must all be in-bounds.
1924 ///
1925 /// Modules which do not meet these criteria will fall back to
1926 /// initialization of linear memory based on copying memory.
1927 ///
1928 /// This feature of Wasmtime is also platform-specific:
1929 ///
1930 /// * Linux - this feature is supported for all instances of [`Module`].
1931 /// Modules backed by an existing mmap (such as those created by
1932 /// [`Module::deserialize_file`]) will reuse that mmap to cow-initialize
    /// memory. Other instances of [`Module`] may use the `memfd_create`
1934 /// syscall to create an initialization image to `mmap`.
1935 /// * Unix (not Linux) - this feature is only supported when loading modules
1936 /// from a precompiled file via [`Module::deserialize_file`] where there
1937 /// is a file descriptor to use to map data into the process. Note that
1938 /// the module must have been compiled with this setting enabled as well.
1939 /// * Windows - there is no support for this feature at this time. Memory
1940 /// initialization will always copy bytes.
1941 ///
1942 /// By default this option is enabled.
1943 ///
1944 /// [`Module::deserialize_file`]: crate::Module::deserialize_file
1945 /// [`Module`]: crate::Module
1946 /// [IPI]: https://en.wikipedia.org/wiki/Inter-processor_interrupt
1947 pub fn memory_init_cow(&mut self, enable: bool) -> &mut Self {
1948 self.tunables.memory_init_cow = Some(enable);
1949 self
1950 }
1951
1952 /// A configuration option to force the usage of `memfd_create` on Linux to
1953 /// be used as the backing source for a module's initial memory image.
1954 ///
1955 /// When [`Config::memory_init_cow`] is enabled, which is enabled by
1956 /// default, module memory initialization images are taken from a module's
1957 /// original mmap if possible. If a precompiled module was loaded from disk
1958 /// this means that the disk's file is used as an mmap source for the
1959 /// initial linear memory contents. This option can be used to force, on
1960 /// Linux, that instead of using the original file on disk a new in-memory
1961 /// file is created with `memfd_create` to hold the contents of the initial
1962 /// image.
1963 ///
1964 /// This option can be used to avoid possibly loading the contents of memory
1965 /// from disk through a page fault. Instead with `memfd_create` the contents
1966 /// of memory are always in RAM, meaning that even page faults which
1967 /// initially populate a wasm linear memory will only work with RAM instead
1968 /// of ever hitting the disk that the original precompiled module is stored
1969 /// on.
1970 ///
1971 /// This option is disabled by default.
    pub fn force_memory_init_memfd(&mut self, enable: bool) -> &mut Self {
        // The flag is stored unconditionally here; per the doc comment above
        // it only takes effect on Linux where `memfd_create` is available.
        self.force_memory_init_memfd = enable;
        self
    }
1976
1977 /// Configures whether or not a coredump should be generated and attached to
1978 /// the anyhow::Error when a trap is raised.
1979 ///
1980 /// This option is disabled by default.
    #[cfg(feature = "coredump")]
    pub fn coredump_on_trap(&mut self, enable: bool) -> &mut Self {
        // Disabled by default; only available with the `coredump` feature.
        self.coredump_on_trap = enable;
        self
    }
1986
1987 /// Enables memory error checking for wasm programs.
1988 ///
1989 /// This option is disabled by default.
1990 #[cfg(any(feature = "cranelift", feature = "winch"))]
1991 pub fn wmemcheck(&mut self, enable: bool) -> &mut Self {
1992 self.wmemcheck = enable;
1993 self.compiler_config.wmemcheck = enable;
1994 self
1995 }
1996
1997 /// Configures the "guaranteed dense image size" for copy-on-write
1998 /// initialized memories.
1999 ///
2000 /// When using the [`Config::memory_init_cow`] feature to initialize memory
2001 /// efficiently (which is enabled by default), compiled modules contain an
2002 /// image of the module's initial heap. If the module has a fairly sparse
2003 /// initial heap, with just a few data segments at very different offsets,
2004 /// this could result in a large region of zero bytes in the image. In
2005 /// other words, it's not very memory-efficient.
2006 ///
2007 /// We normally use a heuristic to avoid this: if less than half
2008 /// of the initialized range (first non-zero to last non-zero
2009 /// byte) of any memory in the module has pages with nonzero
2010 /// bytes, then we avoid creating a memory image for the entire module.
2011 ///
2012 /// However, if the embedder always needs the instantiation-time efficiency
2013 /// of copy-on-write initialization, and is otherwise carefully controlling
2014 /// parameters of the modules (for example, by limiting the maximum heap
2015 /// size of the modules), then it may be desirable to ensure a memory image
2016 /// is created even if this could go against the heuristic above. Thus, we
2017 /// add another condition: there is a size of initialized data region up to
2018 /// which we *always* allow a memory image. The embedder can set this to a
2019 /// known maximum heap size if they desire to always get the benefits of
2020 /// copy-on-write images.
2021 ///
2022 /// In the future we may implement a "best of both worlds"
2023 /// solution where we have a dense image up to some limit, and
2024 /// then support a sparse list of initializers beyond that; this
2025 /// would get most of the benefit of copy-on-write and pay the incremental
2026 /// cost of eager initialization only for those bits of memory
2027 /// that are out-of-bounds. However, for now, an embedder desiring
2028 /// fast instantiation should ensure that this setting is as large
2029 /// as the maximum module initial memory content size.
2030 ///
2031 /// By default this value is 16 MiB.
    pub fn memory_guaranteed_dense_image_size(&mut self, size_in_bytes: u64) -> &mut Self {
        // Initialized data regions up to this size always receive a dense
        // copy-on-write image, bypassing the sparseness heuristic above.
        self.memory_guaranteed_dense_image_size = size_in_bytes;
        self
    }
2036
2037 /// Whether to enable function inlining during compilation or not.
2038 ///
2039 /// This may result in faster execution at runtime, but adds additional
2040 /// compilation time. Inlining may also enlarge the size of compiled
2041 /// artifacts (for example, the size of the result of
2042 /// [`Engine::precompile_component`]).
2043 ///
2044 /// Inlining is not supported by all of Wasmtime's compilation strategies;
    /// currently, only Cranelift supports it. This setting will be ignored
2046 /// when using a compilation strategy that does not support inlining, like
2047 /// Winch.
2048 ///
2049 /// Note that inlining is still somewhat experimental at the moment (as of
2050 /// the Wasmtime version 36).
2051 pub fn compiler_inlining(&mut self, inlining: bool) -> &mut Self {
2052 self.tunables.inlining = Some(inlining);
2053 self
2054 }
2055
2056 /// Returns the set of features that the currently selected compiler backend
2057 /// does not support at all and may panic on.
2058 ///
2059 /// Wasmtime strives to reject unknown modules or unsupported modules with
2060 /// first-class errors instead of panics. Not all compiler backends have the
2061 /// same level of feature support on all platforms as well. This method
2062 /// returns a set of features that the currently selected compiler
2063 /// configuration is known to not support and may panic on. This acts as a
2064 /// first-level filter on incoming wasm modules/configuration to fail-fast
2065 /// instead of panicking later on.
2066 ///
2067 /// Note that if a feature is not listed here it does not mean that the
2068 /// backend fully supports the proposal. Instead that means that the backend
2069 /// doesn't ever panic on the proposal, but errors during compilation may
2070 /// still be returned. This means that features listed here are definitely
2071 /// not supported at all, but features not listed here may still be
2072 /// partially supported. For example at the time of this writing the Winch
2073 /// backend partially supports simd so it's not listed here. Winch doesn't
2074 /// fully support simd but unimplemented instructions just return errors.
    fn compiler_panicking_wasm_features(&self) -> WasmFeatures {
        // When a compiler backend is compiled in, the `match` below is the
        // tail expression of this function; otherwise the trailing `return`
        // under the inverse `cfg` is the only statement that remains.
        #[cfg(any(feature = "cranelift", feature = "winch"))]
        match self.compiler_config.strategy {
            None | Some(Strategy::Cranelift) => {
                let mut unsupported = WasmFeatures::empty();

                // Pulley at this time fundamentally doesn't support the
                // `threads` proposal, notably shared memory, because Rust can't
                // safely implement loads/stores in the face of shared memory.
                // Stack switching is not implemented, either.
                if self.compiler_target().is_pulley() {
                    unsupported |= WasmFeatures::THREADS;
                    unsupported |= WasmFeatures::STACK_SWITCHING;
                }

                use target_lexicon::*;
                match self.compiler_target() {
                    Triple {
                        architecture: Architecture::X86_64 | Architecture::X86_64h,
                        operating_system:
                            OperatingSystem::Linux
                            | OperatingSystem::MacOSX(_)
                            | OperatingSystem::Darwin(_),
                        ..
                    } => {
                        // Stack switching supported on (non-Pulley) Cranelift.
                    }

                    _ => {
                        // On platforms other than x64 Unix-like, we don't
                        // support stack switching.
                        unsupported |= WasmFeatures::STACK_SWITCHING;
                    }
                }
                unsupported
            }
            Some(Strategy::Winch) => {
                let mut unsupported = WasmFeatures::GC
                    | WasmFeatures::FUNCTION_REFERENCES
                    | WasmFeatures::RELAXED_SIMD
                    | WasmFeatures::TAIL_CALL
                    | WasmFeatures::GC_TYPES
                    | WasmFeatures::EXCEPTIONS
                    | WasmFeatures::LEGACY_EXCEPTIONS
                    | WasmFeatures::STACK_SWITCHING;
                match self.compiler_target().architecture {
                    target_lexicon::Architecture::Aarch64(_) => {
                        unsupported |= WasmFeatures::THREADS;
                        unsupported |= WasmFeatures::WIDE_ARITHMETIC;
                    }

                    // Winch doesn't support other non-x64 architectures at this
                    // time either but will return a first-class error for
                    // them.
                    _ => {}
                }
                unsupported
            }
            // `Auto` is expected to be normalized to a concrete backend
            // before this is called — hence the `unreachable!`.
            Some(Strategy::Auto) => unreachable!(),
        }
        #[cfg(not(any(feature = "cranelift", feature = "winch")))]
        return WasmFeatures::empty();
    }
2138
2139 /// Calculates the set of features that are enabled for this `Config`.
2140 ///
2141 /// This method internally will start with the an empty set of features to
2142 /// avoid being tied to wasmparser's defaults. Next Wasmtime's set of
2143 /// default features are added to this set, some of which are conditional
2144 /// depending on crate features. Finally explicitly requested features via
2145 /// `wasm_*` methods on `Config` are applied. Everything is then validated
2146 /// later in `Config::validate`.
2147 fn features(&self) -> WasmFeatures {
2148 // Wasmtime by default supports all of the wasm 2.0 version of the
2149 // specification.
2150 let mut features = WasmFeatures::WASM2;
2151
2152 // On-by-default features that wasmtime has. Note that these are all
2153 // subject to the criteria at
2154 // https://docs.wasmtime.dev/contributing-implementing-wasm-proposals.html
2155 // and
2156 // https://docs.wasmtime.dev/stability-wasm-proposals.html
2157 features |= WasmFeatures::MULTI_MEMORY;
2158 features |= WasmFeatures::RELAXED_SIMD;
2159 features |= WasmFeatures::TAIL_CALL;
2160 features |= WasmFeatures::EXTENDED_CONST;
2161 features |= WasmFeatures::MEMORY64;
2162 // NB: if you add a feature above this line please double-check
2163 // https://docs.wasmtime.dev/stability-wasm-proposals.html
2164 // to ensure all requirements are met and/or update the documentation
2165 // there too.
2166
2167 // Set some features to their conditionally-enabled defaults depending
2168 // on crate compile-time features.
2169 features.set(WasmFeatures::GC_TYPES, cfg!(feature = "gc"));
2170 features.set(WasmFeatures::THREADS, cfg!(feature = "threads"));
2171 features.set(
2172 WasmFeatures::COMPONENT_MODEL,
2173 cfg!(feature = "component-model"),
2174 );
2175
2176 // From the default set of proposals remove any that the current
2177 // compiler backend may panic on if the module contains them.
2178 features = features & !self.compiler_panicking_wasm_features();
2179
2180 // After wasmtime's defaults are configured then factor in user requests
2181 // and disable/enable features. Note that the enable/disable sets should
2182 // be disjoint.
2183 debug_assert!((self.enabled_features & self.disabled_features).is_empty());
2184 features &= !self.disabled_features;
2185 features |= self.enabled_features;
2186
2187 features
2188 }
2189
2190 /// Returns the configured compiler target for this `Config`.
2191 pub(crate) fn compiler_target(&self) -> target_lexicon::Triple {
2192 // If a target is explicitly configured, always use that.
2193 if let Some(target) = self.target.clone() {
2194 return target;
2195 }
2196
2197 // If the `build.rs` script determined that this platform uses pulley by
2198 // default, then use Pulley.
2199 if cfg!(default_target_pulley) {
2200 return target_lexicon::Triple::pulley_host();
2201 }
2202
2203 // And at this point the target is for sure the host.
2204 target_lexicon::Triple::host()
2205 }
2206
    /// Validates this `Config`, returning the effective `Tunables` and
    /// `WasmFeatures` to use.
    ///
    /// # Errors
    ///
    /// Returns an error when the enabled wasm features aren't supported by
    /// the selected compiler configuration, when stack-size settings are
    /// inconsistent, or when an option requires a crate feature that was
    /// disabled at compile time.
    pub(crate) fn validate(&self) -> Result<(Tunables, WasmFeatures)> {
        let features = self.features();

        // First validate that the selected compiler backend and configuration
        // supports the set of `features` that are enabled. This will help
        // provide more first class errors instead of panics about unsupported
        // features and configurations.
        let unsupported = features & self.compiler_panicking_wasm_features();
        if !unsupported.is_empty() {
            // Report the first offending feature by name for a readable
            // error message.
            for flag in WasmFeatures::FLAGS.iter() {
                if !unsupported.contains(*flag.value()) {
                    continue;
                }
                bail!(
                    "the wasm_{} feature is not supported on this compiler configuration",
                    flag.name().to_lowercase()
                );
            }

            panic!("should have returned an error by now")
        }

        // When async support is enabled the wasm stack must fit within the
        // async stack.
        #[cfg(any(feature = "async", feature = "stack-switching"))]
        if self.async_support && self.max_wasm_stack > self.async_stack_size {
            bail!("max_wasm_stack size cannot exceed the async_stack_size");
        }
        if self.max_wasm_stack == 0 {
            bail!("max_wasm_stack size cannot be zero");
        }
        // The options below require crate features which may have been
        // compiled out of this build; surface first-class errors if so.
        if !cfg!(feature = "wmemcheck") && self.wmemcheck {
            bail!("wmemcheck (memory checker) was requested but is not enabled in this build");
        }

        if !cfg!(feature = "gc") && features.gc_types() {
            bail!("support for GC was disabled at compile time")
        }

        // The exceptions proposal is implemented on top of Wasmtime's GC
        // infrastructure, so it likewise requires the `gc` crate feature.
        if !cfg!(feature = "gc") && features.contains(WasmFeatures::EXCEPTIONS) {
            bail!("exceptions support requires garbage collection (GC) to be enabled in the build");
        }

        let mut tunables = Tunables::default_for_target(&self.compiler_target())?;

        // If no target is explicitly specified then further refine `tunables`
        // for the configuration of this host depending on what platform
        // features were found available at compile time. This means that anyone
        // cross-compiling for a customized host will need to further refine
        // compilation options.
        if self.target.is_none() {
            // If this platform doesn't have native signals then change some
            // defaults to account for that. Note that VM guards are turned off
            // here because that's primarily a feature of eliding
            // bounds-checks. (Within this branch the `cfg!` is `false`, so
            // this assignment disables signals-based traps.)
            if !cfg!(has_native_signals) {
                tunables.signals_based_traps = cfg!(has_native_signals);
                tunables.memory_guard_size = 0;
            }

            // When virtual memory is not available use slightly different
            // defaults for tunables to be more amenable to `MallocMemory`.
            // Note that these can still be overridden by config options.
            if !cfg!(has_virtual_memory) {
                tunables.memory_reservation = 0;
                tunables.memory_reservation_for_growth = 1 << 20; // 1MB
                tunables.memory_init_cow = false;
            }
        }

        // Layer explicitly-configured options on top of the target/host
        // defaults computed above.
        self.tunables.configure(&mut tunables);

        // If we're going to compile with winch, we must use the winch calling convention.
        #[cfg(any(feature = "cranelift", feature = "winch"))]
        {
            tunables.winch_callable = self.compiler_config.strategy == Some(Strategy::Winch);
        }

        // Resolve the configured collector into its environment-level
        // representation; this requires the `gc` crate feature whenever GC
        // types are actually in use.
        tunables.collector = if features.gc_types() {
            #[cfg(feature = "gc")]
            {
                use wasmtime_environ::Collector as EnvCollector;
                Some(match self.collector.try_not_auto()? {
                    Collector::DeferredReferenceCounting => EnvCollector::DeferredReferenceCounting,
                    Collector::Null => EnvCollector::Null,
                    Collector::Auto => unreachable!(),
                })
            }
            #[cfg(not(feature = "gc"))]
            bail!("cannot use GC types: the `gc` feature was disabled at compile time")
        } else {
            None
        };

        Ok((tunables, features))
    }
2301
    /// Builds the instance allocator selected by the configured
    /// `InstanceAllocationStrategy`.
    ///
    /// `tunables` is only consulted by the pooling allocator; the on-demand
    /// allocator ignores it.
    #[cfg(feature = "runtime")]
    pub(crate) fn build_allocator(
        &self,
        tunables: &Tunables,
    ) -> Result<Box<dyn InstanceAllocator + Send + Sync>> {
        // Async stack settings only exist when the `async` feature is
        // enabled; otherwise substitute inert values.
        #[cfg(feature = "async")]
        let (stack_size, stack_zeroing) = (self.async_stack_size, self.async_stack_zeroing);

        #[cfg(not(feature = "async"))]
        let (stack_size, stack_zeroing) = (0, false);

        // Suppress an unused-variable warning when the pooling allocator
        // (the only consumer of `tunables` below) is compiled out.
        let _ = tunables;

        match &self.allocation_strategy {
            InstanceAllocationStrategy::OnDemand => {
                // The leading underscore avoids unused warnings when the
                // `async` feature (and thus the stack-creator hook below) is
                // disabled.
                let mut _allocator = Box::new(OnDemandInstanceAllocator::new(
                    self.mem_creator.clone(),
                    stack_size,
                    stack_zeroing,
                ));
                #[cfg(feature = "async")]
                if let Some(stack_creator) = &self.stack_creator {
                    _allocator.set_stack_creator(stack_creator.clone());
                }
                Ok(_allocator)
            }
            #[cfg(feature = "pooling-allocator")]
            InstanceAllocationStrategy::Pooling(config) => {
                // The pool's stack settings are overridden with the values
                // configured on this `Config` before construction.
                let mut config = config.config;
                config.stack_size = stack_size;
                config.async_stack_zeroing = stack_zeroing;
                Ok(Box::new(crate::runtime::vm::PoolingInstanceAllocator::new(
                    &config, tunables,
                )?))
            }
        }
    }
2339
    /// Builds the GC runtime for this config, returning `Ok(None)` when GC
    /// types are disabled entirely.
    ///
    /// # Errors
    ///
    /// Returns an error if GC types are enabled but the `gc` crate feature
    /// was compiled out, or if no concrete collector can be selected.
    #[cfg(feature = "runtime")]
    pub(crate) fn build_gc_runtime(&self) -> Result<Option<Arc<dyn GcRuntime>>> {
        // No GC runtime is needed if GC types can't be used in the first
        // place.
        if !self.features().gc_types() {
            return Ok(None);
        }

        // GC types are in use but support was compiled out: surface a
        // first-class error rather than panicking later.
        #[cfg(not(feature = "gc"))]
        bail!("cannot create a GC runtime: the `gc` feature was disabled at compile time");

        #[cfg(feature = "gc")]
        // With `gc` enabled but no concrete collector compiled in, every
        // reachable arm below is an `unreachable!` dummy, hence the lint
        // expectation.
        #[cfg_attr(
            not(any(feature = "gc-null", feature = "gc-drc")),
            expect(unreachable_code, reason = "definitions known to be dummy")
        )]
        {
            Ok(Some(match self.collector.try_not_auto()? {
                #[cfg(feature = "gc-drc")]
                Collector::DeferredReferenceCounting => {
                    Arc::new(crate::runtime::vm::DrcCollector::default()) as Arc<dyn GcRuntime>
                }
                #[cfg(not(feature = "gc-drc"))]
                Collector::DeferredReferenceCounting => unreachable!(),

                #[cfg(feature = "gc-null")]
                Collector::Null => {
                    Arc::new(crate::runtime::vm::NullCollector::default()) as Arc<dyn GcRuntime>
                }
                #[cfg(not(feature = "gc-null"))]
                Collector::Null => unreachable!(),

                // `try_not_auto` resolved `Auto` into a concrete collector
                // above.
                Collector::Auto => unreachable!(),
            }))
        }
    }
2374
2375 #[cfg(feature = "runtime")]
2376 pub(crate) fn build_profiler(&self) -> Result<Box<dyn ProfilingAgent>> {
2377 Ok(match self.profiling_strategy {
2378 ProfilingStrategy::PerfMap => profiling_agent::new_perfmap()?,
2379 ProfilingStrategy::JitDump => profiling_agent::new_jitdump()?,
2380 ProfilingStrategy::VTune => profiling_agent::new_vtune()?,
2381 ProfilingStrategy::None => profiling_agent::new_null(),
2382 ProfilingStrategy::Pulley => profiling_agent::new_pulley()?,
2383 })
2384 }
2385
    /// Consumes this config and constructs the configured compiler backend
    /// (Cranelift or Winch).
    ///
    /// `self` is returned alongside the built compiler because compiler
    /// settings and flags in `self.compiler_config` are amended in here
    /// before being applied to the builder.
    ///
    /// # Errors
    ///
    /// Returns an error for contradictory settings, for example attempting
    /// to disable unwind info on Windows, or enabling spectre mitigations
    /// while signals-based traps are disabled.
    #[cfg(any(feature = "cranelift", feature = "winch"))]
    pub(crate) fn build_compiler(
        mut self,
        tunables: &Tunables,
        features: WasmFeatures,
    ) -> Result<(Self, Box<dyn wasmtime_environ::Compiler>)> {
        let target = self.compiler_target();

        // The target passed to the builders below is an `Option<Triple>` where
        // `None` represents the current host with CPU features inferred from
        // the host's CPU itself. The `target` above is not an `Option`, so
        // switch it to `None` in the case that a target wasn't explicitly
        // specified (which indicates no feature inference) and the target
        // matches the host.
        let target_for_builder =
            if self.target.is_none() && target == target_lexicon::Triple::host() {
                None
            } else {
                Some(target.clone())
            };

        let mut compiler = match self.compiler_config.strategy {
            #[cfg(feature = "cranelift")]
            Some(Strategy::Cranelift) => wasmtime_cranelift::builder(target_for_builder)?,
            #[cfg(not(feature = "cranelift"))]
            Some(Strategy::Cranelift) => bail!("cranelift support not compiled in"),
            #[cfg(feature = "winch")]
            Some(Strategy::Winch) => wasmtime_winch::builder(target_for_builder)?,
            #[cfg(not(feature = "winch"))]
            Some(Strategy::Winch) => bail!("winch support not compiled in"),

            // The strategy is resolved to a concrete backend before this
            // point (see `Strategy::not_auto`).
            None | Some(Strategy::Auto) => unreachable!(),
        };

        // Forward the CLIF-output directory configured via `emit_clif`, if
        // any.
        if let Some(path) = &self.compiler_config.clif_dir {
            compiler.clif_dir(path)?;
        }

        // If probestack is enabled for a target, Wasmtime will always use the
        // inline strategy which doesn't require us to define a `__probestack`
        // function or similar.
        self.compiler_config
            .settings
            .insert("probestack_strategy".into(), "inline".into());

        // We enable stack probing by default on all targets.
        // This is required on Windows because of the way Windows
        // commits its stacks, but it's also a good idea on other
        // platforms to ensure guard pages are hit for large frame
        // sizes.
        self.compiler_config
            .flags
            .insert("enable_probestack".into());

        // The current wasm multivalue implementation depends on this.
        // FIXME(#9510) handle this in wasmtime-cranelift instead.
        self.compiler_config
            .flags
            .insert("enable_multi_ret_implicit_sret".into());

        // Apply an explicit unwind-info request, rejecting any conflicting
        // value previously configured through compiler settings.
        if let Some(unwind_requested) = self.native_unwind_info {
            if !self
                .compiler_config
                .ensure_setting_unset_or_given("unwind_info", &unwind_requested.to_string())
            {
                bail!(
                    "incompatible settings requested for Cranelift and Wasmtime `unwind-info` settings"
                );
            }
        }

        // Unwind info is mandatory on Windows; reject attempts to turn it
        // off there.
        if target.operating_system == target_lexicon::OperatingSystem::Windows {
            if !self
                .compiler_config
                .ensure_setting_unset_or_given("unwind_info", "true")
            {
                bail!("`native_unwind_info` cannot be disabled on Windows");
            }
        }

        // We require frame pointers for correct stack walking, which is safety
        // critical in the presence of reference types, and otherwise it is just
        // really bad developer experience to get wrong.
        self.compiler_config
            .settings
            .insert("preserve_frame_pointers".into(), "true".into());

        if !tunables.signals_based_traps {
            let mut ok = self
                .compiler_config
                .ensure_setting_unset_or_given("enable_table_access_spectre_mitigation", "false");
            ok = ok
                && self.compiler_config.ensure_setting_unset_or_given(
                    "enable_heap_access_spectre_mitigation",
                    "false",
                );

            // Right now spectre-mitigated bounds checks will load from zero so
            // if host-based signal handlers are disabled then that's a mismatch
            // and doesn't work right now. Fixing this will require more thought
            // of how to implement the bounds check in spectre-only mode.
            if !ok {
                bail!(
                    "when signals-based traps are disabled then spectre \
                     mitigations must also be disabled"
                );
            }
        }

        // check for incompatible compiler options and set required values
        if features.contains(WasmFeatures::REFERENCE_TYPES) {
            if !self
                .compiler_config
                .ensure_setting_unset_or_given("enable_safepoints", "true")
            {
                bail!(
                    "compiler option 'enable_safepoints' must be enabled when 'reference types' is enabled"
                );
            }
        }

        // The relaxed-simd proposal requires the base simd proposal to also
        // be enabled.
        if features.contains(WasmFeatures::RELAXED_SIMD) && !features.contains(WasmFeatures::SIMD) {
            bail!("cannot disable the simd proposal but enable the relaxed simd proposal");
        }

        // Stack switching requires a platform-specific `stack_switch_model`
        // setting and is rejected on other platforms.
        if features.contains(WasmFeatures::STACK_SWITCHING) {
            use target_lexicon::OperatingSystem;
            let model = match target.operating_system {
                OperatingSystem::Windows => "update_windows_tib",
                OperatingSystem::Linux
                | OperatingSystem::MacOSX(_)
                | OperatingSystem::Darwin(_) => "basic",
                _ => bail!("stack-switching feature not supported on this platform "),
            };

            if !self
                .compiler_config
                .ensure_setting_unset_or_given("stack_switch_model", model)
            {
                bail!(
                    "compiler option 'stack_switch_model' must be set to '{}' on this platform",
                    model
                );
            }
        }

        // Apply compiler settings and flags
        compiler.set_tunables(tunables.clone())?;
        for (k, v) in self.compiler_config.settings.iter() {
            compiler.set(k, v)?;
        }
        for flag in self.compiler_config.flags.iter() {
            compiler.enable(flag)?;
        }

        #[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
        if let Some(cache_store) = &self.compiler_config.cache_store {
            compiler.enable_incremental_compilation(cache_store.clone())?;
        }

        compiler.wmemcheck(self.compiler_config.wmemcheck);

        Ok((self, compiler.build()?))
    }
2550
    /// Internal setting for whether adapter modules for components will have
    /// extra WebAssembly instructions inserted performing more debug checks
    /// than are necessary.
    #[cfg(feature = "component-model")]
    pub fn debug_adapter_modules(&mut self, debug: bool) -> &mut Self {
        self.tunables.debug_adapter_modules = Some(debug);
        self
    }
2559
    /// Enables clif output when compiling a WebAssembly module.
    ///
    /// The configured `path` is treated as a directory and is forwarded to
    /// the compiler backend's `clif_dir` option when the compiler is built.
    #[cfg(any(feature = "cranelift", feature = "winch"))]
    pub fn emit_clif(&mut self, path: &Path) -> &mut Self {
        self.compiler_config.clif_dir = Some(path.to_path_buf());
        self
    }
2566
2567 /// Configures whether, when on macOS, Mach ports are used for exception
2568 /// handling instead of traditional Unix-based signal handling.
2569 ///
2570 /// WebAssembly traps in Wasmtime are implemented with native faults, for
2571 /// example a `SIGSEGV` will occur when a WebAssembly guest accesses
2572 /// out-of-bounds memory. Handling this can be configured to either use Unix
2573 /// signals or Mach ports on macOS. By default Mach ports are used.
2574 ///
2575 /// Mach ports enable Wasmtime to work by default with foreign
2576 /// error-handling systems such as breakpad which also use Mach ports to
2577 /// handle signals. In this situation Wasmtime will continue to handle guest
2578 /// faults gracefully while any non-guest faults will get forwarded to
2579 /// process-level handlers such as breakpad. Some more background on this
2580 /// can be found in #2456.
2581 ///
2582 /// A downside of using mach ports, however, is that they don't interact
2583 /// well with `fork()`. Forking a Wasmtime process on macOS will produce a
2584 /// child process that cannot successfully run WebAssembly. In this
2585 /// situation traditional Unix signal handling should be used as that's
2586 /// inherited and works across forks.
2587 ///
2588 /// If your embedding wants to use a custom error handler which leverages
2589 /// Mach ports and you additionally wish to `fork()` the process and use
2590 /// Wasmtime in the child process that's not currently possible. Please
2591 /// reach out to us if you're in this bucket!
2592 ///
2593 /// This option defaults to `true`, using Mach ports by default.
2594 pub fn macos_use_mach_ports(&mut self, mach_ports: bool) -> &mut Self {
2595 self.macos_use_mach_ports = mach_ports;
2596 self
2597 }
2598
    /// Configures an embedder-provided function, `detect`, which is used to
    /// determine if an ISA-specific feature is available on the current host.
    ///
    /// This function is used to verify that any features enabled for a compiler
    /// backend, such as AVX support on x86\_64, are also available on the host.
    /// It is undefined behavior to execute an AVX instruction on a host that
    /// doesn't support AVX instructions, for example.
    ///
    /// When the `std` feature is active on this crate then this function is
    /// configured to a default implementation that uses the standard library's
    /// feature detection. When the `std` feature is disabled then there is no
    /// default available and this method must be called to configure a feature
    /// probing function.
    ///
    /// The `detect` function provided is given a string name of an ISA feature.
    /// The function should then return:
    ///
    /// * `Some(true)` - indicates that the feature was found on the host and it
    ///   is supported.
    /// * `Some(false)` - the feature name was recognized but it was not
    ///   detected on the host, for example the CPU is too old.
    /// * `None` - the feature name was not recognized and it's not known
    ///   whether it's on the host or not.
    ///
    /// Feature names passed to `detect` match the same feature name used in the
    /// Rust standard library. For example `"sse4.2"` is used on x86\_64.
    ///
    /// # Safety
    ///
    /// This function is `unsafe` because it is undefined behavior to execute
    /// instructions that a host does not support. This means that the result of
    /// `detect` must be correct for memory safe execution at runtime.
    pub unsafe fn detect_host_feature(&mut self, detect: fn(&str) -> Option<bool>) -> &mut Self {
        self.detect_host_feature = Some(detect);
        self
    }
2635
    /// Configures Wasmtime to not use signals-based trap handlers, for example
    /// disables `SIGILL` and `SIGSEGV` handler registration on Unix platforms.
    ///
    /// > **Note:** this option has important performance ramifications, be sure
    /// > to understand the implications. Wasm programs have been measured to
    /// > run up to 2x slower when signals-based traps are disabled.
    ///
    /// Wasmtime will by default leverage signals-based trap handlers (or the
    /// platform equivalent, for example "vectored exception handlers" on
    /// Windows) to make generated code more efficient. For example, when
    /// Wasmtime can use signals-based traps, it can elide explicit bounds
    /// checks for Wasm linear memory accesses, instead relying on virtual
    /// memory guard pages to raise a `SIGSEGV` (on Unix) for out-of-bounds
    /// accesses, which Wasmtime's runtime then catches and handles. Another
    /// example is divide-by-zero: with signals-based traps, Wasmtime can let
    /// the hardware raise a trap when the divisor is zero. Without
    /// signals-based traps, Wasmtime must explicitly emit additional
    /// instructions to check for zero and conditionally branch to a trapping
    /// code path.
    ///
    /// Some environments however may not have access to signal handlers. For
    /// example embedded scenarios may not support virtual memory. Other
    /// environments where Wasmtime is embedded within the surrounding
    /// environment may require that new signal handlers aren't registered due
    /// to the global nature of signal handlers. This option exists to disable
    /// the signal handler registration when required for these scenarios.
    ///
    /// When signals-based trap handlers are disabled, then Wasmtime and its
    /// generated code will *never* rely on segfaults or other
    /// signals. Generated code will be slower because bounds must be explicitly
    /// checked along with other conditions like division by zero.
    ///
    /// The following additional factors can also affect Wasmtime's ability to
    /// elide explicit bounds checks and leverage signals-based traps:
    ///
    /// * The [`Config::memory_reservation`] and [`Config::memory_guard_size`]
    ///   settings
    /// * The index type of the linear memory (e.g. 32-bit or 64-bit)
    /// * The page size of the linear memory
    ///
    /// When this option is disabled, the
    /// `enable_heap_access_spectre_mitigation` and
    /// `enable_table_access_spectre_mitigation` Cranelift settings must also be
    /// disabled. This means that generated code must have spectre mitigations
    /// disabled. This is because spectre mitigations rely on faults from
    /// loading from the null address to implement bounds checks.
    ///
    /// This option defaults to `true`: signals-based trap handlers are enabled
    /// by default.
    ///
    /// > **Note:** Disabling this option is not compatible with the Winch
    /// > compiler.
    pub fn signals_based_traps(&mut self, enable: bool) -> &mut Self {
        // Recorded as `Some` to mark an explicit request; this is layered
        // onto the computed `Tunables` during `Config::validate`.
        self.tunables.signals_based_traps = Some(enable);
        self
    }
2692
    /// Enable/disable GC support in Wasmtime entirely.
    ///
    /// This flag can be used to gate whether GC infrastructure is enabled or
    /// initialized in Wasmtime at all. Wasmtime's GC implementation is required
    /// for the [`Self::wasm_gc`] proposal, [`Self::wasm_function_references`],
    /// and [`Self::wasm_exceptions`] at this time. None of those proposals can
    /// be enabled without also having this option enabled.
    ///
    /// This option defaults to whether the crate `gc` feature is enabled or
    /// not.
    pub fn gc_support(&mut self, enable: bool) -> &mut Self {
        self.wasm_feature(WasmFeatures::GC_TYPES, enable)
    }
2706
    /// Explicitly indicate or not whether the host is using a hardware float
    /// ABI on x86 targets.
    ///
    /// This configuration option is only applicable on the
    /// `x86_64-unknown-none` Rust target and has no effect on other host
    /// targets. The `x86_64-unknown-none` Rust target does not support hardware
    /// floats by default and uses a "soft float" implementation and ABI. This
    /// means that `f32`, for example, is passed in a general-purpose register
    /// between functions instead of a floating-point register. This does not
    /// match Cranelift's ABI for `f32` where it's passed in floating-point
    /// registers. Cranelift does not have support for a "soft float"
    /// implementation where all floating-point operations are lowered to
    /// libcalls.
    ///
    /// This means that for the `x86_64-unknown-none` target the ABI between
    /// Wasmtime's libcalls and the host is incompatible when floats are used.
    /// This further means that, by default, Wasmtime is unable to load native
    /// code when compiled to the `x86_64-unknown-none` target. The purpose of
    /// this option is to explicitly allow loading code and bypass this check.
    ///
    /// Setting this configuration option to `true` indicates that either:
    /// (a) the Rust target is compiled with the hard-float ABI manually via
    /// `-Zbuild-std` and a custom target JSON configuration, or (b) sufficient
    /// x86 features have been enabled in the compiler such that float libcalls
    /// will not be used in Wasmtime. For (a) there is no way in Rust at this
    /// time to detect whether a hard-float or soft-float ABI is in use on
    /// stable Rust, so this manual opt-in is required. For (b) the only
    /// instance where Wasmtime passes a floating-point value in a register
    /// between the host and compiled wasm code is with libcalls.
    ///
    /// Float-based libcalls are only used when the compilation target for a
    /// wasm module has insufficient target features enabled for native
    /// support. For example SSE4.1 is required for the `f32.ceil` WebAssembly
    /// instruction to be compiled to a native instruction. If SSE4.1 is not
    /// enabled then `f32.ceil` is translated to a "libcall" which is
    /// implemented on the host. Float-based libcalls can be avoided with
    /// sufficient target features enabled, for example:
    ///
    /// * `self.cranelift_flag_enable("has_sse3")`
    /// * `self.cranelift_flag_enable("has_ssse3")`
    /// * `self.cranelift_flag_enable("has_sse41")`
    /// * `self.cranelift_flag_enable("has_sse42")`
    /// * `self.cranelift_flag_enable("has_fma")`
    ///
    /// Note that when these features are enabled Wasmtime will perform a
    /// runtime check to determine that the host actually has the feature
    /// present.
    ///
    /// For some more discussion see [#11506].
    ///
    /// [#11506]: https://github.com/bytecodealliance/wasmtime/issues/11506
    ///
    /// # Safety
    ///
    /// This method is not safe because it cannot be detected in Rust right now
    /// whether the host is compiled with a soft or hard float ABI. Additionally
    /// if the host is compiled with a soft float ABI disabling this check does
    /// not ensure that the wasm module in question has zero usage of floats
    /// in the boundary to the host.
    ///
    /// Safely using this method requires one of:
    ///
    /// * The host target is compiled to use hardware floats.
    /// * Wasm modules loaded are compiled with enough x86 Cranelift features
    ///   enabled to avoid float-related hostcalls.
    pub unsafe fn x86_float_abi_ok(&mut self, enable: bool) -> &mut Self {
        // NOTE(review): this just records the opt-in; the load-time check it
        // bypasses (see docs above) is performed elsewhere.
        self.x86_float_abi_ok = Some(enable);
        self
    }
2776}
2777
2778impl Default for Config {
2779 fn default() -> Config {
2780 Config::new()
2781 }
2782}
2783
2784impl fmt::Debug for Config {
2785 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2786 let mut f = f.debug_struct("Config");
2787
2788 // Not every flag in WasmFeatures can be enabled as part of creating
2789 // a Config. This impl gives a complete picture of all WasmFeatures
2790 // enabled, and doesn't require maintenance by hand (which has become out
2791 // of date in the past), at the cost of possible confusion for why
2792 // a flag in this set doesn't have a Config setter.
2793 let features = self.features();
2794 for flag in WasmFeatures::FLAGS.iter() {
2795 f.field(
2796 &format!("wasm_{}", flag.name().to_lowercase()),
2797 &features.contains(*flag.value()),
2798 );
2799 }
2800
2801 f.field("parallel_compilation", &self.parallel_compilation);
2802 #[cfg(any(feature = "cranelift", feature = "winch"))]
2803 {
2804 f.field("compiler_config", &self.compiler_config);
2805 }
2806
2807 self.tunables.format(&mut f);
2808 f.finish()
2809 }
2810}
2811
/// Possible compilation strategies for a wasm module.
///
/// This is used as an argument to the [`Config::strategy`] method.
#[non_exhaustive]
#[derive(PartialEq, Eq, Clone, Debug, Copy)]
pub enum Strategy {
    /// An indicator that the compilation strategy should be automatically
    /// selected.
    ///
    /// This is generally what you want for most projects and indicates that the
    /// `wasmtime` crate itself should make the decision about what the best
    /// code generator for a wasm module is.
    ///
    /// Currently this always defaults to Cranelift, but the default value may
    /// change over time.
    Auto,

    /// Currently the default backend, Cranelift aims to be a reasonably fast
    /// code generator which generates high quality machine code.
    Cranelift,

    /// A baseline compiler for WebAssembly, currently under active development
    /// and not ready for production applications.
    Winch,
}
2837
#[cfg(any(feature = "winch", feature = "cranelift"))]
impl Strategy {
    /// Resolves `Auto` into a concrete backend choice, preferring Cranelift
    /// over Winch when both are compiled in.
    ///
    /// Returns `None` only when `Auto` is requested but neither compiler
    /// backend is available in this build.
    fn not_auto(&self) -> Option<Strategy> {
        match self {
            Strategy::Auto if cfg!(feature = "cranelift") => Some(Strategy::Cranelift),
            Strategy::Auto if cfg!(feature = "winch") => Some(Strategy::Winch),
            Strategy::Auto => None,
            concrete => Some(*concrete),
        }
    }
}
2855
/// Possible garbage collector implementations for Wasm.
///
/// This is used as an argument to the [`Config::collector`] method.
///
/// The properties of Wasmtime's available collectors are summarized in the
/// following table:
///
/// | Collector                   | Collects Garbage[^1] | Latency[^2] | Throughput[^3] | Allocation Speed[^4] | Heap Utilization[^5] |
/// |-----------------------------|----------------------|-------------|----------------|----------------------|----------------------|
/// | `DeferredReferenceCounting` | Yes, but not cycles  | 🙂          | 🙁             | 😐                   | 😐                   |
/// | `Null`                      | No                   | 🙂          | 🙂             | 🙂                   | 🙂                   |
///
/// [^1]: Whether or not the collector is capable of collecting garbage and cyclic garbage.
///
/// [^2]: How long the Wasm program is paused during garbage
///       collections. Shorter is better. In general, better latency implies
///       worse throughput and vice versa.
///
/// [^3]: How fast the Wasm program runs when using this collector. Roughly
///       equivalent to the number of Wasm instructions executed per
///       second. Faster is better. In general, better throughput implies worse
///       latency and vice versa.
///
/// [^4]: How fast can individual objects be allocated?
///
/// [^5]: How many objects can the collector fit into N bytes of memory? That
///       is, how much space for bookkeeping and metadata does this collector
///       require? Less space taken up by metadata means more space for
///       additional objects. Reference counts are larger than mark bits and
///       free lists are larger than bump pointers, for example.
#[non_exhaustive]
#[derive(PartialEq, Eq, Clone, Debug, Copy)]
pub enum Collector {
    /// An indicator that the garbage collector should be automatically
    /// selected.
    ///
    /// This is generally what you want for most projects and indicates that the
    /// `wasmtime` crate itself should make the decision about what the best
    /// collector for a wasm module is.
    ///
    /// Currently this always defaults to the deferred reference-counting
    /// collector, but the default value may change over time.
    Auto,

    /// The deferred reference-counting collector.
    ///
    /// A reference-counting collector, generally trading improved latency for
    /// worsened throughput. However, to avoid the largest overheads of
    /// reference counting, it avoids manipulating reference counts for Wasm
    /// objects on the stack. Instead, it will hold a reference count for an
    /// over-approximation of all objects that are currently on the stack, trace
    /// the stack during collection to find the precise set of on-stack roots,
    /// and decrement the reference count of any object that was in the
    /// over-approximation but not the precise set. This improves throughput,
    /// compared to "pure" reference counting, by performing many fewer
    /// refcount-increment and -decrement operations. The cost is the increased
    /// latency associated with tracing the stack.
    ///
    /// This collector cannot currently collect cycles; they will leak until the
    /// GC heap's store is dropped.
    DeferredReferenceCounting,

    /// The null collector.
    ///
    /// This collector does not actually collect any garbage. It simply
    /// allocates objects until it runs out of memory, at which point further
    /// object allocation attempts will trap.
    ///
    /// This collector is useful for incredibly short-running Wasm instances
    /// where additionally you would rather halt an over-allocating Wasm program
    /// than spend time collecting its garbage to allow it to keep running. It
    /// is also useful for measuring the overheads associated with other
    /// collectors, as this collector imposes as close to zero throughput and
    /// latency overhead as possible.
    Null,
}
2932
2933impl Default for Collector {
2934 fn default() -> Collector {
2935 Collector::Auto
2936 }
2937}
2938
#[cfg(feature = "gc")]
impl Collector {
    /// Resolves `Auto` into a concrete collector choice.
    ///
    /// Prefers the deferred reference-counting collector when the `gc-drc`
    /// feature was compiled in, falling back to the null collector when only
    /// `gc-null` is available. Returns `None` when `Auto` cannot be resolved
    /// because no collector implementation was enabled at compile time.
    /// Non-`Auto` values are returned unchanged.
    fn not_auto(&self) -> Option<Collector> {
        match self {
            Collector::Auto => {
                if cfg!(feature = "gc-drc") {
                    Some(Collector::DeferredReferenceCounting)
                } else if cfg!(feature = "gc-null") {
                    Some(Collector::Null)
                } else {
                    None
                }
            }
            other => Some(*other),
        }
    }

    /// Like `not_auto`, but errors instead of yielding a collector whose
    /// implementation is not compiled into this build.
    ///
    /// Each concrete collector is accepted only when its corresponding Cargo
    /// feature (`gc-drc` or `gc-null`) was enabled at compile time; otherwise
    /// an explanatory error is returned. The `Auto` arm is unreachable here
    /// because `not_auto` never returns it.
    fn try_not_auto(&self) -> Result<Self> {
        match self.not_auto() {
            #[cfg(feature = "gc-drc")]
            Some(c @ Collector::DeferredReferenceCounting) => Ok(c),
            #[cfg(not(feature = "gc-drc"))]
            Some(Collector::DeferredReferenceCounting) => bail!(
                "cannot create an engine using the deferred reference-counting \
                collector because the `gc-drc` feature was not enabled at \
                compile time",
            ),

            #[cfg(feature = "gc-null")]
            Some(c @ Collector::Null) => Ok(c),
            #[cfg(not(feature = "gc-null"))]
            Some(Collector::Null) => bail!(
                "cannot create an engine using the null collector because \
                the `gc-null` feature was not enabled at compile time",
            ),

            Some(Collector::Auto) => unreachable!(),

            None => bail!(
                "cannot create an engine with GC support when none of the \
                collectors are available; enable one of the following \
                features: `gc-drc`, `gc-null`",
            ),
        }
    }
}
2985
/// Possible optimization levels for the Cranelift codegen backend.
#[non_exhaustive]
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum OptLevel {
    /// Skip most optimizations entirely, keeping compilation time as low as
    /// possible.
    None,
    /// Produce the fastest code Cranelift can generate, at the possible
    /// expense of compile time.
    Speed,
    /// Like `speed`, but additionally applies transformations that shrink the
    /// size of the generated code.
    SpeedAndSize,
}
2999
/// Possible register allocator algorithms for the Cranelift codegen backend.
#[non_exhaustive]
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum RegallocAlgorithm {
    /// Produces the best code, potentially at a greater compile-time cost.
    ///
    /// This is a "backtracking" allocator: it may revisit and undo earlier
    /// assignments when it discovers conflicts. That yields better register
    /// utilization — fewer spills and moves — but compile time can grow
    /// super-linearly.
    Backtracking,
    /// Produces acceptable code very quickly.
    ///
    /// A single linear pass over the code, guaranteed to finish in linear
    /// time. (The rest of Cranelift carries no such linear-time guarantee,
    /// however.) Because earlier decisions can never be revisited and
    /// constraints further ahead in the code cannot be anticipated, the
    /// resulting code may contain more spills and moves.
    SinglePass,
}
3022
/// Select which profiling technique to support.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum ProfilingStrategy {
    /// Profiler support is disabled.
    None,

    /// Emit function name information in the "perf map" file format consumed
    /// by `perf` on Linux.
    PerfMap,

    /// Emit profiling information in the "jitdump" file format consumed by
    /// `perf` on Linux.
    JitDump,

    /// Emit profiling information through "ittapi", consumed by `VTune` on
    /// Linux.
    VTune,

    /// Profile Pulley, Wasmtime's interpreter. Note that turning this on at
    /// runtime requires the `profile-pulley` Cargo feature to have been
    /// enabled at compile time.
    Pulley,
}
3044
/// Select how wasm backtrace detailed information is handled.
#[derive(Debug, Clone, Copy)]
pub enum WasmBacktraceDetails {
    /// Support is unconditionally enabled; wasmtime parses and reads debug
    /// information.
    Enable,

    /// Support is disabled; wasmtime does not parse debug information for
    /// backtrace details.
    Disable,

    /// Whether backtrace details are supported is determined by the
    /// `WASMTIME_BACKTRACE_DETAILS` environment variable.
    Environment,
}
3060
/// Describe the tri-state configuration of keys such as MPK or PAGEMAP_SCAN.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)]
pub enum Enabled {
    /// Turn the feature on when the host system is detected to support it,
    /// and leave it disabled otherwise.
    Auto,
    /// Turn the feature on, failing configuration when the feature is not
    /// detected on the host system.
    Yes,
    /// Leave the feature off even when the host system supports it.
    No,
}
3073
3074/// Configuration options used with [`InstanceAllocationStrategy::Pooling`] to
3075/// change the behavior of the pooling instance allocator.
3076///
3077/// This structure has a builder-style API in the same manner as [`Config`] and
3078/// is configured with [`Config::allocation_strategy`].
3079///
3080/// Note that usage of the pooling allocator does not affect compiled
3081/// WebAssembly code. Compiled `*.cwasm` files, for example, are usable both
3082/// with and without the pooling allocator.
3083///
3084/// ## Advantages of Pooled Allocation
3085///
3086/// The main benefit of the pooling allocator is to make WebAssembly
3087/// instantiation both faster and more scalable in terms of parallelism.
3088/// Allocation is faster because virtual memory is already configured and ready
3089/// to go within the pool, there's no need to [`mmap`] (for example on Unix) a
3090/// new region and configure it with guard pages. By avoiding [`mmap`] this
3091/// avoids whole-process virtual memory locks which can improve scalability and
3092/// performance through avoiding this.
3093///
3094/// Additionally with pooled allocation it's possible to create "affine slots"
/// to a particular WebAssembly module or component over time. For example if
/// the same module is instantiated multiple times over time the pooling
/// allocator will, by default, attempt to reuse the same slot. This means that
/// the slot has been pre-configured and can retain virtual memory mappings for
/// a copy-on-write image, for example (see [`Config::memory_init_cow`] for
/// more information).
3100/// This means that in a steady state instance deallocation is a single
3101/// [`madvise`] to reset linear memory to its original contents followed by a
3102/// single (optional) [`mprotect`] during the next instantiation to shrink
3103/// memory back to its original size. Compared to non-pooled allocation this
3104/// avoids the need to [`mmap`] a new region of memory, [`munmap`] it, and
3105/// [`mprotect`] regions too.
3106///
3107/// Another benefit of pooled allocation is that it's possible to configure
3108/// things such that no virtual memory management is required at all in a steady
3109/// state. For example a pooling allocator can be configured with:
3110///
3111/// * [`Config::memory_init_cow`] disabled
3112/// * [`Config::memory_guard_size`] disabled
3113/// * [`Config::memory_reservation`] shrunk to minimal size
3114/// * [`PoolingAllocationConfig::table_keep_resident`] sufficiently large
3115/// * [`PoolingAllocationConfig::linear_memory_keep_resident`] sufficiently large
3116///
3117/// With all these options in place no virtual memory tricks are used at all and
3118/// everything is manually managed by Wasmtime (for example resetting memory is
3119/// a `memset(0)`). This is not as fast in a single-threaded scenario but can
3120/// provide benefits in high-parallelism situations as no virtual memory locks
/// or IPIs need to happen.
3122///
3123/// ## Disadvantages of Pooled Allocation
3124///
3125/// Despite the above advantages to instantiation performance the pooling
3126/// allocator is not enabled by default in Wasmtime. One reason is that the
3127/// performance advantages are not necessarily portable, for example while the
3128/// pooling allocator works on Windows it has not been tuned for performance on
3129/// Windows in the same way it has on Linux.
3130///
3131/// Additionally the main cost of the pooling allocator is that it requires a
3132/// very large reservation of virtual memory (on the order of most of the
3133/// addressable virtual address space). WebAssembly 32-bit linear memories in
3134/// Wasmtime are, by default 4G address space reservations with a small guard
3135/// region both before and after the linear memory. Memories in the pooling
3136/// allocator are contiguous which means that we only need a guard after linear
3137/// memory because the previous linear memory's slot post-guard is our own
3138/// pre-guard. This means that, by default, the pooling allocator uses roughly
3139/// 4G of virtual memory per WebAssembly linear memory slot. 4G of virtual
3140/// memory is 32 bits of a 64-bit address. Many 64-bit systems can only
3141/// actually use 48-bit addresses by default (although this can be extended on
3142/// architectures nowadays too), and of those 48 bits one of them is reserved
3143/// to indicate kernel-vs-userspace. This leaves 47-32=15 bits left,
3144/// meaning you can only have at most 32k slots of linear memories on many
3145/// systems by default. This is a relatively small number and shows how the
3146/// pooling allocator can quickly exhaust all of virtual memory.
3147///
3148/// Another disadvantage of the pooling allocator is that it may keep memory
3149/// alive when nothing is using it. A previously used slot for an instance might
3150/// have paged-in memory that will not get paged out until the
3151/// [`Engine`](crate::Engine) owning the pooling allocator is dropped. While
3152/// suitable for some applications this behavior may not be suitable for all
3153/// applications.
3154///
3155/// Finally the last disadvantage of the pooling allocator is that the
3156/// configuration values for the maximum number of instances, memories, tables,
3157/// etc, must all be fixed up-front. There's not always a clear answer as to
3158/// what these values should be so not all applications may be able to work
3159/// with this constraint.
3160///
3161/// [`madvise`]: https://man7.org/linux/man-pages/man2/madvise.2.html
3162/// [`mprotect`]: https://man7.org/linux/man-pages/man2/mprotect.2.html
3163/// [`mmap`]: https://man7.org/linux/man-pages/man2/mmap.2.html
3164/// [`munmap`]: https://man7.org/linux/man-pages/man2/munmap.2.html
#[cfg(feature = "pooling-allocator")]
#[derive(Debug, Clone, Default)]
pub struct PoolingAllocationConfig {
    // Internal runtime configuration that this builder-style API mutates;
    // each setter method below writes into this structure (or its `limits`).
    config: crate::runtime::vm::PoolingInstanceAllocatorConfig,
}
3170
3171#[cfg(feature = "pooling-allocator")]
3172impl PoolingAllocationConfig {
3173 /// Returns a new configuration builder with all default settings
3174 /// configured.
3175 pub fn new() -> PoolingAllocationConfig {
3176 PoolingAllocationConfig::default()
3177 }
3178
3179 /// Configures the maximum number of "unused warm slots" to retain in the
3180 /// pooling allocator.
3181 ///
3182 /// The pooling allocator operates over slots to allocate from, and each
3183 /// slot is considered "cold" if it's never been used before or "warm" if
3184 /// it's been used by some module in the past. Slots in the pooling
3185 /// allocator additionally track an "affinity" flag to a particular core
3186 /// wasm module. When a module is instantiated into a slot then the slot is
3187 /// considered affine to that module, even after the instance has been
3188 /// deallocated.
3189 ///
3190 /// When a new instance is created then a slot must be chosen, and the
3191 /// current algorithm for selecting a slot is:
3192 ///
3193 /// * If there are slots that are affine to the module being instantiated,
3194 /// then the most recently used slot is selected to be allocated from.
3195 /// This is done to improve reuse of resources such as memory mappings and
3196 /// additionally try to benefit from temporal locality for things like
3197 /// caches.
3198 ///
3199 /// * Otherwise if there are more than N affine slots to other modules, then
3200 /// one of those affine slots is chosen to be allocated. The slot chosen
3201 /// is picked on a least-recently-used basis.
3202 ///
3203 /// * Finally, if there are less than N affine slots to other modules, then
3204 /// the non-affine slots are allocated from.
3205 ///
3206 /// This setting, `max_unused_warm_slots`, is the value for N in the above
3207 /// algorithm. The purpose of this setting is to have a knob over the RSS
3208 /// impact of "unused slots" for a long-running wasm server.
3209 ///
3210 /// If this setting is set to 0, for example, then affine slots are
3211 /// aggressively reused on a least-recently-used basis. A "cold" slot is
3212 /// only used if there are no affine slots available to allocate from. This
3213 /// means that the set of slots used over the lifetime of a program is the
3214 /// same as the maximum concurrent number of wasm instances.
3215 ///
3216 /// If this setting is set to infinity, however, then cold slots are
3217 /// prioritized to be allocated from. This means that the set of slots used
3218 /// over the lifetime of a program will approach
3219 /// [`PoolingAllocationConfig::total_memories`], or the maximum number of
3220 /// slots in the pooling allocator.
3221 ///
3222 /// Wasmtime does not aggressively decommit all resources associated with a
3223 /// slot when the slot is not in use. For example the
3224 /// [`PoolingAllocationConfig::linear_memory_keep_resident`] option can be
3225 /// used to keep memory associated with a slot, even when it's not in use.
3226 /// This means that the total set of used slots in the pooling instance
3227 /// allocator can impact the overall RSS usage of a program.
3228 ///
3229 /// The default value for this option is `100`.
3230 pub fn max_unused_warm_slots(&mut self, max: u32) -> &mut Self {
3231 self.config.max_unused_warm_slots = max;
3232 self
3233 }
3234
3235 /// The target number of decommits to do per batch.
3236 ///
3237 /// This is not precise, as we can queue up decommits at times when we
3238 /// aren't prepared to immediately flush them, and so we may go over this
3239 /// target size occasionally.
3240 ///
3241 /// A batch size of one effectively disables batching.
3242 ///
3243 /// Defaults to `1`.
3244 pub fn decommit_batch_size(&mut self, batch_size: usize) -> &mut Self {
3245 self.config.decommit_batch_size = batch_size;
3246 self
3247 }
3248
3249 /// How much memory, in bytes, to keep resident for async stacks allocated
3250 /// with the pooling allocator.
3251 ///
3252 /// When [`PoolingAllocationConfig::async_stack_zeroing`] is enabled then
3253 /// Wasmtime will reset the contents of async stacks back to zero upon
3254 /// deallocation. This option can be used to perform the zeroing operation
3255 /// with `memset` up to a certain threshold of bytes instead of using system
3256 /// calls to reset the stack to zero.
3257 ///
3258 /// Note that when using this option the memory with async stacks will
3259 /// never be decommitted.
3260 #[cfg(feature = "async")]
3261 pub fn async_stack_keep_resident(&mut self, size: usize) -> &mut Self {
3262 self.config.async_stack_keep_resident = size;
3263 self
3264 }
3265
3266 /// How much memory, in bytes, to keep resident for each linear memory
3267 /// after deallocation.
3268 ///
3269 /// This option is only applicable on Linux and has no effect on other
3270 /// platforms.
3271 ///
3272 /// By default Wasmtime will use `madvise` to reset the entire contents of
3273 /// linear memory back to zero when a linear memory is deallocated. This
3274 /// option can be used to use `memset` instead to set memory back to zero
3275 /// which can, in some configurations, reduce the number of page faults
3276 /// taken when a slot is reused.
3277 pub fn linear_memory_keep_resident(&mut self, size: usize) -> &mut Self {
3278 self.config.linear_memory_keep_resident = size;
3279 self
3280 }
3281
3282 /// How much memory, in bytes, to keep resident for each table after
3283 /// deallocation.
3284 ///
3285 /// This option is only applicable on Linux and has no effect on other
3286 /// platforms.
3287 ///
3288 /// This option is the same as
3289 /// [`PoolingAllocationConfig::linear_memory_keep_resident`] except that it
3290 /// is applicable to tables instead.
3291 pub fn table_keep_resident(&mut self, size: usize) -> &mut Self {
3292 self.config.table_keep_resident = size;
3293 self
3294 }
3295
3296 /// The maximum number of concurrent component instances supported (default
3297 /// is `1000`).
3298 ///
3299 /// This provides an upper-bound on the total size of component
3300 /// metadata-related allocations, along with
3301 /// [`PoolingAllocationConfig::max_component_instance_size`]. The upper bound is
3302 ///
3303 /// ```text
3304 /// total_component_instances * max_component_instance_size
3305 /// ```
3306 ///
3307 /// where `max_component_instance_size` is rounded up to the size and alignment
3308 /// of the internal representation of the metadata.
3309 pub fn total_component_instances(&mut self, count: u32) -> &mut Self {
3310 self.config.limits.total_component_instances = count;
3311 self
3312 }
3313
3314 /// The maximum size, in bytes, allocated for a component instance's
3315 /// `VMComponentContext` metadata.
3316 ///
3317 /// The [`wasmtime::component::Instance`][crate::component::Instance] type
3318 /// has a static size but its internal `VMComponentContext` is dynamically
3319 /// sized depending on the component being instantiated. This size limit
3320 /// loosely correlates to the size of the component, taking into account
3321 /// factors such as:
3322 ///
3323 /// * number of lifted and lowered functions,
3324 /// * number of memories
3325 /// * number of inner instances
3326 /// * number of resources
3327 ///
3328 /// If the allocated size per instance is too small then instantiation of a
3329 /// module will fail at runtime with an error indicating how many bytes were
3330 /// needed.
3331 ///
3332 /// The default value for this is 1MiB.
3333 ///
3334 /// This provides an upper-bound on the total size of component
3335 /// metadata-related allocations, along with
3336 /// [`PoolingAllocationConfig::total_component_instances`]. The upper bound is
3337 ///
3338 /// ```text
3339 /// total_component_instances * max_component_instance_size
3340 /// ```
3341 ///
3342 /// where `max_component_instance_size` is rounded up to the size and alignment
3343 /// of the internal representation of the metadata.
3344 pub fn max_component_instance_size(&mut self, size: usize) -> &mut Self {
3345 self.config.limits.component_instance_size = size;
3346 self
3347 }
3348
3349 /// The maximum number of core instances a single component may contain
3350 /// (default is unlimited).
3351 ///
3352 /// This method (along with
3353 /// [`PoolingAllocationConfig::max_memories_per_component`],
3354 /// [`PoolingAllocationConfig::max_tables_per_component`], and
3355 /// [`PoolingAllocationConfig::max_component_instance_size`]) allows you to cap
3356 /// the amount of resources a single component allocation consumes.
3357 ///
3358 /// If a component will instantiate more core instances than `count`, then
3359 /// the component will fail to instantiate.
3360 pub fn max_core_instances_per_component(&mut self, count: u32) -> &mut Self {
3361 self.config.limits.max_core_instances_per_component = count;
3362 self
3363 }
3364
3365 /// The maximum number of Wasm linear memories that a single component may
3366 /// transitively contain (default is unlimited).
3367 ///
3368 /// This method (along with
3369 /// [`PoolingAllocationConfig::max_core_instances_per_component`],
3370 /// [`PoolingAllocationConfig::max_tables_per_component`], and
3371 /// [`PoolingAllocationConfig::max_component_instance_size`]) allows you to cap
3372 /// the amount of resources a single component allocation consumes.
3373 ///
3374 /// If a component transitively contains more linear memories than `count`,
3375 /// then the component will fail to instantiate.
3376 pub fn max_memories_per_component(&mut self, count: u32) -> &mut Self {
3377 self.config.limits.max_memories_per_component = count;
3378 self
3379 }
3380
3381 /// The maximum number of tables that a single component may transitively
3382 /// contain (default is unlimited).
3383 ///
3384 /// This method (along with
3385 /// [`PoolingAllocationConfig::max_core_instances_per_component`],
3386 /// [`PoolingAllocationConfig::max_memories_per_component`],
3387 /// [`PoolingAllocationConfig::max_component_instance_size`]) allows you to cap
3388 /// the amount of resources a single component allocation consumes.
3389 ///
3390 /// If a component will transitively contains more tables than `count`, then
3391 /// the component will fail to instantiate.
3392 pub fn max_tables_per_component(&mut self, count: u32) -> &mut Self {
3393 self.config.limits.max_tables_per_component = count;
3394 self
3395 }
3396
3397 /// The maximum number of concurrent Wasm linear memories supported (default
3398 /// is `1000`).
3399 ///
3400 /// This value has a direct impact on the amount of memory allocated by the pooling
3401 /// instance allocator.
3402 ///
3403 /// The pooling instance allocator allocates a memory pool, where each entry
3404 /// in the pool contains the reserved address space for each linear memory
3405 /// supported by an instance.
3406 ///
3407 /// The memory pool will reserve a large quantity of host process address
3408 /// space to elide the bounds checks required for correct WebAssembly memory
3409 /// semantics. Even with 64-bit address spaces, the address space is limited
3410 /// when dealing with a large number of linear memories.
3411 ///
3412 /// For example, on Linux x86_64, the userland address space limit is 128
3413 /// TiB. That might seem like a lot, but each linear memory will *reserve* 6
3414 /// GiB of space by default.
3415 pub fn total_memories(&mut self, count: u32) -> &mut Self {
3416 self.config.limits.total_memories = count;
3417 self
3418 }
3419
3420 /// The maximum number of concurrent tables supported (default is `1000`).
3421 ///
3422 /// This value has a direct impact on the amount of memory allocated by the
3423 /// pooling instance allocator.
3424 ///
3425 /// The pooling instance allocator allocates a table pool, where each entry
3426 /// in the pool contains the space needed for each WebAssembly table
3427 /// supported by an instance (see `table_elements` to control the size of
3428 /// each table).
3429 pub fn total_tables(&mut self, count: u32) -> &mut Self {
3430 self.config.limits.total_tables = count;
3431 self
3432 }
3433
3434 /// The maximum number of execution stacks allowed for asynchronous
3435 /// execution, when enabled (default is `1000`).
3436 ///
3437 /// This value has a direct impact on the amount of memory allocated by the
3438 /// pooling instance allocator.
3439 #[cfg(feature = "async")]
3440 pub fn total_stacks(&mut self, count: u32) -> &mut Self {
3441 self.config.limits.total_stacks = count;
3442 self
3443 }
3444
3445 /// The maximum number of concurrent core instances supported (default is
3446 /// `1000`).
3447 ///
3448 /// This provides an upper-bound on the total size of core instance
3449 /// metadata-related allocations, along with
3450 /// [`PoolingAllocationConfig::max_core_instance_size`]. The upper bound is
3451 ///
3452 /// ```text
3453 /// total_core_instances * max_core_instance_size
3454 /// ```
3455 ///
3456 /// where `max_core_instance_size` is rounded up to the size and alignment of
3457 /// the internal representation of the metadata.
3458 pub fn total_core_instances(&mut self, count: u32) -> &mut Self {
3459 self.config.limits.total_core_instances = count;
3460 self
3461 }
3462
3463 /// The maximum size, in bytes, allocated for a core instance's `VMContext`
3464 /// metadata.
3465 ///
3466 /// The [`Instance`][crate::Instance] type has a static size but its
3467 /// `VMContext` metadata is dynamically sized depending on the module being
3468 /// instantiated. This size limit loosely correlates to the size of the Wasm
3469 /// module, taking into account factors such as:
3470 ///
3471 /// * number of functions
3472 /// * number of globals
3473 /// * number of memories
3474 /// * number of tables
3475 /// * number of function types
3476 ///
3477 /// If the allocated size per instance is too small then instantiation of a
3478 /// module will fail at runtime with an error indicating how many bytes were
3479 /// needed.
3480 ///
3481 /// The default value for this is 1MiB.
3482 ///
3483 /// This provides an upper-bound on the total size of core instance
3484 /// metadata-related allocations, along with
3485 /// [`PoolingAllocationConfig::total_core_instances`]. The upper bound is
3486 ///
3487 /// ```text
3488 /// total_core_instances * max_core_instance_size
3489 /// ```
3490 ///
3491 /// where `max_core_instance_size` is rounded up to the size and alignment of
3492 /// the internal representation of the metadata.
3493 pub fn max_core_instance_size(&mut self, size: usize) -> &mut Self {
3494 self.config.limits.core_instance_size = size;
3495 self
3496 }
3497
3498 /// The maximum number of defined tables for a core module (default is `1`).
3499 ///
3500 /// This value controls the capacity of the `VMTableDefinition` table in
3501 /// each instance's `VMContext` structure.
3502 ///
3503 /// The allocated size of the table will be `tables *
3504 /// sizeof(VMTableDefinition)` for each instance regardless of how many
3505 /// tables are defined by an instance's module.
3506 pub fn max_tables_per_module(&mut self, tables: u32) -> &mut Self {
3507 self.config.limits.max_tables_per_module = tables;
3508 self
3509 }
3510
3511 /// The maximum table elements for any table defined in a module (default is
3512 /// `20000`).
3513 ///
3514 /// If a table's minimum element limit is greater than this value, the
3515 /// module will fail to instantiate.
3516 ///
3517 /// If a table's maximum element limit is unbounded or greater than this
3518 /// value, the maximum will be `table_elements` for the purpose of any
3519 /// `table.grow` instruction.
3520 ///
3521 /// This value is used to reserve the maximum space for each supported
3522 /// table; table elements are pointer-sized in the Wasmtime runtime.
3523 /// Therefore, the space reserved for each instance is `tables *
3524 /// table_elements * sizeof::<*const ()>`.
3525 pub fn table_elements(&mut self, elements: usize) -> &mut Self {
3526 self.config.limits.table_elements = elements;
3527 self
3528 }
3529
3530 /// The maximum number of defined linear memories for a module (default is
3531 /// `1`).
3532 ///
3533 /// This value controls the capacity of the `VMMemoryDefinition` table in
3534 /// each core instance's `VMContext` structure.
3535 ///
3536 /// The allocated size of the table will be `memories *
3537 /// sizeof(VMMemoryDefinition)` for each core instance regardless of how
3538 /// many memories are defined by the core instance's module.
3539 pub fn max_memories_per_module(&mut self, memories: u32) -> &mut Self {
3540 self.config.limits.max_memories_per_module = memories;
3541 self
3542 }
3543
3544 /// The maximum byte size that any WebAssembly linear memory may grow to.
3545 ///
3546 /// This option defaults to 4 GiB meaning that for 32-bit linear memories
3547 /// there is no restrictions. 64-bit linear memories will not be allowed to
3548 /// grow beyond 4 GiB by default.
3549 ///
3550 /// If a memory's minimum size is greater than this value, the module will
3551 /// fail to instantiate.
3552 ///
3553 /// If a memory's maximum size is unbounded or greater than this value, the
3554 /// maximum will be `max_memory_size` for the purpose of any `memory.grow`
3555 /// instruction.
3556 ///
3557 /// This value is used to control the maximum accessible space for each
3558 /// linear memory of a core instance. This can be thought of as a simple
3559 /// mechanism like [`Store::limiter`](crate::Store::limiter) to limit memory
3560 /// at runtime. This value can also affect striping/coloring behavior when
3561 /// used in conjunction with
3562 /// [`memory_protection_keys`](PoolingAllocationConfig::memory_protection_keys).
3563 ///
3564 /// The virtual memory reservation size of each linear memory is controlled
3565 /// by the [`Config::memory_reservation`] setting and this method's
3566 /// configuration cannot exceed [`Config::memory_reservation`].
3567 pub fn max_memory_size(&mut self, bytes: usize) -> &mut Self {
3568 self.config.limits.max_memory_size = bytes;
3569 self
3570 }
3571
    /// Configures whether memory protection keys (MPK) should be used for more
    /// efficient layout of pool-allocated memories.
    ///
    /// When using the pooling allocator (see [`Config::allocation_strategy`],
    /// [`InstanceAllocationStrategy::Pooling`]), memory protection keys can
    /// reduce the total amount of allocated virtual memory by eliminating guard
    /// regions between WebAssembly memories in the pool. It does so by
    /// "coloring" memory regions with different memory keys and setting which
    /// regions are accessible each time execution switches from host to guest
    /// (or vice versa).
    ///
    /// Leveraging MPK requires configuring a smaller-than-default
    /// [`max_memory_size`](PoolingAllocationConfig::max_memory_size) to enable
    /// this coloring/striping behavior. For example embeddings might want to
    /// reduce the default 4G allowance to 128M.
    ///
    /// MPK is only available on Linux (called `pku` there) and recent x86
    /// systems; we check for MPK support at runtime by examining the `CPUID`
    /// register. This configuration setting can be in three states:
    ///
    /// - `auto`: if MPK support is available the guard regions are removed; if
    ///   not, the guard regions remain
    /// - `yes`: use MPK to eliminate guard regions; fail if MPK is not
    ///   supported
    /// - `no`: never use MPK
    ///
    /// By default this value is `no`, but may become `auto` in future
    /// releases.
    ///
    /// __WARNING__: this configuration option is still experimental--use at
    /// your own risk! MPK uses kernel and CPU features to protect memory
    /// regions; you may observe segmentation faults if anything is
    /// misconfigured.
    #[cfg(feature = "memory-protection-keys")]
    pub fn memory_protection_keys(&mut self, enable: Enabled) -> &mut Self {
        self.config.memory_protection_keys = enable;
        self
    }
3610
    /// Sets an upper limit on how many memory protection keys (MPK) Wasmtime
    /// will use.
    ///
    /// This setting is only applicable when
    /// [`PoolingAllocationConfig::memory_protection_keys`] is set to `yes`
    /// or `auto`. Configuring this above the HW and OS limits (typically 15)
    /// has no effect.
    ///
    /// If multiple Wasmtime engines are used in the same process, note that all
    /// engines will share the same set of allocated keys; this setting will
    /// limit how many keys are allocated initially and thus available to all
    /// other engines.
    #[cfg(feature = "memory-protection-keys")]
    pub fn max_memory_protection_keys(&mut self, max: usize) -> &mut Self {
        self.config.max_memory_protection_keys = max;
        self
    }
3628
    /// Check if memory protection keys (MPK) are available on the current host.
    ///
    /// This is a convenience method for determining MPK availability using the
    /// same method that [`Enabled::Auto`] does. See
    /// [`PoolingAllocationConfig::memory_protection_keys`] for more
    /// information.
    #[cfg(feature = "memory-protection-keys")]
    pub fn are_memory_protection_keys_available() -> bool {
        // Delegates to the runtime's host probe for MPK support.
        crate::runtime::vm::mpk::is_supported()
    }
3639
    /// The maximum number of concurrent GC heaps supported (default is `1000`).
    ///
    /// This value has a direct impact on the amount of memory allocated by the
    /// pooling instance allocator.
    ///
    /// The pooling instance allocator allocates a GC heap pool, where each
    /// entry in the pool contains the space needed for each GC heap used by a
    /// store.
    #[cfg(feature = "gc")]
    pub fn total_gc_heaps(&mut self, count: u32) -> &mut Self {
        // Recorded in the pooling limits consumed when the allocator is built.
        self.config.limits.total_gc_heaps = count;
        self
    }
3653
    /// Configures whether the Linux-specific [`PAGEMAP_SCAN` ioctl][ioctl] is
    /// used to help reset linear memory.
    ///
    /// When [`Self::linear_memory_keep_resident`] or
    /// [`Self::table_keep_resident`] options are configured to nonzero values
    /// the default behavior is to `memset` the lowest addresses of a table or
    /// memory back to their original contents. With the `PAGEMAP_SCAN` ioctl on
    /// Linux this can be done to more intelligently scan for resident pages in
    /// the region and only reset those pages back to their original contents
    /// with `memset` rather than assuming the low addresses are all resident.
    ///
    /// This ioctl has the potential to provide a number of performance benefits
    /// in high-reuse and high concurrency scenarios. Notably this enables
    /// Wasmtime to scan the entire region of WebAssembly linear memory and
    /// manually reset memory back to its original contents, up to
    /// [`Self::linear_memory_keep_resident`] bytes, possibly skipping an
    /// `madvise` entirely. This can be more efficient by avoiding removing
    /// pages from the address space entirely and additionally ensuring that
    /// future use of the linear memory doesn't incur page faults as the pages
    /// remain resident.
    ///
    /// At this time this configuration option is still being evaluated as to
    /// how appropriate it is for all use cases. It currently defaults to
    /// `no` (disabled), but may change to `auto` (enable if supported) in a
    /// future release. This option is only supported on Linux and requires a
    /// kernel version of 6.7 or higher.
    ///
    /// [ioctl]: https://www.man7.org/linux/man-pages/man2/PAGEMAP_SCAN.2const.html
    pub fn pagemap_scan(&mut self, enable: Enabled) -> &mut Self {
        self.config.pagemap_scan = enable;
        self
    }
3686
    /// Tests whether the `PAGEMAP_SCAN` ioctl used by [`Self::pagemap_scan`]
    /// is available on the host system.
    pub fn is_pagemap_scan_available() -> bool {
        crate::runtime::vm::PoolingInstanceAllocatorConfig::is_pagemap_scan_available()
    }
3692}
3693
/// Attempt to detect whether the host CPU supports the target feature named
/// `feature`.
///
/// Returns `Some(true)`/`Some(false)` when the feature's presence could be
/// determined for the current architecture, and `None` when the feature name
/// is not recognized (or detection is not implemented for this architecture).
#[cfg(feature = "std")]
fn detect_host_feature(feature: &str) -> Option<bool> {
    #[cfg(target_arch = "aarch64")]
    {
        return match feature {
            "lse" => Some(std::arch::is_aarch64_feature_detected!("lse")),
            "paca" => Some(std::arch::is_aarch64_feature_detected!("paca")),
            "fp16" => Some(std::arch::is_aarch64_feature_detected!("fp16")),

            _ => None,
        };
    }

    // `is_s390x_feature_detected` is nightly only for now, so use the
    // STORE FACILITY LIST EXTENDED instruction as a temporary measure.
    #[cfg(target_arch = "s390x")]
    {
        let mut facility_list: [u64; 4] = [0; 4];
        unsafe {
            core::arch::asm!(
                "stfle 0({})",
                in(reg_addr) facility_list.as_mut_ptr() ,
                inout("r0") facility_list.len() as u64 - 1 => _,
                options(nostack)
            );
        }
        let get_facility_bit = |n: usize| {
            // NOTE: bits are numbered from the left.
            facility_list[n / 64] & (1 << (63 - (n % 64))) != 0
        };

        return match feature {
            "mie3" => Some(get_facility_bit(61)),
            "mie4" => Some(get_facility_bit(84)),
            "vxrs_ext2" => Some(get_facility_bit(148)),
            "vxrs_ext3" => Some(get_facility_bit(198)),

            _ => None,
        };
    }

    #[cfg(target_arch = "riscv64")]
    {
        // `is_riscv64_feature_detected` is not yet stable, so it cannot be
        // used here. For now lie and say all features are always found to
        // keep tests working.
        let _ = feature;
        return Some(true);
    }

    #[cfg(target_arch = "x86_64")]
    {
        return match feature {
            "cmpxchg16b" => Some(std::is_x86_feature_detected!("cmpxchg16b")),
            "sse3" => Some(std::is_x86_feature_detected!("sse3")),
            "ssse3" => Some(std::is_x86_feature_detected!("ssse3")),
            "sse4.1" => Some(std::is_x86_feature_detected!("sse4.1")),
            "sse4.2" => Some(std::is_x86_feature_detected!("sse4.2")),
            "popcnt" => Some(std::is_x86_feature_detected!("popcnt")),
            "avx" => Some(std::is_x86_feature_detected!("avx")),
            "avx2" => Some(std::is_x86_feature_detected!("avx2")),
            "fma" => Some(std::is_x86_feature_detected!("fma")),
            "bmi1" => Some(std::is_x86_feature_detected!("bmi1")),
            "bmi2" => Some(std::is_x86_feature_detected!("bmi2")),
            "avx512bitalg" => Some(std::is_x86_feature_detected!("avx512bitalg")),
            "avx512dq" => Some(std::is_x86_feature_detected!("avx512dq")),
            "avx512f" => Some(std::is_x86_feature_detected!("avx512f")),
            "avx512vl" => Some(std::is_x86_feature_detected!("avx512vl")),
            "avx512vbmi" => Some(std::is_x86_feature_detected!("avx512vbmi")),
            "lzcnt" => Some(std::is_x86_feature_detected!("lzcnt")),

            _ => None,
        };
    }

    // Fallback for architectures with no detection support above: report
    // every feature as "unknown".
    #[allow(
        unreachable_code,
        reason = "reachable or not depending on if a target above matches"
    )]
    {
        let _ = feature;
        return None;
    }
}
3778}