// wasmtime/runtime/component/func/typed.rs
1use crate::component::Instance;
2use crate::component::func::{Func, LiftContext, LowerContext};
3use crate::component::matching::InstanceType;
4use crate::component::storage::{storage_as_slice, storage_as_slice_mut};
5use crate::prelude::*;
6use crate::{AsContextMut, StoreContext, StoreContextMut, ValRaw};
7use alloc::borrow::Cow;
8use core::fmt;
9use core::iter;
10use core::marker;
11use core::mem::{self, MaybeUninit};
12use core::str;
13use wasmtime_environ::component::{
14 CanonicalAbiInfo, InterfaceType, MAX_FLAT_PARAMS, MAX_FLAT_RESULTS, OptionsIndex,
15 StringEncoding, VariantInfo,
16};
17
18#[cfg(feature = "component-model-async")]
19use crate::component::concurrent::{self, AsAccessor, PreparedCall};
20#[cfg(feature = "component-model-async")]
21use crate::component::func::TaskExit;
22
/// A statically-typed version of [`Func`] which takes `Params` as input and
/// returns `Return`.
///
/// This is an efficient way to invoke a WebAssembly component where if the
/// inputs and output are statically known this can eschew the vast majority of
/// machinery and checks when calling WebAssembly. This is the most optimized
/// way to call a WebAssembly component.
///
/// Note that like [`Func`] this is a pointer within a [`Store`](crate::Store)
/// and usage will panic if used with the wrong store.
///
/// This type is primarily created with the [`Func::typed`] API.
///
/// See [`ComponentType`] for more information about supported types.
pub struct TypedFunc<Params, Return> {
    // The untyped function this typed view wraps; all call paths below bottom
    // out in methods on this `Func`.
    func: Func,

    // The definition of this field is somewhat subtle and may be surprising.
    // Naively one might expect something like
    //
    //      _marker: marker::PhantomData<fn(Params) -> Return>,
    //
    // Since this is a function pointer after all. The problem with this
    // definition though is that it imposes the wrong variance on `Params` from
    // what we want. Abstractly a `fn(Params)` is able to store `Params` within
    // it meaning you can only give it `Params` that live longer than the
    // function pointer.
    //
    // With a component model function, however, we're always copying data from
    // the host into the guest, so we are never storing pointers to `Params`
    // into the guest outside the duration of a `call`, meaning we can actually
    // accept values in `TypedFunc::call` which live for a shorter duration
    // than the `Params` argument on the struct.
    //
    // This all means that we don't use a phantom function pointer, but instead
    // feign phantom storage here to get the variance desired.
    _marker: marker::PhantomData<(Params, Return)>,
}
61
// A `TypedFunc` is just a `Func` plus phantom data, so it is trivially
// copyable regardless of whether `Params`/`Return` are themselves `Copy`.
impl<Params, Return> Copy for TypedFunc<Params, Return> {}
63
64impl<Params, Return> Clone for TypedFunc<Params, Return> {
65 fn clone(&self) -> TypedFunc<Params, Return> {
66 *self
67 }
68}
69
70impl<Params, Return> TypedFunc<Params, Return>
71where
72 Params: ComponentNamedList + Lower,
73 Return: ComponentNamedList + Lift,
74{
75 /// Creates a new [`TypedFunc`] from the provided component [`Func`],
76 /// unsafely asserting that the underlying function takes `Params` as
77 /// input and returns `Return`.
78 ///
79 /// # Unsafety
80 ///
81 /// This is an unsafe function because it does not verify that the [`Func`]
82 /// provided actually implements this signature. It's up to the caller to
83 /// have performed some other sort of check to ensure that the signature is
84 /// correct.
85 pub unsafe fn new_unchecked(func: Func) -> TypedFunc<Params, Return> {
86 TypedFunc {
87 _marker: marker::PhantomData,
88 func,
89 }
90 }
91
92 /// Returns the underlying un-typed [`Func`] that this [`TypedFunc`]
93 /// references.
94 pub fn func(&self) -> &Func {
95 &self.func
96 }
97
98 /// Calls the underlying WebAssembly component function using the provided
99 /// `params` as input.
100 ///
101 /// This method is used to enter into a component. Execution happens within
102 /// the `store` provided. The `params` are copied into WebAssembly memory
103 /// as appropriate and a core wasm function is invoked.
104 ///
105 /// # Post-return
106 ///
107 /// In the component model each function can have a "post return" specified
108 /// which allows cleaning up the arguments returned to the host. For example
109 /// if WebAssembly returns a string to the host then it might be a uniquely
110 /// allocated string which, after the host finishes processing it, needs to
111 /// be deallocated in the wasm instance's own linear memory to prevent
112 /// memory leaks in wasm itself. The `post-return` canonical abi option is
113 /// used to configured this.
114 ///
115 /// If a post-return function is present, it will be called automatically by
116 /// this function.
117 ///
118 /// # Errors
119 ///
120 /// This function can return an error for a number of reasons:
121 ///
122 /// * If the wasm itself traps during execution.
123 /// * If the wasm traps while copying arguments into memory.
124 /// * If the wasm provides bad allocation pointers when copying arguments
125 /// into memory.
126 /// * If the wasm returns a value which violates the canonical ABI.
127 /// * If this function's instances cannot be entered, for example if the
128 /// instance is currently calling a host function.
129 /// * If `store` requires using [`Self::call_async`] instead, see
130 /// [crate documentation](crate#async) for more info.
131 ///
132 /// In general there are many ways that things could go wrong when copying
133 /// types in and out of a wasm module with the canonical ABI, and certain
134 /// error conditions are specific to certain types. For example a
135 /// WebAssembly module can't return an invalid `char`. When allocating space
136 /// for this host to copy a string into the returned pointer must be
137 /// in-bounds in memory.
138 ///
139 /// If an error happens then the error should contain detailed enough
140 /// information to understand which part of the canonical ABI went wrong
141 /// and what to inspect.
142 ///
143 /// # Panics
144 ///
145 /// Panics if `store` does not own this function.
146 pub fn call(&self, mut store: impl AsContextMut, params: Params) -> Result<Return> {
147 let mut store = store.as_context_mut();
148 store.0.validate_sync_call()?;
149 self.call_impl(store.as_context_mut(), params)
150 }
151
152 /// Exactly like [`Self::call`], except for invoking WebAssembly
153 /// [asynchronously](crate#async).
154 ///
155 /// # Panics
156 ///
157 /// Panics if `store` does not own this function.
158 #[cfg(feature = "async")]
159 pub async fn call_async(
160 &self,
161 mut store: impl AsContextMut<Data: Send>,
162 params: Params,
163 ) -> Result<Return>
164 where
165 Return: 'static,
166 {
167 let mut store = store.as_context_mut();
168
169 #[cfg(feature = "component-model-async")]
170 if store.0.concurrency_support() {
171 use crate::component::concurrent::TaskId;
172 use crate::runtime::vm::SendSyncPtr;
173 use core::ptr::NonNull;
174
175 let ptr = SendSyncPtr::from(NonNull::from(¶ms).cast::<u8>());
176 let prepared =
177 self.prepare_call(store.as_context_mut(), true, move |cx, ty, dst| {
178 // SAFETY: The goal here is to get `Params`, a non-`'static`
179 // value, to live long enough to the lowering of the
180 // parameters. We're guaranteed that `Params` lives in the
181 // future of the outer function (we're in an `async fn`) so it'll
182 // stay alive as long as the future itself. That is distinct,
183 // for example, from the signature of `call_concurrent` below.
184 //
185 // Here a pointer to `Params` is smuggled to this location
186 // through a `SendSyncPtr<u8>` to thwart the `'static` check
187 // of rustc and the signature of `prepare_call`.
188 //
189 // Note the use of `SignalOnDrop` in the code that follows
190 // this closure, which ensures that the task will be removed
191 // from the concurrent state to which it belongs when the
192 // containing `Future` is dropped, so long as the parameters
193 // have not yet been lowered. Since this closure is removed from
194 // the task after the parameters are lowered, it will never be called
195 // after the containing `Future` is dropped.
196 let params = unsafe { ptr.cast::<Params>().as_ref() };
197 Self::lower_args(cx, ty, dst, params)
198 })?;
199
200 struct SignalOnDrop<'a, T: 'static> {
201 store: StoreContextMut<'a, T>,
202 task: TaskId,
203 }
204
205 impl<'a, T> Drop for SignalOnDrop<'a, T> {
206 fn drop(&mut self) {
207 self.task
208 .host_future_dropped(self.store.as_context_mut())
209 .unwrap();
210 }
211 }
212
213 let mut wrapper = SignalOnDrop {
214 store,
215 task: prepared.task_id(),
216 };
217
218 let result = concurrent::queue_call(wrapper.store.as_context_mut(), prepared)?;
219 return wrapper
220 .store
221 .as_context_mut()
222 .run_concurrent_trap_on_idle(async |_| Ok(result.await?.0))
223 .await?;
224 }
225
226 store
227 .on_fiber(|store| self.call_impl(store, params))
228 .await?
229 }
230
231 /// Start a concurrent call to this function.
232 ///
233 /// Concurrency is achieved by relying on the [`Accessor`] argument, which
234 /// can be obtained by calling [`StoreContextMut::run_concurrent`].
235 ///
236 /// Unlike [`Self::call`] and [`Self::call_async`] (both of which require
237 /// exclusive access to the store until the completion of the call), calls
238 /// made using this method may run concurrently with other calls to the same
239 /// instance. In addition, the runtime will call the `post-return` function
240 /// (if any) automatically when the guest task completes.
241 ///
242 /// Besides the task's return value, this returns a [`TaskExit`]
243 /// representing the completion of the guest task and any transitive
244 /// subtasks it might create.
245 ///
246 /// This function will return an error if [`Config::concurrency_support`] is
247 /// disabled.
248 ///
249 /// [`Config::concurrency_support`]: crate::Config::concurrency_support
250 ///
251 /// # Progress and Cancellation
252 ///
253 /// For more information about how to make progress on the wasm task or how
254 /// to cancel the wasm task see the documentation for
255 /// [`Func::call_concurrent`].
256 ///
257 /// [`Func::call_concurrent`]: crate::component::Func::call_concurrent
258 ///
259 /// # Panics
260 ///
261 /// Panics if the store that the [`Accessor`] is derived from does not own
262 /// this function.
263 ///
264 /// [`Accessor`]: crate::component::Accessor
265 ///
266 /// # Example
267 ///
268 /// Using [`StoreContextMut::run_concurrent`] to get an [`Accessor`]:
269 ///
270 /// ```
271 /// # use {
272 /// # wasmtime::{
273 /// # error::{Result},
274 /// # component::{Component, Linker, ResourceTable},
275 /// # Config, Engine, Store
276 /// # },
277 /// # };
278 /// #
279 /// # struct Ctx { table: ResourceTable }
280 /// #
281 /// # async fn foo() -> Result<()> {
282 /// # let mut config = Config::new();
283 /// # let engine = Engine::new(&config)?;
284 /// # let mut store = Store::new(&engine, Ctx { table: ResourceTable::new() });
285 /// # let mut linker = Linker::new(&engine);
286 /// # let component = Component::new(&engine, "")?;
287 /// # let instance = linker.instantiate_async(&mut store, &component).await?;
288 /// let my_typed_func = instance.get_typed_func::<(), ()>(&mut store, "my_typed_func")?;
289 /// store.run_concurrent(async |accessor| -> wasmtime::Result<_> {
290 /// my_typed_func.call_concurrent(accessor, ()).await?;
291 /// Ok(())
292 /// }).await??;
293 /// # Ok(())
294 /// # }
295 /// ```
296 #[cfg(feature = "component-model-async")]
297 pub async fn call_concurrent(
298 self,
299 accessor: impl AsAccessor<Data: Send>,
300 params: Params,
301 ) -> Result<(Return, TaskExit)>
302 where
303 Params: 'static,
304 Return: 'static,
305 {
306 let result = accessor.as_accessor().with(|mut store| {
307 let mut store = store.as_context_mut();
308 ensure!(
309 store.0.concurrency_support(),
310 "cannot use `call_concurrent` Config::concurrency_support disabled",
311 );
312
313 let prepared =
314 self.prepare_call(store.as_context_mut(), false, move |cx, ty, dst| {
315 Self::lower_args(cx, ty, dst, ¶ms)
316 })?;
317 concurrent::queue_call(store, prepared)
318 });
319 let (result, rx) = result?.await?;
320 Ok((result, TaskExit(rx)))
321 }
322
323 fn lower_args<T>(
324 cx: &mut LowerContext<T>,
325 ty: InterfaceType,
326 dst: &mut [MaybeUninit<ValRaw>],
327 params: &Params,
328 ) -> Result<()> {
329 use crate::component::storage::slice_to_storage_mut;
330
331 if Params::flatten_count() <= MAX_FLAT_PARAMS {
332 // SAFETY: the safety of `slice_to_storage_mut` relies on
333 // `Params::Lower` being represented by a sequence of
334 // `ValRaw`, and that's a guarantee upheld by the `Lower`
335 // trait itself.
336 let dst: &mut MaybeUninit<Params::Lower> = unsafe { slice_to_storage_mut(dst) };
337 Self::lower_stack_args(cx, ¶ms, ty, dst)
338 } else {
339 Self::lower_heap_args(cx, ¶ms, ty, &mut dst[0])
340 }
341 }
342
    /// Calls `concurrent::prepare_call` with monomorphized functions for
    /// lowering the parameters and lifting the result according to the number
    /// of core Wasm parameters and results in the signature of the function to
    /// be called.
    #[cfg(feature = "component-model-async")]
    fn prepare_call<T>(
        self,
        store: StoreContextMut<'_, T>,
        host_future_present: bool,
        lower: impl FnOnce(
            &mut LowerContext<T>,
            InterfaceType,
            &mut [MaybeUninit<ValRaw>],
        ) -> Result<()>
        + Send
        + Sync
        + 'static,
    ) -> Result<PreparedCall<Return>>
    where
        Return: 'static,
    {
        use crate::component::storage::slice_to_storage;
        debug_assert!(store.0.concurrency_support());

        // Parameters that don't fit flat are passed as a single pointer.
        let param_count = if Params::flatten_count() <= MAX_FLAT_PARAMS {
            Params::flatten_count()
        } else {
            1
        };
        // Async-lifted exports use the (larger) parameter limit for their
        // flat results as well.
        let max_results = if self.func.abi_async(store.0) {
            MAX_FLAT_PARAMS
        } else {
            MAX_FLAT_RESULTS
        };
        concurrent::prepare_call(
            store,
            self.func,
            param_count,
            host_future_present,
            move |func, store, params_out| {
                func.with_lower_context(store, |cx, ty| lower(cx, ty, params_out))
            },
            move |func, store, results| {
                let result = if Return::flatten_count() <= max_results {
                    func.with_lift_context(store, |cx, ty| {
                        // SAFETY: Per the safety requirements documented for the
                        // `ComponentType` trait, `Return::Lower` must be
                        // compatible at the binary level with a `[ValRaw; N]`,
                        // where `N` is `mem::size_of::<Return::Lower>() /
                        // mem::size_of::<ValRaw>()`. And since this function
                        // is only used when `Return::flatten_count() <=
                        // MAX_FLAT_RESULTS` and `MAX_FLAT_RESULTS == 1`, `N`
                        // can only either be 0 or 1.
                        //
                        // See `ComponentInstance::exit_call` for where we use
                        // the result count passed from
                        // `wasmtime_environ::fact::trampoline`-generated code
                        // to ensure the slice has the correct length, and also
                        // `concurrent::start_call` for where we conservatively
                        // use a slice length of 1 unconditionally. Also note
                        // that, as of this writing `slice_to_storage`
                        // double-checks the slice length is sufficient.
                        let results: &Return::Lower = unsafe { slice_to_storage(results) };
                        Self::lift_stack_result(cx, ty, results)
                    })?
                } else {
                    // Result spilled to linear memory: `results[0]` holds the
                    // pointer to it.
                    func.with_lift_context(store, |cx, ty| {
                        Self::lift_heap_result(cx, ty, &results[0])
                    })?
                };
                Ok(Box::new(result))
            },
        )
    }
    /// Shared synchronous call path used by [`Self::call`] and the fiber-based
    /// fallback in [`Self::call_async`]: lowers `params`, runs the core wasm
    /// function, lifts the result, then runs `post-return`.
    fn call_impl(&self, mut store: impl AsContextMut, params: Params) -> Result<Return> {
        let mut store = store.as_context_mut();

        if self.func.abi_async(store.0) {
            bail!("must enable the `component-model-async` feature to call async-lifted exports")
        }

        // Note that this is in theory simpler than it might read at this time.
        // Here we're doing a runtime dispatch on the `flatten_count` for the
        // params/results to see whether they're inbounds. This creates 4 cases
        // to handle. In reality this is a highly optimizable branch where LLVM
        // will easily figure out that only one branch here is taken.
        //
        // Otherwise this current construction is done to ensure that the stack
        // space reserved for the params/results is always of the appropriate
        // size (as the params/results needed differ depending on the "flatten"
        // count)
        //
        // SAFETY: the safety of these invocations of `call_raw` depends on the
        // correctness of the ascription of the `LowerParams` and `LowerReturn`
        // types on the `call_raw` function. That's upheld here through the
        // safety requirements of `Lift` and `Lower` on `Params` and `Return` in
        // combination with checking the various possible branches here and
        // dispatching to appropriately typed functions.
        let (result, post_return_arg) = unsafe {
            // This type is used as `LowerParams` for `call_raw` which is either
            // `Params::Lower` or `ValRaw` representing it's either on the stack
            // or it's on the heap. This allocates 1 extra `ValRaw` on the stack
            // if `Params` is empty and `Return` is also empty, but that's a
            // reasonable enough price to pay for now given the current code
            // organization.
            #[derive(Copy, Clone)]
            union Union<T: Copy, U: Copy> {
                _a: T,
                _b: U,
            }

            if Return::flatten_count() <= MAX_FLAT_RESULTS {
                self.func.call_raw(
                    store.as_context_mut(),
                    |cx, ty, dst: &mut MaybeUninit<Union<Params::Lower, ValRaw>>| {
                        let dst = storage_as_slice_mut(dst);
                        Self::lower_args(cx, ty, dst, &params)
                    },
                    Self::lift_stack_result,
                )
            } else {
                self.func.call_raw(
                    store.as_context_mut(),
                    |cx, ty, dst: &mut MaybeUninit<Union<Params::Lower, ValRaw>>| {
                        let dst = storage_as_slice_mut(dst);
                        Self::lower_args(cx, ty, dst, &params)
                    },
                    Self::lift_heap_result,
                )
            }
        }?;

        // Run the guest's `post-return` cleanup (if declared) now that the
        // results have been lifted to the host.
        self.func.post_return_impl(store, post_return_arg)?;

        Ok(result)
    }
480
481 /// Lower parameters directly onto the stack specified by the `dst`
482 /// location.
483 ///
484 /// This is only valid to call when the "flatten count" is small enough, or
485 /// when the canonical ABI says arguments go through the stack rather than
486 /// the heap.
487 fn lower_stack_args<T>(
488 cx: &mut LowerContext<'_, T>,
489 params: &Params,
490 ty: InterfaceType,
491 dst: &mut MaybeUninit<Params::Lower>,
492 ) -> Result<()> {
493 assert!(Params::flatten_count() <= MAX_FLAT_PARAMS);
494 params.linear_lower_to_flat(cx, ty, dst)?;
495 Ok(())
496 }
497
498 /// Lower parameters onto a heap-allocated location.
499 ///
500 /// This is used when the stack space to be used for the arguments is above
501 /// the `MAX_FLAT_PARAMS` threshold. Here the wasm's `realloc` function is
502 /// invoked to allocate space and then parameters are stored at that heap
503 /// pointer location.
504 fn lower_heap_args<T>(
505 cx: &mut LowerContext<'_, T>,
506 params: &Params,
507 ty: InterfaceType,
508 dst: &mut MaybeUninit<ValRaw>,
509 ) -> Result<()> {
510 // Memory must exist via validation if the arguments are stored on the
511 // heap, so we can create a `MemoryMut` at this point. Afterwards
512 // `realloc` is used to allocate space for all the arguments and then
513 // they're all stored in linear memory.
514 //
515 // Note that `realloc` will bake in a check that the returned pointer is
516 // in-bounds.
517 let ptr = cx.realloc(0, 0, Params::ALIGN32, Params::SIZE32)?;
518 params.linear_lower_to_memory(cx, ty, ptr)?;
519
520 // Note that the pointer here is stored as a 64-bit integer. This allows
521 // this to work with either 32 or 64-bit memories. For a 32-bit memory
522 // it'll just ignore the upper 32 zero bits, and for 64-bit memories
523 // this'll have the full 64-bits. Note that for 32-bit memories the call
524 // to `realloc` above guarantees that the `ptr` is in-bounds meaning
525 // that we will know that the zero-extended upper bits of `ptr` are
526 // guaranteed to be zero.
527 //
528 // This comment about 64-bit integers is also referred to below with
529 // "WRITEPTR64".
530 dst.write(ValRaw::i64(ptr as i64));
531
532 Ok(())
533 }
534
535 /// Lift the result of a function directly from the stack result.
536 ///
537 /// This is only used when the result fits in the maximum number of stack
538 /// slots.
539 fn lift_stack_result(
540 cx: &mut LiftContext<'_>,
541 ty: InterfaceType,
542 dst: &Return::Lower,
543 ) -> Result<Return> {
544 Return::linear_lift_from_flat(cx, ty, dst)
545 }
546
547 /// Lift the result of a function where the result is stored indirectly on
548 /// the heap.
549 fn lift_heap_result(
550 cx: &mut LiftContext<'_>,
551 ty: InterfaceType,
552 dst: &ValRaw,
553 ) -> Result<Return> {
554 assert!(Return::flatten_count() > MAX_FLAT_RESULTS);
555 // FIXME(#4311): needs to read an i64 for memory64
556 let ptr = usize::try_from(dst.get_u32())?;
557 if ptr % usize::try_from(Return::ALIGN32)? != 0 {
558 bail!("return pointer not aligned");
559 }
560
561 let bytes = cx
562 .memory()
563 .get(ptr..)
564 .and_then(|b| b.get(..Return::SIZE32))
565 .ok_or_else(|| crate::format_err!("pointer out of bounds of memory"))?;
566 Return::linear_lift_from_memory(cx, ty, bytes)
567 }
568
    #[doc(hidden)]
    #[deprecated(note = "no longer needs to be called; this function has no effect")]
    // Retained only for backwards compatibility: `post-return` is now invoked
    // automatically by the call paths above, so this is a no-op.
    pub fn post_return(&self, _store: impl AsContextMut) -> Result<()> {
        Ok(())
    }
574
    #[doc(hidden)]
    #[deprecated(note = "no longer needs to be called; this function has no effect")]
    #[cfg(feature = "async")]
    // Async counterpart of `post_return` above; likewise a no-op kept only for
    // backwards compatibility.
    pub async fn post_return_async<T: Send>(
        &self,
        _store: impl AsContextMut<Data = T>,
    ) -> Result<()> {
        Ok(())
    }
584}
585
/// A trait representing a static list of named types that can be passed to or
/// returned from a [`TypedFunc`].
///
/// This trait is implemented for a number of tuple types and is not expected
/// to be implemented externally. The contents of this trait are hidden as it's
/// intended to be an implementation detail of Wasmtime. The contents of this
/// trait are not covered by Wasmtime's stability guarantees.
///
/// For more information about this trait see [`Func::typed`] and
/// [`TypedFunc`].
//
// Note that this is an `unsafe` trait, and the unsafety means that
// implementations of this trait must be correct or otherwise [`TypedFunc`]
// would not be memory safe. The main reason this is `unsafe` is the
// `typecheck` function which must operate correctly relative to the `AsTuple`
// interpretation of the implementor.
//
// This is a pure marker trait with no items of its own; all behavior comes
// from the `ComponentType` supertrait.
pub unsafe trait ComponentNamedList: ComponentType {}
603
/// A trait representing types which can be passed to and read from components
/// with the canonical ABI.
///
/// This trait is implemented for Rust types which can be communicated to
/// components. The [`Func::typed`] and [`TypedFunc`] Rust items are the main
/// consumers of this trait.
///
/// Supported Rust types include:
///
/// | Component Model Type              | Rust Type                            |
/// |-----------------------------------|--------------------------------------|
/// | `{s,u}{8,16,32,64}`               | `{i,u}{8,16,32,64}`                  |
/// | `f{32,64}`                        | `f{32,64}`                           |
/// | `bool`                            | `bool`                               |
/// | `char`                            | `char`                               |
/// | `tuple<A, B>`                     | `(A, B)`                             |
/// | `option<T>`                       | `Option<T>`                          |
/// | `result`                          | `Result<(), ()>`                     |
/// | `result<T>`                       | `Result<T, ()>`                      |
/// | `result<_, E>`                    | `Result<(), E>`                      |
/// | `result<T, E>`                    | `Result<T, E>`                       |
/// | `string`                          | `String`, `&str`, or [`WasmStr`]     |
/// | `list<T>`                         | `Vec<T>`, `&[T]`, or [`WasmList`]    |
/// | `own<T>`, `borrow<T>`             | [`Resource<T>`] or [`ResourceAny`]   |
/// | `record`                          | [`#[derive(ComponentType)]`][d-cm]   |
/// | `variant`                         | [`#[derive(ComponentType)]`][d-cm]   |
/// | `enum`                            | [`#[derive(ComponentType)]`][d-cm]   |
/// | `flags`                           | [`flags!`][f-m]                      |
///
/// [`Resource<T>`]: crate::component::Resource
/// [`ResourceAny`]: crate::component::ResourceAny
/// [d-cm]: macro@crate::component::ComponentType
/// [f-m]: crate::component::flags
///
/// Rust standard library pointers such as `&T`, `Box<T>`, and `Arc<T>`
/// additionally represent whatever type `T` represents in the component model.
/// Note that types such as `record`, `variant`, `enum`, and `flags` are
/// generated by the embedder at compile time. These macros derive
/// implementation of this trait for custom types to map to custom types in the
/// component model. Note that for `record`, `variant`, `enum`, and `flags`
/// those types are often generated by the
/// [`bindgen!`](crate::component::bindgen) macro from WIT definitions.
///
/// Types that implement [`ComponentType`] are used for `Params` and `Return`
/// in [`TypedFunc`] and [`Func::typed`].
///
/// The contents of this trait are hidden as it's intended to be an
/// implementation detail of Wasmtime. The contents of this trait are not
/// covered by Wasmtime's stability guarantees.
///
/// # Safety
///
/// Note that this is an `unsafe` trait as `TypedFunc`'s safety heavily relies on
/// the correctness of the implementations of this trait. Some ways in which this
/// trait must be correct to be safe are:
///
/// * The `Lower` associated type must be a `ValRaw` sequence. It doesn't have to
///   literally be `[ValRaw; N]` but when laid out in memory it must be adjacent
///   `ValRaw` values and have a multiple of the size of `ValRaw` and the same
///   alignment.
///
/// * The `lower` function must initialize the bits within `Lower` that are going
///   to be read by the trampoline that's used to enter core wasm. A trampoline
///   is passed `*mut Lower` and will read the canonical abi arguments in
///   sequence, so all of the bits must be correctly initialized.
///
/// * The `size` and `align` functions must be correct for this value stored in
///   the canonical ABI. The `Cursor<T>` iteration of these bytes rely on this
///   for correctness as they otherwise eschew bounds-checking.
///
/// There are likely some other correctness issues which aren't documented as
/// well, this isn't currently an exhaustive list. It suffices to say, though,
/// that correctness bugs in this trait implementation are highly likely to
/// lead to security bugs, which again leads to the `unsafe` in the trait.
///
/// Note that this trait specifically is not sealed because `bindgen!`-generated
/// types must be able to implement this trait using a `#[derive]` macro. For
/// users it's recommended to not implement this trait manually given the
/// non-exhaustive list of safety requirements that must be upheld. This trait
/// is implemented at your own risk if you do so.
///
/// # Send and Sync
///
/// While on the topic of safety it's worth discussing the `Send` and `Sync`
/// bounds here as well. These bounds might naively seem like they shouldn't be
/// required for all component types as they're host-level types not guest-level
/// types persisted anywhere. Various subtleties lead to these bounds, however:
///
/// * Fibers require that all stack-local variables are `Send` and `Sync` for
///   fibers themselves to be send/sync. Unfortunately we have no help from the
///   compiler on this one so it's up to Wasmtime's discipline to maintain this.
///   One instance of this is that return values are placed on the stack as
///   they're lowered into guest memory. This lowering operation can involve
///   malloc and context switches, so return values must be Send/Sync.
///
/// * In the implementation of component model async it's not uncommon for types
///   to be "buffered" in the store temporarily. For example parameters might
///   reside in a store temporarily while wasm has backpressure turned on.
///
/// Overall it's generally easiest to require `Send` and `Sync` for all
/// component types. There additionally aren't known use case for non-`Send` or
/// non-`Sync` types at this time.
pub unsafe trait ComponentType: Send + Sync {
    /// Representation of the "lowered" form of this component value.
    ///
    /// Lowerings lower into core wasm values which are represented by `ValRaw`.
    /// This `Lower` type must be a list of `ValRaw` as either a literal array
    /// or a struct where every field is a `ValRaw`. This must be `Copy` (as
    /// `ValRaw` is `Copy`) and support all byte patterns. This being correct is
    /// one reason why the trait is unsafe.
    #[doc(hidden)]
    type Lower: Copy;

    /// The information about this type's canonical ABI (size/align/etc).
    #[doc(hidden)]
    const ABI: CanonicalAbiInfo;

    /// Size, in bytes, of this type within a 32-bit linear memory, as derived
    /// from `Self::ABI`.
    #[doc(hidden)]
    const SIZE32: usize = Self::ABI.size32 as usize;
    /// Alignment, in bytes, of this type within a 32-bit linear memory, as
    /// derived from `Self::ABI`.
    #[doc(hidden)]
    const ALIGN32: u32 = Self::ABI.align32;

    /// Overridden to `true` by implementations for Rust unit-like types;
    /// `false` for everything else by default.
    #[doc(hidden)]
    const IS_RUST_UNIT_TYPE: bool = false;

    /// Whether this type might require a call to the guest's realloc function
    /// to allocate linear memory when lowering (e.g. a non-empty `string`).
    ///
    /// If this is `false`, Wasmtime may optimize lowering by using
    /// `LowerContext::new_without_realloc` and lowering values outside of any
    /// fiber. That will panic if the lowering process ends up needing realloc
    /// after all, so `true` is a conservative default.
    #[doc(hidden)]
    const MAY_REQUIRE_REALLOC: bool = true;

    /// Returns the number of core wasm abi values will be used to represent
    /// this type in its lowered form.
    ///
    /// This divides the size of `Self::Lower` by the size of `ValRaw`, after
    /// asserting that `Self::Lower` has the size-multiple and alignment of
    /// `ValRaw` required by this trait's safety contract.
    #[doc(hidden)]
    fn flatten_count() -> usize {
        assert!(mem::size_of::<Self::Lower>() % mem::size_of::<ValRaw>() == 0);
        assert!(mem::align_of::<Self::Lower>() == mem::align_of::<ValRaw>());
        mem::size_of::<Self::Lower>() / mem::size_of::<ValRaw>()
    }

    /// Performs a type-check to see whether this component value type matches
    /// the interface type `ty` provided.
    #[doc(hidden)]
    fn typecheck(ty: &InterfaceType, types: &InstanceType<'_>) -> Result<()>;
}
755
#[doc(hidden)]
pub unsafe trait ComponentVariant: ComponentType {
    /// Canonical ABI information for each case of the variant, with `None`
    /// for cases that carry no payload.
    const CASES: &'static [Option<CanonicalAbiInfo>];
    /// Precomputed layout information for this variant, derived from `CASES`.
    const INFO: VariantInfo = VariantInfo::new_static(Self::CASES);
    /// Byte offset of the payload within the canonical ABI representation,
    /// taken from `INFO`.
    const PAYLOAD_OFFSET32: usize = Self::INFO.payload_offset32 as usize;
}
762
/// Host types which can be passed to WebAssembly components.
///
/// This trait is implemented for all types that can be passed to components
/// either as parameters of component exports or returns of component imports.
/// This trait represents the ability to convert from the native host
/// representation to the canonical ABI.
///
/// Built-in types to Rust such as `Option<T>` implement this trait as
/// appropriate. For a mapping of component model to Rust types see
/// [`ComponentType`].
///
/// For user-defined types, for example `record` types mapped to Rust `struct`s,
/// this crate additionally has
/// [`#[derive(Lower)]`](macro@crate::component::Lower).
///
/// Note that like [`ComponentType`] the definition of this trait is intended to
/// be an internal implementation detail of Wasmtime at this time. It's
/// recommended to use the `#[derive(Lower)]` implementation instead.
pub unsafe trait Lower: ComponentType {
    /// Performs the "lower" function in the linear memory version of the
    /// canonical ABI.
    ///
    /// This method will lower the current value into a component. The `lower`
    /// function performs a "flat" lowering into the `dst` specified which is
    /// allowed to be uninitialized entering this method but is guaranteed to be
    /// fully initialized if the method returns `Ok(())`.
    ///
    /// The `cx` context provided is the context within which this lowering is
    /// happening. This contains information such as canonical options specified
    /// (e.g. string encodings, memories, etc), the store itself, along with
    /// type information.
    ///
    /// The `ty` parameter is the destination type that is being lowered into.
    /// For example this is the component's "view" of the type that is being
    /// lowered. This is guaranteed to have passed a `typecheck` earlier.
    ///
    /// This will only be called if `typecheck` passes for `Op::Lower`.
    #[doc(hidden)]
    fn linear_lower_to_flat<T>(
        &self,
        cx: &mut LowerContext<'_, T>,
        ty: InterfaceType,
        dst: &mut MaybeUninit<Self::Lower>,
    ) -> Result<()>;

    /// Performs the "store" operation in the linear memory version of the
    /// canonical ABI.
    ///
    /// This function will store `self` into the linear memory described by
    /// `cx` at the `offset` provided.
    ///
    /// It is expected that `offset` is a valid offset in memory for
    /// `Self::SIZE32` bytes. At this time that's not an unsafe contract as it's
    /// always re-checked on all stores, but this is something that will need to
    /// be improved in the future to remove extra bounds checks. For now this
    /// function will panic if there's a bug and `offset` isn't valid within
    /// memory.
    ///
    /// The `ty` type information passed here is the same as the type
    /// information passed to `lower` above, and is the component's own view of
    /// what the resulting type should be.
    ///
    /// This will only be called if `typecheck` passes for `Op::Lower`.
    #[doc(hidden)]
    fn linear_lower_to_memory<T>(
        &self,
        cx: &mut LowerContext<'_, T>,
        ty: InterfaceType,
        offset: usize,
    ) -> Result<()>;

    /// Provided method to lower a list of `Self` into memory.
    ///
    /// Requires that `offset` has already been checked for alignment and
    /// validity in terms of being in-bounds, otherwise this may panic.
    ///
    /// This is primarily here to get overridden for implementations of integers
    /// which can avoid some extra fluff and use a pattern that's more easily
    /// optimizable by LLVM.
    #[doc(hidden)]
    fn linear_store_list_to_memory<T>(
        cx: &mut LowerContext<'_, T>,
        ty: InterfaceType,
        mut offset: usize,
        items: &[Self],
    ) -> Result<()>
    where
        Self: Sized,
    {
        // Store each element at its canonical slot; `ty` is the element type
        // so it is passed unchanged to every item, and elements are laid out
        // contiguously `SIZE32` bytes apart.
        for item in items {
            item.linear_lower_to_memory(cx, ty, offset)?;
            offset += Self::SIZE32;
        }
        Ok(())
    }
}
859
/// Host types which can be created from the canonical ABI.
///
/// This is the mirror of the [`Lower`] trait where it represents the capability
/// of acquiring items from WebAssembly and passing them to the host.
///
/// Built-in types to Rust such as `Option<T>` implement this trait as
/// appropriate. For a mapping of component model to Rust types see
/// [`ComponentType`].
///
/// For user-defined types, for example `record` types mapped to Rust `struct`s,
/// this crate additionally has
/// [`#[derive(Lift)]`](macro@crate::component::Lift).
///
/// Note that like [`ComponentType`] the definition of this trait is intended to
/// be an internal implementation detail of Wasmtime at this time. It's
/// recommended to use the `#[derive(Lift)]` implementation instead.
pub unsafe trait Lift: Sized + ComponentType {
    /// Performs the "lift" operation in the linear memory version of the
    /// canonical ABI.
    ///
    /// This function performs a "flat" lift operation from the `src` specified
    /// which is a sequence of core wasm values. The lifting operation will
    /// validate core wasm values and produce a `Self` on success.
    ///
    /// The `cx` provided contains contextual information such as the store
    /// that's being loaded from, canonical options, and type information.
    ///
    /// The `ty` parameter is the origin component's specification for what the
    /// type that is being lifted is. For example this is the record type or the
    /// resource type that is being lifted.
    ///
    /// Note that this has a default implementation but if `typecheck` passes
    /// for `Op::Lift` this needs to be overridden.
    #[doc(hidden)]
    fn linear_lift_from_flat(
        cx: &mut LiftContext<'_>,
        ty: InterfaceType,
        src: &Self::Lower,
    ) -> Result<Self>;

    /// Performs the "load" operation in the linear memory version of the
    /// canonical ABI.
    ///
    /// This will read the `bytes` provided, which are a sub-slice into the
    /// linear memory described by `cx`. The `bytes` array provided is
    /// guaranteed to be `Self::SIZE32` bytes large. All of memory is then also
    /// available through `cx` for bounds-checks and such as necessary for
    /// strings/lists.
    ///
    /// The `ty` argument is the type that's being loaded, as described by the
    /// original component.
    ///
    /// Note that this has a default implementation but if `typecheck` passes
    /// for `Op::Lift` this needs to be overridden.
    #[doc(hidden)]
    fn linear_lift_from_memory(
        cx: &mut LiftContext<'_>,
        ty: InterfaceType,
        bytes: &[u8],
    ) -> Result<Self>;

    /// Converts `list` into a `Vec<T>`, used in `Lift for Vec<T>`.
    #[doc(hidden)]
    fn linear_lift_list_from_memory(
        cx: &mut LiftContext<'_>,
        list: &WasmList<Self>,
    ) -> Result<Vec<Self>>
    where
        Self: Sized,
    {
        // Pre-size the destination since the element count is known up front.
        let mut dst = Vec::with_capacity(list.len);
        Self::linear_lift_into_from_memory(cx, list, &mut dst)?;
        Ok(dst)
    }

    /// Loads all items from `list` into `dst`, lifting each element from
    /// linear memory in order.
    ///
    /// This is primarily here to get overridden for implementations of integers
    /// which can avoid some extra fluff and use a pattern that's more easily
    /// optimizable by LLVM.
    #[doc(hidden)]
    fn linear_lift_into_from_memory(
        cx: &mut LiftContext<'_>,
        list: &WasmList<Self>,
        dst: &mut impl Extend<Self>,
    ) -> Result<()>
    where
        Self: Sized,
    {
        for i in 0..list.len {
            // The `unwrap` is for the `Option` returned by the index lookup,
            // which never fails here as `i` is always in-bounds; the inner
            // `?` propagates the actual lift error.
            dst.extend(Some(list.get_from_store(cx, i).unwrap()?));
        }
        Ok(())
    }
}
955
956// Macro to help generate "forwarding implementations" of `ComponentType` to
957// another type, used for wrappers in Rust like `&T`, `Box<T>`, etc. Note that
958// these wrappers only implement lowering because lifting native Rust types
959// cannot be done.
960macro_rules! forward_type_impls {
961 ($(($($generics:tt)*) $a:ty => $b:ty,)*) => ($(
962 unsafe impl <$($generics)*> ComponentType for $a {
963 type Lower = <$b as ComponentType>::Lower;
964
965 const ABI: CanonicalAbiInfo = <$b as ComponentType>::ABI;
966
967 #[inline]
968 fn typecheck(ty: &InterfaceType, types: &InstanceType<'_>) -> Result<()> {
969 <$b as ComponentType>::typecheck(ty, types)
970 }
971 }
972 )*)
973}
974
975forward_type_impls! {
976 (T: ComponentType + ?Sized) &'_ T => T,
977 (T: ComponentType + ?Sized) Box<T> => T,
978 (T: ComponentType + ?Sized) alloc::sync::Arc<T> => T,
979 () String => str,
980 (T: ComponentType) Vec<T> => [T],
981}
982
// Companion to `forward_type_impls!` above: generates forwarding `Lower`
// implementations for wrapper types, where lowering `&T`/`Box<T>`/etc. simply
// lowers the wrapped value (auto-deref coerces `&$a` to `&$b`).
macro_rules! forward_lowers {
    ($(($($generics:tt)*) $a:ty => $b:ty,)*) => ($(
        unsafe impl <$($generics)*> Lower for $a {
            fn linear_lower_to_flat<U>(
                &self,
                cx: &mut LowerContext<'_, U>,
                ty: InterfaceType,
                dst: &mut MaybeUninit<Self::Lower>,
            ) -> Result<()> {
                <$b as Lower>::linear_lower_to_flat(self, cx, ty, dst)
            }

            fn linear_lower_to_memory<U>(
                &self,
                cx: &mut LowerContext<'_, U>,
                ty: InterfaceType,
                offset: usize,
            ) -> Result<()> {
                <$b as Lower>::linear_lower_to_memory(self, cx, ty, offset)
            }
        }
    )*)
}

forward_lowers! {
    (T: Lower + ?Sized) &'_ T => T,
    (T: Lower + ?Sized) Box<T> => T,
    (T: Lower + ?Sized) alloc::sync::Arc<T> => T,
    () String => str,
    (T: Lower) Vec<T> => [T],
}
1014
// Generates `Lift` for owned string types: each first lifts a borrowed
// `WasmStr` view (which validates the pointer/length pair) and then eagerly
// decodes the guest's bytes into a host-owned string via `.into()`.
macro_rules! forward_string_lifts {
    ($($a:ty,)*) => ($(
        unsafe impl Lift for $a {
            #[inline]
            fn linear_lift_from_flat(cx: &mut LiftContext<'_>, ty: InterfaceType, src: &Self::Lower) -> Result<Self> {
                let s = <WasmStr as Lift>::linear_lift_from_flat(cx, ty, src)?;
                let encoding = cx.options().string_encoding;
                Ok(s.to_str_from_memory(encoding, cx.memory())?.into())
            }

            #[inline]
            fn linear_lift_from_memory(cx: &mut LiftContext<'_>, ty: InterfaceType, bytes: &[u8]) -> Result<Self> {
                let s = <WasmStr as Lift>::linear_lift_from_memory(cx, ty, bytes)?;
                let encoding = cx.options().string_encoding;
                Ok(s.to_str_from_memory(encoding, cx.memory())?.into())
            }
        }
    )*)
}

forward_string_lifts! {
    Box<str>,
    alloc::sync::Arc<str>,
    String,
}
1040
// Generates `Lift` for owned list containers: each first lifts a borrowed
// `WasmList<T>` view and then eagerly copies all elements out of linear
// memory into the host container via `.into()`.
macro_rules! forward_list_lifts {
    ($($a:ty,)*) => ($(
        unsafe impl <T: Lift> Lift for $a {
            fn linear_lift_from_flat(cx: &mut LiftContext<'_>, ty: InterfaceType, src: &Self::Lower) -> Result<Self> {
                let list = <WasmList::<T> as Lift>::linear_lift_from_flat(cx, ty, src)?;
                Ok(T::linear_lift_list_from_memory(cx, &list)?.into())
            }

            fn linear_lift_from_memory(cx: &mut LiftContext<'_>, ty: InterfaceType, bytes: &[u8]) -> Result<Self> {
                let list = <WasmList::<T> as Lift>::linear_lift_from_memory(cx, ty, bytes)?;
                Ok(T::linear_lift_list_from_memory(cx, &list)?.into())
            }
        }
    )*)
}

forward_list_lifts! {
    Box<[T]>,
    alloc::sync::Arc<[T]>,
    Vec<T>,
}
1062
// Macro to help generate `ComponentType` implementations for primitive types
// such as integers, char, bool, etc.
//
// Each entry reads as: host `$primitive` maps to `InterfaceType::$ty`, is
// flattened through the `ValRaw::$field`/`$get` accessor pair, and has the
// canonical ABI layout `$abi`.
macro_rules! integers {
    ($($primitive:ident = $ty:ident in $field:ident/$get:ident with abi:$abi:ident,)*) => ($(
        unsafe impl ComponentType for $primitive {
            type Lower = ValRaw;

            const ABI: CanonicalAbiInfo = CanonicalAbiInfo::$abi;

            // Lowering an integer never needs to allocate in the guest.
            const MAY_REQUIRE_REALLOC: bool = false;

            fn typecheck(ty: &InterfaceType, _types: &InstanceType<'_>) -> Result<()> {
                match ty {
                    InterfaceType::$ty => Ok(()),
                    other => bail!("expected `{}` found `{}`", desc(&InterfaceType::$ty), desc(other))
                }
            }
        }

        unsafe impl Lower for $primitive {
            #[inline]
            #[allow(trivial_numeric_casts, reason = "macro-generated code")]
            fn linear_lower_to_flat<T>(
                &self,
                _cx: &mut LowerContext<'_, T>,
                ty: InterfaceType,
                dst: &mut MaybeUninit<Self::Lower>,
            ) -> Result<()> {
                debug_assert!(matches!(ty, InterfaceType::$ty));
                // Narrow types are widened into the `ValRaw` field here
                // (e.g. `i8`/`i16` travel as an `i32`).
                dst.write(ValRaw::$field(*self as $field));
                Ok(())
            }

            #[inline]
            fn linear_lower_to_memory<T>(
                &self,
                cx: &mut LowerContext<'_, T>,
                ty: InterfaceType,
                offset: usize,
            ) -> Result<()> {
                debug_assert!(matches!(ty, InterfaceType::$ty));
                debug_assert!(offset % Self::SIZE32 == 0);
                // Linear memory is always little-endian regardless of host
                // endianness.
                *cx.get(offset) = self.to_le_bytes();
                Ok(())
            }

            fn linear_store_list_to_memory<T>(
                cx: &mut LowerContext<'_, T>,
                ty: InterfaceType,
                offset: usize,
                items: &[Self],
            ) -> Result<()> {
                debug_assert!(matches!(ty, InterfaceType::$ty));

                // Double-check that the CM alignment is at least the host's
                // alignment for this type which should be true for all
                // platforms.
                assert!((Self::ALIGN32 as usize) >= mem::align_of::<Self>());

                // Slice `cx`'s memory to the window that we'll be modifying.
                // This should all have already been verified in terms of
                // alignment and sizing meaning that these assertions here are
                // not truly necessary but are instead double-checks.
                //
                // Note that we're casting a `[u8]` slice to `[Self]` with
                // `align_to_mut` which is not safe in general but is safe in
                // our specific case as all `u8` patterns are valid `Self`
                // patterns since `Self` is an integral type.
                let dst = &mut cx.as_slice_mut()[offset..][..items.len() * Self::SIZE32];
                let (before, middle, end) = unsafe { dst.align_to_mut::<Self>() };
                assert!(before.is_empty() && end.is_empty());
                assert_eq!(middle.len(), items.len());

                // And with all that out of the way perform the copying loop.
                // This is not a `copy_from_slice` because endianness needs to
                // be handled here, but LLVM should pretty easily transform this
                // into a memcpy on little-endian platforms.
                for (dst, src) in middle.iter_mut().zip(items) {
                    *dst = src.to_le();
                }
                Ok(())
            }
        }

        unsafe impl Lift for $primitive {
            #[inline]
            #[allow(
                trivial_numeric_casts,
                clippy::cast_possible_truncation,
                reason = "macro-generated code"
            )]
            fn linear_lift_from_flat(_cx: &mut LiftContext<'_>, ty: InterfaceType, src: &Self::Lower) -> Result<Self> {
                debug_assert!(matches!(ty, InterfaceType::$ty));
                // Truncate the widened `ValRaw` field back down to `Self`.
                Ok(src.$get() as $primitive)
            }

            #[inline]
            fn linear_lift_from_memory(_cx: &mut LiftContext<'_>, ty: InterfaceType, bytes: &[u8]) -> Result<Self> {
                debug_assert!(matches!(ty, InterfaceType::$ty));
                debug_assert!((bytes.as_ptr() as usize) % Self::SIZE32 == 0);
                Ok($primitive::from_le_bytes(bytes.try_into().unwrap()))
            }

            fn linear_lift_into_from_memory(
                cx: &mut LiftContext<'_>,
                list: &WasmList<Self>,
                dst: &mut impl Extend<Self>,
            ) -> Result<()>
            where
                Self: Sized,
            {
                // Bulk-read the raw little-endian values and byte-swap while
                // extending `dst` (`from_le` is a no-op on little-endian
                // hosts, so this typically optimizes to a straight copy).
                dst.extend(list._as_le_slice(cx.memory())
                    .iter()
                    .map(|i| Self::from_le(*i)));
                Ok(())
            }
        }
    )*)
}

integers! {
    i8 = S8 in i32/get_i32 with abi:SCALAR1,
    u8 = U8 in u32/get_u32 with abi:SCALAR1,
    i16 = S16 in i32/get_i32 with abi:SCALAR2,
    u16 = U16 in u32/get_u32 with abi:SCALAR2,
    i32 = S32 in i32/get_i32 with abi:SCALAR4,
    u32 = U32 in u32/get_u32 with abi:SCALAR4,
    i64 = S64 in i64/get_i64 with abi:SCALAR8,
    u64 = U64 in u64/get_u64 with abi:SCALAR8,
}
1193
// Macro generating `ComponentType`/`Lower`/`Lift` for the float primitives.
// Floats are transferred as their raw bit patterns (`to_bits`/`from_bits` and
// byte-level copies) so NaN payloads pass through unchanged.
macro_rules! floats {
    ($($float:ident/$get_float:ident = $ty:ident with abi:$abi:ident)*) => ($(const _: () = {
        unsafe impl ComponentType for $float {
            type Lower = ValRaw;

            const ABI: CanonicalAbiInfo = CanonicalAbiInfo::$abi;

            fn typecheck(ty: &InterfaceType, _types: &InstanceType<'_>) -> Result<()> {
                match ty {
                    InterfaceType::$ty => Ok(()),
                    other => bail!("expected `{}` found `{}`", desc(&InterfaceType::$ty), desc(other))
                }
            }
        }

        unsafe impl Lower for $float {
            #[inline]
            fn linear_lower_to_flat<T>(
                &self,
                _cx: &mut LowerContext<'_, T>,
                ty: InterfaceType,
                dst: &mut MaybeUninit<Self::Lower>,
            ) -> Result<()> {
                debug_assert!(matches!(ty, InterfaceType::$ty));
                dst.write(ValRaw::$float(self.to_bits()));
                Ok(())
            }

            #[inline]
            fn linear_lower_to_memory<T>(
                &self,
                cx: &mut LowerContext<'_, T>,
                ty: InterfaceType,
                offset: usize,
            ) -> Result<()> {
                debug_assert!(matches!(ty, InterfaceType::$ty));
                debug_assert!(offset % Self::SIZE32 == 0);
                let ptr = cx.get(offset);
                *ptr = self.to_bits().to_le_bytes();
                Ok(())
            }

            fn linear_store_list_to_memory<T>(
                cx: &mut LowerContext<'_, T>,
                ty: InterfaceType,
                offset: usize,
                items: &[Self],
            ) -> Result<()> {
                debug_assert!(matches!(ty, InterfaceType::$ty));

                // Double-check that the CM alignment is at least the host's
                // alignment for this type which should be true for all
                // platforms.
                assert!((Self::ALIGN32 as usize) >= mem::align_of::<Self>());

                // Slice `cx`'s memory to the window that we'll be modifying.
                // This should all have already been verified in terms of
                // alignment and sizing meaning that these assertions here are
                // not truly necessary but are instead double-checks.
                let dst = &mut cx.as_slice_mut()[offset..][..items.len() * Self::SIZE32];
                assert!(dst.as_ptr().cast::<Self>().is_aligned());

                // And with all that out of the way perform the copying loop.
                // This is not a `copy_from_slice` because endianness needs to
                // be handled here, but LLVM should pretty easily transform this
                // into a memcpy on little-endian platforms.
                // TODO use `as_chunks` when https://github.com/rust-lang/rust/issues/74985
                // is stabilized
                for (dst, src) in iter::zip(dst.chunks_exact_mut(Self::SIZE32), items) {
                    let dst: &mut [u8; Self::SIZE32] = dst.try_into().unwrap();
                    *dst = src.to_le_bytes();
                }
                Ok(())
            }
        }

        unsafe impl Lift for $float {
            #[inline]
            fn linear_lift_from_flat(_cx: &mut LiftContext<'_>, ty: InterfaceType, src: &Self::Lower) -> Result<Self> {
                debug_assert!(matches!(ty, InterfaceType::$ty));
                Ok($float::from_bits(src.$get_float()))
            }

            #[inline]
            fn linear_lift_from_memory(_cx: &mut LiftContext<'_>, ty: InterfaceType, bytes: &[u8]) -> Result<Self> {
                debug_assert!(matches!(ty, InterfaceType::$ty));
                debug_assert!((bytes.as_ptr() as usize) % Self::SIZE32 == 0);
                Ok($float::from_le_bytes(bytes.try_into().unwrap()))
            }

            fn linear_lift_list_from_memory(cx: &mut LiftContext<'_>, list: &WasmList<Self>) -> Result<Vec<Self>> where Self: Sized {
                // See comments in `WasmList::get` for the panicking indexing
                let byte_size = list.len * mem::size_of::<Self>();
                let bytes = &cx.memory()[list.ptr..][..byte_size];

                // The canonical ABI requires that everything is aligned to its
                // own size, so this should be an aligned array.
                assert!(bytes.as_ptr().cast::<Self>().is_aligned());

                // Copy the resulting slice to a new Vec, handling endianness
                // in the process
                // TODO use `as_chunks` when https://github.com/rust-lang/rust/issues/74985
                // is stabilized
                Ok(
                    bytes
                        .chunks_exact(Self::SIZE32)
                        .map(|i| $float::from_le_bytes(i.try_into().unwrap()))
                        .collect()
                )
            }
        }
    };)*)
}

floats! {
    f32/get_f32 = Float32 with abi:SCALAR4
    f64/get_f64 = Float64 with abi:SCALAR8
}
1312
1313unsafe impl ComponentType for bool {
1314 type Lower = ValRaw;
1315
1316 const ABI: CanonicalAbiInfo = CanonicalAbiInfo::SCALAR1;
1317
1318 fn typecheck(ty: &InterfaceType, _types: &InstanceType<'_>) -> Result<()> {
1319 match ty {
1320 InterfaceType::Bool => Ok(()),
1321 other => bail!("expected `bool` found `{}`", desc(other)),
1322 }
1323 }
1324}
1325
1326unsafe impl Lower for bool {
1327 fn linear_lower_to_flat<T>(
1328 &self,
1329 _cx: &mut LowerContext<'_, T>,
1330 ty: InterfaceType,
1331 dst: &mut MaybeUninit<Self::Lower>,
1332 ) -> Result<()> {
1333 debug_assert!(matches!(ty, InterfaceType::Bool));
1334 dst.write(ValRaw::i32(*self as i32));
1335 Ok(())
1336 }
1337
1338 fn linear_lower_to_memory<T>(
1339 &self,
1340 cx: &mut LowerContext<'_, T>,
1341 ty: InterfaceType,
1342 offset: usize,
1343 ) -> Result<()> {
1344 debug_assert!(matches!(ty, InterfaceType::Bool));
1345 debug_assert!(offset % Self::SIZE32 == 0);
1346 cx.get::<1>(offset)[0] = *self as u8;
1347 Ok(())
1348 }
1349}
1350
1351unsafe impl Lift for bool {
1352 #[inline]
1353 fn linear_lift_from_flat(
1354 _cx: &mut LiftContext<'_>,
1355 ty: InterfaceType,
1356 src: &Self::Lower,
1357 ) -> Result<Self> {
1358 debug_assert!(matches!(ty, InterfaceType::Bool));
1359 match src.get_i32() {
1360 0 => Ok(false),
1361 _ => Ok(true),
1362 }
1363 }
1364
1365 #[inline]
1366 fn linear_lift_from_memory(
1367 _cx: &mut LiftContext<'_>,
1368 ty: InterfaceType,
1369 bytes: &[u8],
1370 ) -> Result<Self> {
1371 debug_assert!(matches!(ty, InterfaceType::Bool));
1372 match bytes[0] {
1373 0 => Ok(false),
1374 _ => Ok(true),
1375 }
1376 }
1377}
1378
1379unsafe impl ComponentType for char {
1380 type Lower = ValRaw;
1381
1382 const ABI: CanonicalAbiInfo = CanonicalAbiInfo::SCALAR4;
1383
1384 fn typecheck(ty: &InterfaceType, _types: &InstanceType<'_>) -> Result<()> {
1385 match ty {
1386 InterfaceType::Char => Ok(()),
1387 other => bail!("expected `char` found `{}`", desc(other)),
1388 }
1389 }
1390}
1391
1392unsafe impl Lower for char {
1393 #[inline]
1394 fn linear_lower_to_flat<T>(
1395 &self,
1396 _cx: &mut LowerContext<'_, T>,
1397 ty: InterfaceType,
1398 dst: &mut MaybeUninit<Self::Lower>,
1399 ) -> Result<()> {
1400 debug_assert!(matches!(ty, InterfaceType::Char));
1401 dst.write(ValRaw::u32(u32::from(*self)));
1402 Ok(())
1403 }
1404
1405 #[inline]
1406 fn linear_lower_to_memory<T>(
1407 &self,
1408 cx: &mut LowerContext<'_, T>,
1409 ty: InterfaceType,
1410 offset: usize,
1411 ) -> Result<()> {
1412 debug_assert!(matches!(ty, InterfaceType::Char));
1413 debug_assert!(offset % Self::SIZE32 == 0);
1414 *cx.get::<4>(offset) = u32::from(*self).to_le_bytes();
1415 Ok(())
1416 }
1417}
1418
1419unsafe impl Lift for char {
1420 #[inline]
1421 fn linear_lift_from_flat(
1422 _cx: &mut LiftContext<'_>,
1423 ty: InterfaceType,
1424 src: &Self::Lower,
1425 ) -> Result<Self> {
1426 debug_assert!(matches!(ty, InterfaceType::Char));
1427 Ok(char::try_from(src.get_u32())?)
1428 }
1429
1430 #[inline]
1431 fn linear_lift_from_memory(
1432 _cx: &mut LiftContext<'_>,
1433 ty: InterfaceType,
1434 bytes: &[u8],
1435 ) -> Result<Self> {
1436 debug_assert!(matches!(ty, InterfaceType::Char));
1437 debug_assert!((bytes.as_ptr() as usize) % Self::SIZE32 == 0);
1438 let bits = u32::from_le_bytes(bytes.try_into().unwrap());
1439 Ok(char::try_from(bits)?)
1440 }
1441}
1442
// FIXME(#4311): these probably need different constants for memory64
//
// `UTF16_TAG` is the top bit of a string's length under the latin1+utf16
// ("compact-utf-16") encoding: when set, the stored string is utf-16 code
// units; when clear, it's latin1 bytes. `MAX_STRING_BYTE_LENGTH` caps byte
// lengths at 2^31-1 so that the tag bit stays available.
const UTF16_TAG: usize = 1 << 31;
const MAX_STRING_BYTE_LENGTH: usize = (1 << 31) - 1;
1446
1447// Note that this is similar to `ComponentType for WasmStr` except it can only
1448// be used for lowering, not lifting.
1449unsafe impl ComponentType for str {
1450 type Lower = [ValRaw; 2];
1451
1452 const ABI: CanonicalAbiInfo = CanonicalAbiInfo::POINTER_PAIR;
1453
1454 fn typecheck(ty: &InterfaceType, _types: &InstanceType<'_>) -> Result<()> {
1455 match ty {
1456 InterfaceType::String => Ok(()),
1457 other => bail!("expected `string` found `{}`", desc(other)),
1458 }
1459 }
1460}
1461
unsafe impl Lower for str {
    fn linear_lower_to_flat<T>(
        &self,
        cx: &mut LowerContext<'_, T>,
        ty: InterfaceType,
        dst: &mut MaybeUninit<[ValRaw; 2]>,
    ) -> Result<()> {
        debug_assert!(matches!(ty, InterfaceType::String));
        // Copy the string into guest linear memory first, then flatten the
        // resulting (ptr, len) pair into two core values.
        let (ptr, len) = lower_string(cx, self)?;
        // See "WRITEPTR64" above for why this is always storing a 64-bit
        // integer.
        map_maybe_uninit!(dst[0]).write(ValRaw::i64(ptr as i64));
        map_maybe_uninit!(dst[1]).write(ValRaw::i64(len as i64));
        Ok(())
    }

    fn linear_lower_to_memory<T>(
        &self,
        cx: &mut LowerContext<'_, T>,
        ty: InterfaceType,
        offset: usize,
    ) -> Result<()> {
        debug_assert!(matches!(ty, InterfaceType::String));
        debug_assert!(offset % (Self::ALIGN32 as usize) == 0);
        let (ptr, len) = lower_string(cx, self)?;
        // The in-memory representation is a little-endian (ptr: u32, len: u32)
        // pair written at `offset`.
        // FIXME(#4311): needs memory64 handling
        *cx.get(offset + 0) = u32::try_from(ptr).unwrap().to_le_bytes();
        *cx.get(offset + 4) = u32::try_from(len).unwrap().to_le_bytes();
        Ok(())
    }
}
1493
/// Copies `string` into guest linear memory, transcoding to the guest's
/// configured string encoding, and returns the resulting `(ptr, len)` pair.
///
/// `len` is encoding-dependent: a byte count for utf-8, a count of 16-bit
/// code units for utf-16, and for latin1+utf16 either a latin1 byte count or
/// a utf-16 code-unit count tagged with `UTF16_TAG`.
fn lower_string<T>(cx: &mut LowerContext<'_, T>, string: &str) -> Result<(usize, usize)> {
    // Note that in general the wasm module can't assume anything about what the
    // host strings are encoded as. Additionally hosts are allowed to have
    // differently-encoded strings at runtime. Finally when copying a string
    // into wasm it's somewhat strict in the sense that the various patterns of
    // allocation and such are already dictated for us.
    //
    // In general what this means is that when copying a string from the host
    // into the destination we need to follow one of the cases of copying into
    // WebAssembly. It doesn't particularly matter which case as long as it ends
    // up in the right encoding. For example a destination encoding of
    // latin1+utf16 has a number of ways to get copied into and we do something
    // here that isn't the default "utf8 to latin1+utf16" since we have access
    // to simd-accelerated helpers in the `encoding_rs` crate. This is ok though
    // because we can fake that the host string was already stored in latin1
    // format and follow that copy pattern instead.
    match cx.options().string_encoding {
        // This corresponds to `store_string_copy` in the canonical ABI where
        // the host's representation is utf-8 and the wasm module wants utf-8 so
        // a copy is all that's needed (and the `realloc` can be precise for the
        // initial memory allocation).
        StringEncoding::Utf8 => {
            if string.len() > MAX_STRING_BYTE_LENGTH {
                bail!(
                    "string length of {} too large to copy into wasm",
                    string.len()
                );
            }
            let ptr = cx.realloc(0, 0, 1, string.len())?;
            cx.as_slice_mut()[ptr..][..string.len()].copy_from_slice(string.as_bytes());
            Ok((ptr, string.len()))
        }

        // This corresponds to `store_utf8_to_utf16` in the canonical ABI. Here
        // an over-large allocation is performed and then shrunk afterwards if
        // necessary.
        StringEncoding::Utf16 => {
            // Worst case: every utf-8 byte becomes its own u16 code unit.
            let size = string.len() * 2;
            if size > MAX_STRING_BYTE_LENGTH {
                bail!(
                    "string length of {} too large to copy into wasm",
                    string.len()
                );
            }
            let mut ptr = cx.realloc(0, 0, 2, size)?;
            let mut copied = 0;
            let bytes = &mut cx.as_slice_mut()[ptr..][..size];
            for (u, bytes) in string.encode_utf16().zip(bytes.chunks_mut(2)) {
                let u_bytes = u.to_le_bytes();
                bytes[0] = u_bytes[0];
                bytes[1] = u_bytes[1];
                copied += 1;
            }
            // Shrink the allocation down to the number of code units actually
            // written; the returned length counts code units, not bytes.
            if (copied * 2) < size {
                ptr = cx.realloc(ptr, size, 2, copied * 2)?;
            }
            Ok((ptr, copied))
        }

        StringEncoding::CompactUtf16 => {
            // This corresponds to `store_string_to_latin1_or_utf16`
            //
            // Optimistically encode as latin1 (one byte per char); `result`
            // counts latin1 bytes written so far. On the first char that
            // doesn't fit in latin1 the whole allocation is switched to
            // utf-16.
            let bytes = string.as_bytes();
            let mut iter = string.char_indices();
            let mut ptr = cx.realloc(0, 0, 2, bytes.len())?;
            let mut dst = &mut cx.as_slice_mut()[ptr..][..bytes.len()];
            let mut result = 0;
            while let Some((i, ch)) = iter.next() {
                // Test if this `char` fits into the latin1 encoding.
                if let Ok(byte) = u8::try_from(u32::from(ch)) {
                    dst[result] = byte;
                    result += 1;
                    continue;
                }

                // .. if utf16 is forced to be used then the allocation is
                // bumped up to the maximum size.
                let worst_case = bytes
                    .len()
                    .checked_mul(2)
                    .ok_or_else(|| format_err!("byte length overflow"))?;
                if worst_case > MAX_STRING_BYTE_LENGTH {
                    bail!("byte length too large");
                }
                ptr = cx.realloc(ptr, bytes.len(), 2, worst_case)?;
                dst = &mut cx.as_slice_mut()[ptr..][..worst_case];

                // Previously encoded latin1 bytes are inflated to their 16-bit
                // size for utf16
                for i in (0..result).rev() {
                    dst[2 * i] = dst[i];
                    dst[2 * i + 1] = 0;
                }

                // and then the remainder of the string is encoded.
                for (u, bytes) in string[i..]
                    .encode_utf16()
                    .zip(dst[2 * result..].chunks_mut(2))
                {
                    let u_bytes = u.to_le_bytes();
                    bytes[0] = u_bytes[0];
                    bytes[1] = u_bytes[1];
                    result += 1;
                }
                // Shrink to the actual code-unit count and tag the returned
                // length so the guest knows this is the utf-16 flavor.
                if worst_case > 2 * result {
                    ptr = cx.realloc(ptr, worst_case, 2, 2 * result)?;
                }
                return Ok((ptr, result | UTF16_TAG));
            }
            // Entire string fit in latin1; shrink the allocation down to the
            // bytes actually written (untagged length).
            if result < bytes.len() {
                ptr = cx.realloc(ptr, bytes.len(), 2, result)?;
            }
            Ok((ptr, result))
        }
    }
}
1609
/// Representation of a string located in linear memory in a WebAssembly
/// instance.
///
/// This type can be used in place of `String` and `str` for string-taking APIs
/// in some situations. The purpose of this type is to represent a range of
/// validated bytes within a component but does not actually copy the bytes. The
/// primary method, [`WasmStr::to_str`], attempts to return a reference to the
/// string directly located in the component's memory, avoiding a copy into the
/// host if possible.
///
/// The downside of this type, however, is that accessing a string requires a
/// [`Store`](crate::Store) pointer (via [`StoreContext`]). Bindings generated
/// by [`bindgen!`](crate::component::bindgen), for example, do not have access
/// to [`StoreContext`] and thus can't use this type.
///
/// This is intended for more advanced use cases such as defining functions
/// directly in a [`Linker`](crate::component::Linker). It's expected that in
/// the future [`bindgen!`](crate::component::bindgen) will also have a way to
/// use this type.
///
/// This type is used with [`TypedFunc`], for example, when WebAssembly returns
/// a string. This type cannot be used to give a string to WebAssembly, instead
/// `&str` should be used for that (since it's coming from the host).
///
/// Note that this type represents an in-bounds string in linear memory, but it
/// does not represent a valid string (e.g. valid utf-8). Validation happens
/// when [`WasmStr::to_str`] is called.
///
/// Also note that this type does not implement [`Lower`], it only implements
/// [`Lift`].
pub struct WasmStr {
    // Byte offset of the string's start within the instance's linear memory.
    ptr: usize,
    // Raw canonical-ABI length: a byte count for utf-8, a code-unit count for
    // utf-16, possibly tagged with `UTF16_TAG` under latin1+utf16. Not
    // necessarily a byte count.
    len: usize,
    // Canonical options (memory, string encoding) this string was lifted with.
    options: OptionsIndex,
    // Instance owning the linear memory that `ptr`/`len` index into.
    instance: Instance,
}
1646
impl WasmStr {
    /// Validates that the guest-supplied `ptr`/`len` pair is in-bounds of the
    /// instance's linear memory and records where the string lives.
    ///
    /// No encoding validation (e.g. utf-8 well-formedness) happens here;
    /// that's deferred to [`WasmStr::to_str`].
    pub(crate) fn new(ptr: usize, len: usize, cx: &mut LiftContext<'_>) -> Result<WasmStr> {
        // Convert the encoding-specific length into a byte length using
        // checked arithmetic so hostile values can't overflow.
        let byte_len = match cx.options().string_encoding {
            StringEncoding::Utf8 => Some(len),
            StringEncoding::Utf16 => len.checked_mul(2),
            StringEncoding::CompactUtf16 => {
                if len & UTF16_TAG == 0 {
                    Some(len)
                } else {
                    // Tagged length means utf-16 code units: strip the tag
                    // and double for the byte count.
                    (len ^ UTF16_TAG).checked_mul(2)
                }
            }
        };
        // The whole `[ptr, ptr + byte_len)` range must lie within memory.
        match byte_len.and_then(|len| ptr.checked_add(len)) {
            Some(n) if n <= cx.memory().len() => {}
            _ => bail!("string pointer/length out of bounds of memory"),
        }
        Ok(WasmStr {
            ptr,
            len,
            options: cx.options_index(),
            instance: cx.instance_handle(),
        })
    }

    /// Returns the underlying string that this cursor points to.
    ///
    /// Note that this will internally decode the string from the wasm's
    /// encoding to utf-8 and additionally perform validation.
    ///
    /// The `store` provided must be the store where this string lives to
    /// access the correct memory.
    ///
    /// # Errors
    ///
    /// Returns an error if the string wasn't encoded correctly (e.g. invalid
    /// utf-8).
    ///
    /// # Panics
    ///
    /// Panics if this string is not owned by `store`.
    //
    // TODO: should add accessors for specifically utf-8 and utf-16 that perhaps
    // in an opt-in basis don't do validation. Additionally there should be some
    // method that returns `[u16]` after validating to avoid the utf16-to-utf8
    // transcode.
    pub fn to_str<'a, T: 'static>(
        &self,
        store: impl Into<StoreContext<'a, T>>,
    ) -> Result<Cow<'a, str>> {
        let store = store.into().0;
        // Look up the memory/encoding recorded at lift time via the stored
        // options index.
        let memory = self.instance.options_memory(store, self.options);
        let encoding = self.instance.options(store, self.options).string_encoding;
        self.to_str_from_memory(encoding, memory)
    }

    /// Decodes this string out of `memory` with the given `encoding`,
    /// borrowing from `memory` where possible and allocating otherwise.
    pub(crate) fn to_str_from_memory<'a>(
        &self,
        encoding: StringEncoding,
        memory: &'a [u8],
    ) -> Result<Cow<'a, str>> {
        match encoding {
            StringEncoding::Utf8 => self.decode_utf8(memory),
            StringEncoding::Utf16 => self.decode_utf16(memory, self.len),
            StringEncoding::CompactUtf16 => {
                // The tag bit in `len` selects latin1 vs utf-16.
                if self.len & UTF16_TAG == 0 {
                    self.decode_latin1(memory)
                } else {
                    self.decode_utf16(memory, self.len ^ UTF16_TAG)
                }
            }
        }
    }

    /// Validates the bytes as utf-8, borrowing directly from `memory`.
    fn decode_utf8<'a>(&self, memory: &'a [u8]) -> Result<Cow<'a, str>> {
        // Note that bounds-checking already happen in construction of `WasmStr`
        // so this is never expected to panic. This could theoretically be
        // unchecked indexing if we're feeling wild enough.
        Ok(str::from_utf8(&memory[self.ptr..][..self.len])?.into())
    }

    /// Transcodes `len` little-endian utf-16 code units to an owned `String`.
    fn decode_utf16<'a>(&self, memory: &'a [u8], len: usize) -> Result<Cow<'a, str>> {
        // See notes in `decode_utf8` for why this is panicking indexing.
        let memory = &memory[self.ptr..][..len * 2];
        Ok(core::char::decode_utf16(
            memory
                .chunks(2)
                .map(|chunk| u16::from_le_bytes(chunk.try_into().unwrap())),
        )
        .collect::<Result<String, _>>()?
        .into())
    }

    /// Decodes latin1 bytes; borrows when the content is also valid ascii.
    fn decode_latin1<'a>(&self, memory: &'a [u8]) -> Result<Cow<'a, str>> {
        // See notes in `decode_utf8` for why this is panicking indexing.
        Ok(encoding_rs::mem::decode_latin1(
            &memory[self.ptr..][..self.len],
        ))
    }
}
1747
1748// Note that this is similar to `ComponentType for str` except it can only be
1749// used for lifting, not lowering.
1750unsafe impl ComponentType for WasmStr {
1751 type Lower = <str as ComponentType>::Lower;
1752
1753 const ABI: CanonicalAbiInfo = CanonicalAbiInfo::POINTER_PAIR;
1754
1755 fn typecheck(ty: &InterfaceType, _types: &InstanceType<'_>) -> Result<()> {
1756 match ty {
1757 InterfaceType::String => Ok(()),
1758 other => bail!("expected `string` found `{}`", desc(other)),
1759 }
1760 }
1761}
1762
unsafe impl Lift for WasmStr {
    // Lifts from the flat representation: two core values holding the guest
    // pointer and the length.
    #[inline]
    fn linear_lift_from_flat(
        cx: &mut LiftContext<'_>,
        ty: InterfaceType,
        src: &Self::Lower,
    ) -> Result<Self> {
        debug_assert!(matches!(ty, InterfaceType::String));
        // FIXME(#4311): needs memory64 treatment
        let ptr = src[0].get_u32();
        let len = src[1].get_u32();
        let (ptr, len) = (usize::try_from(ptr)?, usize::try_from(len)?);
        WasmStr::new(ptr, len, cx)
    }

    // Lifts from linear memory: an 8-byte little-endian (ptr, len) pair.
    #[inline]
    fn linear_lift_from_memory(
        cx: &mut LiftContext<'_>,
        ty: InterfaceType,
        bytes: &[u8],
    ) -> Result<Self> {
        debug_assert!(matches!(ty, InterfaceType::String));
        debug_assert!((bytes.as_ptr() as usize) % (Self::ALIGN32 as usize) == 0);
        // FIXME(#4311): needs memory64 treatment
        let ptr = u32::from_le_bytes(bytes[..4].try_into().unwrap());
        let len = u32::from_le_bytes(bytes[4..].try_into().unwrap());
        let (ptr, len) = (usize::try_from(ptr)?, usize::try_from(len)?);
        WasmStr::new(ptr, len, cx)
    }
}
1793
1794unsafe impl<T> ComponentType for [T]
1795where
1796 T: ComponentType,
1797{
1798 type Lower = [ValRaw; 2];
1799
1800 const ABI: CanonicalAbiInfo = CanonicalAbiInfo::POINTER_PAIR;
1801
1802 fn typecheck(ty: &InterfaceType, types: &InstanceType<'_>) -> Result<()> {
1803 match ty {
1804 InterfaceType::List(t) => T::typecheck(&types.types[*t].element, types),
1805 other => bail!("expected `list` found `{}`", desc(other)),
1806 }
1807 }
1808}
1809
unsafe impl<T> Lower for [T]
where
    T: Lower,
{
    // Lowers by copying the list contents into guest memory and writing the
    // resulting (ptr, len) pair into the flat storage.
    fn linear_lower_to_flat<U>(
        &self,
        cx: &mut LowerContext<'_, U>,
        ty: InterfaceType,
        dst: &mut MaybeUninit<[ValRaw; 2]>,
    ) -> Result<()> {
        let elem = match ty {
            InterfaceType::List(i) => cx.types[i].element,
            _ => bad_type_info(),
        };
        let (ptr, len) = lower_list(cx, elem, self)?;
        // See "WRITEPTR64" above for why this is always storing a 64-bit
        // integer.
        map_maybe_uninit!(dst[0]).write(ValRaw::i64(ptr as i64));
        map_maybe_uninit!(dst[1]).write(ValRaw::i64(len as i64));
        Ok(())
    }

    // Lowers by copying the list contents into guest memory and storing the
    // (ptr, len) pair as two little-endian u32s at `offset`.
    fn linear_lower_to_memory<U>(
        &self,
        cx: &mut LowerContext<'_, U>,
        ty: InterfaceType,
        offset: usize,
    ) -> Result<()> {
        let elem = match ty {
            InterfaceType::List(i) => cx.types[i].element,
            _ => bad_type_info(),
        };
        debug_assert!(offset % (Self::ALIGN32 as usize) == 0);
        let (ptr, len) = lower_list(cx, elem, self)?;
        *cx.get(offset + 0) = u32::try_from(ptr).unwrap().to_le_bytes();
        *cx.get(offset + 4) = u32::try_from(len).unwrap().to_le_bytes();
        Ok(())
    }
}
1849
1850// FIXME: this is not a memcpy for `T` where `T` is something like `u8`.
1851//
1852// Some attempts to fix this have proved not fruitful. In isolation an attempt
1853// was made where:
1854//
1855// * `MemoryMut` stored a `*mut [u8]` as its "last view" of memory to avoid
1856// reloading the base pointer constantly. This view is reset on `realloc`.
1857// * The bounds-checks in `MemoryMut::get` were removed (replaced with unsafe
1858// indexing)
1859//
1860// Even then though this didn't correctly vectorized for `Vec<u8>`. It's not
1861// entirely clear why but it appeared that it's related to reloading the base
1862// pointer to memory (I guess from `MemoryMut` itself?). Overall I'm not really
1863// clear on what's happening there, but this is surely going to be a performance
1864// bottleneck in the future.
/// Lowers the host `list` of `T` values into the guest's linear memory.
///
/// Allocates `list.len() * T::SIZE32` bytes in the guest via `cx.realloc` and
/// then stores each element into the allocation. Returns the guest pointer
/// and the element count on success.
///
/// # Errors
///
/// Returns an error if the byte size computation overflows `usize`, if the
/// guest allocation fails, or if storing any element fails.
fn lower_list<T, U>(
    cx: &mut LowerContext<'_, U>,
    ty: InterfaceType,
    list: &[T],
) -> Result<(usize, usize)>
where
    T: Lower,
{
    let elem_size = T::SIZE32;
    let size = list
        .len()
        .checked_mul(elem_size)
        .ok_or_else(|| format_err!("size overflow copying a list"))?;
    let ptr = cx.realloc(0, 0, T::ALIGN32, size)?;
    T::linear_store_list_to_memory(cx, ty, ptr, list)?;
    Ok((ptr, list.len()))
}
1882
1883/// Representation of a list of values that are owned by a WebAssembly instance.
1884///
1885/// For some more commentary about the rationale for this type see the
1886/// documentation of [`WasmStr`]. In summary this type can avoid a copy when
1887/// passing data to the host in some situations but is additionally more
1888/// cumbersome to use by requiring a [`Store`](crate::Store) to be provided.
1889///
1890/// This type is used whenever a `(list T)` is returned from a [`TypedFunc`],
1891/// for example. This type represents a list of values that are stored in linear
1892/// memory which are waiting to be read.
1893///
1894/// Note that this type represents only a valid range of bytes for the list
1895/// itself, it does not represent validity of the elements themselves and that's
1896/// performed when they're iterated.
1897///
1898/// Note that this type does not implement the [`Lower`] trait, only [`Lift`].
pub struct WasmList<T> {
    // Byte offset of the first element within the instance's linear memory.
    ptr: usize,
    // Number of elements (not bytes) in the list.
    len: usize,
    // Canonical-ABI options this list was lifted with; used to re-locate the
    // memory when elements are read later.
    options: OptionsIndex,
    // Component-model type of each element, consulted when lifting elements.
    elem: InterfaceType,
    // The instance whose linear memory holds the list contents.
    instance: Instance,
    _marker: marker::PhantomData<T>,
}
1907
1908impl<T: Lift> WasmList<T> {
1909 pub(crate) fn new(
1910 ptr: usize,
1911 len: usize,
1912 cx: &mut LiftContext<'_>,
1913 elem: InterfaceType,
1914 ) -> Result<WasmList<T>> {
1915 match len
1916 .checked_mul(T::SIZE32)
1917 .and_then(|len| ptr.checked_add(len))
1918 {
1919 Some(n) if n <= cx.memory().len() => {}
1920 _ => bail!("list pointer/length out of bounds of memory"),
1921 }
1922 if ptr % usize::try_from(T::ALIGN32)? != 0 {
1923 bail!("list pointer is not aligned")
1924 }
1925 Ok(WasmList {
1926 ptr,
1927 len,
1928 options: cx.options_index(),
1929 elem,
1930 instance: cx.instance_handle(),
1931 _marker: marker::PhantomData,
1932 })
1933 }
1934
1935 /// Returns the item length of this vector
1936 #[inline]
1937 pub fn len(&self) -> usize {
1938 self.len
1939 }
1940
1941 /// Gets the `n`th element of this list.
1942 ///
1943 /// Returns `None` if `index` is out of bounds. Returns `Some(Err(..))` if
1944 /// the value couldn't be decoded (it was invalid). Returns `Some(Ok(..))`
1945 /// if the value is valid.
1946 ///
1947 /// # Panics
1948 ///
1949 /// This function will panic if the string did not originally come from the
1950 /// `store` specified.
1951 //
1952 // TODO: given that interface values are intended to be consumed in one go
1953 // should we even expose a random access iteration API? In theory all
1954 // consumers should be validating through the iterator.
1955 pub fn get(&self, mut store: impl AsContextMut, index: usize) -> Option<Result<T>> {
1956 let store = store.as_context_mut().0;
1957 let mut cx = LiftContext::new(store, self.options, self.instance);
1958 self.get_from_store(&mut cx, index)
1959 }
1960
1961 fn get_from_store(&self, cx: &mut LiftContext<'_>, index: usize) -> Option<Result<T>> {
1962 if index >= self.len {
1963 return None;
1964 }
1965 // Note that this is using panicking indexing and this is expected to
1966 // never fail. The bounds-checking here happened during the construction
1967 // of the `WasmList` itself which means these should always be in-bounds
1968 // (and wasm memory can only grow). This could theoretically be
1969 // unchecked indexing if we're confident enough and it's actually a perf
1970 // issue one day.
1971 let bytes = &cx.memory()[self.ptr + index * T::SIZE32..][..T::SIZE32];
1972 Some(T::linear_lift_from_memory(cx, self.elem, bytes))
1973 }
1974
1975 /// Returns an iterator over the elements of this list.
1976 ///
1977 /// Each item of the list may fail to decode and is represented through the
1978 /// `Result` value of the iterator.
1979 pub fn iter<'a, U: 'static>(
1980 &'a self,
1981 store: impl Into<StoreContextMut<'a, U>>,
1982 ) -> impl ExactSizeIterator<Item = Result<T>> + 'a {
1983 let store = store.into().0;
1984 let mut cx = LiftContext::new(store, self.options, self.instance);
1985 (0..self.len).map(move |i| self.get_from_store(&mut cx, i).unwrap())
1986 }
1987}
1988
// Generates a zero-copy `as_le_slice` accessor for `WasmList<$i>` for each
// fixed-width integer type `$i`, returning a direct view into guest memory.
macro_rules! raw_wasm_list_accessors {
    ($($i:ident)*) => ($(
        impl WasmList<$i> {
            /// Get access to the raw underlying memory for this list.
            ///
            /// This method will return a direct slice into the original wasm
            /// module's linear memory where the data for this slice is stored.
            /// This allows the embedder to have efficient access to the
            /// underlying memory if needed and avoid copies and such if
            /// desired.
            ///
            /// Note that multi-byte integers are stored in little-endian format
            /// so portable processing of this slice must be aware of the host's
            /// byte-endianness. The `from_le` constructors in the Rust standard
            /// library should be suitable for converting from little-endian.
            ///
            /// # Panics
            ///
            /// Panics if the `store` provided is not the one from which this
            /// slice originated.
            pub fn as_le_slice<'a, T: 'static>(&self, store: impl Into<StoreContext<'a, T>>) -> &'a [$i] {
                let memory = self.instance.options_memory(store.into().0, self.options);
                self._as_le_slice(memory)
            }

            fn _as_le_slice<'a>(&self, all_of_memory: &'a [u8]) -> &'a [$i] {
                // See comments in `WasmList::get` for the panicking indexing
                let byte_size = self.len * mem::size_of::<$i>();
                let bytes = &all_of_memory[self.ptr..][..byte_size];

                // The canonical ABI requires that everything is aligned to its
                // own size, so this should be an aligned array. Furthermore the
                // alignment of primitive integers for hosts should be smaller
                // than or equal to the size of the primitive itself, meaning
                // that a wasm canonical-abi-aligned list is also aligned for
                // the host. That should mean that the head/tail slices here are
                // empty.
                //
                // Also note that the `unsafe` here is needed since the type
                // we're aligning to isn't guaranteed to be valid, but in our
                // case it's just integers and bytes so this should be safe.
                unsafe {
                    let (head, body, tail) = bytes.align_to::<$i>();
                    assert!(head.is_empty() && tail.is_empty());
                    body
                }
            }
        }
    )*)
}
2039
// Instantiate the raw-slice accessors for every fixed-width integer type in
// the canonical ABI.
raw_wasm_list_accessors! {
    i8 i16 i32 i64
    u8 u16 u32 u64
}
2044
// Note that this is similar to `ComponentType for [T]` except it can only be
// used for lifting, not lowering.
unsafe impl<T: ComponentType> ComponentType for WasmList<T> {
    // Same flat representation as a host-side slice: a (pointer, length) pair.
    type Lower = <[T] as ComponentType>::Lower;

    const ABI: CanonicalAbiInfo = CanonicalAbiInfo::POINTER_PAIR;

    fn typecheck(ty: &InterfaceType, types: &InstanceType<'_>) -> Result<()> {
        // Delegate to `[T]` since both types represent `(list T)`.
        <[T] as ComponentType>::typecheck(ty, types)
    }
}
2056
unsafe impl<T: Lift> Lift for WasmList<T> {
    // Lifts from the flat representation: two core values holding the guest
    // pointer and the element count.
    fn linear_lift_from_flat(
        cx: &mut LiftContext<'_>,
        ty: InterfaceType,
        src: &Self::Lower,
    ) -> Result<Self> {
        let elem = match ty {
            InterfaceType::List(i) => cx.types[i].element,
            _ => bad_type_info(),
        };
        // FIXME(#4311): needs memory64 treatment
        let ptr = src[0].get_u32();
        let len = src[1].get_u32();
        let (ptr, len) = (usize::try_from(ptr)?, usize::try_from(len)?);
        WasmList::new(ptr, len, cx, elem)
    }

    // Lifts from linear memory: an 8-byte little-endian (ptr, len) pair.
    fn linear_lift_from_memory(
        cx: &mut LiftContext<'_>,
        ty: InterfaceType,
        bytes: &[u8],
    ) -> Result<Self> {
        let elem = match ty {
            InterfaceType::List(i) => cx.types[i].element,
            _ => bad_type_info(),
        };
        debug_assert!((bytes.as_ptr() as usize) % (Self::ALIGN32 as usize) == 0);
        // FIXME(#4311): needs memory64 treatment
        let ptr = u32::from_le_bytes(bytes[..4].try_into().unwrap());
        let len = u32::from_le_bytes(bytes[4..].try_into().unwrap());
        let (ptr, len) = (usize::try_from(ptr)?, usize::try_from(len)?);
        WasmList::new(ptr, len, cx, elem)
    }
}
2091
2092/// Verify that the given wasm type is a tuple with the expected fields in the right order.
2093fn typecheck_tuple(
2094 ty: &InterfaceType,
2095 types: &InstanceType<'_>,
2096 expected: &[fn(&InterfaceType, &InstanceType<'_>) -> Result<()>],
2097) -> Result<()> {
2098 match ty {
2099 InterfaceType::Tuple(t) => {
2100 let tuple = &types.types[*t];
2101 if tuple.types.len() != expected.len() {
2102 bail!(
2103 "expected {}-tuple, found {}-tuple",
2104 expected.len(),
2105 tuple.types.len()
2106 );
2107 }
2108 for (ty, check) in tuple.types.iter().zip(expected) {
2109 check(ty, types)?;
2110 }
2111 Ok(())
2112 }
2113 other => bail!("expected `tuple` found `{}`", desc(other)),
2114 }
2115}
2116
2117/// Verify that the given wasm type is a record with the expected fields in the right order and with the right
2118/// names.
2119pub fn typecheck_record(
2120 ty: &InterfaceType,
2121 types: &InstanceType<'_>,
2122 expected: &[(&str, fn(&InterfaceType, &InstanceType<'_>) -> Result<()>)],
2123) -> Result<()> {
2124 match ty {
2125 InterfaceType::Record(index) => {
2126 let fields = &types.types[*index].fields;
2127
2128 if fields.len() != expected.len() {
2129 bail!(
2130 "expected record of {} fields, found {} fields",
2131 expected.len(),
2132 fields.len()
2133 );
2134 }
2135
2136 for (field, &(name, check)) in fields.iter().zip(expected) {
2137 check(&field.ty, types)
2138 .with_context(|| format!("type mismatch for field {name}"))?;
2139
2140 if field.name != name {
2141 bail!("expected record field named {}, found {}", name, field.name);
2142 }
2143 }
2144
2145 Ok(())
2146 }
2147 other => bail!("expected `record` found `{}`", desc(other)),
2148 }
2149}
2150
2151/// Verify that the given wasm type is a variant with the expected cases in the right order and with the right
2152/// names.
2153pub fn typecheck_variant(
2154 ty: &InterfaceType,
2155 types: &InstanceType<'_>,
2156 expected: &[(
2157 &str,
2158 Option<fn(&InterfaceType, &InstanceType<'_>) -> Result<()>>,
2159 )],
2160) -> Result<()> {
2161 match ty {
2162 InterfaceType::Variant(index) => {
2163 let cases = &types.types[*index].cases;
2164
2165 if cases.len() != expected.len() {
2166 bail!(
2167 "expected variant of {} cases, found {} cases",
2168 expected.len(),
2169 cases.len()
2170 );
2171 }
2172
2173 for ((case_name, case_ty), &(name, check)) in cases.iter().zip(expected) {
2174 if *case_name != name {
2175 bail!("expected variant case named {name}, found {case_name}");
2176 }
2177
2178 match (check, case_ty) {
2179 (Some(check), Some(ty)) => check(ty, types)
2180 .with_context(|| format!("type mismatch for case {name}"))?,
2181 (None, None) => {}
2182 (Some(_), None) => {
2183 bail!("case `{name}` has no type but one was expected")
2184 }
2185 (None, Some(_)) => {
2186 bail!("case `{name}` has a type but none was expected")
2187 }
2188 }
2189 }
2190
2191 Ok(())
2192 }
2193 other => bail!("expected `variant` found `{}`", desc(other)),
2194 }
2195}
2196
2197/// Verify that the given wasm type is a enum with the expected cases in the right order and with the right
2198/// names.
2199pub fn typecheck_enum(
2200 ty: &InterfaceType,
2201 types: &InstanceType<'_>,
2202 expected: &[&str],
2203) -> Result<()> {
2204 match ty {
2205 InterfaceType::Enum(index) => {
2206 let names = &types.types[*index].names;
2207
2208 if names.len() != expected.len() {
2209 bail!(
2210 "expected enum of {} names, found {} names",
2211 expected.len(),
2212 names.len()
2213 );
2214 }
2215
2216 for (name, expected) in names.iter().zip(expected) {
2217 if name != expected {
2218 bail!("expected enum case named {expected}, found {name}");
2219 }
2220 }
2221
2222 Ok(())
2223 }
2224 other => bail!("expected `enum` found `{}`", desc(other)),
2225 }
2226}
2227
2228/// Verify that the given wasm type is a flags type with the expected flags in the right order and with the right
2229/// names.
2230pub fn typecheck_flags(
2231 ty: &InterfaceType,
2232 types: &InstanceType<'_>,
2233 expected: &[&str],
2234) -> Result<()> {
2235 match ty {
2236 InterfaceType::Flags(index) => {
2237 let names = &types.types[*index].names;
2238
2239 if names.len() != expected.len() {
2240 bail!(
2241 "expected flags type with {} names, found {} names",
2242 expected.len(),
2243 names.len()
2244 );
2245 }
2246
2247 for (name, expected) in names.iter().zip(expected) {
2248 if name != expected {
2249 bail!("expected flag named {expected}, found {name}");
2250 }
2251 }
2252
2253 Ok(())
2254 }
2255 other => bail!("expected `flags` found `{}`", desc(other)),
2256 }
2257}
2258
/// Format the specified bitflags using the specified names for debugging
pub fn format_flags(bits: &[u32], names: &[&str], f: &mut fmt::Formatter) -> fmt::Result {
    f.write_str("(")?;
    // Separator is empty before the first set flag, then `|` afterwards.
    let mut sep = "";
    for (index, name) in names.iter().enumerate() {
        // Flag `index` lives in bit `index % 32` of word `index / 32`.
        if bits[index / 32] & (1 << (index % 32)) != 0 {
            f.write_str(sep)?;
            f.write_str(name)?;
            sep = "|";
        }
    }
    f.write_str(")")
}
2276
2277unsafe impl<T> ComponentType for Option<T>
2278where
2279 T: ComponentType,
2280{
2281 type Lower = TupleLower<<u32 as ComponentType>::Lower, T::Lower>;
2282
2283 const ABI: CanonicalAbiInfo = CanonicalAbiInfo::variant_static(&[None, Some(T::ABI)]);
2284
2285 fn typecheck(ty: &InterfaceType, types: &InstanceType<'_>) -> Result<()> {
2286 match ty {
2287 InterfaceType::Option(t) => T::typecheck(&types.types[*t].ty, types),
2288 other => bail!("expected `option` found `{}`", desc(other)),
2289 }
2290 }
2291}
2292
unsafe impl<T> ComponentVariant for Option<T>
where
    T: ComponentType,
{
    // `None` carries no payload and `Some` carries `T`; the order here must
    // match the discriminants used by `Lower`/`Lift` below (0 => None,
    // 1 => Some).
    const CASES: &'static [Option<CanonicalAbiInfo>] = &[None, Some(T::ABI)];
}
2299
unsafe impl<T> Lower for Option<T>
where
    T: Lower,
{
    // Writes the discriminant (0 or 1) followed by the payload into the flat
    // core-value storage.
    fn linear_lower_to_flat<U>(
        &self,
        cx: &mut LowerContext<'_, U>,
        ty: InterfaceType,
        dst: &mut MaybeUninit<Self::Lower>,
    ) -> Result<()> {
        let payload = match ty {
            InterfaceType::Option(ty) => cx.types[ty].ty,
            _ => bad_type_info(),
        };
        match self {
            None => {
                map_maybe_uninit!(dst.A1).write(ValRaw::i32(0));
                // Note that this is unsafe as we're writing an arbitrary
                // bit-pattern to an arbitrary type, but part of the unsafe
                // contract of the `ComponentType` trait is that we can assign
                // any bit-pattern. By writing all zeros here we're ensuring
                // that the core wasm arguments this translates to will all be
                // zeros (as the canonical ABI requires).
                unsafe {
                    map_maybe_uninit!(dst.A2).as_mut_ptr().write_bytes(0u8, 1);
                }
            }
            Some(val) => {
                map_maybe_uninit!(dst.A1).write(ValRaw::i32(1));
                val.linear_lower_to_flat(cx, payload, map_maybe_uninit!(dst.A2))?;
            }
        }
        Ok(())
    }

    // Stores the discriminant byte followed by the payload at its
    // ABI-computed offset in linear memory.
    fn linear_lower_to_memory<U>(
        &self,
        cx: &mut LowerContext<'_, U>,
        ty: InterfaceType,
        offset: usize,
    ) -> Result<()> {
        debug_assert!(offset % (Self::ALIGN32 as usize) == 0);
        let payload = match ty {
            InterfaceType::Option(ty) => cx.types[ty].ty,
            _ => bad_type_info(),
        };
        match self {
            None => {
                // Only the discriminant is written; the payload bytes in
                // memory are left as-is for the `None` case.
                cx.get::<1>(offset)[0] = 0;
            }
            Some(val) => {
                cx.get::<1>(offset)[0] = 1;
                val.linear_lower_to_memory(
                    cx,
                    payload,
                    offset + (Self::INFO.payload_offset32 as usize),
                )?;
            }
        }
        Ok(())
    }
}
2362
unsafe impl<T> Lift for Option<T>
where
    T: Lift,
{
    // Reads the discriminant from the first core value and, for `Some`, lifts
    // the payload from the remaining flat storage.
    fn linear_lift_from_flat(
        cx: &mut LiftContext<'_>,
        ty: InterfaceType,
        src: &Self::Lower,
    ) -> Result<Self> {
        let payload = match ty {
            InterfaceType::Option(ty) => cx.types[ty].ty,
            _ => bad_type_info(),
        };
        // Any discriminant other than 0/1 is invalid per the canonical ABI.
        Ok(match src.A1.get_i32() {
            0 => None,
            1 => Some(T::linear_lift_from_flat(cx, payload, &src.A2)?),
            _ => bail!("invalid option discriminant"),
        })
    }

    // Reads the discriminant byte and, for `Some`, lifts the payload from its
    // ABI-computed offset within `bytes`.
    fn linear_lift_from_memory(
        cx: &mut LiftContext<'_>,
        ty: InterfaceType,
        bytes: &[u8],
    ) -> Result<Self> {
        debug_assert!((bytes.as_ptr() as usize) % (Self::ALIGN32 as usize) == 0);
        let payload_ty = match ty {
            InterfaceType::Option(ty) => cx.types[ty].ty,
            _ => bad_type_info(),
        };
        let discrim = bytes[0];
        let payload = &bytes[Self::INFO.payload_offset32 as usize..];
        match discrim {
            0 => Ok(None),
            1 => Ok(Some(T::linear_lift_from_memory(cx, payload_ty, payload)?)),
            _ => bail!("invalid option discriminant"),
        }
    }
}
2402
/// Flat (core wasm) representation of a component-level `result<T, E>`:
/// a discriminant followed by overlapped payload storage.
#[derive(Clone, Copy)]
#[repr(C)]
pub struct ResultLower<T: Copy, E: Copy> {
    // Discriminant: 0 selects `ok`, 1 selects `err`.
    tag: ValRaw,
    // Union of the two payload representations; which one is valid is
    // determined by `tag`.
    payload: ResultLowerPayload<T, E>,
}
2409
// Overlapped storage for the `ok`/`err` payloads of a lowered `result`;
// sized and aligned to fit whichever representation is larger.
#[derive(Clone, Copy)]
#[repr(C)]
union ResultLowerPayload<T: Copy, E: Copy> {
    ok: T,
    err: E,
}
2416
unsafe impl<T, E> ComponentType for Result<T, E>
where
    T: ComponentType,
    E: ComponentType,
{
    type Lower = ResultLower<T::Lower, E::Lower>;

    const ABI: CanonicalAbiInfo = CanonicalAbiInfo::variant_static(&[Some(T::ABI), Some(E::ABI)]);

    fn typecheck(ty: &InterfaceType, types: &InstanceType<'_>) -> Result<()> {
        match ty {
            InterfaceType::Result(r) => {
                let result = &types.types[*r];
                // A `result` with no `ok` (resp. `err`) payload in the
                // component type is only compatible with Rust's `()` on the
                // corresponding side.
                match &result.ok {
                    Some(ty) => T::typecheck(ty, types)?,
                    None if T::IS_RUST_UNIT_TYPE => {}
                    None => bail!("expected no `ok` type"),
                }
                match &result.err {
                    Some(ty) => E::typecheck(ty, types)?,
                    None if E::IS_RUST_UNIT_TYPE => {}
                    None => bail!("expected no `err` type"),
                }
                Ok(())
            }
            other => bail!("expected `result` found `{}`", desc(other)),
        }
    }
}
2446
/// Lowers the payload of a variant into the storage for the entire payload,
/// handling writing zeros at the end of the representation if this payload is
/// smaller than the entire flat representation.
///
/// * `payload` - the flat storage space for the entire payload of the variant
/// * `typed_payload` - projection from the payload storage space to the
///   individual storage space for this variant.
/// * `lower` - lowering operation used to initialize the `typed_payload` return
///   value.
///
/// For more information on this se the comments in the `Lower for Result`
/// implementation below.
///
/// # Safety
///
/// `P` and `T` must be flat `ComponentType::Lower` representations (storage
/// viewable as a slice of `ValRaw`), and `typed_payload` must project to
/// storage contained within `payload`, so that the `storage_as_slice`
/// reinterpretations below are valid.
pub unsafe fn lower_payload<P, T>(
    payload: &mut MaybeUninit<P>,
    typed_payload: impl FnOnce(&mut MaybeUninit<P>) -> &mut MaybeUninit<T>,
    lower: impl FnOnce(&mut MaybeUninit<T>) -> Result<()>,
) -> Result<()> {
    let typed = typed_payload(payload);
    lower(typed)?;

    // Zero-fill whatever tail of the full payload the typed projection did
    // not cover so no uninitialized host memory leaks to the guest.
    let typed_len = unsafe { storage_as_slice(typed).len() };
    let payload = unsafe { storage_as_slice_mut(payload) };
    for slot in payload[typed_len..].iter_mut() {
        slot.write(ValRaw::u64(0));
    }
    Ok(())
}
2474
unsafe impl<T, E> ComponentVariant for Result<T, E>
where
    T: ComponentType,
    E: ComponentType,
{
    // Discriminant 0 selects `Ok(T)` and 1 selects `Err(E)`, matching the
    // `Lower`/`Lift` implementations below.
    const CASES: &'static [Option<CanonicalAbiInfo>] = &[Some(T::ABI), Some(E::ABI)];
}
2482
unsafe impl<T, E> Lower for Result<T, E>
where
    T: Lower,
    E: Lower,
{
    // Writes the discriminant followed by the selected payload, zero-filling
    // the unused tail of the payload union via `lower_payload`.
    fn linear_lower_to_flat<U>(
        &self,
        cx: &mut LowerContext<'_, U>,
        ty: InterfaceType,
        dst: &mut MaybeUninit<Self::Lower>,
    ) -> Result<()> {
        let (ok, err) = match ty {
            InterfaceType::Result(ty) => {
                let ty = &cx.types[ty];
                (ty.ok, ty.err)
            }
            _ => bad_type_info(),
        };

        // This implementation of `Lower::lower`, if you're reading these from
        // the top of this file, is the first location that the "join" logic of
        // the component model's canonical ABI encountered. The rough problem is
        // that let's say we have a component model type of the form:
        //
        //      (result u64 (error (tuple f32 u16)))
        //
        // The flat representation of this is actually pretty tricky. Currently
        // it is:
        //
        //      i32 i64 i32
        //
        // The first `i32` is the discriminant for the `result`, and the payload
        // is represented by `i64 i32`. The "ok" variant will only use the `i64`
        // and the "err" variant will use both `i64` and `i32`.
        //
        // In the "ok" variant the first issue is encountered. The size of one
        // variant may not match the size of the other variants. All variants
        // start at the "front" but when lowering a type we need to be sure to
        // initialize the later variants (lest we leak random host memory into
        // the guest module). Due to how the `Lower` type is represented as a
        // `union` of all the variants what ends up happening here is that
        // internally within the `lower_payload` after the typed payload is
        // lowered the remaining bits of the payload that weren't initialized
        // are all set to zero. This will guarantee that we'll write to all the
        // slots for each variant.
        //
        // The "err" variant encounters the second issue, however, which is that
        // the flat representation for each type may differ between payloads. In
        // the "ok" arm an `i64` is written, but the `lower` implementation for
        // the "err" arm will write an `f32` and then an `i32`. For this
        // implementation of `lower` to be valid the `f32` needs to get inflated
        // to an `i64` with zero-padding in the upper bits. What may be
        // surprising, however, is that none of this is handled in this file.
        // This implementation looks like it's blindly deferring to `E::lower`
        // and hoping it does the right thing.
        //
        // In reality, however, the correctness of variant lowering relies on
        // two subtle details of the `ValRaw` implementation in Wasmtime:
        //
        // 1. First the `ValRaw` value always contains little-endian values.
        //    This means that if a `u32` is written, a `u64` is read, and then
        //    the `u64` has its upper bits truncated the original value will
        //    always be retained. This is primarily here for big-endian
        //    platforms where if it weren't little endian then the opposite
        //    would occur and the wrong value would be read.
        //
        // 2. Second, and perhaps even more subtly, the `ValRaw` constructors
        //    for 32-bit types actually always initialize 64-bits of the
        //    `ValRaw`. In the component model flat ABI only 32 and 64-bit types
        //    are used so 64-bits is big enough to contain everything. This
        //    means that when a `ValRaw` is written into the destination it will
        //    always, whether it's needed or not, be "ready" to get extended up
        //    to 64-bits.
        //
        // Put together these two subtle guarantees means that all `Lower`
        // implementations can be written "naturally" as one might naively
        // expect. Variants will, on each arm, zero out remaining fields and all
        // writes to the flat representation will automatically be 64-bit writes
        // meaning that if the value is read as a 64-bit value, which isn't
        // known at the time of the write, it'll still be correct.
        match self {
            Ok(e) => {
                map_maybe_uninit!(dst.tag).write(ValRaw::i32(0));
                unsafe {
                    lower_payload(
                        map_maybe_uninit!(dst.payload),
                        |payload| map_maybe_uninit!(payload.ok),
                        |dst| match ok {
                            Some(ok) => e.linear_lower_to_flat(cx, ok, dst),
                            None => Ok(()),
                        },
                    )
                }
            }
            Err(e) => {
                map_maybe_uninit!(dst.tag).write(ValRaw::i32(1));
                unsafe {
                    lower_payload(
                        map_maybe_uninit!(dst.payload),
                        |payload| map_maybe_uninit!(payload.err),
                        |dst| match err {
                            Some(err) => e.linear_lower_to_flat(cx, err, dst),
                            None => Ok(()),
                        },
                    )
                }
            }
        }
    }

    // Stores the discriminant byte followed by the selected payload (if the
    // component type declares one) at its ABI-computed offset.
    fn linear_lower_to_memory<U>(
        &self,
        cx: &mut LowerContext<'_, U>,
        ty: InterfaceType,
        offset: usize,
    ) -> Result<()> {
        let (ok, err) = match ty {
            InterfaceType::Result(ty) => {
                let ty = &cx.types[ty];
                (ty.ok, ty.err)
            }
            _ => bad_type_info(),
        };
        debug_assert!(offset % (Self::ALIGN32 as usize) == 0);
        let payload_offset = Self::INFO.payload_offset32 as usize;
        match self {
            Ok(e) => {
                cx.get::<1>(offset)[0] = 0;
                if let Some(ok) = ok {
                    e.linear_lower_to_memory(cx, ok, offset + payload_offset)?;
                }
            }
            Err(e) => {
                cx.get::<1>(offset)[0] = 1;
                if let Some(err) = err {
                    e.linear_lower_to_memory(cx, err, offset + payload_offset)?;
                }
            }
        }
        Ok(())
    }
}
2625
unsafe impl<T, E> Lift for Result<T, E>
where
    T: Lift,
    E: Lift,
{
    // Reads the discriminant core value and lifts the corresponding payload
    // from the union's flat storage.
    #[inline]
    fn linear_lift_from_flat(
        cx: &mut LiftContext<'_>,
        ty: InterfaceType,
        src: &Self::Lower,
    ) -> Result<Self> {
        let (ok, err) = match ty {
            InterfaceType::Result(ty) => {
                let ty = &cx.types[ty];
                (ty.ok, ty.err)
            }
            _ => bad_type_info(),
        };
        // Note that this implementation specifically isn't trying to actually
        // reinterpret or alter the bits of `lower` depending on which variant
        // we're lifting. This ends up all working out because the value is
        // stored in little-endian format.
        //
        // When stored in little-endian format the `{T,E}::Lower`, when each
        // individual `ValRaw` is read, means that if an i64 value, extended
        // from an i32 value, was stored then when the i32 value is read it'll
        // automatically ignore the upper bits.
        //
        // This "trick" allows us to seamlessly pass through the `Self::Lower`
        // representation into the lifting/lowering without trying to handle
        // "join"ed types as per the canonical ABI. It just so happens that i64
        // bits will naturally be reinterpreted as f64. Additionally if the
        // joined type is i64 but only the lower bits are read that's ok and we
        // don't need to validate the upper bits.
        //
        // This is largely enabled by WebAssembly/component-model#35 where no
        // validation needs to be performed for ignored bits and bytes here.
        Ok(match src.tag.get_i32() {
            0 => Ok(unsafe { lift_option(cx, ok, &src.payload.ok)? }),
            1 => Err(unsafe { lift_option(cx, err, &src.payload.err)? }),
            _ => bail!("invalid expected discriminant"),
        })
    }

    // Reads the discriminant byte and lifts the corresponding payload from
    // its ABI-computed offset within `bytes`.
    #[inline]
    fn linear_lift_from_memory(
        cx: &mut LiftContext<'_>,
        ty: InterfaceType,
        bytes: &[u8],
    ) -> Result<Self> {
        debug_assert!((bytes.as_ptr() as usize) % (Self::ALIGN32 as usize) == 0);
        let discrim = bytes[0];
        let payload = &bytes[Self::INFO.payload_offset32 as usize..];
        let (ok, err) = match ty {
            InterfaceType::Result(ty) => {
                let ty = &cx.types[ty];
                (ty.ok, ty.err)
            }
            _ => bad_type_info(),
        };
        match discrim {
            0 => Ok(Ok(load_option(cx, ok, &payload[..T::SIZE32])?)),
            1 => Ok(Err(load_option(cx, err, &payload[..E::SIZE32])?)),
            _ => bail!("invalid expected discriminant"),
        }
    }
}
2693
2694fn lift_option<T>(cx: &mut LiftContext<'_>, ty: Option<InterfaceType>, src: &T::Lower) -> Result<T>
2695where
2696 T: Lift,
2697{
2698 match ty {
2699 Some(ty) => T::linear_lift_from_flat(cx, ty, src),
2700 None => Ok(empty_lift()),
2701 }
2702}
2703
2704fn load_option<T>(cx: &mut LiftContext<'_>, ty: Option<InterfaceType>, bytes: &[u8]) -> Result<T>
2705where
2706 T: Lift,
2707{
2708 match ty {
2709 Some(ty) => T::linear_lift_from_memory(cx, ty, bytes),
2710 None => Ok(empty_lift()),
2711 }
2712}
2713
/// Conjures a value of `T` "out of thin air" for payload-less cases.
///
/// Only valid for zero-sized Rust unit-like types (asserted below); such
/// types have no bytes, so there is nothing to actually lift.
fn empty_lift<T>() -> T
where
    T: Lift,
{
    assert!(T::IS_RUST_UNIT_TYPE);
    assert_eq!(mem::size_of::<T>(), 0);
    // SAFETY: `T` is zero-sized (asserted just above), so a value of `T` has
    // no bytes whose contents could be invalid; an "uninitialized" value is a
    // fully valid representation.
    unsafe { MaybeUninit::uninit().assume_init() }
}
2722
/// Helper structure to define `Lower` for tuples below.
///
/// Uses default type parameters to have fields be zero-sized and not present
/// in memory for smaller tuple values.
#[expect(non_snake_case, reason = "more amenable to macro-generated code")]
#[doc(hidden)]
#[derive(Clone, Copy)]
#[repr(C)]
pub struct TupleLower<
    T1 = (),
    T2 = (),
    T3 = (),
    T4 = (),
    T5 = (),
    T6 = (),
    T7 = (),
    T8 = (),
    T9 = (),
    T10 = (),
    T11 = (),
    T12 = (),
    T13 = (),
    T14 = (),
    T15 = (),
    T16 = (),
    T17 = (),
> {
    // NB: these names match the names in `for_each_function_signature!`,
    // which is what lets the macro-generated code below refer to a field via
    // the same identifier it uses for the corresponding type parameter.
    A1: T1,
    A2: T2,
    A3: T3,
    A4: T4,
    A5: T5,
    A6: T6,
    A7: T7,
    A8: T8,
    A9: T9,
    A10: T10,
    A11: T11,
    A12: T12,
    A13: T13,
    A14: T14,
    A15: T15,
    A16: T16,
    A17: T17,
    // Zero-length `[ValRaw; 0]` contributes no size but forces this struct's
    // alignment to match `ValRaw`'s even when all fields above default to
    // `()`, presumably so any `TupleLower` can be safely reinterpreted as a
    // slice of `ValRaw`s (see `storage_as_slice`).
    _align_tuple_lower0_correctly: [ValRaw; 0],
}
2770
// Implements the component-model traits (`ComponentType`, `Lower`, `Lift`,
// and `ComponentNamedList`) for a Rust tuple of one particular arity. The
// `$n` parameter is the arity and each `$t` is both a type parameter and,
// via `TupleLower`'s matching field names, a field accessor. Invoked once
// per supported arity by `for_each_function_signature!` below.
macro_rules! impl_component_ty_for_tuples {
    ($n:tt $($t:ident)*) => {
        #[allow(non_snake_case, reason = "macro-generated code")]
        unsafe impl<$($t,)*> ComponentType for ($($t,)*)
        where $($t: ComponentType),*
        {
            // The flat representation of a tuple is each field's flat
            // representation laid out in order, modeled by `TupleLower`.
            type Lower = TupleLower<$($t::Lower),*>;

            const ABI: CanonicalAbiInfo = CanonicalAbiInfo::record_static(&[
                $($t::ABI),*
            ]);

            // Only the zero-arity tuple `()` counts as a Rust "unit" type:
            // the repetition below runs once per element, so any nonzero
            // arity forces this to `false`.
            const IS_RUST_UNIT_TYPE: bool = {
                let mut _is_unit = true;
                $(
                    let _anything_to_bind_the_macro_variable = $t::IS_RUST_UNIT_TYPE;
                    _is_unit = false;
                )*
                _is_unit
            };

            fn typecheck(
                ty: &InterfaceType,
                types: &InstanceType<'_>,
            ) -> Result<()> {
                typecheck_tuple(ty, types, &[$($t::typecheck),*])
            }
        }

        #[allow(non_snake_case, reason = "macro-generated code")]
        unsafe impl<$($t,)*> Lower for ($($t,)*)
        where $($t: Lower),*
        {
            fn linear_lower_to_flat<U>(
                &self,
                cx: &mut LowerContext<'_, U>,
                ty: InterfaceType,
                _dst: &mut MaybeUninit<Self::Lower>,
            ) -> Result<()> {
                let types = match ty {
                    InterfaceType::Tuple(t) => &cx.types[t].types,
                    _ => bad_type_info(),
                };
                // Lower each element into its corresponding `TupleLower`
                // field, pulling the component-level type for each element
                // from the tuple's type information in order.
                let ($($t,)*) = self;
                let mut _types = types.iter();
                $(
                    let ty = *_types.next().unwrap_or_else(bad_type_info);
                    $t.linear_lower_to_flat(cx, ty, map_maybe_uninit!(_dst.$t))?;
                )*
                Ok(())
            }

            fn linear_lower_to_memory<U>(
                &self,
                cx: &mut LowerContext<'_, U>,
                ty: InterfaceType,
                mut _offset: usize,
            ) -> Result<()> {
                debug_assert!(_offset % (Self::ALIGN32 as usize) == 0);
                let types = match ty {
                    InterfaceType::Tuple(t) => &cx.types[t].types,
                    _ => bad_type_info(),
                };
                let ($($t,)*) = self;
                let mut _types = types.iter();
                $(
                    let ty = *_types.next().unwrap_or_else(bad_type_info);
                    // `next_field32_size` advances `_offset` past any needed
                    // alignment padding and yields this field's offset.
                    $t.linear_lower_to_memory(cx, ty, $t::ABI.next_field32_size(&mut _offset))?;
                )*
                Ok(())
            }
        }

        #[allow(non_snake_case, reason = "macro-generated code")]
        unsafe impl<$($t,)*> Lift for ($($t,)*)
        where $($t: Lift),*
        {
            #[inline]
            fn linear_lift_from_flat(cx: &mut LiftContext<'_>, ty: InterfaceType, _src: &Self::Lower) -> Result<Self> {
                let types = match ty {
                    InterfaceType::Tuple(t) => &cx.types[t].types,
                    _ => bad_type_info(),
                };
                // Lift each element from its `TupleLower` field, consuming
                // the tuple's component-level element types in order.
                let mut _types = types.iter();
                Ok(($(
                    $t::linear_lift_from_flat(
                        cx,
                        *_types.next().unwrap_or_else(bad_type_info),
                        &_src.$t,
                    )?,
                )*))
            }

            #[inline]
            fn linear_lift_from_memory(cx: &mut LiftContext<'_>, ty: InterfaceType, bytes: &[u8]) -> Result<Self> {
                debug_assert!((bytes.as_ptr() as usize) % (Self::ALIGN32 as usize) == 0);
                let types = match ty {
                    InterfaceType::Tuple(t) => &cx.types[t].types,
                    _ => bad_type_info(),
                };
                let mut _types = types.iter();
                let mut _offset = 0;
                $(
                    let ty = *_types.next().unwrap_or_else(bad_type_info);
                    // Slice out exactly this field's bytes at its (aligned)
                    // offset within the record layout.
                    let $t = $t::linear_lift_from_memory(cx, ty, &bytes[$t::ABI.next_field32_size(&mut _offset)..][..$t::SIZE32])?;
                )*
                Ok(($($t,)*))
            }
        }

        #[allow(non_snake_case, reason = "macro-generated code")]
        unsafe impl<$($t,)*> ComponentNamedList for ($($t,)*)
        where $($t: ComponentType),*
        {}
    };
}
2887
// Instantiate the tuple impls above for every arity that component function
// signatures support.
for_each_function_signature!(impl_component_ty_for_tuples);
2889
2890pub fn desc(ty: &InterfaceType) -> &'static str {
2891 match ty {
2892 InterfaceType::U8 => "u8",
2893 InterfaceType::S8 => "s8",
2894 InterfaceType::U16 => "u16",
2895 InterfaceType::S16 => "s16",
2896 InterfaceType::U32 => "u32",
2897 InterfaceType::S32 => "s32",
2898 InterfaceType::U64 => "u64",
2899 InterfaceType::S64 => "s64",
2900 InterfaceType::Float32 => "f32",
2901 InterfaceType::Float64 => "f64",
2902 InterfaceType::Bool => "bool",
2903 InterfaceType::Char => "char",
2904 InterfaceType::String => "string",
2905 InterfaceType::List(_) => "list",
2906 InterfaceType::Tuple(_) => "tuple",
2907 InterfaceType::Option(_) => "option",
2908 InterfaceType::Result(_) => "result",
2909
2910 InterfaceType::Record(_) => "record",
2911 InterfaceType::Variant(_) => "variant",
2912 InterfaceType::Flags(_) => "flags",
2913 InterfaceType::Enum(_) => "enum",
2914 InterfaceType::Own(_) => "owned resource",
2915 InterfaceType::Borrow(_) => "borrowed resource",
2916 InterfaceType::Future(_) => "future",
2917 InterfaceType::Stream(_) => "stream",
2918 InterfaceType::ErrorContext(_) => "error-context",
2919 InterfaceType::FixedLengthList(_) => "list<_, N>",
2920 }
2921}
2922
/// Diverging helper invoked when an `InterfaceType` doesn't have the shape a
/// lift/lower implementation statically expects.
///
/// Reaching this means the type information is inconsistent with the Rust
/// type being lifted/lowered — a broken internal invariant rather than a
/// recoverable error — hence a panic instead of a `Result`.
#[cold]
#[doc(hidden)]
pub fn bad_type_info<T>() -> T {
    // NB: should consider something like `unreachable_unchecked` here if this
    // becomes a performance bottleneck at some point, but that also comes with
    // a tradeoff of propagating a lot of unsafety, so it may not be worth it.
    panic!("bad type information detected");
}