Skip to main content

wasmtime/runtime/component/func/
typed.rs

1use crate::component::Instance;
2use crate::component::func::{Func, LiftContext, LowerContext};
3use crate::component::matching::InstanceType;
4use crate::component::storage::{storage_as_slice, storage_as_slice_mut};
5use crate::prelude::*;
6use crate::{AsContextMut, StoreContext, StoreContextMut, ValRaw};
7use alloc::borrow::Cow;
8use core::fmt;
9use core::iter;
10use core::marker;
11use core::mem::{self, MaybeUninit};
12use core::str;
13use wasmtime_environ::component::{
14    CanonicalAbiInfo, InterfaceType, MAX_FLAT_PARAMS, MAX_FLAT_RESULTS, OptionsIndex,
15    StringEncoding, VariantInfo,
16};
17
18#[cfg(feature = "component-model-async")]
19use crate::component::concurrent::{self, AsAccessor, PreparedCall};
20
/// A statically-typed version of [`Func`] which takes `Params` as input and
/// returns `Return`.
///
/// This is an efficient way to invoke a WebAssembly component where if the
/// inputs and output are statically known this can eschew the vast majority of
/// machinery and checks when calling WebAssembly. This is the most optimized
/// way to call a WebAssembly component.
///
/// Note that like [`Func`] this is a pointer within a [`Store`](crate::Store)
/// and usage will panic if used with the wrong store.
///
/// This type is primarily created with the [`Func::typed`] API.
///
/// See [`ComponentType`] for more information about supported types.
pub struct TypedFunc<Params, Return> {
    // The underlying untyped function. The `Params`/`Return` ascription is
    // established at construction time (checked via `Func::typed`, or
    // unsafely asserted via `TypedFunc::new_unchecked`).
    func: Func,

    // The definition of this field is somewhat subtle and may be surprising.
    // Naively one might expect something like
    //
    //      _marker: marker::PhantomData<fn(Params) -> Return>,
    //
    // Since this is a function pointer after all. The problem with this
    // definition though is that it imposes the wrong variance on `Params` from
    // what we want. Abstractly a `fn(Params)` is able to store `Params` within
    // it meaning you can only give it `Params` that live longer than the
    // function pointer.
    //
    // With a component model function, however, we're always copying data from
    // the host into the guest, so we are never storing pointers to `Params`
    // into the guest outside the duration of a `call`, meaning we can actually
    // accept values in `TypedFunc::call` which live for a shorter duration
    // than the `Params` argument on the struct.
    //
    // This all means that we don't use a phantom function pointer, but instead
    // feign phantom storage here to get the variance desired.
    _marker: marker::PhantomData<(Params, Return)>,
}
59
// `Copy` is implemented manually (rather than derived) so that a `TypedFunc`
// is always copyable regardless of whether `Params`/`Return` themselves
// implement `Copy`; the struct only holds a `Func` plus phantom data.
impl<Params, Return> Copy for TypedFunc<Params, Return> {}
61
// Like `Copy` above, `Clone` is implemented manually to avoid the spurious
// `Params: Clone`/`Return: Clone` bounds that `#[derive(Clone)]` would add.
impl<Params, Return> Clone for TypedFunc<Params, Return> {
    fn clone(&self) -> TypedFunc<Params, Return> {
        *self
    }
}
67
68impl<Params, Return> TypedFunc<Params, Return>
69where
70    Params: ComponentNamedList + Lower,
71    Return: ComponentNamedList + Lift,
72{
    /// Creates a new [`TypedFunc`] from the provided component [`Func`],
    /// unsafely asserting that the underlying function takes `Params` as
    /// input and returns `Return`.
    ///
    /// # Unsafety
    ///
    /// This is an unsafe function because it does not verify that the [`Func`]
    /// provided actually implements this signature. It's up to the caller to
    /// have performed some other sort of check to ensure that the signature is
    /// correct.
    pub unsafe fn new_unchecked(func: Func) -> TypedFunc<Params, Return> {
        // No typechecking happens here; the `Params`/`Return` ascription is
        // taken entirely on trust from the caller per the contract above.
        TypedFunc {
            _marker: marker::PhantomData,
            func,
        }
    }
89
    /// Returns the underlying un-typed [`Func`] that this [`TypedFunc`]
    /// references.
    pub fn func(&self) -> &Func {
        &self.func
    }
95
    /// Calls the underlying WebAssembly component function using the provided
    /// `params` as input.
    ///
    /// This method is used to enter into a component. Execution happens within
    /// the `store` provided. The `params` are copied into WebAssembly memory
    /// as appropriate and a core wasm function is invoked.
    ///
    /// # Post-return
    ///
    /// In the component model each function can have a "post return" specified
    /// which allows cleaning up the arguments returned to the host. For example
    /// if WebAssembly returns a string to the host then it might be a uniquely
    /// allocated string which, after the host finishes processing it, needs to
    /// be deallocated in the wasm instance's own linear memory to prevent
    /// memory leaks in wasm itself. The `post-return` canonical abi option is
    /// used to configure this.
    ///
    /// If a post-return function is present, it will be called automatically by
    /// this function.
    ///
    /// # Errors
    ///
    /// This function can return an error for a number of reasons:
    ///
    /// * If the wasm itself traps during execution.
    /// * If the wasm traps while copying arguments into memory.
    /// * If the wasm provides bad allocation pointers when copying arguments
    ///   into memory.
    /// * If the wasm returns a value which violates the canonical ABI.
    /// * If this function's instances cannot be entered, for example if the
    ///   instance is currently calling a host function.
    /// * If `store` requires using [`Self::call_async`] instead, see
    ///   [crate documentation](crate#async) for more info.
    ///
    /// In general there are many ways that things could go wrong when copying
    /// types in and out of a wasm module with the canonical ABI, and certain
    /// error conditions are specific to certain types. For example a
    /// WebAssembly module can't return an invalid `char`. When allocating space
    /// for this host to copy a string into the returned pointer must be
    /// in-bounds in memory.
    ///
    /// If an error happens then the error should contain detailed enough
    /// information to understand which part of the canonical ABI went wrong
    /// and what to inspect.
    ///
    /// # Panics
    ///
    /// Panics if `store` does not own this function.
    pub fn call(&self, mut store: impl AsContextMut, params: Params) -> Result<Return> {
        let mut store = store.as_context_mut();
        // Reject this synchronous entrypoint when the store is configured to
        // require async calls (see the `Self::call_async` error bullet above).
        store.0.validate_sync_call()?;
        self.call_impl(store.as_context_mut(), params)
    }
149
    /// Exactly like [`Self::call`], except for invoking WebAssembly
    /// [asynchronously](crate#async).
    ///
    /// # Panics
    ///
    /// Panics if `store` does not own this function.
    #[cfg(feature = "async")]
    pub async fn call_async(
        &self,
        mut store: impl AsContextMut<Data: Send>,
        params: Params,
    ) -> Result<Return>
    where
        Return: 'static,
    {
        let mut store = store.as_context_mut();

        // With concurrency support enabled the call is routed through the
        // concurrent machinery; otherwise it falls through to a fiber-based
        // invocation of the synchronous path at the bottom of this function.
        #[cfg(feature = "component-model-async")]
        if store.0.concurrency_support() {
            use crate::component::concurrent::TaskId;
            use crate::runtime::vm::SendSyncPtr;
            use core::ptr::NonNull;

            // Erase the lifetime of `params` by smuggling a raw pointer into
            // the `'static` lowering closure below; see the SAFETY comment
            // inside the closure for why this is sound.
            let ptr = SendSyncPtr::from(NonNull::from(&params).cast::<u8>());
            let prepared =
                self.prepare_call(store.as_context_mut(), true, move |cx, ty, dst| {
                    // SAFETY: The goal here is to get `Params`, a non-`'static`
                    // value, to live long enough to the lowering of the
                    // parameters. We're guaranteed that `Params` lives in the
                    // future of the outer function (we're in an `async fn`) so it'll
                    // stay alive as long as the future itself. That is distinct,
                    // for example, from the signature of `call_concurrent` below.
                    //
                    // Here a pointer to `Params` is smuggled to this location
                    // through a `SendSyncPtr<u8>` to thwart the `'static` check
                    // of rustc and the signature of `prepare_call`.
                    //
                    // Note the use of `SignalOnDrop` in the code that follows
                    // this closure, which ensures that the task will be removed
                    // from the concurrent state to which it belongs when the
                    // containing `Future` is dropped, so long as the parameters
                    // have not yet been lowered. Since this closure is removed from
                    // the task after the parameters are lowered, it will never be called
                    // after the containing `Future` is dropped.
                    let params = unsafe { ptr.cast::<Params>().as_ref() };
                    Self::lower_args(cx, ty, dst, params)
                })?;

            // Drop guard notifying the concurrent state that the host-side
            // future for this task has gone away (see SAFETY comment above).
            struct SignalOnDrop<'a, T: 'static> {
                store: StoreContextMut<'a, T>,
                task: TaskId,
            }

            impl<'a, T> Drop for SignalOnDrop<'a, T> {
                fn drop(&mut self) {
                    self.task
                        .host_future_dropped(self.store.as_context_mut())
                        .unwrap();
                }
            }

            let mut wrapper = SignalOnDrop {
                store,
                task: prepared.task_id(),
            };

            let result = concurrent::queue_call(wrapper.store.as_context_mut(), prepared)?;
            return wrapper
                .store
                .as_context_mut()
                .run_concurrent_trap_on_idle(async |_| Ok(result.await?))
                .await?;
        }

        // Non-concurrent path: run the blocking `call_impl` on a fiber so the
        // surrounding future stays async.
        store
            .on_fiber(|store| self.call_impl(store, params))
            .await?
    }
228
229    /// Start a concurrent call to this function.
230    ///
231    /// Concurrency is achieved by relying on the [`Accessor`] argument, which
232    /// can be obtained by calling [`StoreContextMut::run_concurrent`].
233    ///
234    /// Unlike [`Self::call`] and [`Self::call_async`] (both of which require
235    /// exclusive access to the store until the completion of the call), calls
236    /// made using this method may run concurrently with other calls to the same
237    /// instance.  In addition, the runtime will call the `post-return` function
238    /// (if any) automatically when the guest task completes.
239    ///
240    /// This function will return an error if [`Config::concurrency_support`] is
241    /// disabled.
242    ///
243    /// [`Config::concurrency_support`]: crate::Config::concurrency_support
244    ///
245    /// # Progress and Cancellation
246    ///
247    /// For more information about how to make progress on the wasm task or how
248    /// to cancel the wasm task see the documentation for
249    /// [`Func::call_concurrent`].
250    ///
251    /// [`Func::call_concurrent`]: crate::component::Func::call_concurrent
252    ///
253    /// # Panics
254    ///
255    /// Panics if the store that the [`Accessor`] is derived from does not own
256    /// this function.
257    ///
258    /// [`Accessor`]: crate::component::Accessor
259    ///
260    /// # Example
261    ///
262    /// Using [`StoreContextMut::run_concurrent`] to get an [`Accessor`]:
263    ///
264    /// ```
265    /// # use {
266    /// #   wasmtime::{
267    /// #     error::{Result},
268    /// #     component::{Component, Linker, ResourceTable},
269    /// #     Config, Engine, Store
270    /// #   },
271    /// # };
272    /// #
273    /// # struct Ctx { table: ResourceTable }
274    /// #
275    /// # async fn foo() -> Result<()> {
276    /// # let mut config = Config::new();
277    /// # let engine = Engine::new(&config)?;
278    /// # let mut store = Store::new(&engine, Ctx { table: ResourceTable::new() });
279    /// # let mut linker = Linker::new(&engine);
280    /// # let component = Component::new(&engine, "")?;
281    /// # let instance = linker.instantiate_async(&mut store, &component).await?;
282    /// let my_typed_func = instance.get_typed_func::<(), ()>(&mut store, "my_typed_func")?;
283    /// store.run_concurrent(async |accessor| -> wasmtime::Result<_> {
284    ///    my_typed_func.call_concurrent(accessor, ()).await?;
285    ///    Ok(())
286    /// }).await??;
287    /// # Ok(())
288    /// # }
289    /// ```
290    #[cfg(feature = "component-model-async")]
291    pub async fn call_concurrent(
292        self,
293        accessor: impl AsAccessor<Data: Send>,
294        params: Params,
295    ) -> Result<Return>
296    where
297        Params: 'static,
298        Return: 'static,
299    {
300        let result = accessor.as_accessor().with(|mut store| {
301            let mut store = store.as_context_mut();
302            ensure!(
303                store.0.concurrency_support(),
304                "cannot use `call_concurrent` Config::concurrency_support disabled",
305            );
306
307            let prepared =
308                self.prepare_call(store.as_context_mut(), false, move |cx, ty, dst| {
309                    Self::lower_args(cx, ty, dst, &params)
310                })?;
311            concurrent::queue_call(store, prepared)
312        });
313        Ok(result?.await?)
314    }
315
    /// Lowers `params` into `dst`, dispatching between the stack-based and
    /// heap-based lowering strategies depending on whether the flattened
    /// parameter count fits within `MAX_FLAT_PARAMS`.
    fn lower_args<T>(
        cx: &mut LowerContext<T>,
        ty: InterfaceType,
        dst: &mut [MaybeUninit<ValRaw>],
        params: &Params,
    ) -> Result<()> {
        use crate::component::storage::slice_to_storage_mut;

        if Params::flatten_count() <= MAX_FLAT_PARAMS {
            // SAFETY: the safety of `slice_to_storage_mut` relies on
            // `Params::Lower` being represented by a sequence of
            // `ValRaw`, and that's a guarantee upheld by the `Lower`
            // trait itself.
            let dst: &mut MaybeUninit<Params::Lower> = unsafe { slice_to_storage_mut(dst) };
            Self::lower_stack_args(cx, &params, ty, dst)
        } else {
            // Too many flat parameters: spill them to linear memory, storing
            // only the heap pointer in the first `ValRaw` slot.
            Self::lower_heap_args(cx, &params, ty, &mut dst[0])
        }
    }
335
    /// Calls `concurrent::prepare_call` with monomorphized functions for
    /// lowering the parameters and lifting the result according to the number
    /// of core Wasm parameters and results in the signature of the function to
    /// be called.
    #[cfg(feature = "component-model-async")]
    fn prepare_call<T>(
        self,
        store: StoreContextMut<'_, T>,
        host_future_present: bool,
        lower: impl FnOnce(
            &mut LowerContext<T>,
            InterfaceType,
            &mut [MaybeUninit<ValRaw>],
        ) -> Result<()>
        + Send
        + Sync
        + 'static,
    ) -> Result<PreparedCall<Return>>
    where
        Return: 'static,
    {
        use crate::component::storage::slice_to_storage;
        debug_assert!(store.0.concurrency_support());

        // When parameters spill to the heap only a single core-wasm parameter
        // (the pointer) is passed; otherwise each flattened value is passed.
        let param_count = if Params::flatten_count() <= MAX_FLAT_PARAMS {
            Params::flatten_count()
        } else {
            1
        };
        let max_results = if self.func.abi_async(store.0) {
            MAX_FLAT_PARAMS
        } else {
            MAX_FLAT_RESULTS
        };
        concurrent::prepare_call(
            store,
            self.func,
            param_count,
            host_future_present,
            move |func, store, params_out| {
                func.with_lower_context(store, |cx, ty| lower(cx, ty, params_out))
            },
            move |func, store, results| {
                let result = if Return::flatten_count() <= max_results {
                    func.with_lift_context(store, |cx, ty| {
                        // SAFETY: Per the safety requirements documented for the
                        // `ComponentType` trait, `Return::Lower` must be
                        // compatible at the binary level with a `[ValRaw; N]`,
                        // where `N` is `mem::size_of::<Return::Lower>() /
                        // mem::size_of::<ValRaw>()`.  And since this function
                        // is only used when `Return::flatten_count() <=
                        // MAX_FLAT_RESULTS` and `MAX_FLAT_RESULTS == 1`, `N`
                        // can only either be 0 or 1.
                        //
                        // See `ComponentInstance::exit_call` for where we use
                        // the result count passed from
                        // `wasmtime_environ::fact::trampoline`-generated code
                        // to ensure the slice has the correct length, and also
                        // `concurrent::start_call` for where we conservatively
                        // use a slice length of 1 unconditionally.  Also note
                        // that, as of this writing `slice_to_storage`
                        // double-checks the slice length is sufficient.
                        let results: &Return::Lower = unsafe { slice_to_storage(results) };
                        Self::lift_stack_result(cx, ty, results)
                    })?
                } else {
                    func.with_lift_context(store, |cx, ty| {
                        Self::lift_heap_result(cx, ty, &results[0])
                    })?
                };
                Ok(Box::new(result))
            },
        )
    }
410
    /// Shared synchronous implementation backing `call` and the fiber-based
    /// path of `call_async`: lowers `params`, invokes the core wasm function
    /// via `call_raw`, lifts the result, and runs `post-return`.
    fn call_impl(&self, mut store: impl AsContextMut, params: Params) -> Result<Return> {
        let mut store = store.as_context_mut();

        // Async-lifted exports can only be invoked through the
        // `component-model-async` machinery, not this synchronous path.
        if self.func.abi_async(store.0) {
            bail!("must enable the `component-model-async` feature to call async-lifted exports")
        }

        // Note that this is in theory simpler than it might read at this time.
        // Here we're doing a runtime dispatch on the `flatten_count` for the
        // params/results to see whether they're inbounds. This creates 4 cases
        // to handle. In reality this is a highly optimizable branch where LLVM
        // will easily figure out that only one branch here is taken.
        //
        // Otherwise this current construction is done to ensure that the stack
        // space reserved for the params/results is always of the appropriate
        // size (as the params/results needed differ depending on the "flatten"
        // count)
        //
        // SAFETY: the safety of these invocations of `call_raw` depends on the
        // correctness of the ascription of the `LowerParams` and `LowerReturn`
        // types on the `call_raw` function. That's upheld here through the
        // safety requirements of `Lift` and `Lower` on `Params` and `Return` in
        // combination with checking the various possible branches here and
        // dispatching to appropriately typed functions.
        let (result, post_return_arg) = unsafe {
            // This type is used as `LowerParams` for `call_raw` which is either
            // `Params::Lower` or `ValRaw` representing it's either on the stack
            // or it's on the heap. This allocates 1 extra `ValRaw` on the stack
            // if `Params` is empty and `Return` is also empty, but that's a
            // reasonable enough price to pay for now given the current code
            // organization.
            #[derive(Copy, Clone)]
            union Union<T: Copy, U: Copy> {
                _a: T,
                _b: U,
            }

            if Return::flatten_count() <= MAX_FLAT_RESULTS {
                self.func.call_raw(
                    store.as_context_mut(),
                    |cx, ty, dst: &mut MaybeUninit<Union<Params::Lower, ValRaw>>| {
                        let dst = storage_as_slice_mut(dst);
                        Self::lower_args(cx, ty, dst, &params)
                    },
                    Self::lift_stack_result,
                )
            } else {
                self.func.call_raw(
                    store.as_context_mut(),
                    |cx, ty, dst: &mut MaybeUninit<Union<Params::Lower, ValRaw>>| {
                        let dst = storage_as_slice_mut(dst);
                        Self::lower_args(cx, ty, dst, &params)
                    },
                    Self::lift_heap_result,
                )
            }
        }?;

        // Run the `post-return` function (if any) now that the host has
        // finished lifting the results (see the "Post-return" docs on `call`).
        self.func.post_return_impl(store, post_return_arg)?;

        Ok(result)
    }
473
    /// Lower parameters directly onto the stack specified by the `dst`
    /// location.
    ///
    /// This is only valid to call when the "flatten count" is small enough, or
    /// when the canonical ABI says arguments go through the stack rather than
    /// the heap.
    fn lower_stack_args<T>(
        cx: &mut LowerContext<'_, T>,
        params: &Params,
        ty: InterfaceType,
        dst: &mut MaybeUninit<Params::Lower>,
    ) -> Result<()> {
        // Callers (`lower_args`) should already have checked this; the assert
        // guards the precondition described in the doc comment above.
        assert!(Params::flatten_count() <= MAX_FLAT_PARAMS);
        params.linear_lower_to_flat(cx, ty, dst)?;
        Ok(())
    }
490
    /// Lower parameters onto a heap-allocated location.
    ///
    /// This is used when the stack space to be used for the arguments is above
    /// the `MAX_FLAT_PARAMS` threshold. Here the wasm's `realloc` function is
    /// invoked to allocate space and then parameters are stored at that heap
    /// pointer location.
    fn lower_heap_args<T>(
        cx: &mut LowerContext<'_, T>,
        params: &Params,
        ty: InterfaceType,
        dst: &mut MaybeUninit<ValRaw>,
    ) -> Result<()> {
        // Memory must exist via validation if the arguments are stored on the
        // heap, so we can create a `MemoryMut` at this point. Afterwards
        // `realloc` is used to allocate space for all the arguments and then
        // they're all stored in linear memory.
        //
        // Note that `realloc` will bake in a check that the returned pointer is
        // in-bounds.
        let ptr = cx.realloc(0, 0, Params::ALIGN32, Params::SIZE32)?;
        params.linear_lower_to_memory(cx, ty, ptr)?;

        // Note that the pointer here is stored as a 64-bit integer. This allows
        // this to work with either 32 or 64-bit memories. For a 32-bit memory
        // it'll just ignore the upper 32 zero bits, and for 64-bit memories
        // this'll have the full 64-bits. Note that for 32-bit memories the call
        // to `realloc` above guarantees that the `ptr` is in-bounds meaning
        // that we will know that the zero-extended upper bits of `ptr` are
        // guaranteed to be zero.
        //
        // This comment about 64-bit integers is also referred to below with
        // "WRITEPTR64".
        dst.write(ValRaw::i64(ptr as i64));

        Ok(())
    }
527
    /// Lift the result of a function directly from the stack result.
    ///
    /// This is only used when the result fits in the maximum number of stack
    /// slots.
    fn lift_stack_result(
        cx: &mut LiftContext<'_>,
        ty: InterfaceType,
        dst: &Return::Lower,
    ) -> Result<Return> {
        Return::linear_lift_from_flat(cx, ty, dst)
    }
539
    /// Lift the result of a function where the result is stored indirectly on
    /// the heap.
    ///
    /// `dst` holds the guest pointer to the linear-memory location of the
    /// return value; the bytes there are bounds- and alignment-checked before
    /// being lifted.
    fn lift_heap_result(
        cx: &mut LiftContext<'_>,
        ty: InterfaceType,
        dst: &ValRaw,
    ) -> Result<Return> {
        assert!(Return::flatten_count() > MAX_FLAT_RESULTS);
        // FIXME(#4311): needs to read an i64 for memory64
        let ptr = usize::try_from(dst.get_u32())?;
        if ptr % usize::try_from(Return::ALIGN32)? != 0 {
            bail!("return pointer not aligned");
        }

        // Bounds-check the `Return::SIZE32`-byte region before lifting; an
        // out-of-range guest pointer is a canonical-ABI violation.
        let bytes = cx
            .memory()
            .get(ptr..)
            .and_then(|b| b.get(..Return::SIZE32))
            .ok_or_else(|| crate::format_err!("pointer out of bounds of memory"))?;
        Return::linear_lift_from_memory(cx, ty, bytes)
    }
561
    // Deprecated no-op retained for backwards compatibility: `post-return` is
    // now invoked automatically by `call`/`call_async`.
    #[doc(hidden)]
    #[deprecated(note = "no longer needs to be called; this function has no effect")]
    pub fn post_return(&self, _store: impl AsContextMut) -> Result<()> {
        Ok(())
    }
567
    // Async counterpart of the deprecated `post_return` above; likewise a
    // no-op kept only so existing callers continue to compile.
    #[doc(hidden)]
    #[deprecated(note = "no longer needs to be called; this function has no effect")]
    #[cfg(feature = "async")]
    pub async fn post_return_async<T: Send>(
        &self,
        _store: impl AsContextMut<Data = T>,
    ) -> Result<()> {
        Ok(())
    }
577}
578
/// A trait representing a static list of named types that can be passed to or
/// returned from a [`TypedFunc`].
///
/// This trait is implemented for a number of tuple types and is not expected
/// to be implemented externally. The contents of this trait are hidden as it's
/// intended to be an implementation detail of Wasmtime. The contents of this
/// trait are not covered by Wasmtime's stability guarantees.
///
/// For more information about this trait see [`Func::typed`] and
/// [`TypedFunc`].
//
// Note that this is an `unsafe` trait, and the unsafety means that
// implementations of this trait must be correct or otherwise [`TypedFunc`]
// would not be memory safe. The main reason this is `unsafe` is the
// `typecheck` function which must operate correctly relative to the `AsTuple`
// interpretation of the implementor.
pub unsafe trait ComponentNamedList: ComponentType {}
596
597/// A trait representing types which can be passed to and read from components
598/// with the canonical ABI.
599///
600/// This trait is implemented for Rust types which can be communicated to
601/// components. The [`Func::typed`] and [`TypedFunc`] Rust items are the main
602/// consumers of this trait.
603///
604/// Supported Rust types include:
605///
606/// | Component Model Type              | Rust Type                            |
607/// |-----------------------------------|--------------------------------------|
608/// | `{s,u}{8,16,32,64}`               | `{i,u}{8,16,32,64}`                  |
609/// | `f{32,64}`                        | `f{32,64}`                           |
610/// | `bool`                            | `bool`                               |
611/// | `char`                            | `char`                               |
612/// | `tuple<A, B>`                     | `(A, B)`                             |
613/// | `option<T>`                       | `Option<T>`                          |
614/// | `result`                          | `Result<(), ()>`                     |
615/// | `result<T>`                       | `Result<T, ()>`                      |
616/// | `result<_, E>`                    | `Result<(), E>`                      |
617/// | `result<T, E>`                    | `Result<T, E>`                       |
618/// | `string`                          | `String`, `&str`, or [`WasmStr`]     |
619/// | `list<T>`                         | `Vec<T>`, `&[T]`, or [`WasmList`]    |
620/// | `own<T>`, `borrow<T>`             | [`Resource<T>`] or [`ResourceAny`]   |
621/// | `record`                          | [`#[derive(ComponentType)]`][d-cm]   |
622/// | `variant`                         | [`#[derive(ComponentType)]`][d-cm]   |
623/// | `enum`                            | [`#[derive(ComponentType)]`][d-cm]   |
624/// | `flags`                           | [`flags!`][f-m]                      |
625///
626/// [`Resource<T>`]: crate::component::Resource
627/// [`ResourceAny`]: crate::component::ResourceAny
628/// [d-cm]: macro@crate::component::ComponentType
629/// [f-m]: crate::component::flags
630///
631/// Rust standard library pointers such as `&T`, `Box<T>`, and `Arc<T>`
632/// additionally represent whatever type `T` represents in the component model.
633/// Note that types such as `record`, `variant`, `enum`, and `flags` are
634/// generated by the embedder at compile time. These macros derive
635/// implementation of this trait for custom types to map to custom types in the
636/// component model. Note that for `record`, `variant`, `enum`, and `flags`
637/// those types are often generated by the
638/// [`bindgen!`](crate::component::bindgen) macro from WIT definitions.
639///
640/// Types that implement [`ComponentType`] are used for `Params` and `Return`
641/// in [`TypedFunc`] and [`Func::typed`].
642///
643/// The contents of this trait are hidden as it's intended to be an
644/// implementation detail of Wasmtime. The contents of this trait are not
645/// covered by Wasmtime's stability guarantees.
646///
647/// # Safety
648///
649/// Note that this is an `unsafe` trait as `TypedFunc`'s safety heavily relies on
650/// the correctness of the implementations of this trait. Some ways in which this
651/// trait must be correct to be safe are:
652///
653/// * The `Lower` associated type must be a `ValRaw` sequence. It doesn't have to
654///   literally be `[ValRaw; N]` but when laid out in memory it must be adjacent
655///   `ValRaw` values and have a multiple of the size of `ValRaw` and the same
656///   alignment.
657///
658/// * The `lower` function must initialize the bits within `Lower` that are going
659///   to be read by the trampoline that's used to enter core wasm. A trampoline
660///   is passed `*mut Lower` and will read the canonical abi arguments in
661///   sequence, so all of the bits must be correctly initialized.
662///
663/// * The `size` and `align` functions must be correct for this value stored in
664///   the canonical ABI. The `Cursor<T>` iteration of these bytes rely on this
665///   for correctness as they otherwise eschew bounds-checking.
666///
667/// There are likely some other correctness issues which aren't documented as
668/// well, this isn't currently an exhaustive list. It suffices to say, though,
669/// that correctness bugs in this trait implementation are highly likely to
670/// lead to security bugs, which again leads to the `unsafe` in the trait.
671///
672/// Note that this trait specifically is not sealed because `bindgen!`-generated
673/// types must be able to implement this trait using a `#[derive]` macro. For
674/// users it's recommended to not implement this trait manually given the
675/// non-exhaustive list of safety requirements that must be upheld. This trait
676/// is implemented at your own risk if you do so.
677///
678/// # Send and Sync
679///
680/// While on the topic of safety it's worth discussing the `Send` and `Sync`
681/// bounds here as well. These bounds might naively seem like they shouldn't be
682/// required for all component types as they're host-level types not guest-level
683/// types persisted anywhere. Various subtleties lead to these bounds, however:
684///
685/// * Fibers require that all stack-local variables are `Send` and `Sync` for
686///   fibers themselves to be send/sync. Unfortunately we have no help from the
687///   compiler on this one so it's up to Wasmtime's discipline to maintain this.
688///   One instance of this is that return values are placed on the stack as
689///   they're lowered into guest memory. This lowering operation can involve
690///   malloc and context switches, so return values must be Send/Sync.
691///
692/// * In the implementation of component model async it's not uncommon for types
693///   to be "buffered" in the store temporarily. For example parameters might
694///   reside in a store temporarily while wasm has backpressure turned on.
695///
/// Overall it's generally easiest to require `Send` and `Sync` for all
/// component types. Additionally, there aren't any known use cases for
/// non-`Send` or non-`Sync` types at this time.
pub unsafe trait ComponentType: Send + Sync {
    /// Representation of the "lowered" form of this component value.
    ///
    /// Lowerings lower into core wasm values which are represented by `ValRaw`.
    /// This `Lower` type must be a list of `ValRaw` as either a literal array
    /// or a struct where every field is a `ValRaw`. This must be `Copy` (as
    /// `ValRaw` is `Copy`) and support all byte patterns. This being correct is
    /// one reason why the trait is unsafe.
    #[doc(hidden)]
    type Lower: Copy;

    /// The information about this type's canonical ABI (size/align/etc).
    #[doc(hidden)]
    const ABI: CanonicalAbiInfo;

    /// Size, in bytes, of this type in 32-bit linear memory, derived from
    /// `ABI`.
    #[doc(hidden)]
    const SIZE32: usize = Self::ABI.size32 as usize;
    /// Alignment, in bytes, of this type in 32-bit linear memory, derived
    /// from `ABI`.
    #[doc(hidden)]
    const ALIGN32: u32 = Self::ABI.align32;

    /// Marker for whether this Rust type is the unit type `()`; `false` by
    /// default.
    #[doc(hidden)]
    const IS_RUST_UNIT_TYPE: bool = false;

    /// Whether this type might require a call to the guest's realloc function
    /// to allocate linear memory when lowering (e.g. a non-empty `string`).
    ///
    /// If this is `false`, Wasmtime may optimize lowering by using
    /// `LowerContext::new_without_realloc` and lowering values outside of any
    /// fiber.  That will panic if the lowering process ends up needing realloc
    /// after all, so `true` is a conservative default.
    #[doc(hidden)]
    const MAY_REQUIRE_REALLOC: bool = true;

    /// Returns the number of core wasm abi values that will be used to
    /// represent this type in its lowered form.
    ///
    /// This divides the size of `Self::Lower` by the size of `ValRaw`.
    #[doc(hidden)]
    fn flatten_count() -> usize {
        // `Self::Lower` is required to be a sequence of `ValRaw`s, so its
        // size must be an exact multiple of `ValRaw`'s size and it must share
        // `ValRaw`'s alignment; these assertions double-check that contract.
        assert!(mem::size_of::<Self::Lower>() % mem::size_of::<ValRaw>() == 0);
        assert!(mem::align_of::<Self::Lower>() == mem::align_of::<ValRaw>());
        mem::size_of::<Self::Lower>() / mem::size_of::<ValRaw>()
    }

    /// Performs a type-check to see whether this component value type matches
    /// the interface type `ty` provided.
    #[doc(hidden)]
    fn typecheck(ty: &InterfaceType, types: &InstanceType<'_>) -> Result<()>;
}
748
/// Internal helper trait for component model variant-shaped types, carrying
/// precomputed layout information about each case.
#[doc(hidden)]
pub unsafe trait ComponentVariant: ComponentType {
    /// Canonical ABI layout of each case's payload; `None` for cases that
    /// carry no payload.
    const CASES: &'static [Option<CanonicalAbiInfo>];
    /// Layout information computed statically from `CASES`.
    const INFO: VariantInfo = VariantInfo::new_static(Self::CASES);
    /// Byte offset of the case payload in 32-bit linear memory, derived from
    /// `INFO`.
    const PAYLOAD_OFFSET32: usize = Self::INFO.payload_offset32 as usize;
}
755
/// Host types which can be passed to WebAssembly components.
///
/// This trait is implemented for all types that can be passed to components
/// either as parameters of component exports or returns of component imports.
/// This trait represents the ability to convert from the native host
/// representation to the canonical ABI.
///
/// Built-in types to Rust such as `Option<T>` implement this trait as
/// appropriate. For a mapping of component model to Rust types see
/// [`ComponentType`].
///
/// For user-defined types, for example `record` types mapped to Rust `struct`s,
/// this crate additionally has
/// [`#[derive(Lower)]`](macro@crate::component::Lower).
///
/// Note that like [`ComponentType`] the definition of this trait is intended to
/// be an internal implementation detail of Wasmtime at this time. It's
/// recommended to use the `#[derive(Lower)]` implementation instead.
pub unsafe trait Lower: ComponentType {
    /// Performs the "lower" function in the linear memory version of the
    /// canonical ABI.
    ///
    /// This method will lower the current value into a component. The `lower`
    /// function performs a "flat" lowering into the `dst` specified which is
    /// allowed to be uninitialized entering this method but is guaranteed to be
    /// fully initialized if the method returns `Ok(())`.
    ///
    /// The `cx` context provided is the context within which this lowering is
    /// happening. This contains information such as canonical options specified
    /// (e.g. string encodings, memories, etc), the store itself, along with
    /// type information.
    ///
    /// The `ty` parameter is the destination type that is being lowered into.
    /// For example this is the component's "view" of the type that is being
    /// lowered. This is guaranteed to have passed a `typecheck` earlier.
    ///
    /// This will only be called if `typecheck` passes for `Op::Lower`.
    #[doc(hidden)]
    fn linear_lower_to_flat<T>(
        &self,
        cx: &mut LowerContext<'_, T>,
        ty: InterfaceType,
        dst: &mut MaybeUninit<Self::Lower>,
    ) -> Result<()>;

    /// Performs the "store" operation in the linear memory version of the
    /// canonical ABI.
    ///
    /// This function will store `self` into the linear memory described by
    /// `cx` at the `offset` provided.
    ///
    /// It is expected that `offset` is a valid offset in memory for
    /// `Self::SIZE32` bytes. At this time that's not an unsafe contract as it's
    /// always re-checked on all stores, but this is something that will need to
    /// be improved in the future to remove extra bounds checks. For now this
    /// function will panic if there's a bug and `offset` isn't valid within
    /// memory.
    ///
    /// The `ty` type information passed here is the same as the type
    /// information passed to `lower` above, and is the component's own view of
    /// what the resulting type should be.
    ///
    /// This will only be called if `typecheck` passes for `Op::Lower`.
    #[doc(hidden)]
    fn linear_lower_to_memory<T>(
        &self,
        cx: &mut LowerContext<'_, T>,
        ty: InterfaceType,
        offset: usize,
    ) -> Result<()>;

    /// Provided method to lower a list of `Self` into memory.
    ///
    /// The `ty` argument is the element type, passed unchanged to each
    /// element's `linear_lower_to_memory`.
    ///
    /// Requires that `offset` has already been checked for alignment and
    /// validity in terms of being in-bounds, otherwise this may panic.
    ///
    /// This is primarily here to get overridden for implementations of integers
    /// which can avoid some extra fluff and use a pattern that's more easily
    /// optimizable by LLVM.
    #[doc(hidden)]
    fn linear_store_list_to_memory<T>(
        cx: &mut LowerContext<'_, T>,
        ty: InterfaceType,
        mut offset: usize,
        items: &[Self],
    ) -> Result<()>
    where
        Self: Sized,
    {
        // Default implementation: store each element into its own
        // `SIZE32`-sized slot at consecutive offsets. The `integers!` macro
        // overrides this with a bulk copy.
        for item in items {
            item.linear_lower_to_memory(cx, ty, offset)?;
            offset += Self::SIZE32;
        }
        Ok(())
    }
}
852
/// Host types which can be created from the canonical ABI.
///
/// This is the mirror of the [`Lower`] trait where it represents the capability
/// of acquiring items from WebAssembly and passing them to the host.
///
/// Built-in types to Rust such as `Option<T>` implement this trait as
/// appropriate. For a mapping of component model to Rust types see
/// [`ComponentType`].
///
/// For user-defined types, for example `record` types mapped to Rust `struct`s,
/// this crate additionally has
/// [`#[derive(Lift)]`](macro@crate::component::Lift).
///
/// Note that like [`ComponentType`] the definition of this trait is intended to
/// be an internal implementation detail of Wasmtime at this time. It's
/// recommended to use the `#[derive(Lift)]` implementation instead.
pub unsafe trait Lift: Sized + ComponentType {
    /// Performs the "lift" operation in the linear memory version of the
    /// canonical ABI.
    ///
    /// This function performs a "flat" lift operation from the `src` specified
    /// which is a sequence of core wasm values. The lifting operation will
    /// validate core wasm values and produce a `Self` on success.
    ///
    /// The `cx` provided contains contextual information such as the store
    /// that's being loaded from, canonical options, and type information.
    ///
    /// The `ty` parameter is the origin component's specification for what the
    /// type that is being lifted is. For example this is the record type or the
    /// resource type that is being lifted.
    ///
    /// Note that this has a default implementation but if `typecheck` passes
    /// for `Op::Lift` this needs to be overridden.
    #[doc(hidden)]
    fn linear_lift_from_flat(
        cx: &mut LiftContext<'_>,
        ty: InterfaceType,
        src: &Self::Lower,
    ) -> Result<Self>;

    /// Performs the "load" operation in the linear memory version of the
    /// canonical ABI.
    ///
    /// This will read the `bytes` provided, which are a sub-slice into the
    /// linear memory described by `cx`. The `bytes` array provided is
    /// guaranteed to be `Self::SIZE32` bytes large. All of memory is then also
    /// available through `cx` for bounds-checks and such as necessary for
    /// strings/lists.
    ///
    /// The `ty` argument is the type that's being loaded, as described by the
    /// original component.
    ///
    /// Note that this has a default implementation but if `typecheck` passes
    /// for `Op::Lift` this needs to be overridden.
    #[doc(hidden)]
    fn linear_lift_from_memory(
        cx: &mut LiftContext<'_>,
        ty: InterfaceType,
        bytes: &[u8],
    ) -> Result<Self>;

    /// Converts `list` into a `Vec<T>`, used in `Lift for Vec<T>`.
    #[doc(hidden)]
    fn linear_lift_list_from_memory(
        cx: &mut LiftContext<'_>,
        list: &WasmList<Self>,
    ) -> Result<Vec<Self>>
    where
        Self: Sized,
    {
        // Preallocate the exact number of elements and defer to the
        // element-wise (or overridden bulk) lifting below.
        let mut dst = Vec::with_capacity(list.len);
        Self::linear_lift_into_from_memory(cx, list, &mut dst)?;
        Ok(dst)
    }

    /// Load no more than `max_count` items from `list` into `dst`.
    ///
    /// This is primarily here to get overridden for implementations of integers
    /// which can avoid some extra fluff and use a pattern that's more easily
    /// optimizable by LLVM.
    #[doc(hidden)]
    fn linear_lift_into_from_memory(
        cx: &mut LiftContext<'_>,
        list: &WasmList<Self>,
        dst: &mut impl Extend<Self>,
    ) -> Result<()>
    where
        Self: Sized,
    {
        // `i < list.len` always holds here so `get_from_store` returns
        // `Some`, making the `unwrap` infallible; the inner `?` propagates
        // genuine lift errors.
        for i in 0..list.len {
            dst.extend(Some(list.get_from_store(cx, i).unwrap()?));
        }
        Ok(())
    }
}
948
// Macro to help generate "forwarding implementations" of `ComponentType` to
// another type, used for wrappers in Rust like `&T`, `Box<T>`, etc. Note that
// these wrappers only implement lowering because lifting native Rust types
// cannot be done.
macro_rules! forward_type_impls {
    ($(($($generics:tt)*) $a:ty => $b:ty,)*) => ($(
        unsafe impl <$($generics)*> ComponentType for $a {
            // Wrapper types share the lowered representation, ABI info, and
            // typecheck of the type they wrap.
            type Lower = <$b as ComponentType>::Lower;

            const ABI: CanonicalAbiInfo = <$b as ComponentType>::ABI;

            #[inline]
            fn typecheck(ty: &InterfaceType, types: &InstanceType<'_>) -> Result<()> {
                <$b as ComponentType>::typecheck(ty, types)
            }
        }
    )*)
}

forward_type_impls! {
    (T: ComponentType + ?Sized) &'_ T => T,
    (T: ComponentType + ?Sized) Box<T> => T,
    (T: ComponentType + ?Sized) alloc::sync::Arc<T> => T,
    () String => str,
    (T: ComponentType) Vec<T> => [T],
}
975
// Macro to generate forwarding `Lower` implementations for wrapper types,
// delegating both the flat and the in-memory lowering to the wrapped type.
macro_rules! forward_lowers {
    ($(($($generics:tt)*) $a:ty => $b:ty,)*) => ($(
        unsafe impl <$($generics)*> Lower for $a {
            fn linear_lower_to_flat<U>(
                &self,
                cx: &mut LowerContext<'_, U>,
                ty: InterfaceType,
                dst: &mut MaybeUninit<Self::Lower>,
            ) -> Result<()> {
                <$b as Lower>::linear_lower_to_flat(self, cx, ty, dst)
            }

            fn linear_lower_to_memory<U>(
                &self,
                cx: &mut LowerContext<'_, U>,
                ty: InterfaceType,
                offset: usize,
            ) -> Result<()> {
                <$b as Lower>::linear_lower_to_memory(self, cx, ty, offset)
            }
        }
    )*)
}

forward_lowers! {
    (T: Lower + ?Sized) &'_ T => T,
    (T: Lower + ?Sized) Box<T> => T,
    (T: Lower + ?Sized) alloc::sync::Arc<T> => T,
    () String => str,
    (T: Lower) Vec<T> => [T],
}
1007
// Macro to generate `Lift` for owned string types: first lift a `WasmStr`
// view into guest memory, then decode it into an owned Rust string using the
// lifted side's string encoding.
macro_rules! forward_string_lifts {
    ($($a:ty,)*) => ($(
        unsafe impl Lift for $a {
            #[inline]
            fn linear_lift_from_flat(cx: &mut LiftContext<'_>, ty: InterfaceType, src: &Self::Lower) -> Result<Self> {
                let s = <WasmStr as Lift>::linear_lift_from_flat(cx, ty, src)?;
                let encoding = cx.options().string_encoding;
                Ok(s.to_str_from_memory(encoding, cx.memory())?.into())
            }

            #[inline]
            fn linear_lift_from_memory(cx: &mut LiftContext<'_>, ty: InterfaceType, bytes: &[u8]) -> Result<Self> {
                let s = <WasmStr as Lift>::linear_lift_from_memory(cx, ty, bytes)?;
                let encoding = cx.options().string_encoding;
                Ok(s.to_str_from_memory(encoding, cx.memory())?.into())
            }
        }
    )*)
}

forward_string_lifts! {
    Box<str>,
    alloc::sync::Arc<str>,
    String,
}
1033
// Macro to generate `Lift` for owned list containers: first lift a
// `WasmList<T>` view into guest memory, then copy the elements out into the
// owned container via `linear_lift_list_from_memory`.
macro_rules! forward_list_lifts {
    ($($a:ty,)*) => ($(
        unsafe impl <T: Lift> Lift for $a {
            fn linear_lift_from_flat(cx: &mut LiftContext<'_>, ty: InterfaceType, src: &Self::Lower) -> Result<Self> {
                let list = <WasmList::<T> as Lift>::linear_lift_from_flat(cx, ty, src)?;
                Ok(T::linear_lift_list_from_memory(cx, &list)?.into())
            }

            fn linear_lift_from_memory(cx: &mut LiftContext<'_>, ty: InterfaceType, bytes: &[u8]) -> Result<Self> {
                let list = <WasmList::<T> as Lift>::linear_lift_from_memory(cx, ty, bytes)?;
                Ok(T::linear_lift_list_from_memory(cx, &list)?.into())
            }
        }
    )*)
}

forward_list_lifts! {
    Box<[T]>,
    alloc::sync::Arc<[T]>,
    Vec<T>,
}
1055
// Macro to help generate `ComponentType` implementations for primitive types
// such as integers, char, bool, etc.
macro_rules! integers {
    ($($primitive:ident = $ty:ident in $field:ident/$get:ident with abi:$abi:ident,)*) => ($(
        unsafe impl ComponentType for $primitive {
            type Lower = ValRaw;

            const ABI: CanonicalAbiInfo = CanonicalAbiInfo::$abi;

            // Integer lowering only writes into already-allocated memory and
            // never calls the guest's `realloc`, so the realloc-free lowering
            // optimization is allowed.
            const MAY_REQUIRE_REALLOC: bool = false;

            fn typecheck(ty: &InterfaceType, _types: &InstanceType<'_>) -> Result<()> {
                match ty {
                    InterfaceType::$ty => Ok(()),
                    other => bail!("expected `{}` found `{}`", desc(&InterfaceType::$ty), desc(other))
                }
            }
        }

        unsafe impl Lower for $primitive {
            #[inline]
            #[allow(trivial_numeric_casts, reason = "macro-generated code")]
            fn linear_lower_to_flat<T>(
                &self,
                _cx: &mut LowerContext<'_, T>,
                ty: InterfaceType,
                dst: &mut MaybeUninit<Self::Lower>,
            ) -> Result<()> {
                debug_assert!(matches!(ty, InterfaceType::$ty));
                dst.write(ValRaw::$field(*self as $field));
                Ok(())
            }

            #[inline]
            fn linear_lower_to_memory<T>(
                &self,
                cx: &mut LowerContext<'_, T>,
                ty: InterfaceType,
                offset: usize,
            ) -> Result<()> {
                debug_assert!(matches!(ty, InterfaceType::$ty));
                debug_assert!(offset % Self::SIZE32 == 0);
                *cx.get(offset) = self.to_le_bytes();
                Ok(())
            }

            fn linear_store_list_to_memory<T>(
                cx: &mut LowerContext<'_, T>,
                ty: InterfaceType,
                offset: usize,
                items: &[Self],
            ) -> Result<()> {
                debug_assert!(matches!(ty, InterfaceType::$ty));

                // Double-check that the CM alignment is at least the host's
                // alignment for this type which should be true for all
                // platforms.
                assert!((Self::ALIGN32 as usize) >= mem::align_of::<Self>());

                // Slice `cx`'s memory to the window that we'll be modifying.
                // This should all have already been verified in terms of
                // alignment and sizing meaning that these assertions here are
                // not truly necessary but are instead double-checks.
                //
                // Note that we're casting a `[u8]` slice to `[Self]` with
                // `align_to_mut` which is not safe in general but is safe in
                // our specific case as all `u8` patterns are valid `Self`
                // patterns since `Self` is an integral type.
                let dst = &mut cx.as_slice_mut()[offset..][..items.len() * Self::SIZE32];
                let (before, middle, end) = unsafe { dst.align_to_mut::<Self>() };
                assert!(before.is_empty() && end.is_empty());
                assert_eq!(middle.len(), items.len());

                // And with all that out of the way perform the copying loop.
                // This is not a `copy_from_slice` because endianness needs to
                // be handled here, but LLVM should pretty easily transform this
                // into a memcpy on little-endian platforms.
                for (dst, src) in middle.iter_mut().zip(items) {
                    *dst = src.to_le();
                }
                Ok(())
            }
        }

        unsafe impl Lift for $primitive {
            #[inline]
            #[allow(
                trivial_numeric_casts,
                clippy::cast_possible_truncation,
                reason = "macro-generated code"
            )]
            fn linear_lift_from_flat(_cx: &mut LiftContext<'_>, ty: InterfaceType, src: &Self::Lower) -> Result<Self> {
                debug_assert!(matches!(ty, InterfaceType::$ty));
                Ok(src.$get() as $primitive)
            }

            #[inline]
            fn linear_lift_from_memory(_cx: &mut LiftContext<'_>, ty: InterfaceType, bytes: &[u8]) -> Result<Self> {
                debug_assert!(matches!(ty, InterfaceType::$ty));
                debug_assert!((bytes.as_ptr() as usize) % Self::SIZE32 == 0);
                Ok($primitive::from_le_bytes(bytes.try_into().unwrap()))
            }

            // Bulk override of the default element-wise lift: view guest
            // memory as a little-endian slice and convert in one pass.
            fn linear_lift_into_from_memory(
                cx: &mut LiftContext<'_>,
                list: &WasmList<Self>,
                dst: &mut impl Extend<Self>,
            ) -> Result<()>
            where
                Self: Sized,
            {
                dst.extend(list._as_le_slice(cx.memory())
                           .iter()
                           .map(|i| Self::from_le(*i)));
                Ok(())
            }
        }
    )*)
}

integers! {
    i8 = S8 in i32/get_i32 with abi:SCALAR1,
    u8 = U8 in u32/get_u32 with abi:SCALAR1,
    i16 = S16 in i32/get_i32 with abi:SCALAR2,
    u16 = U16 in u32/get_u32 with abi:SCALAR2,
    i32 = S32 in i32/get_i32 with abi:SCALAR4,
    u32 = U32 in u32/get_u32 with abi:SCALAR4,
    i64 = S64 in i64/get_i64 with abi:SCALAR8,
    u64 = U64 in u64/get_u64 with abi:SCALAR8,
}
1186
1187macro_rules! floats {
1188    ($($float:ident/$get_float:ident = $ty:ident with abi:$abi:ident)*) => ($(const _: () = {
1189        unsafe impl ComponentType for $float {
1190            type Lower = ValRaw;
1191
1192            const ABI: CanonicalAbiInfo = CanonicalAbiInfo::$abi;
1193
1194            fn typecheck(ty: &InterfaceType, _types: &InstanceType<'_>) -> Result<()> {
1195                match ty {
1196                    InterfaceType::$ty => Ok(()),
1197                    other => bail!("expected `{}` found `{}`", desc(&InterfaceType::$ty), desc(other))
1198                }
1199            }
1200        }
1201
1202        unsafe impl Lower for $float {
1203            #[inline]
1204            fn linear_lower_to_flat<T>(
1205                &self,
1206                _cx: &mut LowerContext<'_, T>,
1207                ty: InterfaceType,
1208                dst: &mut MaybeUninit<Self::Lower>,
1209            ) -> Result<()> {
1210                debug_assert!(matches!(ty, InterfaceType::$ty));
1211                dst.write(ValRaw::$float(self.to_bits()));
1212                Ok(())
1213            }
1214
1215            #[inline]
1216            fn linear_lower_to_memory<T>(
1217                &self,
1218                cx: &mut LowerContext<'_, T>,
1219                ty: InterfaceType,
1220                offset: usize,
1221            ) -> Result<()> {
1222                debug_assert!(matches!(ty, InterfaceType::$ty));
1223                debug_assert!(offset % Self::SIZE32 == 0);
1224                let ptr = cx.get(offset);
1225                *ptr = self.to_bits().to_le_bytes();
1226                Ok(())
1227            }
1228
1229            fn linear_store_list_to_memory<T>(
1230                cx: &mut LowerContext<'_, T>,
1231                ty: InterfaceType,
1232                offset: usize,
1233                items: &[Self],
1234            ) -> Result<()> {
1235                debug_assert!(matches!(ty, InterfaceType::$ty));
1236
1237                // Double-check that the CM alignment is at least the host's
1238                // alignment for this type which should be true for all
1239                // platforms.
1240                assert!((Self::ALIGN32 as usize) >= mem::align_of::<Self>());
1241
1242                // Slice `cx`'s memory to the window that we'll be modifying.
1243                // This should all have already been verified in terms of
1244                // alignment and sizing meaning that these assertions here are
1245                // not truly necessary but are instead double-checks.
1246                let dst = &mut cx.as_slice_mut()[offset..][..items.len() * Self::SIZE32];
1247                assert!(dst.as_ptr().cast::<Self>().is_aligned());
1248
1249                // And with all that out of the way perform the copying loop.
1250                // This is not a `copy_from_slice` because endianness needs to
1251                // be handled here, but LLVM should pretty easily transform this
1252                // into a memcpy on little-endian platforms.
1253                // TODO use `as_chunks` when https://github.com/rust-lang/rust/issues/74985
1254                // is stabilized
1255                for (dst, src) in iter::zip(dst.chunks_exact_mut(Self::SIZE32), items) {
1256                    let dst: &mut [u8; Self::SIZE32] = dst.try_into().unwrap();
1257                    *dst = src.to_le_bytes();
1258                }
1259                Ok(())
1260            }
1261        }
1262
1263        unsafe impl Lift for $float {
1264            #[inline]
1265            fn linear_lift_from_flat(_cx: &mut LiftContext<'_>, ty: InterfaceType, src: &Self::Lower) -> Result<Self> {
1266                debug_assert!(matches!(ty, InterfaceType::$ty));
1267                Ok($float::from_bits(src.$get_float()))
1268            }
1269
1270            #[inline]
1271            fn linear_lift_from_memory(_cx: &mut LiftContext<'_>, ty: InterfaceType, bytes: &[u8]) -> Result<Self> {
1272                debug_assert!(matches!(ty, InterfaceType::$ty));
1273                debug_assert!((bytes.as_ptr() as usize) % Self::SIZE32 == 0);
1274                Ok($float::from_le_bytes(bytes.try_into().unwrap()))
1275            }
1276
1277            fn linear_lift_list_from_memory(cx: &mut LiftContext<'_>, list: &WasmList<Self>) -> Result<Vec<Self>> where Self: Sized {
1278                // See comments in `WasmList::get` for the panicking indexing
1279                let byte_size = list.len * mem::size_of::<Self>();
1280                let bytes = &cx.memory()[list.ptr..][..byte_size];
1281
1282                // The canonical ABI requires that everything is aligned to its
1283                // own size, so this should be an aligned array.
1284                assert!(bytes.as_ptr().cast::<Self>().is_aligned());
1285
1286                // Copy the resulting slice to a new Vec, handling endianness
1287                // in the process
1288                // TODO use `as_chunks` when https://github.com/rust-lang/rust/issues/74985
1289                // is stabilized
1290                Ok(
1291                    bytes
1292                        .chunks_exact(Self::SIZE32)
1293                        .map(|i| $float::from_le_bytes(i.try_into().unwrap()))
1294                        .collect()
1295                )
1296            }
1297        }
1298    };)*)
1299}
1300
1301floats! {
1302    f32/get_f32 = Float32 with abi:SCALAR4
1303    f64/get_f64 = Float64 with abi:SCALAR8
1304}
1305
1306unsafe impl ComponentType for bool {
1307    type Lower = ValRaw;
1308
1309    const ABI: CanonicalAbiInfo = CanonicalAbiInfo::SCALAR1;
1310
1311    fn typecheck(ty: &InterfaceType, _types: &InstanceType<'_>) -> Result<()> {
1312        match ty {
1313            InterfaceType::Bool => Ok(()),
1314            other => bail!("expected `bool` found `{}`", desc(other)),
1315        }
1316    }
1317}
1318
1319unsafe impl Lower for bool {
1320    fn linear_lower_to_flat<T>(
1321        &self,
1322        _cx: &mut LowerContext<'_, T>,
1323        ty: InterfaceType,
1324        dst: &mut MaybeUninit<Self::Lower>,
1325    ) -> Result<()> {
1326        debug_assert!(matches!(ty, InterfaceType::Bool));
1327        dst.write(ValRaw::i32(*self as i32));
1328        Ok(())
1329    }
1330
1331    fn linear_lower_to_memory<T>(
1332        &self,
1333        cx: &mut LowerContext<'_, T>,
1334        ty: InterfaceType,
1335        offset: usize,
1336    ) -> Result<()> {
1337        debug_assert!(matches!(ty, InterfaceType::Bool));
1338        debug_assert!(offset % Self::SIZE32 == 0);
1339        cx.get::<1>(offset)[0] = *self as u8;
1340        Ok(())
1341    }
1342}
1343
1344unsafe impl Lift for bool {
1345    #[inline]
1346    fn linear_lift_from_flat(
1347        _cx: &mut LiftContext<'_>,
1348        ty: InterfaceType,
1349        src: &Self::Lower,
1350    ) -> Result<Self> {
1351        debug_assert!(matches!(ty, InterfaceType::Bool));
1352        match src.get_i32() {
1353            0 => Ok(false),
1354            _ => Ok(true),
1355        }
1356    }
1357
1358    #[inline]
1359    fn linear_lift_from_memory(
1360        _cx: &mut LiftContext<'_>,
1361        ty: InterfaceType,
1362        bytes: &[u8],
1363    ) -> Result<Self> {
1364        debug_assert!(matches!(ty, InterfaceType::Bool));
1365        match bytes[0] {
1366            0 => Ok(false),
1367            _ => Ok(true),
1368        }
1369    }
1370}
1371
1372unsafe impl ComponentType for char {
1373    type Lower = ValRaw;
1374
1375    const ABI: CanonicalAbiInfo = CanonicalAbiInfo::SCALAR4;
1376
1377    fn typecheck(ty: &InterfaceType, _types: &InstanceType<'_>) -> Result<()> {
1378        match ty {
1379            InterfaceType::Char => Ok(()),
1380            other => bail!("expected `char` found `{}`", desc(other)),
1381        }
1382    }
1383}
1384
1385unsafe impl Lower for char {
1386    #[inline]
1387    fn linear_lower_to_flat<T>(
1388        &self,
1389        _cx: &mut LowerContext<'_, T>,
1390        ty: InterfaceType,
1391        dst: &mut MaybeUninit<Self::Lower>,
1392    ) -> Result<()> {
1393        debug_assert!(matches!(ty, InterfaceType::Char));
1394        dst.write(ValRaw::u32(u32::from(*self)));
1395        Ok(())
1396    }
1397
1398    #[inline]
1399    fn linear_lower_to_memory<T>(
1400        &self,
1401        cx: &mut LowerContext<'_, T>,
1402        ty: InterfaceType,
1403        offset: usize,
1404    ) -> Result<()> {
1405        debug_assert!(matches!(ty, InterfaceType::Char));
1406        debug_assert!(offset % Self::SIZE32 == 0);
1407        *cx.get::<4>(offset) = u32::from(*self).to_le_bytes();
1408        Ok(())
1409    }
1410}
1411
1412unsafe impl Lift for char {
1413    #[inline]
1414    fn linear_lift_from_flat(
1415        _cx: &mut LiftContext<'_>,
1416        ty: InterfaceType,
1417        src: &Self::Lower,
1418    ) -> Result<Self> {
1419        debug_assert!(matches!(ty, InterfaceType::Char));
1420        Ok(char::try_from(src.get_u32())?)
1421    }
1422
1423    #[inline]
1424    fn linear_lift_from_memory(
1425        _cx: &mut LiftContext<'_>,
1426        ty: InterfaceType,
1427        bytes: &[u8],
1428    ) -> Result<Self> {
1429        debug_assert!(matches!(ty, InterfaceType::Char));
1430        debug_assert!((bytes.as_ptr() as usize) % Self::SIZE32 == 0);
1431        let bits = u32::from_le_bytes(bytes.try_into().unwrap());
1432        Ok(char::try_from(bits)?)
1433    }
1434}
1435
// FIXME(#4311): these probably need different constants for memory64

// Tag bit in a string length word — NOTE(review): presumably marks a UTF-16
// code-unit count per the canonical ABI's latin1+utf16 encoding; confirm
// against the canonical ABI specification.
const UTF16_TAG: usize = 1 << 31;
// Maximum byte length of a lowered string: lengths must fit in 31 bits so
// the tag bit above remains available.
const MAX_STRING_BYTE_LENGTH: usize = (1 << 31) - 1;
1439
1440// Note that this is similar to `ComponentType for WasmStr` except it can only
1441// be used for lowering, not lifting.
1442unsafe impl ComponentType for str {
1443    type Lower = [ValRaw; 2];
1444
1445    const ABI: CanonicalAbiInfo = CanonicalAbiInfo::POINTER_PAIR;
1446
1447    fn typecheck(ty: &InterfaceType, _types: &InstanceType<'_>) -> Result<()> {
1448        match ty {
1449            InterfaceType::String => Ok(()),
1450            other => bail!("expected `string` found `{}`", desc(other)),
1451        }
1452    }
1453}
1454
unsafe impl Lower for str {
    // Copies/transcodes the string into guest memory via `lower_string` and
    // then writes the resulting (ptr, len) pair into the flat destination.
    fn linear_lower_to_flat<T>(
        &self,
        cx: &mut LowerContext<'_, T>,
        ty: InterfaceType,
        dst: &mut MaybeUninit<[ValRaw; 2]>,
    ) -> Result<()> {
        debug_assert!(matches!(ty, InterfaceType::String));
        let (ptr, len) = lower_string(cx, self)?;
        // See "WRITEPTR64" above for why this is always storing a 64-bit
        // integer.
        map_maybe_uninit!(dst[0]).write(ValRaw::i64(ptr as i64));
        map_maybe_uninit!(dst[1]).write(ValRaw::i64(len as i64));
        Ok(())
    }

    // Same as above, but the (ptr, len) pair is stored as two consecutive
    // little-endian u32 values at `offset` in linear memory.
    fn linear_lower_to_memory<T>(
        &self,
        cx: &mut LowerContext<'_, T>,
        ty: InterfaceType,
        offset: usize,
    ) -> Result<()> {
        debug_assert!(matches!(ty, InterfaceType::String));
        debug_assert!(offset % (Self::ALIGN32 as usize) == 0);
        let (ptr, len) = lower_string(cx, self)?;
        // FIXME(#4311): needs memory64 handling
        *cx.get(offset + 0) = u32::try_from(ptr).unwrap().to_le_bytes();
        *cx.get(offset + 4) = u32::try_from(len).unwrap().to_le_bytes();
        Ok(())
    }
}
1486
/// Copies `string` from the host into guest linear memory using the guest's
/// configured string encoding, returning the `(ptr, len)` pair that was
/// produced.
///
/// The meaning of the returned `len` depends on the destination encoding: a
/// byte count for utf8, a 16-bit code-unit count for utf16, and for
/// latin1+utf16 either a byte count (untagged) or a code-unit count with
/// `UTF16_TAG` set.
fn lower_string<T>(cx: &mut LowerContext<'_, T>, string: &str) -> Result<(usize, usize)> {
    // Note that in general the wasm module can't assume anything about what the
    // host strings are encoded as. Additionally hosts are allowed to have
    // differently-encoded strings at runtime. Finally when copying a string
    // into wasm it's somewhat strict in the sense that the various patterns of
    // allocation and such are already dictated for us.
    //
    // In general what this means is that when copying a string from the host
    // into the destination we need to follow one of the cases of copying into
    // WebAssembly. It doesn't particularly matter which case as long as it ends
    // up in the right encoding. For example a destination encoding of
    // latin1+utf16 has a number of ways to get copied into and we do something
    // here that isn't the default "utf8 to latin1+utf16" since we have access
    // to simd-accelerated helpers in the `encoding_rs` crate. This is ok though
    // because we can fake that the host string was already stored in latin1
    // format and follow that copy pattern instead.
    match cx.options().string_encoding {
        // This corresponds to `store_string_copy` in the canonical ABI where
        // the host's representation is utf-8 and the wasm module wants utf-8 so
        // a copy is all that's needed (and the `realloc` can be precise for the
        // initial memory allocation).
        StringEncoding::Utf8 => {
            if string.len() > MAX_STRING_BYTE_LENGTH {
                bail!(
                    "string length of {} too large to copy into wasm",
                    string.len()
                );
            }
            let ptr = cx.realloc(0, 0, 1, string.len())?;
            cx.as_slice_mut()[ptr..][..string.len()].copy_from_slice(string.as_bytes());
            Ok((ptr, string.len()))
        }

        // This corresponds to `store_utf8_to_utf16` in the canonical ABI. Here
        // an over-large allocation is performed and then shrunk afterwards if
        // necessary.
        StringEncoding::Utf16 => {
            // Worst case: every utf-8 byte becomes one 16-bit code unit.
            let size = string.len() * 2;
            if size > MAX_STRING_BYTE_LENGTH {
                bail!(
                    "string length of {} too large to copy into wasm",
                    string.len()
                );
            }
            let mut ptr = cx.realloc(0, 0, 2, size)?;
            let mut copied = 0;
            let bytes = &mut cx.as_slice_mut()[ptr..][..size];
            // `copied` counts 16-bit code units actually written, which may be
            // fewer than `size / 2` when multi-byte utf-8 sequences collapse
            // into single code units.
            for (u, bytes) in string.encode_utf16().zip(bytes.chunks_mut(2)) {
                let u_bytes = u.to_le_bytes();
                bytes[0] = u_bytes[0];
                bytes[1] = u_bytes[1];
                copied += 1;
            }
            // Shrink the over-allocation down to the bytes actually used.
            if (copied * 2) < size {
                ptr = cx.realloc(ptr, size, 2, copied * 2)?;
            }
            Ok((ptr, copied))
        }

        StringEncoding::CompactUtf16 => {
            // This corresponds to `store_string_to_latin1_or_utf16`
            //
            // Optimistically encode as latin1 (one byte per char); on the
            // first char that doesn't fit, inflate everything to utf16.
            let bytes = string.as_bytes();
            let mut iter = string.char_indices();
            let mut ptr = cx.realloc(0, 0, 2, bytes.len())?;
            let mut dst = &mut cx.as_slice_mut()[ptr..][..bytes.len()];
            let mut result = 0;
            while let Some((i, ch)) = iter.next() {
                // Test if this `char` fits into the latin1 encoding.
                if let Ok(byte) = u8::try_from(u32::from(ch)) {
                    dst[result] = byte;
                    result += 1;
                    continue;
                }

                // .. if utf16 is forced to be used then the allocation is
                // bumped up to the maximum size.
                let worst_case = bytes
                    .len()
                    .checked_mul(2)
                    .ok_or_else(|| format_err!("byte length overflow"))?;
                if worst_case > MAX_STRING_BYTE_LENGTH {
                    bail!("byte length too large");
                }
                ptr = cx.realloc(ptr, bytes.len(), 2, worst_case)?;
                dst = &mut cx.as_slice_mut()[ptr..][..worst_case];

                // Previously encoded latin1 bytes are inflated to their 16-bit
                // size for utf16. Iterating in reverse means each source byte
                // is read before its slot is overwritten (2*i >= i).
                for i in (0..result).rev() {
                    dst[2 * i] = dst[i];
                    dst[2 * i + 1] = 0;
                }

                // and then the remainder of the string is encoded.
                for (u, bytes) in string[i..]
                    .encode_utf16()
                    .zip(dst[2 * result..].chunks_mut(2))
                {
                    let u_bytes = u.to_le_bytes();
                    bytes[0] = u_bytes[0];
                    bytes[1] = u_bytes[1];
                    result += 1;
                }
                // Shrink to the code units actually written, then tag the
                // returned length to record the utf16 representation.
                if worst_case > 2 * result {
                    ptr = cx.realloc(ptr, worst_case, 2, 2 * result)?;
                }
                return Ok((ptr, result | UTF16_TAG));
            }
            // Pure-latin1 path: shrink the allocation if some chars were
            // multi-byte in utf-8 but single-byte in latin1.
            if result < bytes.len() {
                ptr = cx.realloc(ptr, bytes.len(), 2, result)?;
            }
            Ok((ptr, result))
        }
    }
}
1602
/// Representation of a string located in linear memory in a WebAssembly
/// instance.
///
/// This type can be used in place of `String` and `str` for string-taking APIs
/// in some situations. The purpose of this type is to represent a range of
/// validated bytes within a component but does not actually copy the bytes. The
/// primary method, [`WasmStr::to_str`], attempts to return a reference to the
/// string directly located in the component's memory, avoiding a copy into the
/// host if possible.
///
/// The downside of this type, however, is that accessing a string requires a
/// [`Store`](crate::Store) pointer (via [`StoreContext`]). Bindings generated
/// by [`bindgen!`](crate::component::bindgen), for example, do not have access
/// to [`StoreContext`] and thus can't use this type.
///
/// This is intended for more advanced use cases such as defining functions
/// directly in a [`Linker`](crate::component::Linker). It's expected that in
/// the future [`bindgen!`](crate::component::bindgen) will also have a way to
/// use this type.
///
/// This type is used with [`TypedFunc`], for example, when WebAssembly returns
/// a string. This type cannot be used to give a string to WebAssembly, instead
/// `&str` should be used for that (since it's coming from the host).
///
/// Note that this type represents an in-bounds string in linear memory, but it
/// does not represent a valid string (e.g. valid utf-8). Validation happens
/// when [`WasmStr::to_str`] is called.
///
/// Also note that this type does not implement [`Lower`], it only implements
/// [`Lift`].
pub struct WasmStr {
    // Byte offset of the string within the instance's linear memory.
    ptr: usize,
    // Raw length as produced by the guest: a byte count for utf8/latin1 or a
    // code-unit count for utf16 (possibly tagged with `UTF16_TAG` when the
    // encoding is latin1+utf16).
    len: usize,
    // Canonical options (memory, string encoding) this string was lifted
    // with, used later to decode it.
    options: OptionsIndex,
    // The instance whose linear memory this string points into.
    instance: Instance,
}
1639
impl WasmStr {
    /// Validates that the guest-provided `(ptr, len)` pair is in-bounds of
    /// linear memory under the configured string encoding and records where
    /// the string lives. No decoding or utf-8 validation happens here; that
    /// is deferred to [`WasmStr::to_str`].
    pub(crate) fn new(ptr: usize, len: usize, cx: &mut LiftContext<'_>) -> Result<WasmStr> {
        // Convert the raw `len` into a byte count: `len` is already bytes for
        // utf8, is a code-unit count for utf16 (×2), and for latin1+utf16 is
        // bytes when untagged or code units (×2) when `UTF16_TAG` is set.
        // `checked_mul` guards against overflow; `None` falls through to the
        // out-of-bounds error below.
        let byte_len = match cx.options().string_encoding {
            StringEncoding::Utf8 => Some(len),
            StringEncoding::Utf16 => len.checked_mul(2),
            StringEncoding::CompactUtf16 => {
                if len & UTF16_TAG == 0 {
                    Some(len)
                } else {
                    (len ^ UTF16_TAG).checked_mul(2)
                }
            }
        };
        // Bounds-check the entire byte range and charge fuel proportional to
        // the number of bytes that may later be decoded.
        match byte_len.and_then(|len| ptr.checked_add(len)) {
            Some(n) if n <= cx.memory().len() => cx.consume_fuel(n - ptr)?,
            _ => bail!("string pointer/length out of bounds of memory"),
        }
        Ok(WasmStr {
            ptr,
            len,
            options: cx.options_index(),
            instance: cx.instance_handle(),
        })
    }

    /// Returns the underlying string that this cursor points to.
    ///
    /// Note that this will internally decode the string from the wasm's
    /// encoding to utf-8 and additionally perform validation.
    ///
    /// The `store` provided must be the store where this string lives to
    /// access the correct memory.
    ///
    /// # Errors
    ///
    /// Returns an error if the string wasn't encoded correctly (e.g. invalid
    /// utf-8).
    ///
    /// # Panics
    ///
    /// Panics if this string is not owned by `store`.
    //
    // TODO: should add accessors for specifically utf-8 and utf-16 that perhaps
    // in an opt-in basis don't do validation. Additionally there should be some
    // method that returns `[u16]` after validating to avoid the utf16-to-utf8
    // transcode.
    pub fn to_str<'a, T: 'static>(
        &self,
        store: impl Into<StoreContext<'a, T>>,
    ) -> Result<Cow<'a, str>> {
        let store = store.into().0;
        // Re-resolve the memory and encoding from the options this string was
        // originally lifted with.
        let memory = self.instance.options_memory(store, self.options);
        let encoding = self.instance.options(store, self.options).string_encoding;
        self.to_str_from_memory(encoding, memory)
    }

    // Decodes this string from `memory` according to `encoding`, borrowing
    // from `memory` when no transcoding is needed (utf8).
    pub(crate) fn to_str_from_memory<'a>(
        &self,
        encoding: StringEncoding,
        memory: &'a [u8],
    ) -> Result<Cow<'a, str>> {
        match encoding {
            StringEncoding::Utf8 => self.decode_utf8(memory),
            StringEncoding::Utf16 => self.decode_utf16(memory, self.len),
            StringEncoding::CompactUtf16 => {
                // `UTF16_TAG` in the length records which of the two
                // representations the guest actually stored.
                if self.len & UTF16_TAG == 0 {
                    self.decode_latin1(memory)
                } else {
                    self.decode_utf16(memory, self.len ^ UTF16_TAG)
                }
            }
        }
    }

    // Borrow-and-validate path for utf8: no copy is made on success.
    fn decode_utf8<'a>(&self, memory: &'a [u8]) -> Result<Cow<'a, str>> {
        // Note that bounds-checking already happen in construction of `WasmStr`
        // so this is never expected to panic. This could theoretically be
        // unchecked indexing if we're feeling wild enough.
        Ok(str::from_utf8(&memory[self.ptr..][..self.len])?.into())
    }

    // Transcode `len` little-endian 16-bit code units into an owned `String`.
    fn decode_utf16<'a>(&self, memory: &'a [u8], len: usize) -> Result<Cow<'a, str>> {
        // See notes in `decode_utf8` for why this is panicking indexing.
        let memory = &memory[self.ptr..][..len * 2];
        Ok(core::char::decode_utf16(
            memory
                .chunks(2)
                .map(|chunk| u16::from_le_bytes(chunk.try_into().unwrap())),
        )
        .collect::<Result<String, _>>()?
        .into())
    }

    // Transcode latin1 bytes; `decode_latin1` returns a borrowed `Cow` when
    // the bytes happen to be ASCII-only.
    fn decode_latin1<'a>(&self, memory: &'a [u8]) -> Result<Cow<'a, str>> {
        // See notes in `decode_utf8` for why this is panicking indexing.
        Ok(encoding_rs::mem::decode_latin1(
            &memory[self.ptr..][..self.len],
        ))
    }
}
1740
1741// Note that this is similar to `ComponentType for str` except it can only be
1742// used for lifting, not lowering.
1743unsafe impl ComponentType for WasmStr {
1744    type Lower = <str as ComponentType>::Lower;
1745
1746    const ABI: CanonicalAbiInfo = CanonicalAbiInfo::POINTER_PAIR;
1747
1748    fn typecheck(ty: &InterfaceType, _types: &InstanceType<'_>) -> Result<()> {
1749        match ty {
1750            InterfaceType::String => Ok(()),
1751            other => bail!("expected `string` found `{}`", desc(other)),
1752        }
1753    }
1754}
1755
1756unsafe impl Lift for WasmStr {
1757    #[inline]
1758    fn linear_lift_from_flat(
1759        cx: &mut LiftContext<'_>,
1760        ty: InterfaceType,
1761        src: &Self::Lower,
1762    ) -> Result<Self> {
1763        debug_assert!(matches!(ty, InterfaceType::String));
1764        // FIXME(#4311): needs memory64 treatment
1765        let ptr = src[0].get_u32();
1766        let len = src[1].get_u32();
1767        let (ptr, len) = (usize::try_from(ptr)?, usize::try_from(len)?);
1768        WasmStr::new(ptr, len, cx)
1769    }
1770
1771    #[inline]
1772    fn linear_lift_from_memory(
1773        cx: &mut LiftContext<'_>,
1774        ty: InterfaceType,
1775        bytes: &[u8],
1776    ) -> Result<Self> {
1777        debug_assert!(matches!(ty, InterfaceType::String));
1778        debug_assert!((bytes.as_ptr() as usize) % (Self::ALIGN32 as usize) == 0);
1779        // FIXME(#4311): needs memory64 treatment
1780        let ptr = u32::from_le_bytes(bytes[..4].try_into().unwrap());
1781        let len = u32::from_le_bytes(bytes[4..].try_into().unwrap());
1782        let (ptr, len) = (usize::try_from(ptr)?, usize::try_from(len)?);
1783        WasmStr::new(ptr, len, cx)
1784    }
1785}
1786
1787unsafe impl<T> ComponentType for [T]
1788where
1789    T: ComponentType,
1790{
1791    type Lower = [ValRaw; 2];
1792
1793    const ABI: CanonicalAbiInfo = CanonicalAbiInfo::POINTER_PAIR;
1794
1795    fn typecheck(ty: &InterfaceType, types: &InstanceType<'_>) -> Result<()> {
1796        match ty {
1797            InterfaceType::List(t) => T::typecheck(&types.types[*t].element, types),
1798            other => bail!("expected `list` found `{}`", desc(other)),
1799        }
1800    }
1801}
1802
1803unsafe impl<T> Lower for [T]
1804where
1805    T: Lower,
1806{
1807    fn linear_lower_to_flat<U>(
1808        &self,
1809        cx: &mut LowerContext<'_, U>,
1810        ty: InterfaceType,
1811        dst: &mut MaybeUninit<[ValRaw; 2]>,
1812    ) -> Result<()> {
1813        let elem = match ty {
1814            InterfaceType::List(i) => cx.types[i].element,
1815            _ => bad_type_info(),
1816        };
1817        let (ptr, len) = lower_list(cx, elem, self)?;
1818        // See "WRITEPTR64" above for why this is always storing a 64-bit
1819        // integer.
1820        map_maybe_uninit!(dst[0]).write(ValRaw::i64(ptr as i64));
1821        map_maybe_uninit!(dst[1]).write(ValRaw::i64(len as i64));
1822        Ok(())
1823    }
1824
1825    fn linear_lower_to_memory<U>(
1826        &self,
1827        cx: &mut LowerContext<'_, U>,
1828        ty: InterfaceType,
1829        offset: usize,
1830    ) -> Result<()> {
1831        let elem = match ty {
1832            InterfaceType::List(i) => cx.types[i].element,
1833            _ => bad_type_info(),
1834        };
1835        debug_assert!(offset % (Self::ALIGN32 as usize) == 0);
1836        let (ptr, len) = lower_list(cx, elem, self)?;
1837        *cx.get(offset + 0) = u32::try_from(ptr).unwrap().to_le_bytes();
1838        *cx.get(offset + 4) = u32::try_from(len).unwrap().to_le_bytes();
1839        Ok(())
1840    }
1841}
1842
1843// FIXME: this is not a memcpy for `T` where `T` is something like `u8`.
1844//
1845// Some attempts to fix this have proved not fruitful. In isolation an attempt
1846// was made where:
1847//
1848// * `MemoryMut` stored a `*mut [u8]` as its "last view" of memory to avoid
1849//   reloading the base pointer constantly. This view is reset on `realloc`.
1850// * The bounds-checks in `MemoryMut::get` were removed (replaced with unsafe
1851//   indexing)
1852//
1853// Even then though this didn't correctly vectorized for `Vec<u8>`. It's not
1854// entirely clear why but it appeared that it's related to reloading the base
1855// pointer to memory (I guess from `MemoryMut` itself?). Overall I'm not really
1856// clear on what's happening there, but this is surely going to be a performance
1857// bottleneck in the future.
1858fn lower_list<T, U>(
1859    cx: &mut LowerContext<'_, U>,
1860    ty: InterfaceType,
1861    list: &[T],
1862) -> Result<(usize, usize)>
1863where
1864    T: Lower,
1865{
1866    let elem_size = T::SIZE32;
1867    let size = list
1868        .len()
1869        .checked_mul(elem_size)
1870        .ok_or_else(|| format_err!("size overflow copying a list"))?;
1871    let ptr = cx.realloc(0, 0, T::ALIGN32, size)?;
1872    T::linear_store_list_to_memory(cx, ty, ptr, list)?;
1873    Ok((ptr, list.len()))
1874}
1875
/// Representation of a list of values that are owned by a WebAssembly instance.
///
/// For some more commentary about the rationale for this type see the
/// documentation of [`WasmStr`]. In summary this type can avoid a copy when
/// passing data to the host in some situations but is additionally more
/// cumbersome to use by requiring a [`Store`](crate::Store) to be provided.
///
/// This type is used whenever a `(list T)` is returned from a [`TypedFunc`],
/// for example. This type represents a list of values that are stored in linear
/// memory which are waiting to be read.
///
/// Note that this type represents only a valid range of bytes for the list
/// itself, it does not represent validity of the elements themselves and that's
/// performed when they're iterated.
///
/// Note that this type does not implement the [`Lower`] trait, only [`Lift`].
pub struct WasmList<T> {
    // Byte offset of the first element within the instance's linear memory.
    ptr: usize,
    // Number of elements (not bytes) in the list.
    len: usize,
    // Canonical options the list was lifted with, used to rebuild a
    // `LiftContext` when elements are decoded later.
    options: OptionsIndex,
    // Interface type of each element, forwarded to `T`'s lift.
    elem: InterfaceType,
    // The instance whose linear memory the elements live in.
    instance: Instance,
    // No `T` is stored; elements are decoded on demand.
    _marker: marker::PhantomData<T>,
}
1900
1901impl<T: Lift> WasmList<T> {
1902    pub(crate) fn new(
1903        ptr: usize,
1904        len: usize,
1905        cx: &mut LiftContext<'_>,
1906        elem: InterfaceType,
1907    ) -> Result<WasmList<T>> {
1908        match len
1909            .checked_mul(T::SIZE32)
1910            .and_then(|len| ptr.checked_add(len))
1911        {
1912            Some(n) if n <= cx.memory().len() => cx.consume_fuel(n - ptr)?,
1913            _ => bail!("list pointer/length out of bounds of memory"),
1914        }
1915        if ptr % usize::try_from(T::ALIGN32)? != 0 {
1916            bail!("list pointer is not aligned")
1917        }
1918        Ok(WasmList {
1919            ptr,
1920            len,
1921            options: cx.options_index(),
1922            elem,
1923            instance: cx.instance_handle(),
1924            _marker: marker::PhantomData,
1925        })
1926    }
1927
1928    /// Returns the item length of this vector
1929    #[inline]
1930    pub fn len(&self) -> usize {
1931        self.len
1932    }
1933
1934    /// Gets the `n`th element of this list.
1935    ///
1936    /// Returns `None` if `index` is out of bounds. Returns `Some(Err(..))` if
1937    /// the value couldn't be decoded (it was invalid). Returns `Some(Ok(..))`
1938    /// if the value is valid.
1939    ///
1940    /// # Panics
1941    ///
1942    /// This function will panic if the string did not originally come from the
1943    /// `store` specified.
1944    //
1945    // TODO: given that interface values are intended to be consumed in one go
1946    // should we even expose a random access iteration API? In theory all
1947    // consumers should be validating through the iterator.
1948    pub fn get(&self, mut store: impl AsContextMut, index: usize) -> Option<Result<T>> {
1949        let store = store.as_context_mut().0;
1950        let mut cx = LiftContext::new(store, self.options, self.instance);
1951        self.get_from_store(&mut cx, index)
1952    }
1953
1954    fn get_from_store(&self, cx: &mut LiftContext<'_>, index: usize) -> Option<Result<T>> {
1955        if index >= self.len {
1956            return None;
1957        }
1958        // Note that this is using panicking indexing and this is expected to
1959        // never fail. The bounds-checking here happened during the construction
1960        // of the `WasmList` itself which means these should always be in-bounds
1961        // (and wasm memory can only grow). This could theoretically be
1962        // unchecked indexing if we're confident enough and it's actually a perf
1963        // issue one day.
1964        let bytes = &cx.memory()[self.ptr + index * T::SIZE32..][..T::SIZE32];
1965        Some(T::linear_lift_from_memory(cx, self.elem, bytes))
1966    }
1967
1968    /// Returns an iterator over the elements of this list.
1969    ///
1970    /// Each item of the list may fail to decode and is represented through the
1971    /// `Result` value of the iterator.
1972    pub fn iter<'a, U: 'static>(
1973        &'a self,
1974        store: impl Into<StoreContextMut<'a, U>>,
1975    ) -> impl ExactSizeIterator<Item = Result<T>> + 'a {
1976        let store = store.into().0;
1977        let mut cx = LiftContext::new(store, self.options, self.instance);
1978        (0..self.len).map(move |i| self.get_from_store(&mut cx, i).unwrap())
1979    }
1980}
1981
// Generates an inherent `as_le_slice` accessor on `WasmList<$i>` for each
// listed primitive integer type, giving zero-copy access to the raw
// little-endian element bytes in linear memory.
macro_rules! raw_wasm_list_accessors {
    ($($i:ident)*) => ($(
        impl WasmList<$i> {
            /// Get access to the raw underlying memory for this list.
            ///
            /// This method will return a direct slice into the original wasm
            /// module's linear memory where the data for this slice is stored.
            /// This allows the embedder to have efficient access to the
            /// underlying memory if needed and avoid copies and such if
            /// desired.
            ///
            /// Note that multi-byte integers are stored in little-endian format
            /// so portable processing of this slice must be aware of the host's
            /// byte-endianness. The `from_le` constructors in the Rust standard
            /// library should be suitable for converting from little-endian.
            ///
            /// # Panics
            ///
            /// Panics if the `store` provided is not the one from which this
            /// slice originated.
            pub fn as_le_slice<'a, T: 'static>(&self, store: impl Into<StoreContext<'a, T>>) -> &'a [$i] {
                let memory = self.instance.options_memory(store.into().0, self.options);
                self._as_le_slice(memory)
            }

            // Monomorphic worker: reinterprets this list's byte range of
            // `all_of_memory` as a `[$i]` slice.
            fn _as_le_slice<'a>(&self, all_of_memory: &'a [u8]) -> &'a [$i] {
                // See comments in `WasmList::get` for the panicking indexing
                let byte_size = self.len * mem::size_of::<$i>();
                let bytes = &all_of_memory[self.ptr..][..byte_size];

                // The canonical ABI requires that everything is aligned to its
                // own size, so this should be an aligned array. Furthermore the
                // alignment of primitive integers for hosts should be smaller
                // than or equal to the size of the primitive itself, meaning
                // that a wasm canonical-abi-aligned list is also aligned for
                // the host. That should mean that the head/tail slices here are
                // empty.
                //
                // Also note that the `unsafe` here is needed since the type
                // we're aligning to isn't guaranteed to be valid, but in our
                // case it's just integers and bytes so this should be safe.
                //
                // SAFETY: every bit pattern is a valid value for these integer
                // types, so reinterpreting initialized bytes is sound.
                unsafe {
                    let (head, body, tail) = bytes.align_to::<$i>();
                    assert!(head.is_empty() && tail.is_empty());
                    body
                }
            }
        }
    )*)
}

// Instantiate the raw accessors for all primitive integer element types.
raw_wasm_list_accessors! {
    i8 i16 i32 i64
    u8 u16 u32 u64
}
2037
// Note that this is similar to `ComponentType for str` except it can only be
// used for lifting, not lowering. Type-checking is delegated wholesale to the
// `[T]` implementation since both share the `list<T>` interface type.
unsafe impl<T: ComponentType> ComponentType for WasmList<T> {
    // Same flat representation as `[T]`: a (pointer, length) pair.
    type Lower = <[T] as ComponentType>::Lower;

    const ABI: CanonicalAbiInfo = CanonicalAbiInfo::POINTER_PAIR;

    fn typecheck(ty: &InterfaceType, types: &InstanceType<'_>) -> Result<()> {
        <[T] as ComponentType>::typecheck(ty, types)
    }
}
2049
2050unsafe impl<T: Lift> Lift for WasmList<T> {
2051    fn linear_lift_from_flat(
2052        cx: &mut LiftContext<'_>,
2053        ty: InterfaceType,
2054        src: &Self::Lower,
2055    ) -> Result<Self> {
2056        let elem = match ty {
2057            InterfaceType::List(i) => cx.types[i].element,
2058            _ => bad_type_info(),
2059        };
2060        // FIXME(#4311): needs memory64 treatment
2061        let ptr = src[0].get_u32();
2062        let len = src[1].get_u32();
2063        let (ptr, len) = (usize::try_from(ptr)?, usize::try_from(len)?);
2064        WasmList::new(ptr, len, cx, elem)
2065    }
2066
2067    fn linear_lift_from_memory(
2068        cx: &mut LiftContext<'_>,
2069        ty: InterfaceType,
2070        bytes: &[u8],
2071    ) -> Result<Self> {
2072        let elem = match ty {
2073            InterfaceType::List(i) => cx.types[i].element,
2074            _ => bad_type_info(),
2075        };
2076        debug_assert!((bytes.as_ptr() as usize) % (Self::ALIGN32 as usize) == 0);
2077        // FIXME(#4311): needs memory64 treatment
2078        let ptr = u32::from_le_bytes(bytes[..4].try_into().unwrap());
2079        let len = u32::from_le_bytes(bytes[4..].try_into().unwrap());
2080        let (ptr, len) = (usize::try_from(ptr)?, usize::try_from(len)?);
2081        WasmList::new(ptr, len, cx, elem)
2082    }
2083}
2084
2085/// Verify that the given wasm type is a tuple with the expected fields in the right order.
2086fn typecheck_tuple(
2087    ty: &InterfaceType,
2088    types: &InstanceType<'_>,
2089    expected: &[fn(&InterfaceType, &InstanceType<'_>) -> Result<()>],
2090) -> Result<()> {
2091    match ty {
2092        InterfaceType::Tuple(t) => {
2093            let tuple = &types.types[*t];
2094            if tuple.types.len() != expected.len() {
2095                bail!(
2096                    "expected {}-tuple, found {}-tuple",
2097                    expected.len(),
2098                    tuple.types.len()
2099                );
2100            }
2101            for (ty, check) in tuple.types.iter().zip(expected) {
2102                check(ty, types)?;
2103            }
2104            Ok(())
2105        }
2106        other => bail!("expected `tuple` found `{}`", desc(other)),
2107    }
2108}
2109
2110/// Verify that the given wasm type is a record with the expected fields in the right order and with the right
2111/// names.
2112pub fn typecheck_record(
2113    ty: &InterfaceType,
2114    types: &InstanceType<'_>,
2115    expected: &[(&str, fn(&InterfaceType, &InstanceType<'_>) -> Result<()>)],
2116) -> Result<()> {
2117    match ty {
2118        InterfaceType::Record(index) => {
2119            let fields = &types.types[*index].fields;
2120
2121            if fields.len() != expected.len() {
2122                bail!(
2123                    "expected record of {} fields, found {} fields",
2124                    expected.len(),
2125                    fields.len()
2126                );
2127            }
2128
2129            for (field, &(name, check)) in fields.iter().zip(expected) {
2130                check(&field.ty, types)
2131                    .with_context(|| format!("type mismatch for field {name}"))?;
2132
2133                if field.name != name {
2134                    bail!("expected record field named {}, found {}", name, field.name);
2135                }
2136            }
2137
2138            Ok(())
2139        }
2140        other => bail!("expected `record` found `{}`", desc(other)),
2141    }
2142}
2143
2144/// Verify that the given wasm type is a variant with the expected cases in the right order and with the right
2145/// names.
2146pub fn typecheck_variant(
2147    ty: &InterfaceType,
2148    types: &InstanceType<'_>,
2149    expected: &[(
2150        &str,
2151        Option<fn(&InterfaceType, &InstanceType<'_>) -> Result<()>>,
2152    )],
2153) -> Result<()> {
2154    match ty {
2155        InterfaceType::Variant(index) => {
2156            let cases = &types.types[*index].cases;
2157
2158            if cases.len() != expected.len() {
2159                bail!(
2160                    "expected variant of {} cases, found {} cases",
2161                    expected.len(),
2162                    cases.len()
2163                );
2164            }
2165
2166            for ((case_name, case_ty), &(name, check)) in cases.iter().zip(expected) {
2167                if *case_name != name {
2168                    bail!("expected variant case named {name}, found {case_name}");
2169                }
2170
2171                match (check, case_ty) {
2172                    (Some(check), Some(ty)) => check(ty, types)
2173                        .with_context(|| format!("type mismatch for case {name}"))?,
2174                    (None, None) => {}
2175                    (Some(_), None) => {
2176                        bail!("case `{name}` has no type but one was expected")
2177                    }
2178                    (None, Some(_)) => {
2179                        bail!("case `{name}` has a type but none was expected")
2180                    }
2181                }
2182            }
2183
2184            Ok(())
2185        }
2186        other => bail!("expected `variant` found `{}`", desc(other)),
2187    }
2188}
2189
2190/// Verify that the given wasm type is a enum with the expected cases in the right order and with the right
2191/// names.
2192pub fn typecheck_enum(
2193    ty: &InterfaceType,
2194    types: &InstanceType<'_>,
2195    expected: &[&str],
2196) -> Result<()> {
2197    match ty {
2198        InterfaceType::Enum(index) => {
2199            let names = &types.types[*index].names;
2200
2201            if names.len() != expected.len() {
2202                bail!(
2203                    "expected enum of {} names, found {} names",
2204                    expected.len(),
2205                    names.len()
2206                );
2207            }
2208
2209            for (name, expected) in names.iter().zip(expected) {
2210                if name != expected {
2211                    bail!("expected enum case named {expected}, found {name}");
2212                }
2213            }
2214
2215            Ok(())
2216        }
2217        other => bail!("expected `enum` found `{}`", desc(other)),
2218    }
2219}
2220
2221/// Verify that the given wasm type is a flags type with the expected flags in the right order and with the right
2222/// names.
2223pub fn typecheck_flags(
2224    ty: &InterfaceType,
2225    types: &InstanceType<'_>,
2226    expected: &[&str],
2227) -> Result<()> {
2228    match ty {
2229        InterfaceType::Flags(index) => {
2230            let names = &types.types[*index].names;
2231
2232            if names.len() != expected.len() {
2233                bail!(
2234                    "expected flags type with {} names, found {} names",
2235                    expected.len(),
2236                    names.len()
2237                );
2238            }
2239
2240            for (name, expected) in names.iter().zip(expected) {
2241                if name != expected {
2242                    bail!("expected flag named {expected}, found {name}");
2243                }
2244            }
2245
2246            Ok(())
2247        }
2248        other => bail!("expected `flags` found `{}`", desc(other)),
2249    }
2250}
2251
/// Format the specified bitflags using the specified names for debugging.
///
/// Flag `i` is considered set when bit `i % 32` of `bits[i / 32]` is set; set
/// flags are rendered in order, `|`-separated, wrapped in parentheses.
pub fn format_flags(bits: &[u32], names: &[&str], f: &mut fmt::Formatter) -> fmt::Result {
    f.write_str("(")?;
    // Separator is empty for the first flag written, then `|` afterwards.
    let mut separator = "";
    for (index, name) in names.iter().enumerate() {
        let word = bits[index / 32];
        let bit = index % 32;
        if word & (1 << bit) != 0 {
            f.write_str(separator)?;
            f.write_str(name)?;
            separator = "|";
        }
    }
    f.write_str(")")
}
2269
// SAFETY: `Option<T>` is described as a two-case variant: `ABI` below
// declares case 0 (`None`) with no payload and case 1 (`Some`) carrying `T`,
// and `Lower` pairs a 32-bit discriminant with `T`'s flat representation
// (written by the `Lower` impl below).
unsafe impl<T> ComponentType for Option<T>
where
    T: ComponentType,
{
    type Lower = TupleLower<<u32 as ComponentType>::Lower, T::Lower>;

    const ABI: CanonicalAbiInfo = CanonicalAbiInfo::variant_static(&[None, Some(T::ABI)]);

    // An `Option<T>` typechecks only against a component `option` whose
    // payload type checks against `T`.
    fn typecheck(ty: &InterfaceType, types: &InstanceType<'_>) -> Result<()> {
        match ty {
            InterfaceType::Option(t) => T::typecheck(&types.types[*t].ty, types),
            other => bail!("expected `option` found `{}`", desc(other)),
        }
    }
}
2285
// SAFETY: the case list here must stay in sync with `ComponentType::ABI`
// above: case 0 (`None`) has no payload and case 1 (`Some`) carries `T`.
unsafe impl<T> ComponentVariant for Option<T>
where
    T: ComponentType,
{
    const CASES: &'static [Option<CanonicalAbiInfo>] = &[None, Some(T::ABI)];
}
2292
// SAFETY: lowering writes a 32-bit discriminant (0 for `None`, 1 for `Some`)
// followed by the payload, matching the variant layout declared in the
// `ComponentType for Option<T>` impl above.
unsafe impl<T> Lower for Option<T>
where
    T: Lower,
{
    // Lowers into the flat (core wasm value) representation: the discriminant
    // goes into `A1` and the payload (or zeros for `None`) into `A2`.
    fn linear_lower_to_flat<U>(
        &self,
        cx: &mut LowerContext<'_, U>,
        ty: InterfaceType,
        dst: &mut MaybeUninit<Self::Lower>,
    ) -> Result<()> {
        // Project out the payload's interface type; any other type here means
        // the typecheck performed earlier was bypassed.
        let payload = match ty {
            InterfaceType::Option(ty) => cx.types[ty].ty,
            _ => bad_type_info(),
        };
        match self {
            None => {
                map_maybe_uninit!(dst.A1).write(ValRaw::i32(0));
                // Note that this is unsafe as we're writing an arbitrary
                // bit-pattern to an arbitrary type, but part of the unsafe
                // contract of the `ComponentType` trait is that we can assign
                // any bit-pattern. By writing all zeros here we're ensuring
                // that the core wasm arguments this translates to will all be
                // zeros (as the canonical ABI requires).
                unsafe {
                    map_maybe_uninit!(dst.A2).as_mut_ptr().write_bytes(0u8, 1);
                }
            }
            Some(val) => {
                map_maybe_uninit!(dst.A1).write(ValRaw::i32(1));
                val.linear_lower_to_flat(cx, payload, map_maybe_uninit!(dst.A2))?;
            }
        }
        Ok(())
    }

    // Lowers into linear memory at `offset`: one discriminant byte followed,
    // for `Some` only, by the payload at its ABI-computed offset.
    fn linear_lower_to_memory<U>(
        &self,
        cx: &mut LowerContext<'_, U>,
        ty: InterfaceType,
        offset: usize,
    ) -> Result<()> {
        debug_assert!(offset % (Self::ALIGN32 as usize) == 0);
        let payload = match ty {
            InterfaceType::Option(ty) => cx.types[ty].ty,
            _ => bad_type_info(),
        };
        match self {
            None => {
                cx.get::<1>(offset)[0] = 0;
            }
            Some(val) => {
                cx.get::<1>(offset)[0] = 1;
                val.linear_lower_to_memory(
                    cx,
                    payload,
                    offset + (Self::INFO.payload_offset32 as usize),
                )?;
            }
        }
        Ok(())
    }
}
2355
2356unsafe impl<T> Lift for Option<T>
2357where
2358    T: Lift,
2359{
2360    fn linear_lift_from_flat(
2361        cx: &mut LiftContext<'_>,
2362        ty: InterfaceType,
2363        src: &Self::Lower,
2364    ) -> Result<Self> {
2365        let payload = match ty {
2366            InterfaceType::Option(ty) => cx.types[ty].ty,
2367            _ => bad_type_info(),
2368        };
2369        Ok(match src.A1.get_i32() {
2370            0 => None,
2371            1 => Some(T::linear_lift_from_flat(cx, payload, &src.A2)?),
2372            _ => bail!("invalid option discriminant"),
2373        })
2374    }
2375
2376    fn linear_lift_from_memory(
2377        cx: &mut LiftContext<'_>,
2378        ty: InterfaceType,
2379        bytes: &[u8],
2380    ) -> Result<Self> {
2381        debug_assert!((bytes.as_ptr() as usize) % (Self::ALIGN32 as usize) == 0);
2382        let payload_ty = match ty {
2383            InterfaceType::Option(ty) => cx.types[ty].ty,
2384            _ => bad_type_info(),
2385        };
2386        let discrim = bytes[0];
2387        let payload = &bytes[Self::INFO.payload_offset32 as usize..];
2388        match discrim {
2389            0 => Ok(None),
2390            1 => Ok(Some(T::linear_lift_from_memory(cx, payload_ty, payload)?)),
2391            _ => bail!("invalid option discriminant"),
2392        }
2393    }
2394}
2395
/// Flat (core wasm value) representation of `Result<T, E>`: a discriminant
/// followed by a union of the two possible payload representations.
#[derive(Clone, Copy)]
#[repr(C)]
pub struct ResultLower<T: Copy, E: Copy> {
    // Discriminant written as an i32: 0 selects `ok`, 1 selects `err` (see
    // the `Lower`/`Lift` impls for `Result` below).
    tag: ValRaw,
    // Payload storage shared between the two cases.
    payload: ResultLowerPayload<T, E>,
}
2402
// Union of the two payload representations; which member is active is
// determined by `ResultLower::tag`.
#[derive(Clone, Copy)]
#[repr(C)]
union ResultLowerPayload<T: Copy, E: Copy> {
    ok: T,
    err: E,
}
2409
// SAFETY: `Result<T, E>` is described as a two-case variant whose flat
// representation is `ResultLower` above; `ABI` below declares the matching
// case layout.
unsafe impl<T, E> ComponentType for Result<T, E>
where
    T: ComponentType,
    E: ComponentType,
{
    type Lower = ResultLower<T::Lower, E::Lower>;

    const ABI: CanonicalAbiInfo = CanonicalAbiInfo::variant_static(&[Some(T::ABI), Some(E::ABI)]);

    fn typecheck(ty: &InterfaceType, types: &InstanceType<'_>) -> Result<()> {
        match ty {
            InterfaceType::Result(r) => {
                let result = &types.types[*r];
                // A `result` whose `ok`/`err` payload is absent in the
                // component type is only acceptable when the corresponding
                // Rust type is unit-like (e.g. `()`).
                match &result.ok {
                    Some(ty) => T::typecheck(ty, types)?,
                    None if T::IS_RUST_UNIT_TYPE => {}
                    None => bail!("expected no `ok` type"),
                }
                match &result.err {
                    Some(ty) => E::typecheck(ty, types)?,
                    None if E::IS_RUST_UNIT_TYPE => {}
                    None => bail!("expected no `err` type"),
                }
                Ok(())
            }
            other => bail!("expected `result` found `{}`", desc(other)),
        }
    }
}
2439
/// Lowers the payload of a variant into the storage for the entire payload,
/// handling writing zeros at the end of the representation if this payload is
/// smaller than the entire flat representation.
///
/// * `payload` - the flat storage space for the entire payload of the variant
/// * `typed_payload` - projection from the payload storage space to the
///   individual storage space for this variant.
/// * `lower` - lowering operation used to initialize the `typed_payload` return
///   value.
///
/// For more information on this see the comments in the `Lower for Result`
/// implementation below.
///
/// # Safety
///
/// Callers must guarantee that both `P` and `T` are flat lowering
/// representations composed entirely of `ValRaw`-compatible storage so that
/// they may be viewed as `[ValRaw]` slices (see `storage_as_slice`).
pub unsafe fn lower_payload<P, T>(
    payload: &mut MaybeUninit<P>,
    typed_payload: impl FnOnce(&mut MaybeUninit<P>) -> &mut MaybeUninit<T>,
    lower: impl FnOnce(&mut MaybeUninit<T>) -> Result<()>,
) -> Result<()> {
    let typed = typed_payload(payload);
    lower(typed)?;

    // Zero out any trailing slots of the full payload that the typed
    // projection didn't cover so no uninitialized host memory leaks into the
    // guest.
    //
    // SAFETY: per this function's contract `P` and `T` are viewable as
    // `[ValRaw]` storage.
    let typed_len = unsafe { storage_as_slice(typed).len() };
    let payload = unsafe { storage_as_slice_mut(payload) };
    for slot in payload[typed_len..].iter_mut() {
        slot.write(ValRaw::u64(0));
    }
    Ok(())
}
2467
// SAFETY: the case list mirrors `ComponentType::ABI` above: case 0 carries
// the `ok` payload and case 1 the `err` payload.
unsafe impl<T, E> ComponentVariant for Result<T, E>
where
    T: ComponentType,
    E: ComponentType,
{
    const CASES: &'static [Option<CanonicalAbiInfo>] = &[Some(T::ABI), Some(E::ABI)];
}
2475
// SAFETY: relies on `Self::Lower`/`ABI` above accurately describing the
// canonical ABI representation of `result`; see the lengthy comment in
// `linear_lower_to_flat` for why deferring to the payloads' own `Lower`
// implementations is correct.
unsafe impl<T, E> Lower for Result<T, E>
where
    T: Lower,
    E: Lower,
{
    fn linear_lower_to_flat<U>(
        &self,
        cx: &mut LowerContext<'_, U>,
        ty: InterfaceType,
        dst: &mut MaybeUninit<Self::Lower>,
    ) -> Result<()> {
        let (ok, err) = match ty {
            InterfaceType::Result(ty) => {
                let ty = &cx.types[ty];
                (ty.ok, ty.err)
            }
            _ => bad_type_info(),
        };

        // This implementation of `Lower::lower`, if you're reading these from
        // the top of this file, is the first location that the "join" logic of
        // the component model's canonical ABI encountered. The rough problem is
        // that let's say we have a component model type of the form:
        //
        //      (result u64 (error (tuple f32 u16)))
        //
        // The flat representation of this is actually pretty tricky. Currently
        // it is:
        //
        //      i32 i64 i32
        //
        // The first `i32` is the discriminant for the `result`, and the payload
        // is represented by `i64 i32`. The "ok" variant will only use the `i64`
        // and the "err" variant will use both `i64` and `i32`.
        //
        // In the "ok" variant the first issue is encountered. The size of one
        // variant may not match the size of the other variants. All variants
        // start at the "front" but when lowering a type we need to be sure to
        // initialize the later variants (lest we leak random host memory into
        // the guest module). Due to how the `Lower` type is represented as a
        // `union` of all the variants what ends up happening here is that
        // internally within the `lower_payload` after the typed payload is
        // lowered the remaining bits of the payload that weren't initialized
        // are all set to zero. This will guarantee that we'll write to all the
        // slots for each variant.
        //
        // The "err" variant encounters the second issue, however, which is that
        // the flat representation for each type may differ between payloads. In
        // the "ok" arm an `i64` is written, but the `lower` implementation for
        // the "err" arm will write an `f32` and then an `i32`. For this
        // implementation of `lower` to be valid the `f32` needs to get inflated
        // to an `i64` with zero-padding in the upper bits. What may be
        // surprising, however, is that none of this is handled in this file.
        // This implementation looks like it's blindly deferring to `E::lower`
        // and hoping it does the right thing.
        //
        // In reality, however, the correctness of variant lowering relies on
        // two subtle details of the `ValRaw` implementation in Wasmtime:
        //
        // 1. First the `ValRaw` value always contains little-endian values.
        //    This means that if a `u32` is written, a `u64` is read, and then
        //    the `u64` has its upper bits truncated the original value will
        //    always be retained. This is primarily here for big-endian
        //    platforms where if it weren't little endian then the opposite
        //    would occur and the wrong value would be read.
        //
        // 2. Second, and perhaps even more subtly, the `ValRaw` constructors
        //    for 32-bit types actually always initialize 64-bits of the
        //    `ValRaw`. In the component model flat ABI only 32 and 64-bit types
        //    are used so 64-bits is big enough to contain everything. This
        //    means that when a `ValRaw` is written into the destination it will
        //    always, whether it's needed or not, be "ready" to get extended up
        //    to 64-bits.
        //
        // Put together these two subtle guarantees means that all `Lower`
        // implementations can be written "naturally" as one might naively
        // expect. Variants will, on each arm, zero out remaining fields and all
        // writes to the flat representation will automatically be 64-bit writes
        // meaning that if the value is read as a 64-bit value, which isn't
        // known at the time of the write, it'll still be correct.
        match self {
            Ok(e) => {
                map_maybe_uninit!(dst.tag).write(ValRaw::i32(0));
                // SAFETY: `lower_payload`'s contract (ValRaw-compatible
                // storage) is upheld by the `Lower` representations used here.
                unsafe {
                    lower_payload(
                        map_maybe_uninit!(dst.payload),
                        |payload| map_maybe_uninit!(payload.ok),
                        |dst| match ok {
                            Some(ok) => e.linear_lower_to_flat(cx, ok, dst),
                            None => Ok(()),
                        },
                    )
                }
            }
            Err(e) => {
                map_maybe_uninit!(dst.tag).write(ValRaw::i32(1));
                // SAFETY: same contract as the `Ok` arm above.
                unsafe {
                    lower_payload(
                        map_maybe_uninit!(dst.payload),
                        |payload| map_maybe_uninit!(payload.err),
                        |dst| match err {
                            Some(err) => e.linear_lower_to_flat(cx, err, dst),
                            None => Ok(()),
                        },
                    )
                }
            }
        }
    }

    // Lowers into linear memory: one discriminant byte (0 = ok, 1 = err)
    // followed by the payload, if any, at its ABI-computed offset.
    fn linear_lower_to_memory<U>(
        &self,
        cx: &mut LowerContext<'_, U>,
        ty: InterfaceType,
        offset: usize,
    ) -> Result<()> {
        let (ok, err) = match ty {
            InterfaceType::Result(ty) => {
                let ty = &cx.types[ty];
                (ty.ok, ty.err)
            }
            _ => bad_type_info(),
        };
        debug_assert!(offset % (Self::ALIGN32 as usize) == 0);
        let payload_offset = Self::INFO.payload_offset32 as usize;
        match self {
            Ok(e) => {
                cx.get::<1>(offset)[0] = 0;
                if let Some(ok) = ok {
                    e.linear_lower_to_memory(cx, ok, offset + payload_offset)?;
                }
            }
            Err(e) => {
                cx.get::<1>(offset)[0] = 1;
                if let Some(err) = err {
                    e.linear_lower_to_memory(cx, err, offset + payload_offset)?;
                }
            }
        }
        Ok(())
    }
}
2618
// SAFETY: lifting reads the representation produced by the `Lower` impl
// above; see the comments in `linear_lift_from_flat` for why the payload
// union can be read without per-case bit manipulation.
unsafe impl<T, E> Lift for Result<T, E>
where
    T: Lift,
    E: Lift,
{
    #[inline]
    fn linear_lift_from_flat(
        cx: &mut LiftContext<'_>,
        ty: InterfaceType,
        src: &Self::Lower,
    ) -> Result<Self> {
        let (ok, err) = match ty {
            InterfaceType::Result(ty) => {
                let ty = &cx.types[ty];
                (ty.ok, ty.err)
            }
            _ => bad_type_info(),
        };
        // Note that this implementation specifically isn't trying to actually
        // reinterpret or alter the bits of `lower` depending on which variant
        // we're lifting. This ends up all working out because the value is
        // stored in little-endian format.
        //
        // When stored in little-endian format the `{T,E}::Lower`, when each
        // individual `ValRaw` is read, means that if an i64 value, extended
        // from an i32 value, was stored then when the i32 value is read it'll
        // automatically ignore the upper bits.
        //
        // This "trick" allows us to seamlessly pass through the `Self::Lower`
        // representation into the lifting/lowering without trying to handle
        // "join"ed types as per the canonical ABI. It just so happens that i64
        // bits will naturally be reinterpreted as f64. Additionally if the
        // joined type is i64 but only the lower bits are read that's ok and we
        // don't need to validate the upper bits.
        //
        // This is largely enabled by WebAssembly/component-model#35 where no
        // validation needs to be performed for ignored bits and bytes here.
        Ok(match src.tag.get_i32() {
            // SAFETY: the discriminant selects which union member the lowering
            // wrote, and per the comment above extra bits are safely ignored.
            0 => Ok(unsafe { lift_option(cx, ok, &src.payload.ok)? }),
            1 => Err(unsafe { lift_option(cx, err, &src.payload.err)? }),
            _ => bail!("invalid expected discriminant"),
        })
    }

    // Lifts from linear memory: one discriminant byte followed by the payload
    // at its ABI-computed offset, sliced down to the active case's size.
    #[inline]
    fn linear_lift_from_memory(
        cx: &mut LiftContext<'_>,
        ty: InterfaceType,
        bytes: &[u8],
    ) -> Result<Self> {
        debug_assert!((bytes.as_ptr() as usize) % (Self::ALIGN32 as usize) == 0);
        let discrim = bytes[0];
        let payload = &bytes[Self::INFO.payload_offset32 as usize..];
        let (ok, err) = match ty {
            InterfaceType::Result(ty) => {
                let ty = &cx.types[ty];
                (ty.ok, ty.err)
            }
            _ => bad_type_info(),
        };
        match discrim {
            0 => Ok(Ok(load_option(cx, ok, &payload[..T::SIZE32])?)),
            1 => Ok(Err(load_option(cx, err, &payload[..E::SIZE32])?)),
            _ => bail!("invalid expected discriminant"),
        }
    }
}
2686
2687fn lift_option<T>(cx: &mut LiftContext<'_>, ty: Option<InterfaceType>, src: &T::Lower) -> Result<T>
2688where
2689    T: Lift,
2690{
2691    match ty {
2692        Some(ty) => T::linear_lift_from_flat(cx, ty, src),
2693        None => Ok(empty_lift()),
2694    }
2695}
2696
2697fn load_option<T>(cx: &mut LiftContext<'_>, ty: Option<InterfaceType>, bytes: &[u8]) -> Result<T>
2698where
2699    T: Lift,
2700{
2701    match ty {
2702        Some(ty) => T::linear_lift_from_memory(cx, ty, bytes),
2703        None => Ok(empty_lift()),
2704    }
2705}
2706
// Produces a value of a unit-like Rust type without any source data, used
// when the component model type has no payload but the Rust representation
// still needs a `T` value.
fn empty_lift<T>() -> T
where
    T: Lift,
{
    // Both assertions guard the `assume_init` below: only zero-sized
    // unit-like types may be conjured out of thin air.
    assert!(T::IS_RUST_UNIT_TYPE);
    assert_eq!(mem::size_of::<T>(), 0);
    // SAFETY: `T` is zero-sized (asserted above), so an "uninitialized" value
    // has no bytes that could hold an invalid bit-pattern.
    unsafe { MaybeUninit::uninit().assume_init() }
}
2715
/// Helper structure to define `Lower` for tuples below.
///
/// Uses default type parameters to have fields be zero-sized and not present
/// in memory for smaller tuple values.
#[expect(non_snake_case, reason = "more amenable to macro-generated code")]
#[doc(hidden)]
#[derive(Clone, Copy)]
#[repr(C)]
pub struct TupleLower<
    T1 = (),
    T2 = (),
    T3 = (),
    T4 = (),
    T5 = (),
    T6 = (),
    T7 = (),
    T8 = (),
    T9 = (),
    T10 = (),
    T11 = (),
    T12 = (),
    T13 = (),
    T14 = (),
    T15 = (),
    T16 = (),
    T17 = (),
> {
    // NB: these names match the names in `for_each_function_signature!`
    A1: T1,
    A2: T2,
    A3: T3,
    A4: T4,
    A5: T5,
    A6: T6,
    A7: T7,
    A8: T8,
    A9: T9,
    A10: T10,
    A11: T11,
    A12: T12,
    A13: T13,
    A14: T14,
    A15: T15,
    A16: T16,
    A17: T17,
    // Zero-sized field that forces this struct to have the alignment of
    // `ValRaw` even when every `A*` field above is the zero-sized `()`
    // default.
    _align_tuple_lower0_correctly: [ValRaw; 0],
}
2763
// Implements `ComponentType`, `Lower`, `Lift`, and `ComponentNamedList` for
// an `$n`-tuple with element types `$($t)*`. Tuples are treated as records:
// elements are lowered/lifted field-by-field in both the flat and linear
// memory representations, consuming the component tuple's type list in order.
macro_rules! impl_component_ty_for_tuples {
    ($n:tt $($t:ident)*) => {
        #[allow(non_snake_case, reason = "macro-generated code")]
        unsafe impl<$($t,)*> ComponentType for ($($t,)*)
            where $($t: ComponentType),*
        {
            type Lower = TupleLower<$($t::Lower),*>;

            const ABI: CanonicalAbiInfo = CanonicalAbiInfo::record_static(&[
                $($t::ABI),*
            ]);

            // Only the empty tuple `()` counts as a unit type: the repetition
            // below flips `_is_unit` to `false` once per element, so any
            // non-empty tuple reports `false`.
            const IS_RUST_UNIT_TYPE: bool = {
                let mut _is_unit = true;
                $(
                    let _anything_to_bind_the_macro_variable = $t::IS_RUST_UNIT_TYPE;
                    _is_unit = false;
                )*
                _is_unit
            };

            fn typecheck(
                ty: &InterfaceType,
                types: &InstanceType<'_>,
            ) -> Result<()> {
                typecheck_tuple(ty, types, &[$($t::typecheck),*])
            }
        }

        #[allow(non_snake_case, reason = "macro-generated code")]
        unsafe impl<$($t,)*> Lower for ($($t,)*)
            where $($t: Lower),*
        {
            fn linear_lower_to_flat<U>(
                &self,
                cx: &mut LowerContext<'_, U>,
                ty: InterfaceType,
                _dst: &mut MaybeUninit<Self::Lower>,
            ) -> Result<()> {
                let types = match ty {
                    InterfaceType::Tuple(t) => &cx.types[t].types,
                    _ => bad_type_info(),
                };
                let ($($t,)*) = self;
                let mut _types = types.iter();
                $(
                    let ty = *_types.next().unwrap_or_else(bad_type_info);
                    $t.linear_lower_to_flat(cx, ty, map_maybe_uninit!(_dst.$t))?;
                )*
                Ok(())
            }

            fn linear_lower_to_memory<U>(
                &self,
                cx: &mut LowerContext<'_, U>,
                ty: InterfaceType,
                mut _offset: usize,
            ) -> Result<()> {
                debug_assert!(_offset % (Self::ALIGN32 as usize) == 0);
                let types = match ty {
                    InterfaceType::Tuple(t) => &cx.types[t].types,
                    _ => bad_type_info(),
                };
                let ($($t,)*) = self;
                let mut _types = types.iter();
                $(
                    let ty = *_types.next().unwrap_or_else(bad_type_info);
                    // `next_field32_size` advances `_offset` past alignment
                    // padding and returns each field's aligned offset.
                    $t.linear_lower_to_memory(cx, ty, $t::ABI.next_field32_size(&mut _offset))?;
                )*
                Ok(())
            }
        }

        #[allow(non_snake_case, reason = "macro-generated code")]
        unsafe impl<$($t,)*> Lift for ($($t,)*)
            where $($t: Lift),*
        {
            #[inline]
            fn linear_lift_from_flat(cx: &mut LiftContext<'_>, ty: InterfaceType, _src: &Self::Lower) -> Result<Self> {
                let types = match ty {
                    InterfaceType::Tuple(t) => &cx.types[t].types,
                    _ => bad_type_info(),
                };
                let mut _types = types.iter();
                Ok(($(
                    $t::linear_lift_from_flat(
                        cx,
                        *_types.next().unwrap_or_else(bad_type_info),
                        &_src.$t,
                    )?,
                )*))
            }

            #[inline]
            fn linear_lift_from_memory(cx: &mut LiftContext<'_>, ty: InterfaceType, bytes: &[u8]) -> Result<Self> {
                debug_assert!((bytes.as_ptr() as usize) % (Self::ALIGN32 as usize) == 0);
                let types = match ty {
                    InterfaceType::Tuple(t) => &cx.types[t].types,
                    _ => bad_type_info(),
                };
                let mut _types = types.iter();
                let mut _offset = 0;
                $(
                    let ty = *_types.next().unwrap_or_else(bad_type_info);
                    let $t = $t::linear_lift_from_memory(cx, ty, &bytes[$t::ABI.next_field32_size(&mut _offset)..][..$t::SIZE32])?;
                )*
                Ok(($($t,)*))
            }
        }

        #[allow(non_snake_case, reason = "macro-generated code")]
        unsafe impl<$($t,)*> ComponentNamedList for ($($t,)*)
            where $($t: ComponentType),*
        {}
    };
}

// Instantiate the macro above once per supported tuple arity.
for_each_function_signature!(impl_component_ty_for_tuples);
2882
/// Returns a short human-readable name for an interface type, used when
/// rendering typechecking error messages in this module.
pub fn desc(ty: &InterfaceType) -> &'static str {
    match ty {
        InterfaceType::U8 => "u8",
        InterfaceType::S8 => "s8",
        InterfaceType::U16 => "u16",
        InterfaceType::S16 => "s16",
        InterfaceType::U32 => "u32",
        InterfaceType::S32 => "s32",
        InterfaceType::U64 => "u64",
        InterfaceType::S64 => "s64",
        InterfaceType::Float32 => "f32",
        InterfaceType::Float64 => "f64",
        InterfaceType::Bool => "bool",
        InterfaceType::Char => "char",
        InterfaceType::String => "string",
        InterfaceType::List(_) => "list",
        InterfaceType::Tuple(_) => "tuple",
        InterfaceType::Option(_) => "option",
        InterfaceType::Result(_) => "result",

        InterfaceType::Record(_) => "record",
        InterfaceType::Variant(_) => "variant",
        InterfaceType::Flags(_) => "flags",
        InterfaceType::Enum(_) => "enum",
        InterfaceType::Own(_) => "owned resource",
        InterfaceType::Borrow(_) => "borrowed resource",
        InterfaceType::Future(_) => "future",
        InterfaceType::Stream(_) => "stream",
        InterfaceType::ErrorContext(_) => "error-context",
        InterfaceType::FixedLengthList(_) => "list<_, N>",
    }
}
2915
/// Diverging helper invoked when lifting/lowering encounters an interface
/// type that doesn't match the Rust type's expectation; reaching this means
/// the typecheck performed earlier was bypassed or is buggy, hence the panic.
#[cold]
#[doc(hidden)]
pub fn bad_type_info<T>() -> T {
    // NB: should consider something like `unreachable_unchecked` here if this
    // becomes a performance bottleneck at some point, but that also comes with
    // a tradeoff of propagating a lot of unsafety, so it may not be worth it.
    panic!("bad type information detected");
}