wasmtime/runtime/component/func/typed.rs
1use crate::component::Instance;
2use crate::component::func::{Func, LiftContext, LowerContext};
3use crate::component::matching::InstanceType;
4use crate::component::storage::{storage_as_slice, storage_as_slice_mut};
5#[cfg(not(feature = "std"))]
6use crate::hash_map::HashMap;
7use crate::prelude::*;
8use crate::{AsContextMut, StoreContext, StoreContextMut, ValRaw};
9use alloc::borrow::Cow;
10use core::fmt;
11use core::hash::Hash;
12use core::iter;
13use core::marker;
14use core::mem::{self, MaybeUninit};
15use core::str;
16use wasmtime_environ::component::{
17    CanonicalAbiInfo, ComponentTypes, InterfaceType, MAX_FLAT_PARAMS, MAX_FLAT_RESULTS,
18    OptionsIndex, StringEncoding, VariantInfo,
19};
20
21#[cfg(feature = "component-model-async")]
22use crate::component::concurrent::{self, AsAccessor, PreparedCall};
23
24#[cfg(feature = "std")]
25use wasmtime_environ::collections::TryHashMap;
26
/// A statically-typed version of [`Func`] which takes `Params` as input and
/// returns `Return`.
///
/// This is an efficient way to invoke a WebAssembly component where if the
/// inputs and output are statically known this can eschew the vast majority of
/// machinery and checks when calling WebAssembly. This is the most optimized
/// way to call a WebAssembly component.
///
/// Note that like [`Func`] this is a pointer within a [`Store`](crate::Store)
/// and usage will panic if used with the wrong store.
///
/// This type is primarily created with the [`Func::typed`] API.
///
/// See [`ComponentType`] for more information about supported types.
pub struct TypedFunc<Params, Return> {
    // The untyped function that this typed view wraps; all calls ultimately
    // delegate to this.
    func: Func,

    // The definition of this field is somewhat subtle and may be surprising.
    // Naively one might expect something like
    //
    //      _marker: marker::PhantomData<fn(Params) -> Return>,
    //
    // Since this is a function pointer after all. The problem with this
    // definition though is that it imposes the wrong variance on `Params` from
    // what we want. Abstractly a `fn(Params)` is able to store `Params` within
    // it meaning you can only give it `Params` that live longer than the
    // function pointer.
    //
    // With a component model function, however, we're always copying data from
    // the host into the guest, so we are never storing pointers to `Params`
    // into the guest outside the duration of a `call`, meaning we can actually
    // accept values in `TypedFunc::call` which live for a shorter duration
    // than the `Params` argument on the struct.
    //
    // This all means that we don't use a phantom function pointer, but instead
    // feign phantom storage here to get the variance desired.
    _marker: marker::PhantomData<(Params, Return)>,
}
65
66impl<Params, Return> Copy for TypedFunc<Params, Return> {}
67
68impl<Params, Return> Clone for TypedFunc<Params, Return> {
69    fn clone(&self) -> TypedFunc<Params, Return> {
70        *self
71    }
72}
73
74impl<Params, Return> TypedFunc<Params, Return>
75where
76    Params: ComponentNamedList + Lower,
77    Return: ComponentNamedList + Lift,
78{
79    /// Creates a new [`TypedFunc`] from the provided component [`Func`],
80    /// unsafely asserting that the underlying function takes `Params` as
81    /// input and returns `Return`.
82    ///
83    /// # Unsafety
84    ///
85    /// This is an unsafe function because it does not verify that the [`Func`]
86    /// provided actually implements this signature. It's up to the caller to
87    /// have performed some other sort of check to ensure that the signature is
88    /// correct.
89    pub unsafe fn new_unchecked(func: Func) -> TypedFunc<Params, Return> {
90        TypedFunc {
91            _marker: marker::PhantomData,
92            func,
93        }
94    }
95
96    /// Returns the underlying un-typed [`Func`] that this [`TypedFunc`]
97    /// references.
98    pub fn func(&self) -> &Func {
99        &self.func
100    }
101
102    /// Calls the underlying WebAssembly component function using the provided
103    /// `params` as input.
104    ///
105    /// This method is used to enter into a component. Execution happens within
106    /// the `store` provided. The `params` are copied into WebAssembly memory
107    /// as appropriate and a core wasm function is invoked.
108    ///
109    /// # Post-return
110    ///
111    /// In the component model each function can have a "post return" specified
112    /// which allows cleaning up the arguments returned to the host. For example
113    /// if WebAssembly returns a string to the host then it might be a uniquely
114    /// allocated string which, after the host finishes processing it, needs to
115    /// be deallocated in the wasm instance's own linear memory to prevent
116    /// memory leaks in wasm itself. The `post-return` canonical abi option is
117    /// used to configured this.
118    ///
119    /// If a post-return function is present, it will be called automatically by
120    /// this function.
121    ///
122    /// # Errors
123    ///
124    /// This function can return an error for a number of reasons:
125    ///
126    /// * If the wasm itself traps during execution.
127    /// * If the wasm traps while copying arguments into memory.
128    /// * If the wasm provides bad allocation pointers when copying arguments
129    ///   into memory.
130    /// * If the wasm returns a value which violates the canonical ABI.
131    /// * If this function's instances cannot be entered, for example if the
132    ///   instance is currently calling a host function.
133    /// * If `store` requires using [`Self::call_async`] instead, see
134    ///   [crate documentation](crate#async) for more info.
135    ///
136    /// In general there are many ways that things could go wrong when copying
137    /// types in and out of a wasm module with the canonical ABI, and certain
138    /// error conditions are specific to certain types. For example a
139    /// WebAssembly module can't return an invalid `char`. When allocating space
140    /// for this host to copy a string into the returned pointer must be
141    /// in-bounds in memory.
142    ///
143    /// If an error happens then the error should contain detailed enough
144    /// information to understand which part of the canonical ABI went wrong
145    /// and what to inspect.
146    ///
147    /// # Panics
148    ///
149    /// Panics if `store` does not own this function.
150    pub fn call(&self, mut store: impl AsContextMut, params: Params) -> Result<Return> {
151        let mut store = store.as_context_mut();
152        store.0.validate_sync_call()?;
153        self.call_impl(store.as_context_mut(), params)
154    }
155
    /// Exactly like [`Self::call`], except for invoking WebAssembly
    /// [asynchronously](crate#async).
    ///
    /// # Panics
    ///
    /// Panics if `store` does not own this function.
    #[cfg(feature = "async")]
    pub async fn call_async(
        &self,
        mut store: impl AsContextMut<Data: Send>,
        params: Params,
    ) -> Result<Return>
    where
        Return: 'static,
    {
        let mut store = store.as_context_mut();

        #[cfg(feature = "component-model-async")]
        if store.0.concurrency_support() {
            use crate::component::concurrent::TaskId;
            use crate::runtime::vm::SendSyncPtr;
            use core::ptr::NonNull;

            // Erase the type and lifetime of `params` so the `'static`
            // closure handed to `prepare_call` below can capture it; see the
            // SAFETY comment inside the closure for why this is sound.
            let ptr = SendSyncPtr::from(NonNull::from(&params).cast::<u8>());
            let prepared =
                self.prepare_call(store.as_context_mut(), true, move |cx, ty, dst| {
                    // SAFETY: The goal here is to get `Params`, a non-`'static`
                    // value, to live long enough to the lowering of the
                    // parameters. We're guaranteed that `Params` lives in the
                    // future of the outer function (we're in an `async fn`) so it'll
                    // stay alive as long as the future itself. That is distinct,
                    // for example, from the signature of `call_concurrent` below.
                    //
                    // Here a pointer to `Params` is smuggled to this location
                    // through a `SendSyncPtr<u8>` to thwart the `'static` check
                    // of rustc and the signature of `prepare_call`.
                    //
                    // Note the use of `SignalOnDrop` in the code that follows
                    // this closure, which ensures that the task will be removed
                    // from the concurrent state to which it belongs when the
                    // containing `Future` is dropped, so long as the parameters
                    // have not yet been lowered. Since this closure is removed from
                    // the task after the parameters are lowered, it will never be called
                    // after the containing `Future` is dropped.
                    let params = unsafe { ptr.cast::<Params>().as_ref() };
                    Self::lower_args(cx, ty, dst, params)
                })?;

            // Drop guard which notifies the concurrent state that the
            // host-side future went away so the in-progress task can be
            // cleaned up (see SAFETY comment above).
            struct SignalOnDrop<'a, T: 'static> {
                store: StoreContextMut<'a, T>,
                task: TaskId,
            }

            impl<'a, T> Drop for SignalOnDrop<'a, T> {
                fn drop(&mut self) {
                    self.task
                        .host_future_dropped(self.store.as_context_mut())
                        .unwrap();
                }
            }

            let mut wrapper = SignalOnDrop {
                store,
                task: prepared.task_id(),
            };

            let result = concurrent::queue_call(wrapper.store.as_context_mut(), prepared)?;
            return wrapper
                .store
                .as_context_mut()
                .run_concurrent_trap_on_idle(async |_| Ok(result.await?))
                .await?;
        }

        // Without concurrency support, run the synchronous implementation on
        // a fiber so that blocking inside wasm suspends the fiber rather than
        // blocking the host thread.
        store
            .on_fiber(|store| self.call_impl(store, params))
            .await?
    }
234
235    /// Start a concurrent call to this function.
236    ///
237    /// Concurrency is achieved by relying on the [`Accessor`] argument, which
238    /// can be obtained by calling [`StoreContextMut::run_concurrent`].
239    ///
240    /// Unlike [`Self::call`] and [`Self::call_async`] (both of which require
241    /// exclusive access to the store until the completion of the call), calls
242    /// made using this method may run concurrently with other calls to the same
243    /// instance.  In addition, the runtime will call the `post-return` function
244    /// (if any) automatically when the guest task completes.
245    ///
246    /// This function will return an error if [`Config::concurrency_support`] is
247    /// disabled.
248    ///
249    /// [`Config::concurrency_support`]: crate::Config::concurrency_support
250    ///
251    /// # Progress and Cancellation
252    ///
253    /// For more information about how to make progress on the wasm task or how
254    /// to cancel the wasm task see the documentation for
255    /// [`Func::call_concurrent`].
256    ///
257    /// [`Func::call_concurrent`]: crate::component::Func::call_concurrent
258    ///
259    /// # Panics
260    ///
261    /// Panics if the store that the [`Accessor`] is derived from does not own
262    /// this function.
263    ///
264    /// [`Accessor`]: crate::component::Accessor
265    ///
266    /// # Example
267    ///
268    /// Using [`StoreContextMut::run_concurrent`] to get an [`Accessor`]:
269    ///
270    /// ```
271    /// # use {
272    /// #   wasmtime::{
273    /// #     error::{Result},
274    /// #     component::{Component, Linker, ResourceTable},
275    /// #     Config, Engine, Store
276    /// #   },
277    /// # };
278    /// #
279    /// # struct Ctx { table: ResourceTable }
280    /// #
281    /// # async fn foo() -> Result<()> {
282    /// # let mut config = Config::new();
283    /// # let engine = Engine::new(&config)?;
284    /// # let mut store = Store::new(&engine, Ctx { table: ResourceTable::new() });
285    /// # let mut linker = Linker::new(&engine);
286    /// # let component = Component::new(&engine, "")?;
287    /// # let instance = linker.instantiate_async(&mut store, &component).await?;
288    /// let my_typed_func = instance.get_typed_func::<(), ()>(&mut store, "my_typed_func")?;
289    /// store.run_concurrent(async |accessor| -> wasmtime::Result<_> {
290    ///    my_typed_func.call_concurrent(accessor, ()).await?;
291    ///    Ok(())
292    /// }).await??;
293    /// # Ok(())
294    /// # }
295    /// ```
296    #[cfg(feature = "component-model-async")]
297    pub async fn call_concurrent(
298        self,
299        accessor: impl AsAccessor<Data: Send>,
300        params: Params,
301    ) -> Result<Return>
302    where
303        Params: 'static,
304        Return: 'static,
305    {
306        let result = accessor.as_accessor().with(|mut store| {
307            let mut store = store.as_context_mut();
308            ensure!(
309                store.0.concurrency_support(),
310                "cannot use `call_concurrent` Config::concurrency_support disabled",
311            );
312
313            let prepared =
314                self.prepare_call(store.as_context_mut(), false, move |cx, ty, dst| {
315                    Self::lower_args(cx, ty, dst, &params)
316                })?;
317            concurrent::queue_call(store, prepared)
318        });
319        Ok(result?.await?)
320    }
321
322    fn lower_args<T>(
323        cx: &mut LowerContext<T>,
324        ty: InterfaceType,
325        dst: &mut [MaybeUninit<ValRaw>],
326        params: &Params,
327    ) -> Result<()> {
328        use crate::component::storage::slice_to_storage_mut;
329
330        if Params::flatten_count() <= MAX_FLAT_PARAMS {
331            // SAFETY: the safety of `slice_to_storage_mut` relies on
332            // `Params::Lower` being represented by a sequence of
333            // `ValRaw`, and that's a guarantee upheld by the `Lower`
334            // trait itself.
335            let dst: &mut MaybeUninit<Params::Lower> = unsafe { slice_to_storage_mut(dst) };
336            Self::lower_stack_args(cx, &params, ty, dst)
337        } else {
338            Self::lower_heap_args(cx, &params, ty, &mut dst[0])
339        }
340    }
341
    /// Calls `concurrent::prepare_call` with monomorphized functions for
    /// lowering the parameters and lifting the result according to the number
    /// of core Wasm parameters and results in the signature of the function to
    /// be called.
    #[cfg(feature = "component-model-async")]
    fn prepare_call<T>(
        self,
        store: StoreContextMut<'_, T>,
        host_future_present: bool,
        lower: impl FnOnce(
            &mut LowerContext<T>,
            InterfaceType,
            &mut [MaybeUninit<ValRaw>],
        ) -> Result<()>
        + Send
        + Sync
        + 'static,
    ) -> Result<PreparedCall<Return>>
    where
        Return: 'static,
    {
        use crate::component::storage::slice_to_storage;
        debug_assert!(store.0.concurrency_support());

        // When parameters spill to the heap they are passed as one pointer
        // argument rather than `flatten_count()` flat arguments.
        let param_count = if Params::flatten_count() <= MAX_FLAT_PARAMS {
            Params::flatten_count()
        } else {
            1
        };
        // NOTE(review): async-lifted exports apparently flatten results using
        // the parameter limit rather than the result limit — presumably
        // because results flow back through `task.return`; confirm against
        // the canonical ABI spec.
        let max_results = if self.func.abi_async(store.0) {
            MAX_FLAT_PARAMS
        } else {
            MAX_FLAT_RESULTS
        };
        concurrent::prepare_call(
            store,
            self.func,
            param_count,
            host_future_present,
            move |func, store, params_out| {
                func.with_lower_context(store, |cx, ty| lower(cx, ty, params_out))
            },
            move |func, store, results| {
                let result = if Return::flatten_count() <= max_results {
                    func.with_lift_context(store, |cx, ty| {
                        // SAFETY: Per the safety requirements documented for the
                        // `ComponentType` trait, `Return::Lower` must be
                        // compatible at the binary level with a `[ValRaw; N]`,
                        // where `N` is `mem::size_of::<Return::Lower>() /
                        // mem::size_of::<ValRaw>()`.  And since this function
                        // is only used when `Return::flatten_count() <=
                        // MAX_FLAT_RESULTS` and `MAX_FLAT_RESULTS == 1`, `N`
                        // can only either be 0 or 1.
                        //
                        // See `ComponentInstance::exit_call` for where we use
                        // the result count passed from
                        // `wasmtime_environ::fact::trampoline`-generated code
                        // to ensure the slice has the correct length, and also
                        // `concurrent::start_call` for where we conservatively
                        // use a slice length of 1 unconditionally.  Also note
                        // that, as of this writing `slice_to_storage`
                        // double-checks the slice length is sufficient.
                        let results: &Return::Lower = unsafe { slice_to_storage(results) };
                        Self::lift_stack_result(cx, ty, results)
                    })?
                } else {
                    // Result spilled to linear memory: `results[0]` holds the
                    // pointer to read it back from.
                    func.with_lift_context(store, |cx, ty| {
                        Self::lift_heap_result(cx, ty, &results[0])
                    })?
                };
                Ok(Box::new(result))
            },
        )
    }
416
    /// Synchronous implementation shared by [`Self::call`] and the
    /// fiber-based fallback of [`Self::call_async`]: lowers `params`, invokes
    /// the core wasm function via `call_raw`, lifts the result, and then runs
    /// the `post-return` cleanup.
    fn call_impl(&self, mut store: impl AsContextMut, params: Params) -> Result<Return> {
        let mut store = store.as_context_mut();

        if self.func.abi_async(store.0) {
            bail!("must enable the `component-model-async` feature to call async-lifted exports")
        }

        // Note that this is in theory simpler than it might read at this time.
        // Here we're doing a runtime dispatch on the `flatten_count` for the
        // params/results to see whether they're inbounds. This creates 4 cases
        // to handle. In reality this is a highly optimizable branch where LLVM
        // will easily figure out that only one branch here is taken.
        //
        // Otherwise this current construction is done to ensure that the stack
        // space reserved for the params/results is always of the appropriate
        // size (as the params/results needed differ depending on the "flatten"
        // count)
        //
        // SAFETY: the safety of these invocations of `call_raw` depends on the
        // correctness of the ascription of the `LowerParams` and `LowerReturn`
        // types on the `call_raw` function. That's upheld here through the
        // safety requirements of `Lift` and `Lower` on `Params` and `Return` in
        // combination with checking the various possible branches here and
        // dispatching to appropriately typed functions.
        let (result, post_return_arg) = unsafe {
            // This type is used as `LowerParams` for `call_raw` which is either
            // `Params::Lower` or `ValRaw` representing it's either on the stack
            // or it's on the heap. This allocates 1 extra `ValRaw` on the stack
            // if `Params` is empty and `Return` is also empty, but that's a
            // reasonable enough price to pay for now given the current code
            // organization.
            #[derive(Copy, Clone)]
            union Union<T: Copy, U: Copy> {
                _a: T,
                _b: U,
            }

            if Return::flatten_count() <= MAX_FLAT_RESULTS {
                self.func.call_raw(
                    store.as_context_mut(),
                    |cx, ty, dst: &mut MaybeUninit<Union<Params::Lower, ValRaw>>| {
                        let dst = storage_as_slice_mut(dst);
                        Self::lower_args(cx, ty, dst, &params)
                    },
                    Self::lift_stack_result,
                )
            } else {
                self.func.call_raw(
                    store.as_context_mut(),
                    |cx, ty, dst: &mut MaybeUninit<Union<Params::Lower, ValRaw>>| {
                        let dst = storage_as_slice_mut(dst);
                        Self::lower_args(cx, ty, dst, &params)
                    },
                    Self::lift_heap_result,
                )
            }
        }?;

        // Run the guest's `post-return` cleanup (if any) now that the result
        // has been lifted into host memory.
        self.func.post_return_impl(store, post_return_arg)?;

        Ok(result)
    }
479
480    /// Lower parameters directly onto the stack specified by the `dst`
481    /// location.
482    ///
483    /// This is only valid to call when the "flatten count" is small enough, or
484    /// when the canonical ABI says arguments go through the stack rather than
485    /// the heap.
486    fn lower_stack_args<T>(
487        cx: &mut LowerContext<'_, T>,
488        params: &Params,
489        ty: InterfaceType,
490        dst: &mut MaybeUninit<Params::Lower>,
491    ) -> Result<()> {
492        assert!(Params::flatten_count() <= MAX_FLAT_PARAMS);
493        params.linear_lower_to_flat(cx, ty, dst)?;
494        Ok(())
495    }
496
497    /// Lower parameters onto a heap-allocated location.
498    ///
499    /// This is used when the stack space to be used for the arguments is above
500    /// the `MAX_FLAT_PARAMS` threshold. Here the wasm's `realloc` function is
501    /// invoked to allocate space and then parameters are stored at that heap
502    /// pointer location.
503    fn lower_heap_args<T>(
504        cx: &mut LowerContext<'_, T>,
505        params: &Params,
506        ty: InterfaceType,
507        dst: &mut MaybeUninit<ValRaw>,
508    ) -> Result<()> {
509        // Memory must exist via validation if the arguments are stored on the
510        // heap, so we can create a `MemoryMut` at this point. Afterwards
511        // `realloc` is used to allocate space for all the arguments and then
512        // they're all stored in linear memory.
513        //
514        // Note that `realloc` will bake in a check that the returned pointer is
515        // in-bounds.
516        let ptr = cx.realloc(0, 0, Params::ALIGN32, Params::SIZE32)?;
517        params.linear_lower_to_memory(cx, ty, ptr)?;
518
519        // Note that the pointer here is stored as a 64-bit integer. This allows
520        // this to work with either 32 or 64-bit memories. For a 32-bit memory
521        // it'll just ignore the upper 32 zero bits, and for 64-bit memories
522        // this'll have the full 64-bits. Note that for 32-bit memories the call
523        // to `realloc` above guarantees that the `ptr` is in-bounds meaning
524        // that we will know that the zero-extended upper bits of `ptr` are
525        // guaranteed to be zero.
526        //
527        // This comment about 64-bit integers is also referred to below with
528        // "WRITEPTR64".
529        dst.write(ValRaw::i64(ptr as i64));
530
531        Ok(())
532    }
533
534    /// Lift the result of a function directly from the stack result.
535    ///
536    /// This is only used when the result fits in the maximum number of stack
537    /// slots.
538    fn lift_stack_result(
539        cx: &mut LiftContext<'_>,
540        ty: InterfaceType,
541        dst: &Return::Lower,
542    ) -> Result<Return> {
543        Return::linear_lift_from_flat(cx, ty, dst)
544    }
545
546    /// Lift the result of a function where the result is stored indirectly on
547    /// the heap.
548    fn lift_heap_result(
549        cx: &mut LiftContext<'_>,
550        ty: InterfaceType,
551        dst: &ValRaw,
552    ) -> Result<Return> {
553        assert!(Return::flatten_count() > MAX_FLAT_RESULTS);
554        // FIXME(#4311): needs to read an i64 for memory64
555        let ptr = usize::try_from(dst.get_u32())?;
556        if ptr % usize::try_from(Return::ALIGN32)? != 0 {
557            bail!("return pointer not aligned");
558        }
559
560        let bytes = cx
561            .memory()
562            .get(ptr..)
563            .and_then(|b| b.get(..Return::SIZE32))
564            .ok_or_else(|| crate::format_err!("pointer out of bounds of memory"))?;
565        Return::linear_lift_from_memory(cx, ty, bytes)
566    }
567
    // Deprecated shims kept only for backwards compatibility: `post-return`
    // is now invoked automatically as part of the call itself, so these are
    // intentional no-ops.
    #[doc(hidden)]
    #[deprecated(note = "no longer needs to be called; this function has no effect")]
    pub fn post_return(&self, _store: impl AsContextMut) -> Result<()> {
        Ok(())
    }

    #[doc(hidden)]
    #[deprecated(note = "no longer needs to be called; this function has no effect")]
    #[cfg(feature = "async")]
    pub async fn post_return_async<T: Send>(
        &self,
        _store: impl AsContextMut<Data = T>,
    ) -> Result<()> {
        Ok(())
    }
583}
584
/// A trait representing a static list of named types that can be passed to or
/// returned from a [`TypedFunc`].
///
/// This trait is implemented for a number of tuple types and is not expected
/// to be implemented externally. The contents of this trait are hidden as it's
/// intended to be an implementation detail of Wasmtime. The contents of this
/// trait are not covered by Wasmtime's stability guarantees.
///
/// For more information about this trait see [`Func::typed`] and
/// [`TypedFunc`].
//
// Note that this is an `unsafe` trait, and the unsafety means that
// implementations of this trait must be correct or otherwise [`TypedFunc`]
// would not be memory safe. The main reason this is `unsafe` is the
// `typecheck` function which must operate correctly relative to the `AsTuple`
// interpretation of the implementor.
pub unsafe trait ComponentNamedList: ComponentType {}
602
603/// A trait representing types which can be passed to and read from components
604/// with the canonical ABI.
605///
606/// This trait is implemented for Rust types which can be communicated to
607/// components. The [`Func::typed`] and [`TypedFunc`] Rust items are the main
608/// consumers of this trait.
609///
610/// Supported Rust types include:
611///
612/// | Component Model Type              | Rust Type                            |
613/// |-----------------------------------|--------------------------------------|
614/// | `{s,u}{8,16,32,64}`               | `{i,u}{8,16,32,64}`                  |
615/// | `f{32,64}`                        | `f{32,64}`                           |
616/// | `bool`                            | `bool`                               |
617/// | `char`                            | `char`                               |
618/// | `tuple<A, B>`                     | `(A, B)`                             |
619/// | `option<T>`                       | `Option<T>`                          |
620/// | `result`                          | `Result<(), ()>`                     |
621/// | `result<T>`                       | `Result<T, ()>`                      |
622/// | `result<_, E>`                    | `Result<(), E>`                      |
623/// | `result<T, E>`                    | `Result<T, E>`                       |
624/// | `string`                          | `String`, `&str`, or [`WasmStr`]     |
625/// | `list<T>`                         | `Vec<T>`, `&[T]`, or [`WasmList`]    |
626/// | `map<K, V>`                       | `HashMap<K, V>`                      |
627/// | `own<T>`, `borrow<T>`             | [`Resource<T>`] or [`ResourceAny`]   |
628/// | `record`                          | [`#[derive(ComponentType)]`][d-cm]   |
629/// | `variant`                         | [`#[derive(ComponentType)]`][d-cm]   |
630/// | `enum`                            | [`#[derive(ComponentType)]`][d-cm]   |
631/// | `flags`                           | [`flags!`][f-m]                      |
632///
633/// [`Resource<T>`]: crate::component::Resource
634/// [`ResourceAny`]: crate::component::ResourceAny
635/// [d-cm]: macro@crate::component::ComponentType
636/// [f-m]: crate::component::flags
637///
638/// Rust standard library pointers such as `&T`, `Box<T>`, and `Arc<T>`
639/// additionally represent whatever type `T` represents in the component model.
640/// Note that types such as `record`, `variant`, `enum`, and `flags` are
641/// generated by the embedder at compile time. These macros derive
642/// implementation of this trait for custom types to map to custom types in the
643/// component model. Note that for `record`, `variant`, `enum`, and `flags`
644/// those types are often generated by the
645/// [`bindgen!`](crate::component::bindgen) macro from WIT definitions.
646///
647/// Types that implement [`ComponentType`] are used for `Params` and `Return`
648/// in [`TypedFunc`] and [`Func::typed`].
649///
650/// The contents of this trait are hidden as it's intended to be an
651/// implementation detail of Wasmtime. The contents of this trait are not
652/// covered by Wasmtime's stability guarantees.
653///
654/// # Safety
655///
656/// Note that this is an `unsafe` trait as `TypedFunc`'s safety heavily relies on
657/// the correctness of the implementations of this trait. Some ways in which this
658/// trait must be correct to be safe are:
659///
660/// * The `Lower` associated type must be a `ValRaw` sequence. It doesn't have to
661///   literally be `[ValRaw; N]` but when laid out in memory it must be adjacent
662///   `ValRaw` values and have a multiple of the size of `ValRaw` and the same
663///   alignment.
664///
665/// * The `lower` function must initialize the bits within `Lower` that are going
666///   to be read by the trampoline that's used to enter core wasm. A trampoline
667///   is passed `*mut Lower` and will read the canonical abi arguments in
668///   sequence, so all of the bits must be correctly initialized.
669///
670/// * The `size` and `align` functions must be correct for this value stored in
671///   the canonical ABI. The `Cursor<T>` iteration of these bytes rely on this
672///   for correctness as they otherwise eschew bounds-checking.
673///
674/// There are likely some other correctness issues which aren't documented as
675/// well, this isn't currently an exhaustive list. It suffices to say, though,
676/// that correctness bugs in this trait implementation are highly likely to
677/// lead to security bugs, which again leads to the `unsafe` in the trait.
678///
679/// Note that this trait specifically is not sealed because `bindgen!`-generated
680/// types must be able to implement this trait using a `#[derive]` macro. For
681/// users it's recommended to not implement this trait manually given the
682/// non-exhaustive list of safety requirements that must be upheld. This trait
683/// is implemented at your own risk if you do so.
684///
685/// # Send and Sync
686///
687/// While on the topic of safety it's worth discussing the `Send` and `Sync`
688/// bounds here as well. These bounds might naively seem like they shouldn't be
689/// required for all component types as they're host-level types not guest-level
690/// types persisted anywhere. Various subtleties lead to these bounds, however:
691///
692/// * Fibers require that all stack-local variables are `Send` and `Sync` for
693///   fibers themselves to be send/sync. Unfortunately we have no help from the
694///   compiler on this one so it's up to Wasmtime's discipline to maintain this.
695///   One instance of this is that return values are placed on the stack as
696///   they're lowered into guest memory. This lowering operation can involve
697///   malloc and context switches, so return values must be Send/Sync.
698///
699/// * In the implementation of component model async it's not uncommon for types
700///   to be "buffered" in the store temporarily. For example parameters might
701///   reside in a store temporarily while wasm has backpressure turned on.
702///
703/// Overall it's generally easiest to require `Send` and `Sync` for all
704/// component types. There additionally aren't known use case for non-`Send` or
705/// non-`Sync` types at this time.
pub unsafe trait ComponentType: Send + Sync {
    /// Representation of the "lowered" form of this component value.
    ///
    /// Lowerings lower into core wasm values which are represented by `ValRaw`.
    /// This `Lower` type must be a list of `ValRaw` as either a literal array
    /// or a struct where every field is a `ValRaw`. This must be `Copy` (as
    /// `ValRaw` is `Copy`) and support all byte patterns. This being correct is
    /// one reason why the trait is unsafe.
    #[doc(hidden)]
    type Lower: Copy;

    /// The information about this type's canonical ABI (size/align/etc).
    #[doc(hidden)]
    const ABI: CanonicalAbiInfo;

    /// Size, in bytes, of this type when stored in 32-bit linear memory,
    /// derived from `ABI`.
    #[doc(hidden)]
    const SIZE32: usize = Self::ABI.size32 as usize;
    /// Alignment, in bytes, of this type when stored in 32-bit linear memory,
    /// derived from `ABI`.
    #[doc(hidden)]
    const ALIGN32: u32 = Self::ABI.align32;

    /// Whether this type is Rust's unit type `()`. Defaults to `false`;
    /// presumably overridden only by the `()` implementation so callers can
    /// special-case empty values — confirm at the override site.
    #[doc(hidden)]
    const IS_RUST_UNIT_TYPE: bool = false;

    /// Whether this type might require a call to the guest's realloc function
    /// to allocate linear memory when lowering (e.g. a non-empty `string`).
    ///
    /// If this is `false`, Wasmtime may optimize lowering by using
    /// `LowerContext::new_without_realloc` and lowering values outside of any
    /// fiber.  That will panic if the lowering process ends up needing realloc
    /// after all, so `true` is a conservative default.
    #[doc(hidden)]
    const MAY_REQUIRE_REALLOC: bool = true;

    /// Returns the number of core wasm abi values that will be used to
    /// represent this type in its lowered form.
    ///
    /// This divides the size of `Self::Lower` by the size of `ValRaw`.
    #[doc(hidden)]
    fn flatten_count() -> usize {
        // These invariants on `Self::Lower` are part of the unsafe contract of
        // this trait; double-check them before dividing.
        assert!(mem::size_of::<Self::Lower>() % mem::size_of::<ValRaw>() == 0);
        assert!(mem::align_of::<Self::Lower>() == mem::align_of::<ValRaw>());
        mem::size_of::<Self::Lower>() / mem::size_of::<ValRaw>()
    }

    /// Performs a type-check to see whether this component value type matches
    /// the interface type `ty` provided.
    #[doc(hidden)]
    fn typecheck(ty: &InterfaceType, types: &InstanceType<'_>) -> Result<()>;
}
755
/// Internal trait for variant-shaped component types (`variant`, `enum`,
/// `option`, `result`, ...) describing the ABI of each case.
#[doc(hidden)]
pub unsafe trait ComponentVariant: ComponentType {
    /// Canonical ABI info for each case, in order; `None` presumably marks a
    /// payload-less case — confirm against `VariantInfo::new_static`.
    const CASES: &'static [Option<CanonicalAbiInfo>];
    /// Layout information (discriminant/payload placement) computed statically
    /// from `CASES`.
    const INFO: VariantInfo = VariantInfo::new_static(Self::CASES);
    /// Byte offset of the payload within this variant in 32-bit linear memory.
    const PAYLOAD_OFFSET32: usize = Self::INFO.payload_offset32 as usize;
}
762
763/// Host types which can be passed to WebAssembly components.
764///
765/// This trait is implemented for all types that can be passed to components
766/// either as parameters of component exports or returns of component imports.
767/// This trait represents the ability to convert from the native host
768/// representation to the canonical ABI.
769///
770/// Built-in types to Rust such as `Option<T>` implement this trait as
771/// appropriate. For a mapping of component model to Rust types see
772/// [`ComponentType`].
773///
774/// For user-defined types, for example `record` types mapped to Rust `struct`s,
775/// this crate additionally has
776/// [`#[derive(Lower)]`](macro@crate::component::Lower).
777///
778/// Note that like [`ComponentType`] the definition of this trait is intended to
779/// be an internal implementation detail of Wasmtime at this time. It's
780/// recommended to use the `#[derive(Lower)]` implementation instead.
pub unsafe trait Lower: ComponentType {
    /// Performs the "lower" function in the linear memory version of the
    /// canonical ABI.
    ///
    /// This method will lower the current value into a component. The `lower`
    /// function performs a "flat" lowering into the `dst` specified which is
    /// allowed to be uninitialized entering this method but is guaranteed to be
    /// fully initialized if the method returns `Ok(())`.
    ///
    /// The `cx` context provided is the context within which this lowering is
    /// happening. This contains information such as canonical options specified
    /// (e.g. string encodings, memories, etc), the store itself, along with
    /// type information.
    ///
    /// The `ty` parameter is the destination type that is being lowered into.
    /// For example this is the component's "view" of the type that is being
    /// lowered. This is guaranteed to have passed a `typecheck` earlier.
    ///
    /// This will only be called if `typecheck` passes for `Op::Lower`.
    #[doc(hidden)]
    fn linear_lower_to_flat<T>(
        &self,
        cx: &mut LowerContext<'_, T>,
        ty: InterfaceType,
        dst: &mut MaybeUninit<Self::Lower>,
    ) -> Result<()>;

    /// Performs the "store" operation in the linear memory version of the
    /// canonical ABI.
    ///
    /// This function will store `self` into the linear memory described by
    /// `cx` at the `offset` provided.
    ///
    /// It is expected that `offset` is a valid offset in memory for
    /// `Self::SIZE32` bytes. At this time that's not an unsafe contract as it's
    /// always re-checked on all stores, but this is something that will need to
    /// be improved in the future to remove extra bounds checks. For now this
    /// function will panic if there's a bug and `offset` isn't valid within
    /// memory.
    ///
    /// The `ty` type information passed here is the same as the type
    /// information passed to `lower` above, and is the component's own view of
    /// what the resulting type should be.
    ///
    /// This will only be called if `typecheck` passes for `Op::Lower`.
    #[doc(hidden)]
    fn linear_lower_to_memory<T>(
        &self,
        cx: &mut LowerContext<'_, T>,
        ty: InterfaceType,
        offset: usize,
    ) -> Result<()>;

    /// Provided method to lower a list of `Self` into memory.
    ///
    /// Requires that `offset` has already been checked for alignment and
    /// validity in terms of being in-bounds, otherwise this may panic.
    ///
    /// This is primarily here to get overridden for implementations of integers
    /// which can avoid some extra fluff and use a pattern that's more easily
    /// optimizable by LLVM.
    #[doc(hidden)]
    fn linear_store_list_to_memory<T>(
        cx: &mut LowerContext<'_, T>,
        ty: InterfaceType,
        mut offset: usize,
        items: &[Self],
    ) -> Result<()>
    where
        Self: Sized,
    {
        // Default implementation: store each element at consecutive
        // `SIZE32`-sized slots. Integer impls override this with a bulk copy.
        for item in items {
            item.linear_lower_to_memory(cx, ty, offset)?;
            offset += Self::SIZE32;
        }
        Ok(())
    }
}
859
860/// Host types which can be created from the canonical ABI.
861///
862/// This is the mirror of the [`Lower`] trait where it represents the capability
863/// of acquiring items from WebAssembly and passing them to the host.
864///
865/// Built-in types to Rust such as `Option<T>` implement this trait as
866/// appropriate. For a mapping of component model to Rust types see
867/// [`ComponentType`].
868///
869/// For user-defined types, for example `record` types mapped to Rust `struct`s,
870/// this crate additionally has
871/// [`#[derive(Lift)]`](macro@crate::component::Lift).
872///
873/// Note that like [`ComponentType`] the definition of this trait is intended to
874/// be an internal implementation detail of Wasmtime at this time. It's
875/// recommended to use the `#[derive(Lift)]` implementation instead.
pub unsafe trait Lift: Sized + ComponentType {
    /// Performs the "lift" operation in the linear memory version of the
    /// canonical ABI.
    ///
    /// This function performs a "flat" lift operation from the `src` specified
    /// which is a sequence of core wasm values. The lifting operation will
    /// validate core wasm values and produce a `Self` on success.
    ///
    /// The `cx` provided contains contextual information such as the store
    /// that's being loaded from, canonical options, and type information.
    ///
    /// The `ty` parameter is the origin component's specification for what the
    /// type that is being lifted is. For example this is the record type or the
    /// resource type that is being lifted.
    ///
    /// This will only be called if `typecheck` passes for `Op::Lift`.
    #[doc(hidden)]
    fn linear_lift_from_flat(
        cx: &mut LiftContext<'_>,
        ty: InterfaceType,
        src: &Self::Lower,
    ) -> Result<Self>;

    /// Performs the "load" operation in the linear memory version of the
    /// canonical ABI.
    ///
    /// This will read the `bytes` provided, which are a sub-slice into the
    /// linear memory described by `cx`. The `bytes` array provided is
    /// guaranteed to be `Self::SIZE32` bytes large. All of memory is then also
    /// available through `cx` for bounds-checks and such as necessary for
    /// strings/lists.
    ///
    /// The `ty` argument is the type that's being loaded, as described by the
    /// original component.
    ///
    /// This will only be called if `typecheck` passes for `Op::Lift`.
    #[doc(hidden)]
    fn linear_lift_from_memory(
        cx: &mut LiftContext<'_>,
        ty: InterfaceType,
        bytes: &[u8],
    ) -> Result<Self>;

    /// Converts `list` into a `Vec<T>`, used in `Lift for Vec<T>`.
    #[doc(hidden)]
    fn linear_lift_list_from_memory(
        cx: &mut LiftContext<'_>,
        list: &WasmList<Self>,
    ) -> Result<Vec<Self>>
    where
        Self: Sized,
    {
        // Pre-size the destination to avoid re-allocation while lifting.
        let mut dst = Vec::with_capacity(list.len);
        Self::linear_lift_into_from_memory(cx, list, &mut dst)?;
        Ok(dst)
    }

    /// Lifts every item of `list` out of linear memory and appends it to
    /// `dst`.
    ///
    /// This is primarily here to get overridden for implementations of integers
    /// which can avoid some extra fluff and use a pattern that's more easily
    /// optimizable by LLVM.
    #[doc(hidden)]
    fn linear_lift_into_from_memory(
        cx: &mut LiftContext<'_>,
        list: &WasmList<Self>,
        dst: &mut impl Extend<Self>,
    ) -> Result<()>
    where
        Self: Sized,
    {
        // `get_from_store` is only `None` when `i` is out of bounds, which
        // `0..list.len` rules out, hence the `unwrap`.
        for i in 0..list.len {
            dst.extend(Some(list.get_from_store(cx, i).unwrap()?));
        }
        Ok(())
    }
}
955
956// Macro to help generate "forwarding implementations" of `ComponentType` to
957// another type, used for wrappers in Rust like `&T`, `Box<T>`, etc. Note that
958// these wrappers only implement lowering because lifting native Rust types
959// cannot be done.
macro_rules! forward_type_impls {
    // Each entry is `(generics) Wrapper => Target`, generating an impl that
    // forwards every `ComponentType` item of `Wrapper` to `Target`.
    ($(($($generics:tt)*) $a:ty => $b:ty,)*) => ($(
        unsafe impl <$($generics)*> ComponentType for $a {
            type Lower = <$b as ComponentType>::Lower;

            const ABI: CanonicalAbiInfo = <$b as ComponentType>::ABI;

            #[inline]
            fn typecheck(ty: &InterfaceType, types: &InstanceType<'_>) -> Result<()> {
                <$b as ComponentType>::typecheck(ty, types)
            }
        }
    )*)
}

// References and smart pointers share the ABI of their pointee; owned
// `String`/`Vec` share the ABI of `str`/`[T]`.
forward_type_impls! {
    (T: ComponentType + ?Sized) &'_ T => T,
    (T: ComponentType + ?Sized) Box<T> => T,
    (T: ComponentType + ?Sized) alloc::sync::Arc<T> => T,
    () String => str,
    (T: ComponentType) Vec<T> => [T],
}
982
// Forwarding implementations of `Lower`: lowering a wrapper delegates to the
// wrapped type via auto-deref of `self`.
macro_rules! forward_lowers {
    ($(($($generics:tt)*) $a:ty => $b:ty,)*) => ($(
        unsafe impl <$($generics)*> Lower for $a {
            fn linear_lower_to_flat<U>(
                &self,
                cx: &mut LowerContext<'_, U>,
                ty: InterfaceType,
                dst: &mut MaybeUninit<Self::Lower>,
            ) -> Result<()> {
                <$b as Lower>::linear_lower_to_flat(self, cx, ty, dst)
            }

            fn linear_lower_to_memory<U>(
                &self,
                cx: &mut LowerContext<'_, U>,
                ty: InterfaceType,
                offset: usize,
            ) -> Result<()> {
                <$b as Lower>::linear_lower_to_memory(self, cx, ty, offset)
            }
        }
    )*)
}

forward_lowers! {
    (T: Lower + ?Sized) &'_ T => T,
    (T: Lower + ?Sized) Box<T> => T,
    (T: Lower + ?Sized) alloc::sync::Arc<T> => T,
    () String => str,
    (T: Lower) Vec<T> => [T],
}
1014
// Owned string types lift by first lifting a borrowed `WasmStr` view of guest
// memory and then copying it out with the instance's configured string
// encoding.
macro_rules! forward_string_lifts {
    ($($a:ty,)*) => ($(
        unsafe impl Lift for $a {
            #[inline]
            fn linear_lift_from_flat(cx: &mut LiftContext<'_>, ty: InterfaceType, src: &Self::Lower) -> Result<Self> {
                let s = <WasmStr as Lift>::linear_lift_from_flat(cx, ty, src)?;
                let encoding = cx.options().string_encoding;
                Ok(s.to_str_from_memory(encoding, cx.memory())?.into())
            }

            #[inline]
            fn linear_lift_from_memory(cx: &mut LiftContext<'_>, ty: InterfaceType, bytes: &[u8]) -> Result<Self> {
                let s = <WasmStr as Lift>::linear_lift_from_memory(cx, ty, bytes)?;
                let encoding = cx.options().string_encoding;
                Ok(s.to_str_from_memory(encoding, cx.memory())?.into())
            }
        }
    )*)
}

forward_string_lifts! {
    Box<str>,
    alloc::sync::Arc<str>,
    String,
}
1040
// Owned list types lift by first lifting a borrowed `WasmList<T>` view of
// guest memory and then copying the elements out into a `Vec<T>` which is
// converted (`into`) to the destination container.
macro_rules! forward_list_lifts {
    ($($a:ty,)*) => ($(
        unsafe impl <T: Lift> Lift for $a {
            fn linear_lift_from_flat(cx: &mut LiftContext<'_>, ty: InterfaceType, src: &Self::Lower) -> Result<Self> {
                let list = <WasmList::<T> as Lift>::linear_lift_from_flat(cx, ty, src)?;
                Ok(T::linear_lift_list_from_memory(cx, &list)?.into())
            }

            fn linear_lift_from_memory(cx: &mut LiftContext<'_>, ty: InterfaceType, bytes: &[u8]) -> Result<Self> {
                let list = <WasmList::<T> as Lift>::linear_lift_from_memory(cx, ty, bytes)?;
                Ok(T::linear_lift_list_from_memory(cx, &list)?.into())
            }
        }
    )*)
}

forward_list_lifts! {
    Box<[T]>,
    alloc::sync::Arc<[T]>,
    Vec<T>,
}
1062
1063// Macro to help generate `ComponentType` implementations for primitive types
1064// such as integers, char, bool, etc.
macro_rules! integers {
    ($($primitive:ident = $ty:ident in $field:ident/$get:ident with abi:$abi:ident,)*) => ($(
        unsafe impl ComponentType for $primitive {
            type Lower = ValRaw;

            const ABI: CanonicalAbiInfo = CanonicalAbiInfo::$abi;

            // Lowering an integer writes a fixed number of bytes and never
            // calls the guest's realloc.
            const MAY_REQUIRE_REALLOC: bool = false;

            fn typecheck(ty: &InterfaceType, _types: &InstanceType<'_>) -> Result<()> {
                match ty {
                    InterfaceType::$ty => Ok(()),
                    other => bail!("expected `{}` found `{}`", desc(&InterfaceType::$ty), desc(other))
                }
            }
        }

        unsafe impl Lower for $primitive {
            #[inline]
            #[allow(trivial_numeric_casts, reason = "macro-generated code")]
            fn linear_lower_to_flat<T>(
                &self,
                _cx: &mut LowerContext<'_, T>,
                ty: InterfaceType,
                dst: &mut MaybeUninit<Self::Lower>,
            ) -> Result<()> {
                debug_assert!(matches!(ty, InterfaceType::$ty));
                // Widen (if needed) to the core wasm representation field.
                dst.write(ValRaw::$field(*self as $field));
                Ok(())
            }

            #[inline]
            fn linear_lower_to_memory<T>(
                &self,
                cx: &mut LowerContext<'_, T>,
                ty: InterfaceType,
                offset: usize,
            ) -> Result<()> {
                debug_assert!(matches!(ty, InterfaceType::$ty));
                debug_assert!(offset % Self::SIZE32 == 0);
                // The canonical ABI stores integers little-endian.
                *cx.get(offset) = self.to_le_bytes();
                Ok(())
            }

            fn linear_store_list_to_memory<T>(
                cx: &mut LowerContext<'_, T>,
                ty: InterfaceType,
                offset: usize,
                items: &[Self],
            ) -> Result<()> {
                debug_assert!(matches!(ty, InterfaceType::$ty));

                // Double-check that the CM alignment is at least the host's
                // alignment for this type which should be true for all
                // platforms.
                assert!((Self::ALIGN32 as usize) >= mem::align_of::<Self>());

                // Slice `cx`'s memory to the window that we'll be modifying.
                // This should all have already been verified in terms of
                // alignment and sizing meaning that these assertions here are
                // not truly necessary but are instead double-checks.
                //
                // Note that we're casting a `[u8]` slice to `[Self]` with
                // `align_to_mut` which is not safe in general but is safe in
                // our specific case as all `u8` patterns are valid `Self`
                // patterns since `Self` is an integral type.
                let dst = &mut cx.as_slice_mut()[offset..][..items.len() * Self::SIZE32];
                let (before, middle, end) = unsafe { dst.align_to_mut::<Self>() };
                assert!(before.is_empty() && end.is_empty());
                assert_eq!(middle.len(), items.len());

                // And with all that out of the way perform the copying loop.
                // This is not a `copy_from_slice` because endianness needs to
                // be handled here, but LLVM should pretty easily transform this
                // into a memcpy on little-endian platforms.
                for (dst, src) in middle.iter_mut().zip(items) {
                    *dst = src.to_le();
                }
                Ok(())
            }
        }

        unsafe impl Lift for $primitive {
            #[inline]
            #[allow(
                trivial_numeric_casts,
                clippy::cast_possible_truncation,
                reason = "macro-generated code"
            )]
            fn linear_lift_from_flat(_cx: &mut LiftContext<'_>, ty: InterfaceType, src: &Self::Lower) -> Result<Self> {
                debug_assert!(matches!(ty, InterfaceType::$ty));
                // Narrow (if needed) from the core wasm representation field.
                Ok(src.$get() as $primitive)
            }

            #[inline]
            fn linear_lift_from_memory(_cx: &mut LiftContext<'_>, ty: InterfaceType, bytes: &[u8]) -> Result<Self> {
                debug_assert!(matches!(ty, InterfaceType::$ty));
                debug_assert!((bytes.as_ptr() as usize) % Self::SIZE32 == 0);
                Ok($primitive::from_le_bytes(bytes.try_into().unwrap()))
            }

            fn linear_lift_into_from_memory(
                cx: &mut LiftContext<'_>,
                list: &WasmList<Self>,
                dst: &mut impl Extend<Self>,
            ) -> Result<()>
            where
                Self: Sized,
            {
                // Bulk-copy optimization: view guest memory as a `[Self]`
                // slice and convert endianness per element.
                dst.extend(list._as_le_slice(cx.memory())
                           .iter()
                           .map(|i| Self::from_le(*i)));
                Ok(())
            }
        }
    )*)
}

// primitive = interface-type case, lowered ValRaw field/getter, and ABI info.
integers! {
    i8 = S8 in i32/get_i32 with abi:SCALAR1,
    u8 = U8 in u32/get_u32 with abi:SCALAR1,
    i16 = S16 in i32/get_i32 with abi:SCALAR2,
    u16 = U16 in u32/get_u32 with abi:SCALAR2,
    i32 = S32 in i32/get_i32 with abi:SCALAR4,
    u32 = U32 in u32/get_u32 with abi:SCALAR4,
    i64 = S64 in i64/get_i64 with abi:SCALAR8,
    u64 = U64 in u64/get_u64 with abi:SCALAR8,
}
1193
// Floats travel through the canonical ABI as their raw bit patterns
// (`to_bits`/`from_bits`), stored little-endian in memory.
macro_rules! floats {
    ($($float:ident/$get_float:ident = $ty:ident with abi:$abi:ident)*) => ($(const _: () = {
        unsafe impl ComponentType for $float {
            type Lower = ValRaw;

            const ABI: CanonicalAbiInfo = CanonicalAbiInfo::$abi;

            fn typecheck(ty: &InterfaceType, _types: &InstanceType<'_>) -> Result<()> {
                match ty {
                    InterfaceType::$ty => Ok(()),
                    other => bail!("expected `{}` found `{}`", desc(&InterfaceType::$ty), desc(other))
                }
            }
        }

        unsafe impl Lower for $float {
            #[inline]
            fn linear_lower_to_flat<T>(
                &self,
                _cx: &mut LowerContext<'_, T>,
                ty: InterfaceType,
                dst: &mut MaybeUninit<Self::Lower>,
            ) -> Result<()> {
                debug_assert!(matches!(ty, InterfaceType::$ty));
                dst.write(ValRaw::$float(self.to_bits()));
                Ok(())
            }

            #[inline]
            fn linear_lower_to_memory<T>(
                &self,
                cx: &mut LowerContext<'_, T>,
                ty: InterfaceType,
                offset: usize,
            ) -> Result<()> {
                debug_assert!(matches!(ty, InterfaceType::$ty));
                debug_assert!(offset % Self::SIZE32 == 0);
                let ptr = cx.get(offset);
                *ptr = self.to_bits().to_le_bytes();
                Ok(())
            }

            fn linear_store_list_to_memory<T>(
                cx: &mut LowerContext<'_, T>,
                ty: InterfaceType,
                offset: usize,
                items: &[Self],
            ) -> Result<()> {
                debug_assert!(matches!(ty, InterfaceType::$ty));

                // Double-check that the CM alignment is at least the host's
                // alignment for this type which should be true for all
                // platforms.
                assert!((Self::ALIGN32 as usize) >= mem::align_of::<Self>());

                // Slice `cx`'s memory to the window that we'll be modifying.
                // This should all have already been verified in terms of
                // alignment and sizing meaning that these assertions here are
                // not truly necessary but are instead double-checks.
                let dst = &mut cx.as_slice_mut()[offset..][..items.len() * Self::SIZE32];
                assert!(dst.as_ptr().cast::<Self>().is_aligned());

                // And with all that out of the way perform the copying loop.
                // This is not a `copy_from_slice` because endianness needs to
                // be handled here, but LLVM should pretty easily transform this
                // into a memcpy on little-endian platforms.
                // TODO use `as_chunks` when https://github.com/rust-lang/rust/issues/74985
                // is stabilized
                for (dst, src) in iter::zip(dst.chunks_exact_mut(Self::SIZE32), items) {
                    let dst: &mut [u8; Self::SIZE32] = dst.try_into().unwrap();
                    *dst = src.to_le_bytes();
                }
                Ok(())
            }
        }

        unsafe impl Lift for $float {
            #[inline]
            fn linear_lift_from_flat(_cx: &mut LiftContext<'_>, ty: InterfaceType, src: &Self::Lower) -> Result<Self> {
                debug_assert!(matches!(ty, InterfaceType::$ty));
                Ok($float::from_bits(src.$get_float()))
            }

            #[inline]
            fn linear_lift_from_memory(_cx: &mut LiftContext<'_>, ty: InterfaceType, bytes: &[u8]) -> Result<Self> {
                debug_assert!(matches!(ty, InterfaceType::$ty));
                debug_assert!((bytes.as_ptr() as usize) % Self::SIZE32 == 0);
                Ok($float::from_le_bytes(bytes.try_into().unwrap()))
            }

            fn linear_lift_list_from_memory(cx: &mut LiftContext<'_>, list: &WasmList<Self>) -> Result<Vec<Self>> where Self: Sized {
                // See comments in `WasmList::get` for the panicking indexing
                let byte_size = list.len * mem::size_of::<Self>();
                let bytes = &cx.memory()[list.ptr..][..byte_size];

                // The canonical ABI requires that everything is aligned to its
                // own size, so this should be an aligned array.
                assert!(bytes.as_ptr().cast::<Self>().is_aligned());

                // Copy the resulting slice to a new Vec, handling endianness
                // in the process
                // TODO use `as_chunks` when https://github.com/rust-lang/rust/issues/74985
                // is stabilized
                Ok(
                    bytes
                        .chunks_exact(Self::SIZE32)
                        .map(|i| $float::from_le_bytes(i.try_into().unwrap()))
                        .collect()
                )
            }
        }
    };)*)
}

floats! {
    f32/get_f32 = Float32 with abi:SCALAR4
    f64/get_f64 = Float64 with abi:SCALAR8
}
1312
1313unsafe impl ComponentType for bool {
1314    type Lower = ValRaw;
1315
1316    const ABI: CanonicalAbiInfo = CanonicalAbiInfo::SCALAR1;
1317
1318    fn typecheck(ty: &InterfaceType, _types: &InstanceType<'_>) -> Result<()> {
1319        match ty {
1320            InterfaceType::Bool => Ok(()),
1321            other => bail!("expected `bool` found `{}`", desc(other)),
1322        }
1323    }
1324}
1325
1326unsafe impl Lower for bool {
1327    fn linear_lower_to_flat<T>(
1328        &self,
1329        _cx: &mut LowerContext<'_, T>,
1330        ty: InterfaceType,
1331        dst: &mut MaybeUninit<Self::Lower>,
1332    ) -> Result<()> {
1333        debug_assert!(matches!(ty, InterfaceType::Bool));
1334        dst.write(ValRaw::i32(*self as i32));
1335        Ok(())
1336    }
1337
1338    fn linear_lower_to_memory<T>(
1339        &self,
1340        cx: &mut LowerContext<'_, T>,
1341        ty: InterfaceType,
1342        offset: usize,
1343    ) -> Result<()> {
1344        debug_assert!(matches!(ty, InterfaceType::Bool));
1345        debug_assert!(offset % Self::SIZE32 == 0);
1346        cx.get::<1>(offset)[0] = *self as u8;
1347        Ok(())
1348    }
1349}
1350
1351unsafe impl Lift for bool {
1352    #[inline]
1353    fn linear_lift_from_flat(
1354        _cx: &mut LiftContext<'_>,
1355        ty: InterfaceType,
1356        src: &Self::Lower,
1357    ) -> Result<Self> {
1358        debug_assert!(matches!(ty, InterfaceType::Bool));
1359        match src.get_i32() {
1360            0 => Ok(false),
1361            _ => Ok(true),
1362        }
1363    }
1364
1365    #[inline]
1366    fn linear_lift_from_memory(
1367        _cx: &mut LiftContext<'_>,
1368        ty: InterfaceType,
1369        bytes: &[u8],
1370    ) -> Result<Self> {
1371        debug_assert!(matches!(ty, InterfaceType::Bool));
1372        match bytes[0] {
1373            0 => Ok(false),
1374            _ => Ok(true),
1375        }
1376    }
1377}
1378
1379unsafe impl ComponentType for char {
1380    type Lower = ValRaw;
1381
1382    const ABI: CanonicalAbiInfo = CanonicalAbiInfo::SCALAR4;
1383
1384    fn typecheck(ty: &InterfaceType, _types: &InstanceType<'_>) -> Result<()> {
1385        match ty {
1386            InterfaceType::Char => Ok(()),
1387            other => bail!("expected `char` found `{}`", desc(other)),
1388        }
1389    }
1390}
1391
1392unsafe impl Lower for char {
1393    #[inline]
1394    fn linear_lower_to_flat<T>(
1395        &self,
1396        _cx: &mut LowerContext<'_, T>,
1397        ty: InterfaceType,
1398        dst: &mut MaybeUninit<Self::Lower>,
1399    ) -> Result<()> {
1400        debug_assert!(matches!(ty, InterfaceType::Char));
1401        dst.write(ValRaw::u32(u32::from(*self)));
1402        Ok(())
1403    }
1404
1405    #[inline]
1406    fn linear_lower_to_memory<T>(
1407        &self,
1408        cx: &mut LowerContext<'_, T>,
1409        ty: InterfaceType,
1410        offset: usize,
1411    ) -> Result<()> {
1412        debug_assert!(matches!(ty, InterfaceType::Char));
1413        debug_assert!(offset % Self::SIZE32 == 0);
1414        *cx.get::<4>(offset) = u32::from(*self).to_le_bytes();
1415        Ok(())
1416    }
1417}
1418
1419unsafe impl Lift for char {
1420    #[inline]
1421    fn linear_lift_from_flat(
1422        _cx: &mut LiftContext<'_>,
1423        ty: InterfaceType,
1424        src: &Self::Lower,
1425    ) -> Result<Self> {
1426        debug_assert!(matches!(ty, InterfaceType::Char));
1427        Ok(char::try_from(src.get_u32())?)
1428    }
1429
1430    #[inline]
1431    fn linear_lift_from_memory(
1432        _cx: &mut LiftContext<'_>,
1433        ty: InterfaceType,
1434        bytes: &[u8],
1435    ) -> Result<Self> {
1436        debug_assert!(matches!(ty, InterfaceType::Char));
1437        debug_assert!((bytes.as_ptr() as usize) % Self::SIZE32 == 0);
1438        let bits = u32::from_le_bytes(bytes.try_into().unwrap());
1439        Ok(char::try_from(bits)?)
1440    }
1441}
1442
// FIXME(#4311): these probably need different constants for memory64
//
// `UTF16_TAG` is the high bit of a 32-bit length word and
// `MAX_STRING_BYTE_LENGTH` is the largest value representable below it.
// NOTE(review): the use sites are outside this chunk — presumably the
// latin1+utf16 string encoding's tagged-length scheme; confirm where read.
const UTF16_TAG: usize = 1 << 31;
const MAX_STRING_BYTE_LENGTH: usize = (1 << 31) - 1;
1446
1447// Note that this is similar to `ComponentType for WasmStr` except it can only
1448// be used for lowering, not lifting.
1449unsafe impl ComponentType for str {
1450    type Lower = [ValRaw; 2];
1451
1452    const ABI: CanonicalAbiInfo = CanonicalAbiInfo::POINTER_PAIR;
1453
1454    fn typecheck(ty: &InterfaceType, _types: &InstanceType<'_>) -> Result<()> {
1455        match ty {
1456            InterfaceType::String => Ok(()),
1457            other => bail!("expected `string` found `{}`", desc(other)),
1458        }
1459    }
1460}
1461
1462unsafe impl Lower for str {
1463    fn linear_lower_to_flat<T>(
1464        &self,
1465        cx: &mut LowerContext<'_, T>,
1466        ty: InterfaceType,
1467        dst: &mut MaybeUninit<[ValRaw; 2]>,
1468    ) -> Result<()> {
1469        debug_assert!(matches!(ty, InterfaceType::String));
1470        let (ptr, len) = lower_string(cx, self)?;
1471        // See "WRITEPTR64" above for why this is always storing a 64-bit
1472        // integer.
1473        map_maybe_uninit!(dst[0]).write(ValRaw::i64(ptr as i64));
1474        map_maybe_uninit!(dst[1]).write(ValRaw::i64(len as i64));
1475        Ok(())
1476    }
1477
1478    fn linear_lower_to_memory<T>(
1479        &self,
1480        cx: &mut LowerContext<'_, T>,
1481        ty: InterfaceType,
1482        offset: usize,
1483    ) -> Result<()> {
1484        debug_assert!(matches!(ty, InterfaceType::String));
1485        debug_assert!(offset % (Self::ALIGN32 as usize) == 0);
1486        let (ptr, len) = lower_string(cx, self)?;
1487        // FIXME(#4311): needs memory64 handling
1488        *cx.get(offset + 0) = u32::try_from(ptr).unwrap().to_le_bytes();
1489        *cx.get(offset + 4) = u32::try_from(len).unwrap().to_le_bytes();
1490        Ok(())
1491    }
1492}
1493
1494fn lower_string<T>(cx: &mut LowerContext<'_, T>, string: &str) -> Result<(usize, usize)> {
1495    // Note that in general the wasm module can't assume anything about what the
1496    // host strings are encoded as. Additionally hosts are allowed to have
1497    // differently-encoded strings at runtime. Finally when copying a string
1498    // into wasm it's somewhat strict in the sense that the various patterns of
1499    // allocation and such are already dictated for us.
1500    //
1501    // In general what this means is that when copying a string from the host
1502    // into the destination we need to follow one of the cases of copying into
1503    // WebAssembly. It doesn't particularly matter which case as long as it ends
1504    // up in the right encoding. For example a destination encoding of
1505    // latin1+utf16 has a number of ways to get copied into and we do something
1506    // here that isn't the default "utf8 to latin1+utf16" since we have access
1507    // to simd-accelerated helpers in the `encoding_rs` crate. This is ok though
1508    // because we can fake that the host string was already stored in latin1
1509    // format and follow that copy pattern instead.
1510    match cx.options().string_encoding {
1511        // This corresponds to `store_string_copy` in the canonical ABI where
1512        // the host's representation is utf-8 and the wasm module wants utf-8 so
1513        // a copy is all that's needed (and the `realloc` can be precise for the
1514        // initial memory allocation).
1515        StringEncoding::Utf8 => {
1516            if string.len() > MAX_STRING_BYTE_LENGTH {
1517                bail!(
1518                    "string length of {} too large to copy into wasm",
1519                    string.len()
1520                );
1521            }
1522            let ptr = cx.realloc(0, 0, 1, string.len())?;
1523            cx.as_slice_mut()[ptr..][..string.len()].copy_from_slice(string.as_bytes());
1524            Ok((ptr, string.len()))
1525        }
1526
1527        // This corresponds to `store_utf8_to_utf16` in the canonical ABI. Here
1528        // an over-large allocation is performed and then shrunk afterwards if
1529        // necessary.
1530        StringEncoding::Utf16 => {
1531            let size = string.len() * 2;
1532            if size > MAX_STRING_BYTE_LENGTH {
1533                bail!(
1534                    "string length of {} too large to copy into wasm",
1535                    string.len()
1536                );
1537            }
1538            let mut ptr = cx.realloc(0, 0, 2, size)?;
1539            let mut copied = 0;
1540            let bytes = &mut cx.as_slice_mut()[ptr..][..size];
1541            for (u, bytes) in string.encode_utf16().zip(bytes.chunks_mut(2)) {
1542                let u_bytes = u.to_le_bytes();
1543                bytes[0] = u_bytes[0];
1544                bytes[1] = u_bytes[1];
1545                copied += 1;
1546            }
1547            if (copied * 2) < size {
1548                ptr = cx.realloc(ptr, size, 2, copied * 2)?;
1549            }
1550            Ok((ptr, copied))
1551        }
1552
1553        StringEncoding::CompactUtf16 => {
1554            // This corresponds to `store_string_to_latin1_or_utf16`
1555            let bytes = string.as_bytes();
1556            let mut iter = string.char_indices();
1557            let mut ptr = cx.realloc(0, 0, 2, bytes.len())?;
1558            let mut dst = &mut cx.as_slice_mut()[ptr..][..bytes.len()];
1559            let mut result = 0;
1560            while let Some((i, ch)) = iter.next() {
1561                // Test if this `char` fits into the latin1 encoding.
1562                if let Ok(byte) = u8::try_from(u32::from(ch)) {
1563                    dst[result] = byte;
1564                    result += 1;
1565                    continue;
1566                }
1567
1568                // .. if utf16 is forced to be used then the allocation is
1569                // bumped up to the maximum size.
1570                let worst_case = bytes
1571                    .len()
1572                    .checked_mul(2)
1573                    .ok_or_else(|| format_err!("byte length overflow"))?;
1574                if worst_case > MAX_STRING_BYTE_LENGTH {
1575                    bail!("byte length too large");
1576                }
1577                ptr = cx.realloc(ptr, bytes.len(), 2, worst_case)?;
1578                dst = &mut cx.as_slice_mut()[ptr..][..worst_case];
1579
1580                // Previously encoded latin1 bytes are inflated to their 16-bit
1581                // size for utf16
1582                for i in (0..result).rev() {
1583                    dst[2 * i] = dst[i];
1584                    dst[2 * i + 1] = 0;
1585                }
1586
1587                // and then the remainder of the string is encoded.
1588                for (u, bytes) in string[i..]
1589                    .encode_utf16()
1590                    .zip(dst[2 * result..].chunks_mut(2))
1591                {
1592                    let u_bytes = u.to_le_bytes();
1593                    bytes[0] = u_bytes[0];
1594                    bytes[1] = u_bytes[1];
1595                    result += 1;
1596                }
1597                if worst_case > 2 * result {
1598                    ptr = cx.realloc(ptr, worst_case, 2, 2 * result)?;
1599                }
1600                return Ok((ptr, result | UTF16_TAG));
1601            }
1602            if result < bytes.len() {
1603                ptr = cx.realloc(ptr, bytes.len(), 2, result)?;
1604            }
1605            Ok((ptr, result))
1606        }
1607    }
1608}
1609
/// Representation of a string located in linear memory in a WebAssembly
/// instance.
///
/// This type can be used in place of `String` and `str` for string-taking APIs
/// in some situations. The purpose of this type is to represent a range of
/// validated bytes within a component but does not actually copy the bytes. The
/// primary method, [`WasmStr::to_str`], attempts to return a reference to the
/// string directly located in the component's memory, avoiding a copy into the
/// host if possible.
///
/// The downside of this type, however, is that accessing a string requires a
/// [`Store`](crate::Store) pointer (via [`StoreContext`]). Bindings generated
/// by [`bindgen!`](crate::component::bindgen), for example, do not have access
/// to [`StoreContext`] and thus can't use this type.
///
/// This is intended for more advanced use cases such as defining functions
/// directly in a [`Linker`](crate::component::Linker). It's expected that in
/// the future [`bindgen!`](crate::component::bindgen) will also have a way to
/// use this type.
///
/// This type is used with [`TypedFunc`], for example, when WebAssembly returns
/// a string. This type cannot be used to give a string to WebAssembly, instead
/// `&str` should be used for that (since it's coming from the host).
///
/// Note that this type represents an in-bounds string in linear memory, but it
/// does not represent a valid string (e.g. valid utf-8). Validation happens
/// when [`WasmStr::to_str`] is called.
///
/// Also note that this type does not implement [`Lower`], it only implements
/// [`Lift`].
pub struct WasmStr {
    // Byte offset within the owning instance's linear memory where the
    // string begins.
    ptr: usize,
    // Length as produced by the guest: bytes for utf-8, code units for
    // utf-16, and possibly `UTF16_TAG`-tagged for the latin1+utf16 encoding.
    len: usize,
    // The canonical options (memory, string encoding, ...) this string was
    // lifted with.
    options: OptionsIndex,
    // The component instance whose linear memory this string points into.
    instance: Instance,
}
1646
impl WasmStr {
    // Validates that this string's bytes are in-bounds of the instance's
    // linear memory and charges fuel for them. The *encoding* is not
    // validated here; that happens lazily in `to_str`.
    pub(crate) fn new(ptr: usize, len: usize, cx: &mut LiftContext<'_>) -> Result<WasmStr> {
        // Translate the encoding-specific `len` into a byte length; `None`
        // means the multiplication overflowed.
        let byte_len = match cx.options().string_encoding {
            StringEncoding::Utf8 => Some(len),
            StringEncoding::Utf16 => len.checked_mul(2),
            StringEncoding::CompactUtf16 => {
                if len & UTF16_TAG == 0 {
                    // Tag clear: latin1, one byte per unit.
                    Some(len)
                } else {
                    // Tag set: utf-16, two bytes per code unit.
                    (len ^ UTF16_TAG).checked_mul(2)
                }
            }
        };
        // Overflow-safe bounds check of `ptr + byte_len`, charging fuel for
        // the byte range covered.
        match byte_len.and_then(|len| ptr.checked_add(len)) {
            Some(n) if n <= cx.memory().len() => cx.consume_fuel(n - ptr)?,
            _ => bail!("string pointer/length out of bounds of memory"),
        }
        Ok(WasmStr {
            ptr,
            len,
            options: cx.options_index(),
            instance: cx.instance_handle(),
        })
    }

    /// Returns the underlying string that this cursor points to.
    ///
    /// Note that this will internally decode the string from the wasm's
    /// encoding to utf-8 and additionally perform validation.
    ///
    /// The `store` provided must be the store where this string lives to
    /// access the correct memory.
    ///
    /// # Errors
    ///
    /// Returns an error if the string wasn't encoded correctly (e.g. invalid
    /// utf-8).
    ///
    /// # Panics
    ///
    /// Panics if this string is not owned by `store`.
    //
    // TODO: should add accessors for specifically utf-8 and utf-16 that perhaps
    // in an opt-in basis don't do validation. Additionally there should be some
    // method that returns `[u16]` after validating to avoid the utf16-to-utf8
    // transcode.
    pub fn to_str<'a, T: 'static>(
        &self,
        store: impl Into<StoreContext<'a, T>>,
    ) -> Result<Cow<'a, str>> {
        let store = store.into().0;
        // Look up the memory and encoding recorded when this string was
        // lifted.
        let memory = self.instance.options_memory(store, self.options);
        let encoding = self.instance.options(store, self.options).string_encoding;
        self.to_str_from_memory(encoding, memory)
    }

    // Decodes this string out of `memory` according to `encoding`, borrowing
    // from `memory` when no transcoding is required.
    pub(crate) fn to_str_from_memory<'a>(
        &self,
        encoding: StringEncoding,
        memory: &'a [u8],
    ) -> Result<Cow<'a, str>> {
        match encoding {
            StringEncoding::Utf8 => self.decode_utf8(memory),
            StringEncoding::Utf16 => self.decode_utf16(memory, self.len),
            StringEncoding::CompactUtf16 => {
                // The tag bit distinguishes latin1 from utf-16 storage.
                if self.len & UTF16_TAG == 0 {
                    self.decode_latin1(memory)
                } else {
                    self.decode_utf16(memory, self.len ^ UTF16_TAG)
                }
            }
        }
    }

    // Validates utf-8 in place, borrowing directly from linear memory on
    // success (no copy).
    fn decode_utf8<'a>(&self, memory: &'a [u8]) -> Result<Cow<'a, str>> {
        // Note that bounds-checking already happen in construction of `WasmStr`
        // so this is never expected to panic. This could theoretically be
        // unchecked indexing if we're feeling wild enough.
        Ok(str::from_utf8(&memory[self.ptr..][..self.len])?.into())
    }

    // Transcodes `len` little-endian utf-16 code units into an owned utf-8
    // `String`, failing on unpaired surrogates.
    fn decode_utf16<'a>(&self, memory: &'a [u8], len: usize) -> Result<Cow<'a, str>> {
        // See notes in `decode_utf8` for why this is panicking indexing.
        let memory = &memory[self.ptr..][..len * 2];
        Ok(core::char::decode_utf16(
            memory
                .chunks(2)
                .map(|chunk| u16::from_le_bytes(chunk.try_into().unwrap())),
        )
        .collect::<Result<String, _>>()?
        .into())
    }

    // Inflates latin1 bytes to utf-8; this decode itself cannot fail since
    // every latin1 byte value is a valid character.
    fn decode_latin1<'a>(&self, memory: &'a [u8]) -> Result<Cow<'a, str>> {
        // See notes in `decode_utf8` for why this is panicking indexing.
        Ok(encoding_rs::mem::decode_latin1(
            &memory[self.ptr..][..self.len],
        ))
    }
}
1747
1748// Note that this is similar to `ComponentType for str` except it can only be
1749// used for lifting, not lowering.
1750unsafe impl ComponentType for WasmStr {
1751    type Lower = <str as ComponentType>::Lower;
1752
1753    const ABI: CanonicalAbiInfo = CanonicalAbiInfo::POINTER_PAIR;
1754
1755    fn typecheck(ty: &InterfaceType, _types: &InstanceType<'_>) -> Result<()> {
1756        match ty {
1757            InterfaceType::String => Ok(()),
1758            other => bail!("expected `string` found `{}`", desc(other)),
1759        }
1760    }
1761}
1762
1763unsafe impl Lift for WasmStr {
1764    #[inline]
1765    fn linear_lift_from_flat(
1766        cx: &mut LiftContext<'_>,
1767        ty: InterfaceType,
1768        src: &Self::Lower,
1769    ) -> Result<Self> {
1770        debug_assert!(matches!(ty, InterfaceType::String));
1771        // FIXME(#4311): needs memory64 treatment
1772        let ptr = src[0].get_u32();
1773        let len = src[1].get_u32();
1774        let (ptr, len) = (usize::try_from(ptr)?, usize::try_from(len)?);
1775        WasmStr::new(ptr, len, cx)
1776    }
1777
1778    #[inline]
1779    fn linear_lift_from_memory(
1780        cx: &mut LiftContext<'_>,
1781        ty: InterfaceType,
1782        bytes: &[u8],
1783    ) -> Result<Self> {
1784        debug_assert!(matches!(ty, InterfaceType::String));
1785        debug_assert!((bytes.as_ptr() as usize) % (Self::ALIGN32 as usize) == 0);
1786        // FIXME(#4311): needs memory64 treatment
1787        let ptr = u32::from_le_bytes(bytes[..4].try_into().unwrap());
1788        let len = u32::from_le_bytes(bytes[4..].try_into().unwrap());
1789        let (ptr, len) = (usize::try_from(ptr)?, usize::try_from(len)?);
1790        WasmStr::new(ptr, len, cx)
1791    }
1792}
1793
1794unsafe impl<T> ComponentType for [T]
1795where
1796    T: ComponentType,
1797{
1798    type Lower = [ValRaw; 2];
1799
1800    const ABI: CanonicalAbiInfo = CanonicalAbiInfo::POINTER_PAIR;
1801
1802    fn typecheck(ty: &InterfaceType, types: &InstanceType<'_>) -> Result<()> {
1803        match ty {
1804            InterfaceType::List(t) => T::typecheck(&types.types[*t].element, types),
1805            other => bail!("expected `list` found `{}`", desc(other)),
1806        }
1807    }
1808}
1809
1810unsafe impl<T> Lower for [T]
1811where
1812    T: Lower,
1813{
1814    fn linear_lower_to_flat<U>(
1815        &self,
1816        cx: &mut LowerContext<'_, U>,
1817        ty: InterfaceType,
1818        dst: &mut MaybeUninit<[ValRaw; 2]>,
1819    ) -> Result<()> {
1820        let elem = match ty {
1821            InterfaceType::List(i) => cx.types[i].element,
1822            _ => bad_type_info(),
1823        };
1824        let (ptr, len) = lower_list(cx, elem, self)?;
1825        // See "WRITEPTR64" above for why this is always storing a 64-bit
1826        // integer.
1827        map_maybe_uninit!(dst[0]).write(ValRaw::i64(ptr as i64));
1828        map_maybe_uninit!(dst[1]).write(ValRaw::i64(len as i64));
1829        Ok(())
1830    }
1831
1832    fn linear_lower_to_memory<U>(
1833        &self,
1834        cx: &mut LowerContext<'_, U>,
1835        ty: InterfaceType,
1836        offset: usize,
1837    ) -> Result<()> {
1838        let elem = match ty {
1839            InterfaceType::List(i) => cx.types[i].element,
1840            _ => bad_type_info(),
1841        };
1842        debug_assert!(offset % (Self::ALIGN32 as usize) == 0);
1843        let (ptr, len) = lower_list(cx, elem, self)?;
1844        *cx.get(offset + 0) = u32::try_from(ptr).unwrap().to_le_bytes();
1845        *cx.get(offset + 4) = u32::try_from(len).unwrap().to_le_bytes();
1846        Ok(())
1847    }
1848}
1849
// FIXME: this is not a memcpy for `T` where `T` is something like `u8`.
//
// Some attempts to fix this have proved not fruitful. In isolation an attempt
// was made where:
//
// * `MemoryMut` stored a `*mut [u8]` as its "last view" of memory to avoid
//   reloading the base pointer constantly. This view is reset on `realloc`.
// * The bounds-checks in `MemoryMut::get` were removed (replaced with unsafe
//   indexing)
//
// Even then though this didn't correctly vectorized for `Vec<u8>`. It's not
// entirely clear why but it appeared that it's related to reloading the base
// pointer to memory (I guess from `MemoryMut` itself?). Overall I'm not really
// clear on what's happening there, but this is surely going to be a performance
// bottleneck in the future.
/// Lowers `list` into the guest's linear memory, allocating space via the
/// guest's `realloc` and storing each element in sequence.
///
/// Returns the `(ptr, len)` pair for the stored list where `len` is the
/// element count (not a byte count).
fn lower_list<T, U>(
    cx: &mut LowerContext<'_, U>,
    ty: InterfaceType,
    list: &[T],
) -> Result<(usize, usize)>
where
    T: Lower,
{
    // Guard against overflow when computing the total byte size of the
    // allocation.
    let elem_size = T::SIZE32;
    let size = list
        .len()
        .checked_mul(elem_size)
        .ok_or_else(|| format_err!("size overflow copying a list"))?;
    let ptr = cx.realloc(0, 0, T::ALIGN32, size)?;
    T::linear_store_list_to_memory(cx, ty, ptr, list)?;
    Ok((ptr, list.len()))
}
1882
/// Representation of a list of values that are owned by a WebAssembly instance.
///
/// For some more commentary about the rationale for this type see the
/// documentation of [`WasmStr`]. In summary this type can avoid a copy when
/// passing data to the host in some situations but is additionally more
/// cumbersome to use by requiring a [`Store`](crate::Store) to be provided.
///
/// This type is used whenever a `(list T)` is returned from a [`TypedFunc`],
/// for example. This type represents a list of values that are stored in linear
/// memory which are waiting to be read.
///
/// Note that this type represents only a valid range of bytes for the list
/// itself, it does not represent validity of the elements themselves and that's
/// performed when they're iterated.
///
/// Note that this type does not implement the [`Lower`] trait, only [`Lift`].
pub struct WasmList<T> {
    // Byte offset in the owning instance's linear memory where the list's
    // elements begin.
    ptr: usize,
    // Number of elements (not bytes) in the list.
    len: usize,
    // The canonical options this list was lifted with.
    options: OptionsIndex,
    // The component-level element type, needed to lift individual elements.
    elem: InterfaceType,
    // The component instance whose linear memory holds the elements.
    instance: Instance,
    // No `T` values are stored here; `T` is only produced when elements are
    // lifted out of memory.
    _marker: marker::PhantomData<T>,
}
1907
1908impl<T: Lift> WasmList<T> {
1909    pub(crate) fn new(
1910        ptr: usize,
1911        len: usize,
1912        cx: &mut LiftContext<'_>,
1913        elem: InterfaceType,
1914    ) -> Result<WasmList<T>> {
1915        match len
1916            .checked_mul(T::SIZE32)
1917            .and_then(|len| ptr.checked_add(len))
1918        {
1919            Some(n) if n <= cx.memory().len() => cx.consume_fuel_array(len, size_of::<T>())?,
1920            _ => bail!("list pointer/length out of bounds of memory"),
1921        }
1922        if ptr % usize::try_from(T::ALIGN32)? != 0 {
1923            bail!("list pointer is not aligned")
1924        }
1925        Ok(WasmList {
1926            ptr,
1927            len,
1928            options: cx.options_index(),
1929            elem,
1930            instance: cx.instance_handle(),
1931            _marker: marker::PhantomData,
1932        })
1933    }
1934
1935    /// Returns the item length of this vector
1936    #[inline]
1937    pub fn len(&self) -> usize {
1938        self.len
1939    }
1940
1941    /// Gets the `n`th element of this list.
1942    ///
1943    /// Returns `None` if `index` is out of bounds. Returns `Some(Err(..))` if
1944    /// the value couldn't be decoded (it was invalid). Returns `Some(Ok(..))`
1945    /// if the value is valid.
1946    ///
1947    /// # Panics
1948    ///
1949    /// This function will panic if the string did not originally come from the
1950    /// `store` specified.
1951    //
1952    // TODO: given that interface values are intended to be consumed in one go
1953    // should we even expose a random access iteration API? In theory all
1954    // consumers should be validating through the iterator.
1955    pub fn get(&self, mut store: impl AsContextMut, index: usize) -> Option<Result<T>> {
1956        let store = store.as_context_mut().0;
1957        let mut cx = LiftContext::new(store, self.options, self.instance);
1958        self.get_from_store(&mut cx, index)
1959    }
1960
1961    fn get_from_store(&self, cx: &mut LiftContext<'_>, index: usize) -> Option<Result<T>> {
1962        if index >= self.len {
1963            return None;
1964        }
1965        // Note that this is using panicking indexing and this is expected to
1966        // never fail. The bounds-checking here happened during the construction
1967        // of the `WasmList` itself which means these should always be in-bounds
1968        // (and wasm memory can only grow). This could theoretically be
1969        // unchecked indexing if we're confident enough and it's actually a perf
1970        // issue one day.
1971        let bytes = &cx.memory()[self.ptr + index * T::SIZE32..][..T::SIZE32];
1972        Some(T::linear_lift_from_memory(cx, self.elem, bytes))
1973    }
1974
1975    /// Returns an iterator over the elements of this list.
1976    ///
1977    /// Each item of the list may fail to decode and is represented through the
1978    /// `Result` value of the iterator.
1979    pub fn iter<'a, U: 'static>(
1980        &'a self,
1981        store: impl Into<StoreContextMut<'a, U>>,
1982    ) -> impl ExactSizeIterator<Item = Result<T>> + 'a {
1983        let store = store.into().0;
1984        let mut cx = LiftContext::new(store, self.options, self.instance);
1985        (0..self.len).map(move |i| self.get_from_store(&mut cx, i).unwrap())
1986    }
1987}
1988
// Generates inherent `as_le_slice` accessors on `WasmList<$i>` for each
// primitive integer type listed, giving zero-copy access to the list's
// backing bytes in linear memory.
macro_rules! raw_wasm_list_accessors {
    ($($i:ident)*) => ($(
        impl WasmList<$i> {
            /// Get access to the raw underlying memory for this list.
            ///
            /// This method will return a direct slice into the original wasm
            /// module's linear memory where the data for this slice is stored.
            /// This allows the embedder to have efficient access to the
            /// underlying memory if needed and avoid copies and such if
            /// desired.
            ///
            /// Note that multi-byte integers are stored in little-endian format
            /// so portable processing of this slice must be aware of the host's
            /// byte-endianness. The `from_le` constructors in the Rust standard
            /// library should be suitable for converting from little-endian.
            ///
            /// # Panics
            ///
            /// Panics if the `store` provided is not the one from which this
            /// slice originated.
            pub fn as_le_slice<'a, T: 'static>(&self, store: impl Into<StoreContext<'a, T>>) -> &'a [$i] {
                let memory = self.instance.options_memory(store.into().0, self.options);
                self._as_le_slice(memory)
            }

            // Reinterprets this list's byte range within `all_of_memory` as a
            // slice of `$i` without copying.
            fn _as_le_slice<'a>(&self, all_of_memory: &'a [u8]) -> &'a [$i] {
                // See comments in `WasmList::get` for the panicking indexing
                let byte_size = self.len * mem::size_of::<$i>();
                let bytes = &all_of_memory[self.ptr..][..byte_size];

                // The canonical ABI requires that everything is aligned to its
                // own size, so this should be an aligned array. Furthermore the
                // alignment of primitive integers for hosts should be smaller
                // than or equal to the size of the primitive itself, meaning
                // that a wasm canonical-abi-aligned list is also aligned for
                // the host. That should mean that the head/tail slices here are
                // empty.
                //
                // Also note that the `unsafe` here is needed since the type
                // we're aligning to isn't guaranteed to be valid, but in our
                // case it's just integers and bytes so this should be safe.
                unsafe {
                    let (head, body, tail) = bytes.align_to::<$i>();
                    assert!(head.is_empty() && tail.is_empty());
                    body
                }
            }
        }
    )*)
}

// Instantiate the raw-slice accessors for every fixed-width integer type.
raw_wasm_list_accessors! {
    i8 i16 i32 i64
    u8 u16 u32 u64
}
2044
// Note that this is similar to `ComponentType for [T]` except it can only be
// used for lifting, not lowering.
unsafe impl<T: ComponentType> ComponentType for WasmList<T> {
    // Same lowered representation as host slices: a (pointer, length) pair.
    type Lower = <[T] as ComponentType>::Lower;

    const ABI: CanonicalAbiInfo = CanonicalAbiInfo::POINTER_PAIR;

    fn typecheck(ty: &InterfaceType, types: &InstanceType<'_>) -> Result<()> {
        // Delegate to the slice impl: the type must be a `list` whose
        // element type-checks against `T`.
        <[T] as ComponentType>::typecheck(ty, types)
    }
}
2056
2057unsafe impl<T: Lift> Lift for WasmList<T> {
2058    fn linear_lift_from_flat(
2059        cx: &mut LiftContext<'_>,
2060        ty: InterfaceType,
2061        src: &Self::Lower,
2062    ) -> Result<Self> {
2063        let elem = match ty {
2064            InterfaceType::List(i) => cx.types[i].element,
2065            _ => bad_type_info(),
2066        };
2067        // FIXME(#4311): needs memory64 treatment
2068        let ptr = src[0].get_u32();
2069        let len = src[1].get_u32();
2070        let (ptr, len) = (usize::try_from(ptr)?, usize::try_from(len)?);
2071        WasmList::new(ptr, len, cx, elem)
2072    }
2073
2074    fn linear_lift_from_memory(
2075        cx: &mut LiftContext<'_>,
2076        ty: InterfaceType,
2077        bytes: &[u8],
2078    ) -> Result<Self> {
2079        let elem = match ty {
2080            InterfaceType::List(i) => cx.types[i].element,
2081            _ => bad_type_info(),
2082        };
2083        debug_assert!((bytes.as_ptr() as usize) % (Self::ALIGN32 as usize) == 0);
2084        // FIXME(#4311): needs memory64 treatment
2085        let ptr = u32::from_le_bytes(bytes[..4].try_into().unwrap());
2086        let len = u32::from_le_bytes(bytes[4..].try_into().unwrap());
2087        let (ptr, len) = (usize::try_from(ptr)?, usize::try_from(len)?);
2088        WasmList::new(ptr, len, cx, elem)
2089    }
2090}
2091
2092// =============================================================================
2093// HashMap<K, V> support for component model `map<K, V>`
2094//
2095// Maps are represented as `list<tuple<K, V>>` in the canonical ABI, so the
2096// lowered form is a (pointer, length) pair just like lists.
2097
2098fn typecheck_map<K, V>(ty: &InterfaceType, types: &InstanceType<'_>) -> Result<()>
2099where
2100    K: ComponentType,
2101    V: ComponentType,
2102{
2103    match ty {
2104        InterfaceType::Map(t) => {
2105            let map_ty = &types.types[*t];
2106            K::typecheck(&map_ty.key, types)?;
2107            V::typecheck(&map_ty.value, types)?;
2108            Ok(())
2109        }
2110        other => bail!("expected `map` found `{}`", desc(other)),
2111    }
2112}
2113
// Precomputed 32-bit canonical-ABI layout facts for a `map<K, V>` type,
// whose entries are stored as a list of `(key, value)` tuples.
#[derive(Copy, Clone)]
struct MapAbi32 {
    // Component-level type of the keys.
    key_ty: InterfaceType,
    // Component-level type of the values.
    value_ty: InterfaceType,
    // Size in bytes of one entry tuple under the 32-bit ABI.
    tuple_size: usize,
    // Alignment in bytes of one entry tuple under the 32-bit ABI.
    tuple_align: u32,
    // Byte offset of the value within each entry tuple.
    value_offset: usize,
}
2122
2123fn map_abi32(ty: InterfaceType, types: &ComponentTypes) -> MapAbi32 {
2124    match ty {
2125        InterfaceType::Map(i) => {
2126            let m = &types[i];
2127            MapAbi32 {
2128                key_ty: m.key,
2129                value_ty: m.value,
2130                tuple_size: usize::try_from(m.entry_abi.size32).unwrap(),
2131                tuple_align: m.entry_abi.align32,
2132                value_offset: usize::try_from(m.value_offset32).unwrap(),
2133            }
2134        }
2135        _ => bad_type_info(),
2136    }
2137}
2138
// Maps are `list<tuple<K, V>>` in the canonical ABI, so a `HashMap` lowers
// as a (pointer, length) pair just like lists.
#[cfg(not(feature = "std"))]
unsafe impl<K, V> ComponentType for HashMap<K, V>
where
    K: ComponentType,
    V: ComponentType,
{
    type Lower = [ValRaw; 2];

    const ABI: CanonicalAbiInfo = CanonicalAbiInfo::POINTER_PAIR;

    fn typecheck(ty: &InterfaceType, types: &InstanceType<'_>) -> Result<()> {
        // Both key and value types must match the component-level `map`.
        typecheck_map::<K, V>(ty, types)
    }
}
2153
#[cfg(not(feature = "std"))]
unsafe impl<K, V> Lower for HashMap<K, V>
where
    K: Lower,
    V: Lower,
{
    // Lowers this map's entries into linear memory and flattens the
    // resulting (pointer, length) pair into two `ValRaw` slots.
    fn linear_lower_to_flat<U>(
        &self,
        cx: &mut LowerContext<'_, U>,
        ty: InterfaceType,
        dst: &mut MaybeUninit<[ValRaw; 2]>,
    ) -> Result<()> {
        linear_lower_map_to_flat(cx, ty, self.len(), self.iter(), dst)
    }

    // Same as above but the (pointer, length) pair is itself stored in
    // linear memory at `offset`.
    fn linear_lower_to_memory<U>(
        &self,
        cx: &mut LowerContext<'_, U>,
        ty: InterfaceType,
        offset: usize,
    ) -> Result<()> {
        linear_lower_map_to_memory(cx, ty, self.len(), self.iter(), offset)
    }
}
2178
/// Lowers a map's entries into guest linear memory as a `list<tuple<K, V>>`.
///
/// Allocates `len * tuple_size` bytes via the guest's `realloc` and stores
/// each `(key, value)` pair sequentially; within each entry the key sits at
/// offset 0 and the value at the precomputed `value_offset`.
///
/// Returns the `(ptr, len)` pair where `len` is the entry count.
///
/// NOTE(review): assumes `iter` yields exactly `len` entries — confirm at
/// callers; yielding fewer leaves the tail of the allocation unwritten by
/// this function.
fn lower_map_iter<'a, K, V, U>(
    cx: &mut LowerContext<'_, U>,
    map: MapAbi32,
    len: usize,
    iter: impl Iterator<Item = (&'a K, &'a V)>,
) -> Result<(usize, usize)>
where
    K: Lower + 'a,
    V: Lower + 'a,
{
    // Guard against overflow when computing the total allocation size.
    let size = len
        .checked_mul(map.tuple_size)
        .ok_or_else(|| format_err!("size overflow copying a map"))?;
    let ptr = cx.realloc(0, 0, map.tuple_align, size)?;

    let mut entry_offset = ptr;
    for (key, value) in iter {
        // Keys are the first field in each entry tuple.
        <K as Lower>::linear_lower_to_memory(key, cx, map.key_ty, entry_offset)?;
        // Values start at the precomputed value offset within the tuple.
        <V as Lower>::linear_lower_to_memory(
            value,
            cx,
            map.value_ty,
            entry_offset + map.value_offset,
        )?;
        entry_offset += map.tuple_size;
    }

    Ok((ptr, len))
}
2210
2211fn linear_lower_map_to_flat<'a, K, V, U>(
2212    cx: &mut LowerContext<'_, U>,
2213    ty: InterfaceType,
2214    len: usize,
2215    iter: impl Iterator<Item = (&'a K, &'a V)>,
2216    dst: &mut MaybeUninit<[ValRaw; 2]>,
2217) -> Result<()>
2218where
2219    K: Lower + 'a,
2220    V: Lower + 'a,
2221{
2222    let map = map_abi32(ty, &cx.types);
2223    let (ptr, len) = lower_map_iter(cx, map, len, iter)?;
2224    // See "WRITEPTR64" above for why this is always storing a 64-bit integer.
2225    map_maybe_uninit!(dst[0]).write(ValRaw::i64(ptr as i64));
2226    map_maybe_uninit!(dst[1]).write(ValRaw::i64(len as i64));
2227    Ok(())
2228}
2229
2230fn linear_lower_map_to_memory<'a, K, V, U>(
2231    cx: &mut LowerContext<'_, U>,
2232    ty: InterfaceType,
2233    len: usize,
2234    iter: impl Iterator<Item = (&'a K, &'a V)>,
2235    offset: usize,
2236) -> Result<()>
2237where
2238    K: Lower + 'a,
2239    V: Lower + 'a,
2240{
2241    let map = map_abi32(ty, &cx.types);
2242    debug_assert!(offset % (CanonicalAbiInfo::POINTER_PAIR.align32 as usize) == 0);
2243    let (ptr, len) = lower_map_iter(cx, map, len, iter)?;
2244    *cx.get(offset + 0) = u32::try_from(ptr).unwrap().to_le_bytes();
2245    *cx.get(offset + 4) = u32::try_from(len).unwrap().to_le_bytes();
2246    Ok(())
2247}
2248
#[cfg(not(feature = "std"))]
unsafe impl<K, V> Lift for HashMap<K, V>
where
    K: Lift + Eq + Hash,
    V: Lift,
{
    // Lifts a map from its flat representation: a (pointer, length) pair of
    // 32-bit integers in two `ValRaw` slots.
    //
    // NOTE(review): unlike `WasmStr`/`WasmList` lifting, no explicit
    // `consume_fuel` call is visible on this path — confirm fuel accounting
    // happens inside `lift_map` or per-element lifting.
    fn linear_lift_from_flat(
        cx: &mut LiftContext<'_>,
        ty: InterfaceType,
        src: &Self::Lower,
    ) -> Result<Self> {
        let map = map_abi32(ty, &cx.types);
        // FIXME(#4311): needs memory64 treatment
        let ptr = src[0].get_u32();
        let len = src[1].get_u32();
        let (ptr, len) = (usize::try_from(ptr)?, usize::try_from(len)?);
        lift_map(cx, map, ptr, len)
    }

    // Lifts a map from its in-memory representation: two consecutive
    // little-endian u32s (pointer then length).
    fn linear_lift_from_memory(
        cx: &mut LiftContext<'_>,
        ty: InterfaceType,
        bytes: &[u8],
    ) -> Result<Self> {
        let map = map_abi32(ty, &cx.types);
        debug_assert!((bytes.as_ptr() as usize) % (Self::ALIGN32 as usize) == 0);
        // FIXME(#4311): needs memory64 treatment
        let ptr = u32::from_le_bytes(bytes[..4].try_into().unwrap());
        let len = u32::from_le_bytes(bytes[4..].try_into().unwrap());
        let (ptr, len) = (usize::try_from(ptr)?, usize::try_from(len)?);
        lift_map(cx, map, ptr, len)
    }
}
2282
/// Shared helper that validates a map's memory region and lifts each
/// (key, value) pair, forwarding them to `insert`.
///
/// `ptr` and `len` originate from the guest, so the entire
/// `len * tuple_size` region is bounds-checked and `ptr` is
/// alignment-checked before any entry is read.
fn lift_map_pairs<K, V>(
    cx: &mut LiftContext<'_>,
    map: MapAbi32,
    ptr: usize,
    len: usize,
    mut insert: impl FnMut(K, V) -> Result<()>,
) -> Result<()>
where
    K: Lift,
    V: Lift,
{
    // Overflow-safe check that `ptr + len * tuple_size` fits in memory;
    // `checked_mul`/`checked_add` turn any overflow into a bail below.
    match len
        .checked_mul(map.tuple_size)
        .and_then(|total| ptr.checked_add(total))
    {
        Some(n) if n <= cx.memory().len() => {}
        _ => bail!("map pointer/length out of bounds of memory"),
    }
    if ptr % (map.tuple_align as usize) != 0 {
        bail!("map pointer is not aligned");
    }

    for i in 0..len {
        // Each entry occupies one `tuple_size`-byte (key, value) tuple.
        let entry_base = ptr + (i * map.tuple_size);

        // Key is stored at the start of the entry tuple.
        let key_bytes = &cx.memory()[entry_base..][..K::SIZE32];
        let key = K::linear_lift_from_memory(cx, map.key_ty, key_bytes)?;

        // Value is stored at the precomputed offset within the tuple.
        let value_bytes = &cx.memory()[entry_base + map.value_offset..][..V::SIZE32];
        let value = V::linear_lift_from_memory(cx, map.value_ty, value_bytes)?;

        insert(key, value)?;
    }

    Ok(())
}
2321
2322#[cfg(not(feature = "std"))]
2323fn lift_map<K, V>(
2324    cx: &mut LiftContext<'_>,
2325    map: MapAbi32,
2326    ptr: usize,
2327    len: usize,
2328) -> Result<HashMap<K, V>>
2329where
2330    K: Lift + Eq + Hash,
2331    V: Lift,
2332{
2333    let mut result = HashMap::with_capacity(len);
2334    lift_map_pairs(cx, map, ptr, len, |k, v| {
2335        result.insert(k, v);
2336        Ok(())
2337    })?;
2338    Ok(result)
2339}
2340
2341// =============================================================================
2342// std::collections::HashMap<K, V> support for component model `map<K, V>`
2343
/// `std::collections::HashMap<K, V>` maps to the component model `map<K, V>`
/// type; its flat representation is the two-slot `(ptr, len)` pointer pair.
#[cfg(feature = "std")]
unsafe impl<K, V> ComponentType for std::collections::HashMap<K, V>
where
    K: ComponentType,
    V: ComponentType,
{
    // Flat form: pointer into linear memory plus entry count.
    type Lower = [ValRaw; 2];

    const ABI: CanonicalAbiInfo = CanonicalAbiInfo::POINTER_PAIR;

    fn typecheck(ty: &InterfaceType, types: &InstanceType<'_>) -> Result<()> {
        typecheck_map::<K, V>(ty, types)
    }
}
2358
/// Lowering for `std` `HashMap`, delegating to the shared map-lowering
/// helpers over the map's `(key, value)` iterator.
#[cfg(feature = "std")]
unsafe impl<K, V> Lower for std::collections::HashMap<K, V>
where
    K: Lower,
    V: Lower,
{
    // Lower into linear memory and store `(ptr, len)` in the flat slots.
    fn linear_lower_to_flat<U>(
        &self,
        cx: &mut LowerContext<'_, U>,
        ty: InterfaceType,
        dst: &mut MaybeUninit<[ValRaw; 2]>,
    ) -> Result<()> {
        linear_lower_map_to_flat(cx, ty, self.len(), self.iter(), dst)
    }

    // Lower into linear memory and store `(ptr, len)` at `offset`.
    fn linear_lower_to_memory<U>(
        &self,
        cx: &mut LowerContext<'_, U>,
        ty: InterfaceType,
        offset: usize,
    ) -> Result<()> {
        linear_lower_map_to_memory(cx, ty, self.len(), self.iter(), offset)
    }
}
2383
/// Lifting for `std` `HashMap`: lifts through the fallible-allocation
/// `TryHashMap` first, then moves the entries into a `std` `HashMap`.
#[cfg(feature = "std")]
unsafe impl<K, V> Lift for std::collections::HashMap<K, V>
where
    K: Lift + Eq + Hash,
    V: Lift,
{
    fn linear_lift_from_flat(
        cx: &mut LiftContext<'_>,
        ty: InterfaceType,
        src: &Self::Lower,
    ) -> Result<Self> {
        let try_map =
            <wasmtime_environ::collections::TryHashMap<K, V> as Lift>::linear_lift_from_flat(
                cx, ty, src,
            )?;
        // Move entries into the `std` map via its `FromIterator` impl.
        Ok(try_map.into_iter().collect())
    }

    fn linear_lift_from_memory(
        cx: &mut LiftContext<'_>,
        ty: InterfaceType,
        bytes: &[u8],
    ) -> Result<Self> {
        let try_map =
            <wasmtime_environ::collections::TryHashMap<K, V> as Lift>::linear_lift_from_memory(
                cx, ty, bytes,
            )?;
        Ok(try_map.into_iter().collect())
    }
}
2414
/// `TryHashMap<K, V>` (fallible-allocation map) also maps to the component
/// model `map<K, V>` type with the same `(ptr, len)` flat representation.
#[cfg(feature = "std")]
unsafe impl<K, V> ComponentType for wasmtime_environ::collections::TryHashMap<K, V>
where
    K: ComponentType,
    V: ComponentType,
{
    type Lower = [ValRaw; 2];

    const ABI: CanonicalAbiInfo = CanonicalAbiInfo::POINTER_PAIR;

    fn typecheck(ty: &InterfaceType, types: &InstanceType<'_>) -> Result<()> {
        typecheck_map::<K, V>(ty, types)
    }
}
2429
/// Lowering for `TryHashMap`, delegating to the same shared map-lowering
/// helpers used by the `std` `HashMap` implementation.
#[cfg(feature = "std")]
unsafe impl<K, V> Lower for wasmtime_environ::collections::TryHashMap<K, V>
where
    K: Lower,
    V: Lower,
{
    fn linear_lower_to_flat<U>(
        &self,
        cx: &mut LowerContext<'_, U>,
        ty: InterfaceType,
        dst: &mut MaybeUninit<[ValRaw; 2]>,
    ) -> Result<()> {
        linear_lower_map_to_flat(cx, ty, self.len(), self.iter(), dst)
    }

    fn linear_lower_to_memory<U>(
        &self,
        cx: &mut LowerContext<'_, U>,
        ty: InterfaceType,
        offset: usize,
    ) -> Result<()> {
        linear_lower_map_to_memory(cx, ty, self.len(), self.iter(), offset)
    }
}
2454
/// Lifting for `TryHashMap`, decoding the `(ptr, len)` representation and
/// delegating to `lift_try_map`.
#[cfg(feature = "std")]
unsafe impl<K, V> Lift for wasmtime_environ::collections::TryHashMap<K, V>
where
    K: Lift + Eq + Hash,
    V: Lift,
{
    /// Lifts a map from the flat representation: pointer and entry count in
    /// the two 32-bit slots.
    fn linear_lift_from_flat(
        cx: &mut LiftContext<'_>,
        ty: InterfaceType,
        src: &Self::Lower,
    ) -> Result<Self> {
        let map = map_abi32(ty, &cx.types);
        // FIXME(#4311): needs memory64 treatment
        let ptr = src[0].get_u32();
        let len = src[1].get_u32();
        let (ptr, len) = (usize::try_from(ptr)?, usize::try_from(len)?);
        lift_try_map(cx, map, ptr, len)
    }

    /// Lifts a map from linear memory as a little-endian `(ptr, len)` pair;
    /// relies on `bytes` being exactly 8 bytes long (the trailing `try_into`
    /// succeeds only when `bytes[4..]` has length 4).
    fn linear_lift_from_memory(
        cx: &mut LiftContext<'_>,
        ty: InterfaceType,
        bytes: &[u8],
    ) -> Result<Self> {
        let map = map_abi32(ty, &cx.types);
        debug_assert!((bytes.as_ptr() as usize) % (Self::ALIGN32 as usize) == 0);
        // FIXME(#4311): needs memory64 treatment
        let ptr = u32::from_le_bytes(bytes[..4].try_into().unwrap());
        let len = u32::from_le_bytes(bytes[4..].try_into().unwrap());
        let (ptr, len) = (usize::try_from(ptr)?, usize::try_from(len)?);
        lift_try_map(cx, map, ptr, len)
    }
}
2488
/// Lifts a component-model map into a `TryHashMap`.
///
/// Bounds-validates the guest-supplied region *before* reserving capacity:
/// `len` comes straight from wasm, so validation-first rejects a hostile
/// length outright rather than first attempting a huge (albeit fallible)
/// allocation. `lift_map_pairs` repeats this check, which is harmless.
#[cfg(feature = "std")]
fn lift_try_map<K, V>(
    cx: &mut LiftContext<'_>,
    map: MapAbi32,
    ptr: usize,
    len: usize,
) -> Result<TryHashMap<K, V>>
where
    K: Lift + Eq + Hash,
    V: Lift,
{
    // Same overflow-safe region check as `lift_map_pairs`, performed early
    // so the capacity reservation below is always bounded by memory size.
    match len
        .checked_mul(map.tuple_size)
        .and_then(|total| ptr.checked_add(total))
    {
        Some(n) if n <= cx.memory().len() => {}
        _ => bail!("map pointer/length out of bounds of memory"),
    }
    let mut result = TryHashMap::with_capacity(len)?;
    lift_map_pairs(cx, map, ptr, len, |k, v| {
        result.insert(k, v).map(drop).map_err(Into::into)
    })?;
    Ok(result)
}
2506
2507/// Verify that the given wasm type is a tuple with the expected fields in the right order.
2508fn typecheck_tuple(
2509    ty: &InterfaceType,
2510    types: &InstanceType<'_>,
2511    expected: &[fn(&InterfaceType, &InstanceType<'_>) -> Result<()>],
2512) -> Result<()> {
2513    match ty {
2514        InterfaceType::Tuple(t) => {
2515            let tuple = &types.types[*t];
2516            if tuple.types.len() != expected.len() {
2517                bail!(
2518                    "expected {}-tuple, found {}-tuple",
2519                    expected.len(),
2520                    tuple.types.len()
2521                );
2522            }
2523            for (ty, check) in tuple.types.iter().zip(expected) {
2524                check(ty, types)?;
2525            }
2526            Ok(())
2527        }
2528        other => bail!("expected `tuple` found `{}`", desc(other)),
2529    }
2530}
2531
2532/// Verify that the given wasm type is a record with the expected fields in the right order and with the right
2533/// names.
2534pub fn typecheck_record(
2535    ty: &InterfaceType,
2536    types: &InstanceType<'_>,
2537    expected: &[(&str, fn(&InterfaceType, &InstanceType<'_>) -> Result<()>)],
2538) -> Result<()> {
2539    match ty {
2540        InterfaceType::Record(index) => {
2541            let fields = &types.types[*index].fields;
2542
2543            if fields.len() != expected.len() {
2544                bail!(
2545                    "expected record of {} fields, found {} fields",
2546                    expected.len(),
2547                    fields.len()
2548                );
2549            }
2550
2551            for (field, &(name, check)) in fields.iter().zip(expected) {
2552                check(&field.ty, types)
2553                    .with_context(|| format!("type mismatch for field {name}"))?;
2554
2555                if field.name != name {
2556                    bail!("expected record field named {}, found {}", name, field.name);
2557                }
2558            }
2559
2560            Ok(())
2561        }
2562        other => bail!("expected `record` found `{}`", desc(other)),
2563    }
2564}
2565
2566/// Verify that the given wasm type is a variant with the expected cases in the right order and with the right
2567/// names.
2568pub fn typecheck_variant(
2569    ty: &InterfaceType,
2570    types: &InstanceType<'_>,
2571    expected: &[(
2572        &str,
2573        Option<fn(&InterfaceType, &InstanceType<'_>) -> Result<()>>,
2574    )],
2575) -> Result<()> {
2576    match ty {
2577        InterfaceType::Variant(index) => {
2578            let cases = &types.types[*index].cases;
2579
2580            if cases.len() != expected.len() {
2581                bail!(
2582                    "expected variant of {} cases, found {} cases",
2583                    expected.len(),
2584                    cases.len()
2585                );
2586            }
2587
2588            for ((case_name, case_ty), &(name, check)) in cases.iter().zip(expected) {
2589                if *case_name != name {
2590                    bail!("expected variant case named {name}, found {case_name}");
2591                }
2592
2593                match (check, case_ty) {
2594                    (Some(check), Some(ty)) => check(ty, types)
2595                        .with_context(|| format!("type mismatch for case {name}"))?,
2596                    (None, None) => {}
2597                    (Some(_), None) => {
2598                        bail!("case `{name}` has no type but one was expected")
2599                    }
2600                    (None, Some(_)) => {
2601                        bail!("case `{name}` has a type but none was expected")
2602                    }
2603                }
2604            }
2605
2606            Ok(())
2607        }
2608        other => bail!("expected `variant` found `{}`", desc(other)),
2609    }
2610}
2611
2612/// Verify that the given wasm type is a enum with the expected cases in the right order and with the right
2613/// names.
2614pub fn typecheck_enum(
2615    ty: &InterfaceType,
2616    types: &InstanceType<'_>,
2617    expected: &[&str],
2618) -> Result<()> {
2619    match ty {
2620        InterfaceType::Enum(index) => {
2621            let names = &types.types[*index].names;
2622
2623            if names.len() != expected.len() {
2624                bail!(
2625                    "expected enum of {} names, found {} names",
2626                    expected.len(),
2627                    names.len()
2628                );
2629            }
2630
2631            for (name, expected) in names.iter().zip(expected) {
2632                if name != expected {
2633                    bail!("expected enum case named {expected}, found {name}");
2634                }
2635            }
2636
2637            Ok(())
2638        }
2639        other => bail!("expected `enum` found `{}`", desc(other)),
2640    }
2641}
2642
2643/// Verify that the given wasm type is a flags type with the expected flags in the right order and with the right
2644/// names.
2645pub fn typecheck_flags(
2646    ty: &InterfaceType,
2647    types: &InstanceType<'_>,
2648    expected: &[&str],
2649) -> Result<()> {
2650    match ty {
2651        InterfaceType::Flags(index) => {
2652            let names = &types.types[*index].names;
2653
2654            if names.len() != expected.len() {
2655                bail!(
2656                    "expected flags type with {} names, found {} names",
2657                    expected.len(),
2658                    names.len()
2659                );
2660            }
2661
2662            for (name, expected) in names.iter().zip(expected) {
2663                if name != expected {
2664                    bail!("expected flag named {expected}, found {name}");
2665                }
2666            }
2667
2668            Ok(())
2669        }
2670        other => bail!("expected `flags` found `{}`", desc(other)),
2671    }
2672}
2673
/// Format the specified bitflags using the specified names for debugging
pub fn format_flags(bits: &[u32], names: &[&str], f: &mut fmt::Formatter) -> fmt::Result {
    f.write_str("(")?;
    // Separator is empty before the first flag written, "|" afterwards.
    let mut sep = "";
    for (i, name) in names.iter().enumerate() {
        // Flag `i` lives at bit `i % 32` of word `i / 32`.
        if (bits[i / 32] >> (i % 32)) & 1 != 0 {
            f.write_str(sep)?;
            f.write_str(name)?;
            sep = "|";
        }
    }
    f.write_str(")")
}
2691
/// `Option<T>` maps to the component model `option<T>` type.
unsafe impl<T> ComponentType for Option<T>
where
    T: ComponentType,
{
    // Flat form: a 32-bit discriminant slot followed by `T`'s flat form.
    type Lower = TupleLower<<u32 as ComponentType>::Lower, T::Lower>;

    // Two-case variant: `none` carries no payload, `some` carries `T`.
    const ABI: CanonicalAbiInfo = CanonicalAbiInfo::variant_static(&[None, Some(T::ABI)]);

    fn typecheck(ty: &InterfaceType, types: &InstanceType<'_>) -> Result<()> {
        match ty {
            InterfaceType::Option(t) => T::typecheck(&types.types[*t].ty, types),
            other => bail!("expected `option` found `{}`", desc(other)),
        }
    }
}
2707
// `Option<T>` as a variant: the `None` case has no payload and the `Some`
// case carries `T`'s ABI.
unsafe impl<T> ComponentVariant for Option<T>
where
    T: ComponentType,
{
    const CASES: &'static [Option<CanonicalAbiInfo>] = &[None, Some(T::ABI)];
}
2714
unsafe impl<T> Lower for Option<T>
where
    T: Lower,
{
    /// Lowers to the flat form: discriminant (0 = `None`, 1 = `Some`) in the
    /// first slot, payload in the remaining slots (zeroed for `None`).
    fn linear_lower_to_flat<U>(
        &self,
        cx: &mut LowerContext<'_, U>,
        ty: InterfaceType,
        dst: &mut MaybeUninit<Self::Lower>,
    ) -> Result<()> {
        // Extract the payload type from the component-level `option` type.
        let payload = match ty {
            InterfaceType::Option(ty) => cx.types[ty].ty,
            _ => bad_type_info(),
        };
        match self {
            None => {
                map_maybe_uninit!(dst.A1).write(ValRaw::i32(0));
                // Note that this is unsafe as we're writing an arbitrary
                // bit-pattern to an arbitrary type, but part of the unsafe
                // contract of the `ComponentType` trait is that we can assign
                // any bit-pattern. By writing all zeros here we're ensuring
                // that the core wasm arguments this translates to will all be
                // zeros (as the canonical ABI requires).
                unsafe {
                    map_maybe_uninit!(dst.A2).as_mut_ptr().write_bytes(0u8, 1);
                }
            }
            Some(val) => {
                map_maybe_uninit!(dst.A1).write(ValRaw::i32(1));
                val.linear_lower_to_flat(cx, payload, map_maybe_uninit!(dst.A2))?;
            }
        }
        Ok(())
    }

    /// Lowers into linear memory: one discriminant byte at `offset`, then for
    /// `Some` the payload at the ABI-computed payload offset.
    fn linear_lower_to_memory<U>(
        &self,
        cx: &mut LowerContext<'_, U>,
        ty: InterfaceType,
        offset: usize,
    ) -> Result<()> {
        debug_assert!(offset % (Self::ALIGN32 as usize) == 0);
        let payload = match ty {
            InterfaceType::Option(ty) => cx.types[ty].ty,
            _ => bad_type_info(),
        };
        match self {
            None => {
                // Only the discriminant byte is written for `None`.
                cx.get::<1>(offset)[0] = 0;
            }
            Some(val) => {
                cx.get::<1>(offset)[0] = 1;
                val.linear_lower_to_memory(
                    cx,
                    payload,
                    offset + (Self::INFO.payload_offset32 as usize),
                )?;
            }
        }
        Ok(())
    }
}
2777
unsafe impl<T> Lift for Option<T>
where
    T: Lift,
{
    /// Lifts an option from the flat form: discriminant in the first slot,
    /// payload (read only for `Some`) in the remaining slots.
    fn linear_lift_from_flat(
        cx: &mut LiftContext<'_>,
        ty: InterfaceType,
        src: &Self::Lower,
    ) -> Result<Self> {
        let payload = match ty {
            InterfaceType::Option(ty) => cx.types[ty].ty,
            _ => bad_type_info(),
        };
        Ok(match src.A1.get_i32() {
            0 => None,
            1 => Some(T::linear_lift_from_flat(cx, payload, &src.A2)?),
            _ => bail!("invalid option discriminant"),
        })
    }

    /// Lifts an option from linear memory: discriminant byte first, payload
    /// at the ABI-computed payload offset (read only when discriminant is 1).
    fn linear_lift_from_memory(
        cx: &mut LiftContext<'_>,
        ty: InterfaceType,
        bytes: &[u8],
    ) -> Result<Self> {
        debug_assert!((bytes.as_ptr() as usize) % (Self::ALIGN32 as usize) == 0);
        let payload_ty = match ty {
            InterfaceType::Option(ty) => cx.types[ty].ty,
            _ => bad_type_info(),
        };
        let discrim = bytes[0];
        let payload = &bytes[Self::INFO.payload_offset32 as usize..];
        match discrim {
            0 => Ok(None),
            1 => Ok(Some(T::linear_lift_from_memory(cx, payload_ty, payload)?)),
            _ => bail!("invalid option discriminant"),
        }
    }
}
2817
/// Flat (core wasm) representation of `Result<T, E>`: a discriminant slot
/// followed by a union of the two cases' flat payload representations.
#[derive(Clone, Copy)]
#[repr(C)]
pub struct ResultLower<T: Copy, E: Copy> {
    // 0 for `Ok`, 1 for `Err` (see `Lower`/`Lift` impls below).
    tag: ValRaw,
    payload: ResultLowerPayload<T, E>,
}

/// Overlapping storage for the `ok` and `err` flat payloads; only the case
/// selected by `tag` is meaningful, and `lower_payload` zero-fills any flat
/// slots the active case doesn't cover.
#[derive(Clone, Copy)]
#[repr(C)]
union ResultLowerPayload<T: Copy, E: Copy> {
    ok: T,
    err: E,
}
2831
2832unsafe impl<T, E> ComponentType for Result<T, E>
2833where
2834    T: ComponentType,
2835    E: ComponentType,
2836{
2837    type Lower = ResultLower<T::Lower, E::Lower>;
2838
2839    const ABI: CanonicalAbiInfo = CanonicalAbiInfo::variant_static(&[Some(T::ABI), Some(E::ABI)]);
2840
2841    fn typecheck(ty: &InterfaceType, types: &InstanceType<'_>) -> Result<()> {
2842        match ty {
2843            InterfaceType::Result(r) => {
2844                let result = &types.types[*r];
2845                match &result.ok {
2846                    Some(ty) => T::typecheck(ty, types)?,
2847                    None if T::IS_RUST_UNIT_TYPE => {}
2848                    None => bail!("expected no `ok` type"),
2849                }
2850                match &result.err {
2851                    Some(ty) => E::typecheck(ty, types)?,
2852                    None if E::IS_RUST_UNIT_TYPE => {}
2853                    None => bail!("expected no `err` type"),
2854                }
2855                Ok(())
2856            }
2857            other => bail!("expected `result` found `{}`", desc(other)),
2858        }
2859    }
2860}
2861
/// Lowers the payload of a variant into the storage for the entire payload,
/// handling writing zeros at the end of the representation if this payload is
/// smaller than the entire flat representation.
///
/// * `payload` - the flat storage space for the entire payload of the variant
/// * `typed_payload` - projection from the payload storage space to the
///   individual storage space for this variant.
/// * `lower` - lowering operation used to initialize the `typed_payload` return
///   value.
///
/// For more information on this see the comments in the `Lower for Result`
/// implementation below.
///
/// # Safety
///
/// `P` and `T` are expected to be flat-lowering storage types viewable as
/// `[ValRaw]` via `storage_as_slice`, with `T`'s storage a prefix of `P`'s —
/// TODO confirm against the `ComponentType::Lower` contract.
pub unsafe fn lower_payload<P, T>(
    payload: &mut MaybeUninit<P>,
    typed_payload: impl FnOnce(&mut MaybeUninit<P>) -> &mut MaybeUninit<T>,
    lower: impl FnOnce(&mut MaybeUninit<T>) -> Result<()>,
) -> Result<()> {
    let typed = typed_payload(payload);
    lower(typed)?;

    // SAFETY: relies on this function's contract that both `P` and `T` are
    // `ValRaw`-based flat storage.
    let typed_len = unsafe { storage_as_slice(typed).len() };
    let payload = unsafe { storage_as_slice_mut(payload) };
    // Zero-fill the flat slots this case's payload didn't cover so no host
    // memory leaks into the values handed to the guest.
    for slot in payload[typed_len..].iter_mut() {
        slot.write(ValRaw::u64(0));
    }
    Ok(())
}
2889
// `Result<T, E>` as a variant: both cases carry a payload (`T` then `E`).
unsafe impl<T, E> ComponentVariant for Result<T, E>
where
    T: ComponentType,
    E: ComponentType,
{
    const CASES: &'static [Option<CanonicalAbiInfo>] = &[Some(T::ABI), Some(E::ABI)];
}
2897
unsafe impl<T, E> Lower for Result<T, E>
where
    T: Lower,
    E: Lower,
{
    /// Lowers to the flat form: discriminant (0 = `Ok`, 1 = `Err`) followed
    /// by the joined payload; see the extensive comment below for how the
    /// "join" of the two payload representations is handled.
    fn linear_lower_to_flat<U>(
        &self,
        cx: &mut LowerContext<'_, U>,
        ty: InterfaceType,
        dst: &mut MaybeUninit<Self::Lower>,
    ) -> Result<()> {
        let (ok, err) = match ty {
            InterfaceType::Result(ty) => {
                let ty = &cx.types[ty];
                (ty.ok, ty.err)
            }
            _ => bad_type_info(),
        };

        // This implementation of `Lower::lower`, if you're reading these from
        // the top of this file, is the first location that the "join" logic of
        // the component model's canonical ABI encountered. The rough problem is
        // that let's say we have a component model type of the form:
        //
        //      (result u64 (error (tuple f32 u16)))
        //
        // The flat representation of this is actually pretty tricky. Currently
        // it is:
        //
        //      i32 i64 i32
        //
        // The first `i32` is the discriminant for the `result`, and the payload
        // is represented by `i64 i32`. The "ok" variant will only use the `i64`
        // and the "err" variant will use both `i64` and `i32`.
        //
        // In the "ok" variant the first issue is encountered. The size of one
        // variant may not match the size of the other variants. All variants
        // start at the "front" but when lowering a type we need to be sure to
        // initialize the later variants (lest we leak random host memory into
        // the guest module). Due to how the `Lower` type is represented as a
        // `union` of all the variants what ends up happening here is that
        // internally within the `lower_payload` after the typed payload is
        // lowered the remaining bits of the payload that weren't initialized
        // are all set to zero. This will guarantee that we'll write to all the
        // slots for each variant.
        //
        // The "err" variant encounters the second issue, however, which is that
        // the flat representation for each type may differ between payloads. In
        // the "ok" arm an `i64` is written, but the `lower` implementation for
        // the "err" arm will write an `f32` and then an `i32`. For this
        // implementation of `lower` to be valid the `f32` needs to get inflated
        // to an `i64` with zero-padding in the upper bits. What may be
        // surprising, however, is that none of this is handled in this file.
        // This implementation looks like it's blindly deferring to `E::lower`
        // and hoping it does the right thing.
        //
        // In reality, however, the correctness of variant lowering relies on
        // two subtle details of the `ValRaw` implementation in Wasmtime:
        //
        // 1. First the `ValRaw` value always contains little-endian values.
        //    This means that if a `u32` is written, a `u64` is read, and then
        //    the `u64` has its upper bits truncated the original value will
        //    always be retained. This is primarily here for big-endian
        //    platforms where if it weren't little endian then the opposite
        //    would occur and the wrong value would be read.
        //
        // 2. Second, and perhaps even more subtly, the `ValRaw` constructors
        //    for 32-bit types actually always initialize 64-bits of the
        //    `ValRaw`. In the component model flat ABI only 32 and 64-bit types
        //    are used so 64-bits is big enough to contain everything. This
        //    means that when a `ValRaw` is written into the destination it will
        //    always, whether it's needed or not, be "ready" to get extended up
        //    to 64-bits.
        //
        // Put together these two subtle guarantees means that all `Lower`
        // implementations can be written "naturally" as one might naively
        // expect. Variants will, on each arm, zero out remaining fields and all
        // writes to the flat representation will automatically be 64-bit writes
        // meaning that if the value is read as a 64-bit value, which isn't
        // known at the time of the write, it'll still be correct.
        match self {
            Ok(e) => {
                map_maybe_uninit!(dst.tag).write(ValRaw::i32(0));
                // SAFETY: projecting the payload union to its `ok` case and
                // zero-filling the rest matches `lower_payload`'s contract.
                unsafe {
                    lower_payload(
                        map_maybe_uninit!(dst.payload),
                        |payload| map_maybe_uninit!(payload.ok),
                        |dst| match ok {
                            Some(ok) => e.linear_lower_to_flat(cx, ok, dst),
                            None => Ok(()),
                        },
                    )
                }
            }
            Err(e) => {
                map_maybe_uninit!(dst.tag).write(ValRaw::i32(1));
                // SAFETY: as above, but projecting to the `err` case.
                unsafe {
                    lower_payload(
                        map_maybe_uninit!(dst.payload),
                        |payload| map_maybe_uninit!(payload.err),
                        |dst| match err {
                            Some(err) => e.linear_lower_to_flat(cx, err, dst),
                            None => Ok(()),
                        },
                    )
                }
            }
        }
    }

    /// Lowers into linear memory: one discriminant byte at `offset`, then the
    /// active case's payload (if it has a type) at the ABI payload offset.
    fn linear_lower_to_memory<U>(
        &self,
        cx: &mut LowerContext<'_, U>,
        ty: InterfaceType,
        offset: usize,
    ) -> Result<()> {
        let (ok, err) = match ty {
            InterfaceType::Result(ty) => {
                let ty = &cx.types[ty];
                (ty.ok, ty.err)
            }
            _ => bad_type_info(),
        };
        debug_assert!(offset % (Self::ALIGN32 as usize) == 0);
        let payload_offset = Self::INFO.payload_offset32 as usize;
        match self {
            Ok(e) => {
                cx.get::<1>(offset)[0] = 0;
                // No payload is written when the component type has no `ok`.
                if let Some(ok) = ok {
                    e.linear_lower_to_memory(cx, ok, offset + payload_offset)?;
                }
            }
            Err(e) => {
                cx.get::<1>(offset)[0] = 1;
                if let Some(err) = err {
                    e.linear_lower_to_memory(cx, err, offset + payload_offset)?;
                }
            }
        }
        Ok(())
    }
}
3040
unsafe impl<T, E> Lift for Result<T, E>
where
    T: Lift,
    E: Lift,
{
    /// Lifts a result from the flat form: discriminant in `tag`, then the
    /// active case's payload read from the shared payload union.
    #[inline]
    fn linear_lift_from_flat(
        cx: &mut LiftContext<'_>,
        ty: InterfaceType,
        src: &Self::Lower,
    ) -> Result<Self> {
        let (ok, err) = match ty {
            InterfaceType::Result(ty) => {
                let ty = &cx.types[ty];
                (ty.ok, ty.err)
            }
            _ => bad_type_info(),
        };
        // Note that this implementation specifically isn't trying to actually
        // reinterpret or alter the bits of `lower` depending on which variant
        // we're lifting. This ends up all working out because the value is
        // stored in little-endian format.
        //
        // When stored in little-endian format the `{T,E}::Lower`, when each
        // individual `ValRaw` is read, means that if an i64 value, extended
        // from an i32 value, was stored then when the i32 value is read it'll
        // automatically ignore the upper bits.
        //
        // This "trick" allows us to seamlessly pass through the `Self::Lower`
        // representation into the lifting/lowering without trying to handle
        // "join"ed types as per the canonical ABI. It just so happens that i64
        // bits will naturally be reinterpreted as f64. Additionally if the
        // joined type is i64 but only the lower bits are read that's ok and we
        // don't need to validate the upper bits.
        //
        // This is largely enabled by WebAssembly/component-model#35 where no
        // validation needs to be performed for ignored bits and bytes here.
        Ok(match src.tag.get_i32() {
            0 => Ok(unsafe { lift_option(cx, ok, &src.payload.ok)? }),
            1 => Err(unsafe { lift_option(cx, err, &src.payload.err)? }),
            _ => bail!("invalid expected discriminant"),
        })
    }

    /// Lifts a result from linear memory: discriminant byte first, then the
    /// active case's payload (sliced to that case's exact size) at the ABI
    /// payload offset.
    #[inline]
    fn linear_lift_from_memory(
        cx: &mut LiftContext<'_>,
        ty: InterfaceType,
        bytes: &[u8],
    ) -> Result<Self> {
        debug_assert!((bytes.as_ptr() as usize) % (Self::ALIGN32 as usize) == 0);
        let discrim = bytes[0];
        let payload = &bytes[Self::INFO.payload_offset32 as usize..];
        let (ok, err) = match ty {
            InterfaceType::Result(ty) => {
                let ty = &cx.types[ty];
                (ty.ok, ty.err)
            }
            _ => bad_type_info(),
        };
        match discrim {
            0 => Ok(Ok(load_option(cx, ok, &payload[..T::SIZE32])?)),
            1 => Ok(Err(load_option(cx, err, &payload[..E::SIZE32])?)),
            _ => bail!("invalid expected discriminant"),
        }
    }
}
3108
3109fn lift_option<T>(cx: &mut LiftContext<'_>, ty: Option<InterfaceType>, src: &T::Lower) -> Result<T>
3110where
3111    T: Lift,
3112{
3113    match ty {
3114        Some(ty) => T::linear_lift_from_flat(cx, ty, src),
3115        None => Ok(empty_lift()),
3116    }
3117}
3118
3119fn load_option<T>(cx: &mut LiftContext<'_>, ty: Option<InterfaceType>, bytes: &[u8]) -> Result<T>
3120where
3121    T: Lift,
3122{
3123    match ty {
3124        Some(ty) => T::linear_lift_from_memory(cx, ty, bytes),
3125        None => Ok(empty_lift()),
3126    }
3127}
3128
/// Produces a value of `T` without reading any bits, asserting that `T` is a
/// zero-sized Rust unit-like type (e.g. `()`), used when a `result`/`option`
/// payload type is absent.
fn empty_lift<T>() -> T
where
    T: Lift,
{
    // Both checks guarantee `T` is a ZST before the unsafe block below; they
    // are internal invariants, so a panic here indicates a bug.
    assert!(T::IS_RUST_UNIT_TYPE);
    assert_eq!(mem::size_of::<T>(), 0);
    // SAFETY: `T` is zero-sized (asserted above), so an uninitialized value
    // has no bytes requiring initialization and `assume_init` is sound.
    unsafe { MaybeUninit::uninit().assume_init() }
}
3137
/// Helper structure to define `Lower` for tuples below.
///
/// Uses default type parameters to have fields be zero-sized and not present
/// in memory for smaller tuple values. The field names (`A1`..`A17`) are load
/// bearing: they must match the identifiers produced by
/// `for_each_function_signature!` since the macro-generated impls below
/// project into this struct via those names.
#[expect(non_snake_case, reason = "more amenable to macro-generated code")]
#[doc(hidden)]
#[derive(Clone, Copy)]
#[repr(C)]
pub struct TupleLower<
    T1 = (),
    T2 = (),
    T3 = (),
    T4 = (),
    T5 = (),
    T6 = (),
    T7 = (),
    T8 = (),
    T9 = (),
    T10 = (),
    T11 = (),
    T12 = (),
    T13 = (),
    T14 = (),
    T15 = (),
    T16 = (),
    T17 = (),
> {
    // NB: these names match the names in `for_each_function_signature!`
    A1: T1,
    A2: T2,
    A3: T3,
    A4: T4,
    A5: T5,
    A6: T6,
    A7: T7,
    A8: T8,
    A9: T9,
    A10: T10,
    A11: T11,
    A12: T12,
    A13: T13,
    A14: T14,
    A15: T15,
    A16: T16,
    A17: T17,
    // Zero-length `ValRaw` array: contributes no size but, with `repr(C)`,
    // raises the struct's alignment to that of `ValRaw` even when all type
    // parameters default to `()`.
    _align_tuple_lower0_correctly: [ValRaw; 0],
}
3185
/// Implements `ComponentType`, `Lower`, `Lift`, and `ComponentNamedList` for
/// a Rust tuple of arity `$n` with element types `$t...`, mapping it to a
/// component-model `tuple` type. Invoked once per supported arity via
/// `for_each_function_signature!` below.
macro_rules! impl_component_ty_for_tuples {
    ($n:tt $($t:ident)*) => {
        #[allow(non_snake_case, reason = "macro-generated code")]
        unsafe impl<$($t,)*> ComponentType for ($($t,)*)
            where $($t: ComponentType),*
        {
            // Unused `TupleLower` slots fall back to their `()` defaults and
            // occupy no memory.
            type Lower = TupleLower<$($t::Lower),*>;

            const ABI: CanonicalAbiInfo = CanonicalAbiInfo::record_static(&[
                $($t::ABI),*
            ]);

            // Only the 0-tuple is a "unit" type: any repetition of the macro
            // variable below overwrites `_is_unit` with `false`.
            const IS_RUST_UNIT_TYPE: bool = {
                let mut _is_unit = true;
                $(
                    let _anything_to_bind_the_macro_variable = $t::IS_RUST_UNIT_TYPE;
                    _is_unit = false;
                )*
                _is_unit
            };

            fn typecheck(
                ty: &InterfaceType,
                types: &InstanceType<'_>,
            ) -> Result<()> {
                typecheck_tuple(ty, types, &[$($t::typecheck),*])
            }
        }

        #[allow(non_snake_case, reason = "macro-generated code")]
        unsafe impl<$($t,)*> Lower for ($($t,)*)
            where $($t: Lower),*
        {
            fn linear_lower_to_flat<U>(
                &self,
                cx: &mut LowerContext<'_, U>,
                ty: InterfaceType,
                _dst: &mut MaybeUninit<Self::Lower>,
            ) -> Result<()> {
                let types = match ty {
                    InterfaceType::Tuple(t) => &cx.types[t].types,
                    _ => bad_type_info(),
                };
                let ($($t,)*) = self;
                // Walk this tuple's component types in lockstep with the Rust
                // fields; a length mismatch indicates corrupt type info.
                let mut _types = types.iter();
                $(
                    let ty = *_types.next().unwrap_or_else(bad_type_info);
                    $t.linear_lower_to_flat(cx, ty, map_maybe_uninit!(_dst.$t))?;
                )*
                Ok(())
            }

            fn linear_lower_to_memory<U>(
                &self,
                cx: &mut LowerContext<'_, U>,
                ty: InterfaceType,
                mut _offset: usize,
            ) -> Result<()> {
                debug_assert!(_offset % (Self::ALIGN32 as usize) == 0);
                let types = match ty {
                    InterfaceType::Tuple(t) => &cx.types[t].types,
                    _ => bad_type_info(),
                };
                let ($($t,)*) = self;
                let mut _types = types.iter();
                $(
                    let ty = *_types.next().unwrap_or_else(bad_type_info);
                    // `next_field32_size` advances `_offset` past this field
                    // per the canonical ABI's alignment rules.
                    $t.linear_lower_to_memory(cx, ty, $t::ABI.next_field32_size(&mut _offset))?;
                )*
                Ok(())
            }
        }

        #[allow(non_snake_case, reason = "macro-generated code")]
        unsafe impl<$($t,)*> Lift for ($($t,)*)
            where $($t: Lift),*
        {
            #[inline]
            fn linear_lift_from_flat(cx: &mut LiftContext<'_>, ty: InterfaceType, _src: &Self::Lower) -> Result<Self> {
                let types = match ty {
                    InterfaceType::Tuple(t) => &cx.types[t].types,
                    _ => bad_type_info(),
                };
                let mut _types = types.iter();
                Ok(($(
                    $t::linear_lift_from_flat(
                        cx,
                        *_types.next().unwrap_or_else(bad_type_info),
                        &_src.$t,
                    )?,
                )*))
            }

            #[inline]
            fn linear_lift_from_memory(cx: &mut LiftContext<'_>, ty: InterfaceType, bytes: &[u8]) -> Result<Self> {
                debug_assert!((bytes.as_ptr() as usize) % (Self::ALIGN32 as usize) == 0);
                let types = match ty {
                    InterfaceType::Tuple(t) => &cx.types[t].types,
                    _ => bad_type_info(),
                };
                let mut _types = types.iter();
                let mut _offset = 0;
                $(
                    let ty = *_types.next().unwrap_or_else(bad_type_info);
                    // Each field is lifted from its ABI-computed slice of
                    // `bytes`, exactly `SIZE32` bytes long.
                    let $t = $t::linear_lift_from_memory(cx, ty, &bytes[$t::ABI.next_field32_size(&mut _offset)..][..$t::SIZE32])?;
                )*
                Ok(($($t,)*))
            }
        }

        #[allow(non_snake_case, reason = "macro-generated code")]
        unsafe impl<$($t,)*> ComponentNamedList for ($($t,)*)
            where $($t: ComponentType),*
        {}
    };
}
3302
// Instantiate the tuple impls above once per arity supported by
// `for_each_function_signature!`.
for_each_function_signature!(impl_component_ty_for_tuples);
3304
3305pub fn desc(ty: &InterfaceType) -> &'static str {
3306    match ty {
3307        InterfaceType::U8 => "u8",
3308        InterfaceType::S8 => "s8",
3309        InterfaceType::U16 => "u16",
3310        InterfaceType::S16 => "s16",
3311        InterfaceType::U32 => "u32",
3312        InterfaceType::S32 => "s32",
3313        InterfaceType::U64 => "u64",
3314        InterfaceType::S64 => "s64",
3315        InterfaceType::Float32 => "f32",
3316        InterfaceType::Float64 => "f64",
3317        InterfaceType::Bool => "bool",
3318        InterfaceType::Char => "char",
3319        InterfaceType::String => "string",
3320        InterfaceType::List(_) => "list",
3321        InterfaceType::Tuple(_) => "tuple",
3322        InterfaceType::Option(_) => "option",
3323        InterfaceType::Result(_) => "result",
3324
3325        InterfaceType::Record(_) => "record",
3326        InterfaceType::Variant(_) => "variant",
3327        InterfaceType::Flags(_) => "flags",
3328        InterfaceType::Enum(_) => "enum",
3329        InterfaceType::Own(_) => "owned resource",
3330        InterfaceType::Borrow(_) => "borrowed resource",
3331        InterfaceType::Future(_) => "future",
3332        InterfaceType::Stream(_) => "stream",
3333        InterfaceType::ErrorContext(_) => "error-context",
3334        InterfaceType::Map(_) => "map",
3335        InterfaceType::FixedLengthList(_) => "list<_, N>",
3336    }
3337}
3338
#[cold]
#[doc(hidden)]
pub fn bad_type_info<T>() -> T {
    // Reaching this point means the static type information and the runtime
    // type tables disagree, which is an internal invariant violation rather
    // than a user error, hence the panic. Switching to
    // `unreachable_unchecked` could shave cycles if this ever shows up in
    // profiles, but would propagate `unsafe` widely, so it likely isn't
    // worth it.
    panic!("bad type information detected");
}