// wasmtime/runtime/component/func/typed.rs
1use crate::component::Instance;
2use crate::component::func::{Func, LiftContext, LowerContext};
3use crate::component::matching::InstanceType;
4use crate::component::storage::{storage_as_slice, storage_as_slice_mut};
5use crate::hash_map::HashMap;
6use crate::prelude::*;
7use crate::{AsContextMut, StoreContext, StoreContextMut, ValRaw};
8use alloc::borrow::Cow;
9use core::fmt;
10use core::hash::Hash;
11use core::iter;
12use core::marker;
13use core::mem::{self, MaybeUninit};
14use core::str;
15use wasmtime_environ::component::{
16 CanonicalAbiInfo, ComponentTypes, InterfaceType, MAX_FLAT_PARAMS, MAX_FLAT_RESULTS,
17 OptionsIndex, StringEncoding, TypeMap, VariantInfo,
18};
19
20#[cfg(feature = "component-model-async")]
21use crate::component::concurrent::{self, AsAccessor, PreparedCall};
22
/// A statically-typed version of [`Func`] which takes `Params` as input and
/// returns `Return`.
///
/// This is an efficient way to invoke a WebAssembly component where if the
/// inputs and output are statically known this can eschew the vast majority of
/// machinery and checks when calling WebAssembly. This is the most optimized
/// way to call a WebAssembly component.
///
/// Note that like [`Func`] this is a pointer within a [`Store`](crate::Store)
/// and usage will panic if used with the wrong store.
///
/// This type is primarily created with the [`Func::typed`] API.
///
/// See [`ComponentType`] for more information about supported types.
pub struct TypedFunc<Params, Return> {
    // The underlying type-erased function handle; `Params`/`Return` only
    // exist at the type level via `_marker` below.
    func: Func,

    // The definition of this field is somewhat subtle and may be surprising.
    // Naively one might expect something like
    //
    //      _marker: marker::PhantomData<fn(Params) -> Return>,
    //
    // Since this is a function pointer after all. The problem with this
    // definition though is that it imposes the wrong variance on `Params` from
    // what we want. Abstractly a `fn(Params)` is able to store `Params` within
    // it meaning you can only give it `Params` that live longer than the
    // function pointer.
    //
    // With a component model function, however, we're always copying data from
    // the host into the guest, so we are never storing pointers to `Params`
    // into the guest outside the duration of a `call`, meaning we can actually
    // accept values in `TypedFunc::call` which live for a shorter duration
    // than the `Params` argument on the struct.
    //
    // This all means that we don't use a phantom function pointer, but instead
    // feign phantom storage here to get the variance desired.
    _marker: marker::PhantomData<(Params, Return)>,
}
61
// `TypedFunc` is just a `Func` handle plus phantom data, so it is freely
// copyable regardless of what `Params` and `Return` are.
impl<Params, Return> Copy for TypedFunc<Params, Return> {}
63
64impl<Params, Return> Clone for TypedFunc<Params, Return> {
65 fn clone(&self) -> TypedFunc<Params, Return> {
66 *self
67 }
68}
69
70impl<Params, Return> TypedFunc<Params, Return>
71where
72 Params: ComponentNamedList + Lower,
73 Return: ComponentNamedList + Lift,
74{
75 /// Creates a new [`TypedFunc`] from the provided component [`Func`],
76 /// unsafely asserting that the underlying function takes `Params` as
77 /// input and returns `Return`.
78 ///
79 /// # Unsafety
80 ///
81 /// This is an unsafe function because it does not verify that the [`Func`]
82 /// provided actually implements this signature. It's up to the caller to
83 /// have performed some other sort of check to ensure that the signature is
84 /// correct.
85 pub unsafe fn new_unchecked(func: Func) -> TypedFunc<Params, Return> {
86 TypedFunc {
87 _marker: marker::PhantomData,
88 func,
89 }
90 }
91
92 /// Returns the underlying un-typed [`Func`] that this [`TypedFunc`]
93 /// references.
94 pub fn func(&self) -> &Func {
95 &self.func
96 }
97
98 /// Calls the underlying WebAssembly component function using the provided
99 /// `params` as input.
100 ///
101 /// This method is used to enter into a component. Execution happens within
102 /// the `store` provided. The `params` are copied into WebAssembly memory
103 /// as appropriate and a core wasm function is invoked.
104 ///
105 /// # Post-return
106 ///
107 /// In the component model each function can have a "post return" specified
108 /// which allows cleaning up the arguments returned to the host. For example
109 /// if WebAssembly returns a string to the host then it might be a uniquely
110 /// allocated string which, after the host finishes processing it, needs to
111 /// be deallocated in the wasm instance's own linear memory to prevent
112 /// memory leaks in wasm itself. The `post-return` canonical abi option is
113 /// used to configured this.
114 ///
115 /// If a post-return function is present, it will be called automatically by
116 /// this function.
117 ///
118 /// # Errors
119 ///
120 /// This function can return an error for a number of reasons:
121 ///
122 /// * If the wasm itself traps during execution.
123 /// * If the wasm traps while copying arguments into memory.
124 /// * If the wasm provides bad allocation pointers when copying arguments
125 /// into memory.
126 /// * If the wasm returns a value which violates the canonical ABI.
127 /// * If this function's instances cannot be entered, for example if the
128 /// instance is currently calling a host function.
129 /// * If `store` requires using [`Self::call_async`] instead, see
130 /// [crate documentation](crate#async) for more info.
131 ///
132 /// In general there are many ways that things could go wrong when copying
133 /// types in and out of a wasm module with the canonical ABI, and certain
134 /// error conditions are specific to certain types. For example a
135 /// WebAssembly module can't return an invalid `char`. When allocating space
136 /// for this host to copy a string into the returned pointer must be
137 /// in-bounds in memory.
138 ///
139 /// If an error happens then the error should contain detailed enough
140 /// information to understand which part of the canonical ABI went wrong
141 /// and what to inspect.
142 ///
143 /// This function will return an [`OutOfMemory`][crate::OutOfMemory] error when
144 /// memory allocation fails. See the `OutOfMemory` type's documentation for
145 /// details on Wasmtime's out-of-memory handling.
146 ///
147 /// # Panics
148 ///
149 /// Panics if `store` does not own this function.
150 pub fn call(&self, mut store: impl AsContextMut, params: Params) -> Result<Return> {
151 let mut store = store.as_context_mut();
152 store.0.validate_sync_call()?;
153 self.call_impl(store.as_context_mut(), params)
154 }
155
156 /// Exactly like [`Self::call`], except for invoking WebAssembly
157 /// [asynchronously](crate#async).
158 ///
159 /// # Errors
160 ///
161 /// This function will return an [`OutOfMemory`][crate::OutOfMemory] error when
162 /// memory allocation fails. See the `OutOfMemory` type's documentation for
163 /// details on Wasmtime's out-of-memory handling.
164 ///
165 /// # Panics
166 ///
167 /// Panics if `store` does not own this function.
168 #[cfg(feature = "async")]
169 pub async fn call_async(
170 &self,
171 mut store: impl AsContextMut<Data: Send>,
172 params: Params,
173 ) -> Result<Return>
174 where
175 Return: 'static,
176 {
177 let mut store = store.as_context_mut();
178
179 #[cfg(feature = "component-model-async")]
180 if store.0.concurrency_support() {
181 use crate::component::concurrent::TaskId;
182 use crate::runtime::vm::SendSyncPtr;
183 use core::ptr::NonNull;
184
185 let ptr = SendSyncPtr::from(NonNull::from(¶ms).cast::<u8>());
186 let prepared =
187 self.prepare_call(store.as_context_mut(), true, move |cx, ty, dst| {
188 // SAFETY: The goal here is to get `Params`, a non-`'static`
189 // value, to live long enough to the lowering of the
190 // parameters. We're guaranteed that `Params` lives in the
191 // future of the outer function (we're in an `async fn`) so it'll
192 // stay alive as long as the future itself. That is distinct,
193 // for example, from the signature of `call_concurrent` below.
194 //
195 // Here a pointer to `Params` is smuggled to this location
196 // through a `SendSyncPtr<u8>` to thwart the `'static` check
197 // of rustc and the signature of `prepare_call`.
198 //
199 // Note the use of `SignalOnDrop` in the code that follows
200 // this closure, which ensures that the task will be removed
201 // from the concurrent state to which it belongs when the
202 // containing `Future` is dropped, so long as the parameters
203 // have not yet been lowered. Since this closure is removed from
204 // the task after the parameters are lowered, it will never be called
205 // after the containing `Future` is dropped.
206 let params = unsafe { ptr.cast::<Params>().as_ref() };
207 Self::lower_args(cx, ty, dst, params)
208 })?;
209
210 struct SignalOnDrop<'a, T: 'static> {
211 store: StoreContextMut<'a, T>,
212 task: TaskId,
213 }
214
215 impl<'a, T> Drop for SignalOnDrop<'a, T> {
216 fn drop(&mut self) {
217 self.task.host_future_dropped(self.store.0).unwrap();
218 }
219 }
220
221 let mut wrapper = SignalOnDrop {
222 store,
223 task: prepared.task_id(),
224 };
225
226 let result = concurrent::queue_call(wrapper.store.as_context_mut(), prepared)?;
227 return wrapper
228 .store
229 .as_context_mut()
230 .run_concurrent_trap_on_idle(async |_| Ok(result.await?))
231 .await?;
232 }
233
234 store
235 .on_fiber(|store| self.call_impl(store, params))
236 .await?
237 }
238
239 /// Start a concurrent call to this function.
240 ///
241 /// Concurrency is achieved by relying on the [`Accessor`] argument, which
242 /// can be obtained by calling [`StoreContextMut::run_concurrent`].
243 ///
244 /// Unlike [`Self::call`] and [`Self::call_async`] (both of which require
245 /// exclusive access to the store until the completion of the call), calls
246 /// made using this method may run concurrently with other calls to the same
247 /// instance. In addition, the runtime will call the `post-return` function
248 /// (if any) automatically when the guest task completes.
249 ///
250 /// This function will return an error if [`Config::concurrency_support`] is
251 /// disabled.
252 ///
253 /// [`Config::concurrency_support`]: crate::Config::concurrency_support
254 ///
255 /// # Progress and Cancellation
256 ///
257 /// For more information about how to make progress on the wasm task or how
258 /// to cancel the wasm task see the documentation for
259 /// [`Func::call_concurrent`].
260 ///
261 /// [`Func::call_concurrent`]: crate::component::Func::call_concurrent
262 ///
263 /// # Panics
264 ///
265 /// Panics if the store that the [`Accessor`] is derived from does not own
266 /// this function.
267 ///
268 /// [`Accessor`]: crate::component::Accessor
269 ///
270 /// # Example
271 ///
272 /// Using [`StoreContextMut::run_concurrent`] to get an [`Accessor`]:
273 ///
274 /// ```
275 /// # use {
276 /// # wasmtime::{
277 /// # error::{Result},
278 /// # component::{Component, Linker, ResourceTable},
279 /// # Config, Engine, Store
280 /// # },
281 /// # };
282 /// #
283 /// # struct Ctx { table: ResourceTable }
284 /// #
285 /// # async fn foo() -> Result<()> {
286 /// # let mut config = Config::new();
287 /// # let engine = Engine::new(&config)?;
288 /// # let mut store = Store::new(&engine, Ctx { table: ResourceTable::new() });
289 /// # let mut linker = Linker::new(&engine);
290 /// # let component = Component::new(&engine, "")?;
291 /// # let instance = linker.instantiate_async(&mut store, &component).await?;
292 /// let my_typed_func = instance.get_typed_func::<(), ()>(&mut store, "my_typed_func")?;
293 /// store.run_concurrent(async |accessor| -> wasmtime::Result<_> {
294 /// my_typed_func.call_concurrent(accessor, ()).await?;
295 /// Ok(())
296 /// }).await??;
297 /// # Ok(())
298 /// # }
299 /// ```
300 #[cfg(feature = "component-model-async")]
301 pub async fn call_concurrent(
302 self,
303 accessor: impl AsAccessor<Data: Send>,
304 params: Params,
305 ) -> Result<Return>
306 where
307 Params: 'static,
308 Return: 'static,
309 {
310 let result = accessor.as_accessor().with(|mut store| {
311 let mut store = store.as_context_mut();
312 ensure!(
313 store.0.concurrency_support(),
314 "cannot use `call_concurrent` Config::concurrency_support disabled",
315 );
316
317 let prepared =
318 self.prepare_call(store.as_context_mut(), false, move |cx, ty, dst| {
319 Self::lower_args(cx, ty, dst, ¶ms)
320 })?;
321 concurrent::queue_call(store, prepared)
322 });
323 Ok(result?.await?)
324 }
325
326 fn lower_args<T>(
327 cx: &mut LowerContext<T>,
328 ty: InterfaceType,
329 dst: &mut [MaybeUninit<ValRaw>],
330 params: &Params,
331 ) -> Result<()> {
332 use crate::component::storage::slice_to_storage_mut;
333
334 if Params::flatten_count() <= MAX_FLAT_PARAMS {
335 // SAFETY: the safety of `slice_to_storage_mut` relies on
336 // `Params::Lower` being represented by a sequence of
337 // `ValRaw`, and that's a guarantee upheld by the `Lower`
338 // trait itself.
339 let dst: &mut MaybeUninit<Params::Lower> = unsafe { slice_to_storage_mut(dst) };
340 Self::lower_stack_args(cx, ¶ms, ty, dst)
341 } else {
342 Self::lower_heap_args(cx, ¶ms, ty, &mut dst[0])
343 }
344 }
345
346 /// Calls `concurrent::prepare_call` with monomorphized functions for
347 /// lowering the parameters and lifting the result according to the number
348 /// of core Wasm parameters and results in the signature of the function to
349 /// be called.
350 #[cfg(feature = "component-model-async")]
351 fn prepare_call<T>(
352 self,
353 store: StoreContextMut<'_, T>,
354 host_future_present: bool,
355 lower: impl FnOnce(
356 &mut LowerContext<T>,
357 InterfaceType,
358 &mut [MaybeUninit<ValRaw>],
359 ) -> Result<()>
360 + Send
361 + Sync
362 + 'static,
363 ) -> Result<PreparedCall<Return>>
364 where
365 Return: 'static,
366 {
367 use crate::component::storage::slice_to_storage;
368 debug_assert!(store.0.concurrency_support());
369
370 let param_count = if Params::flatten_count() <= MAX_FLAT_PARAMS {
371 Params::flatten_count()
372 } else {
373 1
374 };
375 let max_results = if self.func.abi_async(store.0) {
376 MAX_FLAT_PARAMS
377 } else {
378 MAX_FLAT_RESULTS
379 };
380 concurrent::prepare_call(
381 store,
382 self.func,
383 param_count,
384 host_future_present,
385 move |func, store, params_out| {
386 func.with_lower_context(store, |cx, ty| lower(cx, ty, params_out))
387 },
388 move |func, store, results| {
389 let result = if Return::flatten_count() <= max_results {
390 func.with_lift_context(store, |cx, ty| {
391 // SAFETY: Per the safety requiments documented for the
392 // `ComponentType` trait, `Return::Lower` must be
393 // compatible at the binary level with a `[ValRaw; N]`,
394 // where `N` is `mem::size_of::<Return::Lower>() /
395 // mem::size_of::<ValRaw>()`. And since this function
396 // is only used when `Return::flatten_count() <=
397 // MAX_FLAT_RESULTS` and `MAX_FLAT_RESULTS == 1`, `N`
398 // can only either be 0 or 1.
399 //
400 // See `ComponentInstance::exit_call` for where we use
401 // the result count passed from
402 // `wasmtime_environ::fact::trampoline`-generated code
403 // to ensure the slice has the correct length, and also
404 // `concurrent::start_call` for where we conservatively
405 // use a slice length of 1 unconditionally. Also note
406 // that, as of this writing `slice_to_storage`
407 // double-checks the slice length is sufficient.
408 let results: &Return::Lower = unsafe { slice_to_storage(results) };
409 Self::lift_stack_result(cx, ty, results)
410 })?
411 } else {
412 func.with_lift_context(store, |cx, ty| {
413 Self::lift_heap_result(cx, ty, &results[0])
414 })?
415 };
416 Ok(Box::new(result))
417 },
418 )
419 }
420
421 fn call_impl(&self, mut store: impl AsContextMut, params: Params) -> Result<Return> {
422 let mut store = store.as_context_mut();
423
424 if self.func.abi_async(store.0) {
425 bail!("must enable the `component-model-async` feature to call async-lifted exports")
426 }
427
428 // Note that this is in theory simpler than it might read at this time.
429 // Here we're doing a runtime dispatch on the `flatten_count` for the
430 // params/results to see whether they're inbounds. This creates 4 cases
431 // to handle. In reality this is a highly optimizable branch where LLVM
432 // will easily figure out that only one branch here is taken.
433 //
434 // Otherwise this current construction is done to ensure that the stack
435 // space reserved for the params/results is always of the appropriate
436 // size (as the params/results needed differ depending on the "flatten"
437 // count)
438 //
439 // SAFETY: the safety of these invocations of `call_raw` depends on the
440 // correctness of the ascription of the `LowerParams` and `LowerReturn`
441 // types on the `call_raw` function. That's upheld here through the
442 // safety requirements of `Lift` and `Lower` on `Params` and `Return` in
443 // combination with checking the various possible branches here and
444 // dispatching to appropriately typed functions.
445 let (result, post_return_arg) = unsafe {
446 // This type is used as `LowerParams` for `call_raw` which is either
447 // `Params::Lower` or `ValRaw` representing it's either on the stack
448 // or it's on the heap. This allocates 1 extra `ValRaw` on the stack
449 // if `Params` is empty and `Return` is also empty, but that's a
450 // reasonable enough price to pay for now given the current code
451 // organization.
452 #[derive(Copy, Clone)]
453 union Union<T: Copy, U: Copy> {
454 _a: T,
455 _b: U,
456 }
457
458 if Return::flatten_count() <= MAX_FLAT_RESULTS {
459 self.func.call_raw(
460 store.as_context_mut(),
461 |cx, ty, dst: &mut MaybeUninit<Union<Params::Lower, ValRaw>>| {
462 let dst = storage_as_slice_mut(dst);
463 Self::lower_args(cx, ty, dst, ¶ms)
464 },
465 Self::lift_stack_result,
466 )
467 } else {
468 self.func.call_raw(
469 store.as_context_mut(),
470 |cx, ty, dst: &mut MaybeUninit<Union<Params::Lower, ValRaw>>| {
471 let dst = storage_as_slice_mut(dst);
472 Self::lower_args(cx, ty, dst, ¶ms)
473 },
474 Self::lift_heap_result,
475 )
476 }
477 }?;
478
479 self.func.post_return_impl(store, post_return_arg)?;
480
481 Ok(result)
482 }
483
484 /// Lower parameters directly onto the stack specified by the `dst`
485 /// location.
486 ///
487 /// This is only valid to call when the "flatten count" is small enough, or
488 /// when the canonical ABI says arguments go through the stack rather than
489 /// the heap.
490 fn lower_stack_args<T>(
491 cx: &mut LowerContext<'_, T>,
492 params: &Params,
493 ty: InterfaceType,
494 dst: &mut MaybeUninit<Params::Lower>,
495 ) -> Result<()> {
496 assert!(Params::flatten_count() <= MAX_FLAT_PARAMS);
497 params.linear_lower_to_flat(cx, ty, dst)?;
498 Ok(())
499 }
500
501 /// Lower parameters onto a heap-allocated location.
502 ///
503 /// This is used when the stack space to be used for the arguments is above
504 /// the `MAX_FLAT_PARAMS` threshold. Here the wasm's `realloc` function is
505 /// invoked to allocate space and then parameters are stored at that heap
506 /// pointer location.
507 fn lower_heap_args<T>(
508 cx: &mut LowerContext<'_, T>,
509 params: &Params,
510 ty: InterfaceType,
511 dst: &mut MaybeUninit<ValRaw>,
512 ) -> Result<()> {
513 // Memory must exist via validation if the arguments are stored on the
514 // heap, so we can create a `MemoryMut` at this point. Afterwards
515 // `realloc` is used to allocate space for all the arguments and then
516 // they're all stored in linear memory.
517 //
518 // Note that `realloc` will bake in a check that the returned pointer is
519 // in-bounds.
520 let ptr = cx.realloc(0, 0, Params::ALIGN32, Params::SIZE32)?;
521 params.linear_lower_to_memory(cx, ty, ptr)?;
522
523 // Note that the pointer here is stored as a 64-bit integer. This allows
524 // this to work with either 32 or 64-bit memories. For a 32-bit memory
525 // it'll just ignore the upper 32 zero bits, and for 64-bit memories
526 // this'll have the full 64-bits. Note that for 32-bit memories the call
527 // to `realloc` above guarantees that the `ptr` is in-bounds meaning
528 // that we will know that the zero-extended upper bits of `ptr` are
529 // guaranteed to be zero.
530 //
531 // This comment about 64-bit integers is also referred to below with
532 // "WRITEPTR64".
533 dst.write(ValRaw::i64(ptr as i64));
534
535 Ok(())
536 }
537
538 /// Lift the result of a function directly from the stack result.
539 ///
540 /// This is only used when the result fits in the maximum number of stack
541 /// slots.
542 fn lift_stack_result(
543 cx: &mut LiftContext<'_>,
544 ty: InterfaceType,
545 dst: &Return::Lower,
546 ) -> Result<Return> {
547 Return::linear_lift_from_flat(cx, ty, dst)
548 }
549
550 /// Lift the result of a function where the result is stored indirectly on
551 /// the heap.
552 fn lift_heap_result(
553 cx: &mut LiftContext<'_>,
554 ty: InterfaceType,
555 dst: &ValRaw,
556 ) -> Result<Return> {
557 assert!(Return::flatten_count() > MAX_FLAT_RESULTS);
558 // FIXME(#4311): needs to read an i64 for memory64
559 let ptr = usize::try_from(dst.get_u32())?;
560 if ptr % usize::try_from(Return::ALIGN32)? != 0 {
561 bail!("return pointer not aligned");
562 }
563
564 let bytes = cx
565 .memory()
566 .get(ptr..)
567 .and_then(|b| b.get(..Return::SIZE32))
568 .ok_or_else(|| crate::format_err!("pointer out of bounds of memory"))?;
569 Return::linear_lift_from_memory(cx, ty, bytes)
570 }
571
572 #[doc(hidden)]
573 #[deprecated(note = "no longer needs to be called; this function has no effect")]
574 pub fn post_return(&self, _store: impl AsContextMut) -> Result<()> {
575 Ok(())
576 }
577
578 #[doc(hidden)]
579 #[deprecated(note = "no longer needs to be called; this function has no effect")]
580 #[cfg(feature = "async")]
581 pub async fn post_return_async<T: Send>(
582 &self,
583 _store: impl AsContextMut<Data = T>,
584 ) -> Result<()> {
585 Ok(())
586 }
587}
588
/// A trait representing a static list of named types that can be passed to or
/// returned from a [`TypedFunc`].
///
/// This trait is implemented for a number of tuple types and is not expected
/// to be implemented externally. The contents of this trait are hidden as it's
/// intended to be an implementation detail of Wasmtime. The contents of this
/// trait are not covered by Wasmtime's stability guarantees.
///
/// For more information about this trait see [`Func::typed`] and
/// [`TypedFunc`].
//
// Note that this is an `unsafe` trait, and the unsafety means that
// implementations of this trait must be correct or otherwise [`TypedFunc`]
// would not be memory safe. The main reason this is `unsafe` is the
// `typecheck` function which must operate correctly relative to the `AsTuple`
// interpretation of the implementor.
pub unsafe trait ComponentNamedList: ComponentType {}
606
/// A trait representing types which can be passed to and read from components
/// with the canonical ABI.
///
/// This trait is implemented for Rust types which can be communicated to
/// components. The [`Func::typed`] and [`TypedFunc`] Rust items are the main
/// consumers of this trait.
///
/// Supported Rust types include:
///
/// | Component Model Type              | Rust Type                            |
/// |-----------------------------------|--------------------------------------|
/// | `{s,u}{8,16,32,64}`               | `{i,u}{8,16,32,64}`                  |
/// | `f{32,64}`                        | `f{32,64}`                           |
/// | `bool`                            | `bool`                               |
/// | `char`                            | `char`                               |
/// | `tuple<A, B>`                     | `(A, B)`                             |
/// | `option<T>`                       | `Option<T>`                          |
/// | `result`                          | `Result<(), ()>`                     |
/// | `result<T>`                       | `Result<T, ()>`                      |
/// | `result<_, E>`                    | `Result<(), E>`                      |
/// | `result<T, E>`                    | `Result<T, E>`                       |
/// | `string`                          | `String`, `&str`, or [`WasmStr`]     |
/// | `list<T>`                         | `Vec<T>`, `&[T]`, or [`WasmList`]    |
/// | `map<K, V>`                       | `HashMap<K, V>`                      |
/// | `own<T>`, `borrow<T>`             | [`Resource<T>`] or [`ResourceAny`]   |
/// | `record`                          | [`#[derive(ComponentType)]`][d-cm]   |
/// | `variant`                         | [`#[derive(ComponentType)]`][d-cm]   |
/// | `enum`                            | [`#[derive(ComponentType)]`][d-cm]   |
/// | `flags`                           | [`flags!`][f-m]                      |
///
/// [`Resource<T>`]: crate::component::Resource
/// [`ResourceAny`]: crate::component::ResourceAny
/// [d-cm]: macro@crate::component::ComponentType
/// [f-m]: crate::component::flags
///
/// Rust standard library pointers such as `&T`, `Box<T>`, and `Arc<T>`
/// additionally represent whatever type `T` represents in the component model.
/// Note that types such as `record`, `variant`, `enum`, and `flags` are
/// generated by the embedder at compile time. These macros derive
/// implementation of this trait for custom types to map to custom types in the
/// component model. Note that for `record`, `variant`, `enum`, and `flags`
/// those types are often generated by the
/// [`bindgen!`](crate::component::bindgen) macro from WIT definitions.
///
/// Types that implement [`ComponentType`] are used for `Params` and `Return`
/// in [`TypedFunc`] and [`Func::typed`].
///
/// The contents of this trait are hidden as it's intended to be an
/// implementation detail of Wasmtime. The contents of this trait are not
/// covered by Wasmtime's stability guarantees.
///
/// # Safety
///
/// Note that this is an `unsafe` trait as `TypedFunc`'s safety heavily relies on
/// the correctness of the implementations of this trait. Some ways in which this
/// trait must be correct to be safe are:
///
/// * The `Lower` associated type must be a `ValRaw` sequence. It doesn't have to
///   literally be `[ValRaw; N]` but when laid out in memory it must be adjacent
///   `ValRaw` values and have a multiple of the size of `ValRaw` and the same
///   alignment.
///
/// * The `lower` function must initialize the bits within `Lower` that are going
///   to be read by the trampoline that's used to enter core wasm. A trampoline
///   is passed `*mut Lower` and will read the canonical abi arguments in
///   sequence, so all of the bits must be correctly initialized.
///
/// * The `size` and `align` functions must be correct for this value stored in
///   the canonical ABI. The `Cursor<T>` iteration of these bytes rely on this
///   for correctness as they otherwise eschew bounds-checking.
///
/// There are likely some other correctness issues which aren't documented as
/// well, this isn't currently an exhaustive list. It suffices to say, though,
/// that correctness bugs in this trait implementation are highly likely to
/// lead to security bugs, which again leads to the `unsafe` in the trait.
///
/// Note that this trait specifically is not sealed because `bindgen!`-generated
/// types must be able to implement this trait using a `#[derive]` macro. For
/// users it's recommended to not implement this trait manually given the
/// non-exhaustive list of safety requirements that must be upheld. This trait
/// is implemented at your own risk if you do so.
///
/// # Send and Sync
///
/// While on the topic of safety it's worth discussing the `Send` and `Sync`
/// bounds here as well. These bounds might naively seem like they shouldn't be
/// required for all component types as they're host-level types not guest-level
/// types persisted anywhere. Various subtleties lead to these bounds, however:
///
/// * Fibers require that all stack-local variables are `Send` and `Sync` for
///   fibers themselves to be send/sync. Unfortunately we have no help from the
///   compiler on this one so it's up to Wasmtime's discipline to maintain this.
///   One instance of this is that return values are placed on the stack as
///   they're lowered into guest memory. This lowering operation can involve
///   malloc and context switches, so return values must be Send/Sync.
///
/// * In the implementation of component model async it's not uncommon for types
///   to be "buffered" in the store temporarily. For example parameters might
///   reside in a store temporarily while wasm has backpressure turned on.
///
/// Overall it's generally easiest to require `Send` and `Sync` for all
/// component types. There additionally aren't known use cases for non-`Send` or
/// non-`Sync` types at this time.
pub unsafe trait ComponentType: Send + Sync {
    /// Representation of the "lowered" form of this component value.
    ///
    /// Lowerings lower into core wasm values which are represented by `ValRaw`.
    /// This `Lower` type must be a list of `ValRaw` as either a literal array
    /// or a struct where every field is a `ValRaw`. This must be `Copy` (as
    /// `ValRaw` is `Copy`) and support all byte patterns. This being correct is
    /// one reason why the trait is unsafe.
    #[doc(hidden)]
    type Lower: Copy;

    /// The information about this type's canonical ABI (size/align/etc).
    #[doc(hidden)]
    const ABI: CanonicalAbiInfo;

    // Size and alignment of this type in a 32-bit linear memory, derived from
    // `ABI`.
    #[doc(hidden)]
    const SIZE32: usize = Self::ABI.size32 as usize;
    #[doc(hidden)]
    const ALIGN32: u32 = Self::ABI.align32;

    // Overridden to `true` by implementations which are literally Rust's unit
    // type `()`.
    #[doc(hidden)]
    const IS_RUST_UNIT_TYPE: bool = false;

    /// Whether this type might require a call to the guest's realloc function
    /// to allocate linear memory when lowering (e.g. a non-empty `string`).
    ///
    /// If this is `false`, Wasmtime may optimize lowering by using
    /// `LowerContext::new_without_realloc` and lowering values outside of any
    /// fiber. That will panic if the lowering process ends up needing realloc
    /// after all, so `true` is a conservative default.
    #[doc(hidden)]
    const MAY_REQUIRE_REALLOC: bool = true;

    /// Returns the number of core wasm abi values that will be used to
    /// represent this type in its lowered form.
    ///
    /// This divides the size of `Self::Lower` by the size of `ValRaw`.
    #[doc(hidden)]
    fn flatten_count() -> usize {
        // Sanity-check the `Lower` layout contract described on the trait:
        // a whole number of `ValRaw`s with `ValRaw` alignment.
        assert!(mem::size_of::<Self::Lower>() % mem::size_of::<ValRaw>() == 0);
        assert!(mem::align_of::<Self::Lower>() == mem::align_of::<ValRaw>());
        mem::size_of::<Self::Lower>() / mem::size_of::<ValRaw>()
    }

    /// Performs a type-check to see whether this component value type matches
    /// the interface type `ty` provided.
    #[doc(hidden)]
    fn typecheck(ty: &InterfaceType, types: &InstanceType<'_>) -> Result<()>;
}
759
// Implementation detail for component types with a variant-style canonical
// ABI representation (a discriminant plus an optional payload per case).
#[doc(hidden)]
pub unsafe trait ComponentVariant: ComponentType {
    /// Canonical ABI info for each case's payload; `None` for cases without a
    /// payload.
    const CASES: &'static [Option<CanonicalAbiInfo>];
    /// Layout info derived from `CASES` at compile time.
    const INFO: VariantInfo = VariantInfo::new_static(Self::CASES);
    /// Byte offset of the payload within this type's 32-bit canonical ABI
    /// representation, taken from `INFO`.
    const PAYLOAD_OFFSET32: usize = Self::INFO.payload_offset32 as usize;
}
766
/// Host types which can be passed to WebAssembly components.
///
/// This trait is implemented for all types that can be passed to components
/// either as parameters of component exports or returns of component imports.
/// This trait represents the ability to convert from the native host
/// representation to the canonical ABI.
///
/// Built-in types to Rust such as `Option<T>` implement this trait as
/// appropriate. For a mapping of component model to Rust types see
/// [`ComponentType`].
///
/// For user-defined types, for example `record` types mapped to Rust `struct`s,
/// this crate additionally has
/// [`#[derive(Lower)]`](macro@crate::component::Lower).
///
/// Note that like [`ComponentType`] the definition of this trait is intended to
/// be an internal implementation detail of Wasmtime at this time. It's
/// recommended to use the `#[derive(Lower)]` implementation instead.
pub unsafe trait Lower: ComponentType {
    /// Performs the "lower" function in the linear memory version of the
    /// canonical ABI.
    ///
    /// This method will lower the current value into a component. The `lower`
    /// function performs a "flat" lowering into the `dst` specified which is
    /// allowed to be uninitialized entering this method but is guaranteed to be
    /// fully initialized if the method returns `Ok(())`.
    ///
    /// The `cx` context provided is the context within which this lowering is
    /// happening. This contains information such as canonical options specified
    /// (e.g. string encodings, memories, etc), the store itself, along with
    /// type information.
    ///
    /// The `ty` parameter is the destination type that is being lowered into.
    /// For example this is the component's "view" of the type that is being
    /// lowered. This is guaranteed to have passed a `typecheck` earlier.
    ///
    /// This will only be called if `typecheck` passes for `Op::Lower`.
    #[doc(hidden)]
    fn linear_lower_to_flat<T>(
        &self,
        cx: &mut LowerContext<'_, T>,
        ty: InterfaceType,
        dst: &mut MaybeUninit<Self::Lower>,
    ) -> Result<()>;

    /// Performs the "store" operation in the linear memory version of the
    /// canonical ABI.
    ///
    /// This function will store `self` into the linear memory described by
    /// `cx` at the `offset` provided.
    ///
    /// It is expected that `offset` is a valid offset in memory for
    /// `Self::SIZE32` bytes. At this time that's not an unsafe contract as it's
    /// always re-checked on all stores, but this is something that will need to
    /// be improved in the future to remove extra bounds checks. For now this
    /// function will panic if there's a bug and `offset` isn't valid within
    /// memory.
    ///
    /// The `ty` type information passed here is the same as the type
    /// information passed to `lower` above, and is the component's own view of
    /// what the resulting type should be.
    ///
    /// This will only be called if `typecheck` passes for `Op::Lower`.
    #[doc(hidden)]
    fn linear_lower_to_memory<T>(
        &self,
        cx: &mut LowerContext<'_, T>,
        ty: InterfaceType,
        offset: usize,
    ) -> Result<()>;

    /// Provided method to lower a list of `Self` into memory.
    ///
    /// Requires that `offset` has already been checked for alignment and
    /// validity in terms of being in-bounds, otherwise this may panic.
    ///
    /// This is primarily here to get overridden for implementations of integers
    /// which can avoid some extra fluff and use a pattern that's more easily
    /// optimizable by LLVM.
    #[doc(hidden)]
    fn linear_store_list_to_memory<T>(
        cx: &mut LowerContext<'_, T>,
        ty: InterfaceType,
        mut offset: usize,
        items: &[Self],
    ) -> Result<()>
    where
        Self: Sized,
    {
        // Store each element in sequence, advancing `offset` by the element's
        // canonical 32-bit size after each one.
        for item in items {
            item.linear_lower_to_memory(cx, ty, offset)?;
            offset += Self::SIZE32;
        }
        Ok(())
    }
}
863
/// Host types which can be created from the canonical ABI.
///
/// This is the mirror of the [`Lower`] trait where it represents the capability
/// of acquiring items from WebAssembly and passing them to the host.
///
/// Built-in types to Rust such as `Option<T>` implement this trait as
/// appropriate. For a mapping of component model to Rust types see
/// [`ComponentType`].
///
/// For user-defined types, for example `record` types mapped to Rust `struct`s,
/// this crate additionally has
/// [`#[derive(Lift)]`](macro@crate::component::Lift).
///
/// Note that like [`ComponentType`] the definition of this trait is intended to
/// be an internal implementation detail of Wasmtime at this time. It's
/// recommended to use the `#[derive(Lift)]` implementation instead.
pub unsafe trait Lift: Sized + ComponentType {
    /// Performs the "lift" operation in the linear memory version of the
    /// canonical ABI.
    ///
    /// This function performs a "flat" lift operation from the `src` specified
    /// which is a sequence of core wasm values. The lifting operation will
    /// validate core wasm values and produce a `Self` on success.
    ///
    /// The `cx` provided contains contextual information such as the store
    /// that's being loaded from, canonical options, and type information.
    ///
    /// The `ty` parameter is the origin component's specification for what the
    /// type that is being lifted is. For example this is the record type or the
    /// resource type that is being lifted.
    ///
    /// Note that this has a default implementation but if `typecheck` passes
    /// for `Op::Lift` this needs to be overridden.
    #[doc(hidden)]
    fn linear_lift_from_flat(
        cx: &mut LiftContext<'_>,
        ty: InterfaceType,
        src: &Self::Lower,
    ) -> Result<Self>;

    /// Performs the "load" operation in the linear memory version of the
    /// canonical ABI.
    ///
    /// This will read the `bytes` provided, which are a sub-slice into the
    /// linear memory described by `cx`. The `bytes` array provided is
    /// guaranteed to be `Self::SIZE32` bytes large. All of memory is then also
    /// available through `cx` for bounds-checks and such as necessary for
    /// strings/lists.
    ///
    /// The `ty` argument is the type that's being loaded, as described by the
    /// original component.
    ///
    /// Note that this has a default implementation but if `typecheck` passes
    /// for `Op::Lift` this needs to be overridden.
    #[doc(hidden)]
    fn linear_lift_from_memory(
        cx: &mut LiftContext<'_>,
        ty: InterfaceType,
        bytes: &[u8],
    ) -> Result<Self>;

    /// Converts `list` into a `Vec<T>`, used in `Lift for Vec<T>`.
    #[doc(hidden)]
    fn linear_lift_list_from_memory(
        cx: &mut LiftContext<'_>,
        list: &WasmList<Self>,
    ) -> Result<Vec<Self>>
    where
        Self: Sized,
    {
        let mut dst = Vec::with_capacity(list.len);
        Self::linear_lift_into_from_memory(cx, list, &mut dst)?;
        Ok(dst)
    }

    /// Lifts every element of `list` from linear memory into `dst`.
    ///
    /// This is primarily here to get overridden for implementations of integers
    /// which can avoid some extra fluff and use a pattern that's more easily
    /// optimizable by LLVM.
    #[doc(hidden)]
    fn linear_lift_into_from_memory(
        cx: &mut LiftContext<'_>,
        list: &WasmList<Self>,
        dst: &mut impl Extend<Self>,
    ) -> Result<()>
    where
        Self: Sized,
    {
        for i in 0..list.len {
            // `i` is always in-bounds (`i < list.len`), so the element lookup
            // can't return `None` and the `unwrap` here can't fail.
            dst.extend(Some(list.get_from_store(cx, i).unwrap()?));
        }
        Ok(())
    }
}
959
// Macro to help generate "forwarding implementations" of `ComponentType` to
// another type, used for wrappers in Rust like `&T`, `Box<T>`, etc. Note that
// borrowed wrappers such as `&T` can only be lowered (lifting into a borrow of
// native Rust data cannot be done), while the owned containers covered here
// (`String`, `Vec<T>`, `Box<T>`, `Arc<T>`) additionally receive `Lift`
// implementations further below in this file.
macro_rules! forward_type_impls {
    ($(($($generics:tt)*) $a:ty => $b:ty,)*) => ($(
        // All type-level metadata — the flat representation, ABI layout,
        // realloc requirement, and typechecking — is forwarded to `$b`.
        unsafe impl <$($generics)*> ComponentType for $a {
            type Lower = <$b as ComponentType>::Lower;

            const ABI: CanonicalAbiInfo = <$b as ComponentType>::ABI;
            const MAY_REQUIRE_REALLOC: bool = <$b as ComponentType>::MAY_REQUIRE_REALLOC;

            #[inline]
            fn typecheck(ty: &InterfaceType, types: &InstanceType<'_>) -> Result<()> {
                <$b as ComponentType>::typecheck(ty, types)
            }
        }
    )*)
}
979
// Wrapper types share all `ComponentType` metadata with the type they
// dereference to.
forward_type_impls! {
    (T: ComponentType + ?Sized) &'_ T => T,
    (T: ComponentType + ?Sized) Box<T> => T,
    (T: ComponentType + ?Sized) alloc::sync::Arc<T> => T,
    () String => str,
    (T: ComponentType) Vec<T> => [T],
}
987
// Macro to generate `Lower` implementations which delegate to another type's
// implementation, mirroring `forward_type_impls!` above. Note that the store
// data type parameter is named `U` here to avoid colliding with the `T` used
// in the `$generics` lists of the invocations below.
macro_rules! forward_lowers {
    ($(($($generics:tt)*) $a:ty => $b:ty,)*) => ($(
        unsafe impl <$($generics)*> Lower for $a {
            fn linear_lower_to_flat<U>(
                &self,
                cx: &mut LowerContext<'_, U>,
                ty: InterfaceType,
                dst: &mut MaybeUninit<Self::Lower>,
            ) -> Result<()> {
                <$b as Lower>::linear_lower_to_flat(self, cx, ty, dst)
            }

            fn linear_lower_to_memory<U>(
                &self,
                cx: &mut LowerContext<'_, U>,
                ty: InterfaceType,
                offset: usize,
            ) -> Result<()> {
                <$b as Lower>::linear_lower_to_memory(self, cx, ty, offset)
            }
        }
    )*)
}
1011
// Wrapper types lower exactly as the type they dereference to.
forward_lowers! {
    (T: Lower + ?Sized) &'_ T => T,
    (T: Lower + ?Sized) Box<T> => T,
    (T: Lower + ?Sized) alloc::sync::Arc<T> => T,
    () String => str,
    (T: Lower) Vec<T> => [T],
}
1019
// Macro to generate `Lift` for owned string containers: the raw pointer/length
// pair is first lifted as a `WasmStr` and then eagerly decoded from the
// component's configured string encoding into an owned host value.
macro_rules! forward_string_lifts {
    ($($a:ty,)*) => ($(
        unsafe impl Lift for $a {
            #[inline]
            fn linear_lift_from_flat(cx: &mut LiftContext<'_>, ty: InterfaceType, src: &Self::Lower) -> Result<Self> {
                let s = <WasmStr as Lift>::linear_lift_from_flat(cx, ty, src)?;
                let encoding = cx.options().string_encoding;
                Ok(s.to_str_from_memory(encoding, cx.memory())?.into())
            }

            #[inline]
            fn linear_lift_from_memory(cx: &mut LiftContext<'_>, ty: InterfaceType, bytes: &[u8]) -> Result<Self> {
                let s = <WasmStr as Lift>::linear_lift_from_memory(cx, ty, bytes)?;
                let encoding = cx.options().string_encoding;
                Ok(s.to_str_from_memory(encoding, cx.memory())?.into())
            }
        }
    )*)
}
1039
// Owned string containers which eagerly copy out of linear memory when lifted.
forward_string_lifts! {
    Box<str>,
    alloc::sync::Arc<str>,
    String,
}
1045
// Macro to generate `Lift` for owned list containers: the raw pointer/length
// pair is first lifted as a `WasmList<T>` and the elements are then eagerly
// copied out of linear memory into a `Vec<T>`-convertible container.
macro_rules! forward_list_lifts {
    ($($a:ty,)*) => ($(
        unsafe impl <T: Lift> Lift for $a {
            fn linear_lift_from_flat(cx: &mut LiftContext<'_>, ty: InterfaceType, src: &Self::Lower) -> Result<Self> {
                let list = <WasmList::<T> as Lift>::linear_lift_from_flat(cx, ty, src)?;
                Ok(T::linear_lift_list_from_memory(cx, &list)?.into())
            }

            fn linear_lift_from_memory(cx: &mut LiftContext<'_>, ty: InterfaceType, bytes: &[u8]) -> Result<Self> {
                let list = <WasmList::<T> as Lift>::linear_lift_from_memory(cx, ty, bytes)?;
                Ok(T::linear_lift_list_from_memory(cx, &list)?.into())
            }
        }
    )*)
}
1061
// Owned list containers which eagerly copy out of linear memory when lifted.
forward_list_lifts! {
    Box<[T]>,
    alloc::sync::Arc<[T]>,
    Vec<T>,
}
1067
// Macro to generate `ComponentType`/`Lower`/`Lift` implementations for the
// primitive integer types. (`char` and `bool` have dedicated impls below.)
// Parameters per entry:
//
// * `$primitive` - the host Rust integer type (e.g. `u8`)
// * `$ty` - the matching `InterfaceType` variant (e.g. `U8`)
// * `$field`/`$get` - the `ValRaw` constructor/accessor used for the flat
//   core-wasm representation
// * `$abi` - the `CanonicalAbiInfo` scalar layout constant
macro_rules! integers {
    ($($primitive:ident = $ty:ident in $field:ident/$get:ident with abi:$abi:ident,)*) => ($(
        unsafe impl ComponentType for $primitive {
            type Lower = ValRaw;

            const ABI: CanonicalAbiInfo = CanonicalAbiInfo::$abi;

            const MAY_REQUIRE_REALLOC: bool = false;

            fn typecheck(ty: &InterfaceType, _types: &InstanceType<'_>) -> Result<()> {
                match ty {
                    InterfaceType::$ty => Ok(()),
                    other => bail!("expected `{}` found `{}`", desc(&InterfaceType::$ty), desc(other))
                }
            }
        }

        unsafe impl Lower for $primitive {
            #[inline]
            #[allow(trivial_numeric_casts, reason = "macro-generated code")]
            fn linear_lower_to_flat<T>(
                &self,
                _cx: &mut LowerContext<'_, T>,
                ty: InterfaceType,
                dst: &mut MaybeUninit<Self::Lower>,
            ) -> Result<()> {
                debug_assert!(matches!(ty, InterfaceType::$ty));
                // The `as` cast extends small integers to the `ValRaw` field's
                // width (sign-extending for signed types, zero-extending for
                // unsigned ones).
                dst.write(ValRaw::$field(*self as $field));
                Ok(())
            }

            #[inline]
            fn linear_lower_to_memory<T>(
                &self,
                cx: &mut LowerContext<'_, T>,
                ty: InterfaceType,
                offset: usize,
            ) -> Result<()> {
                debug_assert!(matches!(ty, InterfaceType::$ty));
                debug_assert!(offset % Self::SIZE32 == 0);
                *cx.get(offset) = self.to_le_bytes();
                Ok(())
            }

            fn linear_store_list_to_memory<T>(
                cx: &mut LowerContext<'_, T>,
                ty: InterfaceType,
                offset: usize,
                items: &[Self],
            ) -> Result<()> {
                debug_assert!(matches!(ty, InterfaceType::$ty));

                // Double-check that the CM alignment is at least the host's
                // alignment for this type which should be true for all
                // platforms.
                assert!((Self::ALIGN32 as usize) >= mem::align_of::<Self>());

                // Slice `cx`'s memory to the window that we'll be modifying.
                // This should all have already been verified in terms of
                // alignment and sizing meaning that these assertions here are
                // not truly necessary but are instead double-checks.
                //
                // Note that we're casting a `[u8]` slice to `[Self]` with
                // `align_to_mut` which is not safe in general but is safe in
                // our specific case as all `u8` patterns are valid `Self`
                // patterns since `Self` is an integral type.
                let dst = &mut cx.as_slice_mut()[offset..][..items.len() * Self::SIZE32];
                let (before, middle, end) = unsafe { dst.align_to_mut::<Self>() };
                assert!(before.is_empty() && end.is_empty());
                assert_eq!(middle.len(), items.len());

                // And with all that out of the way perform the copying loop.
                // This is not a `copy_from_slice` because endianness needs to
                // be handled here, but LLVM should pretty easily transform this
                // into a memcpy on little-endian platforms.
                for (dst, src) in middle.iter_mut().zip(items) {
                    *dst = src.to_le();
                }
                Ok(())
            }
        }

        unsafe impl Lift for $primitive {
            #[inline]
            #[allow(
                trivial_numeric_casts,
                clippy::cast_possible_truncation,
                reason = "macro-generated code"
            )]
            fn linear_lift_from_flat(_cx: &mut LiftContext<'_>, ty: InterfaceType, src: &Self::Lower) -> Result<Self> {
                debug_assert!(matches!(ty, InterfaceType::$ty));
                Ok(src.$get() as $primitive)
            }

            #[inline]
            fn linear_lift_from_memory(_cx: &mut LiftContext<'_>, ty: InterfaceType, bytes: &[u8]) -> Result<Self> {
                debug_assert!(matches!(ty, InterfaceType::$ty));
                debug_assert!((bytes.as_ptr() as usize) % Self::SIZE32 == 0);
                Ok($primitive::from_le_bytes(*bytes.as_array().unwrap()))
            }

            fn linear_lift_into_from_memory(
                cx: &mut LiftContext<'_>,
                list: &WasmList<Self>,
                dst: &mut impl Extend<Self>,
            ) -> Result<()>
            where
                Self: Sized,
            {
                // Bulk-read the little-endian slice out of linear memory,
                // converting each element to host endianness on the way out.
                dst.extend(list._as_le_slice(cx.memory())
                    .iter()
                    .map(|i| Self::from_le(*i)));
                Ok(())
            }
        }
    )*)
}
1187
// Each host integer maps to its interface-type variant, is carried in the
// listed `ValRaw` field when flattened, and uses the canonical ABI scalar
// layout matching its byte size.
integers! {
    i8 = S8 in i32/get_i32 with abi:SCALAR1,
    u8 = U8 in u32/get_u32 with abi:SCALAR1,
    i16 = S16 in i32/get_i32 with abi:SCALAR2,
    u16 = U16 in u32/get_u32 with abi:SCALAR2,
    i32 = S32 in i32/get_i32 with abi:SCALAR4,
    u32 = U32 in u32/get_u32 with abi:SCALAR4,
    i64 = S64 in i64/get_i64 with abi:SCALAR8,
    u64 = U64 in u64/get_u64 with abi:SCALAR8,
}
1198
// Macro to generate `ComponentType`/`Lower`/`Lift` implementations for the
// primitive float types. Values transit through `to_bits`/`from_bits` so the
// exact bit pattern is preserved in both directions.
macro_rules! floats {
    ($($float:ident/$get_float:ident = $ty:ident with abi:$abi:ident)*) => ($(const _: () = {
        unsafe impl ComponentType for $float {
            type Lower = ValRaw;

            const ABI: CanonicalAbiInfo = CanonicalAbiInfo::$abi;
            const MAY_REQUIRE_REALLOC: bool = false;

            fn typecheck(ty: &InterfaceType, _types: &InstanceType<'_>) -> Result<()> {
                match ty {
                    InterfaceType::$ty => Ok(()),
                    other => bail!("expected `{}` found `{}`", desc(&InterfaceType::$ty), desc(other))
                }
            }
        }

        unsafe impl Lower for $float {
            #[inline]
            fn linear_lower_to_flat<T>(
                &self,
                _cx: &mut LowerContext<'_, T>,
                ty: InterfaceType,
                dst: &mut MaybeUninit<Self::Lower>,
            ) -> Result<()> {
                debug_assert!(matches!(ty, InterfaceType::$ty));
                dst.write(ValRaw::$float(self.to_bits()));
                Ok(())
            }

            #[inline]
            fn linear_lower_to_memory<T>(
                &self,
                cx: &mut LowerContext<'_, T>,
                ty: InterfaceType,
                offset: usize,
            ) -> Result<()> {
                debug_assert!(matches!(ty, InterfaceType::$ty));
                debug_assert!(offset % Self::SIZE32 == 0);
                let ptr = cx.get(offset);
                *ptr = self.to_bits().to_le_bytes();
                Ok(())
            }

            fn linear_store_list_to_memory<T>(
                cx: &mut LowerContext<'_, T>,
                ty: InterfaceType,
                offset: usize,
                items: &[Self],
            ) -> Result<()> {
                debug_assert!(matches!(ty, InterfaceType::$ty));

                // Double-check that the CM alignment is at least the host's
                // alignment for this type which should be true for all
                // platforms.
                assert!((Self::ALIGN32 as usize) >= mem::align_of::<Self>());

                // Slice `cx`'s memory to the window that we'll be modifying.
                // This should all have already been verified in terms of
                // alignment and sizing meaning that these assertions here are
                // not truly necessary but are instead double-checks.
                let dst = &mut cx.as_slice_mut()[offset..][..items.len() * Self::SIZE32];
                assert!(dst.as_ptr().cast::<Self>().is_aligned());

                // And with all that out of the way perform the copying loop.
                // This is not a `copy_from_slice` because endianness needs to
                // be handled here, but LLVM should pretty easily transform this
                // into a memcpy on little-endian platforms.
                let (dst, rest) = dst.as_chunks_mut::<{Self::SIZE32}>();
                debug_assert!(rest.is_empty());
                for (dst, src) in iter::zip(dst, items) {
                    *dst = src.to_le_bytes();
                }
                Ok(())
            }
        }

        unsafe impl Lift for $float {
            #[inline]
            fn linear_lift_from_flat(_cx: &mut LiftContext<'_>, ty: InterfaceType, src: &Self::Lower) -> Result<Self> {
                debug_assert!(matches!(ty, InterfaceType::$ty));
                Ok($float::from_bits(src.$get_float()))
            }

            #[inline]
            fn linear_lift_from_memory(_cx: &mut LiftContext<'_>, ty: InterfaceType, bytes: &[u8]) -> Result<Self> {
                debug_assert!(matches!(ty, InterfaceType::$ty));
                debug_assert!((bytes.as_ptr() as usize) % Self::SIZE32 == 0);
                Ok($float::from_le_bytes(*bytes.as_array().unwrap()))
            }

            fn linear_lift_list_from_memory(cx: &mut LiftContext<'_>, list: &WasmList<Self>) -> Result<Vec<Self>> where Self: Sized {
                // See comments in `WasmList::get` for the panicking indexing
                let byte_size = list.len * mem::size_of::<Self>();
                let bytes = &cx.memory()[list.ptr..][..byte_size];

                // The canonical ABI requires that everything is aligned to its
                // own size, so this should be an aligned array.
                assert!(bytes.as_ptr().cast::<Self>().is_aligned());

                // Copy the resulting slice to a new Vec, handling endianness
                // in the process. Using `as_chunks` (mirroring `as_chunks_mut`
                // in `linear_store_list_to_memory` above) yields fixed-size
                // arrays directly, avoiding a fallible per-element conversion.
                let (chunks, rest) = bytes.as_chunks::<{Self::SIZE32}>();
                debug_assert!(rest.is_empty());
                Ok(chunks
                    .iter()
                    .map(|i| $float::from_le_bytes(*i))
                    .collect())
            }
        }
    };)*)
}
1314
// Float implementations lower/lift as raw bit patterns via
// `to_bits`/`from_bits`.
floats! {
    f32/get_f32 = Float32 with abi:SCALAR4
    f64/get_f64 = Float64 with abi:SCALAR8
}
1319
1320unsafe impl ComponentType for bool {
1321 type Lower = ValRaw;
1322
1323 const ABI: CanonicalAbiInfo = CanonicalAbiInfo::SCALAR1;
1324 const MAY_REQUIRE_REALLOC: bool = false;
1325
1326 fn typecheck(ty: &InterfaceType, _types: &InstanceType<'_>) -> Result<()> {
1327 match ty {
1328 InterfaceType::Bool => Ok(()),
1329 other => bail!("expected `bool` found `{}`", desc(other)),
1330 }
1331 }
1332}
1333
1334unsafe impl Lower for bool {
1335 fn linear_lower_to_flat<T>(
1336 &self,
1337 _cx: &mut LowerContext<'_, T>,
1338 ty: InterfaceType,
1339 dst: &mut MaybeUninit<Self::Lower>,
1340 ) -> Result<()> {
1341 debug_assert!(matches!(ty, InterfaceType::Bool));
1342 dst.write(ValRaw::i32(*self as i32));
1343 Ok(())
1344 }
1345
1346 fn linear_lower_to_memory<T>(
1347 &self,
1348 cx: &mut LowerContext<'_, T>,
1349 ty: InterfaceType,
1350 offset: usize,
1351 ) -> Result<()> {
1352 debug_assert!(matches!(ty, InterfaceType::Bool));
1353 debug_assert!(offset % Self::SIZE32 == 0);
1354 cx.get::<1>(offset)[0] = *self as u8;
1355 Ok(())
1356 }
1357}
1358
1359unsafe impl Lift for bool {
1360 #[inline]
1361 fn linear_lift_from_flat(
1362 _cx: &mut LiftContext<'_>,
1363 ty: InterfaceType,
1364 src: &Self::Lower,
1365 ) -> Result<Self> {
1366 debug_assert!(matches!(ty, InterfaceType::Bool));
1367 match src.get_i32() {
1368 0 => Ok(false),
1369 _ => Ok(true),
1370 }
1371 }
1372
1373 #[inline]
1374 fn linear_lift_from_memory(
1375 _cx: &mut LiftContext<'_>,
1376 ty: InterfaceType,
1377 bytes: &[u8],
1378 ) -> Result<Self> {
1379 debug_assert!(matches!(ty, InterfaceType::Bool));
1380 match bytes[0] {
1381 0 => Ok(false),
1382 _ => Ok(true),
1383 }
1384 }
1385}
1386
1387unsafe impl ComponentType for char {
1388 type Lower = ValRaw;
1389
1390 const ABI: CanonicalAbiInfo = CanonicalAbiInfo::SCALAR4;
1391 const MAY_REQUIRE_REALLOC: bool = false;
1392
1393 fn typecheck(ty: &InterfaceType, _types: &InstanceType<'_>) -> Result<()> {
1394 match ty {
1395 InterfaceType::Char => Ok(()),
1396 other => bail!("expected `char` found `{}`", desc(other)),
1397 }
1398 }
1399}
1400
1401unsafe impl Lower for char {
1402 #[inline]
1403 fn linear_lower_to_flat<T>(
1404 &self,
1405 _cx: &mut LowerContext<'_, T>,
1406 ty: InterfaceType,
1407 dst: &mut MaybeUninit<Self::Lower>,
1408 ) -> Result<()> {
1409 debug_assert!(matches!(ty, InterfaceType::Char));
1410 dst.write(ValRaw::u32(u32::from(*self)));
1411 Ok(())
1412 }
1413
1414 #[inline]
1415 fn linear_lower_to_memory<T>(
1416 &self,
1417 cx: &mut LowerContext<'_, T>,
1418 ty: InterfaceType,
1419 offset: usize,
1420 ) -> Result<()> {
1421 debug_assert!(matches!(ty, InterfaceType::Char));
1422 debug_assert!(offset % Self::SIZE32 == 0);
1423 *cx.get::<4>(offset) = u32::from(*self).to_le_bytes();
1424 Ok(())
1425 }
1426}
1427
1428unsafe impl Lift for char {
1429 #[inline]
1430 fn linear_lift_from_flat(
1431 _cx: &mut LiftContext<'_>,
1432 ty: InterfaceType,
1433 src: &Self::Lower,
1434 ) -> Result<Self> {
1435 debug_assert!(matches!(ty, InterfaceType::Char));
1436 Ok(char::try_from(src.get_u32())?)
1437 }
1438
1439 #[inline]
1440 fn linear_lift_from_memory(
1441 _cx: &mut LiftContext<'_>,
1442 ty: InterfaceType,
1443 bytes: &[u8],
1444 ) -> Result<Self> {
1445 debug_assert!(matches!(ty, InterfaceType::Char));
1446 debug_assert!((bytes.as_ptr() as usize) % Self::SIZE32 == 0);
1447 let bits = u32::from_le_bytes(*bytes.as_array().unwrap());
1448 Ok(char::try_from(bits)?)
1449 }
1450}
1451
1452fn lift_pointer_pair_from_flat(
1453 cx: &mut LiftContext<'_>,
1454 src: &[ValRaw; 2],
1455) -> Result<(usize, usize)> {
1456 // FIXME(#4311): needs memory64 treatment
1457 let _ = cx; // this will be needed for memory64 in the future
1458 let ptr = src[0].get_u32();
1459 let len = src[1].get_u32();
1460 Ok((usize::try_from(ptr)?, usize::try_from(len)?))
1461}
1462
1463fn lift_pointer_pair_from_memory(cx: &mut LiftContext<'_>, bytes: &[u8]) -> Result<(usize, usize)> {
1464 // FIXME(#4311): needs memory64 treatment
1465 let _ = cx; // this will be needed for memory64 in the future
1466 let ptr = u32::from_le_bytes(*bytes[..4].as_array().unwrap());
1467 let len = u32::from_le_bytes(*bytes[4..].as_array().unwrap());
1468 Ok((usize::try_from(ptr)?, usize::try_from(len)?))
1469}
1470
/// Lowers a `(ptr, len)` pair into two flat core wasm values.
fn lower_pointer_pair_to_flat<T>(
    cx: &mut LowerContext<T>,
    dst: &mut MaybeUninit<[ValRaw; 2]>,
    ptr: usize,
    len: usize,
) {
    // See "WRITEPTR64" above for why this is always storing a 64-bit
    // integer.
    let _ = cx; // this will eventually be needed for memory64 information.
    map_maybe_uninit!(dst[0]).write(ValRaw::i64(ptr as i64));
    map_maybe_uninit!(dst[1]).write(ValRaw::i64(len as i64));
}
1483
1484fn lower_pointer_pair_to_memory<T>(
1485 cx: &mut LowerContext<T>,
1486 offset: usize,
1487 ptr: usize,
1488 len: usize,
1489) {
1490 // FIXME(#4311): needs memory64 handling
1491 *cx.get(offset + 0) = u32::try_from(ptr).unwrap().to_le_bytes();
1492 *cx.get(offset + 4) = u32::try_from(len).unwrap().to_le_bytes();
1493}
1494
// FIXME(#4311): these probably need different constants for memory64
//
// For the latin1+utf16 encoding the high bit of the length word records which
// of the two representations was actually used (set = utf16). This also caps
// the byte length of any lowered string at `1 << 31` - 1.
const UTF16_TAG: usize = 1 << 31;
const MAX_STRING_BYTE_LENGTH: usize = (1 << 31) - 1;
1498
1499// Note that this is similar to `ComponentType for WasmStr` except it can only
1500// be used for lowering, not lifting.
1501unsafe impl ComponentType for str {
1502 type Lower = [ValRaw; 2];
1503
1504 const ABI: CanonicalAbiInfo = CanonicalAbiInfo::POINTER_PAIR;
1505
1506 fn typecheck(ty: &InterfaceType, _types: &InstanceType<'_>) -> Result<()> {
1507 match ty {
1508 InterfaceType::String => Ok(()),
1509 other => bail!("expected `string` found `{}`", desc(other)),
1510 }
1511 }
1512}
1513
unsafe impl Lower for str {
    // Copies the string into guest memory via `lower_string` and then lowers
    // the resulting (ptr, len) pair as two flat core wasm values.
    fn linear_lower_to_flat<T>(
        &self,
        cx: &mut LowerContext<'_, T>,
        ty: InterfaceType,
        dst: &mut MaybeUninit<[ValRaw; 2]>,
    ) -> Result<()> {
        debug_assert!(matches!(ty, InterfaceType::String));
        let (ptr, len) = lower_string(cx, self)?;
        lower_pointer_pair_to_flat(cx, dst, ptr, len);
        Ok(())
    }

    // Copies the string into guest memory via `lower_string` and then stores
    // the resulting (ptr, len) pair at `offset`.
    fn linear_lower_to_memory<T>(
        &self,
        cx: &mut LowerContext<'_, T>,
        ty: InterfaceType,
        offset: usize,
    ) -> Result<()> {
        debug_assert!(matches!(ty, InterfaceType::String));
        debug_assert!(offset % (Self::ALIGN32 as usize) == 0);
        let (ptr, len) = lower_string(cx, self)?;
        lower_pointer_pair_to_memory(cx, offset, ptr, len);
        Ok(())
    }
}
1540
/// Copies `string` into the guest's linear memory in the guest's configured
/// string encoding, returning the `(ptr, len)` pair to lower.
///
/// The returned length is measured in the destination encoding's units (bytes
/// for utf8, 16-bit code units for utf16) and, for the latin1+utf16 encoding,
/// has `UTF16_TAG` set when the utf16 representation ended up being used.
fn lower_string<T>(cx: &mut LowerContext<'_, T>, string: &str) -> Result<(usize, usize)> {
    // Note that in general the wasm module can't assume anything about what the
    // host strings are encoded as. Additionally hosts are allowed to have
    // differently-encoded strings at runtime. Finally when copying a string
    // into wasm it's somewhat strict in the sense that the various patterns of
    // allocation and such are already dictated for us.
    //
    // In general what this means is that when copying a string from the host
    // into the destination we need to follow one of the cases of copying into
    // WebAssembly. It doesn't particularly matter which case as long as it ends
    // up in the right encoding. For example a destination encoding of
    // latin1+utf16 has a number of ways to get copied into and we do something
    // here that isn't the default "utf8 to latin1+utf16" since we have access
    // to simd-accelerated helpers in the `encoding_rs` crate. This is ok though
    // because we can fake that the host string was already stored in latin1
    // format and follow that copy pattern instead.
    match cx.options().string_encoding {
        // This corresponds to `store_string_copy` in the canonical ABI where
        // the host's representation is utf-8 and the wasm module wants utf-8 so
        // a copy is all that's needed (and the `realloc` can be precise for the
        // initial memory allocation).
        StringEncoding::Utf8 => {
            if string.len() > MAX_STRING_BYTE_LENGTH {
                bail!(
                    "string length of {} too large to copy into wasm",
                    string.len()
                );
            }
            let ptr = cx.realloc(0, 0, 1, string.len())?;
            cx.as_slice_mut()[ptr..][..string.len()].copy_from_slice(string.as_bytes());
            Ok((ptr, string.len()))
        }

        // This corresponds to `store_utf8_to_utf16` in the canonical ABI. Here
        // an over-large allocation is performed and then shrunk afterwards if
        // necessary.
        StringEncoding::Utf16 => {
            let size = string.len() * 2;
            if size > MAX_STRING_BYTE_LENGTH {
                bail!(
                    "string length of {} too large to copy into wasm",
                    string.len()
                );
            }
            let mut ptr = cx.realloc(0, 0, 2, size)?;
            // `copied` counts the 16-bit code units written; the returned
            // length is in code units, not bytes.
            let mut copied = 0;
            let bytes = &mut cx.as_slice_mut()[ptr..][..size];
            for (u, bytes) in string.encode_utf16().zip(bytes.chunks_mut(2)) {
                let u_bytes = u.to_le_bytes();
                bytes[0] = u_bytes[0];
                bytes[1] = u_bytes[1];
                copied += 1;
            }
            if (copied * 2) < size {
                ptr = cx.realloc(ptr, size, 2, copied * 2)?;
            }
            Ok((ptr, copied))
        }

        StringEncoding::CompactUtf16 => {
            // This corresponds to `store_string_to_latin1_or_utf16`
            let bytes = string.as_bytes();
            let mut iter = string.char_indices();
            let mut ptr = cx.realloc(0, 0, 2, bytes.len())?;
            let mut dst = &mut cx.as_slice_mut()[ptr..][..bytes.len()];
            let mut result = 0;
            while let Some((i, ch)) = iter.next() {
                // Test if this `char` fits into the latin1 encoding.
                if let Ok(byte) = u8::try_from(u32::from(ch)) {
                    dst[result] = byte;
                    result += 1;
                    continue;
                }

                // .. if utf16 is forced to be used then the allocation is
                // bumped up to the maximum size.
                let worst_case = bytes
                    .len()
                    .checked_mul(2)
                    .ok_or_else(|| format_err!("byte length overflow"))?;
                if worst_case > MAX_STRING_BYTE_LENGTH {
                    bail!("byte length too large");
                }
                ptr = cx.realloc(ptr, bytes.len(), 2, worst_case)?;
                dst = &mut cx.as_slice_mut()[ptr..][..worst_case];

                // Previously encoded latin1 bytes are inflated to their 16-bit
                // size for utf16
                for i in (0..result).rev() {
                    dst[2 * i] = dst[i];
                    dst[2 * i + 1] = 0;
                }

                // and then the remainder of the string is encoded.
                for (u, bytes) in string[i..]
                    .encode_utf16()
                    .zip(dst[2 * result..].chunks_mut(2))
                {
                    let u_bytes = u.to_le_bytes();
                    bytes[0] = u_bytes[0];
                    bytes[1] = u_bytes[1];
                    result += 1;
                }
                if worst_case > 2 * result {
                    ptr = cx.realloc(ptr, worst_case, 2, 2 * result)?;
                }
                // The tag records that the utf16 representation was chosen.
                return Ok((ptr, result | UTF16_TAG));
            }
            if result < bytes.len() {
                ptr = cx.realloc(ptr, bytes.len(), 2, result)?;
            }
            Ok((ptr, result))
        }
    }
}
1656
/// Representation of a string located in linear memory in a WebAssembly
/// instance.
///
/// This type can be used in place of `String` and `str` for string-taking APIs
/// in some situations. The purpose of this type is to represent a range of
/// validated bytes within a component but does not actually copy the bytes. The
/// primary method, [`WasmStr::to_str`], attempts to return a reference to the
/// string directly located in the component's memory, avoiding a copy into the
/// host if possible.
///
/// The downside of this type, however, is that accessing a string requires a
/// [`Store`](crate::Store) pointer (via [`StoreContext`]). Bindings generated
/// by [`bindgen!`](crate::component::bindgen), for example, do not have access
/// to [`StoreContext`] and thus can't use this type.
///
/// This is intended for more advanced use cases such as defining functions
/// directly in a [`Linker`](crate::component::Linker). It's expected that in
/// the future [`bindgen!`](crate::component::bindgen) will also have a way to
/// use this type.
///
/// This type is used with [`TypedFunc`], for example, when WebAssembly returns
/// a string. This type cannot be used to give a string to WebAssembly, instead
/// `&str` should be used for that (since it's coming from the host).
///
/// Note that this type represents an in-bounds string in linear memory, but it
/// does not represent a valid string (e.g. valid utf-8). Validation happens
/// when [`WasmStr::to_str`] is called.
///
/// Also note that this type does not implement [`Lower`], it only implements
/// [`Lift`].
pub struct WasmStr {
    // Byte offset of the string within the component's linear memory.
    ptr: usize,
    // The canonical-ABI length of the string: a byte count for utf8, a
    // 16-bit-code-unit count for utf16, and possibly carrying `UTF16_TAG` for
    // the latin1+utf16 encoding.
    len: usize,
    // Which canonical options (memory, string encoding) this string was
    // lifted with, resolved through `instance` on access.
    options: OptionsIndex,
    // The instance whose linear memory holds the string.
    instance: Instance,
}
1693
1694impl WasmStr {
    /// Validates a lifted `(ptr, len)` pair and records where the string
    /// lives, without decoding or copying any of its bytes.
    pub(crate) fn new(ptr: usize, len: usize, cx: &mut LiftContext<'_>) -> Result<WasmStr> {
        // Compute the string's byte length from its canonical-ABI length:
        // utf16 lengths are counted in 2-byte code units, and latin1+utf16
        // lengths carry the `UTF16_TAG` bit recording which representation
        // was used. `None` here means the multiplication overflowed.
        let (byte_len, align) = match cx.options().string_encoding {
            StringEncoding::Utf8 => (Some(len), 1_usize),
            StringEncoding::Utf16 => (len.checked_mul(2), 2),
            StringEncoding::CompactUtf16 => {
                if len & UTF16_TAG == 0 {
                    (Some(len), 2)
                } else {
                    ((len ^ UTF16_TAG).checked_mul(2), 2)
                }
            }
        };
        debug_assert!(align.is_power_of_two());
        if ptr & (align - 1) != 0 {
            bail!("string pointer not aligned to {align}");
        }
        // Bounds-check the string against linear memory (an overflowed
        // `byte_len` or end offset also lands in the error arm) and charge
        // fuel proportional to the string's byte length.
        match byte_len.and_then(|len| ptr.checked_add(len)) {
            Some(n) if n <= cx.memory().len() => cx.consume_fuel(n - ptr)?,
            _ => bail!("string pointer/length out of bounds of memory"),
        }
        Ok(WasmStr {
            ptr,
            len,
            options: cx.options_index(),
            instance: cx.instance_handle(),
        })
    }
1722
1723 /// Returns the underlying string that this cursor points to.
1724 ///
1725 /// Note that this will internally decode the string from the wasm's
1726 /// encoding to utf-8 and additionally perform validation.
1727 ///
1728 /// The `store` provided must be the store where this string lives to
1729 /// access the correct memory.
1730 ///
1731 /// # Errors
1732 ///
1733 /// Returns an error if the string wasn't encoded correctly (e.g. invalid
1734 /// utf-8).
1735 ///
1736 /// # Panics
1737 ///
1738 /// Panics if this string is not owned by `store`.
1739 //
1740 // TODO: should add accessors for specifically utf-8 and utf-16 that perhaps
1741 // in an opt-in basis don't do validation. Additionally there should be some
1742 // method that returns `[u16]` after validating to avoid the utf16-to-utf8
1743 // transcode.
1744 pub fn to_str<'a, T: 'static>(
1745 &self,
1746 store: impl Into<StoreContext<'a, T>>,
1747 ) -> Result<Cow<'a, str>> {
1748 let store = store.into().0;
1749 let memory = self.instance.options_memory(store, self.options);
1750 let encoding = self.instance.options(store, self.options).string_encoding;
1751 self.to_str_from_memory(encoding, memory)
1752 }
1753
1754 pub(crate) fn to_str_from_memory<'a>(
1755 &self,
1756 encoding: StringEncoding,
1757 memory: &'a [u8],
1758 ) -> Result<Cow<'a, str>> {
1759 match encoding {
1760 StringEncoding::Utf8 => self.decode_utf8(memory),
1761 StringEncoding::Utf16 => self.decode_utf16(memory, self.len),
1762 StringEncoding::CompactUtf16 => {
1763 if self.len & UTF16_TAG == 0 {
1764 self.decode_latin1(memory)
1765 } else {
1766 self.decode_utf16(memory, self.len ^ UTF16_TAG)
1767 }
1768 }
1769 }
1770 }
1771
1772 fn decode_utf8<'a>(&self, memory: &'a [u8]) -> Result<Cow<'a, str>> {
1773 // Note that bounds-checking already happen in construction of `WasmStr`
1774 // so this is never expected to panic. This could theoretically be
1775 // unchecked indexing if we're feeling wild enough.
1776 Ok(str::from_utf8(&memory[self.ptr..][..self.len])?.into())
1777 }
1778
1779 fn decode_utf16<'a>(&self, memory: &'a [u8], len: usize) -> Result<Cow<'a, str>> {
1780 // See notes in `decode_utf8` for why this is panicking indexing.
1781 let (chunks, rest) = &memory[self.ptr..][..len * 2].as_chunks::<2>();
1782 debug_assert!(rest.is_empty());
1783 Ok(
1784 core::char::decode_utf16(chunks.iter().map(|chunk| u16::from_le_bytes(*chunk)))
1785 .collect::<Result<String, _>>()?
1786 .into(),
1787 )
1788 }
1789
1790 fn decode_latin1<'a>(&self, memory: &'a [u8]) -> Result<Cow<'a, str>> {
1791 // See notes in `decode_utf8` for why this is panicking indexing.
1792 Ok(encoding_rs::mem::decode_latin1(
1793 &memory[self.ptr..][..self.len],
1794 ))
1795 }
1796}
1797
1798// Note that this is similar to `ComponentType for str` except it can only be
1799// used for lifting, not lowering.
1800unsafe impl ComponentType for WasmStr {
1801 type Lower = <str as ComponentType>::Lower;
1802
1803 const ABI: CanonicalAbiInfo = CanonicalAbiInfo::POINTER_PAIR;
1804
1805 fn typecheck(ty: &InterfaceType, _types: &InstanceType<'_>) -> Result<()> {
1806 match ty {
1807 InterfaceType::String => Ok(()),
1808 other => bail!("expected `string` found `{}`", desc(other)),
1809 }
1810 }
1811}
1812
unsafe impl Lift for WasmStr {
    #[inline]
    fn linear_lift_from_flat(
        cx: &mut LiftContext<'_>,
        ty: InterfaceType,
        src: &Self::Lower,
    ) -> Result<Self> {
        debug_assert!(matches!(ty, InterfaceType::String));
        // Flat representation is the string's (pointer, length) pair; bounds
        // and alignment validation happen in `WasmStr::new`.
        let (ptr, len) = lift_pointer_pair_from_flat(cx, src)?;
        WasmStr::new(ptr, len, cx)
    }

    #[inline]
    fn linear_lift_from_memory(
        cx: &mut LiftContext<'_>,
        ty: InterfaceType,
        bytes: &[u8],
    ) -> Result<Self> {
        debug_assert!(matches!(ty, InterfaceType::String));
        debug_assert!((bytes.as_ptr() as usize) % (Self::ALIGN32 as usize) == 0);
        // Same (pointer, length) pair, read back out of linear memory.
        let (ptr, len) = lift_pointer_pair_from_memory(cx, bytes)?;
        WasmStr::new(ptr, len, cx)
    }
}
1837
1838unsafe impl<T> ComponentType for [T]
1839where
1840 T: ComponentType,
1841{
1842 type Lower = [ValRaw; 2];
1843
1844 const ABI: CanonicalAbiInfo = CanonicalAbiInfo::POINTER_PAIR;
1845
1846 fn typecheck(ty: &InterfaceType, types: &InstanceType<'_>) -> Result<()> {
1847 match ty {
1848 InterfaceType::List(t) => T::typecheck(&types.types[*t].element, types),
1849 other => bail!("expected `list` found `{}`", desc(other)),
1850 }
1851 }
1852}
1853
unsafe impl<T> Lower for [T]
where
    T: Lower,
{
    /// Lowers this list into guest memory (via `realloc` in `lower_list`) and
    /// writes the resulting (pointer, length) pair into the flat `dst`.
    fn linear_lower_to_flat<U>(
        &self,
        cx: &mut LowerContext<'_, U>,
        ty: InterfaceType,
        dst: &mut MaybeUninit<[ValRaw; 2]>,
    ) -> Result<()> {
        let elem = match ty {
            InterfaceType::List(i) => cx.types[i].element,
            _ => bad_type_info(),
        };
        let (ptr, len) = lower_list(cx, elem, self)?;
        lower_pointer_pair_to_flat(cx, dst, ptr, len);
        Ok(())
    }

    /// Lowers this list into guest memory and stores the (pointer, length)
    /// pair at `offset` within linear memory.
    fn linear_lower_to_memory<U>(
        &self,
        cx: &mut LowerContext<'_, U>,
        ty: InterfaceType,
        offset: usize,
    ) -> Result<()> {
        let elem = match ty {
            InterfaceType::List(i) => cx.types[i].element,
            _ => bad_type_info(),
        };
        debug_assert!(offset % (Self::ALIGN32 as usize) == 0);
        let (ptr, len) = lower_list(cx, elem, self)?;
        lower_pointer_pair_to_memory(cx, offset, ptr, len);
        Ok(())
    }
}
1889
1890// FIXME: this is not a memcpy for `T` where `T` is something like `u8`.
1891//
1892// Some attempts to fix this have proved not fruitful. In isolation an attempt
1893// was made where:
1894//
1895// * `MemoryMut` stored a `*mut [u8]` as its "last view" of memory to avoid
1896// reloading the base pointer constantly. This view is reset on `realloc`.
1897// * The bounds-checks in `MemoryMut::get` were removed (replaced with unsafe
1898// indexing)
1899//
// Even then though this didn't correctly vectorize for `Vec<u8>`. It's not
1901// entirely clear why but it appeared that it's related to reloading the base
1902// pointer to memory (I guess from `MemoryMut` itself?). Overall I'm not really
1903// clear on what's happening there, but this is surely going to be a performance
1904// bottleneck in the future.
/// Lowers `list` into the guest's linear memory, returning the canonical ABI
/// `(ptr, len)` pair describing it.
///
/// Space is allocated with the guest's `realloc` (`len * T::SIZE32` bytes at
/// `T::ALIGN32` alignment) and each element is then stored in place.
fn lower_list<T, U>(
    cx: &mut LowerContext<'_, U>,
    ty: InterfaceType,
    list: &[T],
) -> Result<(usize, usize)>
where
    T: Lower,
{
    let elem_size = T::SIZE32;
    // Guard against overflow before asking the guest to allocate.
    let size = list
        .len()
        .checked_mul(elem_size)
        .ok_or_else(|| format_err!("size overflow copying a list"))?;
    let ptr = cx.realloc(0, 0, T::ALIGN32, size)?;
    T::linear_store_list_to_memory(cx, ty, ptr, list)?;
    Ok((ptr, list.len()))
}
1922
1923/// Representation of a list of values that are owned by a WebAssembly instance.
1924///
1925/// For some more commentary about the rationale for this type see the
1926/// documentation of [`WasmStr`]. In summary this type can avoid a copy when
1927/// passing data to the host in some situations but is additionally more
1928/// cumbersome to use by requiring a [`Store`](crate::Store) to be provided.
1929///
1930/// This type is used whenever a `(list T)` is returned from a [`TypedFunc`],
1931/// for example. This type represents a list of values that are stored in linear
1932/// memory which are waiting to be read.
1933///
1934/// Note that this type represents only a valid range of bytes for the list
1935/// itself, it does not represent validity of the elements themselves and that's
1936/// performed when they're iterated.
1937///
1938/// Note that this type does not implement the [`Lower`] trait, only [`Lift`].
pub struct WasmList<T> {
    // Byte offset of the first element within the instance's linear memory.
    ptr: usize,
    // Number of elements (not bytes) in the list.
    len: usize,
    // Canonical options the list was lifted with; used to re-acquire the
    // right memory when elements are read later.
    options: OptionsIndex,
    // Component-model type of each element, needed to lift them on access.
    elem: InterfaceType,
    // The component instance whose linear memory this list points into.
    instance: Instance,
    // No `T` is stored directly; `PhantomData` ties the element type into
    // this type's signature for lifting.
    _marker: marker::PhantomData<T>,
}
1947
1948impl<T: Lift> WasmList<T> {
1949 pub(crate) fn new(
1950 ptr: usize,
1951 len: usize,
1952 cx: &mut LiftContext<'_>,
1953 elem: InterfaceType,
1954 ) -> Result<WasmList<T>> {
1955 match len
1956 .checked_mul(T::SIZE32)
1957 .and_then(|len| ptr.checked_add(len))
1958 {
1959 Some(n) if n <= cx.memory().len() => cx.consume_fuel_array(len, size_of::<T>())?,
1960 _ => bail!("list pointer/length out of bounds of memory"),
1961 }
1962 if ptr % usize::try_from(T::ALIGN32)? != 0 {
1963 bail!("list pointer is not aligned")
1964 }
1965 Ok(WasmList {
1966 ptr,
1967 len,
1968 options: cx.options_index(),
1969 elem,
1970 instance: cx.instance_handle(),
1971 _marker: marker::PhantomData,
1972 })
1973 }
1974
1975 /// Returns the item length of this vector
1976 #[inline]
1977 pub fn len(&self) -> usize {
1978 self.len
1979 }
1980
1981 /// Gets the `n`th element of this list.
1982 ///
1983 /// Returns `None` if `index` is out of bounds. Returns `Some(Err(..))` if
1984 /// the value couldn't be decoded (it was invalid). Returns `Some(Ok(..))`
1985 /// if the value is valid.
1986 ///
1987 /// # Panics
1988 ///
1989 /// This function will panic if the string did not originally come from the
1990 /// `store` specified.
1991 //
1992 // TODO: given that interface values are intended to be consumed in one go
1993 // should we even expose a random access iteration API? In theory all
1994 // consumers should be validating through the iterator.
1995 pub fn get(&self, mut store: impl AsContextMut, index: usize) -> Option<Result<T>> {
1996 let store = store.as_context_mut().0;
1997 let mut cx = LiftContext::new(store, self.options, self.instance);
1998 self.get_from_store(&mut cx, index)
1999 }
2000
2001 fn get_from_store(&self, cx: &mut LiftContext<'_>, index: usize) -> Option<Result<T>> {
2002 if index >= self.len {
2003 return None;
2004 }
2005 // Note that this is using panicking indexing and this is expected to
2006 // never fail. The bounds-checking here happened during the construction
2007 // of the `WasmList` itself which means these should always be in-bounds
2008 // (and wasm memory can only grow). This could theoretically be
2009 // unchecked indexing if we're confident enough and it's actually a perf
2010 // issue one day.
2011 let bytes = &cx.memory()[self.ptr + index * T::SIZE32..][..T::SIZE32];
2012 Some(T::linear_lift_from_memory(cx, self.elem, bytes))
2013 }
2014
2015 /// Returns an iterator over the elements of this list.
2016 ///
2017 /// Each item of the list may fail to decode and is represented through the
2018 /// `Result` value of the iterator.
2019 pub fn iter<'a, U: 'static>(
2020 &'a self,
2021 store: impl Into<StoreContextMut<'a, U>>,
2022 ) -> impl ExactSizeIterator<Item = Result<T>> + 'a {
2023 let store = store.into().0;
2024 let mut cx = LiftContext::new(store, self.options, self.instance);
2025 (0..self.len).map(move |i| self.get_from_store(&mut cx, i).unwrap())
2026 }
2027}
2028
// Generates `WasmList<$i>` impls exposing the list's backing bytes as a
// typed little-endian slice for each integer type passed in.
macro_rules! raw_wasm_list_accessors {
    ($($i:ident)*) => ($(
        impl WasmList<$i> {
            /// Get access to the raw underlying memory for this list.
            ///
            /// This method will return a direct slice into the original wasm
            /// module's linear memory where the data for this slice is stored.
            /// This allows the embedder to have efficient access to the
            /// underlying memory if needed and avoid copies and such if
            /// desired.
            ///
            /// Note that multi-byte integers are stored in little-endian format
            /// so portable processing of this slice must be aware of the host's
            /// byte-endianness. The `from_le` constructors in the Rust standard
            /// library should be suitable for converting from little-endian.
            ///
            /// # Panics
            ///
            /// Panics if the `store` provided is not the one from which this
            /// slice originated.
            pub fn as_le_slice<'a, T: 'static>(&self, store: impl Into<StoreContext<'a, T>>) -> &'a [$i] {
                let memory = self.instance.options_memory(store.into().0, self.options);
                self._as_le_slice(memory)
            }

            // Reinterprets the list's in-bounds byte range as a `[$i]` slice.
            fn _as_le_slice<'a>(&self, all_of_memory: &'a [u8]) -> &'a [$i] {
                // See comments in `WasmList::get` for the panicking indexing
                let byte_size = self.len * mem::size_of::<$i>();
                let bytes = &all_of_memory[self.ptr..][..byte_size];

                // The canonical ABI requires that everything is aligned to its
                // own size, so this should be an aligned array. Furthermore the
                // alignment of primitive integers for hosts should be smaller
                // than or equal to the size of the primitive itself, meaning
                // that a wasm canonical-abi-aligned list is also aligned for
                // the host. That should mean that the head/tail slices here are
                // empty.
                //
                // Also note that the `unsafe` here is needed since the type
                // we're aligning to isn't guaranteed to be valid, but in our
                // case it's just integers and bytes so this should be safe.
                unsafe {
                    let (head, body, tail) = bytes.align_to::<$i>();
                    assert!(head.is_empty() && tail.is_empty());
                    body
                }
            }
        }
    )*)
}

// Every fixed-width integer type whose canonical ABI representation is a
// plain little-endian array gets the raw accessors above.
raw_wasm_list_accessors! {
    i8 i16 i32 i64
    u8 u16 u32 u64
}
2084
2085// Note that this is similar to `ComponentType for str` except it can only be
2086// used for lifting, not lowering.
unsafe impl<T: ComponentType> ComponentType for WasmList<T> {
    // Same flat representation as a host-side list: a (pointer, length) pair.
    type Lower = <[T] as ComponentType>::Lower;

    const ABI: CanonicalAbiInfo = CanonicalAbiInfo::POINTER_PAIR;

    // Type-checking is identical to `[T]`, so delegate to it.
    fn typecheck(ty: &InterfaceType, types: &InstanceType<'_>) -> Result<()> {
        <[T] as ComponentType>::typecheck(ty, types)
    }
}
2096
unsafe impl<T: Lift> Lift for WasmList<T> {
    fn linear_lift_from_flat(
        cx: &mut LiftContext<'_>,
        ty: InterfaceType,
        src: &Self::Lower,
    ) -> Result<Self> {
        let elem = match ty {
            InterfaceType::List(i) => cx.types[i].element,
            _ => bad_type_info(),
        };
        // Flat representation is the (pointer, length) pair; bounds and
        // alignment validation happen in `WasmList::new`.
        let (ptr, len) = lift_pointer_pair_from_flat(cx, src)?;
        WasmList::new(ptr, len, cx, elem)
    }

    fn linear_lift_from_memory(
        cx: &mut LiftContext<'_>,
        ty: InterfaceType,
        bytes: &[u8],
    ) -> Result<Self> {
        let elem = match ty {
            InterfaceType::List(i) => cx.types[i].element,
            _ => bad_type_info(),
        };
        debug_assert!((bytes.as_ptr() as usize) % (Self::ALIGN32 as usize) == 0);
        // Same (pointer, length) pair, read back out of linear memory.
        let (ptr, len) = lift_pointer_pair_from_memory(cx, bytes)?;
        WasmList::new(ptr, len, cx, elem)
    }
}
2125
2126// =============================================================================
2127// HashMap<K, V> support for component model `map<K, V>`
2128//
2129// Maps are represented as `list<tuple<K, V>>` in the canonical ABI, so the
2130// lowered form is a (pointer, length) pair just like lists.
2131
2132fn map_abi<'a>(ty: InterfaceType, types: &'a ComponentTypes) -> &'a TypeMap {
2133 match ty {
2134 InterfaceType::Map(i) => &types[i],
2135 _ => bad_type_info(),
2136 }
2137}
2138
unsafe impl<K, V> ComponentType for HashMap<K, V>
where
    K: ComponentType,
    V: ComponentType,
{
    // Maps lower as a (pointer, length) pair, just like lists.
    type Lower = [ValRaw; 2];

    const ABI: CanonicalAbiInfo = CanonicalAbiInfo::POINTER_PAIR;

    fn typecheck(ty: &InterfaceType, types: &InstanceType<'_>) -> Result<()> {
        // Key/value checks are identical to `TryHashMap`, so delegate to
        // avoid duplicating them.
        TryHashMap::<K, V>::typecheck(ty, types)
    }
}
2152
unsafe impl<K, V> Lower for HashMap<K, V>
where
    K: Lower,
    V: Lower,
{
    /// Lowers this map into guest memory as a `list<tuple<K, V>>` and writes
    /// the resulting (pointer, length) pair into the flat `dst` storage.
    fn linear_lower_to_flat<U>(
        &self,
        cx: &mut LowerContext<'_, U>,
        ty: InterfaceType,
        dst: &mut MaybeUninit<[ValRaw; 2]>,
    ) -> Result<()> {
        let map = map_abi(ty, &cx.types);
        let (ptr, len) = lower_map_iter(cx, map, self.len(), self.iter())?;
        lower_pointer_pair_to_flat(cx, dst, ptr, len);
        Ok(())
    }

    /// Lowers this map into guest memory and stores the (pointer, length)
    /// pair at `offset` within linear memory.
    fn linear_lower_to_memory<U>(
        &self,
        cx: &mut LowerContext<'_, U>,
        ty: InterfaceType,
        offset: usize,
    ) -> Result<()> {
        let map = map_abi(ty, &cx.types);
        debug_assert!(offset % (CanonicalAbiInfo::POINTER_PAIR.align32 as usize) == 0);
        let (ptr, len) = lower_map_iter(cx, map, self.len(), self.iter())?;
        lower_pointer_pair_to_memory(cx, offset, ptr, len);
        Ok(())
    }
}
2183
unsafe impl<K, V> Lift for HashMap<K, V>
where
    K: Lift + Eq + Hash,
    V: Lift,
{
    fn linear_lift_from_flat(
        cx: &mut LiftContext<'_>,
        ty: InterfaceType,
        src: &Self::Lower,
    ) -> Result<Self> {
        // Lift through `TryHashMap` (which performs the bounds/fuel checks
        // and fallible allocation) and then convert to a plain `HashMap`.
        Ok(TryHashMap::<K, V>::linear_lift_from_flat(cx, ty, src)?.into())
    }

    fn linear_lift_from_memory(
        cx: &mut LiftContext<'_>,
        ty: InterfaceType,
        bytes: &[u8],
    ) -> Result<Self> {
        // Same strategy as `linear_lift_from_flat` above.
        Ok(TryHashMap::<K, V>::linear_lift_from_memory(cx, ty, bytes)?.into())
    }
}
2205
/// Lowers `len` key/value pairs from `iter` into guest memory as a
/// `list<tuple<K, V>>`, returning the `(ptr, len)` pair describing the list.
///
/// `map` supplies the precomputed entry layout: the tuple's size/alignment
/// (`entry_abi`) and the value's offset within each entry (`value_offset32`).
fn lower_map_iter<'a, K, V, U>(
    cx: &mut LowerContext<'_, U>,
    map: &TypeMap,
    len: usize,
    iter: impl Iterator<Item = (&'a K, &'a V)>,
) -> Result<(usize, usize)>
where
    K: Lower + 'a,
    V: Lower + 'a,
{
    // Guard against overflow before asking the guest to allocate.
    let size = len
        .checked_mul(usize::try_from(map.entry_abi.size32)?)
        .ok_or_else(|| format_err!("size overflow copying a map"))?;
    let ptr = cx.realloc(0, 0, map.entry_abi.align32, size)?;

    let mut entry_offset = ptr;
    for (key, value) in iter {
        // Keys are the first field in each entry tuple.
        <K as Lower>::linear_lower_to_memory(key, cx, map.key, entry_offset)?;
        // Values start at the precomputed value offset within the tuple.
        <V as Lower>::linear_lower_to_memory(
            value,
            cx,
            map.value,
            entry_offset + usize::try_from(map.value_offset32)?,
        )?;
        entry_offset += usize::try_from(map.entry_abi.size32)?;
    }

    Ok((ptr, len))
}
2237
2238unsafe impl<K, V> ComponentType for TryHashMap<K, V>
2239where
2240 K: ComponentType,
2241 V: ComponentType,
2242{
2243 type Lower = [ValRaw; 2];
2244
2245 const ABI: CanonicalAbiInfo = CanonicalAbiInfo::POINTER_PAIR;
2246
2247 fn typecheck(ty: &InterfaceType, types: &InstanceType<'_>) -> Result<()> {
2248 match ty {
2249 InterfaceType::Map(t) => {
2250 let map_ty = &types.types[*t];
2251 K::typecheck(&map_ty.key, types)?;
2252 V::typecheck(&map_ty.value, types)?;
2253 Ok(())
2254 }
2255 other => bail!("expected `map` found `{}`", desc(other)),
2256 }
2257 }
2258}
2259
unsafe impl<K, V> Lower for TryHashMap<K, V>
where
    K: Lower,
    V: Lower,
{
    /// Lowers this map into guest memory as a `list<tuple<K, V>>` and writes
    /// the resulting (pointer, length) pair into the flat `dst` storage.
    fn linear_lower_to_flat<U>(
        &self,
        cx: &mut LowerContext<'_, U>,
        ty: InterfaceType,
        dst: &mut MaybeUninit<[ValRaw; 2]>,
    ) -> Result<()> {
        let map = map_abi(ty, &cx.types);
        let (ptr, len) = lower_map_iter(cx, map, self.len(), self.iter())?;
        lower_pointer_pair_to_flat(cx, dst, ptr, len);
        Ok(())
    }

    /// Lowers this map into guest memory and stores the (pointer, length)
    /// pair at `offset` within linear memory.
    fn linear_lower_to_memory<U>(
        &self,
        cx: &mut LowerContext<'_, U>,
        ty: InterfaceType,
        offset: usize,
    ) -> Result<()> {
        let map = map_abi(ty, &cx.types);
        debug_assert!(offset % (CanonicalAbiInfo::POINTER_PAIR.align32 as usize) == 0);
        let (ptr, len) = lower_map_iter(cx, map, self.len(), self.iter())?;
        lower_pointer_pair_to_memory(cx, offset, ptr, len);
        Ok(())
    }
}
2290
unsafe impl<K, V> Lift for TryHashMap<K, V>
where
    K: Lift + Eq + Hash,
    V: Lift,
{
    fn linear_lift_from_flat(
        cx: &mut LiftContext<'_>,
        ty: InterfaceType,
        src: &Self::Lower,
    ) -> Result<Self> {
        let map = map_abi(ty, &cx.types);
        // Flat representation is the entry list's (pointer, length) pair.
        let (ptr, len) = lift_pointer_pair_from_flat(cx, src)?;
        lift_try_map(cx, map, ptr, len)
    }

    fn linear_lift_from_memory(
        cx: &mut LiftContext<'_>,
        ty: InterfaceType,
        bytes: &[u8],
    ) -> Result<Self> {
        let map = map_abi(ty, &cx.types);
        debug_assert!((bytes.as_ptr() as usize) % (Self::ALIGN32 as usize) == 0);
        // Same (pointer, length) pair, read back out of linear memory.
        let (ptr, len) = lift_pointer_pair_from_memory(cx, bytes)?;
        lift_try_map(cx, map, ptr, len)
    }
}
2317
/// Lifts a `map<K, V>` out of guest memory, reading `len` entries laid out as
/// a `list<tuple<K, V>>` starting at `ptr`, into a `TryHashMap`.
fn lift_try_map<K, V>(
    cx: &mut LiftContext<'_>,
    map: &TypeMap,
    ptr: usize,
    len: usize,
) -> Result<TryHashMap<K, V>>
where
    K: Lift + Eq + Hash,
    V: Lift,
{
    // Fallible up-front allocation since `len` is guest-controlled.
    let mut result = TryHashMap::with_capacity(len)?;

    // Bounds-check the full entry range (guarding against overflow in the
    // size computation) and charge fuel before reading anything.
    match len
        .checked_mul(usize::try_from(map.entry_abi.size32)?)
        .and_then(|total| ptr.checked_add(total))
    {
        Some(n) if n <= cx.memory().len() => cx.consume_fuel_array(len, size_of::<(K, V)>())?,
        _ => bail!("map pointer/length out of bounds of memory"),
    }
    if ptr % (map.entry_abi.align32 as usize) != 0 {
        bail!("map pointer is not aligned");
    }

    for i in 0..len {
        let entry_base = ptr + (i * usize::try_from(map.entry_abi.size32)?);

        // Keys sit at offset 0 of each entry tuple...
        let key_bytes = &cx.memory()[entry_base..][..K::SIZE32];
        let key = K::linear_lift_from_memory(cx, map.key, key_bytes)?;

        // ... and values at the precomputed offset within the tuple.
        let value_bytes =
            &cx.memory()[entry_base + usize::try_from(map.value_offset32)?..][..V::SIZE32];
        let value = V::linear_lift_from_memory(cx, map.value, value_bytes)?;

        // `insert` is fallible like `with_capacity` above.
        result.insert(key, value)?;
    }

    Ok(result)
}
2356
2357/// Verify that the given wasm type is a tuple with the expected fields in the right order.
2358fn typecheck_tuple(
2359 ty: &InterfaceType,
2360 types: &InstanceType<'_>,
2361 expected: &[fn(&InterfaceType, &InstanceType<'_>) -> Result<()>],
2362) -> Result<()> {
2363 match ty {
2364 InterfaceType::Tuple(t) => {
2365 let tuple = &types.types[*t];
2366 if tuple.types.len() != expected.len() {
2367 bail!(
2368 "expected {}-tuple, found {}-tuple",
2369 expected.len(),
2370 tuple.types.len()
2371 );
2372 }
2373 for (ty, check) in tuple.types.iter().zip(expected) {
2374 check(ty, types)?;
2375 }
2376 Ok(())
2377 }
2378 other => bail!("expected `tuple` found `{}`", desc(other)),
2379 }
2380}
2381
2382/// Verify that the given wasm type is a record with the expected fields in the right order and with the right
2383/// names.
2384pub fn typecheck_record(
2385 ty: &InterfaceType,
2386 types: &InstanceType<'_>,
2387 expected: &[(&str, fn(&InterfaceType, &InstanceType<'_>) -> Result<()>)],
2388) -> Result<()> {
2389 match ty {
2390 InterfaceType::Record(index) => {
2391 let fields = &types.types[*index].fields;
2392
2393 if fields.len() != expected.len() {
2394 bail!(
2395 "expected record of {} fields, found {} fields",
2396 expected.len(),
2397 fields.len()
2398 );
2399 }
2400
2401 for (field, &(name, check)) in fields.iter().zip(expected) {
2402 check(&field.ty, types)
2403 .with_context(|| format!("type mismatch for field {name}"))?;
2404
2405 if field.name != name {
2406 bail!("expected record field named {}, found {}", name, field.name);
2407 }
2408 }
2409
2410 Ok(())
2411 }
2412 other => bail!("expected `record` found `{}`", desc(other)),
2413 }
2414}
2415
2416/// Verify that the given wasm type is a variant with the expected cases in the right order and with the right
2417/// names.
pub fn typecheck_variant(
    ty: &InterfaceType,
    types: &InstanceType<'_>,
    expected: &[(
        &str,
        Option<fn(&InterfaceType, &InstanceType<'_>) -> Result<()>>,
    )],
) -> Result<()> {
    match ty {
        InterfaceType::Variant(index) => {
            let cases = &types.types[*index].cases;

            // Case counts must match exactly.
            if cases.len() != expected.len() {
                bail!(
                    "expected variant of {} cases, found {} cases",
                    expected.len(),
                    cases.len()
                );
            }

            // Cases must match pairwise, in declaration order.
            for ((case_name, case_ty), &(name, check)) in cases.iter().zip(expected) {
                if *case_name != name {
                    bail!("expected variant case named {name}, found {case_name}");
                }

                // Both sides must agree on whether the case carries a
                // payload, and when it does the payload types must check.
                match (check, case_ty) {
                    (Some(check), Some(ty)) => check(ty, types)
                        .with_context(|| format!("type mismatch for case {name}"))?,
                    (None, None) => {}
                    (Some(_), None) => {
                        bail!("case `{name}` has no type but one was expected")
                    }
                    (None, Some(_)) => {
                        bail!("case `{name}` has a type but none was expected")
                    }
                }
            }

            Ok(())
        }
        other => bail!("expected `variant` found `{}`", desc(other)),
    }
}
2461
/// Verify that the given wasm type is an enum with the expected cases in the right order and with the right
2463/// names.
2464pub fn typecheck_enum(
2465 ty: &InterfaceType,
2466 types: &InstanceType<'_>,
2467 expected: &[&str],
2468) -> Result<()> {
2469 match ty {
2470 InterfaceType::Enum(index) => {
2471 let names = &types.types[*index].names;
2472
2473 if names.len() != expected.len() {
2474 bail!(
2475 "expected enum of {} names, found {} names",
2476 expected.len(),
2477 names.len()
2478 );
2479 }
2480
2481 for (name, expected) in names.iter().zip(expected) {
2482 if name != expected {
2483 bail!("expected enum case named {expected}, found {name}");
2484 }
2485 }
2486
2487 Ok(())
2488 }
2489 other => bail!("expected `enum` found `{}`", desc(other)),
2490 }
2491}
2492
2493/// Verify that the given wasm type is a flags type with the expected flags in the right order and with the right
2494/// names.
2495pub fn typecheck_flags(
2496 ty: &InterfaceType,
2497 types: &InstanceType<'_>,
2498 expected: &[&str],
2499) -> Result<()> {
2500 match ty {
2501 InterfaceType::Flags(index) => {
2502 let names = &types.types[*index].names;
2503
2504 if names.len() != expected.len() {
2505 bail!(
2506 "expected flags type with {} names, found {} names",
2507 expected.len(),
2508 names.len()
2509 );
2510 }
2511
2512 for (name, expected) in names.iter().zip(expected) {
2513 if name != expected {
2514 bail!("expected flag named {expected}, found {name}");
2515 }
2516 }
2517
2518 Ok(())
2519 }
2520 other => bail!("expected `flags` found `{}`", desc(other)),
2521 }
2522}
2523
2524/// Format the specified bitflags using the specified names for debugging
pub fn format_flags(bits: &[u32], names: &[&str], f: &mut fmt::Formatter) -> fmt::Result {
    f.write_str("(")?;
    // Separator is empty before the first set flag and "|" afterwards.
    let mut sep = "";
    for (index, name) in names.iter().enumerate() {
        // Flag `index` lives at bit `index % 32` of word `index / 32`.
        let word = bits[index / 32];
        if word & (1 << (index % 32)) != 0 {
            f.write_str(sep)?;
            f.write_str(name)?;
            sep = "|";
        }
    }
    f.write_str(")")
}
2541
unsafe impl<T> ComponentType for Option<T>
where
    T: ComponentType,
{
    // Flat representation: a 32-bit discriminant followed by the payload's
    // flat representation (zero-filled for `None`, see `Lower` below).
    type Lower = TupleLower<<u32 as ComponentType>::Lower, T::Lower>;

    // `option<T>` is a two-case variant: a payload-less case and a `T` case.
    const ABI: CanonicalAbiInfo = CanonicalAbiInfo::variant_static(&[None, Some(T::ABI)]);
    const MAY_REQUIRE_REALLOC: bool = T::MAY_REQUIRE_REALLOC;

    fn typecheck(ty: &InterfaceType, types: &InstanceType<'_>) -> Result<()> {
        match ty {
            InterfaceType::Option(t) => T::typecheck(&types.types[*t].ty, types),
            other => bail!("expected `option` found `{}`", desc(other)),
        }
    }
}
2558
unsafe impl<T> ComponentVariant for Option<T>
where
    T: ComponentType,
{
    // Case 0 (`None`) has no payload; case 1 (`Some`) carries a `T`.
    const CASES: &'static [Option<CanonicalAbiInfo>] = &[None, Some(T::ABI)];
}
2565
unsafe impl<T> Lower for Option<T>
where
    T: Lower,
{
    /// Lowers the discriminant plus (for `Some`) the payload into flat core
    /// wasm values; the payload is zeroed for `None`.
    fn linear_lower_to_flat<U>(
        &self,
        cx: &mut LowerContext<'_, U>,
        ty: InterfaceType,
        dst: &mut MaybeUninit<Self::Lower>,
    ) -> Result<()> {
        let payload = match ty {
            InterfaceType::Option(ty) => cx.types[ty].ty,
            _ => bad_type_info(),
        };
        match self {
            None => {
                map_maybe_uninit!(dst.A1).write(ValRaw::i32(0));
                // Note that this is unsafe as we're writing an arbitrary
                // bit-pattern to an arbitrary type, but part of the unsafe
                // contract of the `ComponentType` trait is that we can assign
                // any bit-pattern. By writing all zeros here we're ensuring
                // that the core wasm arguments this translates to will all be
                // zeros (as the canonical ABI requires).
                unsafe {
                    map_maybe_uninit!(dst.A2).as_mut_ptr().write_bytes(0u8, 1);
                }
            }
            Some(val) => {
                map_maybe_uninit!(dst.A1).write(ValRaw::i32(1));
                val.linear_lower_to_flat(cx, payload, map_maybe_uninit!(dst.A2))?;
            }
        }
        Ok(())
    }

    /// Lowers the discriminant byte plus (for `Some`) the payload at its
    /// precomputed offset within linear memory.
    fn linear_lower_to_memory<U>(
        &self,
        cx: &mut LowerContext<'_, U>,
        ty: InterfaceType,
        offset: usize,
    ) -> Result<()> {
        debug_assert!(offset % (Self::ALIGN32 as usize) == 0);
        let payload = match ty {
            InterfaceType::Option(ty) => cx.types[ty].ty,
            _ => bad_type_info(),
        };
        match self {
            None => {
                // Only the discriminant byte needs writing for `None`.
                cx.get::<1>(offset)[0] = 0;
            }
            Some(val) => {
                cx.get::<1>(offset)[0] = 1;
                val.linear_lower_to_memory(
                    cx,
                    payload,
                    offset + (Self::INFO.payload_offset32 as usize),
                )?;
            }
        }
        Ok(())
    }
}
2628
unsafe impl<T> Lift for Option<T>
where
    T: Lift,
{
    fn linear_lift_from_flat(
        cx: &mut LiftContext<'_>,
        ty: InterfaceType,
        src: &Self::Lower,
    ) -> Result<Self> {
        let payload = match ty {
            InterfaceType::Option(ty) => cx.types[ty].ty,
            _ => bad_type_info(),
        };
        // The first flat value is the discriminant; the payload is only
        // lifted for the `Some` case. Any other discriminant is invalid.
        Ok(match src.A1.get_i32() {
            0 => None,
            1 => Some(T::linear_lift_from_flat(cx, payload, &src.A2)?),
            _ => bail!("invalid option discriminant"),
        })
    }

    fn linear_lift_from_memory(
        cx: &mut LiftContext<'_>,
        ty: InterfaceType,
        bytes: &[u8],
    ) -> Result<Self> {
        debug_assert!((bytes.as_ptr() as usize) % (Self::ALIGN32 as usize) == 0);
        let payload_ty = match ty {
            InterfaceType::Option(ty) => cx.types[ty].ty,
            _ => bad_type_info(),
        };
        // In memory the discriminant is a single byte; the payload follows at
        // its precomputed (aligned) offset.
        let discrim = bytes[0];
        let payload = &bytes[Self::INFO.payload_offset32 as usize..];
        match discrim {
            0 => Ok(None),
            1 => Ok(Some(T::linear_lift_from_memory(cx, payload_ty, payload)?)),
            _ => bail!("invalid option discriminant"),
        }
    }
}
2668
/// Flat (core wasm) representation of a component-model `result<T, E>`.
#[derive(Clone, Copy)]
#[repr(C)]
pub struct ResultLower<T: Copy, E: Copy> {
    // Discriminant selecting between the `ok` and `err` cases.
    tag: ValRaw,
    // Overlapped storage for whichever payload the tag selects.
    payload: ResultLowerPayload<T, E>,
}
2675
// Union overlapping the `ok` and `err` flat payloads; the larger of the two
// determines the payload size, matching the canonical ABI's "join" of the
// two cases' flat representations.
#[derive(Clone, Copy)]
#[repr(C)]
union ResultLowerPayload<T: Copy, E: Copy> {
    ok: T,
    err: E,
}
2682
unsafe impl<T, E> ComponentType for Result<T, E>
where
    T: ComponentType,
    E: ComponentType,
{
    type Lower = ResultLower<T::Lower, E::Lower>;

    // `result<T, E>` is a two-case variant where both cases carry a payload.
    const ABI: CanonicalAbiInfo = CanonicalAbiInfo::variant_static(&[Some(T::ABI), Some(E::ABI)]);
    const MAY_REQUIRE_REALLOC: bool = T::MAY_REQUIRE_REALLOC || E::MAY_REQUIRE_REALLOC;

    fn typecheck(ty: &InterfaceType, types: &InstanceType<'_>) -> Result<()> {
        match ty {
            InterfaceType::Result(r) => {
                let result = &types.types[*r];
                // A wasm `result` without an `ok` payload may still be
                // matched by a unit-like Rust `T`; otherwise the payload
                // types must typecheck against each other.
                match &result.ok {
                    Some(ty) => T::typecheck(ty, types)?,
                    None if T::IS_RUST_UNIT_TYPE => {}
                    None => bail!("expected no `ok` type"),
                }
                // Same treatment for the `err` payload.
                match &result.err {
                    Some(ty) => E::typecheck(ty, types)?,
                    None if E::IS_RUST_UNIT_TYPE => {}
                    None => bail!("expected no `err` type"),
                }
                Ok(())
            }
            other => bail!("expected `result` found `{}`", desc(other)),
        }
    }
}
2713
/// Lowers the payload of a variant into the storage for the entire payload,
/// handling writing zeros at the end of the representation if this payload is
/// smaller than the entire flat representation.
///
/// * `payload` - the flat storage space for the entire payload of the variant
/// * `typed_payload` - projection from the payload storage space to the
///   individual storage space for this variant.
/// * `lower` - lowering operation used to initialize the `typed_payload` return
///   value.
///
/// For more information on this see the comments in the `Lower for Result`
/// implementation below.
///
/// # Safety
///
/// `P` and `T` must be types whose storage is viewable as a slice of
/// `ValRaw` (the contract of the `storage_as_slice*` helpers used below) —
/// in practice the `Lower` representations defined by this module.
pub unsafe fn lower_payload<P, T>(
    payload: &mut MaybeUninit<P>,
    typed_payload: impl FnOnce(&mut MaybeUninit<P>) -> &mut MaybeUninit<T>,
    lower: impl FnOnce(&mut MaybeUninit<T>) -> Result<()>,
) -> Result<()> {
    let typed = typed_payload(payload);
    lower(typed)?;

    // Zero-fill whatever tail of the full payload storage the typed payload
    // didn't cover so no uninitialized host memory leaks to the guest.
    let typed_len = unsafe { storage_as_slice(typed).len() };
    let payload = unsafe { storage_as_slice_mut(payload) };
    for slot in payload[typed_len..].iter_mut() {
        slot.write(ValRaw::u64(0));
    }
    Ok(())
}
2741
// A `result<T, E>` is a two-case variant: case 0 carries the `ok` payload and
// case 1 the `err` payload, matching the discriminant values written by the
// `Lower` implementation below.
unsafe impl<T, E> ComponentVariant for Result<T, E>
where
    T: ComponentType,
    E: ComponentType,
{
    const CASES: &'static [Option<CanonicalAbiInfo>] = &[Some(T::ABI), Some(E::ABI)];
}
2749
unsafe impl<T, E> Lower for Result<T, E>
where
    T: Lower,
    E: Lower,
{
    // Lowers `self` into the flat (stack-based) representation: a tag
    // `ValRaw` (0 = `Ok`, 1 = `Err`) followed by the joined payload union.
    fn linear_lower_to_flat<U>(
        &self,
        cx: &mut LowerContext<'_, U>,
        ty: InterfaceType,
        dst: &mut MaybeUninit<Self::Lower>,
    ) -> Result<()> {
        // Pull the optional `ok`/`err` payload types out of the type tables;
        // any other `InterfaceType` here is a type-checking bug.
        let (ok, err) = match ty {
            InterfaceType::Result(ty) => {
                let ty = &cx.types[ty];
                (ty.ok, ty.err)
            }
            _ => bad_type_info(),
        };

        // This implementation of `Lower::lower`, if you're reading these from
        // the top of this file, is the first location that the "join" logic of
        // the component model's canonical ABI encountered. The rough problem is
        // that let's say we have a component model type of the form:
        //
        //      (result u64 (error (tuple f32 u16)))
        //
        // The flat representation of this is actually pretty tricky. Currently
        // it is:
        //
        //      i32 i64 i32
        //
        // The first `i32` is the discriminant for the `result`, and the payload
        // is represented by `i64 i32`. The "ok" variant will only use the `i64`
        // and the "err" variant will use both `i64` and `i32`.
        //
        // In the "ok" variant the first issue is encountered. The size of one
        // variant may not match the size of the other variants. All variants
        // start at the "front" but when lowering a type we need to be sure to
        // initialize the later variants (lest we leak random host memory into
        // the guest module). Due to how the `Lower` type is represented as a
        // `union` of all the variants what ends up happening here is that
        // internally within the `lower_payload` after the typed payload is
        // lowered the remaining bits of the payload that weren't initialized
        // are all set to zero. This will guarantee that we'll write to all the
        // slots for each variant.
        //
        // The "err" variant encounters the second issue, however, which is that
        // the flat representation for each type may differ between payloads. In
        // the "ok" arm an `i64` is written, but the `lower` implementation for
        // the "err" arm will write an `f32` and then an `i32`. For this
        // implementation of `lower` to be valid the `f32` needs to get inflated
        // to an `i64` with zero-padding in the upper bits. What may be
        // surprising, however, is that none of this is handled in this file.
        // This implementation looks like it's blindly deferring to `E::lower`
        // and hoping it does the right thing.
        //
        // In reality, however, the correctness of variant lowering relies on
        // two subtle details of the `ValRaw` implementation in Wasmtime:
        //
        // 1. First the `ValRaw` value always contains little-endian values.
        //    This means that if a `u32` is written, a `u64` is read, and then
        //    the `u64` has its upper bits truncated the original value will
        //    always be retained. This is primarily here for big-endian
        //    platforms where if it weren't little endian then the opposite
        //    would occur and the wrong value would be read.
        //
        // 2. Second, and perhaps even more subtly, the `ValRaw` constructors
        //    for 32-bit types actually always initialize 64-bits of the
        //    `ValRaw`. In the component model flat ABI only 32 and 64-bit types
        //    are used so 64-bits is big enough to contain everything. This
        //    means that when a `ValRaw` is written into the destination it will
        //    always, whether it's needed or not, be "ready" to get extended up
        //    to 64-bits.
        //
        // Put together these two subtle guarantees means that all `Lower`
        // implementations can be written "naturally" as one might naively
        // expect. Variants will, on each arm, zero out remaining fields and all
        // writes to the flat representation will automatically be 64-bit writes
        // meaning that if the value is read as a 64-bit value, which isn't
        // known at the time of the write, it'll still be correct.
        match self {
            Ok(e) => {
                // Tag 0 selects the `ok` case.
                map_maybe_uninit!(dst.tag).write(ValRaw::i32(0));
                unsafe {
                    lower_payload(
                        map_maybe_uninit!(dst.payload),
                        |payload| map_maybe_uninit!(payload.ok),
                        // No payload type means `T` is a unit type with
                        // nothing to lower.
                        |dst| match ok {
                            Some(ok) => e.linear_lower_to_flat(cx, ok, dst),
                            None => Ok(()),
                        },
                    )
                }
            }
            Err(e) => {
                // Tag 1 selects the `err` case.
                map_maybe_uninit!(dst.tag).write(ValRaw::i32(1));
                unsafe {
                    lower_payload(
                        map_maybe_uninit!(dst.payload),
                        |payload| map_maybe_uninit!(payload.err),
                        |dst| match err {
                            Some(err) => e.linear_lower_to_flat(cx, err, dst),
                            None => Ok(()),
                        },
                    )
                }
            }
        }
    }

    // Lowers `self` into guest linear memory at `offset`: the discriminant
    // byte first, then the payload at the variant's payload offset.
    fn linear_lower_to_memory<U>(
        &self,
        cx: &mut LowerContext<'_, U>,
        ty: InterfaceType,
        offset: usize,
    ) -> Result<()> {
        let (ok, err) = match ty {
            InterfaceType::Result(ty) => {
                let ty = &cx.types[ty];
                (ty.ok, ty.err)
            }
            _ => bad_type_info(),
        };
        debug_assert!(offset % (Self::ALIGN32 as usize) == 0);
        let payload_offset = Self::INFO.payload_offset32 as usize;
        match self {
            Ok(e) => {
                // Discriminant byte 0 = `ok`; payload only written when the
                // component-level type actually has an `ok` payload.
                cx.get::<1>(offset)[0] = 0;
                if let Some(ok) = ok {
                    e.linear_lower_to_memory(cx, ok, offset + payload_offset)?;
                }
            }
            Err(e) => {
                // Discriminant byte 1 = `err`.
                cx.get::<1>(offset)[0] = 1;
                if let Some(err) = err {
                    e.linear_lower_to_memory(cx, err, offset + payload_offset)?;
                }
            }
        }
        Ok(())
    }
}
2892
unsafe impl<T, E> Lift for Result<T, E>
where
    T: Lift,
    E: Lift,
{
    // Lifts a `Result` from the flat representation: the tag `ValRaw`
    // selects the case (0 = `Ok`, 1 = `Err`) and the corresponding union
    // field of the payload is lifted.
    #[inline]
    fn linear_lift_from_flat(
        cx: &mut LiftContext<'_>,
        ty: InterfaceType,
        src: &Self::Lower,
    ) -> Result<Self> {
        let (ok, err) = match ty {
            InterfaceType::Result(ty) => {
                let ty = &cx.types[ty];
                (ty.ok, ty.err)
            }
            _ => bad_type_info(),
        };
        // Note that this implementation specifically isn't trying to actually
        // reinterpret or alter the bits of `lower` depending on which variant
        // we're lifting. This ends up all working out because the value is
        // stored in little-endian format.
        //
        // When stored in little-endian format the `{T,E}::Lower`, when each
        // individual `ValRaw` is read, means that if an i64 value, extended
        // from an i32 value, was stored then when the i32 value is read it'll
        // automatically ignore the upper bits.
        //
        // This "trick" allows us to seamlessly pass through the `Self::Lower`
        // representation into the lifting/lowering without trying to handle
        // "join"ed types as per the canonical ABI. It just so happens that i64
        // bits will naturally be reinterpreted as f64. Additionally if the
        // joined type is i64 but only the lower bits are read that's ok and we
        // don't need to validate the upper bits.
        //
        // This is largely enabled by WebAssembly/component-model#35 where no
        // validation needs to be performed for ignored bits and bytes here.
        Ok(match src.tag.get_i32() {
            0 => Ok(unsafe { lift_option(cx, ok, &src.payload.ok)? }),
            1 => Err(unsafe { lift_option(cx, err, &src.payload.err)? }),
            // Any other discriminant is an invalid guest-provided value.
            _ => bail!("invalid expected discriminant"),
        })
    }

    // Lifts a `Result` from guest linear memory: the first byte is the
    // discriminant and the payload bytes start at the variant's payload
    // offset.
    #[inline]
    fn linear_lift_from_memory(
        cx: &mut LiftContext<'_>,
        ty: InterfaceType,
        bytes: &[u8],
    ) -> Result<Self> {
        debug_assert!((bytes.as_ptr() as usize) % (Self::ALIGN32 as usize) == 0);
        let discrim = bytes[0];
        let payload = &bytes[Self::INFO.payload_offset32 as usize..];
        let (ok, err) = match ty {
            InterfaceType::Result(ty) => {
                let ty = &cx.types[ty];
                (ty.ok, ty.err)
            }
            _ => bad_type_info(),
        };
        match discrim {
            // Only the case's own `SIZE32` bytes are handed to the payload's
            // lift; trailing padding up to the variant size is ignored.
            0 => Ok(Ok(load_option(cx, ok, &payload[..T::SIZE32])?)),
            1 => Ok(Err(load_option(cx, err, &payload[..E::SIZE32])?)),
            _ => bail!("invalid expected discriminant"),
        }
    }
}
2960
2961fn lift_option<T>(cx: &mut LiftContext<'_>, ty: Option<InterfaceType>, src: &T::Lower) -> Result<T>
2962where
2963 T: Lift,
2964{
2965 match ty {
2966 Some(ty) => T::linear_lift_from_flat(cx, ty, src),
2967 None => Ok(empty_lift()),
2968 }
2969}
2970
2971fn load_option<T>(cx: &mut LiftContext<'_>, ty: Option<InterfaceType>, bytes: &[u8]) -> Result<T>
2972where
2973 T: Lift,
2974{
2975 match ty {
2976 Some(ty) => T::linear_lift_from_memory(cx, ty, bytes),
2977 None => Ok(empty_lift()),
2978 }
2979}
2980
2981fn empty_lift<T>() -> T
2982where
2983 T: Lift,
2984{
2985 assert!(T::IS_RUST_UNIT_TYPE);
2986 assert_eq!(mem::size_of::<T>(), 0);
2987 unsafe { MaybeUninit::uninit().assume_init() }
2988}
2989
/// Helper structure to define `Lower` for tuples below.
///
/// Uses default type parameters to have fields be zero-sized and not present
/// in memory for smaller tuple values.
#[expect(non_snake_case, reason = "more amenable to macro-generated code")]
#[doc(hidden)]
#[derive(Clone, Copy)]
#[repr(C)]
pub struct TupleLower<
    T1 = (),
    T2 = (),
    T3 = (),
    T4 = (),
    T5 = (),
    T6 = (),
    T7 = (),
    T8 = (),
    T9 = (),
    T10 = (),
    T11 = (),
    T12 = (),
    T13 = (),
    T14 = (),
    T15 = (),
    T16 = (),
    T17 = (),
> {
    // NB: these names match the names in `for_each_function_signature!`
    A1: T1,
    A2: T2,
    A3: T3,
    A4: T4,
    A5: T5,
    A6: T6,
    A7: T7,
    A8: T8,
    A9: T9,
    A10: T10,
    A11: T11,
    A12: T12,
    A13: T13,
    A14: T14,
    A15: T15,
    A16: T16,
    A17: T17,
    // Zero-length array of `ValRaw` so that even when all type parameters are
    // `()` (e.g. for the 0-tuple) this `#[repr(C)]` struct still has
    // `ValRaw`'s alignment.
    _align_tuple_lower0_correctly: [ValRaw; 0],
}
3037
// Implements `ComponentType`, `Lower`, `Lift`, and `ComponentNamedList` for a
// Rust tuple of arity `$n` with element types `$($t)*`. Invoked below via
// `for_each_function_signature!` once per supported arity.
macro_rules! impl_component_ty_for_tuples {
    ($n:tt $($t:ident)*) => {
        #[allow(non_snake_case, reason = "macro-generated code")]
        unsafe impl<$($t,)*> ComponentType for ($($t,)*)
        where $($t: ComponentType),*
        {
            type Lower = TupleLower<$($t::Lower),*>;

            const ABI: CanonicalAbiInfo = CanonicalAbiInfo::record_static(&[
                $($t::ABI),*
            ]);
            const MAY_REQUIRE_REALLOC: bool = false $(|| $t::MAY_REQUIRE_REALLOC)*;

            // Only the 0-tuple `()` is the Rust unit type: the repetition
            // below runs once per element, so any element at all forces this
            // to `false`.
            const IS_RUST_UNIT_TYPE: bool = {
                let mut _is_unit = true;
                $(
                    let _anything_to_bind_the_macro_variable = $t::IS_RUST_UNIT_TYPE;
                    _is_unit = false;
                )*
                _is_unit
            };

            fn typecheck(
                ty: &InterfaceType,
                types: &InstanceType<'_>,
            ) -> Result<()> {
                typecheck_tuple(ty, types, &[$($t::typecheck),*])
            }
        }

        #[allow(non_snake_case, reason = "macro-generated code")]
        unsafe impl<$($t,)*> Lower for ($($t,)*)
        where $($t: Lower),*
        {
            fn linear_lower_to_flat<U>(
                &self,
                cx: &mut LowerContext<'_, U>,
                ty: InterfaceType,
                _dst: &mut MaybeUninit<Self::Lower>,
            ) -> Result<()> {
                let types = match ty {
                    InterfaceType::Tuple(t) => &cx.types[t].types,
                    _ => bad_type_info(),
                };
                // Lower each element into its `TupleLower` field, pairing
                // elements with their component-level types positionally.
                let ($($t,)*) = self;
                let mut _types = types.iter();
                $(
                    let ty = *_types.next().unwrap_or_else(bad_type_info);
                    $t.linear_lower_to_flat(cx, ty, map_maybe_uninit!(_dst.$t))?;
                )*
                Ok(())
            }

            fn linear_lower_to_memory<U>(
                &self,
                cx: &mut LowerContext<'_, U>,
                ty: InterfaceType,
                mut _offset: usize,
            ) -> Result<()> {
                debug_assert!(_offset % (Self::ALIGN32 as usize) == 0);
                let types = match ty {
                    InterfaceType::Tuple(t) => &cx.types[t].types,
                    _ => bad_type_info(),
                };
                let ($($t,)*) = self;
                let mut _types = types.iter();
                $(
                    let ty = *_types.next().unwrap_or_else(bad_type_info);
                    // `next_field32_size` advances `_offset` past this field,
                    // accounting for alignment, and yields the field's offset.
                    $t.linear_lower_to_memory(cx, ty, $t::ABI.next_field32_size(&mut _offset))?;
                )*
                Ok(())
            }
        }

        #[allow(non_snake_case, reason = "macro-generated code")]
        unsafe impl<$($t,)*> Lift for ($($t,)*)
        where $($t: Lift),*
        {
            #[inline]
            fn linear_lift_from_flat(cx: &mut LiftContext<'_>, ty: InterfaceType, _src: &Self::Lower) -> Result<Self> {
                let types = match ty {
                    InterfaceType::Tuple(t) => &cx.types[t].types,
                    _ => bad_type_info(),
                };
                let mut _types = types.iter();
                Ok(($(
                    $t::linear_lift_from_flat(
                        cx,
                        *_types.next().unwrap_or_else(bad_type_info),
                        &_src.$t,
                    )?,
                )*))
            }

            #[inline]
            fn linear_lift_from_memory(cx: &mut LiftContext<'_>, ty: InterfaceType, bytes: &[u8]) -> Result<Self> {
                debug_assert!((bytes.as_ptr() as usize) % (Self::ALIGN32 as usize) == 0);
                let types = match ty {
                    InterfaceType::Tuple(t) => &cx.types[t].types,
                    _ => bad_type_info(),
                };
                let mut _types = types.iter();
                let mut _offset = 0;
                $(
                    let ty = *_types.next().unwrap_or_else(bad_type_info);
                    // Slice out exactly this field's bytes at its computed
                    // field offset before lifting it.
                    let $t = $t::linear_lift_from_memory(cx, ty, &bytes[$t::ABI.next_field32_size(&mut _offset)..][..$t::SIZE32])?;
                )*
                Ok(($($t,)*))
            }
        }

        #[allow(non_snake_case, reason = "macro-generated code")]
        unsafe impl<$($t,)*> ComponentNamedList for ($($t,)*)
        where $($t: ComponentType),*
        {}
    };
}
3155
// Generate the tuple trait implementations for every arity supported by
// `for_each_function_signature!`.
for_each_function_signature!(impl_component_ty_for_tuples);
3157
3158pub fn desc(ty: &InterfaceType) -> &'static str {
3159 match ty {
3160 InterfaceType::U8 => "u8",
3161 InterfaceType::S8 => "s8",
3162 InterfaceType::U16 => "u16",
3163 InterfaceType::S16 => "s16",
3164 InterfaceType::U32 => "u32",
3165 InterfaceType::S32 => "s32",
3166 InterfaceType::U64 => "u64",
3167 InterfaceType::S64 => "s64",
3168 InterfaceType::Float32 => "f32",
3169 InterfaceType::Float64 => "f64",
3170 InterfaceType::Bool => "bool",
3171 InterfaceType::Char => "char",
3172 InterfaceType::String => "string",
3173 InterfaceType::List(_) => "list",
3174 InterfaceType::Tuple(_) => "tuple",
3175 InterfaceType::Option(_) => "option",
3176 InterfaceType::Result(_) => "result",
3177
3178 InterfaceType::Record(_) => "record",
3179 InterfaceType::Variant(_) => "variant",
3180 InterfaceType::Flags(_) => "flags",
3181 InterfaceType::Enum(_) => "enum",
3182 InterfaceType::Own(_) => "owned resource",
3183 InterfaceType::Borrow(_) => "borrowed resource",
3184 InterfaceType::Future(_) => "future",
3185 InterfaceType::Stream(_) => "stream",
3186 InterfaceType::ErrorContext(_) => "error-context",
3187 InterfaceType::Map(_) => "map",
3188 InterfaceType::FixedLengthList(_) => "list<_, N>",
3189 }
3190}
3191
/// Panics with a generic message.
///
/// Called from `_ =>` arms when an `InterfaceType` doesn't match the variant
/// a `Lift`/`Lower` implementation expects, i.e. a mismatch between the
/// static Rust type and the component type information — a bug in earlier
/// type-checking rather than a user-facing error.
#[cold]
#[doc(hidden)]
pub fn bad_type_info<T>() -> T {
    // NB: should consider something like `unreachable_unchecked` here if this
    // becomes a performance bottleneck at some point, but that also comes with
    // a tradeoff of propagating a lot of unsafety, so it may not be worth it.
    panic!("bad type information detected");
}