wasmtime/runtime/component/func/typed.rs
1use crate::component::Instance;
2use crate::component::func::{Func, LiftContext, LowerContext};
3use crate::component::matching::InstanceType;
4use crate::component::storage::{storage_as_slice, storage_as_slice_mut};
5use crate::hash_map::HashMap;
6use crate::prelude::*;
7use crate::{AsContextMut, StoreContext, StoreContextMut, ValRaw};
8use alloc::borrow::Cow;
9use core::fmt;
10use core::hash::Hash;
11use core::iter;
12use core::marker;
13use core::mem::{self, MaybeUninit};
14use core::str;
15use wasmtime_environ::component::{
16 CanonicalAbiInfo, ComponentTypes, InterfaceType, MAX_FLAT_PARAMS, MAX_FLAT_RESULTS,
17 OptionsIndex, StringEncoding, TypeMap, VariantInfo,
18};
19
20#[cfg(feature = "component-model-async")]
21use crate::component::concurrent::{self, AsAccessor, PreparedCall};
22
/// A statically-typed version of [`Func`] which takes `Params` as input and
/// returns `Return`.
///
/// This is an efficient way to invoke a WebAssembly component where if the
/// inputs and output are statically known this can eschew the vast majority of
/// machinery and checks when calling WebAssembly. This is the most optimized
/// way to call a WebAssembly component.
///
/// Note that like [`Func`] this is a pointer within a [`Store`](crate::Store)
/// and usage will panic if used with the wrong store.
///
/// This type is primarily created with the [`Func::typed`] API.
///
/// See [`ComponentType`] for more information about supported types.
pub struct TypedFunc<Params, Return> {
    // The untyped function handle that this typed view wraps; every call
    // bottoms out in this `Func`.
    func: Func,

    // The definition of this field is somewhat subtle and may be surprising.
    // Naively one might expect something like
    //
    //      _marker: marker::PhantomData<fn(Params) -> Return>,
    //
    // since this is a function pointer after all. The problem with this
    // definition though is that it imposes the wrong variance on `Params` from
    // what we want. Abstractly a `fn(Params)` is able to store `Params` within
    // it meaning you can only give it `Params` that live longer than the
    // function pointer.
    //
    // With a component model function, however, we're always copying data from
    // the host into the guest, so we are never storing pointers to `Params`
    // into the guest outside the duration of a `call`, meaning we can actually
    // accept values in `TypedFunc::call` which live for a shorter duration
    // than the `Params` argument on the struct.
    //
    // This all means that we don't use a phantom function pointer, but instead
    // feign phantom storage here to get the variance desired.
    _marker: marker::PhantomData<(Params, Return)>,
}
61
62impl<Params, Return> Copy for TypedFunc<Params, Return> {}
63
// Like `Copy` above, `Clone` is implemented manually to avoid imposing
// `Clone` bounds on `Params` and `Return`.
impl<Params, Return> Clone for TypedFunc<Params, Return> {
    fn clone(&self) -> TypedFunc<Params, Return> {
        *self
    }
}
69
70impl<Params, Return> TypedFunc<Params, Return>
71where
72 Params: ComponentNamedList + Lower,
73 Return: ComponentNamedList + Lift,
74{
75 /// Creates a new [`TypedFunc`] from the provided component [`Func`],
76 /// unsafely asserting that the underlying function takes `Params` as
77 /// input and returns `Return`.
78 ///
79 /// # Unsafety
80 ///
81 /// This is an unsafe function because it does not verify that the [`Func`]
82 /// provided actually implements this signature. It's up to the caller to
83 /// have performed some other sort of check to ensure that the signature is
84 /// correct.
85 pub unsafe fn new_unchecked(func: Func) -> TypedFunc<Params, Return> {
86 TypedFunc {
87 _marker: marker::PhantomData,
88 func,
89 }
90 }
91
    /// Returns the underlying un-typed [`Func`] that this [`TypedFunc`]
    /// references.
    ///
    /// The returned handle points at the same store-local function; no
    /// conversion or allocation happens here.
    pub fn func(&self) -> &Func {
        &self.func
    }
97
98 /// Calls the underlying WebAssembly component function using the provided
99 /// `params` as input.
100 ///
101 /// This method is used to enter into a component. Execution happens within
102 /// the `store` provided. The `params` are copied into WebAssembly memory
103 /// as appropriate and a core wasm function is invoked.
104 ///
105 /// # Post-return
106 ///
107 /// In the component model each function can have a "post return" specified
108 /// which allows cleaning up the arguments returned to the host. For example
109 /// if WebAssembly returns a string to the host then it might be a uniquely
110 /// allocated string which, after the host finishes processing it, needs to
111 /// be deallocated in the wasm instance's own linear memory to prevent
112 /// memory leaks in wasm itself. The `post-return` canonical abi option is
113 /// used to configured this.
114 ///
115 /// If a post-return function is present, it will be called automatically by
116 /// this function.
117 ///
118 /// # Errors
119 ///
120 /// This function can return an error for a number of reasons:
121 ///
122 /// * If the wasm itself traps during execution.
123 /// * If the wasm traps while copying arguments into memory.
124 /// * If the wasm provides bad allocation pointers when copying arguments
125 /// into memory.
126 /// * If the wasm returns a value which violates the canonical ABI.
127 /// * If this function's instances cannot be entered, for example if the
128 /// instance is currently calling a host function.
129 /// * If `store` requires using [`Self::call_async`] instead, see
130 /// [crate documentation](crate#async) for more info.
131 ///
132 /// In general there are many ways that things could go wrong when copying
133 /// types in and out of a wasm module with the canonical ABI, and certain
134 /// error conditions are specific to certain types. For example a
135 /// WebAssembly module can't return an invalid `char`. When allocating space
136 /// for this host to copy a string into the returned pointer must be
137 /// in-bounds in memory.
138 ///
139 /// If an error happens then the error should contain detailed enough
140 /// information to understand which part of the canonical ABI went wrong
141 /// and what to inspect.
142 ///
143 /// # Panics
144 ///
145 /// Panics if `store` does not own this function.
146 pub fn call(&self, mut store: impl AsContextMut, params: Params) -> Result<Return> {
147 let mut store = store.as_context_mut();
148 store.0.validate_sync_call()?;
149 self.call_impl(store.as_context_mut(), params)
150 }
151
    /// Exactly like [`Self::call`], except for invoking WebAssembly
    /// [asynchronously](crate#async).
    ///
    /// # Panics
    ///
    /// Panics if `store` does not own this function.
    #[cfg(feature = "async")]
    pub async fn call_async(
        &self,
        mut store: impl AsContextMut<Data: Send>,
        params: Params,
    ) -> Result<Return>
    where
        Return: 'static,
    {
        let mut store = store.as_context_mut();

        // When the store has component-model-async concurrency support the
        // call is routed through the concurrent task machinery instead of the
        // fiber-based fallback below.
        #[cfg(feature = "component-model-async")]
        if store.0.concurrency_support() {
            use crate::component::concurrent::TaskId;
            use crate::runtime::vm::SendSyncPtr;
            use core::ptr::NonNull;

            let ptr = SendSyncPtr::from(NonNull::from(&params).cast::<u8>());
            let prepared =
                self.prepare_call(store.as_context_mut(), true, move |cx, ty, dst| {
                    // SAFETY: The goal here is to get `Params`, a non-`'static`
                    // value, to live long enough to the lowering of the
                    // parameters. We're guaranteed that `Params` lives in the
                    // future of the outer function (we're in an `async fn`) so it'll
                    // stay alive as long as the future itself. That is distinct,
                    // for example, from the signature of `call_concurrent` below.
                    //
                    // Here a pointer to `Params` is smuggled to this location
                    // through a `SendSyncPtr<u8>` to thwart the `'static` check
                    // of rustc and the signature of `prepare_call`.
                    //
                    // Note the use of `SignalOnDrop` in the code that follows
                    // this closure, which ensures that the task will be removed
                    // from the concurrent state to which it belongs when the
                    // containing `Future` is dropped, so long as the parameters
                    // have not yet been lowered. Since this closure is removed from
                    // the task after the parameters are lowered, it will never be called
                    // after the containing `Future` is dropped.
                    let params = unsafe { ptr.cast::<Params>().as_ref() };
                    Self::lower_args(cx, ty, dst, params)
                })?;

            // Guard type which notifies the concurrent state that the host
            // future was dropped (see the SAFETY comment above for why this is
            // load-bearing).
            struct SignalOnDrop<'a, T: 'static> {
                store: StoreContextMut<'a, T>,
                task: TaskId,
            }

            impl<'a, T> Drop for SignalOnDrop<'a, T> {
                fn drop(&mut self) {
                    self.task
                        .host_future_dropped(self.store.as_context_mut())
                        .unwrap();
                }
            }

            let mut wrapper = SignalOnDrop {
                store,
                task: prepared.task_id(),
            };

            let result = concurrent::queue_call(wrapper.store.as_context_mut(), prepared)?;
            return wrapper
                .store
                .as_context_mut()
                .run_concurrent_trap_on_idle(async |_| Ok(result.await?))
                .await?;
        }

        // Fallback path without concurrency support: run the synchronous call
        // implementation on a fiber so the surrounding future stays async.
        store
            .on_fiber(|store| self.call_impl(store, params))
            .await?
    }
230
231 /// Start a concurrent call to this function.
232 ///
233 /// Concurrency is achieved by relying on the [`Accessor`] argument, which
234 /// can be obtained by calling [`StoreContextMut::run_concurrent`].
235 ///
236 /// Unlike [`Self::call`] and [`Self::call_async`] (both of which require
237 /// exclusive access to the store until the completion of the call), calls
238 /// made using this method may run concurrently with other calls to the same
239 /// instance. In addition, the runtime will call the `post-return` function
240 /// (if any) automatically when the guest task completes.
241 ///
242 /// This function will return an error if [`Config::concurrency_support`] is
243 /// disabled.
244 ///
245 /// [`Config::concurrency_support`]: crate::Config::concurrency_support
246 ///
247 /// # Progress and Cancellation
248 ///
249 /// For more information about how to make progress on the wasm task or how
250 /// to cancel the wasm task see the documentation for
251 /// [`Func::call_concurrent`].
252 ///
253 /// [`Func::call_concurrent`]: crate::component::Func::call_concurrent
254 ///
255 /// # Panics
256 ///
257 /// Panics if the store that the [`Accessor`] is derived from does not own
258 /// this function.
259 ///
260 /// [`Accessor`]: crate::component::Accessor
261 ///
262 /// # Example
263 ///
264 /// Using [`StoreContextMut::run_concurrent`] to get an [`Accessor`]:
265 ///
266 /// ```
267 /// # use {
268 /// # wasmtime::{
269 /// # error::{Result},
270 /// # component::{Component, Linker, ResourceTable},
271 /// # Config, Engine, Store
272 /// # },
273 /// # };
274 /// #
275 /// # struct Ctx { table: ResourceTable }
276 /// #
277 /// # async fn foo() -> Result<()> {
278 /// # let mut config = Config::new();
279 /// # let engine = Engine::new(&config)?;
280 /// # let mut store = Store::new(&engine, Ctx { table: ResourceTable::new() });
281 /// # let mut linker = Linker::new(&engine);
282 /// # let component = Component::new(&engine, "")?;
283 /// # let instance = linker.instantiate_async(&mut store, &component).await?;
284 /// let my_typed_func = instance.get_typed_func::<(), ()>(&mut store, "my_typed_func")?;
285 /// store.run_concurrent(async |accessor| -> wasmtime::Result<_> {
286 /// my_typed_func.call_concurrent(accessor, ()).await?;
287 /// Ok(())
288 /// }).await??;
289 /// # Ok(())
290 /// # }
291 /// ```
292 #[cfg(feature = "component-model-async")]
293 pub async fn call_concurrent(
294 self,
295 accessor: impl AsAccessor<Data: Send>,
296 params: Params,
297 ) -> Result<Return>
298 where
299 Params: 'static,
300 Return: 'static,
301 {
302 let result = accessor.as_accessor().with(|mut store| {
303 let mut store = store.as_context_mut();
304 ensure!(
305 store.0.concurrency_support(),
306 "cannot use `call_concurrent` Config::concurrency_support disabled",
307 );
308
309 let prepared =
310 self.prepare_call(store.as_context_mut(), false, move |cx, ty, dst| {
311 Self::lower_args(cx, ty, dst, ¶ms)
312 })?;
313 concurrent::queue_call(store, prepared)
314 });
315 Ok(result?.await?)
316 }
317
318 fn lower_args<T>(
319 cx: &mut LowerContext<T>,
320 ty: InterfaceType,
321 dst: &mut [MaybeUninit<ValRaw>],
322 params: &Params,
323 ) -> Result<()> {
324 use crate::component::storage::slice_to_storage_mut;
325
326 if Params::flatten_count() <= MAX_FLAT_PARAMS {
327 // SAFETY: the safety of `slice_to_storage_mut` relies on
328 // `Params::Lower` being represented by a sequence of
329 // `ValRaw`, and that's a guarantee upheld by the `Lower`
330 // trait itself.
331 let dst: &mut MaybeUninit<Params::Lower> = unsafe { slice_to_storage_mut(dst) };
332 Self::lower_stack_args(cx, ¶ms, ty, dst)
333 } else {
334 Self::lower_heap_args(cx, ¶ms, ty, &mut dst[0])
335 }
336 }
337
    /// Calls `concurrent::prepare_call` with monomorphized functions for
    /// lowering the parameters and lifting the result according to the number
    /// of core Wasm parameters and results in the signature of the function to
    /// be called.
    #[cfg(feature = "component-model-async")]
    fn prepare_call<T>(
        self,
        store: StoreContextMut<'_, T>,
        host_future_present: bool,
        lower: impl FnOnce(
            &mut LowerContext<T>,
            InterfaceType,
            &mut [MaybeUninit<ValRaw>],
        ) -> Result<()>
        + Send
        + Sync
        + 'static,
    ) -> Result<PreparedCall<Return>>
    where
        Return: 'static,
    {
        use crate::component::storage::slice_to_storage;
        debug_assert!(store.0.concurrency_support());

        // Number of core wasm values used to pass parameters: either the
        // flattened count itself, or 1 (a pointer into linear memory) when the
        // flattened form exceeds `MAX_FLAT_PARAMS`.
        let param_count = if Params::flatten_count() <= MAX_FLAT_PARAMS {
            Params::flatten_count()
        } else {
            1
        };
        // Async-lifted exports use the params budget for flat results.
        let max_results = if self.func.abi_async(store.0) {
            MAX_FLAT_PARAMS
        } else {
            MAX_FLAT_RESULTS
        };
        concurrent::prepare_call(
            store,
            self.func,
            param_count,
            host_future_present,
            // Lowering closure: invoked when the parameters are actually
            // copied into the guest.
            move |func, store, params_out| {
                func.with_lower_context(store, |cx, ty| lower(cx, ty, params_out))
            },
            // Lifting closure: invoked when the guest task completes to read
            // the results back out.
            move |func, store, results| {
                let result = if Return::flatten_count() <= max_results {
                    func.with_lift_context(store, |cx, ty| {
                        // SAFETY: Per the safety requirements documented for the
                        // `ComponentType` trait, `Return::Lower` must be
                        // compatible at the binary level with a `[ValRaw; N]`,
                        // where `N` is `mem::size_of::<Return::Lower>() /
                        // mem::size_of::<ValRaw>()`. And since this function
                        // is only used when `Return::flatten_count() <=
                        // MAX_FLAT_RESULTS` and `MAX_FLAT_RESULTS == 1`, `N`
                        // can only either be 0 or 1.
                        //
                        // See `ComponentInstance::exit_call` for where we use
                        // the result count passed from
                        // `wasmtime_environ::fact::trampoline`-generated code
                        // to ensure the slice has the correct length, and also
                        // `concurrent::start_call` for where we conservatively
                        // use a slice length of 1 unconditionally. Also note
                        // that, as of this writing `slice_to_storage`
                        // double-checks the slice length is sufficient.
                        let results: &Return::Lower = unsafe { slice_to_storage(results) };
                        Self::lift_stack_result(cx, ty, results)
                    })?
                } else {
                    func.with_lift_context(store, |cx, ty| {
                        Self::lift_heap_result(cx, ty, &results[0])
                    })?
                };
                Ok(Box::new(result))
            },
        )
    }
412
    /// Synchronous call path shared by `call` and the fiber-based branch of
    /// `call_async`: lowers `params`, invokes the core wasm function via
    /// `call_raw`, lifts the result, and runs `post-return`.
    fn call_impl(&self, mut store: impl AsContextMut, params: Params) -> Result<Return> {
        let mut store = store.as_context_mut();

        // Async-lifted exports cannot be entered through this synchronous
        // path; they require the component-model-async machinery.
        if self.func.abi_async(store.0) {
            bail!("must enable the `component-model-async` feature to call async-lifted exports")
        }

        // Note that this is in theory simpler than it might read at this time.
        // Here we're doing a runtime dispatch on the `flatten_count` for the
        // params/results to see whether they're inbounds. This creates 4 cases
        // to handle. In reality this is a highly optimizable branch where LLVM
        // will easily figure out that only one branch here is taken.
        //
        // Otherwise this current construction is done to ensure that the stack
        // space reserved for the params/results is always of the appropriate
        // size (as the params/results needed differ depending on the "flatten"
        // count)
        //
        // SAFETY: the safety of these invocations of `call_raw` depends on the
        // correctness of the ascription of the `LowerParams` and `LowerReturn`
        // types on the `call_raw` function. That's upheld here through the
        // safety requirements of `Lift` and `Lower` on `Params` and `Return` in
        // combination with checking the various possible branches here and
        // dispatching to appropriately typed functions.
        let (result, post_return_arg) = unsafe {
            // This type is used as `LowerParams` for `call_raw` which is either
            // `Params::Lower` or `ValRaw` representing it's either on the stack
            // or it's on the heap. This allocates 1 extra `ValRaw` on the stack
            // if `Params` is empty and `Return` is also empty, but that's a
            // reasonable enough price to pay for now given the current code
            // organization.
            #[derive(Copy, Clone)]
            union Union<T: Copy, U: Copy> {
                _a: T,
                _b: U,
            }

            if Return::flatten_count() <= MAX_FLAT_RESULTS {
                self.func.call_raw(
                    store.as_context_mut(),
                    |cx, ty, dst: &mut MaybeUninit<Union<Params::Lower, ValRaw>>| {
                        let dst = storage_as_slice_mut(dst);
                        Self::lower_args(cx, ty, dst, &params)
                    },
                    Self::lift_stack_result,
                )
            } else {
                self.func.call_raw(
                    store.as_context_mut(),
                    |cx, ty, dst: &mut MaybeUninit<Union<Params::Lower, ValRaw>>| {
                        let dst = storage_as_slice_mut(dst);
                        Self::lower_args(cx, ty, dst, &params)
                    },
                    Self::lift_heap_result,
                )
            }
        }?;

        // Let the guest clean up any allocations backing the return value
        // (e.g. returned strings/lists) before handing the result back.
        self.func.post_return_impl(store, post_return_arg)?;

        Ok(result)
    }
475
476 /// Lower parameters directly onto the stack specified by the `dst`
477 /// location.
478 ///
479 /// This is only valid to call when the "flatten count" is small enough, or
480 /// when the canonical ABI says arguments go through the stack rather than
481 /// the heap.
482 fn lower_stack_args<T>(
483 cx: &mut LowerContext<'_, T>,
484 params: &Params,
485 ty: InterfaceType,
486 dst: &mut MaybeUninit<Params::Lower>,
487 ) -> Result<()> {
488 assert!(Params::flatten_count() <= MAX_FLAT_PARAMS);
489 params.linear_lower_to_flat(cx, ty, dst)?;
490 Ok(())
491 }
492
493 /// Lower parameters onto a heap-allocated location.
494 ///
495 /// This is used when the stack space to be used for the arguments is above
496 /// the `MAX_FLAT_PARAMS` threshold. Here the wasm's `realloc` function is
497 /// invoked to allocate space and then parameters are stored at that heap
498 /// pointer location.
499 fn lower_heap_args<T>(
500 cx: &mut LowerContext<'_, T>,
501 params: &Params,
502 ty: InterfaceType,
503 dst: &mut MaybeUninit<ValRaw>,
504 ) -> Result<()> {
505 // Memory must exist via validation if the arguments are stored on the
506 // heap, so we can create a `MemoryMut` at this point. Afterwards
507 // `realloc` is used to allocate space for all the arguments and then
508 // they're all stored in linear memory.
509 //
510 // Note that `realloc` will bake in a check that the returned pointer is
511 // in-bounds.
512 let ptr = cx.realloc(0, 0, Params::ALIGN32, Params::SIZE32)?;
513 params.linear_lower_to_memory(cx, ty, ptr)?;
514
515 // Note that the pointer here is stored as a 64-bit integer. This allows
516 // this to work with either 32 or 64-bit memories. For a 32-bit memory
517 // it'll just ignore the upper 32 zero bits, and for 64-bit memories
518 // this'll have the full 64-bits. Note that for 32-bit memories the call
519 // to `realloc` above guarantees that the `ptr` is in-bounds meaning
520 // that we will know that the zero-extended upper bits of `ptr` are
521 // guaranteed to be zero.
522 //
523 // This comment about 64-bit integers is also referred to below with
524 // "WRITEPTR64".
525 dst.write(ValRaw::i64(ptr as i64));
526
527 Ok(())
528 }
529
    /// Lift the result of a function directly from the stack result.
    ///
    /// This is only used when the result fits in the maximum number of stack
    /// slots; it simply delegates to the `Lift` implementation of `Return`.
    fn lift_stack_result(
        cx: &mut LiftContext<'_>,
        ty: InterfaceType,
        dst: &Return::Lower,
    ) -> Result<Return> {
        Return::linear_lift_from_flat(cx, ty, dst)
    }
541
542 /// Lift the result of a function where the result is stored indirectly on
543 /// the heap.
544 fn lift_heap_result(
545 cx: &mut LiftContext<'_>,
546 ty: InterfaceType,
547 dst: &ValRaw,
548 ) -> Result<Return> {
549 assert!(Return::flatten_count() > MAX_FLAT_RESULTS);
550 // FIXME(#4311): needs to read an i64 for memory64
551 let ptr = usize::try_from(dst.get_u32())?;
552 if ptr % usize::try_from(Return::ALIGN32)? != 0 {
553 bail!("return pointer not aligned");
554 }
555
556 let bytes = cx
557 .memory()
558 .get(ptr..)
559 .and_then(|b| b.get(..Return::SIZE32))
560 .ok_or_else(|| crate::format_err!("pointer out of bounds of memory"))?;
561 Return::linear_lift_from_memory(cx, ty, bytes)
562 }
563
    // Retained only for backwards compatibility: post-return is now invoked
    // automatically as part of `call`/`call_async`, so this is a no-op.
    #[doc(hidden)]
    #[deprecated(note = "no longer needs to be called; this function has no effect")]
    pub fn post_return(&self, _store: impl AsContextMut) -> Result<()> {
        Ok(())
    }
569
    // Async twin of the deprecated `post_return` above; also a no-op kept only
    // for backwards compatibility.
    #[doc(hidden)]
    #[deprecated(note = "no longer needs to be called; this function has no effect")]
    #[cfg(feature = "async")]
    pub async fn post_return_async<T: Send>(
        &self,
        _store: impl AsContextMut<Data = T>,
    ) -> Result<()> {
        Ok(())
    }
579}
580
/// A trait representing a static list of named types that can be passed to or
/// returned from a [`TypedFunc`].
///
/// This trait is implemented for a number of tuple types and is not expected
/// to be implemented externally. The contents of this trait are hidden as it's
/// intended to be an implementation detail of Wasmtime. The contents of this
/// trait are not covered by Wasmtime's stability guarantees.
///
/// For more information about this trait see [`Func::typed`] and
/// [`TypedFunc`].
//
// Note that this is an `unsafe` trait, and the unsafety means that
// implementations of this trait must be correct or otherwise [`TypedFunc`]
// would not be memory safe. The main reason this is `unsafe` is the
// `typecheck` function which must operate correctly relative to the `AsTuple`
// interpretation of the implementor.
pub unsafe trait ComponentNamedList: ComponentType {}
598
599/// A trait representing types which can be passed to and read from components
600/// with the canonical ABI.
601///
602/// This trait is implemented for Rust types which can be communicated to
603/// components. The [`Func::typed`] and [`TypedFunc`] Rust items are the main
604/// consumers of this trait.
605///
606/// Supported Rust types include:
607///
608/// | Component Model Type | Rust Type |
609/// |-----------------------------------|--------------------------------------|
610/// | `{s,u}{8,16,32,64}` | `{i,u}{8,16,32,64}` |
611/// | `f{32,64}` | `f{32,64}` |
612/// | `bool` | `bool` |
613/// | `char` | `char` |
614/// | `tuple<A, B>` | `(A, B)` |
615/// | `option<T>` | `Option<T>` |
616/// | `result` | `Result<(), ()>` |
617/// | `result<T>` | `Result<T, ()>` |
618/// | `result<_, E>` | `Result<(), E>` |
619/// | `result<T, E>` | `Result<T, E>` |
620/// | `string` | `String`, `&str`, or [`WasmStr`] |
621/// | `list<T>` | `Vec<T>`, `&[T]`, or [`WasmList`] |
622/// | `map<K, V>` | `HashMap<K, V>` |
623/// | `own<T>`, `borrow<T>` | [`Resource<T>`] or [`ResourceAny`] |
624/// | `record` | [`#[derive(ComponentType)]`][d-cm] |
625/// | `variant` | [`#[derive(ComponentType)]`][d-cm] |
626/// | `enum` | [`#[derive(ComponentType)]`][d-cm] |
627/// | `flags` | [`flags!`][f-m] |
628///
629/// [`Resource<T>`]: crate::component::Resource
630/// [`ResourceAny`]: crate::component::ResourceAny
631/// [d-cm]: macro@crate::component::ComponentType
632/// [f-m]: crate::component::flags
633///
634/// Rust standard library pointers such as `&T`, `Box<T>`, and `Arc<T>`
635/// additionally represent whatever type `T` represents in the component model.
636/// Note that types such as `record`, `variant`, `enum`, and `flags` are
637/// generated by the embedder at compile time. These macros derive
638/// implementation of this trait for custom types to map to custom types in the
639/// component model. Note that for `record`, `variant`, `enum`, and `flags`
640/// those types are often generated by the
641/// [`bindgen!`](crate::component::bindgen) macro from WIT definitions.
642///
643/// Types that implement [`ComponentType`] are used for `Params` and `Return`
644/// in [`TypedFunc`] and [`Func::typed`].
645///
646/// The contents of this trait are hidden as it's intended to be an
647/// implementation detail of Wasmtime. The contents of this trait are not
648/// covered by Wasmtime's stability guarantees.
649///
650/// # Safety
651///
652/// Note that this is an `unsafe` trait as `TypedFunc`'s safety heavily relies on
653/// the correctness of the implementations of this trait. Some ways in which this
654/// trait must be correct to be safe are:
655///
656/// * The `Lower` associated type must be a `ValRaw` sequence. It doesn't have to
657/// literally be `[ValRaw; N]` but when laid out in memory it must be adjacent
658/// `ValRaw` values and have a multiple of the size of `ValRaw` and the same
659/// alignment.
660///
661/// * The `lower` function must initialize the bits within `Lower` that are going
662/// to be read by the trampoline that's used to enter core wasm. A trampoline
663/// is passed `*mut Lower` and will read the canonical abi arguments in
664/// sequence, so all of the bits must be correctly initialized.
665///
666/// * The `size` and `align` functions must be correct for this value stored in
667/// the canonical ABI. The `Cursor<T>` iteration of these bytes rely on this
668/// for correctness as they otherwise eschew bounds-checking.
669///
670/// There are likely some other correctness issues which aren't documented as
671/// well, this isn't currently an exhaustive list. It suffices to say, though,
672/// that correctness bugs in this trait implementation are highly likely to
673/// lead to security bugs, which again leads to the `unsafe` in the trait.
674///
675/// Note that this trait specifically is not sealed because `bindgen!`-generated
676/// types must be able to implement this trait using a `#[derive]` macro. For
677/// users it's recommended to not implement this trait manually given the
678/// non-exhaustive list of safety requirements that must be upheld. This trait
679/// is implemented at your own risk if you do so.
680///
681/// # Send and Sync
682///
683/// While on the topic of safety it's worth discussing the `Send` and `Sync`
684/// bounds here as well. These bounds might naively seem like they shouldn't be
685/// required for all component types as they're host-level types not guest-level
686/// types persisted anywhere. Various subtleties lead to these bounds, however:
687///
688/// * Fibers require that all stack-local variables are `Send` and `Sync` for
689/// fibers themselves to be send/sync. Unfortunately we have no help from the
690/// compiler on this one so it's up to Wasmtime's discipline to maintain this.
691/// One instance of this is that return values are placed on the stack as
692/// they're lowered into guest memory. This lowering operation can involve
693/// malloc and context switches, so return values must be Send/Sync.
694///
695/// * In the implementation of component model async it's not uncommon for types
696/// to be "buffered" in the store temporarily. For example parameters might
697/// reside in a store temporarily while wasm has backpressure turned on.
698///
699/// Overall it's generally easiest to require `Send` and `Sync` for all
700/// component types. There additionally aren't known use case for non-`Send` or
701/// non-`Sync` types at this time.
pub unsafe trait ComponentType: Send + Sync {
    /// Representation of the "lowered" form of this component value.
    ///
    /// Lowerings lower into core wasm values which are represented by `ValRaw`.
    /// This `Lower` type must be a list of `ValRaw` as either a literal array
    /// or a struct where every field is a `ValRaw`. This must be `Copy` (as
    /// `ValRaw` is `Copy`) and support all byte patterns. This being correct is
    /// one reason why the trait is unsafe.
    #[doc(hidden)]
    type Lower: Copy;

    /// The information about this type's canonical ABI (size/align/etc).
    #[doc(hidden)]
    const ABI: CanonicalAbiInfo;

    /// Size in bytes of this type in the 32-bit canonical ABI, derived from
    /// `ABI`.
    #[doc(hidden)]
    const SIZE32: usize = Self::ABI.size32 as usize;
    /// Alignment in bytes of this type in the 32-bit canonical ABI, derived
    /// from `ABI`.
    #[doc(hidden)]
    const ALIGN32: u32 = Self::ABI.align32;

    /// Whether this Rust type is the unit type `()`.
    // NOTE(review): presumably consulted to special-case empty
    // parameter/result lists — confirm at the use sites, which are not
    // visible in this file section.
    #[doc(hidden)]
    const IS_RUST_UNIT_TYPE: bool = false;

    /// Whether this type might require a call to the guest's realloc function
    /// to allocate linear memory when lowering (e.g. a non-empty `string`).
    ///
    /// If this is `false`, Wasmtime may optimize lowering by using
    /// `LowerContext::new_without_realloc` and lowering values outside of any
    /// fiber. That will panic if the lowering process ends up needing realloc
    /// after all, so `true` is a conservative default.
    #[doc(hidden)]
    const MAY_REQUIRE_REALLOC: bool = true;

    /// Returns the number of core wasm abi values that will be used to
    /// represent this type in its lowered form.
    ///
    /// This divides the size of `Self::Lower` by the size of `ValRaw`, after
    /// asserting `Self::Lower` has the layout of a `ValRaw` sequence.
    #[doc(hidden)]
    fn flatten_count() -> usize {
        assert!(mem::size_of::<Self::Lower>() % mem::size_of::<ValRaw>() == 0);
        assert!(mem::align_of::<Self::Lower>() == mem::align_of::<ValRaw>());
        mem::size_of::<Self::Lower>() / mem::size_of::<ValRaw>()
    }

    /// Performs a type-check to see whether this component value type matches
    /// the interface type `ty` provided.
    #[doc(hidden)]
    fn typecheck(ty: &InterfaceType, types: &InstanceType<'_>) -> Result<()>;
}
751
#[doc(hidden)]
pub unsafe trait ComponentVariant: ComponentType {
    /// Canonical ABI info for each case's payload, `None` for cases that
    /// carry no payload.
    const CASES: &'static [Option<CanonicalAbiInfo>];
    /// Variant layout information computed from `CASES` in a const context.
    const INFO: VariantInfo = VariantInfo::new_static(Self::CASES);
    /// Byte offset of the payload within the 32-bit canonical ABI
    /// representation, taken from `INFO`.
    const PAYLOAD_OFFSET32: usize = Self::INFO.payload_offset32 as usize;
}
758
759/// Host types which can be passed to WebAssembly components.
760///
761/// This trait is implemented for all types that can be passed to components
762/// either as parameters of component exports or returns of component imports.
763/// This trait represents the ability to convert from the native host
764/// representation to the canonical ABI.
765///
766/// Built-in types to Rust such as `Option<T>` implement this trait as
767/// appropriate. For a mapping of component model to Rust types see
768/// [`ComponentType`].
769///
770/// For user-defined types, for example `record` types mapped to Rust `struct`s,
771/// this crate additionally has
772/// [`#[derive(Lower)]`](macro@crate::component::Lower).
773///
774/// Note that like [`ComponentType`] the definition of this trait is intended to
775/// be an internal implementation detail of Wasmtime at this time. It's
776/// recommended to use the `#[derive(Lower)]` implementation instead.
777pub unsafe trait Lower: ComponentType {
778 /// Performs the "lower" function in the linear memory version of the
779 /// canonical ABI.
780 ///
781 /// This method will lower the current value into a component. The `lower`
782 /// function performs a "flat" lowering into the `dst` specified which is
783 /// allowed to be uninitialized entering this method but is guaranteed to be
784 /// fully initialized if the method returns `Ok(())`.
785 ///
786 /// The `cx` context provided is the context within which this lowering is
787 /// happening. This contains information such as canonical options specified
788 /// (e.g. string encodings, memories, etc), the store itself, along with
789 /// type information.
790 ///
791 /// The `ty` parameter is the destination type that is being lowered into.
792 /// For example this is the component's "view" of the type that is being
793 /// lowered. This is guaranteed to have passed a `typecheck` earlier.
794 ///
795 /// This will only be called if `typecheck` passes for `Op::Lower`.
796 #[doc(hidden)]
797 fn linear_lower_to_flat<T>(
798 &self,
799 cx: &mut LowerContext<'_, T>,
800 ty: InterfaceType,
801 dst: &mut MaybeUninit<Self::Lower>,
802 ) -> Result<()>;
803
804 /// Performs the "store" operation in the linear memory version of the
805 /// canonical ABI.
806 ///
807 /// This function will store `self` into the linear memory described by
808 /// `cx` at the `offset` provided.
809 ///
810 /// It is expected that `offset` is a valid offset in memory for
811 /// `Self::SIZE32` bytes. At this time that's not an unsafe contract as it's
812 /// always re-checked on all stores, but this is something that will need to
813 /// be improved in the future to remove extra bounds checks. For now this
814 /// function will panic if there's a bug and `offset` isn't valid within
815 /// memory.
816 ///
817 /// The `ty` type information passed here is the same as the type
818 /// information passed to `lower` above, and is the component's own view of
819 /// what the resulting type should be.
820 ///
821 /// This will only be called if `typecheck` passes for `Op::Lower`.
822 #[doc(hidden)]
823 fn linear_lower_to_memory<T>(
824 &self,
825 cx: &mut LowerContext<'_, T>,
826 ty: InterfaceType,
827 offset: usize,
828 ) -> Result<()>;
829
830 /// Provided method to lower a list of `Self` into memory.
831 ///
832 /// Requires that `offset` has already been checked for alignment and
833 /// validity in terms of being in-bounds, otherwise this may panic.
834 ///
835 /// This is primarily here to get overridden for implementations of integers
836 /// which can avoid some extra fluff and use a pattern that's more easily
837 /// optimizable by LLVM.
838 #[doc(hidden)]
839 fn linear_store_list_to_memory<T>(
840 cx: &mut LowerContext<'_, T>,
841 ty: InterfaceType,
842 mut offset: usize,
843 items: &[Self],
844 ) -> Result<()>
845 where
846 Self: Sized,
847 {
848 for item in items {
849 item.linear_lower_to_memory(cx, ty, offset)?;
850 offset += Self::SIZE32;
851 }
852 Ok(())
853 }
854}
855
856/// Host types which can be created from the canonical ABI.
857///
858/// This is the mirror of the [`Lower`] trait where it represents the capability
859/// of acquiring items from WebAssembly and passing them to the host.
860///
861/// Built-in types to Rust such as `Option<T>` implement this trait as
862/// appropriate. For a mapping of component model to Rust types see
863/// [`ComponentType`].
864///
865/// For user-defined types, for example `record` types mapped to Rust `struct`s,
866/// this crate additionally has
867/// [`#[derive(Lift)]`](macro@crate::component::Lift).
868///
869/// Note that like [`ComponentType`] the definition of this trait is intended to
870/// be an internal implementation detail of Wasmtime at this time. It's
871/// recommended to use the `#[derive(Lift)]` implementation instead.
872pub unsafe trait Lift: Sized + ComponentType {
873 /// Performs the "lift" operation in the linear memory version of the
874 /// canonical ABI.
875 ///
876 /// This function performs a "flat" lift operation from the `src` specified
877 /// which is a sequence of core wasm values. The lifting operation will
878 /// validate core wasm values and produce a `Self` on success.
879 ///
880 /// The `cx` provided contains contextual information such as the store
881 /// that's being loaded from, canonical options, and type information.
882 ///
883 /// The `ty` parameter is the origin component's specification for what the
884 /// type that is being lifted is. For example this is the record type or the
885 /// resource type that is being lifted.
886 ///
887 /// Note that this has a default implementation but if `typecheck` passes
888 /// for `Op::Lift` this needs to be overridden.
889 #[doc(hidden)]
890 fn linear_lift_from_flat(
891 cx: &mut LiftContext<'_>,
892 ty: InterfaceType,
893 src: &Self::Lower,
894 ) -> Result<Self>;
895
896 /// Performs the "load" operation in the linear memory version of the
897 /// canonical ABI.
898 ///
899 /// This will read the `bytes` provided, which are a sub-slice into the
900 /// linear memory described by `cx`. The `bytes` array provided is
901 /// guaranteed to be `Self::SIZE32` bytes large. All of memory is then also
902 /// available through `cx` for bounds-checks and such as necessary for
903 /// strings/lists.
904 ///
905 /// The `ty` argument is the type that's being loaded, as described by the
906 /// original component.
907 ///
908 /// Note that this has a default implementation but if `typecheck` passes
909 /// for `Op::Lift` this needs to be overridden.
910 #[doc(hidden)]
911 fn linear_lift_from_memory(
912 cx: &mut LiftContext<'_>,
913 ty: InterfaceType,
914 bytes: &[u8],
915 ) -> Result<Self>;
916
917 /// Converts `list` into a `Vec<T>`, used in `Lift for Vec<T>`.
918 #[doc(hidden)]
919 fn linear_lift_list_from_memory(
920 cx: &mut LiftContext<'_>,
921 list: &WasmList<Self>,
922 ) -> Result<Vec<Self>>
923 where
924 Self: Sized,
925 {
926 let mut dst = Vec::with_capacity(list.len);
927 Self::linear_lift_into_from_memory(cx, list, &mut dst)?;
928 Ok(dst)
929 }
930
931 /// Load no more than `max_count` items from `list` into `dst`.
932 ///
933 /// This is primarily here to get overridden for implementations of integers
934 /// which can avoid some extra fluff and use a pattern that's more easily
935 /// optimizable by LLVM.
936 #[doc(hidden)]
937 fn linear_lift_into_from_memory(
938 cx: &mut LiftContext<'_>,
939 list: &WasmList<Self>,
940 dst: &mut impl Extend<Self>,
941 ) -> Result<()>
942 where
943 Self: Sized,
944 {
945 for i in 0..list.len {
946 dst.extend(Some(list.get_from_store(cx, i).unwrap()?));
947 }
948 Ok(())
949 }
950}
951
// Macro to help generate "forwarding implementations" of `ComponentType` to
// another type, used for wrappers in Rust like `&T`, `Box<T>`, etc. Note that
// these wrappers only implement lowering because lifting native Rust types
// cannot be done.
macro_rules! forward_type_impls {
    ($(($($generics:tt)*) $a:ty => $b:ty,)*) => ($(
        // The wrapper `$a` reuses `$b`'s lowered representation, ABI layout,
        // and type-check verbatim.
        unsafe impl <$($generics)*> ComponentType for $a {
            type Lower = <$b as ComponentType>::Lower;

            const ABI: CanonicalAbiInfo = <$b as ComponentType>::ABI;

            #[inline]
            fn typecheck(ty: &InterfaceType, types: &InstanceType<'_>) -> Result<()> {
                <$b as ComponentType>::typecheck(ty, types)
            }
        }
    )*)
}
970
// Borrowed/owning wrappers share the type representation of what they contain:
// `String` is represented like `str` and `Vec<T>` like `[T]`.
forward_type_impls! {
    (T: ComponentType + ?Sized) &'_ T => T,
    (T: ComponentType + ?Sized) Box<T> => T,
    (T: ComponentType + ?Sized) alloc::sync::Arc<T> => T,
    () String => str,
    (T: ComponentType) Vec<T> => [T],
}
978
// Macro generating `Lower` implementations which simply delegate both the
// flat and the in-memory lowering to another type's implementation.
macro_rules! forward_lowers {
    ($(($($generics:tt)*) $a:ty => $b:ty,)*) => ($(
        unsafe impl <$($generics)*> Lower for $a {
            fn linear_lower_to_flat<U>(
                &self,
                cx: &mut LowerContext<'_, U>,
                ty: InterfaceType,
                dst: &mut MaybeUninit<Self::Lower>,
            ) -> Result<()> {
                <$b as Lower>::linear_lower_to_flat(self, cx, ty, dst)
            }

            fn linear_lower_to_memory<U>(
                &self,
                cx: &mut LowerContext<'_, U>,
                ty: InterfaceType,
                offset: usize,
            ) -> Result<()> {
                <$b as Lower>::linear_lower_to_memory(self, cx, ty, offset)
            }
        }
    )*)
}
1002
// The same set of wrappers forward their lowering to the contained type.
forward_lowers! {
    (T: Lower + ?Sized) &'_ T => T,
    (T: Lower + ?Sized) Box<T> => T,
    (T: Lower + ?Sized) alloc::sync::Arc<T> => T,
    () String => str,
    (T: Lower) Vec<T> => [T],
}
1010
// Macro generating `Lift` for owned string containers: first lift a `WasmStr`
// view of the guest string, then decode it from guest memory into the host's
// owned representation.
macro_rules! forward_string_lifts {
    ($($a:ty,)*) => ($(
        unsafe impl Lift for $a {
            #[inline]
            fn linear_lift_from_flat(cx: &mut LiftContext<'_>, ty: InterfaceType, src: &Self::Lower) -> Result<Self> {
                let s = <WasmStr as Lift>::linear_lift_from_flat(cx, ty, src)?;
                let encoding = cx.options().string_encoding;
                Ok(s.to_str_from_memory(encoding, cx.memory())?.into())
            }

            #[inline]
            fn linear_lift_from_memory(cx: &mut LiftContext<'_>, ty: InterfaceType, bytes: &[u8]) -> Result<Self> {
                let s = <WasmStr as Lift>::linear_lift_from_memory(cx, ty, bytes)?;
                let encoding = cx.options().string_encoding;
                Ok(s.to_str_from_memory(encoding, cx.memory())?.into())
            }
        }
    )*)
}
1030
// Owned string containers all lift through a decoded `WasmStr`.
forward_string_lifts! {
    Box<str>,
    alloc::sync::Arc<str>,
    String,
}
1036
// Macro generating `Lift` for owned list containers: first lift a
// `WasmList<T>` view of the guest list, then lift its elements into a `Vec`
// and convert that into the destination container.
macro_rules! forward_list_lifts {
    ($($a:ty,)*) => ($(
        unsafe impl <T: Lift> Lift for $a {
            fn linear_lift_from_flat(cx: &mut LiftContext<'_>, ty: InterfaceType, src: &Self::Lower) -> Result<Self> {
                let list = <WasmList::<T> as Lift>::linear_lift_from_flat(cx, ty, src)?;
                Ok(T::linear_lift_list_from_memory(cx, &list)?.into())
            }

            fn linear_lift_from_memory(cx: &mut LiftContext<'_>, ty: InterfaceType, bytes: &[u8]) -> Result<Self> {
                let list = <WasmList::<T> as Lift>::linear_lift_from_memory(cx, ty, bytes)?;
                Ok(T::linear_lift_list_from_memory(cx, &list)?.into())
            }
        }
    )*)
}
1052
// Owned list containers all lift through a decoded `WasmList<T>`.
forward_list_lifts! {
    Box<[T]>,
    alloc::sync::Arc<[T]>,
    Vec<T>,
}
1058
// Macro to help generate `ComponentType` implementations for primitive types
// such as integers, char, bool, etc.
macro_rules! integers {
    ($($primitive:ident = $ty:ident in $field:ident/$get:ident with abi:$abi:ident,)*) => ($(
        unsafe impl ComponentType for $primitive {
            // Every integer is represented by a single core wasm value.
            type Lower = ValRaw;

            const ABI: CanonicalAbiInfo = CanonicalAbiInfo::$abi;

            // Lowering an integer never allocates in the guest.
            const MAY_REQUIRE_REALLOC: bool = false;

            fn typecheck(ty: &InterfaceType, _types: &InstanceType<'_>) -> Result<()> {
                match ty {
                    InterfaceType::$ty => Ok(()),
                    other => bail!("expected `{}` found `{}`", desc(&InterfaceType::$ty), desc(other))
                }
            }
        }

        unsafe impl Lower for $primitive {
            #[inline]
            #[allow(trivial_numeric_casts, reason = "macro-generated code")]
            fn linear_lower_to_flat<T>(
                &self,
                _cx: &mut LowerContext<'_, T>,
                ty: InterfaceType,
                dst: &mut MaybeUninit<Self::Lower>,
            ) -> Result<()> {
                debug_assert!(matches!(ty, InterfaceType::$ty));
                dst.write(ValRaw::$field(*self as $field));
                Ok(())
            }

            #[inline]
            fn linear_lower_to_memory<T>(
                &self,
                cx: &mut LowerContext<'_, T>,
                ty: InterfaceType,
                offset: usize,
            ) -> Result<()> {
                debug_assert!(matches!(ty, InterfaceType::$ty));
                debug_assert!(offset % Self::SIZE32 == 0);
                // Integers are always stored little-endian in guest memory.
                *cx.get(offset) = self.to_le_bytes();
                Ok(())
            }

            fn linear_store_list_to_memory<T>(
                cx: &mut LowerContext<'_, T>,
                ty: InterfaceType,
                offset: usize,
                items: &[Self],
            ) -> Result<()> {
                debug_assert!(matches!(ty, InterfaceType::$ty));

                // Double-check that the CM alignment is at least the host's
                // alignment for this type which should be true for all
                // platforms.
                assert!((Self::ALIGN32 as usize) >= mem::align_of::<Self>());

                // Slice `cx`'s memory to the window that we'll be modifying.
                // This should all have already been verified in terms of
                // alignment and sizing meaning that these assertions here are
                // not truly necessary but are instead double-checks.
                //
                // Note that we're casting a `[u8]` slice to `[Self]` with
                // `align_to_mut` which is not safe in general but is safe in
                // our specific case as all `u8` patterns are valid `Self`
                // patterns since `Self` is an integral type.
                let dst = &mut cx.as_slice_mut()[offset..][..items.len() * Self::SIZE32];
                let (before, middle, end) = unsafe { dst.align_to_mut::<Self>() };
                assert!(before.is_empty() && end.is_empty());
                assert_eq!(middle.len(), items.len());

                // And with all that out of the way perform the copying loop.
                // This is not a `copy_from_slice` because endianness needs to
                // be handled here, but LLVM should pretty easily transform this
                // into a memcpy on little-endian platforms.
                for (dst, src) in middle.iter_mut().zip(items) {
                    *dst = src.to_le();
                }
                Ok(())
            }
        }

        unsafe impl Lift for $primitive {
            #[inline]
            #[allow(
                trivial_numeric_casts,
                clippy::cast_possible_truncation,
                reason = "macro-generated code"
            )]
            fn linear_lift_from_flat(_cx: &mut LiftContext<'_>, ty: InterfaceType, src: &Self::Lower) -> Result<Self> {
                debug_assert!(matches!(ty, InterfaceType::$ty));
                Ok(src.$get() as $primitive)
            }

            #[inline]
            fn linear_lift_from_memory(_cx: &mut LiftContext<'_>, ty: InterfaceType, bytes: &[u8]) -> Result<Self> {
                debug_assert!(matches!(ty, InterfaceType::$ty));
                debug_assert!((bytes.as_ptr() as usize) % Self::SIZE32 == 0);
                Ok($primitive::from_le_bytes(bytes.try_into().unwrap()))
            }

            // Bulk-lift override: view guest memory as a `[Self]` slice and
            // convert each element from little-endian, a shape LLVM can turn
            // into a memcpy on little-endian hosts.
            fn linear_lift_into_from_memory(
                cx: &mut LiftContext<'_>,
                list: &WasmList<Self>,
                dst: &mut impl Extend<Self>,
            ) -> Result<()>
            where
                Self: Sized,
            {
                dst.extend(list._as_le_slice(cx.memory())
                    .iter()
                    .map(|i| Self::from_le(*i)));
                Ok(())
            }
        }
    )*)
}
1178
// Each entry reads: host primitive = interface type, lowered into the listed
// `ValRaw` field (with its getter), using the listed canonical ABI layout.
integers! {
    i8 = S8 in i32/get_i32 with abi:SCALAR1,
    u8 = U8 in u32/get_u32 with abi:SCALAR1,
    i16 = S16 in i32/get_i32 with abi:SCALAR2,
    u16 = U16 in u32/get_u32 with abi:SCALAR2,
    i32 = S32 in i32/get_i32 with abi:SCALAR4,
    u32 = U32 in u32/get_u32 with abi:SCALAR4,
    i64 = S64 in i64/get_i64 with abi:SCALAR8,
    u64 = U64 in u64/get_u64 with abi:SCALAR8,
}
1189
// Macro generating `ComponentType`/`Lower`/`Lift` implementations for the
// floating-point primitives, which travel through the canonical ABI as their
// raw little-endian bit patterns.
macro_rules! floats {
    ($($float:ident/$get_float:ident = $ty:ident with abi:$abi:ident)*) => ($(const _: () = {
        unsafe impl ComponentType for $float {
            type Lower = ValRaw;

            const ABI: CanonicalAbiInfo = CanonicalAbiInfo::$abi;

            fn typecheck(ty: &InterfaceType, _types: &InstanceType<'_>) -> Result<()> {
                match ty {
                    InterfaceType::$ty => Ok(()),
                    other => bail!("expected `{}` found `{}`", desc(&InterfaceType::$ty), desc(other))
                }
            }
        }

        unsafe impl Lower for $float {
            #[inline]
            fn linear_lower_to_flat<T>(
                &self,
                _cx: &mut LowerContext<'_, T>,
                ty: InterfaceType,
                dst: &mut MaybeUninit<Self::Lower>,
            ) -> Result<()> {
                debug_assert!(matches!(ty, InterfaceType::$ty));
                // Floats are lowered via their raw bit pattern, preserving
                // NaN payloads.
                dst.write(ValRaw::$float(self.to_bits()));
                Ok(())
            }

            #[inline]
            fn linear_lower_to_memory<T>(
                &self,
                cx: &mut LowerContext<'_, T>,
                ty: InterfaceType,
                offset: usize,
            ) -> Result<()> {
                debug_assert!(matches!(ty, InterfaceType::$ty));
                debug_assert!(offset % Self::SIZE32 == 0);
                let ptr = cx.get(offset);
                *ptr = self.to_bits().to_le_bytes();
                Ok(())
            }

            fn linear_store_list_to_memory<T>(
                cx: &mut LowerContext<'_, T>,
                ty: InterfaceType,
                offset: usize,
                items: &[Self],
            ) -> Result<()> {
                debug_assert!(matches!(ty, InterfaceType::$ty));

                // Double-check that the CM alignment is at least the host's
                // alignment for this type which should be true for all
                // platforms.
                assert!((Self::ALIGN32 as usize) >= mem::align_of::<Self>());

                // Slice `cx`'s memory to the window that we'll be modifying.
                // This should all have already been verified in terms of
                // alignment and sizing meaning that these assertions here are
                // not truly necessary but are instead double-checks.
                let dst = &mut cx.as_slice_mut()[offset..][..items.len() * Self::SIZE32];
                assert!(dst.as_ptr().cast::<Self>().is_aligned());

                // And with all that out of the way perform the copying loop.
                // This is not a `copy_from_slice` because endianness needs to
                // be handled here, but LLVM should pretty easily transform this
                // into a memcpy on little-endian platforms.
                // TODO use `as_chunks` when https://github.com/rust-lang/rust/issues/74985
                // is stabilized
                for (dst, src) in iter::zip(dst.chunks_exact_mut(Self::SIZE32), items) {
                    let dst: &mut [u8; Self::SIZE32] = dst.try_into().unwrap();
                    *dst = src.to_le_bytes();
                }
                Ok(())
            }
        }

        unsafe impl Lift for $float {
            #[inline]
            fn linear_lift_from_flat(_cx: &mut LiftContext<'_>, ty: InterfaceType, src: &Self::Lower) -> Result<Self> {
                debug_assert!(matches!(ty, InterfaceType::$ty));
                Ok($float::from_bits(src.$get_float()))
            }

            #[inline]
            fn linear_lift_from_memory(_cx: &mut LiftContext<'_>, ty: InterfaceType, bytes: &[u8]) -> Result<Self> {
                debug_assert!(matches!(ty, InterfaceType::$ty));
                debug_assert!((bytes.as_ptr() as usize) % Self::SIZE32 == 0);
                Ok($float::from_le_bytes(bytes.try_into().unwrap()))
            }

            fn linear_lift_list_from_memory(cx: &mut LiftContext<'_>, list: &WasmList<Self>) -> Result<Vec<Self>> where Self: Sized {
                // See comments in `WasmList::get` for the panicking indexing
                let byte_size = list.len * mem::size_of::<Self>();
                let bytes = &cx.memory()[list.ptr..][..byte_size];

                // The canonical ABI requires that everything is aligned to its
                // own size, so this should be an aligned array.
                assert!(bytes.as_ptr().cast::<Self>().is_aligned());

                // Copy the resulting slice to a new Vec, handling endianness
                // in the process
                // TODO use `as_chunks` when https://github.com/rust-lang/rust/issues/74985
                // is stabilized
                Ok(
                    bytes
                        .chunks_exact(Self::SIZE32)
                        .map(|i| $float::from_le_bytes(i.try_into().unwrap()))
                        .collect()
                )
            }
        }
    };)*)
}
1303
// Floats are 4- and 8-byte scalars lowered via their raw bit patterns.
floats! {
    f32/get_f32 = Float32 with abi:SCALAR4
    f64/get_f64 = Float64 with abi:SCALAR8
}
1308
1309unsafe impl ComponentType for bool {
1310 type Lower = ValRaw;
1311
1312 const ABI: CanonicalAbiInfo = CanonicalAbiInfo::SCALAR1;
1313
1314 fn typecheck(ty: &InterfaceType, _types: &InstanceType<'_>) -> Result<()> {
1315 match ty {
1316 InterfaceType::Bool => Ok(()),
1317 other => bail!("expected `bool` found `{}`", desc(other)),
1318 }
1319 }
1320}
1321
1322unsafe impl Lower for bool {
1323 fn linear_lower_to_flat<T>(
1324 &self,
1325 _cx: &mut LowerContext<'_, T>,
1326 ty: InterfaceType,
1327 dst: &mut MaybeUninit<Self::Lower>,
1328 ) -> Result<()> {
1329 debug_assert!(matches!(ty, InterfaceType::Bool));
1330 dst.write(ValRaw::i32(*self as i32));
1331 Ok(())
1332 }
1333
1334 fn linear_lower_to_memory<T>(
1335 &self,
1336 cx: &mut LowerContext<'_, T>,
1337 ty: InterfaceType,
1338 offset: usize,
1339 ) -> Result<()> {
1340 debug_assert!(matches!(ty, InterfaceType::Bool));
1341 debug_assert!(offset % Self::SIZE32 == 0);
1342 cx.get::<1>(offset)[0] = *self as u8;
1343 Ok(())
1344 }
1345}
1346
1347unsafe impl Lift for bool {
1348 #[inline]
1349 fn linear_lift_from_flat(
1350 _cx: &mut LiftContext<'_>,
1351 ty: InterfaceType,
1352 src: &Self::Lower,
1353 ) -> Result<Self> {
1354 debug_assert!(matches!(ty, InterfaceType::Bool));
1355 match src.get_i32() {
1356 0 => Ok(false),
1357 _ => Ok(true),
1358 }
1359 }
1360
1361 #[inline]
1362 fn linear_lift_from_memory(
1363 _cx: &mut LiftContext<'_>,
1364 ty: InterfaceType,
1365 bytes: &[u8],
1366 ) -> Result<Self> {
1367 debug_assert!(matches!(ty, InterfaceType::Bool));
1368 match bytes[0] {
1369 0 => Ok(false),
1370 _ => Ok(true),
1371 }
1372 }
1373}
1374
1375unsafe impl ComponentType for char {
1376 type Lower = ValRaw;
1377
1378 const ABI: CanonicalAbiInfo = CanonicalAbiInfo::SCALAR4;
1379
1380 fn typecheck(ty: &InterfaceType, _types: &InstanceType<'_>) -> Result<()> {
1381 match ty {
1382 InterfaceType::Char => Ok(()),
1383 other => bail!("expected `char` found `{}`", desc(other)),
1384 }
1385 }
1386}
1387
1388unsafe impl Lower for char {
1389 #[inline]
1390 fn linear_lower_to_flat<T>(
1391 &self,
1392 _cx: &mut LowerContext<'_, T>,
1393 ty: InterfaceType,
1394 dst: &mut MaybeUninit<Self::Lower>,
1395 ) -> Result<()> {
1396 debug_assert!(matches!(ty, InterfaceType::Char));
1397 dst.write(ValRaw::u32(u32::from(*self)));
1398 Ok(())
1399 }
1400
1401 #[inline]
1402 fn linear_lower_to_memory<T>(
1403 &self,
1404 cx: &mut LowerContext<'_, T>,
1405 ty: InterfaceType,
1406 offset: usize,
1407 ) -> Result<()> {
1408 debug_assert!(matches!(ty, InterfaceType::Char));
1409 debug_assert!(offset % Self::SIZE32 == 0);
1410 *cx.get::<4>(offset) = u32::from(*self).to_le_bytes();
1411 Ok(())
1412 }
1413}
1414
1415unsafe impl Lift for char {
1416 #[inline]
1417 fn linear_lift_from_flat(
1418 _cx: &mut LiftContext<'_>,
1419 ty: InterfaceType,
1420 src: &Self::Lower,
1421 ) -> Result<Self> {
1422 debug_assert!(matches!(ty, InterfaceType::Char));
1423 Ok(char::try_from(src.get_u32())?)
1424 }
1425
1426 #[inline]
1427 fn linear_lift_from_memory(
1428 _cx: &mut LiftContext<'_>,
1429 ty: InterfaceType,
1430 bytes: &[u8],
1431 ) -> Result<Self> {
1432 debug_assert!(matches!(ty, InterfaceType::Char));
1433 debug_assert!((bytes.as_ptr() as usize) % Self::SIZE32 == 0);
1434 let bits = u32::from_le_bytes(bytes.try_into().unwrap());
1435 Ok(char::try_from(bits)?)
1436 }
1437}
1438
1439fn lift_pointer_pair_from_flat(
1440 cx: &mut LiftContext<'_>,
1441 src: &[ValRaw; 2],
1442) -> Result<(usize, usize)> {
1443 // FIXME(#4311): needs memory64 treatment
1444 let _ = cx; // this will be needed for memory64 in the future
1445 let ptr = src[0].get_u32();
1446 let len = src[1].get_u32();
1447 Ok((usize::try_from(ptr)?, usize::try_from(len)?))
1448}
1449
1450fn lift_pointer_pair_from_memory(cx: &mut LiftContext<'_>, bytes: &[u8]) -> Result<(usize, usize)> {
1451 // FIXME(#4311): needs memory64 treatment
1452 let _ = cx; // this will be needed for memory64 in the future
1453 let ptr = u32::from_le_bytes(bytes[..4].try_into().unwrap());
1454 let len = u32::from_le_bytes(bytes[4..].try_into().unwrap());
1455 Ok((usize::try_from(ptr)?, usize::try_from(len)?))
1456}
1457
// Lowers a (pointer, length) pair into its flat representation of two core
// wasm values.
fn lower_pointer_pair_to_flat<T>(
    cx: &mut LowerContext<T>,
    dst: &mut MaybeUninit<[ValRaw; 2]>,
    ptr: usize,
    len: usize,
) {
    // See "WRITEPTR64" above for why this is always storing a 64-bit
    // integer.
    let _ = cx; // this will eventually be needed for memory64 information.
    map_maybe_uninit!(dst[0]).write(ValRaw::i64(ptr as i64));
    map_maybe_uninit!(dst[1]).write(ValRaw::i64(len as i64));
}
1470
1471fn lower_pointer_pair_to_memory<T>(
1472 cx: &mut LowerContext<T>,
1473 offset: usize,
1474 ptr: usize,
1475 len: usize,
1476) {
1477 // FIXME(#4311): needs memory64 handling
1478 *cx.get(offset + 0) = u32::try_from(ptr).unwrap().to_le_bytes();
1479 *cx.get(offset + 4) = u32::try_from(len).unwrap().to_le_bytes();
1480}
1481
// FIXME(#4311): these probably need different constants for memory64
//
// Bit 31 of a `CompactUtf16` length field tags the payload as utf16 (see
// `lower_string` and `WasmStr::new`), so string byte lengths themselves are
// capped just below 2^31.
const UTF16_TAG: usize = 1 << 31;
const MAX_STRING_BYTE_LENGTH: usize = (1 << 31) - 1;
1485
1486// Note that this is similar to `ComponentType for WasmStr` except it can only
1487// be used for lowering, not lifting.
1488unsafe impl ComponentType for str {
1489 type Lower = [ValRaw; 2];
1490
1491 const ABI: CanonicalAbiInfo = CanonicalAbiInfo::POINTER_PAIR;
1492
1493 fn typecheck(ty: &InterfaceType, _types: &InstanceType<'_>) -> Result<()> {
1494 match ty {
1495 InterfaceType::String => Ok(()),
1496 other => bail!("expected `string` found `{}`", desc(other)),
1497 }
1498 }
1499}
1500
1501unsafe impl Lower for str {
1502 fn linear_lower_to_flat<T>(
1503 &self,
1504 cx: &mut LowerContext<'_, T>,
1505 ty: InterfaceType,
1506 dst: &mut MaybeUninit<[ValRaw; 2]>,
1507 ) -> Result<()> {
1508 debug_assert!(matches!(ty, InterfaceType::String));
1509 let (ptr, len) = lower_string(cx, self)?;
1510 lower_pointer_pair_to_flat(cx, dst, ptr, len);
1511 Ok(())
1512 }
1513
1514 fn linear_lower_to_memory<T>(
1515 &self,
1516 cx: &mut LowerContext<'_, T>,
1517 ty: InterfaceType,
1518 offset: usize,
1519 ) -> Result<()> {
1520 debug_assert!(matches!(ty, InterfaceType::String));
1521 debug_assert!(offset % (Self::ALIGN32 as usize) == 0);
1522 let (ptr, len) = lower_string(cx, self)?;
1523 lower_pointer_pair_to_memory(cx, offset, ptr, len);
1524 Ok(())
1525 }
1526}
1527
// Copies `string` into the guest's linear memory in the guest's configured
// encoding, returning the (pointer, length) pair describing the allocation.
// Note that the returned length is measured in code units of the destination
// encoding, not necessarily bytes.
fn lower_string<T>(cx: &mut LowerContext<'_, T>, string: &str) -> Result<(usize, usize)> {
    // Note that in general the wasm module can't assume anything about what the
    // host strings are encoded as. Additionally hosts are allowed to have
    // differently-encoded strings at runtime. Finally when copying a string
    // into wasm it's somewhat strict in the sense that the various patterns of
    // allocation and such are already dictated for us.
    //
    // In general what this means is that when copying a string from the host
    // into the destination we need to follow one of the cases of copying into
    // WebAssembly. It doesn't particularly matter which case as long as it ends
    // up in the right encoding. For example a destination encoding of
    // latin1+utf16 has a number of ways to get copied into and we do something
    // here that isn't the default "utf8 to latin1+utf16" since we have access
    // to simd-accelerated helpers in the `encoding_rs` crate. This is ok though
    // because we can fake that the host string was already stored in latin1
    // format and follow that copy pattern instead.
    match cx.options().string_encoding {
        // This corresponds to `store_string_copy` in the canonical ABI where
        // the host's representation is utf-8 and the wasm module wants utf-8 so
        // a copy is all that's needed (and the `realloc` can be precise for the
        // initial memory allocation).
        StringEncoding::Utf8 => {
            if string.len() > MAX_STRING_BYTE_LENGTH {
                bail!(
                    "string length of {} too large to copy into wasm",
                    string.len()
                );
            }
            let ptr = cx.realloc(0, 0, 1, string.len())?;
            cx.as_slice_mut()[ptr..][..string.len()].copy_from_slice(string.as_bytes());
            Ok((ptr, string.len()))
        }

        // This corresponds to `store_utf8_to_utf16` in the canonical ABI. Here
        // an over-large allocation is performed and then shrunk afterwards if
        // necessary.
        StringEncoding::Utf16 => {
            let size = string.len() * 2;
            if size > MAX_STRING_BYTE_LENGTH {
                bail!(
                    "string length of {} too large to copy into wasm",
                    string.len()
                );
            }
            let mut ptr = cx.realloc(0, 0, 2, size)?;
            let mut copied = 0;
            let bytes = &mut cx.as_slice_mut()[ptr..][..size];
            for (u, bytes) in string.encode_utf16().zip(bytes.chunks_mut(2)) {
                let u_bytes = u.to_le_bytes();
                bytes[0] = u_bytes[0];
                bytes[1] = u_bytes[1];
                copied += 1;
            }
            // Shrink the allocation if fewer code units were produced than
            // the worst-case estimate.
            if (copied * 2) < size {
                ptr = cx.realloc(ptr, size, 2, copied * 2)?;
            }
            // `copied` is the length in utf16 code units.
            Ok((ptr, copied))
        }

        StringEncoding::CompactUtf16 => {
            // This corresponds to `store_string_to_latin1_or_utf16`
            let bytes = string.as_bytes();
            let mut iter = string.char_indices();
            let mut ptr = cx.realloc(0, 0, 2, bytes.len())?;
            let mut dst = &mut cx.as_slice_mut()[ptr..][..bytes.len()];
            let mut result = 0;
            while let Some((i, ch)) = iter.next() {
                // Test if this `char` fits into the latin1 encoding.
                if let Ok(byte) = u8::try_from(u32::from(ch)) {
                    dst[result] = byte;
                    result += 1;
                    continue;
                }

                // .. if utf16 is forced to be used then the allocation is
                // bumped up to the maximum size.
                let worst_case = bytes
                    .len()
                    .checked_mul(2)
                    .ok_or_else(|| format_err!("byte length overflow"))?;
                if worst_case > MAX_STRING_BYTE_LENGTH {
                    bail!("byte length too large");
                }
                ptr = cx.realloc(ptr, bytes.len(), 2, worst_case)?;
                dst = &mut cx.as_slice_mut()[ptr..][..worst_case];

                // Previously encoded latin1 bytes are inflated to their 16-bit
                // size for utf16
                for i in (0..result).rev() {
                    dst[2 * i] = dst[i];
                    dst[2 * i + 1] = 0;
                }

                // and then the remainder of the string is encoded.
                for (u, bytes) in string[i..]
                    .encode_utf16()
                    .zip(dst[2 * result..].chunks_mut(2))
                {
                    let u_bytes = u.to_le_bytes();
                    bytes[0] = u_bytes[0];
                    bytes[1] = u_bytes[1];
                    result += 1;
                }
                if worst_case > 2 * result {
                    ptr = cx.realloc(ptr, worst_case, 2, 2 * result)?;
                }
                // Bit 31 of the length flags the payload as utf16-encoded.
                return Ok((ptr, result | UTF16_TAG));
            }
            if result < bytes.len() {
                ptr = cx.realloc(ptr, bytes.len(), 2, result)?;
            }
            Ok((ptr, result))
        }
    }
}
1643
/// Representation of a string located in linear memory in a WebAssembly
/// instance.
///
/// This type can be used in place of `String` and `str` for string-taking APIs
/// in some situations. The purpose of this type is to represent a range of
/// validated bytes within a component but does not actually copy the bytes. The
/// primary method, [`WasmStr::to_str`], attempts to return a reference to the
/// string directly located in the component's memory, avoiding a copy into the
/// host if possible.
///
/// The downside of this type, however, is that accessing a string requires a
/// [`Store`](crate::Store) pointer (via [`StoreContext`]). Bindings generated
/// by [`bindgen!`](crate::component::bindgen), for example, do not have access
/// to [`StoreContext`] and thus can't use this type.
///
/// This is intended for more advanced use cases such as defining functions
/// directly in a [`Linker`](crate::component::Linker). It's expected that in
/// the future [`bindgen!`](crate::component::bindgen) will also have a way to
/// use this type.
///
/// This type is used with [`TypedFunc`], for example, when WebAssembly returns
/// a string. This type cannot be used to give a string to WebAssembly, instead
/// `&str` should be used for that (since it's coming from the host).
///
/// Note that this type represents an in-bounds string in linear memory, but it
/// does not represent a valid string (e.g. valid utf-8). Validation happens
/// when [`WasmStr::to_str`] is called.
///
/// Also note that this type does not implement [`Lower`], it only implements
/// [`Lift`].
pub struct WasmStr {
    // Byte offset of the string within the instance's linear memory.
    ptr: usize,
    // Raw length field as produced by the guest; for the `CompactUtf16`
    // encoding this may have `UTF16_TAG` set in bit 31.
    len: usize,
    // Canonical options (memory, string encoding) this string was lifted with.
    options: OptionsIndex,
    // The component instance whose memory `ptr` indexes into.
    instance: Instance,
}
1680
impl WasmStr {
    // Validates and records the location of a guest string without copying or
    // decoding it; `ptr`/`len` are the raw values produced by the guest.
    pub(crate) fn new(ptr: usize, len: usize, cx: &mut LiftContext<'_>) -> Result<WasmStr> {
        // Determine the byte length and required alignment implied by the raw
        // `len` field under the configured encoding. For `CompactUtf16`, bit
        // 31 of `len` distinguishes latin1 (unset, one byte per unit) from
        // utf16 (set, two bytes per unit). `checked_mul` yields `None` on
        // overflow, which is rejected below.
        let (byte_len, align) = match cx.options().string_encoding {
            StringEncoding::Utf8 => (Some(len), 1_usize),
            StringEncoding::Utf16 => (len.checked_mul(2), 2),
            StringEncoding::CompactUtf16 => {
                if len & UTF16_TAG == 0 {
                    (Some(len), 2)
                } else {
                    ((len ^ UTF16_TAG).checked_mul(2), 2)
                }
            }
        };
        debug_assert!(align.is_power_of_two());
        if ptr & (align - 1) != 0 {
            bail!("string pointer not aligned to {align}");
        }
        // Bounds-check the full byte range and account for its size against
        // the store's fuel (`n - ptr` is the string's byte length).
        match byte_len.and_then(|len| ptr.checked_add(len)) {
            Some(n) if n <= cx.memory().len() => cx.consume_fuel(n - ptr)?,
            _ => bail!("string pointer/length out of bounds of memory"),
        }
        Ok(WasmStr {
            ptr,
            len,
            options: cx.options_index(),
            instance: cx.instance_handle(),
        })
    }

    /// Returns the underlying string that this cursor points to.
    ///
    /// Note that this will internally decode the string from the wasm's
    /// encoding to utf-8 and additionally perform validation.
    ///
    /// The `store` provided must be the store where this string lives to
    /// access the correct memory.
    ///
    /// # Errors
    ///
    /// Returns an error if the string wasn't encoded correctly (e.g. invalid
    /// utf-8).
    ///
    /// # Panics
    ///
    /// Panics if this string is not owned by `store`.
    //
    // TODO: should add accessors for specifically utf-8 and utf-16 that perhaps
    // in an opt-in basis don't do validation. Additionally there should be some
    // method that returns `[u16]` after validating to avoid the utf16-to-utf8
    // transcode.
    pub fn to_str<'a, T: 'static>(
        &self,
        store: impl Into<StoreContext<'a, T>>,
    ) -> Result<Cow<'a, str>> {
        let store = store.into().0;
        // Re-fetch the memory and encoding from the options this string was
        // originally lifted with.
        let memory = self.instance.options_memory(store, self.options);
        let encoding = self.instance.options(store, self.options).string_encoding;
        self.to_str_from_memory(encoding, memory)
    }

    // Decodes the string out of the provided `memory` snapshot according to
    // `encoding`, borrowing from `memory` where the encoding permits.
    pub(crate) fn to_str_from_memory<'a>(
        &self,
        encoding: StringEncoding,
        memory: &'a [u8],
    ) -> Result<Cow<'a, str>> {
        match encoding {
            StringEncoding::Utf8 => self.decode_utf8(memory),
            StringEncoding::Utf16 => self.decode_utf16(memory, self.len),
            StringEncoding::CompactUtf16 => {
                // Bit 31 of `len` selects between latin1 and utf16 payloads.
                if self.len & UTF16_TAG == 0 {
                    self.decode_latin1(memory)
                } else {
                    self.decode_utf16(memory, self.len ^ UTF16_TAG)
                }
            }
        }
    }

    // Validates the bytes as utf-8 and returns them borrowed (no copy).
    fn decode_utf8<'a>(&self, memory: &'a [u8]) -> Result<Cow<'a, str>> {
        // Note that bounds-checking already happen in construction of `WasmStr`
        // so this is never expected to panic. This could theoretically be
        // unchecked indexing if we're feeling wild enough.
        Ok(str::from_utf8(&memory[self.ptr..][..self.len])?.into())
    }

    // Transcodes `len` little-endian utf-16 code units into an owned string.
    fn decode_utf16<'a>(&self, memory: &'a [u8], len: usize) -> Result<Cow<'a, str>> {
        // See notes in `decode_utf8` for why this is panicking indexing.
        let memory = &memory[self.ptr..][..len * 2];
        Ok(core::char::decode_utf16(
            memory
                .chunks(2)
                .map(|chunk| u16::from_le_bytes(chunk.try_into().unwrap())),
        )
        .collect::<Result<String, _>>()?
        .into())
    }

    // Decodes latin1 bytes; borrows when the bytes are all ASCII, otherwise
    // allocates (per `encoding_rs::mem::decode_latin1` semantics).
    fn decode_latin1<'a>(&self, memory: &'a [u8]) -> Result<Cow<'a, str>> {
        // See notes in `decode_utf8` for why this is panicking indexing.
        Ok(encoding_rs::mem::decode_latin1(
            &memory[self.ptr..][..self.len],
        ))
    }
}
1785
1786// Note that this is similar to `ComponentType for str` except it can only be
1787// used for lifting, not lowering.
1788unsafe impl ComponentType for WasmStr {
1789 type Lower = <str as ComponentType>::Lower;
1790
1791 const ABI: CanonicalAbiInfo = CanonicalAbiInfo::POINTER_PAIR;
1792
1793 fn typecheck(ty: &InterfaceType, _types: &InstanceType<'_>) -> Result<()> {
1794 match ty {
1795 InterfaceType::String => Ok(()),
1796 other => bail!("expected `string` found `{}`", desc(other)),
1797 }
1798 }
1799}
1800
unsafe impl Lift for WasmStr {
    #[inline]
    fn linear_lift_from_flat(
        cx: &mut LiftContext<'_>,
        ty: InterfaceType,
        src: &Self::Lower,
    ) -> Result<Self> {
        debug_assert!(matches!(ty, InterfaceType::String));
        // Strings flatten to a (pointer, length) pair; bounds and alignment
        // are validated inside `WasmStr::new`.
        let (ptr, len) = lift_pointer_pair_from_flat(cx, src)?;
        WasmStr::new(ptr, len, cx)
    }

    #[inline]
    fn linear_lift_from_memory(
        cx: &mut LiftContext<'_>,
        ty: InterfaceType,
        bytes: &[u8],
    ) -> Result<Self> {
        debug_assert!(matches!(ty, InterfaceType::String));
        // Callers are required to hand over a correctly-aligned slice.
        debug_assert!((bytes.as_ptr() as usize) % (Self::ALIGN32 as usize) == 0);
        let (ptr, len) = lift_pointer_pair_from_memory(cx, bytes)?;
        WasmStr::new(ptr, len, cx)
    }
}
1825
unsafe impl<T> ComponentType for [T]
where
    T: ComponentType,
{
    // Lists flatten to a (pointer, length) pair of core values.
    type Lower = [ValRaw; 2];

    const ABI: CanonicalAbiInfo = CanonicalAbiInfo::POINTER_PAIR;

    fn typecheck(ty: &InterfaceType, types: &InstanceType<'_>) -> Result<()> {
        match ty {
            // A host slice matches any `list<t>` whose element type matches `T`.
            InterfaceType::List(t) => T::typecheck(&types.types[*t].element, types),
            other => bail!("expected `list` found `{}`", desc(other)),
        }
    }
}
1841
1842unsafe impl<T> Lower for [T]
1843where
1844 T: Lower,
1845{
1846 fn linear_lower_to_flat<U>(
1847 &self,
1848 cx: &mut LowerContext<'_, U>,
1849 ty: InterfaceType,
1850 dst: &mut MaybeUninit<[ValRaw; 2]>,
1851 ) -> Result<()> {
1852 let elem = match ty {
1853 InterfaceType::List(i) => cx.types[i].element,
1854 _ => bad_type_info(),
1855 };
1856 let (ptr, len) = lower_list(cx, elem, self)?;
1857 lower_pointer_pair_to_flat(cx, dst, ptr, len);
1858 Ok(())
1859 }
1860
1861 fn linear_lower_to_memory<U>(
1862 &self,
1863 cx: &mut LowerContext<'_, U>,
1864 ty: InterfaceType,
1865 offset: usize,
1866 ) -> Result<()> {
1867 let elem = match ty {
1868 InterfaceType::List(i) => cx.types[i].element,
1869 _ => bad_type_info(),
1870 };
1871 debug_assert!(offset % (Self::ALIGN32 as usize) == 0);
1872 let (ptr, len) = lower_list(cx, elem, self)?;
1873 lower_pointer_pair_to_memory(cx, offset, ptr, len);
1874 Ok(())
1875 }
1876}
1877
1878// FIXME: this is not a memcpy for `T` where `T` is something like `u8`.
1879//
1880// Some attempts to fix this have proved not fruitful. In isolation an attempt
1881// was made where:
1882//
1883// * `MemoryMut` stored a `*mut [u8]` as its "last view" of memory to avoid
1884// reloading the base pointer constantly. This view is reset on `realloc`.
1885// * The bounds-checks in `MemoryMut::get` were removed (replaced with unsafe
1886// indexing)
1887//
1888// Even then though this didn't correctly vectorized for `Vec<u8>`. It's not
1889// entirely clear why but it appeared that it's related to reloading the base
1890// pointer to memory (I guess from `MemoryMut` itself?). Overall I'm not really
1891// clear on what's happening there, but this is surely going to be a performance
1892// bottleneck in the future.
/// Allocates space in the guest for `list` and copies every element in,
/// returning the resulting `(guest pointer, element count)` pair.
fn lower_list<T, U>(
    cx: &mut LowerContext<'_, U>,
    ty: InterfaceType,
    list: &[T],
) -> Result<(usize, usize)>
where
    T: Lower,
{
    let elem_size = T::SIZE32;
    // Total byte size in guest memory; `len * size` is computed on the host
    // and can overflow, so it must be checked.
    let size = list
        .len()
        .checked_mul(elem_size)
        .ok_or_else(|| format_err!("size overflow copying a list"))?;
    // Allocate via the guest's `realloc` with the element's canonical
    // alignment, then store each element into the fresh allocation.
    let ptr = cx.realloc(0, 0, T::ALIGN32, size)?;
    T::linear_store_list_to_memory(cx, ty, ptr, list)?;
    Ok((ptr, list.len()))
}
1910
1911/// Representation of a list of values that are owned by a WebAssembly instance.
1912///
1913/// For some more commentary about the rationale for this type see the
1914/// documentation of [`WasmStr`]. In summary this type can avoid a copy when
1915/// passing data to the host in some situations but is additionally more
1916/// cumbersome to use by requiring a [`Store`](crate::Store) to be provided.
1917///
1918/// This type is used whenever a `(list T)` is returned from a [`TypedFunc`],
1919/// for example. This type represents a list of values that are stored in linear
1920/// memory which are waiting to be read.
1921///
1922/// Note that this type represents only a valid range of bytes for the list
1923/// itself, it does not represent validity of the elements themselves and that's
1924/// performed when they're iterated.
1925///
1926/// Note that this type does not implement the [`Lower`] trait, only [`Lift`].
pub struct WasmList<T> {
    // Byte offset of the first element in the instance's linear memory.
    ptr: usize,
    // Number of elements (not bytes) in the list.
    len: usize,
    // Canonical options this list was lifted with.
    options: OptionsIndex,
    // Component-model type of each element, used when lifting elements.
    elem: InterfaceType,
    // The component instance whose linear memory holds the elements.
    instance: Instance,
    // Ties `T` to the list without owning a `T`.
    _marker: marker::PhantomData<T>,
}
1935
1936impl<T: Lift> WasmList<T> {
1937 pub(crate) fn new(
1938 ptr: usize,
1939 len: usize,
1940 cx: &mut LiftContext<'_>,
1941 elem: InterfaceType,
1942 ) -> Result<WasmList<T>> {
1943 match len
1944 .checked_mul(T::SIZE32)
1945 .and_then(|len| ptr.checked_add(len))
1946 {
1947 Some(n) if n <= cx.memory().len() => cx.consume_fuel_array(len, size_of::<T>())?,
1948 _ => bail!("list pointer/length out of bounds of memory"),
1949 }
1950 if ptr % usize::try_from(T::ALIGN32)? != 0 {
1951 bail!("list pointer is not aligned")
1952 }
1953 Ok(WasmList {
1954 ptr,
1955 len,
1956 options: cx.options_index(),
1957 elem,
1958 instance: cx.instance_handle(),
1959 _marker: marker::PhantomData,
1960 })
1961 }
1962
1963 /// Returns the item length of this vector
1964 #[inline]
1965 pub fn len(&self) -> usize {
1966 self.len
1967 }
1968
1969 /// Gets the `n`th element of this list.
1970 ///
1971 /// Returns `None` if `index` is out of bounds. Returns `Some(Err(..))` if
1972 /// the value couldn't be decoded (it was invalid). Returns `Some(Ok(..))`
1973 /// if the value is valid.
1974 ///
1975 /// # Panics
1976 ///
1977 /// This function will panic if the string did not originally come from the
1978 /// `store` specified.
1979 //
1980 // TODO: given that interface values are intended to be consumed in one go
1981 // should we even expose a random access iteration API? In theory all
1982 // consumers should be validating through the iterator.
1983 pub fn get(&self, mut store: impl AsContextMut, index: usize) -> Option<Result<T>> {
1984 let store = store.as_context_mut().0;
1985 let mut cx = LiftContext::new(store, self.options, self.instance);
1986 self.get_from_store(&mut cx, index)
1987 }
1988
1989 fn get_from_store(&self, cx: &mut LiftContext<'_>, index: usize) -> Option<Result<T>> {
1990 if index >= self.len {
1991 return None;
1992 }
1993 // Note that this is using panicking indexing and this is expected to
1994 // never fail. The bounds-checking here happened during the construction
1995 // of the `WasmList` itself which means these should always be in-bounds
1996 // (and wasm memory can only grow). This could theoretically be
1997 // unchecked indexing if we're confident enough and it's actually a perf
1998 // issue one day.
1999 let bytes = &cx.memory()[self.ptr + index * T::SIZE32..][..T::SIZE32];
2000 Some(T::linear_lift_from_memory(cx, self.elem, bytes))
2001 }
2002
2003 /// Returns an iterator over the elements of this list.
2004 ///
2005 /// Each item of the list may fail to decode and is represented through the
2006 /// `Result` value of the iterator.
2007 pub fn iter<'a, U: 'static>(
2008 &'a self,
2009 store: impl Into<StoreContextMut<'a, U>>,
2010 ) -> impl ExactSizeIterator<Item = Result<T>> + 'a {
2011 let store = store.into().0;
2012 let mut cx = LiftContext::new(store, self.options, self.instance);
2013 (0..self.len).map(move |i| self.get_from_store(&mut cx, i).unwrap())
2014 }
2015}
2016
// Generates zero-copy, little-endian slice accessors on `WasmList<$i>` for
// each primitive integer type listed in the invocation below.
macro_rules! raw_wasm_list_accessors {
    ($($i:ident)*) => ($(
        impl WasmList<$i> {
            /// Get access to the raw underlying memory for this list.
            ///
            /// This method will return a direct slice into the original wasm
            /// module's linear memory where the data for this slice is stored.
            /// This allows the embedder to have efficient access to the
            /// underlying memory if needed and avoid copies and such if
            /// desired.
            ///
            /// Note that multi-byte integers are stored in little-endian format
            /// so portable processing of this slice must be aware of the host's
            /// byte-endianness. The `from_le` constructors in the Rust standard
            /// library should be suitable for converting from little-endian.
            ///
            /// # Panics
            ///
            /// Panics if the `store` provided is not the one from which this
            /// slice originated.
            pub fn as_le_slice<'a, T: 'static>(&self, store: impl Into<StoreContext<'a, T>>) -> &'a [$i] {
                let memory = self.instance.options_memory(store.into().0, self.options);
                self._as_le_slice(memory)
            }

            // Reinterprets the list's byte range in `all_of_memory` as a
            // typed `[$i]` slice without copying.
            fn _as_le_slice<'a>(&self, all_of_memory: &'a [u8]) -> &'a [$i] {
                // See comments in `WasmList::get` for the panicking indexing
                let byte_size = self.len * mem::size_of::<$i>();
                let bytes = &all_of_memory[self.ptr..][..byte_size];

                // The canonical ABI requires that everything is aligned to its
                // own size, so this should be an aligned array. Furthermore the
                // alignment of primitive integers for hosts should be smaller
                // than or equal to the size of the primitive itself, meaning
                // that a wasm canonical-abi-aligned list is also aligned for
                // the host. That should mean that the head/tail slices here are
                // empty.
                //
                // Also note that the `unsafe` here is needed since the type
                // we're aligning to isn't guaranteed to be valid, but in our
                // case it's just integers and bytes so this should be safe.
                unsafe {
                    let (head, body, tail) = bytes.align_to::<$i>();
                    assert!(head.is_empty() && tail.is_empty());
                    body
                }
            }
        }
    )*)
}

// Raw accessors only make sense for types whose every bit-pattern is valid,
// hence the restriction to primitive integers.
raw_wasm_list_accessors! {
    i8 i16 i32 i64
    u8 u16 u32 u64
}
2072
2073// Note that this is similar to `ComponentType for str` except it can only be
2074// used for lifting, not lowering.
unsafe impl<T: ComponentType> ComponentType for WasmList<T> {
    // Same flat representation as a host slice: a (pointer, length) pair.
    type Lower = <[T] as ComponentType>::Lower;

    const ABI: CanonicalAbiInfo = CanonicalAbiInfo::POINTER_PAIR;

    fn typecheck(ty: &InterfaceType, types: &InstanceType<'_>) -> Result<()> {
        // Type-checking is identical to host slices: any matching `list<t>`.
        <[T] as ComponentType>::typecheck(ty, types)
    }
}
2084
unsafe impl<T: Lift> Lift for WasmList<T> {
    fn linear_lift_from_flat(
        cx: &mut LiftContext<'_>,
        ty: InterfaceType,
        src: &Self::Lower,
    ) -> Result<Self> {
        // Record the element type so individual items can be lifted lazily
        // later on.
        let elem = match ty {
            InterfaceType::List(i) => cx.types[i].element,
            _ => bad_type_info(),
        };
        let (ptr, len) = lift_pointer_pair_from_flat(cx, src)?;
        WasmList::new(ptr, len, cx, elem)
    }

    fn linear_lift_from_memory(
        cx: &mut LiftContext<'_>,
        ty: InterfaceType,
        bytes: &[u8],
    ) -> Result<Self> {
        let elem = match ty {
            InterfaceType::List(i) => cx.types[i].element,
            _ => bad_type_info(),
        };
        // Callers are required to hand over a correctly-aligned slice.
        debug_assert!((bytes.as_ptr() as usize) % (Self::ALIGN32 as usize) == 0);
        let (ptr, len) = lift_pointer_pair_from_memory(cx, bytes)?;
        WasmList::new(ptr, len, cx, elem)
    }
}
2113
2114// =============================================================================
2115// HashMap<K, V> support for component model `map<K, V>`
2116//
2117// Maps are represented as `list<tuple<K, V>>` in the canonical ABI, so the
2118// lowered form is a (pointer, length) pair just like lists.
2119
2120fn map_abi<'a>(ty: InterfaceType, types: &'a ComponentTypes) -> &'a TypeMap {
2121 match ty {
2122 InterfaceType::Map(i) => &types[i],
2123 _ => bad_type_info(),
2124 }
2125}
2126
unsafe impl<K, V> ComponentType for HashMap<K, V>
where
    K: ComponentType,
    V: ComponentType,
{
    // Maps lower as `list<tuple<K, V>>`: a (pointer, length) pair.
    type Lower = [ValRaw; 2];

    const ABI: CanonicalAbiInfo = CanonicalAbiInfo::POINTER_PAIR;

    fn typecheck(ty: &InterfaceType, types: &InstanceType<'_>) -> Result<()> {
        // Same type requirements as the fallible map wrapper.
        TryHashMap::<K, V>::typecheck(ty, types)
    }
}
2140
unsafe impl<K, V> Lower for HashMap<K, V>
where
    K: Lower,
    V: Lower,
{
    fn linear_lower_to_flat<U>(
        &self,
        cx: &mut LowerContext<'_, U>,
        ty: InterfaceType,
        dst: &mut MaybeUninit<[ValRaw; 2]>,
    ) -> Result<()> {
        let map = map_abi(ty, &cx.types);
        // Copy all entries into guest memory, then lower the resulting
        // (pointer, length) pair into the flat representation.
        let (ptr, len) = lower_map_iter(cx, map, self.len(), self.iter())?;
        lower_pointer_pair_to_flat(cx, dst, ptr, len);
        Ok(())
    }

    fn linear_lower_to_memory<U>(
        &self,
        cx: &mut LowerContext<'_, U>,
        ty: InterfaceType,
        offset: usize,
    ) -> Result<()> {
        let map = map_abi(ty, &cx.types);
        // The destination must be aligned for a (pointer, length) pair.
        debug_assert!(offset % (CanonicalAbiInfo::POINTER_PAIR.align32 as usize) == 0);
        let (ptr, len) = lower_map_iter(cx, map, self.len(), self.iter())?;
        lower_pointer_pair_to_memory(cx, offset, ptr, len);
        Ok(())
    }
}
2171
2172unsafe impl<K, V> Lift for HashMap<K, V>
2173where
2174 K: Lift + Eq + Hash,
2175 V: Lift,
2176{
2177 fn linear_lift_from_flat(
2178 cx: &mut LiftContext<'_>,
2179 ty: InterfaceType,
2180 src: &Self::Lower,
2181 ) -> Result<Self> {
2182 Ok(TryHashMap::<K, V>::linear_lift_from_flat(cx, ty, src)?.into())
2183 }
2184
2185 fn linear_lift_from_memory(
2186 cx: &mut LiftContext<'_>,
2187 ty: InterfaceType,
2188 bytes: &[u8],
2189 ) -> Result<Self> {
2190 Ok(TryHashMap::<K, V>::linear_lift_from_memory(cx, ty, bytes)?.into())
2191 }
2192}
2193
2194fn lower_map_iter<'a, K, V, U>(
2195 cx: &mut LowerContext<'_, U>,
2196 map: &TypeMap,
2197 len: usize,
2198 iter: impl Iterator<Item = (&'a K, &'a V)>,
2199) -> Result<(usize, usize)>
2200where
2201 K: Lower + 'a,
2202 V: Lower + 'a,
2203{
2204 let size = len
2205 .checked_mul(usize::try_from(map.entry_abi.size32)?)
2206 .ok_or_else(|| format_err!("size overflow copying a map"))?;
2207 let ptr = cx.realloc(0, 0, map.entry_abi.align32, size)?;
2208
2209 let mut entry_offset = ptr;
2210 for (key, value) in iter {
2211 // Keys are the first field in each entry tuple.
2212 <K as Lower>::linear_lower_to_memory(key, cx, map.key, entry_offset)?;
2213 // Values start at the precomputed value offset within the tuple.
2214 <V as Lower>::linear_lower_to_memory(
2215 value,
2216 cx,
2217 map.value,
2218 entry_offset + usize::try_from(map.value_offset32)?,
2219 )?;
2220 entry_offset += usize::try_from(map.entry_abi.size32)?;
2221 }
2222
2223 Ok((ptr, len))
2224}
2225
2226unsafe impl<K, V> ComponentType for TryHashMap<K, V>
2227where
2228 K: ComponentType,
2229 V: ComponentType,
2230{
2231 type Lower = [ValRaw; 2];
2232
2233 const ABI: CanonicalAbiInfo = CanonicalAbiInfo::POINTER_PAIR;
2234
2235 fn typecheck(ty: &InterfaceType, types: &InstanceType<'_>) -> Result<()> {
2236 match ty {
2237 InterfaceType::Map(t) => {
2238 let map_ty = &types.types[*t];
2239 K::typecheck(&map_ty.key, types)?;
2240 V::typecheck(&map_ty.value, types)?;
2241 Ok(())
2242 }
2243 other => bail!("expected `map` found `{}`", desc(other)),
2244 }
2245 }
2246}
2247
unsafe impl<K, V> Lower for TryHashMap<K, V>
where
    K: Lower,
    V: Lower,
{
    fn linear_lower_to_flat<U>(
        &self,
        cx: &mut LowerContext<'_, U>,
        ty: InterfaceType,
        dst: &mut MaybeUninit<[ValRaw; 2]>,
    ) -> Result<()> {
        let map = map_abi(ty, &cx.types);
        // Copy all entries into guest memory, then lower the resulting
        // (pointer, length) pair into the flat representation.
        let (ptr, len) = lower_map_iter(cx, map, self.len(), self.iter())?;
        lower_pointer_pair_to_flat(cx, dst, ptr, len);
        Ok(())
    }

    fn linear_lower_to_memory<U>(
        &self,
        cx: &mut LowerContext<'_, U>,
        ty: InterfaceType,
        offset: usize,
    ) -> Result<()> {
        let map = map_abi(ty, &cx.types);
        // The destination must be aligned for a (pointer, length) pair.
        debug_assert!(offset % (CanonicalAbiInfo::POINTER_PAIR.align32 as usize) == 0);
        let (ptr, len) = lower_map_iter(cx, map, self.len(), self.iter())?;
        lower_pointer_pair_to_memory(cx, offset, ptr, len);
        Ok(())
    }
}
2278
unsafe impl<K, V> Lift for TryHashMap<K, V>
where
    K: Lift + Eq + Hash,
    V: Lift,
{
    fn linear_lift_from_flat(
        cx: &mut LiftContext<'_>,
        ty: InterfaceType,
        src: &Self::Lower,
    ) -> Result<Self> {
        let map = map_abi(ty, &cx.types);
        // Maps flatten to a (pointer, length) pair pointing at the entries.
        let (ptr, len) = lift_pointer_pair_from_flat(cx, src)?;
        lift_try_map(cx, map, ptr, len)
    }

    fn linear_lift_from_memory(
        cx: &mut LiftContext<'_>,
        ty: InterfaceType,
        bytes: &[u8],
    ) -> Result<Self> {
        let map = map_abi(ty, &cx.types);
        // Callers are required to hand over a correctly-aligned slice.
        debug_assert!((bytes.as_ptr() as usize) % (Self::ALIGN32 as usize) == 0);
        let (ptr, len) = lift_pointer_pair_from_memory(cx, bytes)?;
        lift_try_map(cx, map, ptr, len)
    }
}
2305
2306fn lift_try_map<K, V>(
2307 cx: &mut LiftContext<'_>,
2308 map: &TypeMap,
2309 ptr: usize,
2310 len: usize,
2311) -> Result<TryHashMap<K, V>>
2312where
2313 K: Lift + Eq + Hash,
2314 V: Lift,
2315{
2316 let mut result = TryHashMap::with_capacity(len)?;
2317
2318 match len
2319 .checked_mul(usize::try_from(map.entry_abi.size32)?)
2320 .and_then(|total| ptr.checked_add(total))
2321 {
2322 Some(n) if n <= cx.memory().len() => cx.consume_fuel_array(len, size_of::<(K, V)>())?,
2323 _ => bail!("map pointer/length out of bounds of memory"),
2324 }
2325 if ptr % (map.entry_abi.align32 as usize) != 0 {
2326 bail!("map pointer is not aligned");
2327 }
2328
2329 for i in 0..len {
2330 let entry_base = ptr + (i * usize::try_from(map.entry_abi.size32)?);
2331
2332 let key_bytes = &cx.memory()[entry_base..][..K::SIZE32];
2333 let key = K::linear_lift_from_memory(cx, map.key, key_bytes)?;
2334
2335 let value_bytes =
2336 &cx.memory()[entry_base + usize::try_from(map.value_offset32)?..][..V::SIZE32];
2337 let value = V::linear_lift_from_memory(cx, map.value, value_bytes)?;
2338
2339 result.insert(key, value)?;
2340 }
2341
2342 Ok(result)
2343}
2344
2345/// Verify that the given wasm type is a tuple with the expected fields in the right order.
fn typecheck_tuple(
    ty: &InterfaceType,
    types: &InstanceType<'_>,
    expected: &[fn(&InterfaceType, &InstanceType<'_>) -> Result<()>],
) -> Result<()> {
    match ty {
        InterfaceType::Tuple(t) => {
            let tuple = &types.types[*t];
            // Arity must match exactly before checking each position.
            if tuple.types.len() != expected.len() {
                bail!(
                    "expected {}-tuple, found {}-tuple",
                    expected.len(),
                    tuple.types.len()
                );
            }
            // Each positional type is validated by the corresponding
            // caller-provided check function.
            for (ty, check) in tuple.types.iter().zip(expected) {
                check(ty, types)?;
            }
            Ok(())
        }
        other => bail!("expected `tuple` found `{}`", desc(other)),
    }
}
2369
2370/// Verify that the given wasm type is a record with the expected fields in the right order and with the right
2371/// names.
pub fn typecheck_record(
    ty: &InterfaceType,
    types: &InstanceType<'_>,
    expected: &[(&str, fn(&InterfaceType, &InstanceType<'_>) -> Result<()>)],
) -> Result<()> {
    match ty {
        InterfaceType::Record(index) => {
            let fields = &types.types[*index].fields;

            // Field counts must agree before comparing individual fields.
            if fields.len() != expected.len() {
                bail!(
                    "expected record of {} fields, found {} fields",
                    expected.len(),
                    fields.len()
                );
            }

            for (field, &(name, check)) in fields.iter().zip(expected) {
                // Note: the field's type is checked before its name, so a
                // positional type mismatch is reported even when the name at
                // that position also differs.
                check(&field.ty, types)
                    .with_context(|| format!("type mismatch for field {name}"))?;

                if field.name != name {
                    bail!("expected record field named {}, found {}", name, field.name);
                }
            }

            Ok(())
        }
        other => bail!("expected `record` found `{}`", desc(other)),
    }
}
2403
2404/// Verify that the given wasm type is a variant with the expected cases in the right order and with the right
2405/// names.
pub fn typecheck_variant(
    ty: &InterfaceType,
    types: &InstanceType<'_>,
    expected: &[(
        &str,
        Option<fn(&InterfaceType, &InstanceType<'_>) -> Result<()>>,
    )],
) -> Result<()> {
    match ty {
        InterfaceType::Variant(index) => {
            let cases = &types.types[*index].cases;

            // Case counts must agree before comparing individual cases.
            if cases.len() != expected.len() {
                bail!(
                    "expected variant of {} cases, found {} cases",
                    expected.len(),
                    cases.len()
                );
            }

            for ((case_name, case_ty), &(name, check)) in cases.iter().zip(expected) {
                // Names are compared positionally; order matters.
                if *case_name != name {
                    bail!("expected variant case named {name}, found {case_name}");
                }

                // Both sides must agree on whether the case carries a
                // payload, and if so the payload types must match too.
                match (check, case_ty) {
                    (Some(check), Some(ty)) => check(ty, types)
                        .with_context(|| format!("type mismatch for case {name}"))?,
                    (None, None) => {}
                    (Some(_), None) => {
                        bail!("case `{name}` has no type but one was expected")
                    }
                    (None, Some(_)) => {
                        bail!("case `{name}` has a type but none was expected")
                    }
                }
            }

            Ok(())
        }
        other => bail!("expected `variant` found `{}`", desc(other)),
    }
}
2449
2450/// Verify that the given wasm type is a enum with the expected cases in the right order and with the right
2451/// names.
2452pub fn typecheck_enum(
2453 ty: &InterfaceType,
2454 types: &InstanceType<'_>,
2455 expected: &[&str],
2456) -> Result<()> {
2457 match ty {
2458 InterfaceType::Enum(index) => {
2459 let names = &types.types[*index].names;
2460
2461 if names.len() != expected.len() {
2462 bail!(
2463 "expected enum of {} names, found {} names",
2464 expected.len(),
2465 names.len()
2466 );
2467 }
2468
2469 for (name, expected) in names.iter().zip(expected) {
2470 if name != expected {
2471 bail!("expected enum case named {expected}, found {name}");
2472 }
2473 }
2474
2475 Ok(())
2476 }
2477 other => bail!("expected `enum` found `{}`", desc(other)),
2478 }
2479}
2480
2481/// Verify that the given wasm type is a flags type with the expected flags in the right order and with the right
2482/// names.
2483pub fn typecheck_flags(
2484 ty: &InterfaceType,
2485 types: &InstanceType<'_>,
2486 expected: &[&str],
2487) -> Result<()> {
2488 match ty {
2489 InterfaceType::Flags(index) => {
2490 let names = &types.types[*index].names;
2491
2492 if names.len() != expected.len() {
2493 bail!(
2494 "expected flags type with {} names, found {} names",
2495 expected.len(),
2496 names.len()
2497 );
2498 }
2499
2500 for (name, expected) in names.iter().zip(expected) {
2501 if name != expected {
2502 bail!("expected flag named {expected}, found {name}");
2503 }
2504 }
2505
2506 Ok(())
2507 }
2508 other => bail!("expected `flags` found `{}`", desc(other)),
2509 }
2510}
2511
2512/// Format the specified bitflags using the specified names for debugging
/// Format the specified bitflags using the specified names for debugging
pub fn format_flags(bits: &[u32], names: &[&str], f: &mut fmt::Formatter) -> fmt::Result {
    f.write_str("(")?;
    let mut first = true;
    for (index, name) in names.iter().enumerate() {
        // Flag `index` lives in bit `index % 32` of word `index / 32`.
        let set = (bits[index / 32] >> (index % 32)) & 1 != 0;
        if !set {
            continue;
        }
        // Separate every flag after the first with a `|`.
        if !first {
            f.write_str("|")?;
        }
        first = false;
        f.write_str(name)?;
    }
    f.write_str(")")
}
2529
unsafe impl<T> ComponentType for Option<T>
where
    T: ComponentType,
{
    // Flat representation: a 32-bit discriminant followed by `T`'s flat
    // representation (zero-filled for `None`, see the `Lower` impl below).
    type Lower = TupleLower<<u32 as ComponentType>::Lower, T::Lower>;

    // `option<T>` is a two-case variant: a payload-less `none` case and a
    // `some` case carrying `T`.
    const ABI: CanonicalAbiInfo = CanonicalAbiInfo::variant_static(&[None, Some(T::ABI)]);

    fn typecheck(ty: &InterfaceType, types: &InstanceType<'_>) -> Result<()> {
        match ty {
            InterfaceType::Option(t) => T::typecheck(&types.types[*t].ty, types),
            other => bail!("expected `option` found `{}`", desc(other)),
        }
    }
}
2545
unsafe impl<T> ComponentVariant for Option<T>
where
    T: ComponentType,
{
    // Case 0 (`None`) carries no payload; case 1 (`Some`) carries `T`.
    const CASES: &'static [Option<CanonicalAbiInfo>] = &[None, Some(T::ABI)];
}
2552
unsafe impl<T> Lower for Option<T>
where
    T: Lower,
{
    // Writes the discriminant into the first flat slot (`A1`) and the
    // payload — or zeros, for `None` — into the remainder (`A2`).
    fn linear_lower_to_flat<U>(
        &self,
        cx: &mut LowerContext<'_, U>,
        ty: InterfaceType,
        dst: &mut MaybeUninit<Self::Lower>,
    ) -> Result<()> {
        let payload = match ty {
            InterfaceType::Option(ty) => cx.types[ty].ty,
            _ => bad_type_info(),
        };
        match self {
            None => {
                map_maybe_uninit!(dst.A1).write(ValRaw::i32(0));
                // Note that this is unsafe as we're writing an arbitrary
                // bit-pattern to an arbitrary type, but part of the unsafe
                // contract of the `ComponentType` trait is that we can assign
                // any bit-pattern. By writing all zeros here we're ensuring
                // that the core wasm arguments this translates to will all be
                // zeros (as the canonical ABI requires).
                unsafe {
                    map_maybe_uninit!(dst.A2).as_mut_ptr().write_bytes(0u8, 1);
                }
            }
            Some(val) => {
                map_maybe_uninit!(dst.A1).write(ValRaw::i32(1));
                val.linear_lower_to_flat(cx, payload, map_maybe_uninit!(dst.A2))?;
            }
        }
        Ok(())
    }

    // In-memory layout: a one-byte discriminant at `offset` followed by the
    // payload at the type's precomputed payload offset.
    fn linear_lower_to_memory<U>(
        &self,
        cx: &mut LowerContext<'_, U>,
        ty: InterfaceType,
        offset: usize,
    ) -> Result<()> {
        debug_assert!(offset % (Self::ALIGN32 as usize) == 0);
        let payload = match ty {
            InterfaceType::Option(ty) => cx.types[ty].ty,
            _ => bad_type_info(),
        };
        match self {
            None => {
                cx.get::<1>(offset)[0] = 0;
            }
            Some(val) => {
                cx.get::<1>(offset)[0] = 1;
                val.linear_lower_to_memory(
                    cx,
                    payload,
                    offset + (Self::INFO.payload_offset32 as usize),
                )?;
            }
        }
        Ok(())
    }
}
2615
unsafe impl<T> Lift for Option<T>
where
    T: Lift,
{
    fn linear_lift_from_flat(
        cx: &mut LiftContext<'_>,
        ty: InterfaceType,
        src: &Self::Lower,
    ) -> Result<Self> {
        let payload = match ty {
            InterfaceType::Option(ty) => cx.types[ty].ty,
            _ => bad_type_info(),
        };
        // Only discriminants 0 (`None`) and 1 (`Some`) are valid; anything
        // else is a guest-side encoding error. The payload slots are ignored
        // entirely for `None`.
        Ok(match src.A1.get_i32() {
            0 => None,
            1 => Some(T::linear_lift_from_flat(cx, payload, &src.A2)?),
            _ => bail!("invalid option discriminant"),
        })
    }

    fn linear_lift_from_memory(
        cx: &mut LiftContext<'_>,
        ty: InterfaceType,
        bytes: &[u8],
    ) -> Result<Self> {
        debug_assert!((bytes.as_ptr() as usize) % (Self::ALIGN32 as usize) == 0);
        let payload_ty = match ty {
            InterfaceType::Option(ty) => cx.types[ty].ty,
            _ => bad_type_info(),
        };
        // One-byte discriminant followed by the payload at its precomputed
        // offset, mirroring `linear_lower_to_memory` above.
        let discrim = bytes[0];
        let payload = &bytes[Self::INFO.payload_offset32 as usize..];
        match discrim {
            0 => Ok(None),
            1 => Ok(Some(T::linear_lift_from_memory(cx, payload_ty, payload)?)),
            _ => bail!("invalid option discriminant"),
        }
    }
}
2655
// Flat (core wasm) representation of a component-level `result<T, E>`: a
// discriminant followed by storage big enough for either case's payload.
#[derive(Clone, Copy)]
#[repr(C)]
pub struct ResultLower<T: Copy, E: Copy> {
    // Discriminant selecting which member of `payload` is live.
    tag: ValRaw,
    // Overlapped storage for both possible payloads.
    payload: ResultLowerPayload<T, E>,
}
2662
// Union of the two payload representations; `repr(C)` ensures both members
// start at offset zero, matching the canonical ABI where all variant
// payloads begin at the front of the payload area.
#[derive(Clone, Copy)]
#[repr(C)]
union ResultLowerPayload<T: Copy, E: Copy> {
    ok: T,
    err: E,
}
2669
unsafe impl<T, E> ComponentType for Result<T, E>
where
    T: ComponentType,
    E: ComponentType,
{
    type Lower = ResultLower<T::Lower, E::Lower>;

    // A `result` is a two-case variant where both cases may carry a payload.
    const ABI: CanonicalAbiInfo = CanonicalAbiInfo::variant_static(&[Some(T::ABI), Some(E::ABI)]);

    fn typecheck(ty: &InterfaceType, types: &InstanceType<'_>) -> Result<()> {
        match ty {
            InterfaceType::Result(r) => {
                let result = &types.types[*r];
                // A `result` case with no payload type on the wasm side is
                // accepted only when the corresponding Rust type is `()`
                // (per `IS_RUST_UNIT_TYPE`).
                match &result.ok {
                    Some(ty) => T::typecheck(ty, types)?,
                    None if T::IS_RUST_UNIT_TYPE => {}
                    None => bail!("expected no `ok` type"),
                }
                match &result.err {
                    Some(ty) => E::typecheck(ty, types)?,
                    None if E::IS_RUST_UNIT_TYPE => {}
                    None => bail!("expected no `err` type"),
                }
                Ok(())
            }
            other => bail!("expected `result` found `{}`", desc(other)),
        }
    }
}
2699
2700/// Lowers the payload of a variant into the storage for the entire payload,
2701/// handling writing zeros at the end of the representation if this payload is
2702/// smaller than the entire flat representation.
2703///
2704/// * `payload` - the flat storage space for the entire payload of the variant
2705/// * `typed_payload` - projection from the payload storage space to the
2706/// individual storage space for this variant.
2707/// * `lower` - lowering operation used to initialize the `typed_payload` return
2708/// value.
2709///
2710/// For more information on this se the comments in the `Lower for Result`
2711/// implementation below.
pub unsafe fn lower_payload<P, T>(
    payload: &mut MaybeUninit<P>,
    typed_payload: impl FnOnce(&mut MaybeUninit<P>) -> &mut MaybeUninit<T>,
    lower: impl FnOnce(&mut MaybeUninit<T>) -> Result<()>,
) -> Result<()> {
    // Lower the case-specific payload into its projection of the storage.
    let typed = typed_payload(payload);
    lower(typed)?;

    // SAFETY: relies on the caller's contract (see the doc comment above)
    // that `P` and `T` are flat `ValRaw`-compatible payload storage.
    let typed_len = unsafe { storage_as_slice(typed).len() };
    let payload = unsafe { storage_as_slice_mut(payload) };
    // Zero-fill the trailing slots of the full payload that this case's
    // representation didn't cover, so no host memory leaks into the guest.
    for slot in payload[typed_len..].iter_mut() {
        slot.write(ValRaw::u64(0));
    }
    Ok(())
}
2727
unsafe impl<T, E> ComponentVariant for Result<T, E>
where
    T: ComponentType,
    E: ComponentType,
{
    // Case 0 (`Ok`) carries `T`; case 1 (`Err`) carries `E`.
    const CASES: &'static [Option<CanonicalAbiInfo>] = &[Some(T::ABI), Some(E::ABI)];
}
2735
unsafe impl<T, E> Lower for Result<T, E>
where
    T: Lower,
    E: Lower,
{
    fn linear_lower_to_flat<U>(
        &self,
        cx: &mut LowerContext<'_, U>,
        ty: InterfaceType,
        dst: &mut MaybeUninit<Self::Lower>,
    ) -> Result<()> {
        // Pull the optional `ok`/`err` payload types out of the type tables;
        // any other interface type here indicates a typecheck bug.
        let (ok, err) = match ty {
            InterfaceType::Result(ty) => {
                let ty = &cx.types[ty];
                (ty.ok, ty.err)
            }
            _ => bad_type_info(),
        };

        // This implementation of `Lower::lower`, if you're reading these from
        // the top of this file, is the first location where the "join" logic
        // of the component model's canonical ABI is encountered. The rough
        // problem is that let's say we have a component model type of the
        // form:
        //
        //     (result u64 (error (tuple f32 u16)))
        //
        // The flat representation of this is actually pretty tricky. Currently
        // it is:
        //
        //     i32 i64 i32
        //
        // The first `i32` is the discriminant for the `result`, and the payload
        // is represented by `i64 i32`. The "ok" variant will only use the `i64`
        // and the "err" variant will use both `i64` and `i32`.
        //
        // In the "ok" variant the first issue is encountered. The size of one
        // variant may not match the size of the other variants. All variants
        // start at the "front" but when lowering a type we need to be sure to
        // initialize the later variants (lest we leak random host memory into
        // the guest module). Due to how the `Lower` type is represented as a
        // `union` of all the variants what ends up happening here is that
        // internally within the `lower_payload` after the typed payload is
        // lowered the remaining bits of the payload that weren't initialized
        // are all set to zero. This will guarantee that we'll write to all the
        // slots for each variant.
        //
        // The "err" variant encounters the second issue, however, which is that
        // the flat representation for each type may differ between payloads. In
        // the "ok" arm an `i64` is written, but the `lower` implementation for
        // the "err" arm will write an `f32` and then an `i32`. For this
        // implementation of `lower` to be valid the `f32` needs to get inflated
        // to an `i64` with zero-padding in the upper bits. What may be
        // surprising, however, is that none of this is handled in this file.
        // This implementation looks like it's blindly deferring to `E::lower`
        // and hoping it does the right thing.
        //
        // In reality, however, the correctness of variant lowering relies on
        // two subtle details of the `ValRaw` implementation in Wasmtime:
        //
        // 1. First the `ValRaw` value always contains little-endian values.
        //    This means that if a `u32` is written, a `u64` is read, and then
        //    the `u64` has its upper bits truncated the original value will
        //    always be retained. This is primarily here for big-endian
        //    platforms where if it weren't little endian then the opposite
        //    would occur and the wrong value would be read.
        //
        // 2. Second, and perhaps even more subtly, the `ValRaw` constructors
        //    for 32-bit types actually always initialize 64-bits of the
        //    `ValRaw`. In the component model flat ABI only 32 and 64-bit types
        //    are used so 64-bits is big enough to contain everything. This
        //    means that when a `ValRaw` is written into the destination it will
        //    always, whether it's needed or not, be "ready" to get extended up
        //    to 64-bits.
        //
        // Put together these two subtle guarantees means that all `Lower`
        // implementations can be written "naturally" as one might naively
        // expect. Variants will, on each arm, zero out remaining fields and all
        // writes to the flat representation will automatically be 64-bit writes
        // meaning that if the value is read as a 64-bit value, which isn't
        // known at the time of the write, it'll still be correct.
        match self {
            Ok(e) => {
                // Discriminant 0 selects the `ok` case.
                map_maybe_uninit!(dst.tag).write(ValRaw::i32(0));
                // SAFETY: `dst.payload` is the flat `ValRaw`-based union of
                // both cases and `payload.ok` is a projection into it,
                // satisfying `lower_payload`'s contract.
                unsafe {
                    lower_payload(
                        map_maybe_uninit!(dst.payload),
                        |payload| map_maybe_uninit!(payload.ok),
                        |dst| match ok {
                            Some(ok) => e.linear_lower_to_flat(cx, ok, dst),
                            // No payload type means there's nothing to lower;
                            // `lower_payload` still zeroes all the slots.
                            None => Ok(()),
                        },
                    )
                }
            }
            Err(e) => {
                // Discriminant 1 selects the `err` case.
                map_maybe_uninit!(dst.tag).write(ValRaw::i32(1));
                // SAFETY: as above, but projecting to the `err` union field.
                unsafe {
                    lower_payload(
                        map_maybe_uninit!(dst.payload),
                        |payload| map_maybe_uninit!(payload.err),
                        |dst| match err {
                            Some(err) => e.linear_lower_to_flat(cx, err, dst),
                            None => Ok(()),
                        },
                    )
                }
            }
        }
    }

    fn linear_lower_to_memory<U>(
        &self,
        cx: &mut LowerContext<'_, U>,
        ty: InterfaceType,
        offset: usize,
    ) -> Result<()> {
        let (ok, err) = match ty {
            InterfaceType::Result(ty) => {
                let ty = &cx.types[ty];
                (ty.ok, ty.err)
            }
            _ => bad_type_info(),
        };
        debug_assert!(offset % (Self::ALIGN32 as usize) == 0);
        // The payload is stored after the discriminant at an aligned offset
        // computed statically from this type's canonical ABI info.
        let payload_offset = Self::INFO.payload_offset32 as usize;
        match self {
            Ok(e) => {
                // Single-byte discriminant: 0 for `Ok`.
                cx.get::<1>(offset)[0] = 0;
                if let Some(ok) = ok {
                    e.linear_lower_to_memory(cx, ok, offset + payload_offset)?;
                }
            }
            Err(e) => {
                // Single-byte discriminant: 1 for `Err`.
                cx.get::<1>(offset)[0] = 1;
                if let Some(err) = err {
                    e.linear_lower_to_memory(cx, err, offset + payload_offset)?;
                }
            }
        }
        Ok(())
    }
}
2878
unsafe impl<T, E> Lift for Result<T, E>
where
    T: Lift,
    E: Lift,
{
    #[inline]
    fn linear_lift_from_flat(
        cx: &mut LiftContext<'_>,
        ty: InterfaceType,
        src: &Self::Lower,
    ) -> Result<Self> {
        // Pull the optional `ok`/`err` payload types out of the type tables;
        // any other interface type here indicates a typecheck bug.
        let (ok, err) = match ty {
            InterfaceType::Result(ty) => {
                let ty = &cx.types[ty];
                (ty.ok, ty.err)
            }
            _ => bad_type_info(),
        };
        // Note that this implementation specifically isn't trying to actually
        // reinterpret or alter the bits of `lower` depending on which variant
        // we're lifting. This ends up all working out because the value is
        // stored in little-endian format.
        //
        // When stored in little-endian format the `{T,E}::Lower`, when each
        // individual `ValRaw` is read, means that if an i64 value, extended
        // from an i32 value, was stored then when the i32 value is read it'll
        // automatically ignore the upper bits.
        //
        // This "trick" allows us to seamlessly pass through the `Self::Lower`
        // representation into the lifting/lowering without trying to handle
        // "join"ed types as per the canonical ABI. It just so happens that i64
        // bits will naturally be reinterpreted as f64. Additionally if the
        // joined type is i64 but only the lower bits are read that's ok and we
        // don't need to validate the upper bits.
        //
        // This is largely enabled by WebAssembly/component-model#35 where no
        // validation needs to be performed for ignored bits and bytes here.
        Ok(match src.tag.get_i32() {
            // SAFETY: the tag written during lowering records which union
            // field of the payload was initialized, so reading that same field
            // here is sound (see the comment above for why no bit-level "join"
            // handling is required).
            0 => Ok(unsafe { lift_option(cx, ok, &src.payload.ok)? }),
            1 => Err(unsafe { lift_option(cx, err, &src.payload.err)? }),
            _ => bail!("invalid expected discriminant"),
        })
    }

    #[inline]
    fn linear_lift_from_memory(
        cx: &mut LiftContext<'_>,
        ty: InterfaceType,
        bytes: &[u8],
    ) -> Result<Self> {
        debug_assert!((bytes.as_ptr() as usize) % (Self::ALIGN32 as usize) == 0);
        // The discriminant lives in the first byte; the payload follows at
        // this type's statically-computed payload offset.
        let discrim = bytes[0];
        let payload = &bytes[Self::INFO.payload_offset32 as usize..];
        let (ok, err) = match ty {
            InterfaceType::Result(ty) => {
                let ty = &cx.types[ty];
                (ty.ok, ty.err)
            }
            _ => bad_type_info(),
        };
        // Slice the payload down to exactly the size of the selected case's
        // in-memory representation before lifting it.
        match discrim {
            0 => Ok(Ok(load_option(cx, ok, &payload[..T::SIZE32])?)),
            1 => Ok(Err(load_option(cx, err, &payload[..E::SIZE32])?)),
            _ => bail!("invalid expected discriminant"),
        }
    }
}
2946
2947fn lift_option<T>(cx: &mut LiftContext<'_>, ty: Option<InterfaceType>, src: &T::Lower) -> Result<T>
2948where
2949 T: Lift,
2950{
2951 match ty {
2952 Some(ty) => T::linear_lift_from_flat(cx, ty, src),
2953 None => Ok(empty_lift()),
2954 }
2955}
2956
2957fn load_option<T>(cx: &mut LiftContext<'_>, ty: Option<InterfaceType>, bytes: &[u8]) -> Result<T>
2958where
2959 T: Lift,
2960{
2961 match ty {
2962 Some(ty) => T::linear_lift_from_memory(cx, ty, bytes),
2963 None => Ok(empty_lift()),
2964 }
2965}
2966
/// Produces a value of a payload-less type `T`, used when a variant case has
/// no associated component-model payload type.
///
/// Both assertions guarantee `T` is a zero-sized Rust unit-like type before
/// any "uninitialized" value is materialized.
fn empty_lift<T>() -> T
where
    T: Lift,
{
    assert!(T::IS_RUST_UNIT_TYPE);
    assert_eq!(mem::size_of::<T>(), 0);
    // SAFETY: `T` is zero-sized (asserted above), so the value contains no
    // bytes and an uninitialized instance is trivially valid.
    unsafe { MaybeUninit::uninit().assume_init() }
}
2975
/// Helper structure to define `Lower` for tuples below.
///
/// Uses default type parameters to have fields be zero-sized and not present
/// in memory for smaller tuple values, so e.g. a 2-tuple only pays for two
/// real fields.
#[expect(non_snake_case, reason = "more amenable to macro-generated code")]
#[doc(hidden)]
#[derive(Clone, Copy)]
#[repr(C)]
pub struct TupleLower<
    T1 = (),
    T2 = (),
    T3 = (),
    T4 = (),
    T5 = (),
    T6 = (),
    T7 = (),
    T8 = (),
    T9 = (),
    T10 = (),
    T11 = (),
    T12 = (),
    T13 = (),
    T14 = (),
    T15 = (),
    T16 = (),
    T17 = (),
> {
    // NB: these names match the names in `for_each_function_signature!`
    A1: T1,
    A2: T2,
    A3: T3,
    A4: T4,
    A5: T5,
    A6: T6,
    A7: T7,
    A8: T8,
    A9: T9,
    A10: T10,
    A11: T11,
    A12: T12,
    A13: T13,
    A14: T14,
    A15: T15,
    A16: T16,
    A17: T17,
    // Zero-length `[ValRaw; 0]` field: contributes no size but raises the
    // `repr(C)` struct's alignment to `ValRaw`'s even when all fields above
    // default to `()`, keeping the layout compatible with flat `ValRaw`
    // storage.
    _align_tuple_lower0_correctly: [ValRaw; 0],
}
3023
/// Implements `ComponentType`, `Lower`, `Lift`, and `ComponentNamedList` for
/// a Rust tuple of the given arity, treating it as the component model's
/// `tuple` type (which has the same canonical ABI as a record of its element
/// types).
macro_rules! impl_component_ty_for_tuples {
    ($n:tt $($t:ident)*) => {
        #[allow(non_snake_case, reason = "macro-generated code")]
        unsafe impl<$($t,)*> ComponentType for ($($t,)*)
        where $($t: ComponentType),*
        {
            // Flat representation reuses `TupleLower`, whose fields `A1`,
            // `A2`, ... line up positionally with the `$t` types here.
            type Lower = TupleLower<$($t::Lower),*>;

            const ABI: CanonicalAbiInfo = CanonicalAbiInfo::record_static(&[
                $($t::ABI),*
            ]);

            // Only the empty tuple `()` counts as a Rust unit type: the
            // repetition below runs once per element, so any element at all
            // forces this to `false`.
            const IS_RUST_UNIT_TYPE: bool = {
                let mut _is_unit = true;
                $(
                    let _anything_to_bind_the_macro_variable = $t::IS_RUST_UNIT_TYPE;
                    _is_unit = false;
                )*
                _is_unit
            };

            fn typecheck(
                ty: &InterfaceType,
                types: &InstanceType<'_>,
            ) -> Result<()> {
                typecheck_tuple(ty, types, &[$($t::typecheck),*])
            }
        }

        #[allow(non_snake_case, reason = "macro-generated code")]
        unsafe impl<$($t,)*> Lower for ($($t,)*)
        where $($t: Lower),*
        {
            fn linear_lower_to_flat<U>(
                &self,
                cx: &mut LowerContext<'_, U>,
                ty: InterfaceType,
                _dst: &mut MaybeUninit<Self::Lower>,
            ) -> Result<()> {
                let types = match ty {
                    InterfaceType::Tuple(t) => &cx.types[t].types,
                    _ => bad_type_info(),
                };
                let ($($t,)*) = self;
                // Lower each element into its corresponding `TupleLower`
                // field, pulling each element's interface type from the type
                // table in order.
                let mut _types = types.iter();
                $(
                    let ty = *_types.next().unwrap_or_else(bad_type_info);
                    $t.linear_lower_to_flat(cx, ty, map_maybe_uninit!(_dst.$t))?;
                )*
                Ok(())
            }

            fn linear_lower_to_memory<U>(
                &self,
                cx: &mut LowerContext<'_, U>,
                ty: InterfaceType,
                mut _offset: usize,
            ) -> Result<()> {
                debug_assert!(_offset % (Self::ALIGN32 as usize) == 0);
                let types = match ty {
                    InterfaceType::Tuple(t) => &cx.types[t].types,
                    _ => bad_type_info(),
                };
                let ($($t,)*) = self;
                let mut _types = types.iter();
                $(
                    let ty = *_types.next().unwrap_or_else(bad_type_info);
                    // `next_field32_size` aligns `_offset` for this field,
                    // returns the aligned start, and advances `_offset` past
                    // the field for the next iteration.
                    $t.linear_lower_to_memory(cx, ty, $t::ABI.next_field32_size(&mut _offset))?;
                )*
                Ok(())
            }
        }

        #[allow(non_snake_case, reason = "macro-generated code")]
        unsafe impl<$($t,)*> Lift for ($($t,)*)
        where $($t: Lift),*
        {
            #[inline]
            fn linear_lift_from_flat(cx: &mut LiftContext<'_>, ty: InterfaceType, _src: &Self::Lower) -> Result<Self> {
                let types = match ty {
                    InterfaceType::Tuple(t) => &cx.types[t].types,
                    _ => bad_type_info(),
                };
                let mut _types = types.iter();
                // Lift each element from its `TupleLower` field in order.
                Ok(($(
                    $t::linear_lift_from_flat(
                        cx,
                        *_types.next().unwrap_or_else(bad_type_info),
                        &_src.$t,
                    )?,
                )*))
            }

            #[inline]
            fn linear_lift_from_memory(cx: &mut LiftContext<'_>, ty: InterfaceType, bytes: &[u8]) -> Result<Self> {
                debug_assert!((bytes.as_ptr() as usize) % (Self::ALIGN32 as usize) == 0);
                let types = match ty {
                    InterfaceType::Tuple(t) => &cx.types[t].types,
                    _ => bad_type_info(),
                };
                let mut _types = types.iter();
                let mut _offset = 0;
                $(
                    let ty = *_types.next().unwrap_or_else(bad_type_info);
                    // Slice out exactly this field's bytes at its aligned
                    // offset within the record layout.
                    let $t = $t::linear_lift_from_memory(cx, ty, &bytes[$t::ABI.next_field32_size(&mut _offset)..][..$t::SIZE32])?;
                )*
                Ok(($($t,)*))
            }
        }

        #[allow(non_snake_case, reason = "macro-generated code")]
        unsafe impl<$($t,)*> ComponentNamedList for ($($t,)*)
        where $($t: ComponentType),*
        {}
    };
}
3140
// Instantiate the tuple implementations above once per supported arity --
// presumably `for_each_function_signature!` invokes the macro with the
// `A1..A17`-style identifier lists that `TupleLower`'s fields mirror.
for_each_function_signature!(impl_component_ty_for_tuples);
3142
3143pub fn desc(ty: &InterfaceType) -> &'static str {
3144 match ty {
3145 InterfaceType::U8 => "u8",
3146 InterfaceType::S8 => "s8",
3147 InterfaceType::U16 => "u16",
3148 InterfaceType::S16 => "s16",
3149 InterfaceType::U32 => "u32",
3150 InterfaceType::S32 => "s32",
3151 InterfaceType::U64 => "u64",
3152 InterfaceType::S64 => "s64",
3153 InterfaceType::Float32 => "f32",
3154 InterfaceType::Float64 => "f64",
3155 InterfaceType::Bool => "bool",
3156 InterfaceType::Char => "char",
3157 InterfaceType::String => "string",
3158 InterfaceType::List(_) => "list",
3159 InterfaceType::Tuple(_) => "tuple",
3160 InterfaceType::Option(_) => "option",
3161 InterfaceType::Result(_) => "result",
3162
3163 InterfaceType::Record(_) => "record",
3164 InterfaceType::Variant(_) => "variant",
3165 InterfaceType::Flags(_) => "flags",
3166 InterfaceType::Enum(_) => "enum",
3167 InterfaceType::Own(_) => "owned resource",
3168 InterfaceType::Borrow(_) => "borrowed resource",
3169 InterfaceType::Future(_) => "future",
3170 InterfaceType::Stream(_) => "stream",
3171 InterfaceType::ErrorContext(_) => "error-context",
3172 InterfaceType::Map(_) => "map",
3173 InterfaceType::FixedLengthList(_) => "list<_, N>",
3174 }
3175}
3176
#[cold]
#[doc(hidden)]
pub fn bad_type_info<T>() -> T {
    // Reaching this function means lowering/lifting was handed type
    // information that doesn't correspond to the Rust type in question, which
    // indicates a bug in the surrounding machinery rather than user error.
    //
    // NB: should consider something like `unreachable_unchecked` here if this
    // becomes a performance bottleneck at some point, but that also comes with
    // a tradeoff of propagating a lot of unsafety, so it may not be worth it.
    panic!("bad type information detected");
}