1use crate::stackswitch::*;
33use crate::{RunResult, RuntimeFiberStack};
34use std::boxed::Box;
35use std::cell::Cell;
36use std::io;
37use std::ops::Range;
38use std::ptr;
39
/// On this platform all fiber-stack errors surface as plain I/O errors.
pub type Error = io::Error;
41
/// A stack that fibers execute on.
pub struct FiberStack {
    // Lowest address of the *usable* stack region (just above any guard).
    base: BasePtr,
    // Length in bytes of the usable region starting at `base`.
    len: usize,

    // Backing storage; determines cleanup behavior and guard-range reporting.
    storage: FiberStackStorage,
}
50
/// Newtype around the raw stack base pointer so `FiberStack` can be
/// `Send`/`Sync` despite holding a raw pointer.
struct BasePtr(*mut u8);

// SAFETY: the pointer is only used as an address into stack memory owned by
// the containing `FiberStack`'s storage; this newtype itself performs no
// unsynchronized mutation — TODO(review): confirm callers uphold exclusive
// use of the stack while a fiber runs on it.
unsafe impl Send for BasePtr {}
unsafe impl Sync for BasePtr {}
55
/// How the memory behind a `FiberStack` is owned and released.
enum FiberStackStorage {
    // Owned anonymous mapping; unmapped when dropped.
    Mmap(MmapFiberStack),
    // Caller-owned memory from `from_raw_parts`; payload is the guard size
    // in bytes below `base`. Never freed here.
    Unmanaged(usize),
    // User-provided stack implementation (also used for ASan pooling).
    Custom(Box<dyn RuntimeFiberStack>),
}
61
impl FiberStack {
    /// Allocates a new fiber stack with at least `size` usable bytes.
    ///
    /// Under ASan a pooled, sanitizer-aware stack is returned instead of a
    /// fresh mapping. `zeroed` is accepted for cross-platform interface
    /// compatibility and ignored here: anonymous mmap memory is zero-filled
    /// by the OS already.
    pub fn new(size: usize, zeroed: bool) -> io::Result<Self> {
        let _ = zeroed;

        if cfg!(asan) {
            return Self::from_custom(asan::new_fiber_stack(size)?);
        }
        let page_size = rustix::param::page_size();
        let stack = MmapFiberStack::new(size)?;

        Ok(FiberStack {
            // Skip the guard page at the bottom of the mapping.
            base: BasePtr(stack.mapping_base.wrapping_byte_add(page_size)),
            len: stack.mapping_len - page_size,
            storage: FiberStackStorage::Mmap(stack),
        })
    }

    /// Wraps caller-owned memory as a fiber stack; the memory is NOT freed
    /// on drop.
    ///
    /// `base` is the lowest address of the allocation; the first
    /// `guard_size` bytes act as the guard region and the usable stack is
    /// the `len` bytes starting at `base + guard_size`. Under ASan the
    /// provided memory is ignored and a pooled sanitizer-aware stack of
    /// `len` bytes is used instead.
    ///
    /// # Safety
    /// The caller must keep `base .. base + guard_size + len` valid (and
    /// appropriately protected) for the lifetime of the returned stack.
    pub unsafe fn from_raw_parts(base: *mut u8, guard_size: usize, len: usize) -> io::Result<Self> {
        if cfg!(asan) {
            return Self::from_custom(asan::new_fiber_stack(len)?);
        }
        Ok(FiberStack {
            base: BasePtr(base.add(guard_size)),
            len,
            storage: FiberStackStorage::Unmanaged(guard_size),
        })
    }

    /// Whether this stack wraps unowned memory from [`FiberStack::from_raw_parts`].
    pub fn is_from_raw_parts(&self) -> bool {
        matches!(self.storage, FiberStackStorage::Unmanaged(_))
    }

    /// Wraps a user-provided `RuntimeFiberStack` implementation.
    ///
    /// # Panics
    /// Panics if either end of the custom stack's `range()` is not aligned
    /// to the system page size.
    pub fn from_custom(custom: Box<dyn RuntimeFiberStack>) -> io::Result<Self> {
        let range = custom.range();
        let page_size = rustix::param::page_size();
        let start_ptr = range.start as *mut u8;
        assert!(
            start_ptr.align_offset(page_size) == 0,
            "expected fiber stack base ({start_ptr:?}) to be page aligned ({page_size:#x})",
        );
        let end_ptr = range.end as *const u8;
        assert!(
            end_ptr.align_offset(page_size) == 0,
            "expected fiber stack end ({end_ptr:?}) to be page aligned ({page_size:#x})",
        );
        Ok(FiberStack {
            base: BasePtr(start_ptr),
            len: range.len(),
            storage: FiberStackStorage::Custom(custom),
        })
    }

    /// Highest address of the usable stack (one past its last byte).
    /// Always `Some` on this platform; the `Option` matches other backends.
    pub fn top(&self) -> Option<*mut u8> {
        Some(self.base.0.wrapping_byte_add(self.len))
    }

    /// The usable stack region as an address range (`base .. base + len`).
    pub fn range(&self) -> Option<Range<usize>> {
        let base = self.base.0 as usize;
        Some(base..base + self.len)
    }

    /// The guard region sitting immediately below the usable stack, derived
    /// per storage kind.
    pub fn guard_range(&self) -> Option<Range<*mut u8>> {
        match &self.storage {
            FiberStackStorage::Unmanaged(guard_size) => unsafe {
                let start = self.base.0.sub(*guard_size);
                Some(start..self.base.0)
            },
            // For mmap storage the guard is the page(s) between the mapping
            // base and the usable base.
            FiberStackStorage::Mmap(mmap) => Some(mmap.mapping_base..self.base.0),
            FiberStackStorage::Custom(custom) => Some(custom.guard_range()),
        }
    }
}
143
/// An anonymous mmap allocation holding a guard page followed by the
/// usable fiber stack; unmapped on drop.
struct MmapFiberStack {
    // Lowest address of the mapping (start of the guard page).
    mapping_base: *mut u8,
    // Total mapping length in bytes, guard page included.
    mapping_len: usize,
}

// SAFETY: the raw pointer is merely a handle to an exclusively-owned mmap
// region; no unsynchronized shared mutation happens through this type.
unsafe impl Send for MmapFiberStack {}
unsafe impl Sync for MmapFiberStack {}
151
152impl MmapFiberStack {
153 fn new(size: usize) -> io::Result<Self> {
154 let page_size = rustix::param::page_size();
157 let size = if size == 0 {
158 page_size
159 } else {
160 (size + (page_size - 1)) & (!(page_size - 1))
161 };
162
163 unsafe {
164 let mmap_len = size + page_size;
166 let mmap = rustix::mm::mmap_anonymous(
167 ptr::null_mut(),
168 mmap_len,
169 rustix::mm::ProtFlags::empty(),
170 rustix::mm::MapFlags::PRIVATE,
171 )?;
172
173 rustix::mm::mprotect(
174 mmap.byte_add(page_size),
175 size,
176 rustix::mm::MprotectFlags::READ | rustix::mm::MprotectFlags::WRITE,
177 )?;
178
179 Ok(MmapFiberStack {
180 mapping_base: mmap.cast(),
181 mapping_len: mmap_len,
182 })
183 }
184 }
185}
186
187impl Drop for MmapFiberStack {
188 fn drop(&mut self) {
189 unsafe {
190 let ret = rustix::mm::munmap(self.mapping_base.cast(), self.mapping_len);
191 debug_assert!(ret.is_ok());
192 }
193 }
194}
195
/// Handle to an initialized fiber; all actual state lives on the associated
/// `FiberStack`, so this type carries no data.
pub struct Fiber;

/// Capability handed to fiber code that lets it switch back to its resumer.
pub struct Suspend {
    // Top (highest address) of this fiber's stack; the word just below it
    // holds a pointer to the shared `RunResult` cell (see `Fiber::resume`).
    top_of_stack: *mut u8,
    // ASan bookkeeping for the stack we switched away from (zero-sized when
    // ASan is disabled).
    previous: asan::PreviousStack,
}
202
/// Entry point executed the first time a fiber runs.
///
/// `arg0` is the raw `Box<F>` pointer stashed by `Fiber::new`;
/// `top_of_stack` is supplied by the assembly trampoline.
extern "C" fn fiber_start<F, A, B, C>(arg0: *mut u8, top_of_stack: *mut u8)
where
    F: FnOnce(A, &mut super::Suspend<A, B, C>) -> C,
{
    unsafe {
        // Complete the ASan stack switch begun by the resumer, capturing the
        // stack we arrived from so we can later switch back to it.
        let previous = asan::fiber_start_complete();

        let inner = Suspend {
            top_of_stack,
            previous,
        };
        // The initiating resume placed the initial argument in the shared
        // result cell as `RunResult::Resuming`.
        let initial = inner.take_resume::<A, B, C>();
        super::Suspend::<A, B, C>::execute(inner, initial, Box::from_raw(arg0.cast::<F>()))
    }
}
220
impl Fiber {
    /// Prepares `stack` so that the first switch onto it runs `func`.
    ///
    /// The closure is boxed and leaked here; `fiber_start` reclaims it with
    /// `Box::from_raw` when the fiber first executes.
    ///
    /// # Errors
    /// Fails if fibers are not supported on this host architecture.
    pub fn new<F, A, B, C>(stack: &FiberStack, func: F) -> io::Result<Self>
    where
        F: FnOnce(A, &mut super::Suspend<A, B, C>) -> C,
    {
        if !SUPPORTED_ARCH {
            return Err(io::Error::new(
                io::ErrorKind::Other,
                "fibers not supported on this host architecture",
            ));
        }
        unsafe {
            let data = Box::into_raw(Box::new(func)).cast();
            wasmtime_fiber_init(stack.top().unwrap(), fiber_start::<F, A, B, C>, data);
        }

        Ok(Self)
    }

    /// Switches execution onto the fiber living on `stack`.
    ///
    /// Communication happens through `result`: a pointer to it is written
    /// into the word just below the stack top so the fiber side
    /// (`Suspend::result_location`) can find it, and the slot is zeroed
    /// again once the fiber switches back here.
    pub(crate) fn resume<A, B, C>(&self, stack: &FiberStack, result: &Cell<RunResult<A, B, C>>) {
        unsafe {
            // Word just below the top of the stack holds the shared
            // result-cell pointer for the duration of this resume.
            let addr = stack.top().unwrap().cast::<usize>().offset(-1);
            addr.write(result as *const _ as usize);

            asan::fiber_switch(
                stack.top().unwrap(),
                false,
                &mut asan::PreviousStack::new(stack),
            );

            // Clear the slot so no dangling pointer to `result` remains on
            // the stack after this call returns.
            addr.write(0);
        }
    }
}
262
impl Suspend {
    /// Publishes `result` into the shared cell and switches back to the
    /// resumer; once this fiber is resumed again, returns the payload of
    /// the new `Resuming` value found in the cell.
    pub(crate) fn switch<A, B, C>(&mut self, result: RunResult<A, B, C>) -> A {
        unsafe {
            // `Returned`/`Panicked` mean this fiber will never be switched
            // back onto; `is_finishing` tells the ASan shim as much.
            let is_finishing = match &result {
                RunResult::Returned(_) | RunResult::Panicked(_) => true,
                RunResult::Executing | RunResult::Resuming(_) | RunResult::Yield(_) => false,
            };
            (*self.result_location::<A, B, C>()).set(result);

            asan::fiber_switch(self.top_of_stack, is_finishing, &mut self.previous);

            self.take_resume::<A, B, C>()
        }
    }

    /// Takes the pending `Resuming` payload out of the result cell, leaving
    /// `Executing` in its place.
    ///
    /// # Panics
    /// Panics if the cell does not currently hold `RunResult::Resuming`.
    unsafe fn take_resume<A, B, C>(&self) -> A {
        match (*self.result_location::<A, B, C>()).replace(RunResult::Executing) {
            RunResult::Resuming(val) => val,
            _ => panic!("not in resuming state"),
        }
    }

    /// Reads the shared result-cell pointer back out of the word just below
    /// the top of the stack, where `Fiber::resume` stored it.
    unsafe fn result_location<A, B, C>(&self) -> *const Cell<RunResult<A, B, C>> {
        let ret = self.top_of_stack.cast::<*const u8>().offset(-1).read();
        assert!(!ret.is_null());
        ret.cast()
    }
}
292
#[cfg(asan)]
mod asan {
    //! AddressSanitizer (ASan) support for fiber stack switching.
    //!
    //! ASan must be notified of every stack switch through the
    //! `__sanitizer_*_switch_fiber` hooks, and retired stacks are pooled
    //! rather than unmapped so ASan's records about them stay valid.

    use super::{FiberStack, MmapFiberStack, RuntimeFiberStack};
    use rustix::param::page_size;
    // Consistency fix: the rest of this file imports from `std`, so use
    // `std` here as well instead of `alloc`.
    use std::boxed::Box;
    use std::mem::ManuallyDrop;
    use std::ops::Range;
    use std::sync::Mutex;
    use std::vec::Vec;

    /// Description of the stack execution switched away from, in the shape
    /// the sanitizer hooks expect (bottom address + size).
    pub struct PreviousStack {
        bottom: *const u8,
        size: usize,
    }

    impl PreviousStack {
        pub fn new(stack: &FiberStack) -> PreviousStack {
            let range = stack.range().unwrap();
            PreviousStack {
                bottom: range.start as *const u8,
                // Exclude the two pointer-sized words at the very top of the
                // stack from what is reported to ASan; one of them is the
                // result-cell slot written by `Fiber::resume` — TODO(review):
                // confirm the second reserved word against the trampoline.
                size: range.len() - 2 * std::mem::size_of::<*const u8>(),
            }
        }
    }

    impl Default for PreviousStack {
        /// A null/empty record, used on the very first switch into a fiber
        /// when there is no previous fiber stack to describe.
        fn default() -> PreviousStack {
            PreviousStack {
                bottom: std::ptr::null(),
                size: 0,
            }
        }
    }

    /// Switches to the fiber whose stack top is `top_of_stack`, notifying
    /// ASan before and after; `prev` is updated to describe the stack we
    /// eventually come back from.
    pub unsafe fn fiber_switch(
        top_of_stack: *mut u8,
        is_finishing: bool,
        prev: &mut PreviousStack,
    ) {
        assert!(super::SUPPORTED_ARCH);
        let mut private_asan_pointer = std::ptr::null_mut();

        // When this fiber is finishing, pass no save slot: per the sanitizer
        // API a null save pointer signals the current stack is going away.
        let private_asan_pointer_ref = if is_finishing {
            None
        } else {
            Some(&mut private_asan_pointer)
        };

        __sanitizer_start_switch_fiber(private_asan_pointer_ref, prev.bottom, prev.size);
        super::wasmtime_fiber_switch(top_of_stack);
        __sanitizer_finish_switch_fiber(private_asan_pointer, &mut prev.bottom, &mut prev.size);
    }

    /// Completes, from the new fiber's side, the switch begun by the
    /// resumer; returns a description of the stack we arrived from.
    pub unsafe fn fiber_start_complete() -> PreviousStack {
        let mut ret = PreviousStack::default();
        __sanitizer_finish_switch_fiber(std::ptr::null_mut(), &mut ret.bottom, &mut ret.size);
        ret
    }

    // ASan runtime hooks; see the sanitizer's `asan_interface.h`.
    unsafe extern "C" {
        fn __sanitizer_start_switch_fiber(
            private_asan_pointer_save: Option<&mut *mut u8>,
            bottom: *const u8,
            size: usize,
        );
        fn __sanitizer_finish_switch_fiber(
            private_asan_pointer: *mut u8,
            bottom_old: &mut *const u8,
            size_old: &mut usize,
        );
    }

    /// Pool of retired stacks, kept mapped (never returned to the OS) so
    /// ASan's bookkeeping about them remains valid; reused by later fibers.
    static FIBER_STACKS: Mutex<Vec<MmapFiberStack>> = Mutex::new(Vec::new());

    pub fn new_fiber_stack(size: usize) -> std::io::Result<Box<dyn RuntimeFiberStack>> {
        let needed_size = size + page_size();
        let mut stacks = FIBER_STACKS.lock().unwrap();

        // Reuse any pooled stack that is large enough; otherwise map a new one.
        let stack = match stacks.iter().position(|i| needed_size <= i.mapping_len) {
            Some(i) => stacks.remove(i),
            None => MmapFiberStack::new(size)?,
        };
        let stack = AsanFiberStack(ManuallyDrop::new(stack));
        Ok(Box::new(stack))
    }

    /// Wrapper that returns its stack to `FIBER_STACKS` on drop instead of
    /// unmapping it (hence the `ManuallyDrop`).
    struct AsanFiberStack(ManuallyDrop<MmapFiberStack>);

    unsafe impl RuntimeFiberStack for AsanFiberStack {
        fn top(&self) -> *mut u8 {
            self.0.mapping_base.wrapping_byte_add(self.0.mapping_len)
        }

        fn range(&self) -> Range<usize> {
            // The usable range excludes the guard page at the mapping base.
            let base = self.0.mapping_base as usize;
            let end = base + self.0.mapping_len;
            base + page_size()..end
        }

        fn guard_range(&self) -> Range<*mut u8> {
            self.0.mapping_base..self.0.mapping_base.wrapping_add(page_size())
        }
    }

    impl Drop for AsanFiberStack {
        fn drop(&mut self) {
            let stack = unsafe { ManuallyDrop::take(&mut self.0) };
            FIBER_STACKS.lock().unwrap().push(stack);
        }
    }
}
465
466#[cfg(not(asan))]
468mod asan_disabled {
469 use super::{FiberStack, RuntimeFiberStack};
470 use std::boxed::Box;
471
472 #[derive(Default)]
473 pub struct PreviousStack;
474
475 impl PreviousStack {
476 #[inline]
477 pub fn new(_stack: &FiberStack) -> PreviousStack {
478 PreviousStack
479 }
480 }
481
482 pub unsafe fn fiber_switch(
483 top_of_stack: *mut u8,
484 _is_finishing: bool,
485 _prev: &mut PreviousStack,
486 ) {
487 assert!(super::SUPPORTED_ARCH);
488 super::wasmtime_fiber_switch(top_of_stack);
489 }
490
491 #[inline]
492 pub unsafe fn fiber_start_complete() -> PreviousStack {
493 PreviousStack
494 }
495
496 pub fn new_fiber_stack(_size: usize) -> std::io::Result<Box<dyn RuntimeFiberStack>> {
497 unimplemented!()
498 }
499}
500
501#[cfg(not(asan))]
502use asan_disabled as asan;