1use crate::stackswitch::*;
33use crate::{RunResult, RuntimeFiberStack};
34use std::boxed::Box;
35use std::cell::Cell;
36use std::io;
37use std::ops::Range;
38use std::ptr;
39use std::sync::atomic::{AtomicUsize, Ordering};
40
/// On unix, fiber stack errors surface directly as I/O errors.
pub type Error = io::Error;
42
/// A stack for a fiber: the usable memory region plus a handle to whatever
/// allocation backs it.
pub struct FiberStack {
    // Lowest usable address of the stack (just above any guard region).
    base: BasePtr,
    // Length in bytes of the usable region starting at `base`.
    len: usize,

    // Owns (or references) the backing allocation, keeping it alive for the
    // lifetime of this stack.
    storage: FiberStackStorage,
}
51
/// Newtype around the raw base pointer so the containing types can be marked
/// `Send`/`Sync`.
struct BasePtr(*mut u8);

// SAFETY: the wrapped pointer is used as a stable address only; all
// dereferencing in this file happens inside `unsafe` blocks that uphold their
// own invariants, so sending/sharing the pointer value itself is sound.
unsafe impl Send for BasePtr {}
unsafe impl Sync for BasePtr {}
56
/// How the memory behind a [`FiberStack`] is owned.
enum FiberStackStorage {
    /// An anonymous mapping owned by this stack (guard page included).
    Mmap(MmapFiberStack),
    /// Caller-owned memory from `from_raw_parts`; the payload is the guard
    /// size in bytes below `base`.
    Unmanaged(usize),
    /// An embedder-provided stack implementation.
    Custom(Box<dyn RuntimeFiberStack>),
}
62
63fn host_page_size() -> usize {
67 static PAGE_SIZE: AtomicUsize = AtomicUsize::new(0);
68
69 return match PAGE_SIZE.load(Ordering::Relaxed) {
70 0 => {
71 let size = unsafe { libc::sysconf(libc::_SC_PAGESIZE).try_into().unwrap() };
72 assert!(size != 0);
73 PAGE_SIZE.store(size, Ordering::Relaxed);
74 size
75 }
76 n => n,
77 };
78}
79
impl FiberStack {
    /// Allocates a new stack with at least `size` usable bytes.
    ///
    /// Under ASAN the stack comes from the pooled allocator in the `asan`
    /// module; otherwise a fresh anonymous mapping with a leading guard page
    /// is created.
    pub fn new(size: usize, zeroed: bool) -> io::Result<Self> {
        let page_size = host_page_size();
        // NOTE(review): `zeroed` is deliberately ignored here — presumably
        // because fresh anonymous mappings are already zero-filled; confirm
        // this also holds for the pooled ASAN path, which reuses stacks.
        let _ = zeroed;

        if cfg!(asan) {
            return Self::from_custom(asan::new_fiber_stack(size)?);
        }
        let stack = MmapFiberStack::new(size)?;

        // Skip the guard page so `base`/`len` describe usable memory only.
        Ok(FiberStack {
            base: BasePtr(stack.mapping_base.wrapping_byte_add(page_size)),
            len: stack.mapping_len - page_size,
            storage: FiberStackStorage::Mmap(stack),
        })
    }

    /// Wraps caller-owned memory as a fiber stack without taking ownership.
    ///
    /// `base` points at the bottom of the whole region: the first
    /// `guard_size` bytes act as the guard area and the following `len` bytes
    /// are the usable stack. Under ASAN the provided memory is ignored and a
    /// pooled stack of `len` bytes is allocated instead.
    ///
    /// # Safety
    ///
    /// `base` must be valid for the entire `guard_size + len` region for as
    /// long as the returned stack is in use.
    pub unsafe fn from_raw_parts(base: *mut u8, guard_size: usize, len: usize) -> io::Result<Self> {
        if cfg!(asan) {
            return Self::from_custom(asan::new_fiber_stack(len)?);
        }
        Ok(FiberStack {
            base: BasePtr(unsafe { base.add(guard_size) }),
            len,
            storage: FiberStackStorage::Unmanaged(guard_size),
        })
    }

    /// Whether this stack was created via [`FiberStack::from_raw_parts`].
    pub fn is_from_raw_parts(&self) -> bool {
        matches!(self.storage, FiberStackStorage::Unmanaged(_))
    }

    /// Wraps an embedder-provided stack, asserting that both ends of its
    /// reported range are page-aligned.
    pub fn from_custom(custom: Box<dyn RuntimeFiberStack>) -> io::Result<Self> {
        let range = custom.range();
        let page_size = host_page_size();
        let start_ptr = range.start as *mut u8;
        assert!(
            start_ptr.align_offset(page_size) == 0,
            "expected fiber stack base ({start_ptr:?}) to be page aligned ({page_size:#x})",
        );
        let end_ptr = range.end as *const u8;
        assert!(
            end_ptr.align_offset(page_size) == 0,
            "expected fiber stack end ({end_ptr:?}) to be page aligned ({page_size:#x})",
        );
        Ok(FiberStack {
            base: BasePtr(start_ptr),
            len: range.len(),
            storage: FiberStackStorage::Custom(custom),
        })
    }

    /// One-past-the-end pointer of the usable region; the stack grows
    /// downward from here (see `Fiber::resume`, which writes just below it).
    pub fn top(&self) -> Option<*mut u8> {
        Some(self.base.0.wrapping_byte_add(self.len))
    }

    /// Usable address range of the stack, guard region excluded.
    pub fn range(&self) -> Option<Range<usize>> {
        let base = self.base.0 as usize;
        Some(base..base + self.len)
    }

    /// The guard region sitting directly below the usable stack.
    pub fn guard_range(&self) -> Option<Range<*mut u8>> {
        match &self.storage {
            // SAFETY: `from_raw_parts` guaranteed `guard_size` bytes directly
            // below `base`.
            FiberStackStorage::Unmanaged(guard_size) => unsafe {
                let start = self.base.0.sub(*guard_size);
                Some(start..self.base.0)
            },
            FiberStackStorage::Mmap(mmap) => Some(mmap.mapping_base..self.base.0),
            FiberStackStorage::Custom(custom) => Some(custom.guard_range()),
        }
    }
}
161
/// An anonymous mapping holding one inaccessible guard page followed by the
/// usable stack memory.
struct MmapFiberStack {
    // Start of the whole mapping, i.e. the guard page.
    mapping_base: *mut u8,
    // Total length of the mapping, guard page included.
    mapping_len: usize,
}

// SAFETY: the mapping is plain memory identified by a stable address; the
// struct performs no thread-affine operations, so moving/sharing it across
// threads is sound.
unsafe impl Send for MmapFiberStack {}
unsafe impl Sync for MmapFiberStack {}
169
170impl MmapFiberStack {
171 fn new(size: usize) -> io::Result<Self> {
172 let page_size = host_page_size();
175 let size = if size == 0 {
176 page_size
177 } else {
178 (size + (page_size - 1)) & (!(page_size - 1))
179 };
180
181 unsafe {
182 let mmap_len = size + page_size;
184 let mmap = rustix::mm::mmap_anonymous(
185 ptr::null_mut(),
186 mmap_len,
187 rustix::mm::ProtFlags::empty(),
188 rustix::mm::MapFlags::PRIVATE,
189 )?;
190
191 rustix::mm::mprotect(
192 mmap.byte_add(page_size),
193 size,
194 rustix::mm::MprotectFlags::READ | rustix::mm::MprotectFlags::WRITE,
195 )?;
196
197 Ok(MmapFiberStack {
198 mapping_base: mmap.cast(),
199 mapping_len: mmap_len,
200 })
201 }
202 }
203}
204
205impl Drop for MmapFiberStack {
206 fn drop(&mut self) {
207 unsafe {
208 let ret = rustix::mm::munmap(self.mapping_base.cast(), self.mapping_len);
209 debug_assert!(ret.is_ok());
210 }
211 }
212}
213
/// Handle to an initialized fiber; all per-fiber state lives on its
/// `FiberStack`, so this handle itself is zero-sized.
pub struct Fiber;
215
/// Suspension context available to code running on a fiber.
pub struct Suspend {
    // Top of this fiber's own stack; used both to locate the shared result
    // cell and as the switch target when yielding back.
    top_of_stack: *mut u8,
    // ASAN bookkeeping for the stack we most recently switched away from
    // (zero-sized when sanitizers are disabled).
    previous: asan::PreviousStack,
}
220
/// Entry trampoline executed on the new stack after the first switch into a
/// fiber; `arg0` is the boxed closure installed by `Fiber::new`.
extern "C" fn fiber_start<F, A, B, C>(arg0: *mut u8, top_of_stack: *mut u8)
where
    F: FnOnce(A, &mut super::Suspend<A, B, C>) -> C,
{
    unsafe {
        // Finish the ASAN half of the switch that got us here and remember
        // which stack control came from so we can switch back.
        let previous = asan::fiber_start_complete();

        let inner = Suspend {
            top_of_stack,
            previous,
        };
        // Pull the initial resume value out of the shared cell, then hand
        // control (and ownership of the boxed closure) to the shared driver.
        let initial = inner.take_resume::<A, B, C>();
        super::Suspend::<A, B, C>::execute(inner, initial, Box::from_raw(arg0.cast::<F>()))
    }
}
238
impl Fiber {
    /// Initializes `stack` so that the first resume runs `func`.
    ///
    /// Returns an error when fibers are unsupported on this architecture.
    pub fn new<F, A, B, C>(stack: &FiberStack, func: F) -> io::Result<Self>
    where
        F: FnOnce(A, &mut super::Suspend<A, B, C>) -> C,
    {
        if !SUPPORTED_ARCH {
            return Err(io::Error::new(
                io::ErrorKind::Other,
                "fibers not supported on this host architecture",
            ));
        }
        // Ownership of the boxed closure transfers to the fiber; it is
        // reclaimed in `fiber_start` via `Box::from_raw`.
        unsafe {
            let data = Box::into_raw(Box::new(func)).cast();
            wasmtime_fiber_init(stack.top().unwrap(), fiber_start::<F, A, B, C>, data);
        }

        Ok(Self)
    }

    /// Switches onto the fiber, making `result` the communication cell for
    /// this resume/yield round-trip.
    pub(crate) fn resume<A, B, C>(&self, stack: &FiberStack, result: &Cell<RunResult<A, B, C>>) {
        unsafe {
            // Stash a pointer to `result` in the word just below the top of
            // the stack; `Suspend::result_location` reads it from there.
            let addr = stack.top().unwrap().cast::<usize>().offset(-1);
            addr.write(result as *const _ as usize);

            asan::fiber_switch(
                stack.top().unwrap(),
                false,
                &mut asan::PreviousStack::new(stack),
            );

            // The fiber yielded back; null the slot so a stale pointer to the
            // (now-dead) cell is never observed.
            addr.write(0);
        }
    }

    // Nothing to release per-fiber on this platform; the closure's box is
    // consumed by `fiber_start` when the fiber runs.
    pub(crate) unsafe fn drop<A, B, C>(&mut self) {}
}
282
impl Suspend {
    /// Publishes `result` for the resumer and switches back to it, returning
    /// the value passed to the next `resume`.
    pub(crate) fn switch<A, B, C>(&mut self, result: RunResult<A, B, C>) -> A {
        unsafe {
            // A terminal result means this fiber will never run again, which
            // the ASAN hooks need to know about.
            let is_finishing = match &result {
                RunResult::Returned(_) | RunResult::Panicked(_) => true,
                RunResult::Executing | RunResult::Resuming(_) | RunResult::Yield(_) => false,
            };
            // Write the result into the shared cell before giving up control.
            (*self.result_location::<A, B, C>()).set(result);

            asan::fiber_switch(self.top_of_stack, is_finishing, &mut self.previous);

            // Control came back: extract the new resume payload.
            self.take_resume::<A, B, C>()
        }
    }

    /// Final switch out of the fiber; a `Returned`/`Panicked` fiber is never
    /// resumed, so control cannot reach past `switch`.
    pub(crate) fn exit<A, B, C>(&mut self, result: RunResult<A, B, C>) {
        self.switch(result);
        unreachable!()
    }

    // Swaps the shared cell's `Resuming` payload for `Executing` and returns
    // the payload; panics if the cell holds any other state (protocol bug).
    unsafe fn take_resume<A, B, C>(&self) -> A {
        unsafe {
            match (*self.result_location::<A, B, C>()).replace(RunResult::Executing) {
                RunResult::Resuming(val) => val,
                _ => panic!("not in resuming state"),
            }
        }
    }

    // Reads the result-cell pointer that `Fiber::resume` stored in the word
    // just below the top of this fiber's stack.
    unsafe fn result_location<A, B, C>(&self) -> *const Cell<RunResult<A, B, C>> {
        unsafe {
            let ret = self.top_of_stack.cast::<*const u8>().offset(-1).read();
            assert!(!ret.is_null());
            ret.cast()
        }
    }
}
321
#[cfg(asan)]
mod asan {
    //! AddressSanitizer support for fibers.
    //!
    //! Every stack switch is bracketed with the `__sanitizer_*_switch_fiber`
    //! hooks, and retired stacks are recycled through a global pool instead
    //! of being unmapped on drop. NOTE(review): the pooling is presumably so
    //! ASAN's records about old frames never point at unmapped memory —
    //! confirm against upstream rationale.
    use super::{FiberStack, MmapFiberStack, RuntimeFiberStack, host_page_size};
    use alloc::boxed::Box;
    use alloc::vec::Vec;
    use std::mem::ManuallyDrop;
    use std::ops::Range;
    use std::sync::Mutex;

    /// The stack a switch departs from, in the shape the sanitizer hooks
    /// expect: bottom pointer plus size in bytes.
    pub struct PreviousStack {
        bottom: *const u8,
        size: usize,
    }

    impl PreviousStack {
        pub fn new(stack: &FiberStack) -> PreviousStack {
            let range = stack.range().unwrap();
            PreviousStack {
                bottom: range.start as *const u8,
                // NOTE(review): the top two pointer-sized words are excluded
                // from the size reported to ASAN; `Fiber::resume` stores the
                // result-cell pointer in one of them — confirm the second
                // reserved word against the layout set up at fiber init.
                size: range.len() - 2 * std::mem::size_of::<*const u8>(),
            }
        }
    }

    impl Default for PreviousStack {
        /// An empty/unknown previous stack, used before the first switch
        /// into a fiber has completed.
        fn default() -> PreviousStack {
            PreviousStack {
                bottom: std::ptr::null(),
                size: 0,
            }
        }
    }

    /// Performs a stack switch bracketed by the sanitizer announcements;
    /// `prev` is updated in place with the stack control returns from.
    pub unsafe fn fiber_switch(
        top_of_stack: *mut u8,
        is_finishing: bool,
        prev: &mut PreviousStack,
    ) {
        assert!(super::SUPPORTED_ARCH);
        let mut private_asan_pointer = std::ptr::null_mut();

        // Passing `None` as the save slot tells ASAN the departing stack is
        // finishing for good, so its fake-stack state should be dropped
        // rather than saved for a later return.
        let private_asan_pointer_ref = if is_finishing {
            None
        } else {
            Some(&mut private_asan_pointer)
        };

        // SAFETY (caller): `top_of_stack` must be the valid, initialized top
        // of a fiber stack.
        unsafe {
            __sanitizer_start_switch_fiber(private_asan_pointer_ref, prev.bottom, prev.size);
            super::wasmtime_fiber_switch(top_of_stack);
            __sanitizer_finish_switch_fiber(private_asan_pointer, &mut prev.bottom, &mut prev.size);
        }
    }

    /// Completes the ASAN half of the switch that started this fiber and
    /// returns the stack control came from.
    pub unsafe fn fiber_start_complete() -> PreviousStack {
        let mut ret = PreviousStack::default();
        unsafe {
            __sanitizer_finish_switch_fiber(std::ptr::null_mut(), &mut ret.bottom, &mut ret.size);
        }
        ret
    }

    // Sanitizer runtime hooks provided by compiler-rt when building with
    // ASAN enabled.
    unsafe extern "C" {
        fn __sanitizer_start_switch_fiber(
            private_asan_pointer_save: Option<&mut *mut u8>,
            bottom: *const u8,
            size: usize,
        );
        fn __sanitizer_finish_switch_fiber(
            private_asan_pointer: *mut u8,
            bottom_old: &mut *const u8,
            size_old: &mut usize,
        );
    }

    // Global pool of retired stack mappings available for reuse.
    static FIBER_STACKS: Mutex<Vec<MmapFiberStack>> = Mutex::new(Vec::new());

    /// Hands out a pooled stack whose mapping is large enough, or maps a
    /// fresh one when none fits.
    pub fn new_fiber_stack(size: usize) -> std::io::Result<Box<dyn RuntimeFiberStack>> {
        let page_size = host_page_size();
        // The mapping must also hold the guard page below the usable region.
        let needed_size = size + page_size;
        let mut stacks = FIBER_STACKS.lock().unwrap();

        let stack = match stacks.iter().position(|i| needed_size <= i.mapping_len) {
            Some(i) => stacks.remove(i),
            None => MmapFiberStack::new(size)?,
        };
        let stack = AsanFiberStack {
            mmap: ManuallyDrop::new(stack),
        };
        Ok(Box::new(stack))
    }

    /// A pooled stack: its mapping is returned to `FIBER_STACKS` on drop
    /// rather than unmapped (hence the `ManuallyDrop`).
    struct AsanFiberStack {
        mmap: ManuallyDrop<MmapFiberStack>,
    }

    unsafe impl RuntimeFiberStack for AsanFiberStack {
        fn top(&self) -> *mut u8 {
            self.mmap
                .mapping_base
                .wrapping_byte_add(self.mmap.mapping_len)
        }

        fn range(&self) -> Range<usize> {
            // Usable range excludes the guard page at the bottom of the
            // mapping.
            let base = self.mmap.mapping_base as usize;
            let end = base + self.mmap.mapping_len;
            base + host_page_size()..end
        }

        fn guard_range(&self) -> Range<*mut u8> {
            // The first page of the mapping is the guard page.
            self.mmap.mapping_base..self.mmap.mapping_base.wrapping_add(host_page_size())
        }
    }

    impl Drop for AsanFiberStack {
        fn drop(&mut self) {
            // SAFETY: `mmap` is taken exactly once here and never touched
            // again; ownership moves into the pool.
            let stack = unsafe { ManuallyDrop::take(&mut self.mmap) };
            FIBER_STACKS.lock().unwrap().push(stack);
        }
    }
}
504
#[cfg(not(asan))]
mod asan_disabled {
    //! No-op stand-ins for the ASAN fiber hooks, used when the sanitizer is
    //! not enabled. These keep the call sites above identical across both
    //! configurations.
    use super::{FiberStack, RuntimeFiberStack};
    use std::boxed::Box;

    // Zero-sized placeholder: without ASAN there is no previous-stack
    // bookkeeping to carry across switches.
    #[derive(Default)]
    pub struct PreviousStack;

    impl PreviousStack {
        #[inline]
        pub fn new(_stack: &FiberStack) -> PreviousStack {
            PreviousStack
        }
    }

    /// Switches stacks directly, with no sanitizer bookkeeping.
    pub unsafe fn fiber_switch(
        top_of_stack: *mut u8,
        _is_finishing: bool,
        _prev: &mut PreviousStack,
    ) {
        assert!(super::SUPPORTED_ARCH);
        // SAFETY (caller): `top_of_stack` must be the valid, initialized top
        // of a fiber stack.
        unsafe {
            super::wasmtime_fiber_switch(top_of_stack);
        }
    }

    // Nothing to complete when no sanitizer is tracking the switch.
    #[inline]
    pub unsafe fn fiber_start_complete() -> PreviousStack {
        PreviousStack
    }

    // Pooled custom stacks exist only in the ASAN configuration, and
    // `FiberStack::new` never calls this without `cfg!(asan)`.
    pub fn new_fiber_stack(_size: usize) -> std::io::Result<Box<dyn RuntimeFiberStack>> {
        unimplemented!()
    }
}
541
542#[cfg(not(asan))]
543use asan_disabled as asan;