use super::sys::DecommitBehavior;
use crate::Engine;
use crate::prelude::*;
use crate::runtime::vm::sys::vm::{self, MemoryImageSource, PageMap, reset_with_pagemap};
use crate::runtime::vm::{
    HostAlignedByteCount, MmapOffset, ModuleMemoryImageSource, host_page_size,
};
use alloc::sync::Arc;
use core::fmt;
use core::ops::Range;
use wasmtime_environ::{DefinedMemoryIndex, MemoryInitialization, Module, PrimaryMap, Tunables};

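/// Backing images for the defined memories of a single module, used to
/// initialize instance memories by mapping rather than copying.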
pub struct ModuleMemoryImages {
    memories: PrimaryMap<DefinedMemoryIndex, Option<Arc<MemoryImage>>>,
}

impl ModuleMemoryImages {
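    /// Returns the image for the given defined memory, if one was built.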
    pub fn get_memory_image(&self, defined_index: DefinedMemoryIndex) -> Option<&Arc<MemoryImage>> {
        self.memories[defined_index].as_ref()
    }
}

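/// One memory's initial-contents image, mappable into a linear memory at a
/// fixed, page-aligned offset.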
pub struct MemoryImage {
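    /// Platform-specific source the image is mapped from (for example an
    /// anonymous memory file or the module's original file on disk).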
    source: MemoryImageSource,

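    /// Length of the image in bytes; always a multiple of the host page size.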
    len: HostAlignedByteCount,

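    /// Offset within `source` at which the image's bytes start (zero when the
    /// source was created from in-memory data).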
    source_offset: u64,

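    /// Page-aligned offset within the linear memory at which the image is
    /// placed.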
    linear_memory_offset: HostAlignedByteCount,

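    /// Keeps the module's wasm data alive so the image bytes can be re-read
    /// when memory contents are reset manually.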
    module_source: Arc<dyn ModuleMemoryImageSource>,

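    /// Offset of the image bytes within `module_source.wasm_data()`.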
    module_source_offset: usize,
}

impl MemoryImage {
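    /// Attempts to build an image for the page-aligned `data_range` of the
    /// module's wasm data, returning `Ok(None)` when no suitable mapping
    /// source is available on this platform.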
    fn new(
        engine: &Engine,
        page_size: u32,
        linear_memory_offset: HostAlignedByteCount,
        module_source: &Arc<impl ModuleMemoryImageSource>,
        data_range: Range<usize>,
    ) -> Result<Option<MemoryImage>> {
        let assert_page_aligned = |val: usize| {
            assert_eq!(val % (page_size as usize), 0);
        };
        let len =
            HostAlignedByteCount::new(data_range.len()).expect("memory image data is page-aligned");

        let data = &module_source.wasm_data()[data_range.clone()];
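        // Unless the embedder forces memfd-style initialization, prefer
        // backing the image directly by the file the module's data was mapped
        // from: the data must lie within that mapping and be page-aligned at
        // both ends.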
        if !engine.config().force_memory_init_memfd {
            if let Some(mmap) = module_source.mmap() {
                let start = mmap.as_ptr() as usize;
                let end = start + mmap.len();
                let data_start = data.as_ptr() as usize;
                let data_end = data_start + data.len();
                assert!(start <= data_start && data_end <= end);
                assert_page_aligned(start);
                assert_page_aligned(data_start);
                assert_page_aligned(data_end);

                #[cfg(feature = "std")]
                if let Some(file) = mmap.original_file() {
                    if let Some(source) = MemoryImageSource::from_file(file) {
                        return Ok(Some(MemoryImage {
                            source,
                            source_offset: u64::try_from(data_start - start).unwrap(),
                            linear_memory_offset,
                            len,
                            module_source: module_source.clone(),
                            module_source_offset: data_range.start,
                        }));
                    }
                }
            }
        }

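        // Otherwise copy the data into anonymous memory (for example a memfd
        // on Linux). If that is also unsupported, no image is built.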
        if let Some(source) = MemoryImageSource::from_data(data)? {
            return Ok(Some(MemoryImage {
                source,
                source_offset: 0,
                linear_memory_offset,
                len,
                module_source: module_source.clone(),
                module_source_offset: data_range.start,
            }));
        }

        Ok(None)
    }

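    /// Maps the image into the linear memory based at `mmap_base`, at the
    /// image's configured offset.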
    unsafe fn map_at(&self, mmap_base: &MmapOffset) -> Result<()> {
        unsafe {
            mmap_base.map_image_at(
                &self.source,
                self.source_offset,
                self.linear_memory_offset,
                self.len,
            )
        }
    }

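    /// Replaces the image's pages in the linear memory starting at `base`
    /// with zeroed memory.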
    unsafe fn remap_as_zeros_at(&self, base: *mut u8) -> Result<()> {
        unsafe {
            self.source.remap_as_zeros_at(
                base.add(self.linear_memory_offset.byte_count()),
                self.len.byte_count(),
            )?;
        }
        Ok(())
    }
}

impl ModuleMemoryImages {
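    /// Builds images for every defined memory of `module` from its wasm data
    /// in `source`, returning `Ok(None)` if the module's memory
    /// initialization cannot be represented as page-aligned static images.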
    pub fn new(
        engine: &Engine,
        module: &Module,
        source: &Arc<impl ModuleMemoryImageSource>,
    ) -> Result<Option<ModuleMemoryImages>> {
        let map = match &module.memory_initialization {
            MemoryInitialization::Static { map } => map,
            _ => return Ok(None),
        };
        let mut memories = PrimaryMap::with_capacity(map.len());
        let page_size = crate::runtime::vm::host_page_size();
        let page_size = u32::try_from(page_size).unwrap();
        for (memory_index, init) in map {
            let defined_memory = match module.defined_memory_index(memory_index) {
                Some(idx) => idx,
                None => return Ok(None),
            };

            let init = match init {
                Some(init) => init,
                None => {
                    memories.push(None);
                    continue;
                }
            };

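            // An initializer that reaches past the memory's minimum size can't
            // be captured in a static image, so give up on images for this
            // module entirely.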
            let data_range = init.data.start as usize..init.data.end as usize;
            if module.memories[memory_index]
                .minimum_byte_size()
                .map_or(false, |mem_initial_len| {
                    init.offset + u64::try_from(data_range.len()).unwrap() > mem_initial_len
                })
            {
                return Ok(None);
            }

            let offset_usize = match usize::try_from(init.offset) {
                Ok(offset) => offset,
                Err(_) => return Ok(None),
            };
            let offset = HostAlignedByteCount::new(offset_usize)
                .expect("memory init offset is a multiple of the host page size");

            let image = match MemoryImage::new(engine, page_size, offset, source, data_range)? {
                Some(image) => image,
                None => return Ok(None),
            };

            let idx = memories.push(Some(Arc::new(image)));
            assert_eq!(idx, defined_memory);
        }

        Ok(Some(ModuleMemoryImages { memories }))
    }
}

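/// A reusable slot of linear memory that a `MemoryImage` can be mapped into,
/// cleared cheaply between uses, and re-instantiated.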
pub struct MemoryImageSlot {
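    /// Location of this slot within its backing mmap.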
    base: MmapOffset,

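    /// Total reserved size of the slot; `accessible` never exceeds this.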
    static_size: usize,

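    /// The image currently mapped into the slot, if any.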
    image: Option<Arc<MemoryImage>>,

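    /// Number of bytes from the start of the slot currently mapped
    /// read/write (always page-aligned).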
    accessible: HostAlignedByteCount,

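    /// Set once the slot has been instantiated; a dirty slot must be cleared
    /// before it can be instantiated again.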
    dirty: bool,
}

impl fmt::Debug for MemoryImageSlot {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("MemoryImageSlot")
            .field("base", &self.base)
            .field("static_size", &self.static_size)
            .field("accessible", &self.accessible)
            .field("dirty", &self.dirty)
            .finish_non_exhaustive()
    }
}

impl MemoryImageSlot {
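    /// Creates a slot at `base` with `accessible` bytes already mapped
    /// read/write out of a total reservation of `static_size` bytes.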
    pub(crate) fn create(
        base: MmapOffset,
        accessible: HostAlignedByteCount,
        static_size: usize,
    ) -> Self {
        MemoryImageSlot {
            base,
            static_size,
            accessible,
            image: None,
            dirty: false,
        }
    }

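    /// Grows the read/write region of the slot to cover at least `size_bytes`
    /// (rounded up to the host page size); never shrinks it.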
    pub(crate) fn set_heap_limit(&mut self, size_bytes: usize) -> Result<()> {
        let size_bytes_aligned = HostAlignedByteCount::new_rounded_up(size_bytes)?;
        assert!(size_bytes <= self.static_size);
        assert!(size_bytes_aligned.byte_count() <= self.static_size);

        if size_bytes_aligned <= self.accessible {
            return Ok(());
        }

        self.set_protection(self.accessible..size_bytes_aligned, true)?;
        self.accessible = size_bytes_aligned;

        Ok(())
    }

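    /// Prepares the slot for a new instantiation: adjusts how much of it is
    /// accessible for `initial_size_bytes`, swaps in `maybe_image` if it
    /// differs from the currently mapped image, and marks the slot dirty.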
    pub(crate) fn instantiate(
        &mut self,
        initial_size_bytes: usize,
        maybe_image: Option<&Arc<MemoryImage>>,
        ty: &wasmtime_environ::Memory,
        tunables: &Tunables,
    ) -> Result<()> {
        assert!(!self.dirty);
        assert!(
            initial_size_bytes <= self.static_size,
            "initial_size_bytes <= self.static_size failed: \
             initial_size_bytes={initial_size_bytes}, self.static_size={}",
            self.static_size
        );
        let initial_size_bytes_page_aligned =
            HostAlignedByteCount::new_rounded_up(initial_size_bytes)?;

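        // Two images count as "the same" only if they are literally the same
        // `Arc` allocation; anything else means the old image must be removed
        // before the new one is mapped.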
        let images_equal = match (self.image.as_ref(), maybe_image) {
            (Some(a), Some(b)) if Arc::ptr_eq(a, b) => true,
            (None, None) => true,
            _ => false,
        };
        if !images_equal {
            self.remove_image()?;
        }

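        // Grow the read/write region if this instantiation needs more pages
        // than are currently accessible.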
        if self.accessible < initial_size_bytes_page_aligned {
            self.set_protection(self.accessible..initial_size_bytes_page_aligned, true)?;
            self.accessible = initial_size_bytes_page_aligned;
        }

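        // Conversely, if more of the slot is accessible than this instance
        // needs, re-protect the excess so stray accesses fault, but only when
        // the configuration relies on faulting for safety (guard pages or
        // elided bounds checks); with explicit bounds checks the extra
        // accessible pages are harmless.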
        let host_page_size_log2 = u8::try_from(host_page_size().ilog2()).unwrap();
        if initial_size_bytes_page_aligned < self.accessible
            && (tunables.memory_guard_size > 0
                || ty.can_elide_bounds_check(tunables, host_page_size_log2))
        {
            self.set_protection(initial_size_bytes_page_aligned..self.accessible, false)?;
            self.accessible = initial_size_bytes_page_aligned;
        }

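        // The accessible region now covers the requested initial size; map the
        // new image into place if it changed.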
        assert!(initial_size_bytes <= self.accessible.byte_count());
        assert!(initial_size_bytes_page_aligned <= self.accessible);
        if !images_equal {
            if let Some(image) = maybe_image.as_ref() {
                assert!(
                    image
                        .linear_memory_offset
                        .checked_add(image.len)
                        .unwrap()
                        .byte_count()
                        <= initial_size_bytes
                );
                if !image.len.is_zero() {
                    unsafe {
                        image.map_at(&self.base)?;
                    }
                }
            }
            self.image = maybe_image.cloned();
        }

        self.dirty = true;

        Ok(())
    }

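    /// Unmaps the current image, if any, replacing its pages with zeroed
    /// memory.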
    pub(crate) fn remove_image(&mut self) -> Result<()> {
        if let Some(image) = &self.image {
            unsafe {
                image.remap_as_zeros_at(self.base.as_mut_ptr())?;
            }
            self.image = None;
        }
        Ok(())
    }

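    /// Resets the slot's memory to its initial contents so it can be reused,
    /// returning the number of bytes that were kept resident rather than
    /// decommitted.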
    #[allow(dead_code, reason = "only used in some cfgs")]
    pub(crate) fn clear_and_remain_ready(
        &mut self,
        pagemap: Option<&PageMap>,
        keep_resident: HostAlignedByteCount,
        decommit: impl FnMut(*mut u8, usize),
    ) -> Result<usize> {
        assert!(self.dirty);

        let bytes_resident =
            unsafe { self.reset_all_memory_contents(pagemap, keep_resident, decommit)? };

        self.dirty = false;
        Ok(bytes_resident)
    }

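    /// Picks the reset strategy based on the platform's decommit behavior:
    /// either wipe the slot with fresh anonymous memory or restore the
    /// original mapping.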
    #[allow(dead_code, reason = "only used in some cfgs")]
    unsafe fn reset_all_memory_contents(
        &mut self,
        pagemap: Option<&PageMap>,
        keep_resident: HostAlignedByteCount,
        decommit: impl FnMut(*mut u8, usize),
    ) -> Result<usize> {
        match vm::decommit_behavior() {
            DecommitBehavior::Zero => {
                self.reset_with_anon_memory()?;
                Ok(0)
            }
            DecommitBehavior::RestoreOriginalMapping => {
                let bytes_resident =
                    unsafe { self.reset_with_original_mapping(pagemap, keep_resident, decommit) };
                Ok(bytes_resident)
            }
        }
    }

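    /// Resets contents on platforms where decommit restores the original
    /// mapping: up to `keep_resident` bytes are rewritten in place (guided by
    /// `pagemap` when available) and the rest is passed to `decommit`;
    /// returns how many bytes stayed resident.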
    #[allow(dead_code, reason = "only used in some cfgs")]
    unsafe fn reset_with_original_mapping(
        &mut self,
        pagemap: Option<&PageMap>,
        keep_resident: HostAlignedByteCount,
        decommit: impl FnMut(*mut u8, usize),
    ) -> usize {
        assert_eq!(
            vm::decommit_behavior(),
            DecommitBehavior::RestoreOriginalMapping
        );

        unsafe {
            return match &self.image {
                Some(image) => reset_with_pagemap(
                    pagemap,
                    self.base.as_mut_ptr(),
                    self.accessible,
                    keep_resident,
                    |region| manually_reset_region(self.base.as_mut_ptr().addr(), image, region),
                    decommit,
                ),

                None => reset_with_pagemap(
                    pagemap,
                    self.base.as_mut_ptr(),
                    self.accessible,
                    keep_resident,
                    |region| region.fill(0),
                    decommit,
                ),
            };
        }

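        // Rebuilds one region of the slot by hand: bytes before the image are
        // zeroed, bytes overlapping the image are copied back from the
        // module's wasm data, and bytes past the image are zeroed as well.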
        fn manually_reset_region(base_addr: usize, image: &MemoryImage, mut region: &mut [u8]) {
            let image_start = image.linear_memory_offset.byte_count();
            let image_end = image_start + image.len.byte_count();
            let mut region_start = region.as_ptr().addr() - base_addr;
            let region_end = region_start + region.len();
            let image_bytes = image.module_source.wasm_data();
            let image_bytes = &image_bytes[image.module_source_offset..][..image.len.byte_count()];

            if let Some(len_before_image) = image_start.checked_sub(region_start) {
                let len = len_before_image.min(region.len());
                let (a, b) = region.split_at_mut(len);
                a.fill(0);
                region = b;
                region_start += len;

                if region.is_empty() {
                    return;
                }
            }

            debug_assert_eq!(region_end - region_start, region.len());
            debug_assert!(region_start >= image_start);

            if let Some(len_in_image) = image_end.checked_sub(region_start) {
                let len = len_in_image.min(region.len());
                let (a, b) = region.split_at_mut(len);
                a.copy_from_slice(&image_bytes[region_start - image_start..][..len]);
                region = b;
                region_start += len;

                if region.is_empty() {
                    return;
                }
            }

            debug_assert_eq!(region_end - region_start, region.len());
            debug_assert!(region_start >= image_end);

            region.fill(0);
        }
    }

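    /// Changes the protection of a page-aligned range of the slot: read/write
    /// when `readwrite` is true, inaccessible otherwise.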
    fn set_protection(&self, range: Range<HostAlignedByteCount>, readwrite: bool) -> Result<()> {
        let len = range
            .end
            .checked_sub(range.start)
            .expect("range.start <= range.end");
        assert!(range.end.byte_count() <= self.static_size);
        if len.is_zero() {
            return Ok(());
        }

        unsafe {
            let start = self.base.as_mut_ptr().add(range.start.byte_count());
            if readwrite {
                vm::expose_existing_mapping(start, len.byte_count())?;
            } else {
                vm::hide_existing_mapping(start, len.byte_count())?;
            }
        }

        Ok(())
    }

    pub(crate) fn has_image(&self) -> bool {
        self.image.is_some()
    }

    #[allow(dead_code, reason = "only used in some cfgs")]
    pub(crate) fn is_dirty(&self) -> bool {
        self.dirty
    }

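    /// Erases the whole slot by mapping fresh anonymous memory over it,
    /// dropping any image and leaving nothing accessible.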
    pub(crate) fn reset_with_anon_memory(&mut self) -> Result<()> {
        if self.static_size == 0 {
            assert!(self.image.is_none());
            assert_eq!(self.accessible, 0);
            return Ok(());
        }

        unsafe {
            vm::erase_existing_mapping(self.base.as_mut_ptr(), self.static_size)?;
        }

        self.image = None;
        self.accessible = HostAlignedByteCount::ZERO;

        Ok(())
    }
}

#[cfg(all(test, target_os = "linux", not(miri)))]
mod test {
    use super::*;
    use crate::runtime::vm::mmap::{AlignedLength, Mmap};
    use crate::runtime::vm::sys::vm::decommit_pages;
    use crate::runtime::vm::{HostAlignedByteCount, MmapVec, host_page_size};
    use std::sync::Arc;
    use wasmtime_environ::{IndexType, Limits, Memory};

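    /// Builds a `MemoryImage` containing `data` (zero-padded to a whole page)
    /// placed `offset` bytes into linear memory.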
    fn create_memfd_with_data(offset: usize, data: &[u8]) -> Result<MemoryImage> {
        let linear_memory_offset =
            HostAlignedByteCount::new(offset).expect("offset is page-aligned");
        let image_len = HostAlignedByteCount::new_rounded_up(data.len()).unwrap();

        let mut source = TestDataSource {
            data: vec![0; image_len.byte_count()],
        };
        source.data[..data.len()].copy_from_slice(data);

        return Ok(MemoryImage {
            source: MemoryImageSource::from_data(data)?.unwrap(),
            len: image_len,
            source_offset: 0,
            linear_memory_offset,
            module_source: Arc::new(source),
            module_source_offset: 0,
        });

        struct TestDataSource {
            data: Vec<u8>,
        }

        impl ModuleMemoryImageSource for TestDataSource {
            fn wasm_data(&self) -> &[u8] {
                &self.data
            }
            fn mmap(&self) -> Option<&MmapVec> {
                None
            }
        }
    }

    fn dummy_memory() -> Memory {
        Memory {
            idx_type: IndexType::I32,
            limits: Limits { min: 0, max: None },
            shared: false,
            page_size_log2: Memory::DEFAULT_PAGE_SIZE_LOG2,
        }
    }

    fn mmap_4mib_inaccessible() -> Arc<Mmap<AlignedLength>> {
        let four_mib = HostAlignedByteCount::new(4 << 20).expect("4 MiB is page aligned");
        Arc::new(Mmap::accessible_reserved(HostAlignedByteCount::ZERO, four_mib).unwrap())
    }

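    /// Runs `f` over a mutable view of `range` within `mmap`; callers must
    /// ensure the range is mapped and accessible.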
    unsafe fn with_slice_mut(
        mmap: &Arc<Mmap<AlignedLength>>,
        range: Range<usize>,
        f: impl FnOnce(&mut [u8]) + 'static,
    ) {
        let ptr = mmap.as_ptr().cast_mut();
        let slice = unsafe {
            core::slice::from_raw_parts_mut(ptr.add(range.start), range.end - range.start)
        };
        f(slice);
    }

    #[test]
    fn instantiate_no_image() {
        let ty = dummy_memory();
        let tunables = Tunables {
            memory_reservation: 4 << 30,
            ..Tunables::default_miri()
        };
        let mmap = mmap_4mib_inaccessible();
        let mut memfd =
            MemoryImageSlot::create(mmap.zero_offset(), HostAlignedByteCount::ZERO, 4 << 20);
        assert!(!memfd.is_dirty());
        memfd.instantiate(64 << 10, None, &ty, &tunables).unwrap();
        assert!(memfd.is_dirty());

        unsafe {
            with_slice_mut(&mmap, 0..65536, |slice| {
                assert_eq!(0, slice[0]);
                assert_eq!(0, slice[65535]);
                slice[1024] = 42;
                assert_eq!(42, slice[1024]);
            });
        }

        memfd.set_heap_limit(128 << 10).unwrap();
        let slice = unsafe { mmap.slice(0..1 << 20) };
        assert_eq!(42, slice[1024]);
        assert_eq!(0, slice[131071]);
        memfd
            .clear_and_remain_ready(None, HostAlignedByteCount::ZERO, |ptr, len| unsafe {
                decommit_pages(ptr, len).unwrap()
            })
            .unwrap();
        assert!(!memfd.is_dirty());
        memfd.instantiate(64 << 10, None, &ty, &tunables).unwrap();
        let slice = unsafe { mmap.slice(0..65536) };
        assert_eq!(0, slice[1024]);
    }

    #[test]
    fn instantiate_image() {
        let page_size = host_page_size();
        let ty = dummy_memory();
        let tunables = Tunables {
            memory_reservation: 4 << 30,
            ..Tunables::default_miri()
        };
        let mmap = mmap_4mib_inaccessible();
        let mut memfd =
            MemoryImageSlot::create(mmap.zero_offset(), HostAlignedByteCount::ZERO, 4 << 20);
        let image = Arc::new(create_memfd_with_data(page_size, &[1, 2, 3, 4]).unwrap());
        memfd
            .instantiate(64 << 10, Some(&image), &ty, &tunables)
            .unwrap();
        assert!(memfd.has_image());

        unsafe {
            with_slice_mut(&mmap, 0..65536, move |slice| {
                assert_eq!(&[1, 2, 3, 4], &slice[page_size..][..4]);
                slice[page_size] = 5;
            });
        }

        memfd
            .clear_and_remain_ready(None, HostAlignedByteCount::ZERO, |ptr, len| unsafe {
                decommit_pages(ptr, len).unwrap()
            })
            .unwrap();
        memfd
            .instantiate(64 << 10, Some(&image), &ty, &tunables)
            .unwrap();
        let slice = unsafe { mmap.slice(0..65536) };
        assert_eq!(&[1, 2, 3, 4], &slice[page_size..][..4]);

        memfd
            .clear_and_remain_ready(None, HostAlignedByteCount::ZERO, |ptr, len| unsafe {
                decommit_pages(ptr, len).unwrap()
            })
            .unwrap();
        memfd.instantiate(64 << 10, None, &ty, &tunables).unwrap();
        assert!(!memfd.has_image());
        let slice = unsafe { mmap.slice(0..65536) };
        assert_eq!(&[0, 0, 0, 0], &slice[page_size..][..4]);

        memfd
            .clear_and_remain_ready(None, HostAlignedByteCount::ZERO, |ptr, len| unsafe {
                decommit_pages(ptr, len).unwrap()
            })
            .unwrap();
        memfd
            .instantiate(64 << 10, Some(&image), &ty, &tunables)
            .unwrap();
        let slice = unsafe { mmap.slice(0..65536) };
        assert_eq!(&[1, 2, 3, 4], &slice[page_size..][..4]);

        let image2 = Arc::new(create_memfd_with_data(page_size, &[10, 11, 12, 13]).unwrap());
        memfd
            .clear_and_remain_ready(None, HostAlignedByteCount::ZERO, |ptr, len| unsafe {
                decommit_pages(ptr, len).unwrap()
            })
            .unwrap();
        memfd
            .instantiate(128 << 10, Some(&image2), &ty, &tunables)
            .unwrap();
        let slice = unsafe { mmap.slice(0..65536) };
        assert_eq!(&[10, 11, 12, 13], &slice[page_size..][..4]);

        memfd
            .clear_and_remain_ready(None, HostAlignedByteCount::ZERO, |ptr, len| unsafe {
                decommit_pages(ptr, len).unwrap()
            })
            .unwrap();
        memfd
            .instantiate(64 << 10, Some(&image), &ty, &tunables)
            .unwrap();
        let slice = unsafe { mmap.slice(0..65536) };
        assert_eq!(&[1, 2, 3, 4], &slice[page_size..][..4]);
    }

    #[test]
    #[cfg(target_os = "linux")]
    fn memset_instead_of_madvise() {
        let page_size = host_page_size();
        let ty = dummy_memory();
        let tunables = Tunables {
            memory_reservation: 100 << 16,
            ..Tunables::default_miri()
        };
        let mmap = mmap_4mib_inaccessible();
        let mut memfd =
            MemoryImageSlot::create(mmap.zero_offset(), HostAlignedByteCount::ZERO, 4 << 20);

        for image_off in [0, page_size, page_size * 2] {
            let image = Arc::new(create_memfd_with_data(image_off, &[1, 2, 3, 4]).unwrap());
            for amt_to_memset in [0, page_size, page_size * 10, 1 << 20, 10 << 20] {
                let amt_to_memset = HostAlignedByteCount::new(amt_to_memset).unwrap();
                memfd
                    .instantiate(64 << 10, Some(&image), &ty, &tunables)
                    .unwrap();
                assert!(memfd.has_image());

                unsafe {
                    with_slice_mut(&mmap, 0..64 << 10, move |slice| {
                        if image_off > 0 {
                            assert_eq!(slice[image_off - 1], 0);
                        }
                        assert_eq!(slice[image_off + 5], 0);
                        assert_eq!(&[1, 2, 3, 4], &slice[image_off..][..4]);
                        slice[image_off] = 5;
                        assert_eq!(&[5, 2, 3, 4], &slice[image_off..][..4]);
                    })
                };

                memfd
                    .clear_and_remain_ready(None, amt_to_memset, |ptr, len| unsafe {
                        decommit_pages(ptr, len).unwrap()
                    })
                    .unwrap();
            }
        }

        for amt_to_memset in [0, page_size, page_size * 10, 1 << 20, 10 << 20] {
            let amt_to_memset = HostAlignedByteCount::new(amt_to_memset).unwrap();
            memfd.instantiate(64 << 10, None, &ty, &tunables).unwrap();

            unsafe {
                with_slice_mut(&mmap, 0..64 << 10, |slice| {
                    for chunk in slice.chunks_mut(1024) {
                        assert_eq!(chunk[0], 0);
                        chunk[0] = 5;
                    }
                });
            }
            memfd
                .clear_and_remain_ready(None, amt_to_memset, |ptr, len| unsafe {
                    decommit_pages(ptr, len).unwrap()
                })
                .unwrap();
        }
    }

    #[test]
    #[cfg(target_os = "linux")]
    fn dynamic() {
        let page_size = host_page_size();
        let ty = dummy_memory();
        let tunables = Tunables {
            memory_reservation: 0,
            memory_reservation_for_growth: 200,
            ..Tunables::default_miri()
        };

        let mmap = mmap_4mib_inaccessible();
        let mut memfd =
            MemoryImageSlot::create(mmap.zero_offset(), HostAlignedByteCount::ZERO, 4 << 20);
        let image = Arc::new(create_memfd_with_data(page_size, &[1, 2, 3, 4]).unwrap());
        let initial = 64 << 10;

        memfd
            .instantiate(initial, Some(&image), &ty, &tunables)
            .unwrap();
        assert!(memfd.has_image());

        unsafe {
            with_slice_mut(&mmap, 0..(64 << 10) + page_size, move |slice| {
                assert_eq!(&[1, 2, 3, 4], &slice[page_size..][..4]);
                slice[page_size] = 5;
                assert_eq!(&[5, 2, 3, 4], &slice[page_size..][..4]);
            });
        }

        memfd
            .clear_and_remain_ready(None, HostAlignedByteCount::ZERO, |ptr, len| unsafe {
                decommit_pages(ptr, len).unwrap()
            })
            .unwrap();
        let slice = unsafe { mmap.slice(0..(64 << 10) + page_size) };
        assert_eq!(&[1, 2, 3, 4], &slice[page_size..][..4]);

        memfd
            .instantiate(initial, Some(&image), &ty, &tunables)
            .unwrap();
        assert_eq!(&[1, 2, 3, 4], &slice[page_size..][..4]);

        memfd.set_heap_limit(initial * 2).unwrap();

        unsafe {
            with_slice_mut(&mmap, 0..(64 << 10) + page_size, move |slice| {
                assert_eq!(&[0, 0], &slice[initial..initial + 2]);
                slice[initial] = 100;
                assert_eq!(&[100, 0], &slice[initial..initial + 2]);
            });
        }

        memfd
            .clear_and_remain_ready(None, HostAlignedByteCount::ZERO, |ptr, len| unsafe {
                decommit_pages(ptr, len).unwrap()
            })
            .unwrap();

        assert_eq!(&[0, 0], &slice[initial..initial + 2]);

        memfd
            .instantiate(initial, Some(&image), &ty, &tunables)
            .unwrap();
        assert_eq!(&[0, 0], &slice[initial..initial + 2]);
        memfd.set_heap_limit(initial * 2).unwrap();

        unsafe {
            with_slice_mut(&mmap, 0..(64 << 10) + page_size, move |slice| {
                assert_eq!(&[0, 0], &slice[initial..initial + 2]);
                slice[initial] = 100;
                assert_eq!(&[100, 0], &slice[initial..initial + 2]);
            });
        }

        memfd
            .clear_and_remain_ready(None, HostAlignedByteCount::ZERO, |ptr, len| unsafe {
                decommit_pages(ptr, len).unwrap()
            })
            .unwrap();

        memfd.instantiate(64 << 10, None, &ty, &tunables).unwrap();
        assert!(!memfd.has_image());
        assert_eq!(&[0, 0, 0, 0], &slice[page_size..][..4]);
        assert_eq!(&[0, 0], &slice[initial..initial + 2]);
    }

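    // Exercises `clear_and_remain_ready` with a `PageMap`: a nine-page slot is
    // instantiated with an image covering pages 3..6 (page 3 + i holds the
    // value i + 1), individual pages are dirtied, and after every reset the
    // whole slot must read back as its pristine contents.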
    #[test]
    fn reset_with_pagemap() {
        let page_size = host_page_size();
        let ty = dummy_memory();
        let tunables = Tunables {
            memory_reservation: 100 << 16,
            ..Tunables::default_miri()
        };
        let mmap = mmap_4mib_inaccessible();
        let mmap_len = page_size * 9;
        let mut memfd =
            MemoryImageSlot::create(mmap.zero_offset(), HostAlignedByteCount::ZERO, mmap_len);
        let pagemap = PageMap::new();
        let pagemap = pagemap.as_ref();

        let mut data = vec![0; 3 * page_size];
        for (i, chunk) in data.chunks_mut(page_size).enumerate() {
            for slot in chunk {
                *slot = u8::try_from(i + 1).unwrap();
            }
        }
        let image = Arc::new(create_memfd_with_data(3 * page_size, &data).unwrap());

        memfd
            .instantiate(mmap_len, Some(&image), &ty, &tunables)
            .unwrap();

        let keep_resident = HostAlignedByteCount::new(mmap_len).unwrap();
        let assert_pristine_after_reset = |memfd: &mut MemoryImageSlot| unsafe {
            memfd
                .clear_and_remain_ready(pagemap, keep_resident, |ptr, len| {
                    decommit_pages(ptr, len).unwrap()
                })
                .unwrap();

            with_slice_mut(&mmap, 0..mmap_len, move |slice| {
                for (i, chunk) in slice.chunks(page_size).enumerate() {
                    let expected = match i {
                        0..3 => 0,
                        3..6 => u8::try_from(i).unwrap() - 2,
                        6..9 => 0,
                        _ => unreachable!(),
                    };
                    for slot in chunk {
                        assert_eq!(*slot, expected);
                    }
                }
            });

            memfd
                .instantiate(mmap_len, Some(&image), &ty, &tunables)
                .unwrap();
            memfd
                .clear_and_remain_ready(pagemap, HostAlignedByteCount::ZERO, |ptr, len| {
                    decommit_pages(ptr, len).unwrap()
                })
                .unwrap();

            memfd
                .instantiate(mmap_len, Some(&image), &ty, &tunables)
                .unwrap();
        };

        let write_page = |_memfd: &mut MemoryImageSlot, page: usize| unsafe {
            with_slice_mut(
                &mmap,
                page * page_size..(page + 1) * page_size,
                move |slice| slice.fill(0xff),
            );
        };

        assert_pristine_after_reset(&mut memfd);

        for i in 0..9 {
            write_page(&mut memfd, i);
            assert_pristine_after_reset(&mut memfd);
        }
        write_page(&mut memfd, 0);
        write_page(&mut memfd, 1);
        assert_pristine_after_reset(&mut memfd);
        write_page(&mut memfd, 1);
        assert_pristine_after_reset(&mut memfd);
        write_page(&mut memfd, 2);
        write_page(&mut memfd, 3);
        assert_pristine_after_reset(&mut memfd);
        write_page(&mut memfd, 3);
        write_page(&mut memfd, 4);
        write_page(&mut memfd, 5);
        assert_pristine_after_reset(&mut memfd);
        write_page(&mut memfd, 0);
        write_page(&mut memfd, 1);
        write_page(&mut memfd, 2);
        assert_pristine_after_reset(&mut memfd);
        write_page(&mut memfd, 0);
        write_page(&mut memfd, 3);
        write_page(&mut memfd, 6);
        assert_pristine_after_reset(&mut memfd);
        write_page(&mut memfd, 2);
        write_page(&mut memfd, 3);
        write_page(&mut memfd, 4);
        write_page(&mut memfd, 5);
        write_page(&mut memfd, 6);
        assert_pristine_after_reset(&mut memfd);
        write_page(&mut memfd, 4);
        write_page(&mut memfd, 5);
        write_page(&mut memfd, 6);
        write_page(&mut memfd, 7);
        assert_pristine_after_reset(&mut memfd);
        write_page(&mut memfd, 4);
        write_page(&mut memfd, 5);
        write_page(&mut memfd, 8);
        assert_pristine_after_reset(&mut memfd);
    }
}