wasmtime/runtime/vm/
vmcontext.rs

1//! This file declares `VMContext` and several related structs which contain
2//! fields that compiled wasm code accesses directly.
3
4mod vm_host_func_context;
5
6pub use self::vm_host_func_context::VMArrayCallHostFuncContext;
7use crate::runtime::vm::{GcStore, VMGcRef};
8use core::cell::UnsafeCell;
9use core::ffi::c_void;
10use core::fmt;
11use core::marker;
12use core::mem;
13use core::ptr::{self, NonNull};
14use core::sync::atomic::{AtomicUsize, Ordering};
15use sptr::Strict;
16use wasmtime_environ::{
17    BuiltinFunctionIndex, DefinedMemoryIndex, Unsigned, VMSharedTypeIndex, WasmHeapTopType,
18    WasmValType, VMCONTEXT_MAGIC,
19};
20
/// A function pointer that exposes the array calling convention.
///
/// Regardless of the underlying Wasm function type, all functions using the
/// array calling convention have the same Rust signature.
///
/// Arguments:
///
/// * Callee `vmctx` for the function itself.
///
/// * Caller's `vmctx` (so that host functions can access the linear memory of
///   their Wasm callers).
///
/// * A pointer to a buffer of `ValRaw`s where both arguments are passed into
///   this function, and where results are returned from this function.
///
/// * The capacity of the `ValRaw` buffer. Must always be at least
///   `max(len(wasm_params), len(wasm_results))`.
pub type VMArrayCallFunction =
    // (callee vmctx, caller vmctx, args/results buffer, buffer capacity)
    unsafe extern "C" fn(*mut VMOpaqueContext, *mut VMOpaqueContext, *mut ValRaw, usize);
40
/// A function pointer that exposes the Wasm calling convention.
///
/// In practice, different Wasm function types end up mapping to different Rust
/// function types, so this isn't simply a type alias the way that
/// `VMArrayCallFunction` is. However, the exact details of the calling
/// convention are left to the Wasm compiler (e.g. Cranelift or Winch). Runtime
/// code never does anything with these function pointers except shuffle them
/// around and pass them back to Wasm.
// `repr(transparent)` gives this the exact layout of its single
// `VMFunctionBody` field; it is only ever used behind a pointer.
#[repr(transparent)]
pub struct VMWasmCallFunction(VMFunctionBody);
51
/// An imported function.
///
/// Layout is `repr(C)` and must agree with the offsets computed by
/// `wasmtime_environ::VMOffsets` (asserted by the test module below), since
/// compiled wasm code reads these fields directly.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMFunctionImport {
    /// Function pointer to use when calling this imported function from Wasm.
    pub wasm_call: NonNull<VMWasmCallFunction>,

    /// Function pointer to use when calling this imported function with the
    /// "array" calling convention that `Func::new` et al use.
    pub array_call: VMArrayCallFunction,

    /// The VM state associated with this function.
    ///
    /// For Wasm functions defined by core wasm instances this will be `*mut
    /// VMContext`, but for lifted/lowered component model functions this will
    /// be a `VMComponentContext`, and for a host function it will be a
    /// `VMHostFuncContext`, etc.
    pub vmctx: *mut VMOpaqueContext,
}

// Declare that this type is send/sync, it's the responsibility of users of
// `VMFunctionImport` to uphold this guarantee.
unsafe impl Send for VMFunctionImport {}
unsafe impl Sync for VMFunctionImport {}
76
#[cfg(test)]
mod test_vmfunction_import {
    use super::VMFunctionImport;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{Module, VMOffsets};

    /// The Rust compiler's layout of `VMFunctionImport` must agree with the
    /// layout that `VMOffsets` computes for compiled code.
    #[test]
    fn check_vmfunction_import_offsets() {
        let module = Module::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);

        let expected_size = usize::from(offsets.size_of_vmfunction_import());
        assert_eq!(size_of::<VMFunctionImport>(), expected_size);

        let wasm_call = usize::from(offsets.vmfunction_import_wasm_call());
        assert_eq!(offset_of!(VMFunctionImport, wasm_call), wasm_call);

        let array_call = usize::from(offsets.vmfunction_import_array_call());
        assert_eq!(offset_of!(VMFunctionImport, array_call), array_call);

        let vmctx = usize::from(offsets.vmfunction_import_vmctx());
        assert_eq!(offset_of!(VMFunctionImport, vmctx), vmctx);
    }
}
106
/// A placeholder byte-sized type which is just used to provide some amount of type
/// safety when dealing with pointers to JIT-compiled function bodies. Note that it's
/// deliberately not Copy, as we shouldn't be carelessly copying function body bytes
/// around.
// Single `u8` field; the size of exactly 1 is asserted in the test below.
#[repr(C)]
pub struct VMFunctionBody(u8);
113
#[cfg(test)]
mod test_vmfunction_body {
    use super::VMFunctionBody;
    use std::mem::size_of;

    /// `VMFunctionBody` is a placeholder for raw code bytes, so it must be
    /// exactly one byte in size.
    #[test]
    fn check_vmfunction_body_offsets() {
        let body_size = size_of::<VMFunctionBody>();
        assert_eq!(body_size, 1);
    }
}
124
/// The fields compiled code needs to access to utilize a WebAssembly table
/// imported from another instance.
///
/// Layout is `repr(C)` and is validated against `VMOffsets` in the test
/// module below.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMTableImport {
    /// A pointer to the imported table description.
    pub from: *mut VMTableDefinition,

    /// A pointer to the `VMContext` that owns the table description.
    pub vmctx: *mut VMContext,
}

// Declare that this type is send/sync, it's the responsibility of users of
// `VMTableImport` to uphold this guarantee.
unsafe impl Send for VMTableImport {}
unsafe impl Sync for VMTableImport {}
141
#[cfg(test)]
mod test_vmtable_import {
    use super::VMTableImport;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{Module, VMOffsets};

    /// The Rust layout of `VMTableImport` must agree with the offsets that
    /// `VMOffsets` computes for compiled code.
    #[test]
    fn check_vmtable_import_offsets() {
        let module = Module::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);

        let expected_size = usize::from(offsets.size_of_vmtable_import());
        assert_eq!(size_of::<VMTableImport>(), expected_size);

        let from = usize::from(offsets.vmtable_import_from());
        assert_eq!(offset_of!(VMTableImport, from), from);

        let vmctx = usize::from(offsets.vmtable_import_vmctx());
        assert_eq!(offset_of!(VMTableImport, vmctx), vmctx);
    }
}
167
/// The fields compiled code needs to access to utilize a WebAssembly linear
/// memory imported from another instance.
///
/// Layout is `repr(C)` and is validated against `VMOffsets` in the test
/// module below.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMMemoryImport {
    /// A pointer to the imported memory description.
    pub from: *mut VMMemoryDefinition,

    /// A pointer to the `VMContext` that owns the memory description.
    pub vmctx: *mut VMContext,

    /// The index of the memory in the containing `vmctx`.
    pub index: DefinedMemoryIndex,
}

// Declare that this type is send/sync, it's the responsibility of users of
// `VMMemoryImport` to uphold this guarantee.
unsafe impl Send for VMMemoryImport {}
unsafe impl Sync for VMMemoryImport {}
187
#[cfg(test)]
mod test_vmmemory_import {
    use super::VMMemoryImport;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{Module, VMOffsets};

    /// The Rust layout of `VMMemoryImport` must agree with the offsets that
    /// `VMOffsets` computes for compiled code.
    ///
    /// NOTE(review): the offset of the `index` field is not asserted here —
    /// confirm whether `VMOffsets` exposes a corresponding accessor.
    #[test]
    fn check_vmmemory_import_offsets() {
        let module = Module::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);

        let expected_size = usize::from(offsets.size_of_vmmemory_import());
        assert_eq!(size_of::<VMMemoryImport>(), expected_size);

        let from = usize::from(offsets.vmmemory_import_from());
        assert_eq!(offset_of!(VMMemoryImport, from), from);

        let vmctx = usize::from(offsets.vmmemory_import_vmctx());
        assert_eq!(offset_of!(VMMemoryImport, vmctx), vmctx);
    }
}
213
/// The fields compiled code needs to access to utilize a WebAssembly global
/// variable imported from another instance.
///
/// Note that unlike with functions, tables, and memories, `VMGlobalImport`
/// doesn't include a `vmctx` pointer. Globals are never resized, and don't
/// require a `vmctx` pointer to access.
///
/// Layout is `repr(C)` and is validated against `VMOffsets` in the test
/// module below.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMGlobalImport {
    /// A pointer to the imported global variable description.
    pub from: *mut VMGlobalDefinition,
}

// Declare that this type is send/sync, it's the responsibility of users of
// `VMGlobalImport` to uphold this guarantee.
unsafe impl Send for VMGlobalImport {}
unsafe impl Sync for VMGlobalImport {}
231
#[cfg(test)]
mod test_vmglobal_import {
    use super::VMGlobalImport;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{Module, VMOffsets};

    /// The Rust layout of `VMGlobalImport` must agree with the offsets that
    /// `VMOffsets` computes for compiled code.
    #[test]
    fn check_vmglobal_import_offsets() {
        let module = Module::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);

        let expected_size = usize::from(offsets.size_of_vmglobal_import());
        assert_eq!(size_of::<VMGlobalImport>(), expected_size);

        let from = usize::from(offsets.vmglobal_import_from());
        assert_eq!(offset_of!(VMGlobalImport, from), from);
    }
}
253
/// The fields compiled code needs to access to utilize a WebAssembly linear
/// memory defined within the instance, namely the start address and the
/// size in bytes.
///
/// Layout is `repr(C)` and is validated against `VMOffsets` in the test
/// module below.
#[derive(Debug)]
#[repr(C)]
pub struct VMMemoryDefinition {
    /// The start address.
    pub base: *mut u8,

    /// The current logical size of this linear memory in bytes.
    ///
    /// This is atomic because shared memories must be able to grow their length
    /// atomically. For relaxed access, see
    /// [`VMMemoryDefinition::current_length()`].
    pub current_length: AtomicUsize,
}
270
impl VMMemoryDefinition {
    /// Return the current length (in bytes) of the [`VMMemoryDefinition`] by
    /// performing a relaxed load; do not use this function for situations in
    /// which a precise length is needed. Owned memories (i.e., non-shared) will
    /// always return a precise result (since no concurrent modification is
    /// possible) but shared memories may see an imprecise value--a
    /// `current_length` potentially smaller than what some other thread
    /// observes. Since Wasm memory only grows, this under-estimation may be
    /// acceptable in certain cases.
    pub fn current_length(&self) -> usize {
        self.current_length.load(Ordering::Relaxed)
    }

    /// Return a copy of the [`VMMemoryDefinition`] using the relaxed value of
    /// `current_length`; see [`VMMemoryDefinition::current_length()`].
    ///
    /// # Safety
    ///
    /// `ptr` must be non-null, properly aligned, and point to a live
    /// `VMMemoryDefinition` for the duration of this call.
    pub unsafe fn load(ptr: *mut Self) -> Self {
        let other = &*ptr;
        VMMemoryDefinition {
            base: other.base,
            // Relaxed read of the length; see `current_length()` above.
            current_length: other.current_length().into(),
        }
    }
}
294
#[cfg(test)]
mod test_vmmemory_definition {
    use super::VMMemoryDefinition;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{Module, PtrSize, VMOffsets};

    /// The Rust layout of `VMMemoryDefinition` must agree with the offsets
    /// that `VMOffsets` computes for compiled code.
    #[test]
    fn check_vmmemory_definition_offsets() {
        let module = Module::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);

        let expected_size = usize::from(offsets.ptr.size_of_vmmemory_definition());
        assert_eq!(size_of::<VMMemoryDefinition>(), expected_size);

        let base = usize::from(offsets.ptr.vmmemory_definition_base());
        assert_eq!(offset_of!(VMMemoryDefinition, base), base);

        let current_length = usize::from(offsets.ptr.vmmemory_definition_current_length());
        assert_eq!(offset_of!(VMMemoryDefinition, current_length), current_length);

        /* TODO: Assert that the size of `current_length` matches.
        assert_eq!(
            size_of::<VMMemoryDefinition::current_length>(),
            usize::from(offsets.size_of_vmmemory_definition_current_length())
        );
        */
    }
}
326
/// The fields compiled code needs to access to utilize a WebAssembly table
/// defined within the instance.
///
/// Layout is `repr(C)` and is validated against `VMOffsets` in the test
/// module below.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMTableDefinition {
    /// Pointer to the table data.
    pub base: *mut u8,

    /// The current number of elements in the table.
    // NOTE(review): stored as a `u32` rather than `usize` — presumably to
    // match the 32-bit index range used by compiled code; confirm.
    pub current_elements: u32,
}
338
#[cfg(test)]
mod test_vmtable_definition {
    use super::VMTableDefinition;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{Module, VMOffsets};

    /// The Rust layout of `VMTableDefinition` must agree with the offsets
    /// that `VMOffsets` computes for compiled code.
    #[test]
    fn check_vmtable_definition_offsets() {
        let module = Module::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);

        let expected_size = usize::from(offsets.size_of_vmtable_definition());
        assert_eq!(size_of::<VMTableDefinition>(), expected_size);

        let base = usize::from(offsets.vmtable_definition_base());
        assert_eq!(offset_of!(VMTableDefinition, base), base);

        let current_elements = usize::from(offsets.vmtable_definition_current_elements());
        assert_eq!(offset_of!(VMTableDefinition, current_elements), current_elements);
    }
}
364
/// The storage for a WebAssembly global defined within the instance.
///
/// TODO: Pack the globals more densely, rather than using the same size
/// for every type.
#[derive(Debug)]
#[repr(C, align(16))]
pub struct VMGlobalDefinition {
    // 16 bytes of raw storage, reinterpreted by the typed accessors in the
    // `impl` below; `align(16)` makes it suitably aligned for every value
    // type up to `u128`/v128 (asserted by the alignment test below).
    storage: [u8; 16],
    // If more elements are added here, remember to add offset_of tests below!
}
375
#[cfg(test)]
mod test_vmglobal_definition {
    use super::VMGlobalDefinition;
    use std::mem::{align_of, size_of};
    use wasmtime_environ::{Module, PtrSize, VMOffsets};

    /// The global's storage must be aligned at least as strictly as every
    /// type it can be reinterpreted as.
    #[test]
    fn check_vmglobal_definition_alignment() {
        let align = align_of::<VMGlobalDefinition>();
        assert!(align >= align_of::<i32>());
        assert!(align >= align_of::<i64>());
        assert!(align >= align_of::<f32>());
        assert!(align >= align_of::<f64>());
        assert!(align >= align_of::<[u8; 16]>());
    }

    /// The Rust size of `VMGlobalDefinition` must agree with `VMOffsets`.
    #[test]
    fn check_vmglobal_definition_offsets() {
        let module = Module::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        let expected_size = usize::from(offsets.ptr.size_of_vmglobal_definition());
        assert_eq!(size_of::<VMGlobalDefinition>(), expected_size);
    }

    /// Globals in the vmctx must start on a 16-byte boundary so that the
    /// struct's `align(16)` requirement holds.
    #[test]
    fn check_vmglobal_begins_aligned() {
        let module = Module::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(offsets.vmctx_globals_begin() % 16, 0);
    }

    /// A GC reference must fit within the global's inline storage.
    #[test]
    #[cfg(feature = "gc")]
    fn check_vmglobal_can_contain_gc_ref() {
        assert!(size_of::<crate::runtime::vm::VMGcRef>() <= size_of::<VMGlobalDefinition>());
    }
}
414
impl VMGlobalDefinition {
    /// Construct a `VMGlobalDefinition`.
    ///
    /// The 16 bytes of storage start zeroed.
    pub fn new() -> Self {
        Self { storage: [0; 16] }
    }

    /// Create a `VMGlobalDefinition` from a `ValRaw`.
    ///
    /// # Unsafety
    ///
    /// This raw value's type must match the given `WasmValType`.
    pub unsafe fn from_val_raw(wasm_ty: WasmValType, raw: ValRaw) -> Self {
        let mut global = Self::new();
        match wasm_ty {
            WasmValType::I32 => *global.as_i32_mut() = raw.get_i32(),
            WasmValType::I64 => *global.as_i64_mut() = raw.get_i64(),
            // Floats are stored via their raw bit patterns (`u32`/`u64`).
            WasmValType::F32 => *global.as_f32_bits_mut() = raw.get_f32(),
            WasmValType::F64 => *global.as_f64_bits_mut() = raw.get_f64(),
            WasmValType::V128 => *global.as_u128_mut() = raw.get_v128(),
            // Reference types dispatch on the top of the heap-type hierarchy.
            WasmValType::Ref(r) => match r.heap_type.top() {
                WasmHeapTopType::Extern => {
                    global.init_gc_ref(VMGcRef::from_raw_u32(raw.get_externref()))
                }
                WasmHeapTopType::Any => global.init_gc_ref(VMGcRef::from_raw_u32(raw.get_anyref())),
                WasmHeapTopType::Func => *global.as_func_ref_mut() = raw.get_funcref().cast(),
            },
        }
        global
    }

    /// Get this global's value as a `ValRaw`.
    ///
    /// # Unsafety
    ///
    /// This global's value's type must match the given `WasmValType`.
    pub unsafe fn to_val_raw(&self, gc_store: &mut GcStore, wasm_ty: WasmValType) -> ValRaw {
        match wasm_ty {
            WasmValType::I32 => ValRaw::i32(*self.as_i32()),
            WasmValType::I64 => ValRaw::i64(*self.as_i64()),
            WasmValType::F32 => ValRaw::f32(*self.as_f32_bits()),
            WasmValType::F64 => ValRaw::f64(*self.as_f64_bits()),
            WasmValType::V128 => ValRaw::v128(*self.as_u128()),
            WasmValType::Ref(r) => match r.heap_type.top() {
                // GC refs are cloned through the store before being handed
                // out as raw u32s; an absent ref becomes 0.
                WasmHeapTopType::Extern => ValRaw::externref(
                    self.as_gc_ref()
                        .map_or(0, |r| gc_store.clone_gc_ref(r).as_raw_u32()),
                ),
                WasmHeapTopType::Any => ValRaw::anyref(
                    self.as_gc_ref()
                        .map_or(0, |r| gc_store.clone_gc_ref(r).as_raw_u32()),
                ),
                WasmHeapTopType::Func => ValRaw::funcref(self.as_func_ref().cast()),
            },
        }
    }

    // The accessors below reinterpret the raw `storage` bytes as a particular
    // value representation. They are `unsafe` because the caller must ensure
    // the global actually holds a value of the accessed type (see the
    // `from_val_raw`/`to_val_raw` contracts above).

    /// Return a reference to the value as an i32.
    pub unsafe fn as_i32(&self) -> &i32 {
        &*(self.storage.as_ref().as_ptr().cast::<i32>())
    }

    /// Return a mutable reference to the value as an i32.
    pub unsafe fn as_i32_mut(&mut self) -> &mut i32 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<i32>())
    }

    /// Return a reference to the value as a u32.
    pub unsafe fn as_u32(&self) -> &u32 {
        &*(self.storage.as_ref().as_ptr().cast::<u32>())
    }

    /// Return a mutable reference to the value as an u32.
    pub unsafe fn as_u32_mut(&mut self) -> &mut u32 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<u32>())
    }

    /// Return a reference to the value as an i64.
    pub unsafe fn as_i64(&self) -> &i64 {
        &*(self.storage.as_ref().as_ptr().cast::<i64>())
    }

    /// Return a mutable reference to the value as an i64.
    pub unsafe fn as_i64_mut(&mut self) -> &mut i64 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<i64>())
    }

    /// Return a reference to the value as an u64.
    pub unsafe fn as_u64(&self) -> &u64 {
        &*(self.storage.as_ref().as_ptr().cast::<u64>())
    }

    /// Return a mutable reference to the value as an u64.
    pub unsafe fn as_u64_mut(&mut self) -> &mut u64 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<u64>())
    }

    /// Return a reference to the value as an f32.
    pub unsafe fn as_f32(&self) -> &f32 {
        &*(self.storage.as_ref().as_ptr().cast::<f32>())
    }

    /// Return a mutable reference to the value as an f32.
    pub unsafe fn as_f32_mut(&mut self) -> &mut f32 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<f32>())
    }

    /// Return a reference to the value as f32 bits.
    pub unsafe fn as_f32_bits(&self) -> &u32 {
        &*(self.storage.as_ref().as_ptr().cast::<u32>())
    }

    /// Return a mutable reference to the value as f32 bits.
    pub unsafe fn as_f32_bits_mut(&mut self) -> &mut u32 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<u32>())
    }

    /// Return a reference to the value as an f64.
    pub unsafe fn as_f64(&self) -> &f64 {
        &*(self.storage.as_ref().as_ptr().cast::<f64>())
    }

    /// Return a mutable reference to the value as an f64.
    pub unsafe fn as_f64_mut(&mut self) -> &mut f64 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<f64>())
    }

    /// Return a reference to the value as f64 bits.
    pub unsafe fn as_f64_bits(&self) -> &u64 {
        &*(self.storage.as_ref().as_ptr().cast::<u64>())
    }

    /// Return a mutable reference to the value as f64 bits.
    pub unsafe fn as_f64_bits_mut(&mut self) -> &mut u64 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<u64>())
    }

    /// Return a reference to the value as an u128.
    pub unsafe fn as_u128(&self) -> &u128 {
        &*(self.storage.as_ref().as_ptr().cast::<u128>())
    }

    /// Return a mutable reference to the value as an u128.
    pub unsafe fn as_u128_mut(&mut self) -> &mut u128 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<u128>())
    }

    /// Return a reference to the value as u128 bits.
    pub unsafe fn as_u128_bits(&self) -> &[u8; 16] {
        &*(self.storage.as_ref().as_ptr().cast::<[u8; 16]>())
    }

    /// Return a mutable reference to the value as u128 bits.
    pub unsafe fn as_u128_bits_mut(&mut self) -> &mut [u8; 16] {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<[u8; 16]>())
    }

    /// Return a reference to the global value as a borrowed GC reference.
    pub unsafe fn as_gc_ref(&self) -> Option<&VMGcRef> {
        let raw_ptr = self.storage.as_ref().as_ptr().cast::<Option<VMGcRef>>();
        let ret = (*raw_ptr).as_ref();
        // Without the `gc` feature a global can never hold a GC reference.
        assert!(cfg!(feature = "gc") || ret.is_none());
        ret
    }

    /// Initialize a global to the given GC reference.
    pub unsafe fn init_gc_ref(&mut self, gc_ref: Option<VMGcRef>) {
        // Without the `gc` feature only a `None` ref may be stored.
        assert!(cfg!(feature = "gc") || gc_ref.is_none());
        let raw_ptr = self.storage.as_mut().as_mut_ptr().cast::<Option<VMGcRef>>();
        // `ptr::write` avoids dropping whatever bytes were previously here;
        // this is initialization, not assignment.
        ptr::write(raw_ptr, gc_ref);
    }

    /// Write a GC reference into this global value.
    pub unsafe fn write_gc_ref(&mut self, gc_store: &mut GcStore, gc_ref: Option<&VMGcRef>) {
        assert!(cfg!(feature = "gc") || gc_ref.is_none());

        let dest = &mut *(self.storage.as_mut().as_mut_ptr().cast::<Option<VMGcRef>>());
        assert!(cfg!(feature = "gc") || dest.is_none());

        // The store handles releasing the old ref and retaining the new one.
        gc_store.write_gc_ref(dest, gc_ref)
    }

    /// Return a reference to the value as a `VMFuncRef`.
    pub unsafe fn as_func_ref(&self) -> *mut VMFuncRef {
        *(self.storage.as_ref().as_ptr().cast::<*mut VMFuncRef>())
    }

    /// Return a mutable reference to the value as a `VMFuncRef`.
    pub unsafe fn as_func_ref_mut(&mut self) -> &mut *mut VMFuncRef {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<*mut VMFuncRef>())
    }
}
606
#[cfg(test)]
mod test_vmshared_type_index {
    use super::VMSharedTypeIndex;
    use std::mem::size_of;
    use wasmtime_environ::{Module, VMOffsets};

    /// The engine-wide type index must have the size `VMOffsets` expects.
    #[test]
    fn check_vmshared_type_index() {
        let module = Module::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        let expected_size = usize::from(offsets.size_of_vmshared_type_index());
        assert_eq!(size_of::<VMSharedTypeIndex>(), expected_size);
    }
}
623
/// The VM caller-checked "funcref" record, for caller-side signature checking.
///
/// It consists of function pointer(s), a type id to be checked by the
/// caller, and the vmctx closure associated with this function.
///
/// Layout is `repr(C)` and is validated against `VMOffsets` in the test
/// module below.
#[derive(Debug, Clone)]
#[repr(C)]
pub struct VMFuncRef {
    /// Function pointer for this funcref if being called via the "array"
    /// calling convention that `Func::new` et al use.
    pub array_call: VMArrayCallFunction,

    /// Function pointer for this funcref if being called via the calling
    /// convention we use when compiling Wasm.
    ///
    /// Most functions come with a function pointer that we can use when they
    /// are called from Wasm. The notable exception is when we `Func::wrap` a
    /// host function, and we don't have a Wasm compiler on hand to compile a
    /// Wasm-to-native trampoline for the function. In this case, we leave
    /// `wasm_call` empty until the function is passed as an import to Wasm (or
    /// otherwise exposed to Wasm via tables/globals). At this point, we look up
    /// a Wasm-to-native trampoline for the function in the Wasm's compiled
    /// module and use that fill in `VMFunctionImport::wasm_call`. **However**
    /// there is no guarantee that the Wasm module has a trampoline for this
    /// function's signature. The Wasm module only has trampolines for its
    /// types, and if this function isn't of one of those types, then the Wasm
    /// module will not have a trampoline for it. This is actually okay, because
    /// it means that the Wasm cannot actually call this function. But it does
    /// mean that this field needs to be an `Option` even though it is non-null
    /// the vast vast vast majority of the time.
    pub wasm_call: Option<NonNull<VMWasmCallFunction>>,

    /// Function signature's type id.
    pub type_index: VMSharedTypeIndex,

    /// The VM state associated with this function.
    ///
    /// The actual definition of what this pointer points to depends on the
    /// function being referenced: for core Wasm functions, this is a `*mut
    /// VMContext`, for host functions it is a `*mut VMHostFuncContext`, and for
    /// component functions it is a `*mut VMComponentContext`.
    pub vmctx: *mut VMOpaqueContext,
    // If more elements are added here, remember to add offset_of tests below!
}

// Declare that this type is send/sync, it's the responsibility of users of
// `VMFuncRef` to uphold this guarantee (same pattern as the import structs
// above).
unsafe impl Send for VMFuncRef {}
unsafe impl Sync for VMFuncRef {}
670
#[cfg(test)]
mod test_vm_func_ref {
    use super::VMFuncRef;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{Module, PtrSize, VMOffsets};

    /// The Rust layout of `VMFuncRef` must agree with the offsets that
    /// `VMOffsets` computes for compiled code.
    #[test]
    fn check_vm_func_ref_offsets() {
        let module = Module::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);

        let expected_size = usize::from(offsets.ptr.size_of_vm_func_ref());
        assert_eq!(size_of::<VMFuncRef>(), expected_size);

        let array_call = usize::from(offsets.ptr.vm_func_ref_array_call());
        assert_eq!(offset_of!(VMFuncRef, array_call), array_call);

        let wasm_call = usize::from(offsets.ptr.vm_func_ref_wasm_call());
        assert_eq!(offset_of!(VMFuncRef, wasm_call), wasm_call);

        let type_index = usize::from(offsets.ptr.vm_func_ref_type_index());
        assert_eq!(offset_of!(VMFuncRef, type_index), type_index);

        let vmctx = usize::from(offsets.ptr.vm_func_ref_vmctx());
        assert_eq!(offset_of!(VMFuncRef, vmctx), vmctx);
    }
}
704
/// Helper macro, invoked via `foreach_builtin_function!` below, which defines
/// `VMBuiltinFunctionsArray`: one function-pointer field per builtin libcall,
/// plus an `INIT` constant wiring each slot to `libcalls::raw`.
macro_rules! define_builtin_array {
    (
        $(
            $( #[$attr:meta] )*
            $name:ident( $( $pname:ident: $param:ident ),* ) $( -> $result:ident )?;
        )*
    ) => {
        /// An array that stores addresses of builtin functions. We translate code
        /// to use indirect calls. This way, we don't have to patch the code.
        #[repr(C)]
        pub struct VMBuiltinFunctionsArray {
            $(
                $name: unsafe extern "C" fn(
                    $(define_builtin_array!(@ty $param)),*
                ) $( -> define_builtin_array!(@ty $result))?,
            )*
        }

        impl VMBuiltinFunctionsArray {
            #[allow(unused_doc_comments)]
            pub const INIT: VMBuiltinFunctionsArray = VMBuiltinFunctionsArray {
                $(
                    $name: crate::runtime::vm::libcalls::raw::$name,
                )*
            };
        }
    };

    // Map the wasm-level type names used in builtin signatures to the Rust
    // types used in the raw libcall ABI.
    (@ty i32) => (u32);
    (@ty i64) => (u64);
    (@ty reference) => (u32);
    (@ty pointer) => (*mut u8);
    (@ty vmctx) => (*mut VMContext);
}

wasmtime_environ::foreach_builtin_function!(define_builtin_array);

// Every builtin occupies exactly one function-pointer-sized slot, so the
// struct as a whole must be `num_builtins * size_of::<usize>()` bytes.
const _: () = {
    assert!(
        mem::size_of::<VMBuiltinFunctionsArray>()
            == mem::size_of::<usize>()
                * (BuiltinFunctionIndex::builtin_functions_total_number() as usize)
    )
};
749
/// Structure used to control interrupting wasm code.
///
/// Fields are `UnsafeCell` because they are read and written directly by
/// trampolines and compiled code (see the per-field docs below).
#[derive(Debug)]
#[repr(C)]
pub struct VMRuntimeLimits {
    /// Current stack limit of the wasm module.
    ///
    /// For more information see `crates/cranelift/src/lib.rs`.
    pub stack_limit: UnsafeCell<usize>,

    /// Indicator of how much fuel has been consumed and is remaining to
    /// WebAssembly.
    ///
    /// This field is typically negative and increments towards positive. Upon
    /// turning positive a wasm trap will be generated. This field is only
    /// modified if wasm is configured to consume fuel.
    pub fuel_consumed: UnsafeCell<i64>,

    /// Deadline epoch for interruption: if epoch-based interruption
    /// is enabled and the global (per engine) epoch counter is
    /// observed to reach or exceed this value, the guest code will
    /// yield if running asynchronously.
    pub epoch_deadline: UnsafeCell<u64>,

    /// The value of the frame pointer register when we last called from Wasm to
    /// the host.
    ///
    /// Maintained by our Wasm-to-host trampoline, and cleared just before
    /// calling into Wasm in `catch_traps`.
    ///
    /// This member is `0` when Wasm is actively running and has not called out
    /// to the host.
    ///
    /// Used to find the start of a contiguous sequence of Wasm frames when
    /// walking the stack.
    pub last_wasm_exit_fp: UnsafeCell<usize>,

    /// The last Wasm program counter before we called from Wasm to the host.
    ///
    /// Maintained by our Wasm-to-host trampoline, and cleared just before
    /// calling into Wasm in `catch_traps`.
    ///
    /// This member is `0` when Wasm is actively running and has not called out
    /// to the host.
    ///
    /// Used when walking a contiguous sequence of Wasm frames.
    pub last_wasm_exit_pc: UnsafeCell<usize>,

    /// The last host stack pointer before we called into Wasm from the host.
    ///
    /// Maintained by our host-to-Wasm trampoline, and cleared just before
    /// calling into Wasm in `catch_traps`.
    ///
    /// This member is `0` when Wasm is actively running and has not called out
    /// to the host.
    ///
    /// When a host function is wrapped into a `wasmtime::Func`, and is then
    /// called from the host, then this member has the sentinel value of `-1 as
    /// usize`, meaning that this contiguous sequence of Wasm frames is the
    /// empty sequence, and it is not safe to dereference the
    /// `last_wasm_exit_fp`.
    ///
    /// Used to find the end of a contiguous sequence of Wasm frames when
    /// walking the stack.
    pub last_wasm_entry_sp: UnsafeCell<usize>,
}

// The `VMRuntimeLimits` type is a pod-type with no destructor, and we don't
// access any fields from other threads, so add in these trait impls which are
// otherwise not available due to the `fuel_consumed` and `epoch_deadline`
// variables in `VMRuntimeLimits`.
unsafe impl Send for VMRuntimeLimits {}
unsafe impl Sync for VMRuntimeLimits {}
822
823impl Default for VMRuntimeLimits {
824    fn default() -> VMRuntimeLimits {
825        VMRuntimeLimits {
826            stack_limit: UnsafeCell::new(usize::max_value()),
827            fuel_consumed: UnsafeCell::new(0),
828            epoch_deadline: UnsafeCell::new(0),
829            last_wasm_exit_fp: UnsafeCell::new(0),
830            last_wasm_exit_pc: UnsafeCell::new(0),
831            last_wasm_entry_sp: UnsafeCell::new(0),
832        }
833    }
834}
835
#[cfg(test)]
mod test_vmruntime_limits {
    use super::VMRuntimeLimits;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{Module, PtrSize, VMOffsets};

    /// Assert that the native Rust layout of `VMRuntimeLimits` agrees with
    /// the field offsets that `wasmtime_environ` computes for compiled code.
    #[test]
    fn field_offsets() {
        let module = Module::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);

        // Checks that the Rust-side offset of `$field` matches the offset
        // that compiled code will use via the `$accessor` method.
        macro_rules! check {
            ($field:ident, $accessor:ident) => {
                assert_eq!(
                    offset_of!(VMRuntimeLimits, $field),
                    usize::from(offsets.ptr.$accessor()),
                );
            };
        }

        check!(stack_limit, vmruntime_limits_stack_limit);
        check!(fuel_consumed, vmruntime_limits_fuel_consumed);
        check!(epoch_deadline, vmruntime_limits_epoch_deadline);
        check!(last_wasm_exit_fp, vmruntime_limits_last_wasm_exit_fp);
        check!(last_wasm_exit_pc, vmruntime_limits_last_wasm_exit_pc);
        check!(last_wasm_entry_sp, vmruntime_limits_last_wasm_entry_sp);
    }
}
873
/// The VM "context", which is pointed to by the `vmctx` arg in Cranelift.
/// This has information about globals, memories, tables, and other runtime
/// state associated with the current instance.
///
/// The struct here is empty, as the sizes of these fields are dynamic, and
/// we can't describe them in Rust's type system. Sufficient memory is
/// allocated at runtime.
#[derive(Debug)]
#[repr(C, align(16))] // align 16 since globals are aligned to that and contained inside
pub struct VMContext {
    /// There's some more discussion about this within `wasmtime/src/lib.rs` but
    /// the idea is that we want to tell the compiler that this contains
    /// pointers which transitively refer back to itself, to suppress some
    /// optimizations that might otherwise assume this doesn't exist.
    ///
    /// The self-referential pointer we care about is the `*mut Store` pointer
    /// early on in this context, which if you follow through enough levels of
    /// nesting, eventually can refer back to this `VMContext`.
    pub _marker: marker::PhantomPinned,
}
894
impl VMContext {
    /// Helper function to cast between context types using a debug assertion to
    /// protect against some mistakes.
    ///
    /// # Safety
    ///
    /// The `opaque` pointer must be valid to read from and must actually
    /// point at a context whose concrete type is `VMContext` (this function
    /// dereferences it to check the leading magic value in debug builds).
    #[inline]
    pub unsafe fn from_opaque(opaque: *mut VMOpaqueContext) -> *mut VMContext {
        // Note that in general the offset of the "magic" field is stored in
        // `VMOffsets::vmctx_magic`. Given though that this is a sanity check
        // about converting this pointer to another type we ideally don't want
        // to read the offset from potentially corrupt memory. Instead it would
        // be better to catch errors here as soon as possible.
        //
        // To accomplish this the `VMContext` structure is laid out with the
        // magic field at a statically known offset (here it's 0 for now). This
        // static offset is asserted in `VMOffsets::from` and needs to be kept
        // in sync with this line for this debug assertion to work.
        //
        // Also note that this magic is only ever invalid in the presence of
        // bugs, meaning we don't actually read the magic and act differently
        // at runtime depending what it is, so this is a debug assertion as
        // opposed to a regular assertion.
        debug_assert_eq!((*opaque).magic, VMCONTEXT_MAGIC);
        opaque.cast()
    }
}
919
/// A "raw" and unsafe representation of a WebAssembly value.
///
/// This is provided for use with the `Func::new_unchecked` and
/// `Func::call_unchecked` APIs. In general it's unlikely you should be using
/// this from Rust, rather using APIs like `Func::wrap` and `TypedFunc::call`.
///
/// This is notably an "unsafe" way to work with `Val` and it's recommended to
/// instead use `Val` where possible. An important note about this union is that
/// fields are all stored in little-endian format, regardless of the endianness
/// of the host system.
#[allow(missing_docs)]
#[repr(C)]
#[derive(Copy, Clone)]
pub union ValRaw {
    /// A WebAssembly `i32` value.
    ///
    /// Note that the payload here is a Rust `i32` but the WebAssembly `i32`
    /// type does not assign an interpretation of the upper bit as either signed
    /// or unsigned. The Rust type `i32` is simply chosen for convenience.
    ///
    /// This value is always stored in a little-endian format.
    i32: i32,

    /// A WebAssembly `i64` value.
    ///
    /// Note that the payload here is a Rust `i64` but the WebAssembly `i64`
    /// type does not assign an interpretation of the upper bit as either signed
    /// or unsigned. The Rust type `i64` is simply chosen for convenience.
    ///
    /// This value is always stored in a little-endian format.
    i64: i64,

    /// A WebAssembly `f32` value.
    ///
    /// Note that the payload here is a Rust `u32`. This is to allow passing any
    /// representation of NaN into WebAssembly without risk of changing NaN
    /// payload bits as it gets passed around the system. Otherwise though this
    /// `u32` value is the return value of `f32::to_bits` in Rust.
    ///
    /// This value is always stored in a little-endian format.
    f32: u32,

    /// A WebAssembly `f64` value.
    ///
    /// Note that the payload here is a Rust `u64`. This is to allow passing any
    /// representation of NaN into WebAssembly without risk of changing NaN
    /// payload bits as it gets passed around the system. Otherwise though this
    /// `u64` value is the return value of `f64::to_bits` in Rust.
    ///
    /// This value is always stored in a little-endian format.
    f64: u64,

    /// A WebAssembly `v128` value.
    ///
    /// The payload here is a Rust `[u8; 16]` which has the same number of bits
    /// but note that `v128` in WebAssembly is often considered a vector type
    /// such as `i32x4` or `f64x2`. This means that the actual interpretation
    /// of the underlying bits is left up to the instructions which consume
    /// this value.
    ///
    /// This value is always stored in a little-endian format.
    v128: [u8; 16],

    /// A WebAssembly `funcref` value (or one of its subtypes).
    ///
    /// The payload here is a pointer which is runtime-defined. This is one of
    /// the main points of unsafety about the `ValRaw` type as the validity of
    /// the pointer here is not easily verified and must be preserved by
    /// carefully calling the correct functions throughout the runtime.
    ///
    /// This value is always stored in a little-endian format.
    funcref: *mut c_void,

    /// A WebAssembly `externref` value (or one of its subtypes).
    ///
    /// The payload here is a compressed pointer value which is
    /// runtime-defined. This is one of the main points of unsafety about the
    /// `ValRaw` type as the validity of the pointer here is not easily verified
    /// and must be preserved by carefully calling the correct functions
    /// throughout the runtime.
    ///
    /// This value is always stored in a little-endian format.
    externref: u32,

    /// A WebAssembly `anyref` value (or one of its subtypes).
    ///
    /// The payload here is a compressed pointer value which is
    /// runtime-defined. This is one of the main points of unsafety about the
    /// `ValRaw` type as the validity of the pointer here is not easily verified
    /// and must be preserved by carefully calling the correct functions
    /// throughout the runtime.
    ///
    /// This value is always stored in a little-endian format.
    anyref: u32,
}
1015
// The `ValRaw` type is matched as `wasmtime_val_raw_t` in the C API so these
// are some simple assertions about the shape of the type which are additionally
// matched in C. These are evaluated at compile time, so a layout mismatch is
// a build error rather than a runtime failure.
const _: () = {
    assert!(mem::size_of::<ValRaw>() == 16);
    assert!(mem::align_of::<ValRaw>() == mem::align_of::<u64>());
};
1023
// SAFETY: This type is just a bag-of-bits with no interior mutability or
// destructor, so it's up to the caller to figure out how to safely deal with
// threading concerns and safely access interior bits.
unsafe impl Send for ValRaw {}
unsafe impl Sync for ValRaw {}
1028
1029impl fmt::Debug for ValRaw {
1030    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1031        struct Hex<T>(T);
1032        impl<T: fmt::LowerHex> fmt::Debug for Hex<T> {
1033            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1034                let bytes = mem::size_of::<T>();
1035                let hex_digits_per_byte = 2;
1036                let hex_digits = bytes * hex_digits_per_byte;
1037                write!(f, "0x{:0width$x}", self.0, width = hex_digits)
1038            }
1039        }
1040
1041        unsafe {
1042            f.debug_struct("ValRaw")
1043                .field("i32", &Hex(self.i32))
1044                .field("i64", &Hex(self.i64))
1045                .field("f32", &Hex(self.f32))
1046                .field("f64", &Hex(self.f64))
1047                .field("v128", &Hex(u128::from_le_bytes(self.v128)))
1048                .field("funcref", &self.funcref)
1049                .field("externref", &Hex(self.externref))
1050                .field("anyref", &Hex(self.anyref))
1051                .finish()
1052        }
1053    }
1054}
1055
1056impl ValRaw {
1057    /// Create a null reference that is compatible with any of
1058    /// `{any,extern,func}ref`.
1059    pub fn null() -> ValRaw {
1060        unsafe {
1061            let raw = mem::MaybeUninit::<Self>::zeroed().assume_init();
1062            debug_assert_eq!(raw.get_anyref(), 0);
1063            debug_assert_eq!(raw.get_externref(), 0);
1064            debug_assert_eq!(raw.get_funcref(), ptr::null_mut());
1065            raw
1066        }
1067    }
1068
1069    /// Creates a WebAssembly `i32` value
1070    #[inline]
1071    pub fn i32(i: i32) -> ValRaw {
1072        // Note that this is intentionally not setting the `i32` field, instead
1073        // setting the `i64` field with a zero-extended version of `i`. For more
1074        // information on this see the comments on `Lower for Result` in the
1075        // `wasmtime` crate. Otherwise though all `ValRaw` constructors are
1076        // otherwise constrained to guarantee that the initial 64-bits are
1077        // always initialized.
1078        ValRaw::u64(i.unsigned().into())
1079    }
1080
1081    /// Creates a WebAssembly `i64` value
1082    #[inline]
1083    pub fn i64(i: i64) -> ValRaw {
1084        ValRaw { i64: i.to_le() }
1085    }
1086
1087    /// Creates a WebAssembly `i32` value
1088    #[inline]
1089    pub fn u32(i: u32) -> ValRaw {
1090        // See comments in `ValRaw::i32` for why this is setting the upper
1091        // 32-bits as well.
1092        ValRaw::u64(i.into())
1093    }
1094
1095    /// Creates a WebAssembly `i64` value
1096    #[inline]
1097    pub fn u64(i: u64) -> ValRaw {
1098        ValRaw::i64(i as i64)
1099    }
1100
1101    /// Creates a WebAssembly `f32` value
1102    #[inline]
1103    pub fn f32(i: u32) -> ValRaw {
1104        // See comments in `ValRaw::i32` for why this is setting the upper
1105        // 32-bits as well.
1106        ValRaw::u64(i.into())
1107    }
1108
1109    /// Creates a WebAssembly `f64` value
1110    #[inline]
1111    pub fn f64(i: u64) -> ValRaw {
1112        ValRaw { f64: i.to_le() }
1113    }
1114
1115    /// Creates a WebAssembly `v128` value
1116    #[inline]
1117    pub fn v128(i: u128) -> ValRaw {
1118        ValRaw {
1119            v128: i.to_le_bytes(),
1120        }
1121    }
1122
1123    /// Creates a WebAssembly `funcref` value
1124    #[inline]
1125    pub fn funcref(i: *mut c_void) -> ValRaw {
1126        ValRaw {
1127            funcref: Strict::map_addr(i, |i| i.to_le()),
1128        }
1129    }
1130
1131    /// Creates a WebAssembly `externref` value
1132    #[inline]
1133    pub fn externref(e: u32) -> ValRaw {
1134        assert!(cfg!(feature = "gc") || e == 0);
1135        ValRaw {
1136            externref: e.to_le(),
1137        }
1138    }
1139
1140    /// Creates a WebAssembly `anyref` value
1141    #[inline]
1142    pub fn anyref(r: u32) -> ValRaw {
1143        assert!(cfg!(feature = "gc") || r == 0);
1144        ValRaw { anyref: r.to_le() }
1145    }
1146
1147    /// Gets the WebAssembly `i32` value
1148    #[inline]
1149    pub fn get_i32(&self) -> i32 {
1150        unsafe { i32::from_le(self.i32) }
1151    }
1152
1153    /// Gets the WebAssembly `i64` value
1154    #[inline]
1155    pub fn get_i64(&self) -> i64 {
1156        unsafe { i64::from_le(self.i64) }
1157    }
1158
1159    /// Gets the WebAssembly `i32` value
1160    #[inline]
1161    pub fn get_u32(&self) -> u32 {
1162        self.get_i32().unsigned()
1163    }
1164
1165    /// Gets the WebAssembly `i64` value
1166    #[inline]
1167    pub fn get_u64(&self) -> u64 {
1168        self.get_i64().unsigned()
1169    }
1170
1171    /// Gets the WebAssembly `f32` value
1172    #[inline]
1173    pub fn get_f32(&self) -> u32 {
1174        unsafe { u32::from_le(self.f32) }
1175    }
1176
1177    /// Gets the WebAssembly `f64` value
1178    #[inline]
1179    pub fn get_f64(&self) -> u64 {
1180        unsafe { u64::from_le(self.f64) }
1181    }
1182
1183    /// Gets the WebAssembly `v128` value
1184    #[inline]
1185    pub fn get_v128(&self) -> u128 {
1186        unsafe { u128::from_le_bytes(self.v128) }
1187    }
1188
1189    /// Gets the WebAssembly `funcref` value
1190    #[inline]
1191    pub fn get_funcref(&self) -> *mut c_void {
1192        unsafe { Strict::map_addr(self.funcref, |i| usize::from_le(i)) }
1193    }
1194
1195    /// Gets the WebAssembly `externref` value
1196    #[inline]
1197    pub fn get_externref(&self) -> u32 {
1198        let externref = u32::from_le(unsafe { self.externref });
1199        assert!(cfg!(feature = "gc") || externref == 0);
1200        externref
1201    }
1202
1203    /// Gets the WebAssembly `anyref` value
1204    #[inline]
1205    pub fn get_anyref(&self) -> u32 {
1206        let anyref = u32::from_le(unsafe { self.anyref });
1207        assert!(cfg!(feature = "gc") || anyref == 0);
1208        anyref
1209    }
1210}
1211
/// An "opaque" version of `VMContext` which must be explicitly cast to a
/// target context.
///
/// This context is used to represent that contexts specified in
/// `VMFuncRef` can have any type and don't have an implicit
/// structure. Neither wasmtime nor cranelift-generated code can rely on the
/// structure of an opaque context in general and only the code which configured
/// the context is able to rely on a particular structure. This is because the
/// context pointer configured for `VMFuncRef` is guaranteed to be
/// the first parameter passed.
///
/// Note that Wasmtime currently has a layout where all contexts that are cast
/// to an opaque context start with a 32-bit "magic" which can be used in debug
/// mode to debug-assert that the casts here are correct and have at least a
/// little protection against incorrect casts.
pub struct VMOpaqueContext {
    pub(crate) magic: u32,
    _marker: marker::PhantomPinned,
}
1231
1232impl VMOpaqueContext {
1233    /// Helper function to clearly indicate that casts are desired.
1234    #[inline]
1235    pub fn from_vmcontext(ptr: *mut VMContext) -> *mut VMOpaqueContext {
1236        ptr.cast()
1237    }
1238
1239    /// Helper function to clearly indicate that casts are desired.
1240    #[inline]
1241    pub fn from_vm_array_call_host_func_context(
1242        ptr: *mut VMArrayCallHostFuncContext,
1243    ) -> *mut VMOpaqueContext {
1244        ptr.cast()
1245    }
1246}